From 06bb8cb865c840d639475f8605693131aedfbccf Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Tue, 15 Sep 2009 01:00:29 +0000 Subject: [PATCH 01/18] libdispatch-84.5 Imported from libdispatch-84.5.tar.gz --- LICENSE | 202 ++ .../project.pbxproj | 609 ++++ examples/Dispatch Samples/ReadMe.txt | 93 + examples/Dispatch Samples/apply.c | 123 + examples/Dispatch Samples/nWide.c | 127 + examples/Dispatch Samples/netcat.c | 596 ++++ examples/Dispatch Samples/proc.c | 209 ++ examples/Dispatch Samples/readFile.c | 115 + examples/Dispatch Samples/readFileF.c | 117 + examples/Dispatch Samples/timers.c | 85 + examples/DispatchLife/DispatchLife.c | 392 +++ .../DispatchLife.xcodeproj/project.pbxproj | 252 ++ examples/DispatchLife/DispatchLifeGLView.h | 59 + examples/DispatchLife/DispatchLifeGLView.m | 203 ++ .../English.lproj/InfoPlist.strings | Bin 0 -> 92 bytes .../English.lproj/MainMenu.nib/designable.nib | 2651 +++++++++++++++++ .../MainMenu.nib/keyedobjects.nib | Bin 0 -> 19575 bytes examples/DispatchLife/Info.plist | 28 + examples/DispatchLife/ReadMe.txt | 37 + examples/DispatchLife/main.m | 49 + .../DispatchWebServer/DispatchWebServer.c | 956 ++++++ .../project.pbxproj | 203 ++ examples/DispatchWebServer/ReadMe.txt | 44 + libdispatch.xcodeproj/project.pbxproj | 497 +++ man/dispatch.3 | 38 + man/dispatch_after.3 | 57 + man/dispatch_api.3 | 44 + man/dispatch_apply.3 | 80 + man/dispatch_async.3 | 234 ++ man/dispatch_benchmark.3 | 55 + man/dispatch_group_create.3 | 149 + man/dispatch_object.3 | 99 + man/dispatch_once.3 | 44 + man/dispatch_queue_create.3 | 318 ++ man/dispatch_semaphore_create.3 | 114 + man/dispatch_source_create.3 | 456 +++ man/dispatch_time.3 | 110 + src/apply.c | 178 ++ src/base.h | 113 + src/benchmark.c | 114 + src/benchmark.h | 83 + src/dispatch.h | 52 + src/group.h | 273 ++ src/hw_shims.h | 72 + src/internal.h | 299 ++ src/legacy.c | 444 +++ src/legacy.h | 748 +++++ src/object.c | 200 
++ src/object.h | 195 ++ src/object_internal.h | 110 + src/once.c | 104 + src/once.h | 77 + src/os_shims.h | 152 + src/private.h | 114 + src/protocol.defs | 91 + src/queue.c | 2080 +++++++++++++ src/queue.h | 568 ++++ src/queue_internal.h | 136 + src/queue_private.h | 122 + src/semaphore.c | 532 ++++ src/semaphore.h | 112 + src/semaphore_internal.h | 51 + src/shims.c | 65 + src/source.c | 1995 +++++++++++++ src/source.h | 583 ++++ src/source_internal.h | 102 + src/source_private.h | 129 + src/time.c | 183 ++ src/time.h | 113 + 69 files changed, 19635 insertions(+) create mode 100644 LICENSE create mode 100644 examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj create mode 100644 examples/Dispatch Samples/ReadMe.txt create mode 100644 examples/Dispatch Samples/apply.c create mode 100644 examples/Dispatch Samples/nWide.c create mode 100644 examples/Dispatch Samples/netcat.c create mode 100644 examples/Dispatch Samples/proc.c create mode 100644 examples/Dispatch Samples/readFile.c create mode 100644 examples/Dispatch Samples/readFileF.c create mode 100644 examples/Dispatch Samples/timers.c create mode 100644 examples/DispatchLife/DispatchLife.c create mode 100644 examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj create mode 100644 examples/DispatchLife/DispatchLifeGLView.h create mode 100644 examples/DispatchLife/DispatchLifeGLView.m create mode 100644 examples/DispatchLife/English.lproj/InfoPlist.strings create mode 100644 examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib create mode 100644 examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib create mode 100644 examples/DispatchLife/Info.plist create mode 100644 examples/DispatchLife/ReadMe.txt create mode 100644 examples/DispatchLife/main.m create mode 100644 examples/DispatchWebServer/DispatchWebServer.c create mode 100644 examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj create mode 100644 examples/DispatchWebServer/ReadMe.txt create mode 
100644 libdispatch.xcodeproj/project.pbxproj create mode 100644 man/dispatch.3 create mode 100644 man/dispatch_after.3 create mode 100644 man/dispatch_api.3 create mode 100644 man/dispatch_apply.3 create mode 100644 man/dispatch_async.3 create mode 100644 man/dispatch_benchmark.3 create mode 100644 man/dispatch_group_create.3 create mode 100644 man/dispatch_object.3 create mode 100644 man/dispatch_once.3 create mode 100644 man/dispatch_queue_create.3 create mode 100644 man/dispatch_semaphore_create.3 create mode 100644 man/dispatch_source_create.3 create mode 100644 man/dispatch_time.3 create mode 100644 src/apply.c create mode 100644 src/base.h create mode 100644 src/benchmark.c create mode 100644 src/benchmark.h create mode 100644 src/dispatch.h create mode 100644 src/group.h create mode 100644 src/hw_shims.h create mode 100644 src/internal.h create mode 100644 src/legacy.c create mode 100644 src/legacy.h create mode 100644 src/object.c create mode 100644 src/object.h create mode 100644 src/object_internal.h create mode 100644 src/once.c create mode 100644 src/once.h create mode 100644 src/os_shims.h create mode 100644 src/private.h create mode 100644 src/protocol.defs create mode 100644 src/queue.c create mode 100644 src/queue.h create mode 100644 src/queue_internal.h create mode 100644 src/queue_private.h create mode 100644 src/semaphore.c create mode 100644 src/semaphore.h create mode 100644 src/semaphore_internal.h create mode 100644 src/shims.c create mode 100644 src/source.c create mode 100644 src/source.h create mode 100644 src/source_internal.h create mode 100644 src/source_private.h create mode 100644 src/time.c create mode 100644 src/time.h diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj b/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj new file mode 100644 index 000000000..15482f381 --- /dev/null +++ b/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj @@ -0,0 +1,609 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 45; + objects = { + +/* Begin PBXAggregateTarget section */ + 4C96F87F0F8288070051687B /* Samples */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4C96F88F0F8288290051687B /* Build configuration list for PBXAggregateTarget "Samples" */; + buildPhases = ( + ); + dependencies = ( + 4C96F88B0F82881B0051687B /* PBXTargetDependency */, + 4C96F8890F8288190051687B /* PBXTargetDependency */, + 4C96F8870F8288170051687B /* PBXTargetDependency */, + 4C96F8850F8288140051687B /* PBXTargetDependency */, + 4C96F8830F82880E0051687B /* PBXTargetDependency */, + ); + name = Samples; + productName = Samples; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 4CBAB02C0F780242006D97F1 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB02A0F780242006D97F1 /* apply.c */; }; + 4CBAB04C0F7802DA006D97F1 /* netcat.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB04A0F7802DA006D97F1 /* netcat.c */; }; + 4CBAB0530F7802F1006D97F1 /* proc.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0510F7802F1006D97F1 /* proc.c */; }; + 4CBAB0560F780314006D97F1 /* readFile.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0540F780314006D97F1 /* readFile.c */; }; + 4CBAB0590F780327006D97F1 /* timers.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0570F780327006D97F1 /* timers.c */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 4C96F8820F82880E0051687B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4CBAB0240F7801C6006D97F1; + remoteInfo = "dispatch-apply"; + }; + 4C96F8840F8288140051687B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4CBAB0300F780272006D97F1; + remoteInfo = "dispatch-netcat"; + 
}; + 4C96F8860F8288170051687B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4CBAB0370F78028E006D97F1; + remoteInfo = "dispatch-proc"; + }; + 4C96F8880F8288190051687B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4CBAB03E0F7802A6006D97F1; + remoteInfo = "dispatch-readFile"; + }; + 4C96F88A0F82881B0051687B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4CBAB0450F7802BA006D97F1; + remoteInfo = "dispatch-timers"; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 4CBAB0250F7801C6006D97F1 /* dispatch-apply */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-apply"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4CBAB02A0F780242006D97F1 /* apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = apply.c; sourceTree = ""; }; + 4CBAB0310F780272006D97F1 /* dispatch-netcat */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-netcat"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4CBAB0380F78028E006D97F1 /* dispatch-proc */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-proc"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-readFile"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4CBAB0460F7802BA006D97F1 /* dispatch-timers */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex 
= 0; path = "dispatch-timers"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4CBAB04A0F7802DA006D97F1 /* netcat.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = netcat.c; sourceTree = ""; }; + 4CBAB0510F7802F1006D97F1 /* proc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = proc.c; sourceTree = ""; }; + 4CBAB0540F780314006D97F1 /* readFile.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = readFile.c; sourceTree = ""; }; + 4CBAB0570F780327006D97F1 /* timers.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = timers.c; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 4CBAB0230F7801C6006D97F1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB02F0F780272006D97F1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB0360F78028E006D97F1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB03D0F7802A6006D97F1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB0440F7802BA006D97F1 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 08FB7794FE84155DC02AAC07 /* Dispatch Samples */ = { + isa = PBXGroup; + children = ( + 08FB7795FE84155DC02AAC07 /* Source */, + C6A0FF2B0290797F04C91782 /* Documentation */, + 1AB674ADFE9D54B511CA2CBB /* Products */, + ); + name = "Dispatch 
Samples"; + sourceTree = ""; + }; + 08FB7795FE84155DC02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 4CBAB0570F780327006D97F1 /* timers.c */, + 4CBAB0540F780314006D97F1 /* readFile.c */, + 4CBAB0510F7802F1006D97F1 /* proc.c */, + 4CBAB04A0F7802DA006D97F1 /* netcat.c */, + 4CBAB02A0F780242006D97F1 /* apply.c */, + ); + name = Source; + sourceTree = ""; + }; + 1AB674ADFE9D54B511CA2CBB /* Products */ = { + isa = PBXGroup; + children = ( + 4CBAB0250F7801C6006D97F1 /* dispatch-apply */, + 4CBAB0310F780272006D97F1 /* dispatch-netcat */, + 4CBAB0380F78028E006D97F1 /* dispatch-proc */, + 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */, + 4CBAB0460F7802BA006D97F1 /* dispatch-timers */, + ); + name = Products; + sourceTree = ""; + }; + C6A0FF2B0290797F04C91782 /* Documentation */ = { + isa = PBXGroup; + children = ( + ); + name = Documentation; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 4CBAB0240F7801C6006D97F1 /* dispatch-apply */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4CBAB0290F7801E5006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-apply" */; + buildPhases = ( + 4CBAB0220F7801C6006D97F1 /* Sources */, + 4CBAB0230F7801C6006D97F1 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "dispatch-apply"; + productName = "dispatch-apply"; + productReference = 4CBAB0250F7801C6006D97F1 /* dispatch-apply */; + productType = "com.apple.product-type.tool"; + }; + 4CBAB0300F780272006D97F1 /* dispatch-netcat */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4CBAB04D0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-netcat" */; + buildPhases = ( + 4CBAB02E0F780272006D97F1 /* Sources */, + 4CBAB02F0F780272006D97F1 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "dispatch-netcat"; + productName = "dispatch-netcat"; + productReference = 4CBAB0310F780272006D97F1 /* dispatch-netcat */; + productType = 
"com.apple.product-type.tool"; + }; + 4CBAB0370F78028E006D97F1 /* dispatch-proc */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4CBAB04E0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-proc" */; + buildPhases = ( + 4CBAB0350F78028E006D97F1 /* Sources */, + 4CBAB0360F78028E006D97F1 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "dispatch-proc"; + productName = "dispatch-proc"; + productReference = 4CBAB0380F78028E006D97F1 /* dispatch-proc */; + productType = "com.apple.product-type.tool"; + }; + 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4CBAB04F0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-readFile" */; + buildPhases = ( + 4CBAB03C0F7802A6006D97F1 /* Sources */, + 4CBAB03D0F7802A6006D97F1 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "dispatch-readFile"; + productName = "dispatch-readFile"; + productReference = 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */; + productType = "com.apple.product-type.tool"; + }; + 4CBAB0450F7802BA006D97F1 /* dispatch-timers */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4CBAB0500F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-timers" */; + buildPhases = ( + 4CBAB0430F7802BA006D97F1 /* Sources */, + 4CBAB0440F7802BA006D97F1 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "dispatch-timers"; + productName = "dispatch-timers"; + productReference = 4CBAB0460F7802BA006D97F1 /* dispatch-timers */; + productType = "com.apple.product-type.tool"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 08FB7793FE84155DC02AAC07 /* Project object */ = { + isa = PBXProject; + buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "Dispatch Samples" */; + compatibilityVersion = "Xcode 3.1"; + hasScannedForEncodings = 1; + mainGroup = 
08FB7794FE84155DC02AAC07 /* Dispatch Samples */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 4C96F87F0F8288070051687B /* Samples */, + 4CBAB0240F7801C6006D97F1 /* dispatch-apply */, + 4CBAB0300F780272006D97F1 /* dispatch-netcat */, + 4CBAB0370F78028E006D97F1 /* dispatch-proc */, + 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */, + 4CBAB0450F7802BA006D97F1 /* dispatch-timers */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXSourcesBuildPhase section */ + 4CBAB0220F7801C6006D97F1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CBAB02C0F780242006D97F1 /* apply.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB02E0F780272006D97F1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CBAB04C0F7802DA006D97F1 /* netcat.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB0350F78028E006D97F1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CBAB0530F7802F1006D97F1 /* proc.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB03C0F7802A6006D97F1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CBAB0560F780314006D97F1 /* readFile.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4CBAB0430F7802BA006D97F1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CBAB0590F780327006D97F1 /* timers.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 4C96F8830F82880E0051687B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4CBAB0240F7801C6006D97F1 /* dispatch-apply */; + targetProxy = 4C96F8820F82880E0051687B /* PBXContainerItemProxy */; + }; + 4C96F8850F8288140051687B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + 
target = 4CBAB0300F780272006D97F1 /* dispatch-netcat */; + targetProxy = 4C96F8840F8288140051687B /* PBXContainerItemProxy */; + }; + 4C96F8870F8288170051687B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4CBAB0370F78028E006D97F1 /* dispatch-proc */; + targetProxy = 4C96F8860F8288170051687B /* PBXContainerItemProxy */; + }; + 4C96F8890F8288190051687B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */; + targetProxy = 4C96F8880F8288190051687B /* PBXContainerItemProxy */; + }; + 4C96F88B0F82881B0051687B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4CBAB0450F7802BA006D97F1 /* dispatch-timers */; + targetProxy = 4C96F88A0F82881B0051687B /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 1DEB928A08733DD80010E9CD /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + ONLY_ACTIVE_ARCH = YES; + PREBINDING = NO; + SDKROOT = macosx10.6; + }; + name = Debug; + }; + 1DEB928B08733DD80010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + PREBINDING = NO; + SDKROOT = macosx10.6; + }; + name = Release; + }; + 4C96F8800F8288080051687B /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_OPTIMIZATION_LEVEL = 0; + PRODUCT_NAME = Samples; + }; + name = Debug; + }; + 4C96F8810F8288080051687B /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + PRODUCT_NAME = 
Samples; + ZERO_LINK = NO; + }; + name = Release; + }; + 4CBAB0270F7801C7006D97F1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-apply"; + }; + name = Debug; + }; + 4CBAB0280F7801C7006D97F1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-apply"; + ZERO_LINK = NO; + }; + name = Release; + }; + 4CBAB0330F780273006D97F1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-netcat"; + }; + name = Debug; + }; + 4CBAB0340F780273006D97F1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-netcat"; + ZERO_LINK = NO; + }; + name = Release; + }; + 4CBAB03A0F78028F006D97F1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-proc"; + }; + name = Debug; + }; + 
4CBAB03B0F78028F006D97F1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-proc"; + ZERO_LINK = NO; + }; + name = Release; + }; + 4CBAB0410F7802A7006D97F1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-readFile"; + }; + name = Debug; + }; + 4CBAB0420F7802A7006D97F1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-readFile"; + ZERO_LINK = NO; + }; + name = Release; + }; + 4CBAB0480F7802BB006D97F1 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-timers"; + }; + name = Debug; + }; + 4CBAB0490F7802BB006D97F1 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_FIX_AND_CONTINUE = NO; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PREBINDING = NO; + PRODUCT_NAME = "dispatch-timers"; + ZERO_LINK = NO; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ 
+ +/* Begin XCConfigurationList section */ + 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "Dispatch Samples" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB928A08733DD80010E9CD /* Debug */, + 1DEB928B08733DD80010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4C96F88F0F8288290051687B /* Build configuration list for PBXAggregateTarget "Samples" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4C96F8800F8288080051687B /* Debug */, + 4C96F8810F8288080051687B /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4CBAB0290F7801E5006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-apply" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4CBAB0270F7801C7006D97F1 /* Debug */, + 4CBAB0280F7801C7006D97F1 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4CBAB04D0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-netcat" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4CBAB0330F780273006D97F1 /* Debug */, + 4CBAB0340F780273006D97F1 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4CBAB04E0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-proc" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4CBAB03A0F78028F006D97F1 /* Debug */, + 4CBAB03B0F78028F006D97F1 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4CBAB04F0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-readFile" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4CBAB0410F7802A7006D97F1 /* Debug */, + 4CBAB0420F7802A7006D97F1 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4CBAB0500F7802DA006D97F1 /* 
Build configuration list for PBXNativeTarget "dispatch-timers" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4CBAB0480F7802BB006D97F1 /* Debug */, + 4CBAB0490F7802BB006D97F1 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; +} diff --git a/examples/Dispatch Samples/ReadMe.txt b/examples/Dispatch Samples/ReadMe.txt new file mode 100644 index 000000000..3214db16d --- /dev/null +++ b/examples/Dispatch Samples/ReadMe.txt @@ -0,0 +1,93 @@ +### DispatchProcessMonitor ### + +=========================================================================== +DESCRIPTION: + +Sample code showing how to: monitor process, do file and network I/O, +create and manage timers, and use dispatch_apply + +=========================================================================== +BUILD REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +RUNTIME REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +PACKAGING LIST: + +apply.c - dispatch_apply examples +netcat.c - network I/O examples +nWide.c - use of dispatch_semaphore to limit number of in-flight blocks +proc.c - process monitoring example +readFile.c - file I/O examples +readFileF.c - file I/O examples without Blocks +timers.c - create and manage timers + +=========================================================================== +SAMPLE USAGE: + +dispatch-apply + +dispatch-apply takes no arguments. When run it will display some status +messages and timing information. + +dispatch-netcat + +Open two terminal windows. 
In one window run the "server": + +cat ReadMe.txt | dispatch-netcat -l localhost 5050 + +In the other run the "client": + +dispatch-netcat localhost 5050 + +Your server will send the contents of ReadMe.txt to the client, the server +will close it's connection and exit. The client will display whatever +the server sent (the ReadMe.txt file). See the main function in netcat.c +for more options. + +dispatch-nWide + +dispatch-nWide takes no arguments. When run it will display explanatory +text. + +dispatch-proc + +dispatch-proc takes no arguments. When run it will display output from +some processes it runs, and it will display information from the +process lifecycle events dispatch generates. + +dispatch-readFile + +Run dispatch-readFile with a filename as an argument: + +dispatch-readFile ReadMe.txt + +It will read the file 10 (or fewer) bytes at a time and display how many +bytes dispatch thinks are remaining to read. + +dispatch-readFileF + +Exactly the same as dispatch-readFile, but written without the use of Blocks. + +dispatch-timers + +dispatch-timers takes no arguments, running it display timer ticks for +a timer with an initial interval of one second, changing to one half second +after the first three events. It will exit after six events. + +=========================================================================== +CHANGES FROM PREVIOUS VERSIONS: + +Version 1.1 +- Updated to current libdispatch API, and added samples readFileF.c and +nWide.c +Version 1.0 +- First version + +=========================================================================== +Copyright (C) 2009 Apple Inc. All rights reserved. diff --git a/examples/Dispatch Samples/apply.c b/examples/Dispatch Samples/apply.c new file mode 100644 index 000000000..3eb39a586 --- /dev/null +++ b/examples/Dispatch Samples/apply.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. 
+ * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#define kIT 10 + +uint64_t elapsed_time; + +void timer_start() { + elapsed_time = mach_absolute_time(); +} + +double timer_milePost() { + static dispatch_once_t justOnce; + static double scale; + + dispatch_once(&justOnce, ^{ + mach_timebase_info_data_t tbi; + mach_timebase_info(&tbi); + scale = tbi.numer; + scale = scale/tbi.denom; + printf("Scale is %10.4f Just computed once courtesy of dispatch_once()\n", scale); + }); + + uint64_t now = mach_absolute_time()-elapsed_time; + double fTotalT = now; + fTotalT = fTotalT * scale; // convert this to nanoseconds... 
+ fTotalT = fTotalT / 1000000000.0; + return fTotalT; +} + +int +main(void) +{ + dispatch_queue_t myQueue = dispatch_queue_create("myQueue", NULL); + dispatch_group_t myGroup = dispatch_group_create(); + +// dispatch_apply on a serial queue finishes each block in order so the following code will take a little more than a second + timer_start(); + dispatch_apply(kIT, myQueue, ^(size_t current){ + printf("Block #%ld of %d is being run\n", + current+1, // adjusting the zero based current iteration we get passed in + kIT); + usleep(USEC_PER_SEC/10); + }); + printf("and dispatch_apply( serial queue ) returned after %10.4lf seconds\n",timer_milePost()); + +// dispatch_apply on a concurrent queue returns after all blocks are finished, however it can execute them concurrently with each other +// so this will take quite a bit less time + timer_start(); + dispatch_apply(kIT, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(size_t current){ + printf("Block #%ld of %d is being run\n",current+1, kIT); + usleep(USEC_PER_SEC/10); + }); + printf("and dispatch_apply( concurrent queue) returned after %10.4lf seconds\n",timer_milePost()); + +// To execute all blocks in a dispatch_apply asynchonously, you will need to perform the dispatch_apply +// asynchonously, like this (NOTE the nested dispatch_apply inside of the async block.) 
+// Also note the use of the dispatch_group so that we can ultimatly know when the work is +// all completed + + timer_start(); + dispatch_group_async(myGroup, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ + dispatch_apply(kIT, myQueue, ^(size_t current){ + printf("Block #%ld of %d is being run\n",current+1, kIT); + usleep(USEC_PER_SEC/10); + }); + }); + + printf("and dispatch_group_async( dispatch_apply( )) returned after %10.4lf seconds\n",timer_milePost()); + printf("Now to wait for the dispatch group to finish...\n"); + dispatch_group_wait(myGroup, UINT64_MAX); + printf("and we are done with dispatch_group_async( dispatch_apply( )) after %10.4lf seconds\n",timer_milePost()); + return 0; +} + diff --git a/examples/Dispatch Samples/nWide.c b/examples/Dispatch Samples/nWide.c new file mode 100644 index 000000000..92914a922 --- /dev/null +++ b/examples/Dispatch Samples/nWide.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2009 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. 
+ * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +/* + * nWide.c + * Samples project + * + * Created by Mensch on 5/1/09. + * Copyright 2009 Apple, Inc. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#import +#include + + +/* + * Demonstrate using dispatch_semaphore to create a concurrent queue that + * allows only a fixed number of blocks to be in flight at any given time + */ + +int main (int argc, const char * argv[]) { + dispatch_group_t mg = dispatch_group_create(); + dispatch_semaphore_t ds; + __block int numRunning = 0; + int qWidth = 5; + int numWorkBlocks = 100; + + if (argc >= 2) { + qWidth = atoi(argv[1]); // use the command 1st line parameter as the queue width + if (qWidth==0) qWidth==1; // protect against bad values + } + + if (argc >=3) { + numWorkBlocks = atoi(argv[2]); // use the 2nd command line parameter as the queue width + if (numWorkBlocks==0) numWorkBlocks==1; // protect against bad values + } + + printf("Starting dispatch semaphore test to simulate a %d wide dispatch queue\n", qWidth ); + ds = dispatch_semaphore_create(qWidth); + + int i; + for (i=0; i +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// #define DEBUG 1 + +#if DEBUG +#define dlog(a) dispatch_debug(a, #a) +#else +#define dlog(a) do { } while(0) +#endif + 
+void usage(void); +void *run_block(void *); +void setup_fd_relay(int netfd /* bidirectional */, + int infd /* local input */, + int outfd /* local output */, + void (^finalizer_block)(void)); +void doreadwrite(int fd1, int fd2, char *buffer, size_t len); + +#define BUFFER_SIZE 1099 + +int main(int argc, char *argv[]) { + + int ch; + bool use_v4_only = false, use_v6_only = false; + bool debug = false, no_stdin = false; + bool keep_listening = false, do_listen = false; + bool do_loookups = true, verbose = false; + bool do_udp = false, do_bind_ip = false, do_bind_port = false; + const char *hostname, *servname; + int ret; + struct addrinfo hints, *aires, *aires0; + const char *bind_hostname, *bind_servname; + + dispatch_queue_t dq; + dispatch_group_t listen_group = NULL; + + while ((ch = getopt(argc, argv, "46Ddhklnvup:s:")) != -1) { + switch (ch) { + case '4': + use_v4_only = true; + break; + case '6': + use_v6_only = true; + break; + case 'D': + debug = true; + break; + case 'd': + no_stdin = true; + break; + case 'h': + usage(); + break; + case 'k': + keep_listening = true; + break; + case 'l': + do_listen = true; + break; + case 'n': + do_loookups = false; + break; + case 'v': + verbose = true; + break; + case 'u': + do_udp = true; + break; + case 'p': + do_bind_port = true; + bind_servname = optarg; + break; + case 's': + do_bind_ip = true; + bind_hostname = optarg; + break; + case '?': + default: + usage(); + break; + } + } + + argc -= optind; + argv += optind; + + if (use_v4_only && use_v6_only) { + errx(EX_USAGE, "-4 and -6 specified"); + } + + if (keep_listening && !do_listen) { + errx(EX_USAGE, "-k specified but no -l"); + } + + if (do_listen && (do_bind_ip || do_bind_port)) { + errx(EX_USAGE, "-p or -s option with -l"); + } + + if (do_listen) { + if (argc >= 2) { + hostname = argv[0]; + servname = argv[1]; + } else if (argc >= 1) { + hostname = NULL; + servname = argv[0]; + } else { + errx(EX_USAGE, "No service name provided"); + } + } else { + if (argc >= 
2) { + hostname = argv[0]; + servname = argv[1]; + } else { + errx(EX_USAGE, "No hostname and service name provided"); + } + } + + if (do_bind_ip || do_bind_port) { + if (!do_bind_ip) { + bind_hostname = NULL; + } + if (!do_bind_port) { + bind_servname = NULL; + } + } + + openlog(getprogname(), LOG_PERROR|LOG_CONS, LOG_DAEMON); + setlogmask(debug ? LOG_UPTO(LOG_DEBUG) : verbose ? LOG_UPTO(LOG_INFO) : LOG_UPTO(LOG_ERR)); + + dq = dispatch_queue_create("netcat", NULL); + listen_group = dispatch_group_create(); + + bzero(&hints, sizeof(hints)); + hints.ai_family = use_v4_only ? PF_INET : (use_v6_only ? PF_INET6 : PF_UNSPEC); + hints.ai_socktype = do_udp ? SOCK_DGRAM : SOCK_STREAM; + hints.ai_protocol = do_udp ? IPPROTO_UDP : IPPROTO_TCP; + hints.ai_flags = (!do_loookups ? AI_NUMERICHOST | AI_NUMERICSERV : 0) | (do_listen ? AI_PASSIVE : 0); + + ret = getaddrinfo(hostname, servname, &hints, &aires0); + if (ret) { + errx(1, "getaddrinfo(%s, %s): %s", hostname, servname, gai_strerror(ret)); + } + + for (aires = aires0; aires; aires = aires->ai_next) { + if (do_listen) { + // asynchronously set up the socket + dispatch_retain(dq); + dispatch_group_async(listen_group, + dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), + ^{ + int s, val = 1; + dispatch_source_t ds; + + s = socket(aires->ai_family, aires->ai_socktype, aires->ai_protocol); + if (s < 0) { + warn("socket"); + return; + } + + if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)) < 0) { + warn("Could not set SO_REUSEADDR"); + } + + if(setsockopt(s, SOL_SOCKET, SO_REUSEPORT, (const char *)&val, sizeof(val)) < 0) { + warn("Could not set SO_REUSEPORT"); + } + + if(setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof(val)) < 0) { + warn("Could not set SO_NOSIGPIPE"); + } + + if (bind(s, aires->ai_addr, aires->ai_addrlen) < 0) { + warn("bind"); + close(s); + return; + } + + listen(s, 2); + syslog(LOG_DEBUG, "listening on socket %d", s); + ds = 
dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, s, 0, dq); + dispatch_source_set_event_handler(ds, ^{ + // got an incoming connection + int s2, lfd = dispatch_source_get_handle(ds); + dispatch_queue_t listen_queue = dispatch_get_current_queue(); + + // prevent further accept(2)s across multiple sources + dispatch_retain(listen_queue); + dispatch_suspend(listen_queue); + + if (do_udp) { + // lfd is our socket, but let's connect in the reverse + // direction to set up the connection fully + char udpbuf[4]; + struct sockaddr_storage sockin; + socklen_t socklen; + ssize_t peeklen; + int cret; + + socklen = sizeof(sockin); + peeklen = recvfrom(lfd, udpbuf, sizeof(udpbuf), + MSG_PEEK, (struct sockaddr *)&sockin, &socklen); + if (peeklen < 0) { + warn("recvfrom"); + dispatch_resume(listen_queue); + dispatch_release(listen_queue); + return; + } + + cret = connect(lfd, (struct sockaddr *)&sockin, socklen); + if (cret < 0) { + warn("connect"); + dispatch_resume(listen_queue); + dispatch_release(listen_queue); + return; + } + + s2 = lfd; + syslog(LOG_DEBUG, "accepted socket %d", s2); + } else { + s2 = accept(lfd, NULL, NULL); + if (s2 < 0) { + warn("accept"); + dispatch_resume(listen_queue); + dispatch_release(listen_queue); + return; + } + syslog(LOG_DEBUG, "accepted socket %d -> %d", lfd, s2); + } + + + setup_fd_relay(s2, no_stdin ? 
-1 : STDIN_FILENO, STDOUT_FILENO, ^{ + if (!do_udp) { + close(s2); + } + dispatch_resume(listen_queue); + dispatch_release(listen_queue); + if (!keep_listening) { + exit(0); + } + }); + }); + dispatch_resume(ds); + dispatch_release(dq); + }); + } else { + // synchronously try each address to try to connect + __block bool did_connect = false; + + dispatch_sync(dq, ^{ + int s, val = 1; + + s = socket(aires->ai_family, aires->ai_socktype, aires->ai_protocol); + if (s < 0) { + warn("socket"); + return; + } + + if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)) < 0) { + warn("Could not set SO_REUSEADDR"); + } + + if(setsockopt(s, SOL_SOCKET, SO_REUSEPORT, (const char *)&val, sizeof(val)) < 0) { + warn("Could not set SO_REUSEPORT"); + } + + if(setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof(val)) < 0) { + warn("Could not set SO_NOSIGPIPE"); + } + + if (do_bind_port || do_bind_ip) { + struct addrinfo bhints, *bind_aires; + int bret; + in_port_t bport; + + bzero(&bhints, sizeof(bhints)); + bhints.ai_family = aires->ai_family; + bhints.ai_socktype = aires->ai_socktype; + bhints.ai_protocol = aires->ai_protocol; + bhints.ai_flags = (do_bind_ip ? AI_NUMERICHOST : 0) | (do_bind_port ? 
AI_NUMERICSERV : 0) | AI_PASSIVE; + + bret = getaddrinfo(bind_hostname, bind_servname, &bhints, &bind_aires); + if (bret) { + warnx("getaddrinfo(%s, %s): %s", bind_hostname, bind_servname, gai_strerror(bret)); + close(s); + freeaddrinfo(bind_aires); + return; + } + + switch(bind_aires->ai_family) { + case PF_INET: + bport = ((struct sockaddr_in *)bind_aires->ai_addr)->sin_port; + break; + case PF_INET6: + bport = ((struct sockaddr_in6 *)bind_aires->ai_addr)->sin6_port; + break; + default: + bport = htons(0); + break; + } + + if (ntohs(bport) > 0 && ntohs(bport) < IPPORT_RESERVED) { + bret = bindresvport_sa(s, (struct sockaddr *)bind_aires->ai_addr); + } else { + bret = bind(s, bind_aires->ai_addr, bind_aires->ai_addrlen); + } + + if (bret < 0) { + warn("bind"); + close(s); + freeaddrinfo(bind_aires); + return; + } + + freeaddrinfo(bind_aires); + } + + if (connect(s, aires->ai_addr, aires->ai_addrlen) < 0) { + syslog(LOG_INFO, "connect to %s port %s (%s) failed: %s", + hostname, + servname, + aires->ai_protocol == IPPROTO_TCP ? "tcp" : aires->ai_protocol == IPPROTO_UDP ? "udp" : "unknown", + strerror(errno)); + close(s); + return; + } + + syslog(LOG_INFO, "Connection to %s %s port [%s] succeeded!", + hostname, + servname, + aires->ai_protocol == IPPROTO_TCP ? "tcp" : aires->ai_protocol == IPPROTO_UDP ? "udp" : "unknown"); + did_connect = true; + + if (do_udp) { + // netcat sends a few bytes to set up the connection + doreadwrite(-1, s, "XXXX", 4); + } + + setup_fd_relay(s, no_stdin ? 
-1 : STDIN_FILENO, STDOUT_FILENO, ^{ + close(s); + exit(0); + }); + }); + + if (did_connect) { + break; + } + } + } + + dispatch_group_wait(listen_group, DISPATCH_TIME_FOREVER); + freeaddrinfo(aires0); + + if (!do_listen && aires == NULL) { + // got to the end of the address list without connecting + exit(1); + } + + dispatch_main(); + + return 0; +} + +void usage(void) +{ + fprintf(stderr, "Usage: %s [-4] [-6] [-D] [-d] [-h] [-k] [-l] [-n] [-v]\n", getprogname()); + fprintf(stderr, " \t[-u] [-p ] [-s ]\n"); + exit(EX_USAGE); +} + +void *run_block(void *arg) +{ + void (^b)(void) = (void (^)(void))arg; + + b(); + + _Block_release(arg); + + return NULL; +} + +/* + * Read up-to as much as is requested, and write + * that to the other fd, taking into account exceptional + * conditions and re-trying + */ +void doreadwrite(int fd1, int fd2, char *buffer, size_t len) { + ssize_t readBytes, writeBytes, totalWriteBytes; + + if (fd1 != -1) { + syslog(LOG_DEBUG, "trying to read %ld bytes from fd %d", len, fd1); + readBytes = read(fd1, buffer, len); + if (readBytes < 0) { + if (errno == EINTR || errno == EAGAIN) { + /* can't do anything now, hope we get called again */ + syslog(LOG_DEBUG, "error read fd %d: %s (%d)", fd1, strerror(errno), errno); + return; + } else { + err(1, "read fd %d", fd1); + } + } else if (readBytes == 0) { + syslog(LOG_DEBUG, "EOF on fd %d", fd1); + return; + } + syslog(LOG_DEBUG, "read %ld bytes from fd %d", readBytes, fd1); + } else { + readBytes = len; + syslog(LOG_DEBUG, "read buffer has %ld bytes", readBytes); + } + + totalWriteBytes = 0; + do { + writeBytes = write(fd2, buffer+totalWriteBytes, readBytes-totalWriteBytes); + if (writeBytes < 0) { + if (errno == EINTR || errno == EAGAIN) { + continue; + } else { + err(1, "write fd %d", fd2); + } + } + syslog(LOG_DEBUG, "wrote %ld bytes to fd %d", writeBytes, fd2); + totalWriteBytes += writeBytes; + + } while (totalWriteBytes < readBytes); + + return; +} + +/* + * We set up dispatch sources for netfd 
and infd. + * Since only one callback is called at a time per-source, + * we don't need any additional serialization, and the network + * and infd could be read from at the same time. + */ +void setup_fd_relay(int netfd /* bidirectional */, + int infd /* local input */, + int outfd /* local output */, + void (^finalizer_block)(void)) +{ + dispatch_source_t netsource = NULL, insource = NULL; + + dispatch_queue_t teardown_queue = dispatch_queue_create("teardown_queue", NULL); + + void (^finalizer_block_copy)(void) = _Block_copy(finalizer_block); // release after calling + void (^cancel_hander)(dispatch_source_t source) = ^(dispatch_source_t source){ + dlog(source); + dlog(teardown_queue); + + /* + * allowing the teardown queue to become runnable will get + * the teardown block scheduled, which will cancel all other + * sources and call the client-supplied finalizer + */ + dispatch_resume(teardown_queue); + dispatch_release(teardown_queue); + }; + void (^event_handler)(dispatch_source_t source, int wfd) = ^(dispatch_source_t source, int wfd) { + int rfd = dispatch_source_get_handle(source); + size_t bytesAvail = dispatch_source_get_data(source); + char *buffer; + + syslog(LOG_DEBUG, "dispatch source %d -> %d has %lu bytes available", + rfd, wfd, bytesAvail); + if (bytesAvail == 0) { + dlog(source); + dispatch_source_cancel(source); + return; + } + buffer = malloc(BUFFER_SIZE); + doreadwrite(rfd,wfd, buffer, MIN(BUFFER_SIZE, bytesAvail+2)); + free(buffer); + }; + + /* + * Suspend this now twice so that neither source can accidentally resume it + * while we're still setting up the teardown block. 
When either source + * gets an EOF, the queue is resumed so that it can teardown the other source + * and call the client-supplied finalizer + */ + dispatch_suspend(teardown_queue); + dispatch_suspend(teardown_queue); + + if (infd != -1) { + dispatch_retain(teardown_queue); // retain so that we can resume in this block later + + dlog(teardown_queue); + + // since the event handler serializes, put this on a concurrent queue + insource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)); + dispatch_source_set_event_handler(insource, ^{ event_handler(insource, netfd); }); + dispatch_source_set_cancel_handler(insource, ^{ cancel_hander(insource); }); + dispatch_resume(insource); + dlog(insource); + } + + dispatch_retain(teardown_queue); // retain so that we can resume in this block later + + dlog(teardown_queue); + + // since the event handler serializes, put this on a concurrent queue + netsource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, netfd, 0, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)); + dispatch_source_set_event_handler(netsource, ^{ event_handler(netsource, outfd); }); + dispatch_source_set_cancel_handler(netsource, ^{ cancel_hander(netsource); }); + dispatch_resume(netsource); + dlog(netsource); + + dispatch_async(teardown_queue, ^{ + syslog(LOG_DEBUG, "Closing connection on fd %d -> %d -> %d", infd, netfd, outfd); + + if (insource) { + dlog(insource); + dispatch_source_cancel(insource); + dispatch_release(insource); // matches initial create + dlog(insource); + } + + dlog(netsource); + dispatch_source_cancel(netsource); + dispatch_release(netsource); // matches initial create + dlog(netsource); + + dlog(teardown_queue); + + finalizer_block_copy(); + _Block_release(finalizer_block_copy); + }); + + /* Resume this once so their either source can do the second resume + * to start the teardown block running + */ + dispatch_resume(teardown_queue); + 
dispatch_release(teardown_queue); // matches initial create + dlog(teardown_queue); +} + diff --git a/examples/Dispatch Samples/proc.c b/examples/Dispatch Samples/proc.c new file mode 100644 index 000000000..511b42f03 --- /dev/null +++ b/examples/Dispatch Samples/proc.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. 
APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. + * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +extern char **environ; +dispatch_queue_t qpf; +volatile int exitcount = 0; + +// maximum value for exitcount before we quit +#define proccount 2 + + +struct qp_msg { + FILE *f; + char *str; +}; + +void qpf_puts(void *m_) { + struct qp_msg *m = m_; + fputs(m->str, m->f); + free(m->str); + free(m); +} + +void qfprintf(FILE *f, const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + struct qp_msg *m = malloc(sizeof(struct qp_msg)); + assert(m); + vasprintf(&m->str, fmt, ap); + m->f = f; + dispatch_async(qpf, ^(void) { qpf_puts(m); }); + va_end(ap); +} + +#define qprintf(fmt...) qfprintf(stdout, ## fmt) + +/* context structure, contains a process id and the + * command line arguments used to launch it. Used to + * provide context info to the block associated + * with a process event source. + */ +struct pinfo { + pid_t pid; + dispatch_source_t source; + char **argv; +}; + +/* pid_finalize() is called when the dispatch source is released. 
+ * this block is attached to the attribute that is passed to dispatch_source_proc_create(), + * and is thus associated with the dispatch source. */ +void pid_finalize(struct pinfo *pi) { + qprintf("process %d is done watching %s (%d)\n", getpid(), pi->argv[0], pi->pid); + dispatch_release(pi->source); + if (OSAtomicIncrement32(&exitcount) == proccount) { + qprintf("both processes exited\n"); + dispatch_sync(qpf,^{}); + exit(0); + } +} + + +/* pid_event() is called from a block that is associated with a process event + * source for a specific process id (via dispatch_source_proc_create()). When + * such an event occurs, pid_event() calls dispatch_source_get_context() to + * gain access to the pid and process name that were stored in the context at + * the time the block was attached to the event source. + */ +#define FLAG(X) ((dispatch_source_get_data(src) & DISPATCH_PROC_##X) ? #X" " : "") + +void pid_event(struct pinfo *pi) { + dispatch_source_t src = pi->source; + + qprintf("process %d %s, flags: %x %s%s%s%s\n", pi->pid, pi->argv[0], dispatch_source_get_data(src), FLAG(EXIT), FLAG(FORK), FLAG(EXEC), FLAG(SIGNAL)); + if (dispatch_source_get_data(src) & DISPATCH_PROC_EXIT) { + int s; + waitpid(dispatch_source_get_handle(src), &s, WNOHANG); + qprintf(" %s exit status %d\n", pi->argv[0], s); + dispatch_source_cancel(src); + } +} + +/* proc_start() takes a context pointer (ppi), and a dispatch queue (pq), + * and spawns the process named in ppi->argv[0]. The resulting process id + * is stored in the context (ppi->pid). On successfully spawning the process, + * it creates a dispatch source for the purpose of executing the routine pid_event(pi,ev) + * when certain events (exit, fork, exec, reap, or signal) occur to the process. 
+ */ +void proc_start(void *ppi, dispatch_queue_t pq) { + struct pinfo *pi = ppi; + + int rc = posix_spawnp(&pi->pid, pi->argv[0], NULL, NULL, pi->argv, environ); + if (rc) { + int e = errno; + qprintf("Can't spawn %s (rc=%d, e=%d %s)\n", pi->argv[0], rc, e, strerror(e)); + } else { + + dispatch_source_t dsp = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pi->pid, DISPATCH_PROC_EXIT|DISPATCH_PROC_FORK|DISPATCH_PROC_EXEC|DISPATCH_PROC_SIGNAL, pq); + dispatch_source_set_event_handler_f(dsp, (dispatch_function_t)pid_event); + dispatch_source_set_cancel_handler_f(dsp, (dispatch_function_t)pid_finalize); + pi->source = dsp; + dispatch_set_context(dsp, pi); + dispatch_resume(dsp); + + qprintf("process %d spawned %s: %d, watching with event source: %p\n", getpid(), pi->argv[0], pi->pid, dsp); + + } +} + +int main(int argc, char *argv[]) { + struct pinfo pi, pi2, pi3; + struct pinfo *ppi2 = & pi2, *ppi3 = &pi3; + + char *av[] = {argv[0], NULL}; // set up context info (struct pinfo) for this process. + pi.pid = getpid(); + pi.argv = av; + + char *av2[] = {"sleep", "3", NULL}; // set up context info (struct pinfo) for the sleep tool + pi2.argv = av2; + + char *av3[] = {"script", "/tmp/LOG", "banner", "-w80", "!", NULL}; // set up context info (struct pinfo) for the script tool + pi3.argv = av3; + + dispatch_queue_t pq = dispatch_queue_create("PQ", NULL); // create our main processing queue + + qpf = dispatch_queue_create("qprintf", NULL); // create a separate queue for printf + + /* create a dispatch source that will call the routine pid_event(pi,ev) + * when certain events occur to the specified process (pi->pid). The dispatch source is + * associated with the dispatch queue that was created in this routine (pq). This example + * requests the block be executed whenever one of the following events occurs: + * exit, fork, exec, reap, or signal. 
+ */ + dispatch_source_t procSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pi.pid, DISPATCH_PROC_EXIT|DISPATCH_PROC_FORK|DISPATCH_PROC_EXEC|DISPATCH_PROC_SIGNAL, pq); + + dispatch_source_set_event_handler_f(procSource, (dispatch_function_t)pid_event); + dispatch_source_set_cancel_handler_f(procSource, (dispatch_function_t)pid_finalize); + pi.source = procSource; + dispatch_set_context(procSource, &pi); + dispatch_resume(procSource); + + /* create a block (which simply calls proc_start()), and dispatch it to the queue. + * proc_start() will spawn the process named by ppiX->argv[0], and set up + * another block (containing a call to pid_event()) on an event source that + * will recieve process events... + */ + dispatch_async(pq, ^(void) { proc_start( ppi2, pq ); }); // launch the sleep tool, and create the process watcher for it + dispatch_async(pq, ^(void) { proc_start( ppi3, pq ); }); // launch the script tool, and create the process watcher for it + + + dispatch_main(); // wait for all the queued and spawned items to finish... +} diff --git a/examples/Dispatch Samples/readFile.c b/examples/Dispatch Samples/readFile.c new file mode 100644 index 000000000..9c537c50b --- /dev/null +++ b/examples/Dispatch Samples/readFile.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. 
+ * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +int main(int argc, char* argv[]) +{ + int infd; + dispatch_source_t fileSource; + + if (argc != 2) { + fprintf(stderr, "usage: %s file ...\n", argv[0]); + exit(1); + } + + + infd = open(argv[1], O_RDONLY); + if (infd == -1) { + perror(argv[1]); + exit(1); + } + + if (fcntl(infd, F_SETFL, O_NONBLOCK) != 0) { + perror(argv[1]); + exit(1); + } + + fileSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_queue_create("read source queue",NULL)); + + dispatch_source_set_event_handler( fileSource, ^{ + char buffer[10]; + size_t estimated = dispatch_source_get_data(fileSource); + printf("Estimated bytes available: %ld\n", estimated); + ssize_t actual = read(infd, buffer, sizeof(buffer)); + if (actual == -1) { + if (errno != EAGAIN) { + perror("read"); + exit(-1); + } + } else { + if (estimated>actual) { + printf(" bytes read: %ld\n", actual); + } else { + // end of file has been reached. 
+ printf(" last bytes read: %ld\n", actual); + dispatch_source_cancel(fileSource); + } + } + }); + + dispatch_source_set_cancel_handler( fileSource, ^{ + // release all our associated dispatch data structures + dispatch_release(fileSource); + dispatch_release(dispatch_get_current_queue()); + // close the file descriptor because we are done reading it + close(infd); + // and since we have nothing left to do, exit the tool + exit(0); + + }); + + dispatch_resume(fileSource); + + dispatch_main(); + + return 0; +} diff --git a/examples/Dispatch Samples/readFileF.c b/examples/Dispatch Samples/readFileF.c new file mode 100644 index 000000000..6546714b9 --- /dev/null +++ b/examples/Dispatch Samples/readFileF.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. 
may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. + * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + + +void readFileData(void* theSource) { + char buffer[10]; + size_t estimated = dispatch_source_get_data(theSource); + printf("Estimated bytes available: %ld\n", estimated); + ssize_t actual = read(dispatch_source_get_handle(theSource), buffer, sizeof(buffer)); + if (actual == -1) { + if (errno != EAGAIN) { + perror("read"); + exit(-1); + } + } else { + if (estimated>actual) { + printf(" bytes read: %ld\n", actual); + } else { + // end of file has been reached. 
+ printf(" last bytes read: %ld\n", actual); + dispatch_source_cancel(theSource); + } + } +} + +void cancelSource(void* theSource) { + close(dispatch_source_get_handle(theSource)); + dispatch_release(theSource); + dispatch_release(dispatch_get_current_queue()); + printf("Everything is finished, goodbye.\n"); + exit(0); +} + +int main(int argc, char* argv[]) +{ + int infd; + dispatch_source_t fileSource; + + if (argc != 2) { + fprintf(stderr, "usage: %s file ...\n", argv[0]); + exit(1); + } + + + infd = open(argv[1], O_RDONLY); + if (infd == -1) { + perror(argv[1]); + exit(1); + } + + if (fcntl(infd, F_SETFL, O_NONBLOCK) != 0) { + perror(argv[1]); + exit(1); + } + + fileSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_queue_create("read source queue",NULL)); + dispatch_source_set_event_handler_f( fileSource, readFileData); + dispatch_source_set_cancel_handler_f( fileSource, cancelSource); + // setting the context pointer to point to the source itself means the functions will get the source + // as a paremeter, from there they can get all the information they need. + dispatch_set_context(fileSource, fileSource); + dispatch_resume(fileSource); + + dispatch_main(); + + return 0; +} diff --git a/examples/Dispatch Samples/timers.c b/examples/Dispatch Samples/timers.c new file mode 100644 index 000000000..7dc9f8c0f --- /dev/null +++ b/examples/Dispatch Samples/timers.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. 
+ * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +#include + +int main(int argc, char* argv[]) +{ + dispatch_source_t theTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_queue_create("timer queue",NULL)); + + __block int i = 0; + + printf("Starting to count by seconds\n"); + + dispatch_source_set_event_handler(theTimer, ^{ + printf("%d\n", ++i); + if (i >= 6) { + printf("i>6\n"); + dispatch_source_cancel(theTimer); + } + if (i == 3) { + printf("switching to half seconds\n"); + dispatch_source_set_timer(theTimer, DISPATCH_TIME_NOW, NSEC_PER_SEC / 2, 0); + } + }); + + dispatch_source_set_cancel_handler(theTimer, ^{ + printf("dispatch source canceled OK\n"); + dispatch_release(theTimer); + exit(0); + }); + + dispatch_source_set_timer(theTimer, dispatch_time(DISPATCH_TIME_NOW,NSEC_PER_SEC) , NSEC_PER_SEC, 0); + + dispatch_resume(theTimer); + dispatch_main(); + + return 0; +} diff --git a/examples/DispatchLife/DispatchLife.c b/examples/DispatchLife/DispatchLife.c new file mode 100644 index 000000000..0871e4a3c --- /dev/null +++ b/examples/DispatchLife/DispatchLife.c @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. 
+ * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ +/*! + @header Life + An asynchronous variation of Conway's Game of Life implemented with + GCD. Like the classic version, the game board consists of a grid of + cells that can live, die or multiply by the following rules[1]: + + 1. Survivals. Every [living cell] with two or three neighboring + [living cells] survives for the next generation. + 2. Deaths. Each [living cell] with four or more neighbors dies (is + removed) from overpopulation. Every [living cell] with one + neighbor or none dies from isolation. + 3. Births. Each empty cell adjacent to exactly three neighbors--no + more, no fewer--is a birth cell. A [living cell] is placed on it + at the next move. + + However, unlike the classic version, not all deaths and births occur + simultaneously in a single, synchronous, "move" of the game board. + Instead the rules are applied to each cell independently based on its + observations of the cells around it. + + Each cell is backed by a GCD queue which manages the synchronization + of the cell's internal state (living or dead). When a cell's state + changes, a notification in the form of a dispatch_call() of the + cell_needs_update() work function is sent to all adjacent cells so + that the state of those cells may be re-evaluated. 
+ + To re-evaluate the state of a cell, a request of the current state of + all adjacent cells is sent in the form of a dispatch_call() of the + _cell_is_alive() work function. The state of the adjacent cells is + returned to the requestor via the _cell_is_alive_callback() completion + callback. Once all outstanding completion callbacks have been + received, the cell updates its state according to the aforementioned + rules. If the application of these rules results in another state + change, the update_cell() notification is once again sent out, + repeating the process. + + Due to the highly asynchronous nature of this implementation, the + simulation's results may differ from the classic version for the same + set of initial conditions. In particular, due to non-deterministic + scheduling factors, the same set of initial conditions is likely to + produce dramatically different results on subsequent simulations. + + [1] Martin Gardner. "MATHEMATICAL GAMES: The fantastic combinations of + John Conway's new solitaire game 'life'" Scientific American 223 + (October 1970): 120-123. + + @copyright Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ @updated 2009-03-31 +*/ +//////////////////////////////////////////////////////////////////////////////// + +// Adjustable parameters +unsigned long grid_x_size = 40; +unsigned long grid_y_size = 20; + +int use_curses = 1; + +//////////////////////////////////////////////////////////////////////////////// +#include +#include +#include +#include +#include +#include +#include + +#define CELL_MAX_NEIGHBORS 8 + +struct cell { + dispatch_queue_t q; + int alive; + char display; + + // tracks whether a update_cell() notification arrived while + // an update was already in progress + int needs_update; + int living_neighbors; + int queries_outstanding; + + struct cell* neighbors[CELL_MAX_NEIGHBORS]; + char* label; +} __attribute__((aligned(64))); + +//////////////////////////////////////////////////////////////////////////////// + +/*! @function init_grid + Initializes the grid data structure based on the global variables + grid_x_size and grid_y_size. Must be called before any calls to + cell_set_alive. */ +struct cell* init_grid(size_t grid_x_size, size_t grid_y_size); + +/*! @function init_display + Initializes the display subsystem. Starts a periodic timer to update the + display based on the current contents of the cell grid. 
+ */ +void init_display(struct cell* grid); + +//////////////////////////////////////////////////////////////////////////////// + +// Macro to test whether x,y coordinates are within bounds of the grid +#define GRID_VALID(u,v) (((u) >= 0) && ((v) >= 0) && \ + ((u) < grid_x_size) && ((v) < grid_y_size)) +// Macro to translate from 2d grid coordinates to array offest +#define GRID_OFF(u,v) ((v) * grid_x_size + (u)) + +#if !defined(DISPATCH_LIFE_GL) +int main(int argc, char* argv[]) { + + struct ttysize tsz; + int res; + + res = ioctl(STDIN_FILENO, TIOCGWINSZ, &tsz); + if (res == 0) { + grid_x_size = tsz.ts_cols; + grid_y_size = tsz.ts_lines; + } + + int dispflag = 1; + int ch; + + while ((ch = getopt(argc, argv, "x:y:q")) != -1) { + char* endptr; + switch (ch) { + case 'x': + grid_x_size = strtol(optarg, &endptr, 10); + if (grid_x_size < 0 || (endptr && *endptr != 0)) { + fprintf(stderr, "life: invalid x size\n"); + exit(1); + } + break; + case 'y': + grid_y_size = strtol(optarg, &endptr, 10); + if (grid_y_size < 0 || (endptr && *endptr != 0)) { + fprintf(stderr, "life: invalid y size\n"); + exit(1); + } + break; + case 'q': + dispflag = 0; + break; + case '?': + default: + fprintf(stderr, "usage: life [-q] [-x size] [-y size]\n"); + fprintf(stderr, "\t-x: grid x size (default is terminal columns)\n"); + fprintf(stderr, "\t-y: grid y size (default is terminal rows)\n"); + fprintf(stderr, "\t-q: suppress display output\n"); + exit(1); + } + } + + struct cell* grid = init_grid(grid_x_size, grid_y_size); + + if (dispflag) { + init_display(grid); + if (use_curses) { + initscr(); cbreak(); noecho(); + nonl(); + intrflush(stdscr, FALSE); + keypad(stdscr, TRUE); + } + } + + dispatch_main(); + + if (dispflag && use_curses) { + endwin(); + } + + return 0; +} +#endif /* defined(DISPATCH_LIFE_GL) */ + +//////////////////////////////////////////////////////////////////////////////// + +static void cell_set_alive(struct cell*, int alive); + +/*! 
@function update_cell
+	GCD work function.  Begins the update process for a cell by
+	asynchronously querying the alive state of all adjacent cells; each
+	reply is delivered back on this cell's queue via
+	update_cell_response().  If an update is already
+	in progress, simply sets the needs_update flag of the cell. */
+static void update_cell(struct cell*);
+
+/*! @function update_cell_response
+	GCD completion callback.  Receives one neighbor's alive state.  When
+	all update_cell_response() completion callbacks have been received
+	from an update, recalculates the internal state of the cell.  If the
+	state changes, sends update_cell() to all adjacent cells. */
+static void update_cell_response(struct cell*, int);
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Invoke the given block once for each non-NULL neighbor of the cell.
+void
+foreach_neighbor(struct cell* self, void (^action)(struct cell* other)) {
+	int i;
+	for (i = 0; i < CELL_MAX_NEIGHBORS; ++i) {
+		struct cell* other = self->neighbors[i];
+		if (other) {
+			action(other);
+		}
+	}
+}
+
+
+// Change cell state, update the screen, and update neighbors.
+void
+cell_set_alive(struct cell* self, int alive) {
+	if (alive == self->alive) return; // nothing to do
+
+	// All state mutation happens on self->q; neighbors are then poked on
+	// their own queues so each cell's state stays queue-confined.
+	dispatch_async(self->q, ^{
+		self->alive = alive;
+		self->display = (self->alive) ? '#' : ' ';
+
+		foreach_neighbor(self, ^(struct cell* other) {
+			dispatch_async(other->q, ^{ update_cell(other); });
+		});
+	});
+}
+
+void
+update_cell(struct cell* self) {
+	if (self->queries_outstanding == 0) {
+		self->needs_update = 0;
+		self->living_neighbors = 0;
+
+		// Ask each neighbor for its state.  The reply hops from the
+		// neighbor's queue back onto self->q, so update_cell_response()
+		// always runs serialized with this cell's other work.
+		foreach_neighbor(self, ^(struct cell* other) {
+			++self->queries_outstanding;
+			dispatch_async(other->q, ^{
+				dispatch_async(self->q, ^{ update_cell_response(self, other->alive); });
+			});
+		});
+
+		// '.' indicates the cell is not alive but needs an update
+		if (!self->alive) self->display = '.';
+	} else {
+		self->needs_update = 1;
+	}
+}
+
+void
+update_cell_response(struct cell* self, int response) {
+	if (response) ++self->living_neighbors;
+	--self->queries_outstanding;
+
+	// when all neighbors have replied with their state,
+	// recalculate our internal state
+	if (self->queries_outstanding == 0) {
+		const int living_neighbors = self->living_neighbors;
+		int alive = self->alive;
+
+		// Conway's Game of Life
+		if (living_neighbors < 2 || living_neighbors > 3) {
+			alive = 0;
+		} else if (living_neighbors == 3) {
+			alive = 1;
+		}
+
+		// Notify neighbors of state change
+		cell_set_alive(self, alive);
+
+		// if a request for an update came in while we were
+		// already processing one, kick off the next update
+		if (self->needs_update) {
+			dispatch_async(self->q, ^{ update_cell(self); });
+		} else {
+			// otherwise clear the '.' character that was
+			// displayed during the update
+			if (!self->alive) {
+				self->display = ' ';
+			}
+		}
+	}
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Allocate and wire up the grid: one serial queue per cell, up to eight
+// neighbor pointers (NULL at the edges), and a random initial population.
+struct cell*
+init_grid(size_t grid_x_size, size_t grid_y_size) {
+	struct cell* grid = calloc(sizeof(struct cell),grid_x_size*grid_y_size);
+
+	int i,j;
+	for (i = 0; i < grid_x_size; ++i) {
+		for (j = 0; j < grid_y_size; ++j) {
+			struct cell* ptr = &grid[GRID_OFF(i,j)];
+
+			asprintf(&ptr->label, "x%dy%d", i, j);
+
+			ptr->q = dispatch_queue_create(ptr->label, NULL);
+			dispatch_set_target_queue(ptr->q, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0));
+			dispatch_queue_set_context(ptr->q, ptr);
+
+			ptr->neighbors[0] = GRID_VALID(i ,j-1) ?
+				&grid[GRID_OFF(i ,j-1)] : NULL; // N
+			ptr->neighbors[1] = GRID_VALID(i+1,j-1) ?
+				&grid[GRID_OFF(i+1,j-1)] : NULL; // NE
+			ptr->neighbors[2] = GRID_VALID(i+1,j ) ?
+				&grid[GRID_OFF(i+1,j )] : NULL; // E
+			ptr->neighbors[3] = GRID_VALID(i+1,j+1) ?
+ &grid[GRID_OFF(i+1,j+1)] : NULL; // SE + ptr->neighbors[4] = GRID_VALID(i ,j+1) ? + &grid[GRID_OFF(i ,j+1)] : NULL; // S + ptr->neighbors[5] = GRID_VALID(i-1,j+1) ? + &grid[GRID_OFF(i-1,j+1)] : NULL; // SW + ptr->neighbors[6] = GRID_VALID(i-1,j ) ? + &grid[GRID_OFF(i-1,j )] : NULL; // W + ptr->neighbors[7] = GRID_VALID(i-1,j-1) ? + &grid[GRID_OFF(i-1,j-1)] : NULL; // NW + } + } + + srandomdev(); + for (i = 0; i < grid_x_size; ++i) { + for (j = 0; j < grid_y_size; ++j) { + if (random() & 1) { + cell_set_alive(&grid[GRID_OFF(i,j)], 1); + } + } + } + + return grid; +} + +#if defined(DISPATCH_LIFE_GL) +char +get_grid_display_char(struct cell* grid, size_t x, size_t y) { + return grid[GRID_OFF(x,y)].display; +} +#endif /* defined(DISPATCH_LIFE_GL) */ + +#if !defined(DISPATCH_LIFE_GL) +void +init_display(struct cell* grid) +{ + dispatch_source_t timer; + + timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); + dispatch_source_set_timer(dispatch_time(DISPATCH_TIME_NOW, 0). 10000000, 1000); + dispatch_source_set_event_handler(^{ + int x,y; + x = 0; + for (x = 0; x < grid_x_size; ++x) { + for (y = 0; y < grid_y_size; ++y) { + mvaddnstr(y, x, &grid[GRID_OFF(x,y)].display, 1); + } + } + refresh(); + }); + dispatch_resume(timer); +} +#endif /* defined(DISPATCH_LIFE_GL) */ diff --git a/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj b/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj new file mode 100644 index 000000000..68972782d --- /dev/null +++ b/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj @@ -0,0 +1,252 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 45; + objects = { + +/* Begin PBXBuildFile section */ + 8D11072A0486CEB800E47090 /* MainMenu.nib in Resources */ = {isa = PBXBuildFile; fileRef = 29B97318FDCFA39411CA2CEA /* MainMenu.nib */; }; + 8D11072B0486CEB800E47090 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */; }; + 8D11072D0486CEB800E47090 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 29B97316FDCFA39411CA2CEA /* main.m */; settings = {ATTRIBUTES = (); }; }; + 8D11072F0486CEB800E47090 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */; }; + FC0615200DF53162002BF852 /* DispatchLifeGLView.m in Sources */ = {isa = PBXBuildFile; fileRef = FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */; }; + FC0615450DF535BD002BF852 /* OpenGL.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FC0615440DF535BD002BF852 /* OpenGL.framework */; }; + FC787BF60DF67AAF009415DA /* DispatchLife.c in Sources */ = {isa = PBXBuildFile; fileRef = FC787BF50DF67AAF009415DA /* DispatchLife.c */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 089C165DFE840E0CC02AAC07 /* English */ = {isa = PBXFileReference; fileEncoding = 10; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = ""; }; + 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = /System/Library/Frameworks/Cocoa.framework; sourceTree = ""; }; + 29B97316FDCFA39411CA2CEA /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; + 29B97319FDCFA39411CA2CEA /* English */ = {isa = PBXFileReference; lastKnownFileType = wrapper.nib; name = English; path = English.lproj/MainMenu.nib; sourceTree = ""; }; + 29B97324FDCFA39411CA2CEA 
/* AppKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AppKit.framework; path = /System/Library/Frameworks/AppKit.framework; sourceTree = ""; }; + 29B97325FDCFA39411CA2CEA /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = ""; }; + 8D1107310486CEB800E47090 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 8D1107320486CEB800E47090 /* DispatchLife.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = DispatchLife.app; sourceTree = BUILT_PRODUCTS_DIR; }; + FC06151E0DF53162002BF852 /* DispatchLifeGLView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DispatchLifeGLView.h; sourceTree = ""; }; + FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DispatchLifeGLView.m; sourceTree = ""; }; + FC0615440DF535BD002BF852 /* OpenGL.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = OpenGL.framework; path = /System/Library/Frameworks/OpenGL.framework; sourceTree = ""; }; + FC787BF50DF67AAF009415DA /* DispatchLife.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = DispatchLife.c; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 8D11072E0486CEB800E47090 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 8D11072F0486CEB800E47090 /* Cocoa.framework in Frameworks */, + FC0615450DF535BD002BF852 /* OpenGL.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 
080E96DDFE201D6D7F000001 /* Classes */ = { + isa = PBXGroup; + children = ( + FC06151E0DF53162002BF852 /* DispatchLifeGLView.h */, + FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */, + ); + name = Classes; + sourceTree = ""; + }; + 1058C7A0FEA54F0111CA2CBB /* Linked Frameworks */ = { + isa = PBXGroup; + children = ( + 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */, + FC0615440DF535BD002BF852 /* OpenGL.framework */, + ); + name = "Linked Frameworks"; + sourceTree = ""; + }; + 1058C7A2FEA54F0111CA2CBB /* Other Frameworks */ = { + isa = PBXGroup; + children = ( + 29B97324FDCFA39411CA2CEA /* AppKit.framework */, + 29B97325FDCFA39411CA2CEA /* Foundation.framework */, + ); + name = "Other Frameworks"; + sourceTree = ""; + }; + 19C28FACFE9D520D11CA2CBB /* Products */ = { + isa = PBXGroup; + children = ( + 8D1107320486CEB800E47090 /* DispatchLife.app */, + ); + name = Products; + sourceTree = ""; + }; + 29B97314FDCFA39411CA2CEA /* DispatchLife */ = { + isa = PBXGroup; + children = ( + 080E96DDFE201D6D7F000001 /* Classes */, + 29B97315FDCFA39411CA2CEA /* Other Sources */, + 29B97317FDCFA39411CA2CEA /* Resources */, + 29B97323FDCFA39411CA2CEA /* Frameworks */, + 19C28FACFE9D520D11CA2CBB /* Products */, + ); + name = DispatchLife; + sourceTree = ""; + }; + 29B97315FDCFA39411CA2CEA /* Other Sources */ = { + isa = PBXGroup; + children = ( + FC787BF50DF67AAF009415DA /* DispatchLife.c */, + 29B97316FDCFA39411CA2CEA /* main.m */, + ); + name = "Other Sources"; + sourceTree = ""; + }; + 29B97317FDCFA39411CA2CEA /* Resources */ = { + isa = PBXGroup; + children = ( + 8D1107310486CEB800E47090 /* Info.plist */, + 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */, + 29B97318FDCFA39411CA2CEA /* MainMenu.nib */, + ); + name = Resources; + sourceTree = ""; + }; + 29B97323FDCFA39411CA2CEA /* Frameworks */ = { + isa = PBXGroup; + children = ( + 1058C7A0FEA54F0111CA2CBB /* Linked Frameworks */, + 1058C7A2FEA54F0111CA2CBB /* Other Frameworks */, + ); + name = Frameworks; + sourceTree = ""; + 
}; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 8D1107260486CEB800E47090 /* DispatchLife */ = { + isa = PBXNativeTarget; + buildConfigurationList = C01FCF4A08A954540054247B /* Build configuration list for PBXNativeTarget "DispatchLife" */; + buildPhases = ( + 8D1107290486CEB800E47090 /* Resources */, + 8D11072C0486CEB800E47090 /* Sources */, + 8D11072E0486CEB800E47090 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = DispatchLife; + productInstallPath = "$(HOME)/Applications"; + productName = DispatchLife; + productReference = 8D1107320486CEB800E47090 /* DispatchLife.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 29B97313FDCFA39411CA2CEA /* Project object */ = { + isa = PBXProject; + buildConfigurationList = C01FCF4E08A954540054247B /* Build configuration list for PBXProject "DispatchLife" */; + compatibilityVersion = "Xcode 3.1"; + hasScannedForEncodings = 1; + mainGroup = 29B97314FDCFA39411CA2CEA /* DispatchLife */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8D1107260486CEB800E47090 /* DispatchLife */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8D1107290486CEB800E47090 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8D11072A0486CEB800E47090 /* MainMenu.nib in Resources */, + 8D11072B0486CEB800E47090 /* InfoPlist.strings in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 8D11072C0486CEB800E47090 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 8D11072D0486CEB800E47090 /* main.m in Sources */, + FC0615200DF53162002BF852 /* DispatchLifeGLView.m in Sources */, + FC787BF60DF67AAF009415DA /* DispatchLife.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 
+/* End PBXSourcesBuildPhase section */ + +/* Begin PBXVariantGroup section */ + 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */ = { + isa = PBXVariantGroup; + children = ( + 089C165DFE840E0CC02AAC07 /* English */, + ); + name = InfoPlist.strings; + sourceTree = ""; + }; + 29B97318FDCFA39411CA2CEA /* MainMenu.nib */ = { + isa = PBXVariantGroup; + children = ( + 29B97319FDCFA39411CA2CEA /* English */, + ); + name = MainMenu.nib; + sourceTree = ""; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + C01FCF4C08A954540054247B /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + INFOPLIST_FILE = Info.plist; + PRODUCT_NAME = DispatchLife; + }; + name = Release; + }; + C01FCF5008A954540054247B /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD_32_BIT)"; + GCC_C_LANGUAGE_STANDARD = c99; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + OTHER_CFLAGS = ( + "-DDISPATCH_LIFE_GL", + "-fblocks", + ); + PREBINDING = NO; + SDKROOT = ""; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + C01FCF4A08A954540054247B /* Build configuration list for PBXNativeTarget "DispatchLife" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C01FCF4C08A954540054247B /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C01FCF4E08A954540054247B /* Build configuration list for PBXProject "DispatchLife" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C01FCF5008A954540054247B /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 29B97313FDCFA39411CA2CEA /* Project object */; +} diff --git a/examples/DispatchLife/DispatchLifeGLView.h b/examples/DispatchLife/DispatchLifeGLView.h new file mode 100644 index 
000000000..7ed6bbdd7 --- /dev/null +++ b/examples/DispatchLife/DispatchLifeGLView.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2009-2009 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. 
APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. + * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#import + +struct cell; + +// From DispatchLife.c +extern struct cell* init_grid(size_t grid_x_size, size_t grid_y_size); +extern char get_grid_display_char(struct cell* grid, size_t x, size_t y); + +@interface DispatchLifeGLView : NSOpenGLView { + struct cell* grid; + uint32_t* image; +} + +- (void)adjustGLViewBounds; + +@end diff --git a/examples/DispatchLife/DispatchLifeGLView.m b/examples/DispatchLife/DispatchLifeGLView.m new file mode 100644 index 000000000..5aa843b65 --- /dev/null +++ b/examples/DispatchLife/DispatchLifeGLView.m @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2009-2009 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. 
+ * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#import "DispatchLifeGLView.h" + +#import + +#include +#include +#include + +#include +#include + +extern size_t grid_x_size; +extern size_t grid_y_size; + +@implementation DispatchLifeGLView + +#define CELL_WIDTH 8 +#define CELL_HEIGHT 8 + +- (void)goFullScreen:(NSOpenGLView*)view { + NSOpenGLPixelFormatAttribute attrs[] = + { + NSOpenGLPFAFullScreen, + + NSOpenGLPFAScreenMask, CGDisplayIDToOpenGLDisplayMask(kCGDirectMainDisplay), + + NSOpenGLPFAAccelerated, + NSOpenGLPFANoRecovery, + NSOpenGLPFADoubleBuffer, + 0 + }; + NSOpenGLPixelFormat* pixFmt = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; + + NSOpenGLContext* screen = [[NSOpenGLContext alloc] initWithFormat:pixFmt shareContext:[view openGLContext]]; + + CGDisplayErr err = CGCaptureAllDisplays(); + if (err != CGDisplayNoErr) { + [screen release]; + return; + } + + [screen setFullScreen]; + [screen makeCurrentContext]; + + glClearColor(0.0, 0.0, 0.0, 0.0); + glClear(GL_COLOR_BUFFER_BIT); + [screen flushBuffer]; + glClear(GL_COLOR_BUFFER_BIT); + [screen flushBuffer]; +} + + +- (id)initWithFrame:(NSRect)frame { + NSOpenGLPixelFormatAttribute attrs[] = + { + NSOpenGLPFAAccelerated, + NSOpenGLPFANoRecovery, + NSOpenGLPFADoubleBuffer, + 0 + }; + NSOpenGLPixelFormat* pixFmt = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; + + self = [super initWithFrame:frame pixelFormat:pixFmt]; + if (self) { + + [[self 
openGLContext] makeCurrentContext]; + glPixelStorei(GL_UNPACK_ALIGNMENT, 1); + glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); + glClearColor(1.0, 1.0, 1.0, 1.0); + glColor4f(1.0, 1.0, 1.0, 1.0); + glEnable(GL_RASTER_POSITION_UNCLIPPED_IBM); + glDisable(GL_DITHER); + + grid_x_size = 128; + grid_y_size = 96; + + self->grid = init_grid(grid_x_size, grid_y_size); + size_t image_size = grid_x_size * grid_y_size * sizeof(uint32_t); + self->image = malloc(image_size); + memset(self->image, 0xFF, image_size); + + [self adjustGLViewBounds]; + + [[NSTimer scheduledTimerWithTimeInterval: (1.0f / 15.0) target: self selector:@selector(drawRect:) userInfo:self repeats:true] retain]; + + } + return self; +} + +- (void)drawRect:(NSRect)rect { + [[self openGLContext] makeCurrentContext]; + + glClear(GL_COLOR_BUFFER_BIT); + + NSRect bounds = [self bounds]; + glRasterPos2f(-bounds.size.width/2, -bounds.size.height/2); + glPixelZoom(bounds.size.width/grid_x_size, bounds.size.height/grid_y_size); + + const int width = grid_x_size; + const int height = grid_y_size; + + int x, y; + for (y = 0; y < height; ++y) { + for (x = 0; x < width; ++x) { + int i = y * width + x; + switch (get_grid_display_char(grid, x, y)) { + case '.': + image[i] = 0xCCCCCCFF; + break; + case '#': + image[i] = 0x000000FF; + break; + case ' ': + image[i] = 0xFFFFFFFF; + break; + default: + image[i] = 0x0000FFFF; + break; + } + } + } + + glDrawPixels(width, height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, image); + + glFinish(); + + [[self openGLContext] flushBuffer]; + +} + +- (void)adjustGLViewBounds +{ + [[self openGLContext] makeCurrentContext]; + [[self openGLContext] update]; + + NSRect rect = [self bounds]; + + glViewport(0, 0, (GLint) rect.size.width, (GLint) rect.size.height); + glMatrixMode(GL_PROJECTION); + glLoadIdentity(); + gluOrtho2D(-(rect.size.width/2), rect.size.width/2, -(rect.size.height/2), rect.size.height/2); + glMatrixMode(GL_MODELVIEW); + glLoadIdentity(); + + [self 
setNeedsDisplay:true]; +} + +- (void)update // moved or resized +{ + [super update]; + [self adjustGLViewBounds]; +} + +- (void)reshape // scrolled, moved or resized +{ + [super reshape]; + [self adjustGLViewBounds]; +} + +@end diff --git a/examples/DispatchLife/English.lproj/InfoPlist.strings b/examples/DispatchLife/English.lproj/InfoPlist.strings new file mode 100644 index 0000000000000000000000000000000000000000..5e45963c382ba690b781b953a00585212b898ac5 GIT binary patch literal 92 zcmW-XQ3`+{5C!MkQ~2$No+IcIkqMDxWCV8j>LCj|yTg2Mz+o9F%uHlf9u}h9EuK`F a!Y*1dX%G66ZqL#C$|bw0ZoP5@jOGW1ArT7z literal 0 HcmV?d00001 diff --git a/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib b/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib new file mode 100644 index 000000000..0cdc4e13a --- /dev/null +++ b/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib @@ -0,0 +1,2651 @@ + + + + 0 + 10A219 + 708 + 994.4 + 404.00 + + com.apple.InterfaceBuilder.CocoaPlugin + 708 + + + YES + + + YES + com.apple.InterfaceBuilder.CocoaPlugin + + + YES + + YES + + + YES + + + + YES + + NSApplication + + + FirstResponder + + + NSApplication + + + AMainMenu + + YES + + + DispatchLife + + 1048576 + 2147483647 + + NSImage + NSMenuCheckmark + + + NSImage + NSMenuMixedState + + submenuAction: + + DispatchLife + + YES + + + About DispatchLife + + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + UHJlZmVyZW5jZXPigKY + , + 1048576 + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Services + + 1048576 + 2147483647 + + + submenuAction: + + Services + + YES + + _NSServicesMenu + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Hide DispatchLife + h + 1048576 + 2147483647 + + + + + + Hide Others + h + 1572864 + 2147483647 + + + + + + Show All + + 1048576 + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Quit DispatchLife + q + 1048576 + 2147483647 + + + + + _NSAppleMenu + + + + + File + + 1048576 + 
2147483647 + + + submenuAction: + + File + + YES + + + New + n + 1048576 + 2147483647 + + + + + + T3BlbuKApg + o + 1048576 + 2147483647 + + + + + + Open Recent + + 1048576 + 2147483647 + + + submenuAction: + + Open Recent + + YES + + + Clear Menu + + 1048576 + 2147483647 + + + + + _NSRecentDocumentsMenu + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Close + w + 1048576 + 2147483647 + + + + + + Save + s + 1048576 + 2147483647 + + + + + + U2F2ZSBBc+KApg + S + 1179648 + 2147483647 + + + + + + Revert to Saved + + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Page Setup... + P + 1179648 + 2147483647 + + + + + + + UHJpbnTigKY + p + 1048576 + 2147483647 + + + + + + + + + Edit + + 1048576 + 2147483647 + + + submenuAction: + + Edit + + YES + + + Undo + z + 1048576 + 2147483647 + + + + + + Redo + Z + 1179648 + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Cut + x + 1048576 + 2147483647 + + + + + + Copy + c + 1048576 + 2147483647 + + + + + + Paste + v + 1048576 + 2147483647 + + + + + + Delete + + 1048576 + 2147483647 + + + + + + Select All + a + 1048576 + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Find + + 1048576 + 2147483647 + + + submenuAction: + + Find + + YES + + + RmluZOKApg + f + 1048576 + 2147483647 + + + 1 + + + + Find Next + g + 1048576 + 2147483647 + + + 2 + + + + Find Previous + G + 1179648 + 2147483647 + + + 3 + + + + Use Selection for Find + e + 1048576 + 2147483647 + + + 7 + + + + Jump to Selection + j + 1048576 + 2147483647 + + + + + + + + + Spelling and Grammar + + 1048576 + 2147483647 + + + submenuAction: + + Spelling and Grammar + + YES + + + U2hvdyBTcGVsbGluZ+KApg + : + 1048576 + 2147483647 + + + + + + Check Spelling + ; + 1048576 + 2147483647 + + + + + + Check Spelling While Typing + + 1048576 + 2147483647 + + + + + + Check Grammar With Spelling + + 1048576 + 2147483647 + + + + + + + + + Substitutions + + 1048576 + 2147483647 + + + submenuAction: + + Substitutions 
+ + YES + + + Smart Copy/Paste + f + 1048576 + 2147483647 + + + 1 + + + + Smart Quotes + g + 1048576 + 2147483647 + + + 2 + + + + Smart Links + G + 1179648 + 2147483647 + + + 3 + + + + + + + Speech + + 1048576 + 2147483647 + + + submenuAction: + + Speech + + YES + + + Start Speaking + + 1048576 + 2147483647 + + + + + + Stop Speaking + + 1048576 + 2147483647 + + + + + + + + + + + + Format + + 1048576 + 2147483647 + + + submenuAction: + + Format + + YES + + + Show Fonts + t + 1048576 + 2147483647 + + + + + + Show Colors + C + 1179648 + 2147483647 + + + + + + + + + View + + 1048576 + 2147483647 + + + submenuAction: + + View + + YES + + + Show Toolbar + t + 1572864 + 2147483647 + + + + + + Q3VzdG9taXplIFRvb2xiYXLigKY + + 1048576 + 2147483647 + + + + + + + + + Window + + 1048576 + 2147483647 + + + submenuAction: + + Window + + YES + + + Minimize + m + 1048576 + 2147483647 + + + + + + Zoom + + 1048576 + 2147483647 + + + + + + YES + YES + + + 1048576 + 2147483647 + + + + + + Bring All to Front + + 1048576 + 2147483647 + + + + + _NSWindowsMenu + + + + + Help + + 1048576 + 2147483647 + + + submenuAction: + + Help + + YES + + + DispatchLife Help + ? 
+ 1048576 + 2147483647 + + + + + + + + _NSMainMenu + + + 15 + 2 + {{384, 348}, {512, 384}} + 1954021376 + DispatchLife + NSWindow + + {3.40282e+38, 3.40282e+38} + + + 256 + + YES + + + 4415 + {512, 384} + + + DispatchLifeGLView + + + {512, 384} + + + + {{0, 0}, {1280, 1002}} + {3.40282e+38, 3.40282e+38} + + + + + YES + + + performMiniaturize: + + + + 37 + + + + arrangeInFront: + + + + 39 + + + + print: + + + + 86 + + + + runPageLayout: + + + + 87 + + + + clearRecentDocuments: + + + + 127 + + + + orderFrontStandardAboutPanel: + + + + 142 + + + + performClose: + + + + 193 + + + + toggleContinuousSpellChecking: + + + + 222 + + + + undo: + + + + 223 + + + + copy: + + + + 224 + + + + checkSpelling: + + + + 225 + + + + paste: + + + + 226 + + + + stopSpeaking: + + + + 227 + + + + cut: + + + + 228 + + + + showGuessPanel: + + + + 230 + + + + redo: + + + + 231 + + + + selectAll: + + + + 232 + + + + startSpeaking: + + + + 233 + + + + delete: + + + + 235 + + + + performZoom: + + + + 240 + + + + performFindPanelAction: + + + + 241 + + + + centerSelectionInVisibleArea: + + + + 245 + + + + toggleGrammarChecking: + + + + 347 + + + + toggleSmartInsertDelete: + + + + 355 + + + + toggleAutomaticQuoteSubstitution: + + + + 356 + + + + toggleAutomaticLinkDetection: + + + + 357 + + + + showHelp: + + + + 360 + + + + orderFrontColorPanel: + + + + 361 + + + + saveDocument: + + + + 362 + + + + saveDocumentAs: + + + + 363 + + + + revertDocumentToSaved: + + + + 364 + + + + runToolbarCustomizationPalette: + + + + 365 + + + + toggleToolbarShown: + + + + 366 + + + + hide: + + + + 367 + + + + hideOtherApplications: + + + + 368 + + + + terminate: + + + + 369 + + + + unhideAllApplications: + + + + 370 + + + + newDocument: + + + + 373 + + + + openDocument: + + + + 374 + + + + + YES + + 0 + + YES + + + + + + -2 + + + RmlsZSdzIE93bmVyA + + + -1 + + + First Responder + + + -3 + + + Application + + + 29 + + + YES + + + + + + + + + + MainMenu + + + 19 + + + YES + + + + + + 56 + + + YES + + + + + + 103 + + 
+ YES + + + + 1 + + + 217 + + + YES + + + + + + 83 + + + YES + + + + + + 81 + + + YES + + + + + + + + + + + + + + + + 75 + + + 3 + + + 80 + + + 8 + + + 78 + + + 6 + + + 72 + + + + + 82 + + + 9 + + + 124 + + + YES + + + + + + 77 + + + 5 + + + 73 + + + 1 + + + 79 + + + 7 + + + 112 + + + 10 + + + 74 + + + 2 + + + 125 + + + YES + + + + + + 126 + + + + + 205 + + + YES + + + + + + + + + + + + + + + + + + 202 + + + + + 198 + + + + + 207 + + + + + 214 + + + + + 199 + + + + + 203 + + + + + 197 + + + + + 206 + + + + + 215 + + + + + 218 + + + YES + + + + + + 216 + + + YES + + + + + + 200 + + + YES + + + + + + + + + 219 + + + + + 201 + + + + + 204 + + + + + 220 + + + YES + + + + + + + + + + 213 + + + + + 210 + + + + + 221 + + + + + 208 + + + + + 209 + + + + + 106 + + + YES + + + + 2 + + + 111 + + + + + 57 + + + YES + + + + + + + + + + + + + + + + 58 + + + + + 134 + + + + + 150 + + + + + 136 + + + 1111 + + + 144 + + + + + 129 + + + 121 + + + 143 + + + + + 236 + + + + + 131 + + + YES + + + + + + 149 + + + + + 145 + + + + + 130 + + + + + 24 + + + YES + + + + + + + + + 92 + + + + + 5 + + + + + 239 + + + + + 23 + + + + + 295 + + + YES + + + + + + 296 + + + YES + + + + + + + 297 + + + + + 298 + + + + + 299 + + + YES + + + + + + 300 + + + YES + + + + + + + 344 + + + + + 345 + + + + + 211 + + + YES + + + + + + 212 + + + YES + + + + + + + 195 + + + + + 196 + + + + + 346 + + + + + 348 + + + YES + + + + + + 349 + + + YES + + + + + + + + 350 + + + + + 351 + + + + + 354 + + + + + 371 + + + YES + + + + + + 372 + + + YES + + + + + + 377 + + + + + + + YES + + YES + 103.IBPluginDependency + 103.ImportedFromIB2 + 106.IBEditorWindowLastContentRect + 106.IBPluginDependency + 106.ImportedFromIB2 + 106.editorWindowContentRectSynchronizationRect + 111.IBPluginDependency + 111.ImportedFromIB2 + 112.IBPluginDependency + 112.ImportedFromIB2 + 124.IBPluginDependency + 124.ImportedFromIB2 + 125.IBPluginDependency + 125.ImportedFromIB2 + 125.editorWindowContentRectSynchronizationRect + 
126.IBPluginDependency + 126.ImportedFromIB2 + 129.IBPluginDependency + 129.ImportedFromIB2 + 130.IBPluginDependency + 130.ImportedFromIB2 + 130.editorWindowContentRectSynchronizationRect + 131.IBPluginDependency + 131.ImportedFromIB2 + 134.IBPluginDependency + 134.ImportedFromIB2 + 136.IBPluginDependency + 136.ImportedFromIB2 + 143.IBPluginDependency + 143.ImportedFromIB2 + 144.IBPluginDependency + 144.ImportedFromIB2 + 145.IBPluginDependency + 145.ImportedFromIB2 + 149.IBPluginDependency + 149.ImportedFromIB2 + 150.IBPluginDependency + 150.ImportedFromIB2 + 19.IBPluginDependency + 19.ImportedFromIB2 + 195.IBPluginDependency + 195.ImportedFromIB2 + 196.IBPluginDependency + 196.ImportedFromIB2 + 197.IBPluginDependency + 197.ImportedFromIB2 + 198.IBPluginDependency + 198.ImportedFromIB2 + 199.IBPluginDependency + 199.ImportedFromIB2 + 200.IBPluginDependency + 200.ImportedFromIB2 + 200.editorWindowContentRectSynchronizationRect + 201.IBPluginDependency + 201.ImportedFromIB2 + 202.IBPluginDependency + 202.ImportedFromIB2 + 203.IBPluginDependency + 203.ImportedFromIB2 + 204.IBPluginDependency + 204.ImportedFromIB2 + 205.IBEditorWindowLastContentRect + 205.IBPluginDependency + 205.ImportedFromIB2 + 205.editorWindowContentRectSynchronizationRect + 206.IBPluginDependency + 206.ImportedFromIB2 + 207.IBPluginDependency + 207.ImportedFromIB2 + 208.IBPluginDependency + 208.ImportedFromIB2 + 209.IBPluginDependency + 209.ImportedFromIB2 + 210.IBPluginDependency + 210.ImportedFromIB2 + 211.IBPluginDependency + 211.ImportedFromIB2 + 212.IBPluginDependency + 212.ImportedFromIB2 + 212.editorWindowContentRectSynchronizationRect + 213.IBPluginDependency + 213.ImportedFromIB2 + 214.IBPluginDependency + 214.ImportedFromIB2 + 215.IBPluginDependency + 215.ImportedFromIB2 + 216.IBPluginDependency + 216.ImportedFromIB2 + 217.IBPluginDependency + 217.ImportedFromIB2 + 218.IBPluginDependency + 218.ImportedFromIB2 + 219.IBPluginDependency + 219.ImportedFromIB2 + 220.IBPluginDependency + 
220.ImportedFromIB2 + 220.editorWindowContentRectSynchronizationRect + 221.IBPluginDependency + 221.ImportedFromIB2 + 23.IBPluginDependency + 23.ImportedFromIB2 + 236.IBPluginDependency + 236.ImportedFromIB2 + 239.IBPluginDependency + 239.ImportedFromIB2 + 24.IBEditorWindowLastContentRect + 24.IBPluginDependency + 24.ImportedFromIB2 + 24.editorWindowContentRectSynchronizationRect + 29.IBEditorWindowLastContentRect + 29.IBPluginDependency + 29.ImportedFromIB2 + 29.WindowOrigin + 29.editorWindowContentRectSynchronizationRect + 295.IBPluginDependency + 296.IBEditorWindowLastContentRect + 296.IBPluginDependency + 296.editorWindowContentRectSynchronizationRect + 297.IBPluginDependency + 298.IBPluginDependency + 299.IBPluginDependency + 300.IBEditorWindowLastContentRect + 300.IBPluginDependency + 300.editorWindowContentRectSynchronizationRect + 344.IBPluginDependency + 345.IBPluginDependency + 346.IBPluginDependency + 346.ImportedFromIB2 + 348.IBPluginDependency + 348.ImportedFromIB2 + 349.IBPluginDependency + 349.ImportedFromIB2 + 349.editorWindowContentRectSynchronizationRect + 350.IBPluginDependency + 350.ImportedFromIB2 + 351.IBPluginDependency + 351.ImportedFromIB2 + 354.IBPluginDependency + 354.ImportedFromIB2 + 371.IBEditorWindowLastContentRect + 371.IBPluginDependency + 371.IBWindowTemplateEditedContentRect + 371.NSWindowTemplate.visibleAtLaunch + 371.editorWindowContentRectSynchronizationRect + 371.windowTemplate.maxSize + 372.IBPluginDependency + 377.IBPluginDependency + 377.IBViewIntegration.shadowBlurRadius + 377.IBViewIntegration.shadowColor + 377.IBViewIntegration.shadowOffsetHeight + 377.IBViewIntegration.shadowOffsetWidth + 5.IBPluginDependency + 5.ImportedFromIB2 + 56.IBPluginDependency + 56.ImportedFromIB2 + 57.IBEditorWindowLastContentRect + 57.IBPluginDependency + 57.ImportedFromIB2 + 57.editorWindowContentRectSynchronizationRect + 58.IBPluginDependency + 58.ImportedFromIB2 + 72.IBPluginDependency + 72.ImportedFromIB2 + 73.IBPluginDependency + 
73.ImportedFromIB2 + 74.IBPluginDependency + 74.ImportedFromIB2 + 75.IBPluginDependency + 75.ImportedFromIB2 + 77.IBPluginDependency + 77.ImportedFromIB2 + 78.IBPluginDependency + 78.ImportedFromIB2 + 79.IBPluginDependency + 79.ImportedFromIB2 + 80.IBPluginDependency + 80.ImportedFromIB2 + 81.IBEditorWindowLastContentRect + 81.IBPluginDependency + 81.ImportedFromIB2 + 81.editorWindowContentRectSynchronizationRect + 82.IBPluginDependency + 82.ImportedFromIB2 + 83.IBPluginDependency + 83.ImportedFromIB2 + 92.IBPluginDependency + 92.ImportedFromIB2 + + + YES + com.apple.InterfaceBuilder.CocoaPlugin + + {{394, 713}, {191, 23}} + com.apple.InterfaceBuilder.CocoaPlugin + + {{596, 852}, {216, 23}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{522, 812}, {146, 23}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{436, 809}, {64, 6}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{608, 612}, {275, 83}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{163, 493}, {240, 243}} + com.apple.InterfaceBuilder.CocoaPlugin + + {{187, 434}, {243, 243}} + 
com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{608, 612}, {167, 43}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{608, 612}, {241, 103}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{323, 663}, {194, 73}} + com.apple.InterfaceBuilder.CocoaPlugin + + {{525, 802}, {197, 73}} + {{0, 736}, {455, 20}} + com.apple.InterfaceBuilder.CocoaPlugin + + {74, 862} + {{6, 978}, {478, 20}} + com.apple.InterfaceBuilder.CocoaPlugin + {{273, 693}, {231, 43}} + com.apple.InterfaceBuilder.CocoaPlugin + {{475, 832}, {234, 43}} + com.apple.InterfaceBuilder.CocoaPlugin + com.apple.InterfaceBuilder.CocoaPlugin + com.apple.InterfaceBuilder.CocoaPlugin + {{207, 693}, {173, 43}} + com.apple.InterfaceBuilder.CocoaPlugin + {{231, 634}, {176, 43}} + com.apple.InterfaceBuilder.CocoaPlugin + com.apple.InterfaceBuilder.CocoaPlugin + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{608, 612}, {215, 63}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{314, 134}, {512, 384}} + com.apple.InterfaceBuilder.CocoaPlugin + {{314, 134}, {512, 384}} + + {{33, 99}, {480, 360}} + {3.40282e+38, 3.40282e+38} + com.apple.InterfaceBuilder.CocoaPlugin + 
com.apple.InterfaceBuilder.CocoaPlugin + + + 3 + MAA + + + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{12, 553}, {220, 183}} + com.apple.InterfaceBuilder.CocoaPlugin + + {{23, 794}, {245, 183}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + {{121, 533}, {196, 203}} + com.apple.InterfaceBuilder.CocoaPlugin + + {{145, 474}, {199, 203}} + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + com.apple.InterfaceBuilder.CocoaPlugin + + + + + YES + + YES + + + YES + + + + + YES + + YES + + + YES + + + + 377 + + + + YES + + DispatchLifeGLView + NSOpenGLView + + IBProjectSource + DispatchLifeGLView.h + + + + + YES + + NSApplication + NSResponder + + IBFrameworkSource + AppKit.framework/Headers/NSApplication.h + + + + NSApplication + + IBFrameworkSource + AppKit.framework/Headers/NSApplicationScripting.h + + + + NSApplication + + IBFrameworkSource + AppKit.framework/Headers/NSColorPanel.h + + + + NSApplication + + IBFrameworkSource + AppKit.framework/Headers/NSHelpManager.h + + + + NSApplication + + IBFrameworkSource + AppKit.framework/Headers/NSPageLayout.h + + + + NSApplication + + IBFrameworkSource + AppKit.framework/Headers/NSUserInterfaceItemSearching.h + + + + NSBrowser + NSControl + + IBFrameworkSource + AppKit.framework/Headers/NSBrowser.h + + + + NSControl + NSView + + IBFrameworkSource + AppKit.framework/Headers/NSControl.h + + + + NSDocument + NSObject + + YES + + YES + printDocument: + revertDocumentToSaved: + runPageLayout: + saveDocument: + saveDocumentAs: + saveDocumentTo: + + + YES + id + id + id + id + id + id + + + + IBFrameworkSource + 
AppKit.framework/Headers/NSDocument.h + + + + NSDocument + + IBFrameworkSource + AppKit.framework/Headers/NSDocumentScripting.h + + + + NSDocumentController + NSObject + + YES + + YES + clearRecentDocuments: + newDocument: + openDocument: + saveAllDocuments: + + + YES + id + id + id + id + + + + IBFrameworkSource + AppKit.framework/Headers/NSDocumentController.h + + + + NSFormatter + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSFormatter.h + + + + NSMatrix + NSControl + + IBFrameworkSource + AppKit.framework/Headers/NSMatrix.h + + + + NSMenu + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSMenu.h + + + + NSMenuItem + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSMenuItem.h + + + + NSMovieView + NSView + + IBFrameworkSource + AppKit.framework/Headers/NSMovieView.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSAccessibility.h + + + + NSObject + + + + NSObject + + + + NSObject + + + + NSObject + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSDictionaryController.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSDragging.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSFontManager.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSFontPanel.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSKeyValueBinding.h + + + + NSObject + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSNibLoading.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSOutlineView.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSPasteboard.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSSavePanel.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSTableView.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSToolbarItem.h + + + + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSView.h + + + + NSObject + + 
IBFrameworkSource + Foundation.framework/Headers/NSArchiver.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSClassDescription.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSError.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSFileManager.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSKeyValueCoding.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSKeyValueObserving.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSKeyedArchiver.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSObject.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSObjectScripting.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSPortCoder.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSRunLoop.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSScriptClassDescription.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSScriptKeyValueCoding.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSScriptObjectSpecifiers.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSScriptWhoseTests.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSThread.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSURL.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSURLConnection.h + + + + NSObject + + IBFrameworkSource + Foundation.framework/Headers/NSURLDownload.h + + + + NSOpenGLView + NSView + + IBFrameworkSource + AppKit.framework/Headers/NSOpenGLView.h + + + + NSResponder + + IBFrameworkSource + AppKit.framework/Headers/NSInterfaceStyle.h + + + + NSResponder + NSObject + + IBFrameworkSource + AppKit.framework/Headers/NSResponder.h + + + + NSTableView + NSControl + + + + NSText + NSView + + IBFrameworkSource + 
AppKit.framework/Headers/NSText.h + + + + NSView + + IBFrameworkSource + AppKit.framework/Headers/NSClipView.h + + + + NSView + + + + NSView + + IBFrameworkSource + AppKit.framework/Headers/NSRulerView.h + + + + NSView + NSResponder + + + + NSWindow + + IBFrameworkSource + AppKit.framework/Headers/NSDrawer.h + + + + NSWindow + NSResponder + + IBFrameworkSource + AppKit.framework/Headers/NSWindow.h + + + + NSWindow + + IBFrameworkSource + AppKit.framework/Headers/NSWindowScripting.h + + + + + 0 + ../DispatchLifeGL.xcodeproj + 3 + + diff --git a/examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib b/examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib new file mode 100644 index 0000000000000000000000000000000000000000..05cdecfbf52574b74d6f15bb75a4b6cfa9159b00 GIT binary patch literal 19575 zcmb7r2YgfI*Z=d}o7{Vwj@+#7CTY`|Cf!Yw6qLPJX(?sajzC+w>;THVSN28(1OZvf z5(E(tkRKvj5Rkn$$QBh*hQR;aqzx(U@BP1T{b24q-*e7$p0l3k=9U*1R#wHuokAE9 zh(sbJW@?yW%|1Hu@FaN57$e&@=QLy}$@F*b{qUDOTV>ti{1N1nY1Fj>GXd0Vm=V+zdCz zEpSWR7I(+#I0N^Kqh!}-{T3vmUm#8tQ&*Wi!v7(5n_!;|nVJQsh8=i&Ky5&jad zz$@`rcoW`+x8v{e5BMPd2_M3T@iBZ3pT`&QMSK%ZL@kH^gRQ3-KMXmDoY-BlZ&qh=at> z#A)ITah5noTq150zY=$d2gF~*Q{r#p1xb=L$&hZOg!CePNMF*A3?joxJsC|JNFy0f zTFF$h3E7NnPj)7|kX^}ivM>`!Ksd1MJ$N|ur3k!DrzHYD{3d|D(WrjBkC*46_toeMP;HYQMIT>G)y#BG($8~ z^s#8MXpQJw(I(Mm(Js+p(a)kIq7$OiqBElFqKBeKqGzJ#q8DODtPm^3DzTr~UmPZm z6Q_!sh?|OAh&zfqiF=B(#J$A*#f9P`@gQ-9c%*ogc#?Rsc#3$sc!v06@m;DJHIJH4 zeMT*y7E+6-&#A@K5^5>6j9N~8L48TBpjJ|=sMXXO>MLq3wT}9lT2F1DzM(c!-%^{X z&D0j^J8CPnjoMCqPwk-oL+zw~pmtF|QoE@=)Lv>IwVygb9i)Du4pE1xpQ$6%QR*1= z3w4}2L7k*dQKzXh)LH5rb)LFFU8F8im#Hh%Rq7gbow`BYq;65Ssb8r()LrTxb)R}b zJ)|B{zf(`>3i=QFPx=Y{7yXp}oBoG>Mn9)t&@btK=~wh?`VE5^%n%I8h!`A#Iwq6}W5SsT zCX$I_^h`8kV2n%*6U)Rg@k|1f$RshzjEPBM%#4MxGO0`xrYZ9t^FGszY0k7@S~9Je z)=V3wEz^!^&valqGM$*tOc$mr(~U`Ex-;oa2GfJdWO_1LOfRN4(}(HH^ke!n*~|w_ z4wK8|G5L&*8Ndu=3YbEsh#ABbGbKzpQ^^eO5n5VZTzr+4BL=x4335js$P;-XDPoZf z$&msnkqUVuALNVtkUt7QfhY*6kp^i|FbY9B6pF%7IEp}#C<^IObe}HiU2;lnl^tz0 
zmHoP;w=B-7taO~goi-J4Ye-qeplsH!OM3IXJX>XDVQyh@VO34n!u-n2oZ@O*W$!NO z!ihj2o0YIJ;n zhE@UfY?keko>>Us#kQV>rTJw;1ZM4wJ8^1ph4=e9SaRIT49ydSvh!9ep5sPCp=y~c ztElAhdpw@QiFv|@$Di`JDTlZKp77=f86M72Jbs`1mB;gV!k5Q$c|vspG;TyOC>FG4 z2N8JVL>vihHZO-Dk(VRLd;%q)M3jV*vm8u;_8bSL@c1&6j7J4)Cy~rT+YrF* znbD>=XJ92LQ@CKqVWpfe=f+7mh9x*3&Ykm+R}Dpo^W#u9>pOgSqB+T+O-wS6G-!t> z$0xwCIcem`zO4!?%X6yo3OW@Iuz@liyk)cg!xLkY;u6dWw&+ANJp1-!IVcz9p?qXRIiTwF>fFJ=l*6C|`|NJ3EH5j~w^abU>DA@73gI?j4X%^H zK4O9%oeXeZoF}J}VZ?cJO3u?Uk_D(}DJtZooZ>71*RhZdc2x|+l!nSsIj~Z}v786z zA(Oq&$$^Nh_giVRS-GpYZ956dbC}#{pTwRp|Q?GeNMNUmmuxR1e1=#ThhlVb9AY`+u;ltw$+Bjh_;uFkpn37^1 zIr4whyMxGBG;SFhhfMYvUu$%>lJer5Dx2eh325SSG!ad*%Tdc$x~-}Q=-yW4P~;R; zw4RMXQ_(au9nC;9(Z^^O`UK5JbI@G$DVm4oqtDO+v=A*qpQFWS30jJlq2=fc^d(w> zR-#pCHCltdLTk}F^fg+KHlT0NMlO)kbFo|!XXW1KT5;{UE?hd7#r5ZMxq)0U_aRr! z)o`P@kGP54G;S6*k6XwsS5RcFM`M6@z`GmsMBf*|K*o=TciqS!rQjr?NczR0c2D`orqN!JW(Ug{cex_Nfa~t7~cd67a=f$aT+v zJ#;P{YRk7@=hX7iZ{EC-Lp(v8h70DjTo@N|3V1jS_&=i~=qNgdegR~`!-3fd479JZ zm4M4k;IWB&&f_F7#1<7W`L)f9i?vl{+BOw1+q<1ar+|bJoknNSS@3gUaCTwsu2RQ^ zav{K`oe8@e5eT;|u;mRZ$*CCR82gLp(lT_Z);JtCTt}pi_!WUT7cNkL+?jBacge@B0yKVcaE5{9v!&JCb*qoDJw&V{9gCEzvEO0ro6 zxRn8A6(z#8oT};yxNJRzUe+3s4!r`nH(&>?Y?W1orS@3E5nLj`$DOrXk<*4S#snto zEQpJ@4>y<4V5G4dmY`$UgU9YX4iJXb6Na^UZdrAe*44{ovq5DQ;QZ`b10|Q{=Tzj| zAM2J=YAd$ZDg?7=E0%K>;3&C)La-96us8NW`>~&0Ato0iOaa!+f$X{)ll>JkVoPUKKP4(HwjR!b`wr7rP5{tF06+&|j%SpW6=xKdb9HKz!(46yU~L63 zPTQ{=H~=lU%j)u&n3%L~{VS?Vg-e}sYG8Doa%BQ=7S|boJ2WO&AKVxB1K8{aa&-i# zP6E^bNH8i;Au2^xFg)d%<=_ zRzcE=9t>TZLU zM3vyC1uKM9+q=SkrB23-GW&HW)+)eS!wmqeyoRE##q03bc)b%Z-$hhg9jG&!;LUgo zh`W`?!8{HXB-ZtAS!~Ox&Z@a>LTRnf4R?wpWt~tg18BmUL<$zMs7|L;c0-pqw(+yBc z0i~=KC9S%!zSm6quy>WMqC{}jZ=o&$)D^A@peh=Jx{hz)n+*i=1q-dL6R3|}oUT;` z&~k!-6a=xKxRfAo^#>seKjMZ0+~CIGe#d`cAw=0TjoLWj(BF`{fy_z=LllBMU{#k2 zXmH#p)(gP;mm3LK!y00}#%~BjU?7p?hj!NW=I|7{*}UM-I)_75OCDo z1i%^70Ef^L!9)n5L;Hy^yP=GA5pf*gjCXMXL==b^&E4aka}P3FmX+6}<@LyeA16{A zASG~90clcWNXdkWNC6~E1Ek4-G(|w_oo*|JbVV4tw4C0R_7gaEA~XYp7Tm{xFugH^ 
z)r@pKw4`5JRxVSJ9kQy58wm11pq$3A-I+(B#MYZ zPAhO7ozDQ=?kY1|ftsppJ@f59PQsNyxSCr6gcmhNcqmar3xMd<_8W z1ppSju00(jHi9v1;vR!B{9%JOhag@)h&KS*OSS1*8|WC>1#E6_JURK<2K;=_Z3cce zHWYIev6J|L*ah6|wu|Y?#kau4CV>kGARVep%I(3^K8c{mlU4*Nu@JFr6^>a3*V>oX zHm{mj*m9isKLP$>Zad(A*AQPz93_qszX1M;dTJIDpsj$v&93I|Hc)f#9u@H8Bzqpn zUgUNH*&PkZW)N41tHd>@sk_dz{{Y$AtRSHNw9@u0C@i*VGiu6Vj%Bmzsv$mD(N@xv`x$sR)CdnGOUg*OQ@%PLgpBfVoqT1` z$ZZKLJi~6vl>XzYk6?gy@kWD+mX>A3Wfhfa zEg>;`Tk_d2)^i_Z3@{PLodG6JHDH3sAQQsZO_80 z0;iLQY{M}swRhh&JhCY;@IH4D7&zC6qamA+Lu;BHth%%PD6pnM|S6~0Y8@;=cfmmNkZ%+ z?XeGasmIS1;OD9^GJPG$Y1L&_w#qb^hQSWp=GD+H25ZQ(-+}fdv;eBAg?6)(fe(Oz zT<$h7aHDYsY~%oPATUrU7zSDFWI!;Do4~*=fdQ86<6xmvVdohMgB;r%dULb6quig|T^>j9(6_k3#c^vnHIMzdv0OekhR4xdDvuQ$&0}va zo}13&K<*RnHIJ>_B3CdAP+)ttZf>SKY_LnlRrZa8dW#z$dk||084>BNi{Sb z9dVJn>oN<*LjGAB4Y>Qxf{i>)o`KnLj>im-Ey9d&%V?cnSk=GML8~^vItKm{Dq2U( zAr6z9h!%vJs3r!$P;P_4Tn@v!9n|q1h&G)Y!z}{QCUK8IZ7&235}cw|4Cs1-8015t z_enk?wB%#*cc@1WfT7wOXXIbdyKG+xC9=?+&WHY2`^t1%c}_(RbmBPIEKUo7E!a&k zVz>WB`vv*3KJEVxf%-Zov?8&Hf*fXJosLDG^$4tk6s1m~zh?+3Nn=VA$xzXk#FsEb zR*_YZoU8>K{0r=HBd2I&;u5Pw{-OX;po`~lO;euKd5&HJoK{$?Xq-r>C`=UYGW+X@ z2($lb-R$pYC(;d?1BDRr8z&SaiWSA7{h|cHjfj%#rEdQKp=WkCLW2YEQ7Lqwgdzm| zlonc>T7i6;&E9quSwz-lA}cc0HnEL9dS3M@qL*S@o zg2$+zt9Pz!6uciQS`IF(JNWB!FxPMVci?=H4ZntZxK8@wdcfcw$hXL%OpxA|SI{VA zIVxHPe)L~(O~W`vhR|8E&tTDDSdjvk*o#y2YTh}guvF;EIPMHbMJvfQIt_A;F0FHUE;K)my?A7-(7>kyQmc8Q(T>D2Z z&ZI$p123-?trL9>lVF2A$#Qv-$1DepE}1Y1itVjY8yW!lUIqLsg%w5{5?YvsMjaH; z{Z6#?f5%j#(*+3Z7VRM?ikz!QqC>7-9f)H*R=(A`Dl1)B$%G~UcQoz{2BLkUW9XRZ zIK0MVUm+OyIE>>YDq6xRpuEf$7Udj71k?F%f}Inc|33-VV7P9IZvF2F^aYND{*=vy zo%$0RZ1b_`cj&4IIc@WwdbVj2Rvw%)S`D7O*!GTw99nn@Hi?H~Ib;_ba5y&M0=$si zjoUy^J%fX=4Z%B%OND6PmCJ@84l%uh(;(~fgq326*d4FMo9lQI%bmS_p`OTx2I^a$ zLi%)tWe}~SZQ2-XVsBwyuYr^V2FV*#@=k}bsVu!4GEkit6G`goj}=~!ZK z6s{P;AbLdHCx1lSAcfrp$tL8gb-48{1ML=9iL2ikSl5!?#WMPX#W`RrT`aA^IK$=} zJQ;uDlw)E&IeH6ABu=H7-w~h>K)elD_N_VMX`G11sgNVOG^p*_hIl3#xlGvTV4t_* zS&n+81}lkGumcda4xeL-!HTVe(_r{ZJ;H(C65iQa|o~?xh 
zcP2aqp%JLOgkDDm)Hhm`iZOl-HA)&#se$J*$SF|$2(=9s=)<29p4AHP2q$p#8s1b2 z6oF=QK!am)yYLiNLRMdcCv$h9f>9O$NYskDIGL1 z?dVb!g8rc{guG!H?6A?fR)$bNX4Te)P!~c+ZJDz!-1kmh2s(zkP*9(vFcci}+tN@N ziFb;_u7blq2D!bXIvfTCp->-w=co@iLa|rjC=0n4!f=@&S2`_Tq=*sr)<;!Y9LiW6;egiAgY)up-QPT zs+{_es-P;VDyo_qObwxiQZ>{tYB)858cB_!MpGQcQy)=dsIk;IYCJW8nn+EeCR0$AfrW%;ORsm-4ub$K^c!kjE7~uHZJRZhl(EJD7~p^@JnT7@=f6>4Dz=l{^&M^V{qh~Qqe z!sZ-pWaIT#R9XVG+U$=-HdY=15g!*KE?vN=K90A9U=@{B=WAhyh|A-S*c;WyS&|zQ z-=Sx3rP;elfTVVLwc$RKs4`aeU)11i`U^s{E>wipge!bfEz|bv^&hp%QU8+A;sb)- zE(G6(`l$4HV|<;skuC`ryrFb`qc;DA%PL3E(?+^3>BDuY{qe}UOc|~y99J5B&M};@ zx>h#=Lfs7SxXLZ$q;SdKg^&9Ee7Nc7a|lXE(vQgu2z>^esJ}Ahzo?b=d>} zcU-4c!(DmsK%fi$yUlI5?P@6XT0n5mzE7jp!~7Tj;p;-ZPS1jx--N@sR9Xsj=UuMa=Wp#g3cTQ|dAri;F<5J|j#=wE zpr&3_O8*?>$yPU!_O*7R>;Uwc~D6VeH#Y-oo0S~UaaaD_b4-&A`aIBmL z;JR2CWl#2~-^l$-oGHj3t|xy9`N#-+Li`d6<%zIi_e-=9(!V$;oR<@GpvN>GnxPZP zJ!m;3b4p=zE-VhbPYgicklSGc?;~jG4<#4DR^8obDkN;4Lwd66(oRaG?N^P z3d!A&rZ0m6JQqKLE_8D=4-&*_P}wX&^U1xikT4t)$IsA0G7vTePeU7s?_ulkEa+Kl zp$Vuap2K$FH&B3Q5#3QLT7`F_rO?U#7Pbt3j6Q)yi|Md!_)~HiS_joqcanodb24l! 
zUW?X4D|akf0j2*HXaHbHGdJNiXbx_JW&>5W1T@n)OQBasI<1&Q2@d)JDfslY7f)sofY)B4* zbagaz*R7CdN`-tAlK*&EJGcRh43i)^&xQW`YoVr_$U(A`A}mhGq5oG5JDHEdUS>V) zRNe?Xo?AhCek^oAk3rS>0~ZH{K?mrF(y*_&0CqY*64KCxuqC>^upDBA{mzh6MZi|) zUEF0ziiPC0H*AXD0~?>$3U&4h=yp^L_;PE#+-pJ!`dAy0on|Zv2$53Ew zzpY!+yk6-fmUmm~W@oOHx;R(bOm?z+iK$0g!JR#-@iYI8EV0hRrMo4(VohLka z!jmVwctXk(EKkUILe3Kko=}46v6t8&8IEMTvSl(nnEh3TZR~j&?#+&1&#}kYbao7T zn*B(IBiILQJ2s8I!(L$z$Z&u5kPPRuC)sY?ut^u_xH+Y)|$udxX6x!v-0SVt;3?>`1l?`;!b;vQOFZ>@D^T`;fgM z!$oW++g<^i`Gvn%>>n~*EW^<N$qhz?6?JUEC*oo`~87{DUa;DVY+;tqo(k_^#W7aZNbtmwN6ElPvx{MiW|8}H( z3&*ESt#YbzBvOT}+qKm}PHdnLyuCne`3mnByv*CjV4 zwapBom&b0810Fwl9QHWkam?em#|@9$9(O(NdpzxwSJ(E2% zJ+nOfdFFcNdk*j{@GSBy_N?(7;W^5a_nhiE-E*PmYR|7c*Lkk@{KfNx=ULB7o>x4t zdEW56<@u}UL(ktl|Mhb7a`*D|l6u8?C3vNHHSv1StC?2|uU1}dygGVy_UhwR;8o;R z>{aUZiPs{p#a>@{t@irLYn|76uW!7*_1f&U$7`S0DX;5ZH@$9q-H|dR-xxYM6u9j=%A@WeUS#FiLmUowD$TQ_x@*4SYIVT?{pCF$kpCX?opCMl$ z-z5K0zDK@Ken5U#eqa7r{+Il3`7`+o`M>hl3R6SE6yk`DlRLoDy}PTDsC(OQj$tmDOW0$ z-pUlEMfskxrLwiMt+Ktcqq4KIx3aIYzw!g6O*v3mq8z3ip&X^;luMP%m8+ENl zD8E&1R(_}4t=y|Tro5!QqP(WOp(0gcl|&^~$y5rJO68;SQw6Css!&zDDp8fJN>OF0 zdaJTk`Kke`0#%W!SXHVTt{SNtt>RTPRkKt}RbQ(%s5YuLsZOiTsxGOnt8S`ptL~`o zsUE2QRK4(~z1_Usy*<5SyyLu+y{+C&yx;R~=H0@(m3Ig4PTuL>A9&|_=X($Ep5Q&n zd%E{0-gCS^^`7s&zw~cQ*-$LI(zU98vzC(O#e24pv z^d0Rx)_1(`RNwi&3w#&(F81B!yW97G@6WzReSh&i;d{#WjPGB5D!&lFP`_}$NWb=e z9sRob_3-QI*UPVuUq8QWzkI(B{YLq5ejoXb_50FqrQce=Z~VUX+wAw9-!{MR{SNva z_B-PDi{E9xtA0=Y34f74<`A7T5`6v6Q_*?u_{hRu~??1qQqW>KKPyOfnFYw>t zztew@|3Uvl{y+O4_5a2Hg#T6l8~(Ta@AyCQe;VK!;2#hepbpRmG!JMQ&@P~JK-Yk@ zfb@VK0X+k%1EvQo2v`)bIACeO{(yr4M*>a+oC-J-a4z6Nz@>nD0S^Km1^gaJ2C4(2 z1C4>Pf$@Qvfmwn50&@fN0|x{a1QrDr2i61*4_q9$Ht_4f4S^d2&jy|kyb^db@OI#x zzgMW}>elME>h|hR>aOZEb-KETx~ICAy05yw`U7>I+NK_;E>sUv zm#E9s73wPWVD(V-F!c!aX!S?xaq5ZcDe7tJnd)uoJ?fv;N7X0Q=hPR}m(*9(*VH%E zx75F?|53kCzf!-^5E@$Jth?HFq`lH4invX&!6-&^*!nr47<*w4vH4 zZM4>?jn&3$6Sc|O6m1i2M{R~STbrZJ)7rEHwT0S2+L_u}+E29$wV!L3XqRcf(5}#K z(4N+w)n3wG*WT3L*51+H(>~BX4^{_jgTsRL!G_?N;JDy~;G|$vaM$3p;NHR6!2^P4 
z1b-YnH+VttqTt2BOM{mOe;K?ocy;iu;A6q(g0BbP489$FC-`3QgAiqicSt~pHY6k@ zG$cGEGDII@2#F188qz7GM@Zk0{vjWP7obz?w7T}Xj=FBT z9=e{o-nzcJ{<;rzxjLJ!LdWYS>z3-4>sIJi>(=Vl>o)2(>9**$>b}?gNB4v7uI|3> zvFGXMChr|Goj}~FNWR{8Ozp&h}{ICIG1!09@gThL}O2a-4n;-Uh*z&M1!&Zi^4qFqpHtg%L^dd*pslo!=8n`2>UnebvO>E!d2nw@Z|86a7%cT@b|-;hqnxG9o{y)LwKj~ zF5%t6Yr=*zd5v?OSM0AZvi%5^?5s?{@712AQPsG@W@exxZW=714m=p17#QcZ_ z5sM<0M68V167fUC-iQMcKSdmlco^|p#FL0;5icWNMZAf`kz}M-WJ;tZ^1aBGk*y=! zMRthn6xk)RTV#44cSRnE zJQH~}@^0k)$cK@SBmane5)~Mwj?zU%M(LvrQ87`mQSniUQAtrPql%+Sqbj3@Mh%M^ z88teJj~Wv-E^1=br%}tJ)<%6D^-a_dQ3s=rM4i#|`Z4-(`U(2U`l6IGT(W zN7K=6(eBZn(b8yHv?5v+?Gx=69S|K9t%(kf)lhp(ilJj9F`hAOj3UN6#xEu?MiUbf6BZL06Ahnf#>XVZq{LWb zn#MGXX&KWdrhQDOn65G1V|v77#q^2kACnW4AG084SIq91YcVfksaR=jXlz_;huA@} zGh>&>u8;jb_Gs+&*cY*{;}miFxE664aYb<>;wHu|h}#->D(+g`^LUSVU3^%4RD4Q& z*Z8#fnei**_rza_f095Y_$7oTv`ENJn2@kEVRypygjb2$#MH#LiJcM$Ck{=VlDIf= zY2uE=ONsXr|4u?lq9nhh@TA0~c1amYg-K(RCL~QtT9ULZX+zSXq%%q9lAa}pBu6J3 zl4Fz2$(H0M$*oPzOf5{UOl?fsjn&9lxxa24Kx**icO`a za#Mw=$~4$C)HKXA!Zg~%o5q;NnI@Pfo2Htkn`WA3nP!{jn&z24Gc7cIZdzhmX8OXk z!nDftm1&)6gK48_lWB`-t7*GwhiRv2muZh_pXq?{5}p#7qE9iT#HPfjB&H;%q@-96VWzvn`)mKC>*cEU_%Ntgx)MthKDSY_x2)Y_)uE*=hOFve$CJa>(+t<*4Pj<)r1b z<(%b$<+A0f<%Z>!9L)tH?@Q-K_3bPpi}_ zvns6KR$r^XHPEWD23vL3aBHMhZ#7tBtZ~)^YqB-PYPB}8zGrQ2ZE0<7ZD;LZ?PTq0 zO|zz3Gp$+HKGuHLY-_I7W-YK5S&OY@)(@?f)@thzYmIfdb(EF2jdd+&ndfR%(de8dM`qcWj^;s%TB~rzyUa8Vld1^qaHZ?XiDYZ>% W$JAcP{znjwUuKl!x9 + + + + CFBundleDevelopmentRegion + English + CFBundleExecutable + ${EXECUTABLE_NAME} + CFBundleIconFile + + CFBundleIdentifier + com.apple.example.DispatchLife + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + ${PRODUCT_NAME} + CFBundlePackageType + APPL + CFBundleSignature + ???? 
+ CFBundleVersion + 1.1 + NSMainNibFile + MainMenu + NSPrincipalClass + NSApplication + + diff --git a/examples/DispatchLife/ReadMe.txt b/examples/DispatchLife/ReadMe.txt new file mode 100644 index 000000000..2beea13e8 --- /dev/null +++ b/examples/DispatchLife/ReadMe.txt @@ -0,0 +1,37 @@ +### DispatchLife ### + +=========================================================================== +DESCRIPTION: + +The classic game of Life showing use of dispatch queues as lightweight threads (each cell is a queue), and an example of how to avoid overloading a slow queue (OpenGL or curses screen updates) with many requests (cell updates) by using a timer to drive the screen updates and allowing the cells to update as fast as they can. + +=========================================================================== +BUILD REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +RUNTIME REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +PACKAGING LIST: + +DispatchLife.c - Simulation engine using GCD. +DispatchLifeGLView.h - OpenGL view for visualization. +DispatchLifeGLView.m - OpenGL view for visualization. + +=========================================================================== +CHANGES FROM PREVIOUS VERSIONS: + +Version 1.2 +- Updated to use current GCD source API. +Version 1.1 +- Updated to use current GCD API. +- Added OpenGL view for visualization. +Version 1.0 +- First version (WWDC 2008). + +=========================================================================== +Copyright (C) 2008-2009 Apple Inc. All rights reserved. diff --git a/examples/DispatchLife/main.m b/examples/DispatchLife/main.m new file mode 100644 index 000000000..59eb37af7 --- /dev/null +++ b/examples/DispatchLife/main.m @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2009-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. + * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +#import + +int main(int argc, char *argv[]) +{ + return NSApplicationMain(argc, (const char **) argv); +} diff --git a/examples/DispatchWebServer/DispatchWebServer.c b/examples/DispatchWebServer/DispatchWebServer.c new file mode 100644 index 000000000..d839d3bde --- /dev/null +++ b/examples/DispatchWebServer/DispatchWebServer.c @@ -0,0 +1,956 @@ +/* + * Copyright (c) 2008 Apple Inc. All rights reserved. + * + * @APPLE_DTS_LICENSE_HEADER_START@ + * + * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. + * ("Apple") in consideration of your agreement to the following terms, and your + * use, installation, modification or redistribution of this Apple software + * constitutes acceptance of these terms. If you do not agree with these terms, + * please do not use, install, modify or redistribute this Apple software. 
+ * + * In consideration of your agreement to abide by the following terms, and + * subject to these terms, Apple grants you a personal, non-exclusive license, + * under Apple's copyrights in this original Apple software (the "Apple Software"), + * to use, reproduce, modify and redistribute the Apple Software, with or without + * modifications, in source and/or binary forms; provided that if you redistribute + * the Apple Software in its entirety and without modifications, you must retain + * this notice and the following text and disclaimers in all such redistributions + * of the Apple Software. Neither the name, trademarks, service marks or logos of + * Apple Computer, Inc. may be used to endorse or promote products derived from + * the Apple Software without specific prior written permission from Apple. Except + * as expressly stated in this notice, no other rights or licenses, express or + * implied, are granted by Apple herein, including but not limited to any patent + * rights that may be infringed by your derivative works or by other works in + * which the Apple Software may be incorporated. + * + * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO + * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED + * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN + * COMBINATION WITH YOUR PRODUCTS. 
+ * + * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR + * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF + * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF + * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * @APPLE_DTS_LICENSE_HEADER_END@ + */ + +/* A tiny web server that does as much stuff the "dispatch way" as it can, like a queue per connection... */ + +/**************************************************************************** +overview of dispatch related operations: + +main() { + have dump_reqs() called every 5 to 6 seconds, and on every SIGINFO + and SIGPIPE + + have accept_cb() called when there are new connections on our port + + have reopen_logfile_when_needed() called whenever our logfile is + renamed, deleted, or forcibly closed +} + +reopen_logfile_when_needed() { + call ourself whenever our logfile is renamed, deleted, or forcibly + closed +} + +accept_cb() { + allocate a new queue to handle network and file I/O, and timers + for a series of HTTP requests coming from a new network connection + + have read_req() called (on the new queue) when there + is network traffic for the new connection + + have req_free(new_req) called when the connection is "done" (no + pending work to be executed on the queue, an no sources left to + generate new work for the queue) +} + +req_free() { + uses dispatch_get_current_queue() and dispatch_async() to call itself + "on the right queue" +} + +read_req() { + If there is a timeout source delete_source() it + + if (we have a whole request) { + make a new dispatch source (req->fd_rd.ds) for the + content file + + have clean up fd, req->fd and req->fd_rd (if + appropriate) when the 
content file source is canceled + + have read_filedata called when the content file is + read to be read + + if we already have a dispatch source for "network + socket ready to be written", enable it. Otherwise + make one, and have write_filedata called when it + time to write to it. + + disable the call to read_req + } + + close the connection if something goes wrong +} + +write_filedata() { + close the connection if anything goes wrong + + if (we have written the whole HTTP document) { + timeout in a little bit, closing the connection if we + haven't received a new command + + enable the call to read_req + } + + if (we have written all the buffered data) { + disable the call to write_filedata() + } +} + +read_filedata() { + if (nothing left to read) { + delete the content file dispatch source + } else { + enable the call to write_filedata() + } +} + +qprintf, qfprintf, qflush + schedule stdio calls on a single queue + +disable_source, enable_source + implements a binary enable/disable on top of dispatch's + counted suspend/resume + +delete_source + cancels the source (this example program uses source + cancelation to schedule any source cleanup it needs, + so "delete" needs a cancel). 
+ + ensure the source isn't suspended + + release the reference, which _should_ be the last + reference (this example program never has more + then one reference to a source) + +****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +char *DOC_BASE = NULL; +char *log_name = NULL; +FILE *logfile = NULL; +char *argv0 = "a.out"; +char *server_port = "8080"; +const int re_request_nmatch = 4; +regex_t re_first_request, re_nth_request, re_accept_deflate, re_host; + + +// qpf is the queue that we schedule our "stdio file I/O", which serves as a lock, +// and orders the output, and also gets it "out of the way" of our main line execution +dispatch_queue_t qpf; + +void qfprintf(FILE *f, const char *fmt, ...) __attribute__((format(printf, 2, 3))); + +void qfprintf(FILE *f, const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + char *str; + /* We gennerate the formatted string on the same queue (or + thread) that calls qfprintf, that way the values can change + while the fputs call is being sent to the qpf queue, or waiting + for other work to complete ont he qpf queue. */ + + vasprintf(&str, fmt, ap); + dispatch_async(qpf, ^{ fputs(str, f); free(str); }); + if ('*' == *fmt) { + dispatch_sync(qpf, ^{ fflush(f); }); + } + va_end(ap); +} + +void qfflush(FILE *f) { + dispatch_sync(qpf, ^{ fflush(f); }); +} + +void reopen_logfile_when_needed() { + // We don't want to use a fd with a lifetime managed by something else + // because we need to close it inside the cancel handler (see below) + int lf_dup = dup(fileno(logfile)); + FILE **lf = &logfile; + + // We register the vnode callback on the qpf queue since that is where + // we do all our logfile printing. 
(we set up to reopen the logfile + // if the "old one" has been deleted or renamed (or revoked). This + // makes it pretty safe to mv the file to a new name, delay breifly, + // then gzip it. Safer to move the file to a new name, wait for the + // "old" file to reappear, then gzip. Niftier then doing the move, + // sending a SIGHUP to the right process (somehow) and then doing + // as above. Well, maybe it'll never catch on as "the new right + /// thing", but it makes a nifty demo. + dispatch_source_t vn = dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, lf_dup, DISPATCH_VNODE_REVOKE|DISPATCH_VNODE_RENAME|DISPATCH_VNODE_DELETE, qpf); + + dispatch_source_set_event_handler(vn, ^{ + printf("lf_dup is %d (logfile's fileno=%d), closing it\n", lf_dup, fileno(logfile)); + fprintf(logfile, "# flush n' roll!\n"); + dispatch_cancel(vn); + dispatch_release(vn); + fflush(logfile); + *lf = freopen(log_name, "a", logfile); + + // The new logfile has (or may have) a diffrent fd from the old one, so + // we have to register it again + reopen_logfile_when_needed(); + }); + + dispatch_source_set_cancel_handler(vn, ^{ close(lf_dup); }); + + dispatch_resume(vn); +} + +#define qprintf(fmt...) qfprintf(stdout, ## fmt); + +struct buffer { + // Manage a buffer, currently at sz bytes, but will realloc if needed + // The buffer has a part that we read data INTO, and a part that we + // write data OUT OF. + // + // Best use of the space would be a circular buffer (and we would + // use readv/writev and pass around iovec structs), but we use a + // simpler layout: + // data from buf to outof is wasted. From outof to into is + // "ready to write data OUT OF", from into until buf+sz is + // "ready to read data IN TO". 
+ size_t sz; + unsigned char *buf; + unsigned char *into, *outof; +}; + +struct request_source { + // libdispatch gives suspension a counting behaviour, we want a simple on/off behaviour, so we use + // this struct to provide track suspensions + dispatch_source_t ds; + bool suspended; +}; + +// The request struct manages an actiave HTTP request/connection. It gets reused for pipelined HTTP clients. +// Every request has it's own queue where all of it's network traffic, and source file I/O as well as +// compression (when requested by the HTTP client) is done. +struct request { + struct sockaddr_in r_addr; + z_stream *deflate; + // cmd_buf holds the HTTP request + char cmd_buf[8196], *cb; + char chunk_num[13], *cnp; // Big enough for 8 digits plus \r\n\r\n\0 + bool needs_zero_chunk; + bool reuse_guard; + short status_number; + size_t chunk_bytes_remaining; + char *q_name; + int req_num; // For debugging + int files_served; // For this socket + dispatch_queue_t q; + // "sd" is the socket descriptor, where the network I/O for this request goes. "fd" is the source file (or -1) + int sd, fd; + // fd_rd is for read events from the source file (say /Users/YOU/Sites/index.html for a GET /index.html request) + // sd_rd is for read events from the network socket (we suspend it after we read an HTTP request header, and + // resume it when we complete a request) + // sd_wr is for write events to the network socket (we suspend it when we have no buffered source data to send, + // and resume it when we have data ready to send) + // timeo is the timeout event waiting for a new client request header. + struct request_source fd_rd, sd_rd, sd_wr, timeo; + uint64_t timeout_at; + struct stat sb; + + // file_b is where we read data from fd into. 
+ // For compressed GET requests: + // - data is compressed from file_b into deflate_b + // - data is written to the network socket from deflate_b + // For uncompressed GET requests + // - data is written to the network socket from file_b + // - deflate_b is unused + struct buffer file_b, deflate_b; + + ssize_t total_written; +}; + +void req_free(struct request *req); + +void disable_source(struct request *req, struct request_source *rs) { + // we want a binary suspend state, not a counted state. Our + // suspend flag is "locked" by only being used on req->q, this + // assert makes sure we are in a valid context to write the new + // suspend value. + assert(req->q == dispatch_get_current_queue()); + if (!rs->suspended) { + rs->suspended = true; + dispatch_suspend(rs->ds); + } +} + +void enable_source(struct request *req, struct request_source *rs) { + assert(req->q == dispatch_get_current_queue()); + if (rs->suspended) { + rs->suspended = false; + dispatch_resume(rs->ds); + } +} + +void delete_source(struct request *req, struct request_source *rs) { + assert(req->q == dispatch_get_current_queue()); + if (rs->ds) { + /* sources need to be resumed before they can be deleted + (otherwise an I/O and/or cancel block might be stranded + waiting for a resume that will never come, causing + leaks) */ + + enable_source(req, rs); + dispatch_cancel(rs->ds); + dispatch_release(rs->ds); + } + rs->ds = NULL; + rs->suspended = false; +} + +size_t buf_into_sz(struct buffer *b) { + return (b->buf + b->sz) - b->into; +} + +void buf_need_into(struct buffer *b, size_t cnt) { + // resize buf so into has at least cnt bytes ready to use + size_t sz = buf_into_sz(b); + if (cnt <= sz) { + return; + } + sz = malloc_good_size(cnt - sz + b->sz); + unsigned char *old = b->buf; + // We could special case b->buf == b->into && b->into == b->outof to + // do a free & malloc rather then realloc, but after testing it happens + // only for the 1st use of the buffer, where realloc is the same cost as 
+ // malloc anyway. + b->buf = reallocf(b->buf, sz); + assert(b->buf); + b->sz = sz; + b->into = b->buf + (b->into - old); + b->outof = b->buf + (b->outof - old); +} + +void buf_used_into(struct buffer *b, size_t used) { + b->into += used; + assert(b->into <= b->buf + b->sz); +} + +size_t buf_outof_sz(struct buffer *b) { + return b->into - b->outof; +} + +int buf_sprintf(struct buffer *b, char *fmt, ...) __attribute__((format(printf,2,3))); + +int buf_sprintf(struct buffer *b, char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + size_t s = buf_into_sz(b); + int l = vsnprintf((char *)(b->into), s, fmt, ap); + if (l < s) { + buf_used_into(b, l); + } else { + // Reset ap -- vsnprintf has already used it. + va_end(ap); + va_start(ap, fmt); + buf_need_into(b, l); + s = buf_into_sz(b); + l = vsnprintf((char *)(b->into), s, fmt, ap); + assert(l <= s); + buf_used_into(b, l); + } + va_end(ap); + + return l; +} + +void buf_used_outof(struct buffer *b, size_t used) { + b->outof += used; + //assert(b->into <= b->outof); + assert(b->outof <= b->into); + if (b->into == b->outof) { + b->into = b->outof = b->buf; + } +} + +char *buf_debug_str(struct buffer *b) { + char *ret = NULL; + asprintf(&ret, "S%d i#%d o#%d", b->sz, buf_into_sz(b), buf_outof_sz(b)); + return ret; +} + +uint64_t getnanotime() { + struct timeval tv; + gettimeofday(&tv, NULL); + + return tv.tv_sec * NSEC_PER_SEC + tv.tv_usec * NSEC_PER_USEC; +} + +int n_req; +struct request **debug_req; + +void dump_reqs() { + int i = 0; + static last_reported = -1; + + // We want to see the transition into n_req == 0, but we don't need to + // keep seeing it. + if (n_req == 0 && n_req == last_reported) { + return; + } else { + last_reported = n_req; + } + + qprintf("%d actiave requests to dump\n", n_req); + uint64_t now = getnanotime(); + /* Because we iterate over the debug_req array in this queue + ("the main queue"), it has to "own" that array. All manipulation + of the array as a whole will have to be done on this queue. 
*/ + + for(i = 0; i < n_req; i++) { + struct request *req = debug_req[i]; + qprintf("%s sources: fd_rd %p%s, sd_rd %p%s, sd_wr %p%s, timeo %p%s\n", req->q_name, req->fd_rd.ds, req->fd_rd.suspended ? " (SUSPENDED)" : "", req->sd_rd.ds, req->sd_rd.suspended ? " (SUSPENDED)" : "", req->sd_wr.ds, req->sd_wr.suspended ? " (SUSPENDED)" : "", req->timeo.ds, req->timeo.suspended ? " (SUSPENDED)" : ""); + if (req->timeout_at) { + double when = req->timeout_at - now; + when /= NSEC_PER_SEC; + if (when < 0) { + qprintf(" timeout %f seconds ago\n", -when); + } else { + qprintf(" timeout in %f seconds\n", when); + } + } else { + qprintf(" timeout_at not set\n"); + } + char *file_bd = buf_debug_str(&req->file_b), *deflate_bd = buf_debug_str(&req->deflate_b); + qprintf(" file_b %s; deflate_b %s\n cmd_buf used %ld; fd#%d; files_served %d\n", file_bd, deflate_bd, (long)(req->cb - req->cmd_buf), req->fd, req->files_served); + if (req->deflate) { + qprintf(" deflate total in: %ld ", req->deflate->total_in); + } + qprintf("%s total_written %lu, file size %lld\n", req->deflate ? "" : " ", req->total_written, req->sb.st_size); + free(file_bd); + free(deflate_bd); + } +} + +void req_free(struct request *req) { + assert(!req->reuse_guard); + if (dispatch_get_main_queue() != dispatch_get_current_queue()) { + /* dispatch_set_finalizer_f arranges to have us "invoked + asynchronously on req->q's target queue". However, + we want to manipulate the debug_req array in ways + that are unsafe anywhere except the same queue that + dump_reqs runs on (which happens to be the main queue). 
+ So if we are running anywhere but the main queue, we + just arrange to be called there */ + + dispatch_async(dispatch_get_main_queue(), ^{ req_free(req); }); + return; + } + + req->reuse_guard = true; + *(req->cb) = '\0'; + qprintf("$$$ req_free %s; fd#%d; buf: %s\n", dispatch_queue_get_label(req->q), req->fd, req->cmd_buf); + assert(req->sd_rd.ds == NULL && req->sd_wr.ds == NULL); + close(req->sd); + assert(req->fd_rd.ds == NULL); + if (req->fd >= 0) close(req->fd); + free(req->file_b.buf); + free(req->deflate_b.buf); + free(req->q_name); + free(req->deflate); + free(req); + + int i; + bool found = false; + for(i = 0; i < n_req; i++) { + if (found) { + debug_req[i -1] = debug_req[i]; + } else { + found = (debug_req[i] == req); + } + } + debug_req = reallocf(debug_req, sizeof(struct request *) * --n_req); + assert(n_req >= 0); +} + +void close_connection(struct request *req) { + qprintf("$$$ close_connection %s, served %d files -- canceling all sources\n", dispatch_queue_get_label(req->q), req->files_served); + delete_source(req, &req->fd_rd); + delete_source(req, &req->sd_rd); + delete_source(req, &req->sd_wr); + delete_source(req, &req->timeo); +} + +// We have some "content data" (either from the file, or from +// compressing the file), and the network socket is ready for us to +// write it +void write_filedata(struct request *req, size_t avail) { + /* We always attempt to write as much data as we have. This + is safe becuase we use non-blocking I/O. It is a good idea + becuase the amount of buffer space that dispatch tells us may + be stale (more space could have opened up, or memory presure + may have caused it to go down). */ + + struct buffer *w_buf = req->deflate ? 
&req->deflate_b : &req->file_b; + ssize_t sz = buf_outof_sz(w_buf); + if (req->deflate) { + struct iovec iov[2]; + if (!req->chunk_bytes_remaining) { + req->chunk_bytes_remaining = sz; + req->needs_zero_chunk = sz != 0; + req->cnp = req->chunk_num; + int n = snprintf(req->chunk_num, sizeof(req->chunk_num), "\r\n%lx\r\n%s", sz, sz ? "" : "\r\n"); + assert(n <= sizeof(req->chunk_num)); + } + iov[0].iov_base = req->cnp; + iov[0].iov_len = req->cnp ? strlen(req->cnp) : 0; + iov[1].iov_base = w_buf->outof; + iov[1].iov_len = (req->chunk_bytes_remaining < sz) ? req->chunk_bytes_remaining : sz; + sz = writev(req->sd, iov, 2); + if (sz > 0) { + if (req->cnp) { + if (sz >= strlen(req->cnp)) { + req->cnp = NULL; + } else { + req->cnp += sz; + } + } + sz -= iov[0].iov_len; + sz = (sz < 0) ? 0 : sz; + req->chunk_bytes_remaining -= sz; + } + } else { + sz = write(req->sd, w_buf->outof, sz); + } + if (sz > 0) { + buf_used_outof(w_buf, sz); + } else if (sz < 0) { + int e = errno; + qprintf("write_filedata %s write error: %d %s\n", dispatch_queue_get_label(req->q), e, strerror(e)); + close_connection(req); + return; + } + + req->total_written += sz; + off_t bytes = req->total_written; + if (req->deflate) { + bytes = req->deflate->total_in - buf_outof_sz(w_buf); + if (req->deflate->total_in < buf_outof_sz(w_buf)) { + bytes = 0; + } + } + if (bytes == req->sb.st_size) { + if (req->needs_zero_chunk && req->deflate && (sz || req->cnp)) { + return; + } + + // We have transfered the file, time to write the log entry. + + // We don't deal with " in the request string, this is an example of how + // to use dispatch, not how to do C string manipulation, eh? 
+ size_t rlen = strcspn(req->cmd_buf, "\r\n"); + char tstr[45], astr[45]; + struct tm tm; + time_t clock; + time(&clock); + strftime(tstr, sizeof(tstr), "%d/%b/%Y:%H:%M:%S +0", gmtime_r(&clock, &tm)); + addr2ascii(AF_INET, &req->r_addr.sin_addr, sizeof(struct in_addr), astr); + qfprintf(logfile, "%s - - [%s] \"%.*s\" %hd %zd\n", astr, tstr, (int)rlen, req->cmd_buf, req->status_number, req->total_written); + + int64_t t_offset = 5 * NSEC_PER_SEC + req->files_served * NSEC_PER_SEC / 10; + int64_t timeout_at = req->timeout_at = getnanotime() + t_offset; + + req->timeo.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, req->q); + dispatch_source_set_timer(req->timeo.ds, dispatch_time(DISPATCH_TIME_NOW, t_offset), NSEC_PER_SEC, NSEC_PER_SEC); + dispatch_source_set_event_handler(req->timeo.ds, ^{ + if (req->timeout_at == timeout_at) { + qfprintf(stderr, "$$$ -- timeo fire (delta=%f) -- close connection: q=%s\n", (getnanotime() - (double)timeout_at) / NSEC_PER_SEC, dispatch_queue_get_label(req->q)); + close_connection(req); + } else { + // This happens if the timeout value has been updated, but a pending timeout event manages to race in before the cancel + } + }); + dispatch_resume(req->timeo.ds); + + req->files_served++; + qprintf("$$$ wrote whole file (%s); timeo %p, about to enable %p and close %d, total_written=%zd, this is the %d%s file served\n", dispatch_queue_get_label(req->q), req->timeo.ds, req->sd_rd.ds, req->fd, req->total_written, req->files_served, (1 == req->files_served) ? "st" : (2 == req->files_served) ? "nd" : "th"); + enable_source(req, &req->sd_rd); + if (req->fd_rd.ds) { + delete_source(req, &req->fd_rd); + } + req->cb = req->cmd_buf; + } else { + assert(bytes <= req->sb.st_size); + } + + if (0 == buf_outof_sz(w_buf)) { + // The write buffer is now empty, so we don't need to know when sd is ready for us to write to it. + disable_source(req, &req->sd_wr); + } +} + +// Our "content file" has some data ready for us to read. 
+// Event handler for the content-file READ source (req->fd_rd): dispatch says
+// "avail" bytes can be read from req->fd without blocking.  Reads them into
+// file_b, optionally compresses them into deflate_b, then arms the socket
+// write source so write_filedata() can push the data to the client.
+void read_filedata(struct request *req, size_t avail) {
+	// avail == 0 means EOF on the content file: tear down the read source
+	// (its cancel handler closes req->fd).
+	if (avail == 0) {
+		delete_source(req, &req->fd_rd);
+		return;
+	}
+
+	/* We make sure we can read at least as many bytes as dispatch
+	says are available, but if our buffer is bigger we will read as
+	much as we have space for. We have the file opened in non-blocking
+	mode so this is safe. */
+
+	buf_need_into(&req->file_b, avail);
+	size_t rsz = buf_into_sz(&req->file_b);
+	ssize_t sz = read(req->fd, req->file_b.into, rsz);
+	if (sz >= 0) {
+		// The socket write source must already exist: read_req() creates
+		// it before this read source ever fires.
+		assert(req->sd_wr.ds);
+		size_t sz0 = buf_outof_sz(&req->file_b);
+		buf_used_into(&req->file_b, sz);
+		assert(sz == buf_outof_sz(&req->file_b) - sz0);
+	} else {
+		int e = errno;
+		qprintf("read_filedata %s read error: %d %s\n", dispatch_queue_get_label(req->q), e, strerror(e));
+		close_connection(req);
+		return;
+	}
+	if (req->deflate) {
+		// Note: deflateBound is "worst case", we could try with any non-zero
+		// buffer, and alloc more if we get Z_BUF_ERROR...
+		buf_need_into(&req->deflate_b, deflateBound(req->deflate, buf_outof_sz(&req->file_b)));
+		req->deflate->next_in = (req->file_b.outof);
+		size_t o_sz = buf_outof_sz(&req->file_b);
+		req->deflate->avail_in = o_sz;
+		req->deflate->next_out = req->deflate_b.into;
+		size_t i_sz = buf_into_sz(&req->deflate_b);
+		req->deflate->avail_out = i_sz;
+		assert(req->deflate->avail_in + req->deflate->total_in <= req->sb.st_size);
+		// at EOF we want to use Z_FINISH, otherwise we pass Z_NO_FLUSH so we get maximum compression
+		int rc = deflate(req->deflate, (req->deflate->avail_in + req->deflate->total_in >= req->sb.st_size) ?
Z_FINISH : Z_NO_FLUSH); + assert(rc == Z_OK || rc == Z_STREAM_END); + buf_used_outof(&req->file_b, o_sz - req->deflate->avail_in); + buf_used_into(&req->deflate_b, i_sz - req->deflate->avail_out); + if (i_sz != req->deflate->avail_out) { + enable_source(req, &req->sd_wr); + } + } else { + enable_source(req, &req->sd_wr); + } +} + +// We are waiting to for an HTTP request (we eitther havn't gotten +// the first request, or pipelneing is on, and we finished a request), +// and there is data to read on the network socket. +void read_req(struct request *req, size_t avail) { + if (req->timeo.ds) { + delete_source(req, &req->timeo); + } + + // -1 to account for the trailing NUL + int s = (sizeof(req->cmd_buf) - (req->cb - req->cmd_buf)) -1; + if (s == 0) { + qprintf("read_req fd#%d command overflow\n", req->sd); + close_connection(req); + return; + } + int rd = read(req->sd, req->cb, s); + if (rd > 0) { + req->cb += rd; + if (req->cb > req->cmd_buf + 4) { + int i; + for(i = -4; i != 0; i++) { + char ch = *(req->cb + i); + if (ch != '\n' && ch != '\r') { + break; + } + } + if (i == 0) { + *(req->cb) = '\0'; + + assert(buf_outof_sz(&req->file_b) == 0); + assert(buf_outof_sz(&req->deflate_b) == 0); + regmatch_t pmatch[re_request_nmatch]; + regex_t *rex = req->files_served ? &re_first_request : &re_nth_request; + int rc = regexec(rex, req->cmd_buf, re_request_nmatch, pmatch, 0); + if (rc) { + char ebuf[1024]; + regerror(rc, rex, ebuf, sizeof(ebuf)); + qprintf("\n$$$ regexec error: %s, ditching request: '%s'\n", ebuf, req->cmd_buf); + close_connection(req); + return; + } else { + if (!strncmp("GET", req->cmd_buf + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so)) { + rc = regexec(&re_accept_deflate, req->cmd_buf, 0, NULL, 0); + assert(rc == 0 || rc == REG_NOMATCH); + // to disable deflate code: + // rc = REG_NOMATCH; + if (req->deflate) { + deflateEnd(req->deflate); + free(req->deflate); + } + req->deflate = (0 == rc) ? 
calloc(1, sizeof(z_stream)) : NULL; + char path_buf[4096]; + strlcpy(path_buf, DOC_BASE, sizeof(path_buf)); + // WARNING: this doesn't avoid use of .. in the path + // do get outside of DOC_ROOT, a real web server would + // really have to avoid that. + char ch = *(req->cmd_buf + pmatch[2].rm_eo); + *(req->cmd_buf + pmatch[2].rm_eo) = '\0'; + strlcat(path_buf, req->cmd_buf + pmatch[2].rm_so, sizeof(path_buf)); + *(req->cmd_buf + pmatch[2].rm_eo) = ch; + req->fd = open(path_buf, O_RDONLY|O_NONBLOCK); + qprintf("GET req for %s, path: %s, deflate: %p; fd#%d\n", dispatch_queue_get_label(req->q), path_buf, req->deflate, req->fd); + size_t n; + if (req->fd < 0) { + const char *msg = "404 Page not here

You step in the stream,
but the water has moved on.
This page is not here.
"; + req->status_number = 404; + n = buf_sprintf(&req->file_b, "HTTP/1.1 404 Not Found\r\nContent-Length: %zu\r\nExpires: now\r\nServer: %s\r\n\r\n%s", strlen(msg), argv0, msg); + req->sb.st_size = 0; + } else { + rc = fstat(req->fd, &req->sb); + assert(rc >= 0); + if (req->sb.st_mode & S_IFDIR) { + req->status_number = 301; + regmatch_t hmatch[re_request_nmatch]; + rc = regexec(&re_host, req->cmd_buf, re_request_nmatch, hmatch, 0); + assert(rc == 0 || rc == REG_NOMATCH); + if (rc == REG_NOMATCH) { + hmatch[1].rm_so = hmatch[1].rm_eo = 0; + } + n = buf_sprintf(&req->file_b, "HTTP/1.1 301 Redirect\r\nContent-Length: 0\r\nExpires: now\r\nServer: %s\r\nLocation: http://%*.0s/%*.0s/index.html\r\n\r\n", argv0, (int)(hmatch[1].rm_eo - hmatch[1].rm_so), req->cmd_buf + hmatch[1].rm_so, (int)(pmatch[2].rm_eo - pmatch[2].rm_so), req->cmd_buf + pmatch[2].rm_so); + req->sb.st_size = 0; + close(req->fd); + req->fd = -1; + } else { + req->status_number = 200; + if (req->deflate) { + n = buf_sprintf(&req->deflate_b, "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\nContent-Encoding: deflate\r\nExpires: now\r\nServer: %s\r\n", argv0); + req->chunk_bytes_remaining = buf_outof_sz(&req->deflate_b); + } else { + n = buf_sprintf(req->deflate ? 
&req->deflate_b : &req->file_b, "HTTP/1.1 200 OK\r\nContent-Length: %lld\r\nExpires: now\r\nServer: %s\r\n\r\n", req->sb.st_size, argv0); + } + } + } + + if (req->status_number != 200) { + free(req->deflate); + req->deflate = NULL; + } + + if (req->deflate) { + rc = deflateInit(req->deflate, Z_BEST_COMPRESSION); + assert(rc == Z_OK); + } + + // Cheat: we don't count the header bytes as part of total_written + req->total_written = -buf_outof_sz(&req->file_b); + if (req->fd >= 0) { + req->fd_rd.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, req->fd, 0, req->q); + // Cancelation is async, so we capture the fd and read sources we will want to operate on as the req struct may have moved on to a new set of values + int fd = req->fd; + dispatch_source_t fd_rd = req->fd_rd.ds; + dispatch_source_set_cancel_handler(req->fd_rd.ds, ^{ + close(fd); + if (req->fd == fd) { + req->fd = -1; + } + if (req->fd_rd.ds == fd_rd) { + req->fd_rd.ds = NULL; + } + }); + dispatch_source_set_event_handler(req->fd_rd.ds, ^{ + if (req->fd_rd.ds) { + read_filedata(req, dispatch_source_get_data(req->fd_rd.ds)); + } + }); + dispatch_resume(req->fd_rd.ds); + } else { + req->fd_rd.ds = NULL; + } + + if (req->sd_wr.ds) { + enable_source(req, &req->sd_wr); + } else { + req->sd_wr.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, req->sd, 0, req->q); + dispatch_source_set_event_handler(req->sd_wr.ds, ^{ write_filedata(req, dispatch_source_get_data(req->sd_wr.ds)); }); + dispatch_resume(req->sd_wr.ds); + } + disable_source(req, &req->sd_rd); + } + } + } + } + } else if (rd == 0) { + qprintf("### (%s) read_req fd#%d rd=0 (%s); %d files served\n", dispatch_queue_get_label(req->q), req->sd, (req->cb == req->cmd_buf) ? 
"no final request" : "incomplete request", req->files_served); + close_connection(req); + return; + } else { + int e = errno; + qprintf("reqd_req fd#%d rd=%d err=%d %s\n", req->sd, rd, e, strerror(e)); + close_connection(req); + return; + } +} + +// We have a new connection, allocate a req struct & set up a read event handler +void accept_cb(int fd) { + static int req_num = 0; + struct request *new_req = calloc(1, sizeof(struct request)); + assert(new_req); + new_req->cb = new_req->cmd_buf; + socklen_t r_len = sizeof(new_req->r_addr); + int s = accept(fd, (struct sockaddr *)&(new_req->r_addr), &r_len); + if (s < 0) { + qfprintf(stderr, "accept failure (rc=%d, errno=%d %s)\n", s, errno, strerror(errno)); + return; + } + assert(s >= 0); + new_req->sd = s; + new_req->req_num = req_num; + asprintf(&(new_req->q_name), "req#%d s#%d", req_num++, s); + qprintf("accept_cb fd#%d; made: %s\n", fd, new_req->q_name); + + // All further work for this request will happen "on" new_req->q, + // except the final tear down (see req_free()) + new_req->q = dispatch_queue_create(new_req->q_name, NULL); + dispatch_set_context(new_req->q, new_req); + dispatch_set_finalizer_f(new_req->q, (dispatch_function_t)req_free); + + debug_req = reallocf(debug_req, sizeof(struct request *) * ++n_req); + debug_req[n_req -1] = new_req; + + + new_req->sd_rd.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, new_req->sd, 0, new_req->q); + dispatch_source_set_event_handler(new_req->sd_rd.ds, ^{ + read_req(new_req, dispatch_source_get_data(new_req->sd_rd.ds)); + }); + + // We want our queue to go away when all of it's sources do, so we + // drop the reference dispatch_queue_create gave us & rely on the + // references each source holds on the queue to keep it alive. 
+ dispatch_release(new_req->q); + dispatch_resume(new_req->sd_rd.ds); +} + +int main(int argc, char *argv[]) { + int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + assert(sock > 0); + int rc; + struct addrinfo ai_hints, *my_addr; + + qpf = dispatch_queue_create("printf", NULL); + + argv0 = basename(argv[0]); + struct passwd *pw = getpwuid(getuid()); + assert(pw); + asprintf(&DOC_BASE, "%s/Sites/", pw->pw_dir); + asprintf(&log_name, "%s/Library/Logs/%s-transfer.log", pw->pw_dir, argv0); + logfile = fopen(log_name, "a"); + reopen_logfile_when_needed(logfile, log_name); + + bzero(&ai_hints, sizeof(ai_hints)); + ai_hints.ai_flags = AI_PASSIVE; + ai_hints.ai_family = PF_INET; + ai_hints.ai_socktype = SOCK_STREAM; + ai_hints.ai_protocol = IPPROTO_TCP; + rc = getaddrinfo(NULL, server_port, &ai_hints, &my_addr); + assert(rc == 0); + + qprintf("Serving content from %s on port %s, logging transfers to %s\n", DOC_BASE, server_port, log_name); + + int yes = 1; + rc = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); + assert(rc == 0); + yes = 1; + rc = setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)); + assert(rc == 0); + + rc = bind(sock, my_addr->ai_addr, my_addr->ai_addr->sa_len); + assert(rc >= 0); + + rc = listen(sock, 25); + assert(rc >= 0); + + rc = regcomp(&re_first_request, "^([A-Z]+)[ \t]+([^ \t\n]+)[ \t]+HTTP/1\\.1[\r\n]+", REG_EXTENDED); + assert(rc == 0); + + rc = regcomp(&re_nth_request, "^([A-Z]+)[ \t]+([^ \t\n]+)([ \t]+HTTP/1\\.1)?[\r\n]+", REG_EXTENDED); + assert(rc == 0); + + rc = regcomp(&re_accept_deflate, "[\r\n]+Accept-Encoding:(.*,)? 
*deflate[,\r\n]+", REG_EXTENDED); + assert(rc == 0); + + rc = regcomp(&re_host, "[\r\n]+Host: *([^ \r\n]+)[ \r\n]+", REG_EXTENDED); + assert(rc == 0); + + dispatch_source_t accept_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, sock, 0, dispatch_get_main_queue()); + dispatch_source_set_event_handler(accept_ds, ^{ accept_cb(sock); }); + assert(accept_ds); + dispatch_resume(accept_ds); + + sigset_t sigs; + sigemptyset(&sigs); + sigaddset(&sigs, SIGINFO); + sigaddset(&sigs, SIGPIPE); + + int s; + for(s = 0; s < NSIG; s++) { + if (sigismember(&sigs, s)) { + dispatch_source_t sig_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, s, 0, dispatch_get_main_queue()); + assert(sig_ds); + dispatch_source_set_event_handler(sig_ds, ^{ dump_reqs(); }); + dispatch_resume(sig_ds); + } + } + + rc = sigprocmask(SIG_BLOCK, &sigs, NULL); + assert(rc == 0); + + dispatch_source_t dump_timer_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); + dispatch_source_set_timer(dump_timer_ds, DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC, NSEC_PER_SEC); + dispatch_source_set_event_handler(dump_timer_ds, ^{ dump_reqs(); }); + dispatch_resume(dump_timer_ds); + + dispatch_main(); + printf("dispatch_main returned\n"); + + return 1; +} diff --git a/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj b/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj new file mode 100644 index 000000000..444288a7a --- /dev/null +++ b/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj @@ -0,0 +1,203 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 45; + objects = { + +/* Begin PBXBuildFile section */ + 4CDA1C1F0F795F5B00E0869E /* DispatchWebServer.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */; }; + 4CDA1C400F79786E00E0869E /* libz.1.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */; }; +/* End PBXBuildFile section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 8DD76FAF0486AB0100D96B5E /* CopyFiles */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 8; + dstPath = /usr/share/man/man1/; + dstSubfolderSpec = 0; + files = ( + ); + runOnlyForDeploymentPostprocessing = 1; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = DispatchWebServer.c; sourceTree = ""; }; + 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.1.dylib; path = /usr/lib/libz.1.dylib; sourceTree = ""; }; + 8DD76FB20486AB0100D96B5E /* DispatchWebServer */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = DispatchWebServer; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 8DD76FAD0486AB0100D96B5E /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 4CDA1C400F79786E00E0869E /* libz.1.dylib in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 08FB7794FE84155DC02AAC07 /* DispatchWebServer */ = { + isa = PBXGroup; + children = ( + 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */, + 08FB7795FE84155DC02AAC07 /* Source */, + C6A0FF2B0290797F04C91782 /* Documentation */, + 
1AB674ADFE9D54B511CA2CBB /* Products */, + ); + name = DispatchWebServer; + sourceTree = ""; + }; + 08FB7795FE84155DC02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */, + ); + name = Source; + sourceTree = ""; + }; + 1AB674ADFE9D54B511CA2CBB /* Products */ = { + isa = PBXGroup; + children = ( + 8DD76FB20486AB0100D96B5E /* DispatchWebServer */, + ); + name = Products; + sourceTree = ""; + }; + C6A0FF2B0290797F04C91782 /* Documentation */ = { + isa = PBXGroup; + children = ( + ); + name = Documentation; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 8DD76FA90486AB0100D96B5E /* DispatchWebServer */ = { + isa = PBXNativeTarget; + buildConfigurationList = 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "DispatchWebServer" */; + buildPhases = ( + 8DD76FAB0486AB0100D96B5E /* Sources */, + 8DD76FAD0486AB0100D96B5E /* Frameworks */, + 8DD76FAF0486AB0100D96B5E /* CopyFiles */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = DispatchWebServer; + productInstallPath = "$(HOME)/bin"; + productName = DispatchWebServer; + productReference = 8DD76FB20486AB0100D96B5E /* DispatchWebServer */; + productType = "com.apple.product-type.tool"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 08FB7793FE84155DC02AAC07 /* Project object */ = { + isa = PBXProject; + buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "DispatchWebServer" */; + compatibilityVersion = "Xcode 3.1"; + hasScannedForEncodings = 1; + mainGroup = 08FB7794FE84155DC02AAC07 /* DispatchWebServer */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8DD76FA90486AB0100D96B5E /* DispatchWebServer */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXSourcesBuildPhase section */ + 8DD76FAB0486AB0100D96B5E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 
4CDA1C1F0F795F5B00E0869E /* DispatchWebServer.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 1DEB928608733DD80010E9CD /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_ENABLE_FIX_AND_CONTINUE = YES; + GCC_MODEL_TUNING = G5; + GCC_OPTIMIZATION_LEVEL = 0; + INSTALL_PATH = /usr/local/bin; + PRODUCT_NAME = DispatchWebServer; + }; + name = Debug; + }; + 1DEB928708733DD80010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_MODEL_TUNING = G5; + INSTALL_PATH = /usr/local/bin; + PRODUCT_NAME = DispatchWebServer; + }; + name = Release; + }; + 1DEB928A08733DD80010E9CD /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + ONLY_ACTIVE_ARCH = YES; + PREBINDING = NO; + SDKROOT = macosx10.6; + }; + name = Debug; + }; + 1DEB928B08733DD80010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + PREBINDING = NO; + SDKROOT = macosx10.6; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "DispatchWebServer" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB928608733DD80010E9CD /* Debug */, + 1DEB928708733DD80010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 1DEB928908733DD80010E9CD /* Build configuration list 
for PBXProject "DispatchWebServer" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB928A08733DD80010E9CD /* Debug */, + 1DEB928B08733DD80010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; +} diff --git a/examples/DispatchWebServer/ReadMe.txt b/examples/DispatchWebServer/ReadMe.txt new file mode 100644 index 000000000..4a6359611 --- /dev/null +++ b/examples/DispatchWebServer/ReadMe.txt @@ -0,0 +1,44 @@ +### DispatchWebServer ### + +=========================================================================== +DESCRIPTION: + +Sample code showing how to: Use dispatch in a real world setting, +schedule file and network I/O, use vnode sources, create and manage +timers. + +=========================================================================== +BUILD REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +RUNTIME REQUIREMENTS: + +Mac OS X version 10.6 Snow Leopard + +=========================================================================== +PACKAGING LIST: + +DispatchWebServer.c - the web server + +=========================================================================== +RUNNING: + +Running the program will start a web server on port 8080; it will read +content from ~/Sites and write ~/Library/Logs/DispatchWebServer-transfer.log +each time it completes a request. + +It will write some output to stdout when it makes new connections, receives +requests, completes requests, and when it closes connections. It also +shows the state of each active request once every five seconds and any +time you send a SIGINFO signal to it.
+ +=========================================================================== +CHANGES FROM PREVIOUS VERSIONS: + +Version 1.0 +- First version + +=========================================================================== +Copyright (C) 2009 Apple Inc. All rights reserved. diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj new file mode 100644 index 000000000..01b065c02 --- /dev/null +++ b/libdispatch.xcodeproj/project.pbxproj @@ -0,0 +1,497 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 45; + objects = { + +/* Begin PBXBuildFile section */ + 2EC9C9B80E8809EF00E2499A /* legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = 2EC9C9B70E8809EF00E2499A /* legacy.c */; }; + 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 96032E4B0F5CC8C700241C5F /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + 96032E4D0F5CC8D100241C5F /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = 96032E4C0F5CC8D100241C5F /* time.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 961B99360F3E83980006BC96 /* benchmark.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B99350F3E83980006BC96 /* benchmark.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 961B99500F3E85C30006BC96 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B994F0F3E85C30006BC96 /* object.h */; settings = 
{ATTRIBUTES = (Public, ); }; }; + 965CD6350F3E806200D4E28D /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + 9661E56B0F3E7DDF00749F3E /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + 9676A0E10F3E755D00713ADB /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + 96929D840F3EA1020041FF5D /* hw_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* hw_shims.h */; }; + 96929D850F3EA1020041FF5D /* os_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* os_shims.h */; }; + 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + 96A8AA870F41E7A400CD570B /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + FC5C9C1E0EADABE3006E462D /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; + FC7BED990E8361E600161930 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = 
FC7BED8A0E8361E600161930 /* queue.c */; }; + FC7BED9A0E8361E600161930 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; + FC7BED9C0E8361E600161930 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; + FC7BED9E0E8361E600161930 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; + FC7BED9F0E8361E600161930 /* legacy.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED900E8361E600161930 /* legacy.h */; settings = {ATTRIBUTES = (Private, ); }; }; + FC7BEDA20E8361E600161930 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + FC7BEDA40E8361E600161930 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + FC7BEDA50E8361E600161930 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + FC7BEDA60E8361E600161930 /* shims.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED970E8361E600161930 /* shims.c */; }; + FCEF04800F5661960067401F /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 2EC9C9B70E8809EF00E2499A /* legacy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = legacy.c; path = src/legacy.c; sourceTree = ""; }; + 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = semaphore_internal.h; path = 
src/semaphore_internal.h; sourceTree = ""; }; + 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = semaphore.h; path = src/semaphore.h; sourceTree = ""; }; + 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = semaphore.c; path = src/semaphore.c; sourceTree = ""; }; + 72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_source_create.3; path = man/dispatch_source_create.3; sourceTree = ""; }; + 72CC940C0ECCD5720031B751 /* dispatch_object.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_object.3; path = man/dispatch_object.3; sourceTree = ""; }; + 72CC940D0ECCD5720031B751 /* dispatch.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch.3; path = man/dispatch.3; sourceTree = ""; }; + 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = base.h; path = src/base.h; sourceTree = ""; }; + 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = time.c; path = src/time.c; sourceTree = ""; }; + 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = time.h; path = src/time.h; sourceTree = ""; }; + 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_apply.3; path = man/dispatch_apply.3; sourceTree = ""; }; + 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_once.3; path = man/dispatch_once.3; sourceTree = ""; }; + 961B99350F3E83980006BC96 /* benchmark.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = benchmark.h; path = src/benchmark.h; sourceTree = ""; }; + 961B994F0F3E85C30006BC96 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = object.h; path = src/object.h; sourceTree = ""; }; + 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_semaphore_create.3; path = man/dispatch_semaphore_create.3; sourceTree = ""; }; + 965CD6340F3E806200D4E28D /* benchmark.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = benchmark.c; path = src/benchmark.c; sourceTree = ""; }; + 965ECC200F3EAB71004DDD89 /* object_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = object_internal.h; path = src/object_internal.h; sourceTree = ""; }; + 9661E56A0F3E7DDF00749F3E /* object.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = object.c; path = src/object.c; sourceTree = ""; }; + 9676A0E00F3E755D00713ADB /* apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = apply.c; path = src/apply.c; sourceTree = ""; }; + 96859A3D0EF71BAD003EB3FB /* dispatch_benchmark.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_benchmark.3; path = man/dispatch_benchmark.3; sourceTree = ""; }; + 96929D820F3EA1020041FF5D /* hw_shims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hw_shims.h; path = src/hw_shims.h; sourceTree = ""; }; + 96929D830F3EA1020041FF5D /* os_shims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = os_shims.h; path = src/os_shims.h; sourceTree = ""; }; + 96929D950F3EA2170041FF5D /* queue_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType 
= sourcecode.c.h; name = queue_internal.h; path = src/queue_internal.h; sourceTree = ""; }; + 96A8AA860F41E7A400CD570B /* source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = source.c; path = src/source.c; sourceTree = ""; }; + 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = queue_private.h; path = src/queue_private.h; sourceTree = ""; }; + 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = once.h; path = src/once.h; sourceTree = ""; }; + 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = once.c; path = src/once.c; sourceTree = ""; }; + D2AAC046055464E500DB518D /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source_internal.h; path = src/source_internal.h; sourceTree = ""; }; + FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_queue_create.3; path = man/dispatch_queue_create.3; sourceTree = ""; }; + FC5C9C1D0EADABE3006E462D /* group.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = group.h; path = src/group.h; sourceTree = ""; }; + FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_after.3; path = man/dispatch_after.3; sourceTree = ""; }; + FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_api.3; path = man/dispatch_api.3; sourceTree = ""; }; + 
FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_async.3; path = man/dispatch_async.3; sourceTree = ""; }; + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_group_create.3; path = man/dispatch_group_create.3; sourceTree = ""; }; + FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_time.3; path = man/dispatch_time.3; sourceTree = ""; }; + FC7BED8A0E8361E600161930 /* queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = queue.c; path = src/queue.c; sourceTree = ""; }; + FC7BED8B0E8361E600161930 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = queue.h; path = src/queue.h; sourceTree = ""; }; + FC7BED8D0E8361E600161930 /* source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source.h; path = src/source.h; sourceTree = ""; }; + FC7BED8F0E8361E600161930 /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = internal.h; path = src/internal.h; sourceTree = ""; }; + FC7BED900E8361E600161930 /* legacy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = legacy.h; path = src/legacy.h; sourceTree = ""; }; + FC7BED930E8361E600161930 /* private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = private.h; path = src/private.h; sourceTree = ""; }; + FC7BED950E8361E600161930 /* protocol.defs */ = {isa = PBXFileReference; explicitFileType = sourcecode.mig; fileEncoding = 4; name = protocol.defs; path = src/protocol.defs; sourceTree = ""; }; + FC7BED960E8361E600161930 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; name = dispatch.h; path = src/dispatch.h; sourceTree = ""; }; + FC7BED970E8361E600161930 /* shims.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = shims.c; path = src/shims.c; sourceTree = ""; }; + FCEF047F0F5661960067401F /* source_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source_private.h; path = src/source_private.h; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + D289987405E68DCB004EDB86 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 08FB7794FE84155DC02AAC07 /* libdispatch */ = { + isa = PBXGroup; + children = ( + FC7BEDAA0E83625200161930 /* Public Headers */, + FC7BEDAF0E83626100161930 /* Private Headers */, + FC7BEDB60E8363DC00161930 /* Project Headers */, + 08FB7795FE84155DC02AAC07 /* Source */, + C6A0FF2B0290797F04C91782 /* Documentation */, + 1AB674ADFE9D54B511CA2CBB /* Products */, + ); + name = libdispatch; + sourceTree = ""; + }; + 08FB7795FE84155DC02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 965CD6340F3E806200D4E28D /* benchmark.c */, + 9661E56A0F3E7DDF00749F3E /* object.c */, + 9676A0E00F3E755D00713ADB /* apply.c */, + FC7BED8A0E8361E600161930 /* queue.c */, + 96DF70BD0F38FE3C0074BD99 /* once.c */, + 96032E4A0F5CC8C700241C5F /* time.c */, + 721F5CCE0F15553500FF03A6 /* semaphore.c */, + 96A8AA860F41E7A400CD570B /* source.c */, + FC7BED970E8361E600161930 /* shims.c */, + 2EC9C9B70E8809EF00E2499A /* legacy.c */, + FC7BED950E8361E600161930 /* protocol.defs */, + ); + name = Source; + sourceTree = ""; + }; + 1AB674ADFE9D54B511CA2CBB /* Products */ = { + isa = PBXGroup; + children = ( + D2AAC046055464E500DB518D /* libdispatch.a */, + ); + name = Products; + sourceTree = ""; + }; + 
C6A0FF2B0290797F04C91782 /* Documentation */ = { + isa = PBXGroup; + children = ( + FC678DE80F97E0C300AB5993 /* dispatch_after.3 */, + FC678DE90F97E0C300AB5993 /* dispatch_api.3 */, + FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */, + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */, + FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */, + 72CC940D0ECCD5720031B751 /* dispatch.3 */, + 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */, + 96859A3D0EF71BAD003EB3FB /* dispatch_benchmark.3 */, + 72CC940C0ECCD5720031B751 /* dispatch_object.3 */, + 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */, + 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */, + 72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */, + FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */, + ); + name = Documentation; + sourceTree = ""; + }; + FC7BEDAA0E83625200161930 /* Public Headers */ = { + isa = PBXGroup; + children = ( + 96032E4C0F5CC8D100241C5F /* time.h */, + 721F5C5C0F15520500FF03A6 /* semaphore.h */, + 961B994F0F3E85C30006BC96 /* object.h */, + 96C9553A0F3EAEDD000D2CA4 /* once.h */, + 961B99350F3E83980006BC96 /* benchmark.h */, + 72CC942F0ECCD8750031B751 /* base.h */, + FC7BED960E8361E600161930 /* dispatch.h */, + FC7BED8B0E8361E600161930 /* queue.h */, + FC7BED8D0E8361E600161930 /* source.h */, + FC5C9C1D0EADABE3006E462D /* group.h */, + ); + name = "Public Headers"; + sourceTree = ""; + }; + FC7BEDAF0E83626100161930 /* Private Headers */ = { + isa = PBXGroup; + children = ( + FC7BED930E8361E600161930 /* private.h */, + 96BC39BC0F3EBAB100C59689 /* queue_private.h */, + FC7BED900E8361E600161930 /* legacy.h */, + ); + name = "Private Headers"; + sourceTree = ""; + }; + FC7BEDB60E8363DC00161930 /* Project Headers */ = { + isa = PBXGroup; + children = ( + 965ECC200F3EAB71004DDD89 /* object_internal.h */, + 96929D950F3EA2170041FF5D /* queue_internal.h */, + 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, + FC0B34780FA2851C0080FFA0 /* source_internal.h */, + FCEF047F0F5661960067401F 
/* source_private.h */, + 96929D820F3EA1020041FF5D /* hw_shims.h */, + 96929D830F3EA1020041FF5D /* os_shims.h */, + FC7BED8F0E8361E600161930 /* internal.h */, + ); + name = "Project Headers"; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + D2AAC043055464E500DB518D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 72CC94300ECCD8750031B751 /* base.h in Headers */, + FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, + FC7BED9A0E8361E600161930 /* queue.h in Headers */, + FC7BED9C0E8361E600161930 /* source.h in Headers */, + FC5C9C1E0EADABE3006E462D /* group.h in Headers */, + FC7BEDA20E8361E600161930 /* private.h in Headers */, + FC7BED9F0E8361E600161930 /* legacy.h in Headers */, + FC7BED9E0E8361E600161930 /* internal.h in Headers */, + 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, + 961B99360F3E83980006BC96 /* benchmark.h in Headers */, + 961B99500F3E85C30006BC96 /* object.h in Headers */, + 96929D840F3EA1020041FF5D /* hw_shims.h in Headers */, + 96929D850F3EA1020041FF5D /* os_shims.h in Headers */, + 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, + 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, + 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, + 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, + FCEF04800F5661960067401F /* source_private.h in Headers */, + 96032E4D0F5CC8D100241C5F /* time.h in Headers */, + 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, + FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXLegacyTarget section */ + 721EB4790F69D26F00845379 /* testbots */ = { + isa = PBXLegacyTarget; + buildArgumentsString = testbots; + buildConfigurationList = 721EB4850F69D2A600845379 /* Build configuration list for PBXLegacyTarget "testbots" */; + buildPhases = ( + ); + buildToolPath = 
/usr/bin/make; + buildWorkingDirectory = testing; + dependencies = ( + ); + name = testbots; + passBuildSettingsInEnvironment = 0; + productName = testbots; + }; + 7276FCBA0EB10E0F00F7F487 /* test */ = { + isa = PBXLegacyTarget; + buildArgumentsString = test; + buildConfigurationList = 7276FCC80EB10E2300F7F487 /* Build configuration list for PBXLegacyTarget "test" */; + buildPhases = ( + ); + buildToolPath = /usr/bin/make; + buildWorkingDirectory = testing; + dependencies = ( + ); + name = test; + passBuildSettingsInEnvironment = 0; + productName = test; + }; +/* End PBXLegacyTarget section */ + +/* Begin PBXNativeTarget section */ + D2AAC045055464E500DB518D /* libdispatch */ = { + isa = PBXNativeTarget; + buildConfigurationList = 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */; + buildPhases = ( + D2AAC043055464E500DB518D /* Headers */, + D2AAC044055464E500DB518D /* Sources */, + D289987405E68DCB004EDB86 /* Frameworks */, + 2EC9C9800E846B5200E2499A /* ShellScript */, + 4CED8B9D0EEDF8B600AF99AB /* ShellScript */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = libdispatch; + productName = libdispatch; + productReference = D2AAC046055464E500DB518D /* libdispatch.a */; + productType = "com.apple.product-type.library.static"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 08FB7793FE84155DC02AAC07 /* Project object */ = { + isa = PBXProject; + buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; + compatibilityVersion = "Xcode 3.1"; + hasScannedForEncodings = 1; + mainGroup = 08FB7794FE84155DC02AAC07 /* libdispatch */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + D2AAC045055464E500DB518D /* libdispatch */, + 7276FCBA0EB10E0F00F7F487 /* test */, + 721EB4790F69D26F00845379 /* testbots */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 2EC9C9800E846B5200E2499A /* ShellScript */ 
= { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "# private.h supersedes dispatch.h where available\nmv \"$DSTROOT\"/usr/local/include/dispatch/private.h \"$DSTROOT\"/usr/local/include/dispatch/dispatch.h\nln -sf dispatch.h \"$DSTROOT\"/usr/local/include/dispatch/private.h\n\n# keep events.h around for a little while\nln -sf ../../../include/dispatch/source.h \"$DSTROOT\"/usr/local/include/dispatch/events.h"; + }; + 4CED8B9D0EEDF8B600AF99AB /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "#!/bin/sh\n\nmkdir -p $DSTROOT/usr/share/man/man3 || true\nmkdir -p $DSTROOT/usr/local/share/man/man3 || true\n\n# Copy man pages\ncd $SRCROOT/man\nBASE_PAGES=\"dispatch.3 dispatch_after.3 dispatch_api.3 dispatch_apply.3 dispatch_async.3 dispatch_group_create.3 dispatch_object.3 dispatch_once.3 dispatch_queue_create.3 dispatch_semaphore_create.3 dispatch_source_create.3 dispatch_time.3\"\n\nPRIVATE_PAGES=\"dispatch_benchmark.3\"\n\ncp ${BASE_PAGES} $DSTROOT/usr/share/man/man3\ncp ${PRIVATE_PAGES} $DSTROOT/usr/local/share/man/man3\n\n# Make hard links (lots of hard links)\n\ncd $DSTROOT/usr/local/share/man/man3\nln -f dispatch_benchmark.3 dispatch_benchmark_f.3\nchown ${INSTALL_OWNER}:${INSTALL_GROUP} $PRIVATE_PAGES\nchmod $INSTALL_MODE_FLAG $PRIVATE_PAGES\n\n\ncd $DSTROOT/usr/share/man/man3\n\nchown ${INSTALL_OWNER}:${INSTALL_GROUP} $BASE_PAGES\nchmod $INSTALL_MODE_FLAG $BASE_PAGES\n\nln -f dispatch_after.3 dispatch_after_f.3\nln -f dispatch_apply.3 dispatch_apply_f.3\nln -f dispatch_once.3 dispatch_once_f.3\n\nfor m in dispatch_async_f dispatch_sync dispatch_sync_f; do\n\tln -f dispatch_async.3 ${m}.3\ndone\n\nfor m in dispatch_group_enter 
dispatch_group_leave dispatch_group_wait dispatch_group_async dispatch_group_async_f dispatch_group_notify dispatch_group_notify_f; do\n\tln -f dispatch_group_create.3 ${m}.3\ndone\n\nfor m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do\n\tln -f dispatch_object.3 ${m}.3\ndone\n\nfor m in dispatch_semaphore_signal dispatch_semaphore_wait; do\n\tln -f dispatch_semaphore_create.3 ${m}.3\ndone\n\nfor m in dispatch_get_current_queue dispatch_main dispatch_get_main_queue dispatch_get_global_queue dispatch_queue_get_label dispatch_set_target_queue; do\n\tln -f dispatch_queue_create.3 ${m}.3\ndone\n\nfor m in dispatch_source_set_event_handler dispatch_source_set_event_handler_f dispatch_source_set_cancel_handler dispatch_source_set_cancel_handler_f dispatch_source_cancel dispatch_source_testcancel dispatch_source_get_handle dispatch_source_get_mask dispatch_source_get_data dispatch_source_merge_data dispatch_source_set_timer; do\n\tln -f dispatch_source_create.3 ${m}.3\ndone\n\nln -f dispatch_time.3 dispatch_walltime.3"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + D2AAC044055464E500DB518D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + FC7BEDA40E8361E600161930 /* protocol.defs in Sources */, + FC7BED990E8361E600161930 /* queue.c in Sources */, + FC7BEDA60E8361E600161930 /* shims.c in Sources */, + 2EC9C9B80E8809EF00E2499A /* legacy.c in Sources */, + 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, + 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, + 9676A0E10F3E755D00713ADB /* apply.c in Sources */, + 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, + 965CD6350F3E806200D4E28D /* benchmark.c in Sources */, + 96A8AA870F41E7A400CD570B /* source.c in Sources */, + 96032E4B0F5CC8C700241C5F /* time.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End 
PBXSourcesBuildPhase section */ + +/* Begin XCBuildConfiguration section */ + 1DEB91ED08733DB70010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + COPY_PHASE_STRIP = NO; + CURRENT_PROJECT_VERSION = "$(RC_ProjectSourceVersion)"; + EXECUTABLE_PREFIX = ""; + GCC_CW_ASM_SYNTAX = NO; + GCC_ENABLE_CPP_EXCEPTIONS = NO; + GCC_ENABLE_CPP_RTTI = NO; + GCC_ENABLE_OBJC_EXCEPTIONS = NO; + GCC_OPTIMIZATION_LEVEL = s; + GCC_PREPROCESSOR_DEFINITIONS = "__DARWIN_NON_CANCELABLE=1"; + GENERATE_MASTER_OBJECT_FILE = NO; + INSTALL_PATH = /usr/local/lib/system; + LINK_WITH_STANDARD_LIBRARIES = NO; + OTHER_CFLAGS = ( + "-fno-unwind-tables", + "-fno-exceptions", + "-I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders", + "-fdiagnostics-show-option", + "-fsched-interblock", + "-freorder-blocks", + "-Xarch_x86_64", + "-momit-leaf-frame-pointer", + "-Xarch_i386", + "-momit-leaf-frame-pointer", + ); + OTHER_CFLAGS_debug = "-O0 -fstack-protector -fno-inline -DDISPATCH_DEBUG=1"; + PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch; + PRODUCT_NAME = libdispatch; + PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch; + SEPARATE_STRIP = NO; + VERSIONING_SYSTEM = "apple-generic"; + VERSION_INFO_PREFIX = __; + }; + name = Release; + }; + 1DEB91F108733DB70010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + BUILD_VARIANTS = ( + normal, + debug, + profile, + ); + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_ENABLE_PASCAL_STRINGS = NO; + GCC_OPTIMIZATION_LEVEL = s; + GCC_STRICT_ALIASING = YES; + GCC_SYMBOLS_PRIVATE_EXTERN = YES; + GCC_TREAT_WARNINGS_AS_ERRORS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + LINK_WITH_STANDARD_LIBRARIES = YES; + 
ONLY_ACTIVE_ARCH = NO; + OTHER_CFLAGS = ( + "-I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders", + "-fdiagnostics-show-option", + "-fsched-interblock", + "-freorder-blocks", + "-Xarch_x86_64", + "-momit-leaf-frame-pointer", + "-Xarch_i386", + "-momit-leaf-frame-pointer", + ); + OTHER_CFLAGS_debug = "-O0 -fstack-protector -fno-inline -DDISPATCH_DEBUG=1"; + PREBINDING = NO; + STRIP_INSTALLED_PRODUCT = NO; + WARNING_CFLAGS = ( + "-Wall", + "-Wextra", + "-Waggregate-return", + "-Wfloat-equal", + "-Wpacked", + "-Wmissing-declarations", + "-Wstrict-overflow=4", + "-Wstrict-aliasing=2", + ); + }; + name = Release; + }; + 721EB47A0F69D26F00845379 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Release; + }; + 7276FCBB0EB10E0F00F7F487 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB91ED08733DB70010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB91F108733DB70010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 721EB4850F69D2A600845379 /* Build configuration list for PBXLegacyTarget "testbots" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 721EB47A0F69D26F00845379 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 7276FCC80EB10E2300F7F487 /* Build configuration list for PBXLegacyTarget "test" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 7276FCBB0EB10E0F00F7F487 /* Release */, + ); + 
defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; +} diff --git a/man/dispatch.3 b/man/dispatch.3 new file mode 100644 index 000000000..c3618635b --- /dev/null +++ b/man/dispatch.3 @@ -0,0 +1,38 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch 3 +.Os Darwin +.Sh NAME +.Nm dispatch +.Nd the dispatch framework +.Sh SYNOPSIS +.Fd #include +.Sh DESCRIPTION +The dispatch framework allows blocks to be scheduled for asynchronous and +concurrent execution via the core functions described in +.Xr dispatch_async 3 and +.Xr dispatch_apply 3 . +.Pp +Dispatch queues are the basic units of organization of blocks. Several queues +are created by default, and applications may create additional queues for their +own use. See +.Xr dispatch_queue_create 3 +for more information. +.Pp +Dispatch groups allow applications to track the progress of blocks submitted to +queues and take action when the blocks complete. See +.Xr dispatch_group_create 3 +for more information. +.Pp +The dispatch framework also provides functions to monitor underlying system +events and automatically submit event handler blocks to dispatch queues. +.Sh SEE ALSO +.Xr dispatch_async 3 , +.Xr dispatch_object 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_group_create 3 , +.Xr dispatch_source_create 3 , +.Xr dispatch_benchmark 3 , +.Xr dispatch_time 3 , +.Xr dispatch_apply 3 , +.Xr dispatch_once 3 . diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 new file mode 100644 index 000000000..404aefb4a --- /dev/null +++ b/man/dispatch_after.3 @@ -0,0 +1,57 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2009 +.Dt dispatch_after 3 +.Os Darwin +.Sh NAME +.Nm dispatch_after +.Nd schedule blocks for deferred execution +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_after +.Fa "dispatch_time_t when" "dispatch_queue_t queue" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_after_f +.Fa "dispatch_time_t when" "dispatch_queue_t queue" "void *context" "void (*function)(void *)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_after +function submits the +.Fa block +to the given +.Fa queue +at the time specified by the +.Fa when +parameter. +The +.Fa when +parameter is a value created by +.Fn dispatch_time +or +.Fn dispatch_walltime . +.Pp +For a more detailed description about submitting blocks to queues, see +.Xr dispatch_async 3 . +.Sh CAVEATS +Specifying +.Vt DISPATCH_TIME_NOW +as the +.Fa when +parameter +is supported, but is not as efficient as calling +.Fn dispatch_async . +The result of passing +.Vt DISPATCH_TIME_FOREVER +as the +.Fa when +parameter is undefined. +.Sh FUNDAMENTALS +The +.Fn dispatch_after +function is a wrapper around +.Fn dispatch_after_f . +.Sh SEE ALSO +.Xr dispatch_async 3 , +.Xr dispatch_time 3 diff --git a/man/dispatch_api.3 b/man/dispatch_api.3 new file mode 100644 index 000000000..a39fa64d6 --- /dev/null +++ b/man/dispatch_api.3 @@ -0,0 +1,44 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_api 3 +.Os Darwin +.Sh NAME +.Nm dispatch_api +.Nd Designing API using dispatch +.Sh DESCRIPTION +The following is a brief summary of some of the common design patterns to +consider when designing and implementing API in terms of dispatch queues +and blocks. +.Pp +A general recommendation is to allow both a callback block and target dispatch +queue to be specified. This gives the application the greatest flexibility in +handling asynchronous events. +.Pp +It's also recommended that interfaces take only a single block as the last +parameter. 
This is both for consistency across projects, as well as the visual +aesthetics of multiline blocks that are declared inline. The dispatch queue to +which the block will be submitted should immediately precede the block argument +(second-to-last argument). For example: +.Pp +.Bd -literal -offset indent +read_async(file, callback_queue, ^{ + printf("received callback.\n"); +}); +.Ed +.Pp +When function pointer alternatives to interfaces that take blocks are provided, +the argument order of the function signature should be identical to the block +variant; with the exception that the block argument is replaced with a context +pointer, and a new last parameter is added, which is the function to call. +.Pp +The function based callback should pass the context pointer as the first +argument, and the subsequent arguments should be identical to the block based +variant (albeit offset by one in order). +.Pp +It is also important to use consistent naming. The dispatch API, for example, +uses the suffix "_f" for function based variants. +.Pp +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_queue_create 3 diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 new file mode 100644 index 000000000..48fb395c5 --- /dev/null +++ b/man/dispatch_apply.3 @@ -0,0 +1,80 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2009 +.Dt dispatch_apply 3 +.Os Darwin +.Sh NAME +.Nm dispatch_apply +.Nd schedule blocks for iterative execution +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_apply +.Fa "size_t iterations" "dispatch_queue_t queue" "void (^block)(size_t)" +.Fc +.Ft void +.Fo dispatch_apply_f +.Fa "size_t iterations" "dispatch_queue_t queue" "void *context" "void (*function)(void *, size_t)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_apply +function provides data-level concurrency through a "for (;;)" loop like primitive: +.Bd -literal +dispatch_queue_t the_queue = dispatch_get_concurrent_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT); +size_t iterations = 10; + +// 'idx' is zero indexed, just like: +// for (idx = 0; idx < iterations; idx++) + +dispatch_apply(iterations, the_queue, ^(size_t idx) { + printf("%zu\\n", idx); +}); +.Ed +.Pp +Like a "for (;;)" loop, the +.Fn dispatch_apply +function is synchronous. +If asynchronous behavior is desired, please wrap the call to +.Fn dispatch_apply +with a call to +.Fn dispatch_async +against another queue. +.Pp +Sometimes, when the block passed to +.Fn dispatch_apply +is simple, the use of striding can tune performance. +Calculating the optimal stride is best left to experimentation. +Start with a stride of one and work upwards until the desired performance is +achieved (perhaps using a power of two search): +.Bd -literal +#define STRIDE 3 + +dispatch_apply(count / STRIDE, queue, ^(size_t idx) { + size_t j = idx * STRIDE; + size_t j_stop = j + STRIDE; + do { + printf("%zu\\n", j++); + } while (j < j_stop); +}); + +size_t i; +for (i = count - (count % STRIDE); i < count; i++) { + printf("%zu\\n", i); +} +.Ed +.Sh FUNDAMENTALS +Conceptually, +.Fn dispatch_apply +is a convenient wrapper around +.Fn dispatch_async +and a semaphore to wait for completion. +In practice, the dispatch library optimizes this function. +.Pp +The +.Fn dispatch_apply +function is a wrapper around +.Fn dispatch_apply_f . 
+.Sh SEE ALSO +.Xr dispatch_async 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_queue_create 3 diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 new file mode 100644 index 000000000..4b874fb2b --- /dev/null +++ b/man/dispatch_async.3 @@ -0,0 +1,234 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_async 3 +.Os Darwin +.Sh NAME +.Nm dispatch_async , +.Nm dispatch_sync +.Nd schedule blocks for execution +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_async +.Fa "dispatch_queue_t queue" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_sync +.Fa "dispatch_queue_t queue" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_async_f +.Fa "dispatch_queue_t queue" "void *context" "void (*function)(void *)" +.Fc +.Ft void +.Fo dispatch_sync_f +.Fa "dispatch_queue_t queue" "void *context" "void (*function)(void *)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_async +and +.Fn dispatch_sync +functions schedule blocks for concurrent execution within the +.Xr dispatch 3 +framework. Blocks are submitted to a queue which dictates the policy for their +execution. See +.Xr dispatch_queue_create 3 +for more information about creating dispatch queues. +.Pp +These functions support efficient temporal synchronization, background +concurrency and data-level concurrency. These same functions can also be used +for efficient notification of the completion of asynchronous blocks (a.k.a. +callbacks). +.Sh TEMPORAL SYNCHRONIZATION +Synchronization is often required when multiple threads of execution access +shared data concurrently. The simplest form of synchronization is +mutual-exclusion (a lock), whereby different subsystems execute concurrently +until a shared critical section is entered. 
In the +.Xr pthread 3 +family of procedures, temporal synchronization is accomplished like so: +.Bd -literal -offset indent +int r = pthread_mutex_lock(&my_lock); +assert(r == 0); + +// critical section + +r = pthread_mutex_unlock(&my_lock); +assert(r == 0); +.Ed +.Pp +The +.Fn dispatch_sync +function may be used with a serial queue to accomplish the same style of +synchronization. For example: +.Bd -literal -offset indent +dispatch_sync(my_queue, ^{ + // critical section +}); +.Ed +.Pp +In addition to providing a more concise expression of synchronization, this +approach is less error prone as the critical section cannot be accidentally +left without restoring the queue to a reentrant state. +.Pp +The +.Fn dispatch_async +function may be used to implement deferred critical sections when the result +of the block is not needed locally. Deferred critical sections have the same +synchronization properties as the above code, but are non-blocking and +therefore more efficient to perform. For example: +.Bd -literal +dispatch_async(my_queue, ^{ + // critical section +}); +.Ed +.Sh BACKGROUND CONCURRENCY +The +.Fn dispatch_async +function may be used to execute trivial background tasks on a global concurrent +queue. For example: +.Bd -literal +dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ + // background operation +}); +.Ed +.Pp +This approach is an efficient replacement for +.Xr pthread_create 3 . +.Sh COMPLETION CALLBACKS +Completion callbacks can be accomplished via nested calls to the +.Fn dispatch_async +function. It is important to remember to retain the destination queue before the +first call to +.Fn dispatch_async , +and to release that queue at the end of the completion callback to ensure the +destination queue is not deallocated while the completion callback is pending. 
+For example: +.Bd -literal +void +async_read(object_t obj, + void *where, size_t bytes, + dispatch_queue_t destination_queue, + void (^reply_block)(ssize_t r, int err)) +{ + // There are better ways of doing async I/O. + // This is just an example of nested blocks. + + dispatch_retain(destination_queue); + + dispatch_async(obj->queue, ^{ + ssize_t r = read(obj->fd, where, bytes); + int err = errno; + + dispatch_async(destination_queue, ^{ + reply_block(r, err); + }); + dispatch_release(destination_queue); + }); +} +.Ed +.Sh RECURSIVE LOCKS +While +.Fn dispatch_sync +can replace a lock, it cannot replace a recursive lock. Unlike locks, queues +support both asynchronous and synchronous operations, and those operations are +ordered by definition. A recursive call to +.Fn dispatch_sync +causes a simple deadlock as the currently executing block waits for the next +block to complete, but the next block will not start until the currently +running block completes. +.Pp +As the dispatch framework was designed, we studied recursive locks. We found +that the vast majority of recursive locks are deployed retroactively when +ill-defined lock hierarchies are discovered. As a consequence, the adoption of +recursive locks often mutates obvious bugs into obscure ones. This study also +revealed an insight: if reentrancy is unavoidable, then reader/writer locks are +preferable to recursive locks. Disciplined use of reader/writer locks enables +reentrancy only when reentrancy is safe (the "read" side of the lock). 
+.Pp +Nevertheless, if it is absolutely necessary, what follows is an imperfect way of +implementing recursive locks using the dispatch framework: +.Bd -literal +void +sloppy_lock(object_t object, void (^block)(void)) +{ + if (object->owner == pthread_self()) { + return block(); + } + dispatch_sync(object->queue, ^{ + object->owner = pthread_self(); + block(); + object->owner = NULL; + }); +} +.Ed +.Pp +The above example does not solve the case where queue A runs on thread X which +calls +.Fn dispatch_sync +against queue B which runs on thread Y which recursively calls +.Fn dispatch_sync +against queue A, which deadlocks both examples. This is bug-for-bug compatible +with nontrivial pthread usage. In fact, nontrivial reentrancy is impossible to +support in recursive locks once the ultimate level of reentrancy is deployed +(IPC or RPC). +.Sh IMPLIED REFERENCES +Synchronous functions within the dispatch framework hold an implied reference +on the target queue. In other words, the synchronous function borrows the +reference of the calling function (this is valid because the calling function +is blocked waiting for the result of the synchronous function, and therefore +cannot modify the reference count of the target queue until after the +synchronous function has returned). +For example: +.Bd -literal +queue = dispatch_queue_create("com.example.queue", NULL); +assert(queue); +dispatch_sync(queue, ^{ + do_something(); + //dispatch_release(queue); // NOT SAFE -- dispatch_sync() is still using 'queue' +}); +dispatch_release(queue); // SAFELY balanced outside of the block provided to dispatch_sync() +.Ed +.Pp +This is in contrast to asynchronous functions which must retain both the block +and target queue for the duration of the asynchronous operation (as the calling +function may immediately release its interest in these objects). 
+.Sh FUNDAMENTALS +Conceptually, +.Fn dispatch_sync +is a convenient wrapper around +.Fn dispatch_async +with the addition of a semaphore to wait for completion of the block, and a +wrapper around the block to signal its completion. See +.Xr dispatch_semaphore_create 3 +for more information about dispatch semaphores. The actual implementation of the +.Fn dispatch_sync +function may be optimized and differ from the above description. +.Pp +The +.Fn dispatch_async +function is a wrapper around +.Fn dispatch_async_f . +The application-defined +.Fa context +parameter is passed to the +.Fa function +when it is invoked on the target +.Fa queue . +.Pp +The +.Fn dispatch_sync +function is a wrapper around +.Fn dispatch_sync_f . +The application-defined +.Fa context +parameter is passed to the +.Fa function +when it is invoked on the target +.Fa queue . +.Pp +.Sh SEE ALSO +.Xr dispatch_once 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_apply 3 diff --git a/man/dispatch_benchmark.3 b/man/dispatch_benchmark.3 new file mode 100644 index 000000000..0890aff31 --- /dev/null +++ b/man/dispatch_benchmark.3 @@ -0,0 +1,55 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_benchmark 3 +.Os Darwin +.Sh NAME +.Nm dispatch_benchmark +.Nd Measures block execution time +.Sh SYNOPSIS +.Fd #include +.Ft uint64_t +.Fo dispatch_benchmark +.Fa "size_t count" "void (^block)(void)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_benchmark +function executes the given +.Fa block +multiple times according to the +.Fa count +variable and then returns the average number of nanoseconds per execution. +This function is for debugging and performance analysis work. +For the best +results, pass a high count value to +.Fn dispatch_benchmark . +When benchmarking concurrent code, please compare the +serial version of the code against the concurrent version, and compare the +concurrent version on different classes of hardware. 
+Please look for inflection +points with various data sets and keep the following facts in mind: +.Pp +.Bl -bullet -offset indent -compact +.It +Code bound by computational bandwidth may be inferred by proportional +changes in performance as concurrency is increased. +.It +Code bound by memory bandwidth may be inferred by negligible changes in +performance as concurrency is increased. +.It +Code bound by critical sections may be inferred by retrograde changes in +performance as concurrency is increased. +.Bl -bullet -offset indent -compact +.It +Intentional: locks, mutexes, and condition variables. +.It +Accidental: unrelated and frequently modified data on the same cache-line. +.El +.El +.Sh RETURN VALUE +The +.Fn dispatch_benchmark +function returns the average number of nanoseconds the given block takes to +execute. +.Sh SEE ALSO +.Xr dispatch 3 diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 new file mode 100644 index 000000000..5cca4ca8a --- /dev/null +++ b/man/dispatch_group_create.3 @@ -0,0 +1,149 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2009 +.Dt dispatch_group_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_group_create , +.Nm dispatch_group_async , +.Nm dispatch_group_wait , +.Nm dispatch_group_notify +.Nd group blocks submitted to queues +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_group_t +.Fo dispatch_group_create +.Fa void +.Fc +.Ft void +.Fo dispatch_group_enter +.Fa "dispatch_group_t group" +.Fc +.Ft void +.Fo dispatch_group_leave +.Fa "dispatch_group_t group" +.Fc +.Ft long +.Fo dispatch_group_wait +.Fa "dispatch_group_t group" "dispatch_time_t timeout" +.Fc +.Ft void +.Fo dispatch_group_notify +.Fa "dispatch_group_t group" "dispatch_queue_t queue" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_group_notify_f +.Fa "dispatch_group_t group" "dispatch_queue_t queue" "void *context" "void (*function)(void *)" +.Fc +.Ft void +.Fo dispatch_group_async +.Fa "dispatch_group_t group" "dispatch_queue_t queue" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_group_async_f +.Fa "dispatch_group_t group" "dispatch_queue_t queue" "void *context" "void (*function)(void *)" +.Fc +.Sh DESCRIPTION +A dispatch group is an association of one or more blocks submitted to dispatch +queues for asynchronous invocation. +Applications may use dispatch groups to +wait for the completion of blocks associated with the group. +.Pp +The +.Fn dispatch_group_create +function returns a new and empty dispatch group. +.Pp +The +.Fn dispatch_group_enter +and +.Fn dispatch_group_leave +functions update the number of blocks running within a group. +.Pp +The +.Fn dispatch_group_wait +function waits until all blocks associated with the +.Fa group +have completed, or until the specified +.Fa timeout +has elapsed. +If the +.Fa group +becomes empty within the specified amount of time, the function will return zero +indicating success. Otherwise, a non-zero return code will be returned. 
+When +.Va DISPATCH_TIME_FOREVER +is passed as the +.Fa timeout , +calls to this function will wait an unlimited amount of time until the group +becomes empty and the return value is always zero. +.Pp +The +.Fn dispatch_group_notify +function provides asynchronous notification of the completion of the blocks +associated with the +.Fa group +by submitting the +.Fa block +to the specified +.Fa queue +once all blocks associated with the +.Fa group +have completed. +The system holds a reference to the dispatch group while an asynchronous +notification is pending, therefore it is valid to release the +.Fa group +after setting a notification block. +The group will be empty at the time the notification block is submitted to the +target queue. The group may either be released with +.Fn dispatch_release +or reused for additional operations. +.Pp +The +.Fn dispatch_group_async +convenience function behaves like so: +.Bd -literal +void +dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block) +{ + dispatch_retain(group); + dispatch_group_enter(group); + dispatch_async(queue, ^{ + block(); + dispatch_group_leave(group); + dispatch_release(group); + }); +} +.Ed +.Sh RETURN VALUE +The +.Fn dispatch_group_create +function returns NULL on failure and non-NULL on success. +.Pp +The +.Fn dispatch_group_wait +function returns zero upon success and non-zero after the timeout expires. +If the timeout is +.Va DISPATCH_TIME_FOREVER , +then +.Fn dispatch_group_wait +waits forever and always returns zero. +.Sh MEMORY MODEL +Dispatch groups are retained and released via calls to +.Fn dispatch_retain +and +.Fn dispatch_release . +.Sh FUNDAMENTALS +The +.Fn dispatch_group_async +and +.Fn dispatch_group_notify +functions are wrappers around +.Fn dispatch_group_async_f +and +.Fn dispatch_group_notify_f +respectively. 
+.Sh SEE ALSO +.Xr dispatch_object 3 , +.Xr dispatch_async 3 , +.Xr dispatch_time 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 new file mode 100644 index 000000000..b60831ac7 --- /dev/null +++ b/man/dispatch_object.3 @@ -0,0 +1,99 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_object 3 +.Os Darwin +.Sh NAME +.Nm dispatch_object +.Nd General manipulation of dispatch objects +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_retain +.Fa "dispatch_object_t object" +.Fc +.Ft void +.Fo dispatch_release +.Fa "dispatch_object_t object" +.Fc +.Ft void +.Fo dispatch_suspend +.Fa "dispatch_object_t object" +.Fc +.Ft void +.Fo dispatch_resume +.Fa "dispatch_object_t object" +.Fc +.Ft "void *" +.Fo dispatch_get_context +.Fa "dispatch_object_t object" +.Fc +.Ft void +.Fo dispatch_set_context +.Fa "dispatch_object_t object" +.Fa "void *context" +.Fc +.Ft void +.Fo dispatch_set_finalizer_f +.Fa "dispatch_object_t object" +.Fa "dispatch_function_t finalizer" +.Fc +.Sh DESCRIPTION +Dispatch objects share functions for coordinating memory management, suspension, +cancellation and context pointers. While all dispatch objects are retainable, +not all objects support suspension, context pointers or finalizers (currently +only queues and sources support these additional interfaces). +.Sh MEMORY MANAGEMENT +Objects returned by creation functions in the dispatch framework may be +uniformly retained and released with the functions +.Fn dispatch_retain +and +.Fn dispatch_release +respectively. +.Pp +The dispatch framework does not guarantee that any given client has the last or +only reference to a given object. Objects may be retained internally by the +system. +.Sh SUSPENSION +The invocation of blocks on dispatch queues or dispatch sources may be suspended +or resumed with the functions +.Fn dispatch_suspend +and +.Fn dispatch_resume +respectively. 
+The dispatch framework always checks the suspension status before executing a +block, but such changes never affect a block during execution (non-preemptive). +Therefore the suspension of an object is asynchronous, unless it is performed +from the context of the target queue for the given object. +The result of suspending or resuming an object that is not a dispatch queue or +a dispatch source is undefined. +.Pp +.Em Important : +suspension applies to all aspects of the dispatch object life cycle, including +the finalizer function and cancellation handler. Therefore it is important to +balance calls to +.Fn dispatch_suspend +and +.Fn dispatch_resume +such that the dispatch object is fully resumed when the last reference is +released. The result of releasing all references to a dispatch object while in +a suspended state is undefined. +.Sh CONTEXT POINTERS +Dispatch queues and sources support supplemental context pointers. The value of +the context pointer may be retrieved and updated with +.Fn dispatch_get_context +and +.Fn dispatch_set_context +respectively. +The +.Fn dispatch_set_finalizer_f +specifies an optional per-object finalizer function that is invoked +asynchronously when the last reference to the object is released. This gives the +application an opportunity to free the context data associated with the object. +.Pp +The result of getting or setting the context of an object that is not a +dispatch queue or a dispatch source is undefined. +.Sh SEE ALSO +.Xr dispatch_group_create 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_source_create 3 diff --git a/man/dispatch_once.3 b/man/dispatch_once.3 new file mode 100644 index 000000000..da218968c --- /dev/null +++ b/man/dispatch_once.3 @@ -0,0 +1,44 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2009 +.Dt dispatch_once 3 +.Os Darwin +.Sh NAME +.Nm dispatch_once +.Nd execute a block only once +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_once +.Fa "dispatch_once_t *predicate" "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_once_f +.Fa "dispatch_once_t *predicate" "void *context" "void (*function)(void *)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_once +function provides a simple and efficient mechanism to run an initializer +exactly once, similar to +.Xr pthread_once 3 . +Well designed code hides the use of lazy initialization. +For example: +.Bd -literal +FILE *getlogfile(void) +{ + static dispatch_once_t pred; + static FILE *logfile; + + dispatch_once(&pred, ^{ + logfile = fopen(MY_LOG_FILE, "a"); + }); + + return logfile; +} +.Ed +.Pp +.Sh FUNDAMENTALS +The +.Fn dispatch_once +function is a wrapper around +.Fn dispatch_once_f . diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 new file mode 100644 index 000000000..d11c1c1d1 --- /dev/null +++ b/man/dispatch_queue_create.3 @@ -0,0 +1,318 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2008 +.Dt dispatch_queue_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_queue_create , +.Nm dispatch_queue_get_label , +.Nm dispatch_get_current_queue , +.Nm dispatch_get_global_queue , +.Nm dispatch_get_main_queue , +.Nm dispatch_main , +.Nm dispatch_set_target_queue +.Nd where blocks are scheduled for execution +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_queue_t +.Fo dispatch_queue_create +.Fa "const char *label" "dispatch_queue_attr_t attr" +.Fc +.Ft "const char *" +.Fo dispatch_queue_get_label +.Fa "dispatch_queue_t queue" +.Fc +.Ft dispatch_queue_t +.Fo dispatch_get_current_queue +.Fa void +.Fc +.Ft dispatch_queue_t +.Fo dispatch_get_global_queue +.Fa "long priority" +.Fa "unsigned long flags" +.Fc +.Ft dispatch_queue_t +.Fo dispatch_get_main_queue +.Fa void +.Fc +.Ft void +.Fo dispatch_main +.Fa void +.Fc +.Ft void +.Fo dispatch_set_target_queue +.Fa "dispatch_object_t object" +.Fa "dispatch_queue_t target" +.Fc +.Sh DESCRIPTION +Queues are the fundamental mechanism for scheduling blocks for execution within +the +.Xr dispatch 3 +framework. +.Pp +All blocks submitted to dispatch queues are dequeued in FIFO order. +By default, queues created with +.Fn dispatch_queue_create +wait for the previously dequeued block to complete before dequeuing the next +block. This FIFO completion behavior is sometimes simply described as a "serial queue." +Queues are not bound to any specific thread of execution and blocks submitted +to independent queues may execute concurrently. +Queues, like all dispatch objects, are reference counted and newly created +queues have a reference count of one. +.Pp +The optional +.Fa label +argument is used to describe the purpose of the queue and is useful during +debugging and performance analysis. By convention, clients should pass a +reverse DNS style label. +If a label is provided, it is copied. If a label is not provided, then +.Fn dispatch_queue_get_label +returns an empty C string. 
+For example: +.Pp +.Bd -literal +my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL); +.Ed +.Pp +The +.Fa attr +argument is reserved for future use and must be NULL. +.Pp +Queues may be temporarily suspended and resumed with the functions +.Fn dispatch_suspend +and +.Fn dispatch_resume +respectively. Suspension is checked prior to block execution and is +.Em not +preemptive. +.Sh MAIN QUEUE +The dispatch framework provides a default serial queue for the application to use. +This queue is accessed via +.Fn dispatch_get_main_queue . +Programs must call +.Fn dispatch_main +at the end of +.Fn main +in order to process blocks submitted to the main queue. (See the compatibility +section for exceptions.) +.Sh GLOBAL CONCURRENT QUEUES +Unlike the main queue or queues allocated with +.Fn dispatch_queue_create , +the global concurrent queues schedule blocks as soon as threads become +available (non-FIFO completion order). The global concurrent queues represent +three priority bands: +.Bl -bullet -compact -offset indent +.It +DISPATCH_QUEUE_PRIORITY_HIGH +.It +DISPATCH_QUEUE_PRIORITY_DEFAULT +.It +DISPATCH_QUEUE_PRIORITY_LOW +.El +.Pp +Blocks submitted to the high priority global queue will be invoked before those +submitted to the default or low priority global queues. Blocks submitted to the +low priority global queue will only be invoked if no blocks are pending on the +default or high priority queues. +.Pp +.Sh RETURN VALUES +The +.Fn dispatch_queue_create +function returns NULL on failure. +.Pp +The +.Fn dispatch_queue_get_label +function always returns a valid C string. An empty C string is returned if the +.Fa label +was NULL creation time. +.Pp +The +.Fn dispatch_get_main_queue +function returns the default main queue. +.Pp +The +.Fn dispatch_get_current_queue +function always returns a valid queue. When called from within a block submitted +to a dispatch queue, that queue will be returned. 
If this function is called from +the main thread before +.Fn dispatch_main +is called, then the result of +.Fn dispatch_get_main_queue +is returned. Otherwise, the result of +.Fo dispatch_get_global_queue +.Fa DISPATCH_QUEUE_PRIORITY_DEFAULT +.Fa 0 +.Fc +will be returned in all other cases. +.Pp +The +.Fn dispatch_main +function never returns. +.Sh TARGET QUEUE +The +.Fn dispatch_set_target_queue +function updates the target queue of the given dispatch object. The target +queue of an object is responsible for processing the object. Currently only +dispatch queues and dispatch sources are supported by this function. The result +of using +.Fn dispatch_set_target_queue +with any other dispatch object type is undefined. +.Pp +The new target queue is retained by the given object before the previous target +queue is released. The new target queue will take effect between block +executions, but not in the middle of any existing block executions +(non-preemptive). +.Pp +The priority of a dispatch queue is inherited by its target queue. +In order to change the priority of a queue created with +.Fn dispatch_queue_create , +use the +.Fn dispatch_get_global_queue +function to obtain a target queue of the desired priority. The +.Fa flags +argument is reserved for future use and must be zero. Passing any value other +than zero may result in a +.Vt NULL +return value. +.Pp +The target queue of a dispatch source specifies where its event handler and +cancellation handler blocks will be submitted. See +.Xr dispatch_source_create 3 +for more information about dispatch sources. +.Pp +The result of passing the main queue or a global concurrent queue to the first +argument of +.Fn dispatch_set_target_queue +is undefined. +.Pp +Directly or indirectly setting the target queue of a dispatch queue to itself is undefined. +.Sh CAVEATS +Code cannot make any assumptions about the queue returned by +.Fn dispatch_get_current_queue . 
+The returned queue may have arbitrary policies that may surprise code that tries +to schedule work with the queue. The list of policies includes, but is not +limited to, queue width (i.e. serial vs. concurrent), scheduling priority, +security credential or filesystem configuration. Therefore, +.Fn dispatch_get_current_queue +.Em MUST +only be used for identity tests or debugging. +.Sh COMPATIBILITY +Cocoa applications need not call +.Fn dispatch_main . +Blocks submitted to the main queue will be executed as part of the "common modes" +of the application's main NSRunLoop or CFRunLoop. +.Pp +The dispatch framework is a pure C level API. As a result, it does not catch +exceptions generated by higher level languages such as Objective-C or C++. +Applications +.Em MUST +catch all exceptions before returning from a block submitted to a dispatch +queue; otherwise the internal data structures of the dispatch framework will be +left in an inconsistent state. +.Pp +The dispatch framework manages the relationship between dispatch queues and +threads of execution. As a result, applications +.Em MUST NOT +delete or mutate objects that they did not create. 
The following interfaces +.Em MUST NOT +be called by blocks submitted to a dispatch queue: +.Bl -bullet -offset indent +.It +.Fn pthread_cancel +.It +.Fn pthread_detach +.It +.Fn pthread_join +.It +.Fn pthread_kill +.It +.Fn pthread_exit +.El +.Pp +Applications +.Em MAY +call the following interfaces from a block submitted to a dispatch queue if +and only if they restore the thread to its original state before returning: +.Bl -bullet -offset indent +.It +.Fn pthread_setcancelstate +.It +.Fn pthread_setcanceltype +.It +.Fn pthread_setschedparam +.It +.Fn pthread_sigmask +.It +.Fn pthread_setugid_np +.It +.Fn pthread_chdir +.It +.Fn pthread_fchdir +.El +.Pp +Applications +.Em MUST NOT +rely on the following interfaces returning predictable results between +invocations of blocks submitted to a dispatch queue: +.Bl -bullet -offset indent +.It +.Fn pthread_self +.It +.Fn pthread_getschedparam +.It +.Fn pthread_get_stacksize_np +.It +.Fn pthread_get_stackaddr_np +.It +.Fn pthread_mach_thread_np +.It +.Fn pthread_from_mach_thread_np +.El +.Pp +While the result of +.Fn pthread_self +may change between invocations of blocks, the value will not change during the +execution of any single block. Because the underlying thread may change between +block invocations on a single queue, using per-thread data as an out-of-band +return value is error prone. In other words, the result of calling +.Fn pthread_setspecific +and +.Fn pthread_getspecific +is well defined within a single block, but not across multiple blocks. Also, +one cannot make any assumptions about when the destructor passed to +.Fn pthread_key_create +is called. The destructor may be called between the invocation of blocks on +the same queue, or during the idle state of a process. 
+.Pp +The following example code correctly handles per-thread return values: +.Bd -literal -offset indent +__block int r; +__block int e; +dispatch_sync(queue, ^{ + r = kill(1, 0); + // Copy the per-thread return value to the callee thread + e = errno; +}); +printf("kill(1,0) returned %d and errno %d\n", r, e); +.Ed +.Pp +Note that in the above example +.Va errno +is a per-thread variable and must be copied out explicitly as the block may be +invoked on a different thread of execution than the caller. Another example of +per-thread data that would need to be copied is the use of +.Fn getpwnam +instead of +.Fn getpwnam_r . +.Pp +As an optimization, +.Fn dispatch_sync +invokes the block on the current thread when possible. In this case, the thread +specific data such as +.Va errno +may persist from the block until back to the caller. Great care should be taken +not to accidentally rely on this side-effect. +.Pp +.Sh SEE ALSO +.Xr dispatch_object 3 , +.Xr dispatch_async 3 , +.Xr dispatch_source_create 3 diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 new file mode 100644 index 000000000..12506423e --- /dev/null +++ b/man/dispatch_semaphore_create.3 @@ -0,0 +1,114 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_semaphore_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_semaphore_create , +.Nm dispatch_semaphore_signal , +.Nm dispatch_semaphore_wait +.Nd synchronized counting semaphore +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_semaphore_t +.Fo dispatch_semaphore_create +.Fa "long count" +.Fc +.Ft long +.Fo dispatch_semaphore_signal +.Fa "dispatch_semaphore_t semaphore" +.Fc +.Ft long +.Fo dispatch_semaphore_wait +.Fa "dispatch_semaphore_t semaphore" "dispatch_time_t timeout" +.Fc +.Sh DESCRIPTION +Dispatch semaphores are used to synchronize threads. +The +.Fa timeout +parameter is creatable with the +.Xr dispatch_time 3 +or +.Xr dispatch_walltime 3 +functions. 
+.Sh COMPLETION SYNCHRONIZATION +If the +.Fa count +parameter is equal to zero, then the semaphore is useful for synchronizing completion of work. +For example: +.Bd -literal -offset indent +sema = dispatch_semaphore_create(0); + +dispatch_async(queue, ^{ + foo(); + dispatch_semaphore_signal(sema); +}); + +bar(); + +dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); +.Ed +.Sh FINITE RESOURCE POOL +If the +.Fa count +parameter is greater than zero, then the semaphore is useful for managing a finite pool of resources. +For example, a library that wants to limit Unix descriptor usage: +.Bd -literal -offset indent +sema = dispatch_semaphore_create(getdtablesize() / 4); +.Ed +.Pp +At each Unix FD allocation: +.Bd -literal -offset indent +dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); +fd = open("/etc/services", O_RDONLY); +.Ed +.Pp +When each FD is closed: +.Bd -literal -offset indent +close(fd); +dispatch_semaphore_signal(sema); +.Ed +.Sh RETURN VALUES +The +.Fn dispatch_semaphore_create +function returns NULL if no memory is available or if the +.Fa count +parameter is less than zero. +.Pp +The +.Fn dispatch_semaphore_signal +function returns non-zero when a thread is woken. +Otherwise, zero is returned. +.Pp +The +.Fn dispatch_semaphore_wait +function returns zero upon success and non-zero after the timeout expires. If the timeout is DISPATCH_TIME_FOREVER, then +.Fn dispatch_semaphore_wait +waits forever and always returns zero. +.Sh MEMORY MODEL +Dispatch semaphores are retained and released via calls to +.Fn dispatch_retain +and +.Fn dispatch_release . +.Sh CAVEATS +Dispatch semaphores are strict counting semaphores. +In other words, dispatch semaphores do not saturate at any particular value. +Saturation can be achieved through atomic compare-and-swap logic. 
+What follows is a saturating binary semaphore: +.Bd -literal +void +saturating_semaphore_signal(dispatch_semaphore_t dsema, int *sent) +{ + if (__sync_bool_compare_and_swap(sent, 0, 1)) { + dispatch_semaphore_signal(dsema); + } +} + +void +saturating_semaphore_wait(dispatch_semaphore_t dsema, int *sent) +{ + *sent = 0; + dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); +} +.Ed +.Sh SEE ALSO +.Xr dispatch_object 3 diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 new file mode 100644 index 000000000..0a38cd294 --- /dev/null +++ b/man/dispatch_source_create.3 @@ -0,0 +1,456 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.Dd May 1, 2009 +.Dt dispatch_source_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_source_create +.Nd dispatch event sources +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_source_t +.Fo dispatch_source_create +.Fa "dispatch_source_type_t type" +.Fa "uintptr_t handle" +.Fa "unsigned long mask" +.Fa "dispatch_queue_t queue" +.Fc +.Ft void +.Fo dispatch_source_set_event_handler +.Fa "dispatch_source_t source" +.Fa "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_source_set_event_handler_f +.Fa "dispatch_source_t source" +.Fa "void (*function)(void *)" +.Fc +.Ft void +.Fo dispatch_source_set_cancel_handler +.Fa "dispatch_source_t source" +.Fa "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_source_set_cancel_handler_f +.Fa "dispatch_source_t source" +.Fa "void (*function)(void *)" +.Fc +.Ft void +.Fo dispatch_source_cancel +.Fa "dispatch_source_t source" +.Fc +.Ft void +.Fo dispatch_source_testcancel +.Fa "dispatch_source_t source" +.Fc +.Ft uintptr_t +.Fo dispatch_source_get_handle +.Fa "dispatch_source_t source" +.Fc +.Ft "unsigned long" +.Fo dispatch_source_get_mask +.Fa "dispatch_source_t source" +.Fc +.Ft "unsigned long" +.Fo dispatch_source_get_data +.Fa "dispatch_source_t source" +.Fc +.Ft void +.Fo dispatch_source_merge_data +.Fa "dispatch_source_t source" +.Fa "unsigned long data" +.Fc +.Ft void +.Fo 
dispatch_source_set_timer +.Fa "dispatch_source_t source" +.Fa "dispatch_time_t start" +.Fa "uint64_t interval" +.Fa "uint64_t leeway" +.Fc +.Sh DESCRIPTION +Dispatch event sources may be used to monitor a variety of system objects and +events including file descriptors, mach ports, processes, virtual filesystem +nodes, signal delivery and timers. +.Pp +When a state change occurs, the dispatch source will submit its event handler +block to its target queue. +.Pp +The +.Fn dispatch_source_create +function creates a new dispatch source object that may be retained and released +with calls to +.Fn dispatch_retain +and +.Fn dispatch_release +respectively. Newly created sources are created in a suspended state. After the +source has been configured by setting an event handler, cancellation handler, +context, etc., the source must be activated by a call to +.Fn dispatch_resume +before any events will be delivered. +.Pp +Dispatch sources may be one of the following types: +.Bl -bullet -compact -offset indent +.It +DISPATCH_SOURCE_TYPE_DATA_ADD +.It +DISPATCH_SOURCE_TYPE_DATA_OR +.It +DISPATCH_SOURCE_TYPE_MACH_SEND +.It +DISPATCH_SOURCE_TYPE_MACH_RECV +.It +DISPATCH_SOURCE_TYPE_PROC +.It +DISPATCH_SOURCE_TYPE_READ +.It +DISPATCH_SOURCE_TYPE_SIGNAL +.It +DISPATCH_SOURCE_TYPE_TIMER +.It +DISPATCH_SOURCE_TYPE_VNODE +.It +DISPATCH_SOURCE_TYPE_WRITE +.El +.Pp +The +.Fa handle +and +.Fa mask +arguments to +.Fn dispatch_source_create +and the return values of the +.Fn dispatch_source_get_handle , +.Fn dispatch_source_get_mask , +and +.Fn dispatch_source_get_data +functions should be interpreted according to the type of the dispatch source. +.Pp +The +.Fn dispatch_source_get_handle +function +returns the underlying handle to the dispatch source (i.e. file descriptor, +mach port, process identifier, etc.). The result of this function may be cast +directly to the underlying type. 
+.Pp +The +.Fn dispatch_source_get_mask +function +returns the set of flags that were specified at source creation time via the +.Fa mask +argument. +.Pp +The +.Fn dispatch_source_get_data +function returns the currently pending data for the dispatch source. +This function should only be called from within the source's event handler. +The result of calling this function from any other context is undefined. +.Pp +The +.Fn dispatch_source_merge_data +function is intended for use with the +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD +and +.Vt DISPATCH_SOURCE_TYPE_DATA_OR +source types. The result of using this function with any other source type is +undefined. Calling this function will atomically add or logical OR the data +into the source's data, and trigger the delivery of the source's event handler. +.Pp +.Sh SOURCE EVENT HANDLERS +In order to receive events from the dispatch source, an event handler should be +specified via +.Fn dispatch_source_set_event_handler . +The event handler block is submitted to the source's target queue when the state +of the underlying system handle changes, or when an event occurs. +.Pp +Dispatch sources may be suspended or resumed independently of their target +queues using +.Fn dispatch_suspend +and +.Fn dispatch_resume +on the dispatch source directly. The data describing events which occur while a +source is suspended are coalesced and delivered once the source is resumed. +.Pp +The +.Fa handler +block +need not be reentrant safe, as it is not resubmitted to the target +.Fa queue +until any prior invocation for that dispatch source has completed. +When the handler is set, the dispatch source will perform a +.Fn Block_copy +on the +.Fa handler +block. +.Pp +.Sh CANCELLATION +The +.Fn dispatch_source_cancel +function asynchronously cancels the dispatch source, preventing any further +invocation of its event handler block. Cancellation does not interrupt a +currently executing handler block (non-preemptive). 
+.Pp +The +.Fn dispatch_source_testcancel +function may be used to determine whether the specified source has been +canceled. A non-zero value will be returned if the source is canceled. +.Pp +When a dispatch source is canceled its optional cancellation handler will be +submitted to its target queue. The cancellation handler may be specified via +.Fn dispatch_source_set_cancel_handler . +This cancellation handler is invoked only once, and only as a direct consequence +of calling +.Fn dispatch_source_cancel . +.Pp +.Em Important: +a cancellation handler is required for file descriptor and mach port based +sources in order to safely close the descriptor or destroy the port. Closing the +descriptor or port before the cancellation handler has run may result in a race +condition: if a new descriptor is allocated with the same value as the recently +closed descriptor while the source's event handler is still running, the event +handler may read/write data to the wrong descriptor. +.Pp +.Sh DISPATCH SOURCE TYPES +The following section contains a summary of supported dispatch event types and +the interpretation of their parameters and returned data. +.Pp +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD , +.Vt DISPATCH_SOURCE_TYPE_DATA_OR +.Pp +Sources of this type allow applications to manually trigger the source's event +handler via a call to +.Fn dispatch_source_merge_data . +The data will be merged with the source's pending data via an atomic add or +logic OR (based on the source's type), and the event handler block will be +submitted to the source's target queue. The +.Fa mask +and +.Fa data +are application defined. These sources have no +.Fa handle +and zero should be used. +.Pp +.Vt DISPATCH_SOURCE_TYPE_MACH_SEND +.Pp +Sources of this type monitor a mach port with a send right for state changes. 
+The +.Fa handle +is the mach port (mach_port_t) to monitor and the +.Fa mask +may be: +.Bl -tag -width "XXDISPATCH_PROC_SIGNAL" -compact -offset indent +.It \(bu DISPATCH_MACH_SEND_DEAD +The port's corresponding receive right has been destroyed +.El +.Pp +The data returned by +.Fn dispatch_source_get_data +indicates which of the events in the +.Fa mask +were observed. +.Pp +.Vt DISPATCH_SOURCE_TYPE_MACH_RECV +.Pp +Sources of this type monitor a mach port with a receive right for state changes. +The +.Fa handle +is the mach port (mach_port_t) to monitor and the +.Fa mask +is unused and should be zero. +The event handler block will be submitted to the target queue when a message +on the mach port is waiting to be received. +.Pp +.Vt DISPATCH_SOURCE_TYPE_PROC +.Pp +Sources of this type monitor processes for state changes. +The +.Fa handle +is the process identifier (pid_t) of the process to monitor and the +.Fa mask +may be one or more of the following: +.Bl -tag -width "XXDISPATCH_PROC_SIGNAL" -compact -offset indent +.It \(bu DISPATCH_PROC_EXIT +The process has exited and is available to +.Xr wait 2 . +.It \(bu DISPATCH_PROC_FORK +The process has created one or more child processes. +.It \(bu DISPATCH_PROC_EXEC +The process has become another executable image via a call to +.Xr execve 2 +or +.Xr posix_spawn 2 . +.It \(bu DISPATCH_PROC_REAP +The process status has been collected by its parent process via +.Xr wait 2 . +.It \(bu DISPATCH_PROC_SIGNAL +A signal was delivered to the process. +.El +.Pp +The data returned by +.Fn dispatch_source_get_data +indicates which of the events in the +.Fa mask +were observed. +.Pp +.Vt DISPATCH_SOURCE_TYPE_READ +.Pp +Sources of this type monitor file descriptors for pending data. +The +.Fa handle +is the file descriptor (int) to monitor and the +.Fa mask +is unused and should be zero. +.Pp +The data returned by +.Fn dispatch_source_get_data +is an estimated number of bytes available to be read from the descriptor. 
This +estimate should be treated as a suggested +.Em minimum +read buffer size. There are no guarantees that a complete read of this size +will be performed. +.Pp +Users of this source type are strongly encouraged to perform non-blocking I/O +and handle any truncated reads or error conditions that may occur. See +.Xr fcntl 2 +for additional information about setting the +.Vt O_NONBLOCK +flag on a file descriptor. +.Pp +.Vt DISPATCH_SOURCE_TYPE_SIGNAL +.Pp +Sources of this type monitor signals delivered to the current process. The +.Fa handle +is the signal number to monitor (int) and the +.Fa mask +is unused and should be zero. +.Pp +The data returned by +.Fn dispatch_source_get_data +is the number of signals received since the last invocation of the event handler +block. +.Pp +Unlike signal handlers specified via +.Fn sigaction , +the execution of the event handler block does not interrupt the current thread +of execution; therefore the handler block is not limited to the use of signal +safe interfaces defined in +.Xr sigaction 2 . +Furthermore, multiple observers of a given signal are supported; thus allowing +applications and libraries to cooperate safely. However, a dispatch source +.Em does not +install a signal handler or otherwise alter the behavior of signal delivery. +Therefore, applications must ignore or at least catch any signal that terminates +a process by default. For example, near the top of +.Fn main : +.Bd -literal -offset indent +signal(SIGTERM, SIG_IGN); +.Ed +.Pp +.Vt DISPATCH_SOURCE_TYPE_TIMER +.Pp +Sources of this type periodically submit the event handler block to the target +queue on an interval specified by +.Fn dispatch_source_set_timer . +The +.Fa handle +and +.Fa mask +arguments are unused and should be zero. +.Pp +A best effort attempt is made to submit the event handler block to the target +queue at the specified time; however, actual invocation may occur at a later +time. 
+.Pp +The data returned by +.Fn dispatch_source_get_data +is the number of times the timer has fired since the last invocation of the +event handler block. +.Pp +The function +.Fn dispatch_source_set_timer +takes as an argument the +.Fa start +time of the timer (initial fire time) represented as a +.Vt dispatch_time_t . +The timer dispatch source will use the same clock as the function used to +create this value. (See +.Xr dispatch_time 3 +for more information.) The +.Fa interval , +in nanoseconds, specifies the period at which the timer should repeat. All +timers will repeat indefinitely until +.Fn dispatch_source_cancel +is called. The +.Fa leeway , +in nanoseconds, is a hint to the system that it may defer the timer in order to +align with other system activity for improved system performance or reduced +power consumption. (For example, an application might perform a periodic task +every 5 minutes with a leeway of up to 30 seconds.) Note that some latency is +to be expected for all timers even when a value of zero is used. +.Pp +.Em Note : +Under the C language, untyped numbers default to the +.Vt int +type. This can lead to truncation bugs when arithmetic operations with other +numbers are expected to generate a +.Vt uint64_t +sized result. When in doubt, use +.Vt ull +as a suffix. For example: +.Bd -literal -offset indent +3ull * NSEC_PER_SEC +.Ed +.Pp +.Vt DISPATCH_SOURCE_TYPE_VNODE +.Pp +Sources of this type monitor the virtual filesystem nodes for state changes. +The +.Fa handle +is a file descriptor (int) referencing the node to monitor, and +the +.Fa mask +may be one or more of the following: +.Bl -tag -width "XXDISPATCH_VNODE_ATTRIB" -compact -offset indent +.It \(bu DISPATCH_VNODE_DELETE +The referenced node was removed from the filesystem namespace via +.Xr unlink 2 . 
+.It \(bu DISPATCH_VNODE_WRITE +A write to the referenced file occurred +.It \(bu DISPATCH_VNODE_EXTEND +The referenced file was extended +.It \(bu DISPATCH_VNODE_ATTRIB +The metadata attributes of the referenced node have changed +.It \(bu DISPATCH_VNODE_LINK +The link count on the referenced node has changed +.It \(bu DISPATCH_VNODE_RENAME +The referenced node was renamed +.It \(bu DISPATCH_VNODE_REVOKE +Access to the referenced node was revoked via +.Xr revoke 2 +or the underlying filesystem was unmounted. +.El +.Pp +The data returned by +.Fn dispatch_source_get_data +indicates which of the events in the +.Fa mask +were observed. +.Pp +.Vt DISPATCH_SOURCE_TYPE_WRITE +.Pp +Sources of this type monitor file descriptors for available write buffer space. +The +.Fa handle +is the file descriptor (int) to monitor and the +.Fa mask +is unused and should be zero. +.Pp +Users of this source type are strongly encouraged to perform non-blocking I/O +and handle any truncated reads or error conditions that may occur. See +.Xr fcntl 2 +for additional information about setting the +.Vt O_NONBLOCK +flag on a file descriptor. +.Pp +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_object 3 , +.Xr dispatch_queue_create 3 diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 new file mode 100644 index 000000000..06d78e8e4 --- /dev/null +++ b/man/dispatch_time.3 @@ -0,0 +1,110 @@ +.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+.Dd May 1, 2009 +.Dt dispatch_time 3 +.Os Darwin +.Sh NAME +.Nm dispatch_time , +.Nm dispatch_walltime +.Nd Calculate temporal milestones +.Sh SYNOPSIS +.Fd #include +.Vt static const dispatch_time_t DISPATCH_TIME_NOW = 0 ; +.Vt static const dispatch_time_t DISPATCH_TIME_FOREVER = ~0ull ; +.Ft dispatch_time_t +.Fo dispatch_time +.Fa "dispatch_time_t base" "int64_t offset" +.Fc +.Ft dispatch_time_t +.Fo dispatch_walltime +.Fa "struct timespec *base" "int64_t offset" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_time +and +.Fn dispatch_walltime +functions provide a simple mechanism for expressing temporal milestones for use +with dispatch functions that need timeouts or operate on a schedule. +.Pp +The +.Fa dispatch_time_t +type is a semi-opaque integer, with only the special values +.Vt DISPATCH_TIME_NOW +and +.Vt DISPATCH_TIME_FOREVER +being externally defined. All other values are represented using an internal +format that is not safe for integer arithmetic or comparison. +The internal format is subject to change. +.Pp +The +.Fn dispatch_time +function returns a milestone relative to an existing milestone after adding +.Fa offset +nanoseconds. +If the +.Fa base +parameter maps internally to a wall clock, then the returned value is +relative to the wall clock. +Otherwise, if +.Fa base +is +.Vt DISPATCH_TIME_NOW , +then the current time of the default host clock is used. +.Pp +The +.Fn dispatch_walltime +function is useful for creating a milestone relative to a fixed point in time +using the wall clock, as specified by the optional +.Fa base +parameter. If +.Fa base +is NULL, then the current time of the wall clock is used. +.Sh EDGE CONDITIONS +The +.Fn dispatch_time +and +.Fn dispatch_walltime +functions detect overflow and underflow conditions when applying the +.Fa offset +parameter. +.Pp +Overflow causes +.Vt DISPATCH_TIME_FOREVER +to be returned. When +.Fa base +is +.Vt DISPATCH_TIME_FOREVER , +then the +.Fa offset +parameter is ignored. 
+.Pp +Underflow causes the smallest representable value to be +returned for a given clock. +.Sh EXAMPLES +Create a milestone two seconds in the future: +.Bd -literal -offset indent +milestone = dispatch_time(DISPATCH_TIME_NOW, 2LL * NSEC_PER_SEC); +.Ed +.Pp +Create a milestone for use as an infinite timeout: +.Bd -literal -offset indent +milestone = DISPATCH_TIME_FOREVER; +.Ed +.Pp +Create a milestone on Tuesday, January 19, 2038: +.Bd -literal -offset indent +struct timespec ts; +ts.tv_sec = 0x7FFFFFFF; +ts.tv_nsec = 0; +milestone = dispatch_walltime(&ts, 0); +.Ed +.Sh RETURN VALUE +These functions return an abstract value for use with +.Fn dispatch_after , +.Fn dispatch_group_wait , +or +.Fn dispatch_semaphore_wait . +.Sh SEE ALSO +.Xr dispatch_after 3 , +.Xr dispatch_group_create 3 , +.Xr dispatch_semaphore_create 3 diff --git a/src/apply.c b/src/apply.c new file mode 100644 index 000000000..2c51eb270 --- /dev/null +++ b/src/apply.c @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ +#include "internal.h" +
+// We'd use __attribute__((aligned(x))), but it does not actually increase the +// alignment of stack variables. All we really need is the stack usage of the +// local thread to be sufficiently far away to avoid cache-line contention with the +// busy 'da_index' variable. 
+// +// NOTE: 'char' arrays cause GCC to insert buffer overflow detection logic +struct dispatch_apply_s { + long _da_pad0[DISPATCH_CACHELINE_SIZE / sizeof(long)]; + void (*da_func)(void *, size_t); + void *da_ctxt; + size_t da_iterations; + size_t da_index; + uint32_t da_thr_cnt; + dispatch_semaphore_t da_sema; + long _da_pad1[DISPATCH_CACHELINE_SIZE / sizeof(long)]; +}; + +static void +_dispatch_apply2(void *_ctxt) +{ + struct dispatch_apply_s *da = _ctxt; + size_t const iter = da->da_iterations; + typeof(da->da_func) const func = da->da_func; + void *const ctxt = da->da_ctxt; + size_t idx; + + _dispatch_workitem_dec(); // this unit executes many items + + // Striding is the responsibility of the caller. + while (fastpath((idx = dispatch_atomic_inc(&da->da_index) - 1) < iter)) { + func(ctxt, idx); + _dispatch_workitem_inc(); + } + + if (dispatch_atomic_dec(&da->da_thr_cnt) == 0) { + dispatch_semaphore_signal(da->da_sema); + } +} + +static void +_dispatch_apply_serial(void *context) +{ + struct dispatch_apply_s *da = context; + size_t idx = 0; + + _dispatch_workitem_dec(); // this unit executes many items + do { + da->da_func(da->da_ctxt, idx); + _dispatch_workitem_inc(); + } while (++idx < da->da_iterations); +} + +#ifdef __BLOCKS__ +void +dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) +{ + struct Block_basic *bb = (void *)work; + + dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); +} +#endif + +// 256 threads should be good enough for the short to mid term +#define DISPATCH_APPLY_MAX_CPUS 256 + +DISPATCH_NOINLINE +void +dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) +{ + struct dispatch_apply_dc_s { + DISPATCH_CONTINUATION_HEADER(dispatch_apply_dc_s); + } da_dc[DISPATCH_APPLY_MAX_CPUS]; + struct dispatch_apply_s da; + size_t i; + + da.da_func = func; + da.da_ctxt = ctxt; + da.da_iterations = iterations; + da.da_index = 0; + da.da_thr_cnt = 
_dispatch_hw_config.cc_max_active; + + if (da.da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { + da.da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; + } + if (slowpath(iterations == 0)) { + return; + } + if (iterations < da.da_thr_cnt) { + da.da_thr_cnt = (uint32_t)iterations; + } + if (slowpath(dq->dq_width <= 2 || da.da_thr_cnt <= 1)) { + return dispatch_sync_f(dq, &da, _dispatch_apply_serial); + } + + for (i = 0; i < da.da_thr_cnt; i++) { + da_dc[i].do_vtable = NULL; + da_dc[i].do_next = &da_dc[i + 1]; + da_dc[i].dc_func = _dispatch_apply2; + da_dc[i].dc_ctxt = &da; + } + + da.da_sema = _dispatch_get_thread_semaphore(); + + // some queues are easy to borrow and some are not + if (slowpath(dq->do_targetq)) { + _dispatch_queue_push_list(dq, (void *)&da_dc[0], (void *)&da_dc[da.da_thr_cnt - 1]); + } else { + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + // root queues are always concurrent and safe to borrow + _dispatch_queue_push_list(dq, (void *)&da_dc[1], (void *)&da_dc[da.da_thr_cnt - 1]); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + // The first da_dc[] element was explicitly not pushed on to the queue. 
+ // We need to either call it like so: + // da_dc[0].dc_func(da_dc[0].dc_ctxt); + // Or, given that we know the 'func' and 'ctxt', we can call it directly: + _dispatch_apply2(&da); + _dispatch_workitem_inc(); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + } + dispatch_semaphore_wait(da.da_sema, DISPATCH_TIME_FOREVER); + _dispatch_put_thread_semaphore(da.da_sema); +} + +#if 0 +#ifdef __BLOCKS__ +void +dispatch_stride(size_t offset, size_t stride, size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) +{ + struct Block_basic *bb = (void *)work; + dispatch_stride_f(offset, stride, iterations, dq, bb, (void *)bb->Block_invoke); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_stride_f(size_t offset, size_t stride, size_t iterations, + dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) +{ + if (stride == 0) { + stride = 1; + } + dispatch_apply(iterations / stride, queue, ^(size_t idx) { + size_t i = idx * stride + offset; + size_t stop = i + stride; + do { + func(ctxt, i++); + } while (i < stop); + }); + + dispatch_sync(queue, ^{ + size_t i; + for (i = iterations - (iterations % stride); i < iterations; i++) { + func(ctxt, i + offset); + } + }); +} +#endif diff --git a/src/base.h b/src/base.h new file mode 100644 index 000000000..3799a9a6e --- /dev/null +++ b/src/base.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_BASE__ +#define __DISPATCH_BASE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#endif + +#ifdef __cplusplus +/* + * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ + * aware of type compatibility. + */ +typedef struct dispatch_object_s { +private: + dispatch_object_s(); + ~dispatch_object_s(); + dispatch_object_s(const dispatch_object_s &); + void operator=(const dispatch_object_s &); +} *dispatch_object_t; +#else +typedef union { + struct dispatch_object_s *_do; + struct dispatch_continuation_s *_dc; + struct dispatch_queue_s *_dq; + struct dispatch_queue_attr_s *_dqa; + struct dispatch_group_s *_dg; + struct dispatch_source_s *_ds; + struct dispatch_source_attr_s *_dsa; + struct dispatch_semaphore_s *_dsema; +} dispatch_object_t __attribute__((transparent_union)); +#endif + +typedef void (*dispatch_function_t)(void *); + +#ifdef __cplusplus +#define DISPATCH_DECL(name) typedef struct name##_s : public dispatch_object_s {} *name##_t; +#else +/*! 
@parseOnly */ +#define DISPATCH_DECL(name) typedef struct name##_s *name##_t; +#endif + +#ifdef __GNUC__ +#define DISPATCH_NORETURN __attribute__((__noreturn__)) +#define DISPATCH_NOTHROW __attribute__((__nothrow__)) +#define DISPATCH_NONNULL1 __attribute__((__nonnull__(1))) +#define DISPATCH_NONNULL2 __attribute__((__nonnull__(2))) +#define DISPATCH_NONNULL3 __attribute__((__nonnull__(3))) +#define DISPATCH_NONNULL4 __attribute__((__nonnull__(4))) +#define DISPATCH_NONNULL5 __attribute__((__nonnull__(5))) +#define DISPATCH_NONNULL6 __attribute__((__nonnull__(6))) +#define DISPATCH_NONNULL7 __attribute__((__nonnull__(7))) +#if __clang__ +// rdar://problem/6857843 +#define DISPATCH_NONNULL_ALL +#else +#define DISPATCH_NONNULL_ALL __attribute__((__nonnull__)) +#endif +#define DISPATCH_SENTINEL __attribute__((__sentinel__)) +#define DISPATCH_PURE __attribute__((__pure__)) +#define DISPATCH_WARN_RESULT __attribute__((__warn_unused_result__)) +#define DISPATCH_MALLOC __attribute__((__malloc__)) +#else +/*! @parseOnly */ +#define DISPATCH_NORETURN +/*! @parseOnly */ +#define DISPATCH_NOTHROW +/*! @parseOnly */ +#define DISPATCH_NONNULL1 +/*! @parseOnly */ +#define DISPATCH_NONNULL2 +/*! @parseOnly */ +#define DISPATCH_NONNULL3 +/*! @parseOnly */ +#define DISPATCH_NONNULL4 +/*! @parseOnly */ +#define DISPATCH_NONNULL5 +/*! @parseOnly */ +#define DISPATCH_NONNULL6 +/*! @parseOnly */ +#define DISPATCH_NONNULL7 +/*! @parseOnly */ +#define DISPATCH_NONNULL_ALL +/*! @parseOnly */ +#define DISPATCH_SENTINEL +/*! @parseOnly */ +#define DISPATCH_PURE +/*! @parseOnly */ +#define DISPATCH_WARN_RESULT +/*! @parseOnly */ +#define DISPATCH_MALLOC +#endif + +#endif diff --git a/src/benchmark.c b/src/benchmark.c new file mode 100644 index 000000000..fafe90968 --- /dev/null +++ b/src/benchmark.c @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + + +struct __dispatch_benchmark_data_s { + mach_timebase_info_data_t tbi; + uint64_t loop_cost; + void (*func)(void *); + void *ctxt; + size_t count; +}; + +static void +_dispatch_benchmark_init(void *context) +{ + struct __dispatch_benchmark_data_s *bdata = context; + // try and simulate performance of real benchmark as much as possible + // keep 'f', 'c' and 'cnt' in registers + register void (*f)(void *) = bdata->func; + register void *c = bdata->ctxt; + register size_t cnt = bdata->count; + uint64_t start, delta; +#ifdef __LP64__ + __uint128_t lcost; +#else + long double lcost; +#endif + kern_return_t kr; + size_t i = 0; + + kr = mach_timebase_info(&bdata->tbi); + dispatch_assert_zero(kr); + + start = mach_absolute_time(); + do { + i++; + f(c); + } while (i < cnt); + delta = mach_absolute_time() - start; + + lcost = delta; + lcost *= bdata->tbi.numer; + lcost /= bdata->tbi.denom; + lcost /= cnt; + + bdata->loop_cost = lcost; +} + +#ifdef __BLOCKS__ +uint64_t +dispatch_benchmark(size_t count, void (^block)(void)) +{ + struct Block_basic *bb = (void *)block; + return dispatch_benchmark_f(count, block, (void *)bb->Block_invoke); +} +#endif + +uint64_t +dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(void *)) +{ + static struct __dispatch_benchmark_data_s bdata = { 
+ .func = (void *)dummy_function, + .count = 10000000ul, // ten million + }; + static dispatch_once_t pred; + uint64_t ns, start, delta; +#ifdef __LP64__ + __uint128_t conversion, big_denom; +#else + long double conversion, big_denom; +#endif + size_t i = 0; + + dispatch_once_f(&pred, &bdata, _dispatch_benchmark_init); + + if (slowpath(count == 0)) { + return 0; + } + + start = mach_absolute_time(); + do { + i++; + func(ctxt); + } while (i < count); + delta = mach_absolute_time() - start; + + conversion = delta; + conversion *= bdata.tbi.numer; + big_denom = bdata.tbi.denom; + big_denom *= count; + conversion /= big_denom; + ns = conversion; + + return ns - bdata.loop_cost; +} diff --git a/src/benchmark.h b/src/benchmark.h new file mode 100644 index 000000000..b77af4586 --- /dev/null +++ b/src/benchmark.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_BENCHMARK__ +#define __DISPATCH_BENCHMARK__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! 
+ * @function dispatch_benchmark + * + * @abstract + * Count the average number of cycles a given block takes to execute. + * + * @param count + * The number of times to serially execute the given block. + * + * @param block + * The block to execute. + * + * @result + * The approximate number of cycles the block takes to execute. + * + * @discussion + * This function is for debugging and performance analysis work. For the best + * results, pass a high count value to dispatch_benchmark(). When benchmarking + * concurrent code, please compare the serial version of the code against the + * concurrent version, and compare the concurrent version on different classes + * of hardware. Please look for inflection points with various data sets and + * keep the following facts in mind: + * + * 1) Code bound by computational bandwidth may be inferred by proportional + * changes in performance as concurrency is increased. + * 2) Code bound by memory bandwidth may be inferred by negligible changes in + * performance as concurrency is increased. + * 3) Code bound by critical sections may be inferred by retrograde changes in + * performance as concurrency is increased. + * 3a) Intentional: locks, mutexes, and condition variables. + * 3b) Accidental: unrelated and frequently modified data on the same cache-line. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NOTHROW +uint64_t +dispatch_benchmark(size_t count, void (^block)(void)); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL3 DISPATCH_NOTHROW +uint64_t +dispatch_benchmark_f(size_t count, void *ctxt, void (*func)(void *)); + +__END_DECLS + +#endif diff --git a/src/dispatch.h b/src/dispatch.h new file mode 100644 index 000000000..95331d7a4 --- /dev/null +++ b/src/dispatch.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_PUBLIC__ +#define __DISPATCH_PUBLIC__ + +#include +#include +#include +#include +#include +#include + +#define DISPATCH_API_VERSION 20090501 + +#ifndef __DISPATCH_BUILDING_DISPATCH__ + +#ifndef __DISPATCH_INDIRECT__ +#define __DISPATCH_INDIRECT__ +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +#undef __DISPATCH_INDIRECT__ + +#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ + +#endif diff --git a/src/group.h b/src/group.h new file mode 100644 index 000000000..a2829482b --- /dev/null +++ b/src/group.h @@ -0,0 +1,273 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_GROUP__ +#define __DISPATCH_GROUP__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/*! + * @typedef dispatch_group_t + * @abstract + * A group of blocks submitted to queues for asynchronous invocation. + */ +DISPATCH_DECL(dispatch_group); + +__BEGIN_DECLS + +/*! + * @function dispatch_group_create + * + * @abstract + * Creates new group with which blocks may be associated. + * + * @discussion + * This function creates a new group with which blocks may be associated. + * The dispatch group may be used to wait for the completion of the blocks it + * references. The group object memory is freed with dispatch_release(). + * + * @result + * The newly created group, or NULL on failure. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_WARN_RESULT +dispatch_group_t +dispatch_group_create(void); + +/*! + * @function dispatch_group_async + * + * @abstract + * Submits a block to a dispatch queue and associates the block with the given + * dispatch group. + * + * @discussion + * Submits a block to a dispatch queue and associates the block with the given + * dispatch group. The dispatch group may be used to wait for the completion + * of the blocks it references. + * + * @param group + * A dispatch group to associate with the submitted block. + * The result of passing NULL in this parameter is undefined. + * + * @param queue + * The dispatch queue to which the block will be submitted for asynchronous + * invocation. + * + * @param block + * The block to perform asynchronously. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL +void +dispatch_group_async(dispatch_group_t group, + dispatch_queue_t queue, + dispatch_block_t block); +#endif /* __BLOCKS__ */ + +/*! 
+ * @function dispatch_group_async_f + * + * @abstract + * Submits a function to a dispatch queue and associates the block with the + * given dispatch group. + * + * @discussion + * See dispatch_group_async() for details. + * + * @param group + * A dispatch group to associate with the submitted function. + * The result of passing NULL in this parameter is undefined. + * + * @param queue + * The dispatch queue to which the function will be submitted for asynchronous + * invocation. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_group_async_f(). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +void +dispatch_group_async_f(dispatch_group_t group, + dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! + * @function dispatch_group_wait + * + * @abstract + * Wait synchronously for the previously submitted blocks to complete; + * returns if the blocks have not completed within the specified timeout. + * + * @discussion + * This function waits for the completion of the blocks associated with the + * given dispatch group, and returns after all blocks have completed or when + * the specified timeout has elapsed. When a timeout occurs, the group is + * restored to its original state. + * + * This function will return immediately if there are no blocks associated + * with the dispatch group (i.e. the group is empty). + * + * The result of calling this function from multiple threads simultaneously + * with the same dispatch group is undefined. + * + * After the successful return of this function, the dispatch group is empty. + * It may either be released with dispatch_release() or re-used for additional + * blocks. See dispatch_group_async() for more information. 
+ * + * @param group + * The dispatch group to wait on. + * The result of passing NULL in this parameter is undefined. + * + * @param timeout + * When to timeout (see dispatch_time). As a convenience, there are the + * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants. + * + * @result + * Returns zero on success (all blocks associated with the group completed + * within the specified timeout) or non-zero on error (i.e. timed out). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL +long +dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); + +/*! + * @function dispatch_group_notify + * + * @abstract + * Schedule a block to be submitted to a queue when a group of previously + * submitted blocks have completed. + * + * @discussion + * This function schedules a notification block to be submitted to the specified + * queue once all blocks associated with the dispatch group have completed. + * + * If no blocks are associated with the dispatch group (i.e. the group is empty) + * then the notification block will be submitted immediately. + * + * The group will be empty at the time the notification block is submitted to + * the target queue. The group may either be released with dispatch_release() + * or reused for additional operations. + * See dispatch_group_async() for more information. + * + * @param group + * The dispatch group to observe. + * The result of passing NULL in this parameter is undefined. + * + * @param queue + * The queue to which the supplied block will be submitted when the group + * completes. + * + * @param block + * The block to submit when the group completes. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL +void +dispatch_group_notify(dispatch_group_t group, + dispatch_queue_t queue, + dispatch_block_t block); +#endif /* __BLOCKS__ */ + +/*! 
+ * @function dispatch_group_notify_f + * + * @abstract + * Schedule a function to be submitted to a queue when a group of previously + * submitted functions have completed. + * + * @discussion + * See dispatch_group_notify() for details. + * + * @param group + * The dispatch group to observe. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_group_notify_f(). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +void +dispatch_group_notify_f(dispatch_group_t group, + dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! + * @function dispatch_group_enter + * + * @abstract + * Manually indicate a block has entered the group + * + * @discussion + * Calling this function indicates another block has joined the group through + * a means other than dispatch_group_async(). Calls to this function must be + * balanced with dispatch_group_leave(). + * + * @param group + * The dispatch group to update. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_group_enter(dispatch_group_t group); + +/*! + * @function dispatch_group_leave + * + * @abstract + * Manually indicate a block in the group has completed + * + * @discussion + * Calling this function indicates a block has completed and left the dispatch + * group by a means other than dispatch_group_async(). + * + * @param group + * The dispatch group to update. + * The result of passing NULL in this parameter is undefined. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_group_leave(dispatch_group_t group); + +__END_DECLS + +#endif diff --git a/src/hw_shims.h b/src/hw_shims.h new file mode 100644 index 000000000..b99bf177f --- /dev/null +++ b/src/hw_shims.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_HW_SHIMS__ +#define __DISPATCH_HW_SHIMS__ + +/* x86 has a 64 byte cacheline */ +#define DISPATCH_CACHELINE_SIZE 64 +#define ROUND_UP_TO_CACHELINE_SIZE(x) (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1)) +#define ROUND_UP_TO_VECTOR_SIZE(x) (((x) + 15) & ~15) + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) +// GCC generates suboptimal register pressure +// LLVM does better, but doesn't support tail calls +// 6248590 __sync_*() intrinsics force a gratuitous "lea" instruction, with resulting register pressure +#if 0 && defined(__i386__) || defined(__x86_64__) +#define dispatch_atomic_xchg(p, n) ({ typeof(*(p)) _r; asm("xchg %0, %1" : "=r" (_r) : "m" (*(p)), "0" (n)); _r; }) +#else +#define dispatch_atomic_xchg(p, n) __sync_lock_test_and_set((p), (n)) +#endif +#define dispatch_atomic_cmpxchg(p, o, n) __sync_bool_compare_and_swap((p), (o), (n)) +#define dispatch_atomic_inc(p) __sync_add_and_fetch((p), 1) +#define dispatch_atomic_dec(p) __sync_sub_and_fetch((p), 1) +#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v)) +#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v)) +#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v)) +#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v)) +#if defined(__i386__) || defined(__x86_64__) +/* GCC emits nothing for __sync_synchronize() on i386/x86_64. */ +#define dispatch_atomic_barrier() __asm__ __volatile__("mfence") +#else +#define dispatch_atomic_barrier() __sync_synchronize() +#endif +#else +#error "Please upgrade to GCC 4.2 or newer." 
+#endif + +#if defined(__i386__) || defined(__x86_64__) +#define _dispatch_hardware_pause() asm("pause") +#define _dispatch_debugger() asm("int3") +#else +#define _dispatch_hardware_pause() asm("") +#define _dispatch_debugger() asm("trap") +#endif +// really just a low level abort() +#define _dispatch_hardware_crash() __builtin_trap() + + +#endif diff --git a/src/internal.h b/src/internal.h new file mode 100644 index 000000000..d55540b0d --- /dev/null +++ b/src/internal.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_INTERNAL__ +#define __DISPATCH_INTERNAL__ + +#define __DISPATCH_BUILDING_DISPATCH__ +#define __DISPATCH_INDIRECT__ +#include "dispatch.h" +#include "base.h" +#include "time.h" +#include "queue.h" +#include "object.h" +#include "source.h" +#include "group.h" +#include "semaphore.h" +#include "once.h" +#include "benchmark.h" + +/* private.h uses #include_next and must be included last to avoid picking + * up installed headers. 
*/ +#include "queue_private.h" +#include "source_private.h" +#include "private.h" +#include "legacy.h" +/* More #includes at EOF (dependent on the contents of internal.h) ... */ + +/* The "_debug" library build */ +#ifndef DISPATCH_DEBUG +#define DISPATCH_DEBUG 0 +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __BLOCKS__ +#include +#include +#endif /* __BLOCKS__ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DISPATCH_NOINLINE __attribute__((noinline)) + +// workaround 6368156 +#ifdef NSEC_PER_SEC +#undef NSEC_PER_SEC +#endif +#ifdef USEC_PER_SEC +#undef USEC_PER_SEC +#endif +#ifdef NSEC_PER_USEC +#undef NSEC_PER_USEC +#endif +#define NSEC_PER_SEC 1000000000ull +#define USEC_PER_SEC 1000000ull +#define NSEC_PER_USEC 1000ull + +/* I wish we had __builtin_expect_range() */ +#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) + +void _dispatch_bug(size_t line, long val) __attribute__((__noinline__)); +void _dispatch_abort(size_t line, long val) __attribute__((__noinline__,__noreturn__)); +void _dispatch_log(const char *msg, ...) __attribute__((__noinline__,__format__(printf,1,2))); +void _dispatch_logv(const char *msg, va_list) __attribute__((__noinline__,__format__(printf,1,0))); + +/* + * For reporting bugs within libdispatch when using the "_debug" version of the library. + */ +#define dispatch_assert(e) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? 
1 : -1] __attribute__((unused)); \ + } else { \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_abort(__LINE__, (long)_e); \ + } \ + } \ + } while (0) +/* A lot of API return zero upon success and not-zero on fail. Let's capture and log the non-zero value */ +#define dispatch_assert_zero(e) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(!(e)) ? 1 : -1] __attribute__((unused)); \ + } else { \ + typeof(e) _e = slowpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && _e) { \ + _dispatch_abort(__LINE__, (long)_e); \ + } \ + } \ + } while (0) + +/* + * For reporting bugs or impedance mismatches between libdispatch and external subsystems. + * These do NOT abort(), and are always compiled into the product. + * + * In particular, we wrap all system-calls with assume() macros. + */ +#define dispatch_assume(e) ({ \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (!_e) { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(e) ? 1 : -1]; \ + (void)__compile_time_assert__; \ + } \ + _dispatch_bug(__LINE__, (long)_e); \ + } \ + _e; \ + }) +/* A lot of API return zero upon success and not-zero on fail. Let's capture and log the non-zero value */ +#define dispatch_assume_zero(e) ({ \ + typeof(e) _e = slowpath(e); /* always eval 'e' */ \ + if (_e) { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(e) ? -1 : 1]; \ + (void)__compile_time_assert__; \ + } \ + _dispatch_bug(__LINE__, (long)_e); \ + } \ + _e; \ + }) + +/* + * For reporting bugs in clients when using the "_debug" version of the library. + */ +#define dispatch_debug_assert(e, msg, args...) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? 
1 : -1] __attribute__((unused)); \ + } else { \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ + abort(); \ + } \ + } \ + } while (0) + + + +#ifdef __BLOCKS__ +dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); +void _dispatch_call_block_and_release(void *block); +void _dispatch_call_block_and_release2(void *block, void *ctxt); +#endif /* __BLOCKS__ */ + +void dummy_function(void); +long dummy_function_r0(void); + + +/* Make sure the debug statements don't get too stale */ +#define _dispatch_debug(x, args...) \ +({ \ + if (DISPATCH_DEBUG) { \ + _dispatch_log("libdispatch: %u\t%p\t" x, __LINE__, _dispatch_thread_self(), ##args); \ + } \ +}) + + +#if DISPATCH_DEBUG +void dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str); +#else +#define dispatch_debug_kevents(x, y, z) +#endif + +uint64_t _dispatch_get_nanoseconds(void); + +void _dispatch_source_drain_kevent(struct kevent *); + +dispatch_source_t +_dispatch_source_create2(dispatch_source_t ds, + dispatch_source_attr_t attr, + void *context, + dispatch_source_handler_function_t handler); + +void _dispatch_update_kq(const struct kevent *); + +void _dispatch_run_timers(void); +// Returns howsoon with updated time value, or NULL if no timers active. 
+struct timespec *_dispatch_get_next_timer_fire(struct timespec *howsoon); + +dispatch_semaphore_t _dispatch_get_thread_semaphore(void); +void _dispatch_put_thread_semaphore(dispatch_semaphore_t); + +bool _dispatch_source_testcancel(dispatch_source_t); + +uint64_t _dispatch_timeout(dispatch_time_t when); + +__private_extern__ bool _dispatch_safe_fork; + +__private_extern__ struct _dispatch_hw_config_s { + uint32_t cc_max_active; + uint32_t cc_max_logical; + uint32_t cc_max_physical; +} _dispatch_hw_config; + +/* #includes dependent on internal.h */ +#include "object_internal.h" +#include "hw_shims.h" +#include "os_shims.h" +#include "queue_internal.h" +#include "semaphore_internal.h" +#include "source_internal.h" + +// MIG_REPLY_MISMATCH means either: +// 1) A signal handler is NOT using async-safe API. See the sigaction(2) man page for more info. +// 2) A hand crafted call to mach_msg*() screwed up. Use MIG. +#define DISPATCH_VERIFY_MIG(x) do { \ + if ((x) == MIG_REPLY_MISMATCH) { \ + __crashreporter_info__ = "MIG_REPLY_MISMATCH"; \ + _dispatch_hardware_crash(); \ + } \ + } while (0) + +#if defined(__x86_64__) || defined(__i386__) +// total hack to ensure that return register of a function is not trashed +#define DISPATCH_CRASH(x) do { \ + asm("mov %1, %0" : "=m" (__crashreporter_info__) : "c" ("BUG IN LIBDISPATCH: " x)); \ + _dispatch_hardware_crash(); \ + } while (0) + +#define DISPATCH_CLIENT_CRASH(x) do { \ + asm("mov %1, %0" : "=m" (__crashreporter_info__) : "c" ("BUG IN CLIENT OF LIBDISPATCH: " x)); \ + _dispatch_hardware_crash(); \ + } while (0) + +#else + +#define DISPATCH_CRASH(x) do { \ + __crashreporter_info__ = "BUG IN LIBDISPATCH: " x; \ + _dispatch_hardware_crash(); \ + } while (0) + +#define DISPATCH_CLIENT_CRASH(x) do { \ + __crashreporter_info__ = "BUG IN CLIENT OF LIBDISPATCH: " x; \ + _dispatch_hardware_crash(); \ + } while (0) + +#endif + + +#endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/legacy.c b/src/legacy.c new file mode 100644 index 
000000000..62329902a --- /dev/null +++ b/src/legacy.c @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include "legacy.h" + +/* + * LEGACY: This header file describes LEGACY interfaces to libdispatch from an + * earlier revision of the API. These interfaces WILL be removed in the future. + */ + +DISPATCH_PUBLIC_API DISPATCH_NONNULL1 DISPATCH_NONNULL2 +dispatch_item_t +LEGACY_dispatch_call(dispatch_queue_t, dispatch_legacy_block_t work, dispatch_legacy_block_t completion) +__asm__("_dispatch_call2"); + +DISPATCH_PUBLIC_API DISPATCH_PURE DISPATCH_WARN_RESULT +dispatch_queue_t +LEGACY_dispatch_queue_get_current(void) +__asm__("_dispatch_queue_get_current"); + +///////////////////////////////////////////////////////////////////////////// + +dispatch_queue_t +LEGACY_dispatch_queue_get_current(void) +{ + return _dispatch_queue_get_current(); +} + +dispatch_item_t +LEGACY_dispatch_call(dispatch_queue_t dq, + dispatch_legacy_block_t dispatch_block, + dispatch_legacy_block_t callback_block) +{ + dispatch_queue_t lq = _dispatch_queue_get_current() ?: dispatch_get_main_queue(); + dispatch_item_t di; + + di = dispatch_block ? 
calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*di))) : NULL; + + if (!di) { + return di; + } + + if (callback_block) { + dispatch_retain(lq); + } + + dispatch_async(dq, ^{ + dispatch_block(di); + + if (callback_block) { + dispatch_async(lq, ^{ + callback_block(di); + free(di); + dispatch_release(lq); + }); + } else { + free(di); + } + }); + + return di; +} + +sigset_t +dispatch_event_get_signals(dispatch_event_t de) +{ + sigset_t ret; + sigemptyset(&ret); + sigaddset(&ret, (int)dispatch_event_get_signal(de)); + return ret; +} + +void dispatch_cancel(dispatch_source_t ds) { dispatch_source_cancel(ds); } +long dispatch_testcancel(dispatch_source_t ds) { return dispatch_source_testcancel(ds); } + +void dispatch_queue_resume(dispatch_queue_t dq) { dispatch_resume(dq); } +void dispatch_queue_retain(dispatch_queue_t dq) { dispatch_retain(dq); } +void dispatch_queue_release(dispatch_queue_t dq) { dispatch_release(dq); } + +void dispatch_source_suspend(dispatch_source_t ds) { dispatch_suspend(ds); } +void dispatch_source_resume(dispatch_source_t ds) { dispatch_resume(ds); } +void dispatch_source_release(dispatch_source_t ds) { dispatch_release(ds); } + +void dispatch_source_attr_release(dispatch_source_attr_t attr) { dispatch_release(attr); } +void dispatch_queue_attr_release(dispatch_queue_attr_t attr) { dispatch_release(attr); } + +void *dispatch_queue_get_context(dispatch_queue_t dq) { return dispatch_get_context(dq); } +void dispatch_queue_set_context(dispatch_queue_t dq, void *context) { dispatch_set_context(dq, context); } + +void *dispatch_source_get_context(dispatch_source_t ds) { return dispatch_get_context(ds); } +void dispatch_source_set_context(dispatch_source_t ds, void *context) { dispatch_set_context(ds, context); } + +void dispatch_source_custom_trigger(dispatch_source_t ds) { dispatch_source_merge_data(ds, 1); } + +void +dispatch_source_trigger(dispatch_source_t ds, unsigned long val) +{ + dispatch_source_merge_data(ds, val); +} + +int 
dispatch_source_get_descriptor(dispatch_source_t ds) { return (int)dispatch_source_get_handle(ds); } + +pid_t dispatch_source_get_pid(dispatch_source_t ds) { return (pid_t)dispatch_source_get_handle(ds); } + +mach_port_t dispatch_source_get_machport(dispatch_source_t ds) { return (mach_port_t)dispatch_source_get_handle(ds); } + +uint64_t dispatch_source_get_flags(dispatch_source_t ds) { return dispatch_source_get_mask(ds); } + +dispatch_source_t dispatch_event_get_source(dispatch_event_t event) { return event; } + +long dispatch_event_get_error(dispatch_event_t event, long* error) { return dispatch_source_get_error(event, error); } + +uint64_t dispatch_event_get_flags(dispatch_event_t event) { return dispatch_source_get_data(event); } + +size_t dispatch_event_get_bytes_available(dispatch_event_t event) { return (size_t)dispatch_source_get_data(event); } + +unsigned long dispatch_event_get_count(dispatch_event_t event) { return (unsigned long)dispatch_source_get_data(event); } + +long dispatch_event_get_signal(dispatch_event_t event) { return (long)dispatch_source_get_handle(event); } + +dispatch_source_t +dispatch_source_custom_create( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_event_handler_t handler) { + return dispatch_source_data_create(behavior, attr, queue, handler); +} + +dispatch_source_t +dispatch_source_custom_create_f( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_event_handler_function_t handler) { + return dispatch_source_data_create_f(behavior, attr, queue, h_context, handler); +} + +#define _dispatch_source_call_block ((void *)-1) + + + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_timer_create(uint64_t flags, + uint64_t nanoseconds, + uint64_t leeway, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_timer_create_f(flags, nanoseconds, leeway, + attr, q, 
callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_timer_create_f(uint64_t timer_flags, + uint64_t nanoseconds, + uint64_t leeway, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + dispatch_time_t start; + + // 6866347 - make sure nanoseconds won't overflow + if ((int64_t)nanoseconds < 0) { + nanoseconds = INT64_MAX; + } + + if (timer_flags & DISPATCH_TIMER_ONESHOT) { + timer_flags |= DISPATCH_TIMER_WALL_CLOCK; + } + if (timer_flags == (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK)) { + static const struct timespec t0; + start = dispatch_walltime(&t0, nanoseconds); + } else if (timer_flags & DISPATCH_TIMER_WALL_CLOCK) { + start = dispatch_walltime(DISPATCH_TIME_NOW, nanoseconds); + } else { + start = dispatch_time(DISPATCH_TIME_NOW, nanoseconds); + } + if (timer_flags & DISPATCH_TIMER_ONESHOT) { + // 6866347 - make sure nanoseconds won't overflow + nanoseconds = INT64_MAX; // non-repeating (~292 years) + } + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, (unsigned long)timer_flags, q); + if (!ds) { + return NULL; + } + ds = _dispatch_source_create2(ds, attr, context, callback); + if (!ds) { + return NULL; + } + dispatch_source_set_timer(ds, start, nanoseconds, leeway); + + return ds; +} + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_read_create(int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_read_create_f(descriptor, + attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_read_create_f(int fd, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef 
__BLOCKS__ +dispatch_source_t +dispatch_source_write_create(int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_write_create_f(descriptor, + attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_write_create_f(int fd, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, fd, 0, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_vnode_create(int descriptor, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_vnode_create_f(descriptor, + flags, attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_vnode_create_f(int fd, + uint64_t event_mask, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, fd, (unsigned long)event_mask, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_signal_create(unsigned long sig, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_signal_create_f(sig, + attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_signal_create_f(unsigned long signo, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, signo, 0, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef 
__BLOCKS__ +dispatch_source_t +dispatch_source_proc_create(pid_t pid, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_proc_create_f(pid, + flags, attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_proc_create_f(pid_t pid, + uint64_t event_mask, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pid, (unsigned long)event_mask, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_vfs_create(uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_vfs_create_f(flags, + attr, q, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t +dispatch_source_vfs_create_f(uint64_t event_mask, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, 0, (unsigned long)event_mask, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_data_create(unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t q, + dispatch_source_handler_t callback) +{ + return dispatch_source_data_create_f(behavior, + attr, q, callback, _dispatch_source_call_block); +} +#endif + +#ifdef __BLOCKS__ +dispatch_source_t +dispatch_source_machport_create(mach_port_t mport, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t dq, + dispatch_source_handler_t callback) +{ + return dispatch_source_machport_create_f(mport, flags, + attr, dq, callback, _dispatch_source_call_block); +} +#endif + +dispatch_source_t 
+dispatch_source_data_create_f(unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t q, + void *context, + dispatch_source_handler_function_t callback) +{ + dispatch_source_t ds; + dispatch_source_type_t type; + switch (behavior) { + case DISPATCH_SOURCE_CUSTOM_ADD: + type = DISPATCH_SOURCE_TYPE_DATA_ADD; + break; + case DISPATCH_SOURCE_CUSTOM_OR: + type = DISPATCH_SOURCE_TYPE_DATA_OR; + break; + default: + return NULL; + } + ds = dispatch_source_create(type, 0, 0, q); + return _dispatch_source_create2(ds, attr, context, callback); +} + +dispatch_source_t +dispatch_source_machport_create_f(mach_port_t mport, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t dq, + void *ctxt, + dispatch_source_handler_function_t func) +{ + dispatch_source_t ds; + dispatch_source_type_t type; + unsigned long newflags = 0; + + if (flags & ~(DISPATCH_MACHPORT_DEAD|DISPATCH_MACHPORT_RECV)) { + return NULL; + } + // XXX DELETED + if (flags & DISPATCH_MACHPORT_DEAD) { + type = DISPATCH_SOURCE_TYPE_MACH_SEND; + newflags |= DISPATCH_MACH_SEND_DEAD; + } else { + type = DISPATCH_SOURCE_TYPE_MACH_RECV; + } + + ds = dispatch_source_create(type, mport, newflags, dq); + return _dispatch_source_create2(ds, attr, ctxt, func); +} + diff --git a/src/legacy.h b/src/legacy.h new file mode 100644 index 000000000..e6bffbc59 --- /dev/null +++ b/src/legacy.h @@ -0,0 +1,748 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +/* + * LEGACY: This header file describes LEGACY interfaces to libdispatch from an + * earlier revision of the API. These interfaces WILL be removed in the future. + */ + +#ifndef __DISPATCH_LEGACY__ +#define __DISPATCH_LEGACY__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include <dispatch/dispatch.h> instead of this file directly." +#include <dispatch/base.h> // for HeaderDoc +#endif + +#include <mach/mach.h> + +#define DISPATCH_DEPRECATED __attribute__((deprecated)) +#define DISPATCH_PUBLIC_API __attribute__((visibility("default"))) + +typedef struct dispatch_item_s *dispatch_item_t; + +struct dispatch_item_s { + void * di_objc_isa; /* FIXME -- someday... */ + struct dispatch_item_s *volatile di_next; + dispatch_queue_t di_cback_q; + uint32_t di_flags; + semaphore_t di_semaphore; + void * di_work_func; + void * di_work_ctxt; + void * di_cback_func; + void * di_cback_ctxt; + void * di_ctxt; +}; + +// Use: dispatch_source_t +typedef struct dispatch_source_s *dispatch_event_t; + +// Obsolete +#ifdef __BLOCKS__ +typedef void (^dispatch_legacy_block_t)(dispatch_item_t); +typedef void (^dispatch_queue_deletion_block_t)(dispatch_queue_t queue); +typedef void (^dispatch_source_deletion_t)(dispatch_source_t source); +typedef void (^dispatch_event_callback_t)(dispatch_event_t event); +typedef void (^dispatch_source_handler_t)(dispatch_source_t source); +typedef dispatch_source_handler_t dispatch_event_handler_t; +typedef void (^dispatch_source_finalizer_t)(dispatch_source_t source); +#endif /* __BLOCKS__ */ + +// Obsolete +typedef void (*dispatch_source_handler_function_t)(void *, dispatch_source_t); +typedef void 
(*dispatch_source_finalizer_function_t)(void *, dispatch_source_t); +typedef dispatch_source_handler_function_t dispatch_event_handler_function_t; + +DISPATCH_DECL(dispatch_source_attr); + +#define DISPATCH_SOURCE_CREATE_SUSPENDED ((dispatch_source_attr_t)~0ul) + +#ifdef __BLOCKS__ +typedef void (^dispatch_queue_finalizer_t)(dispatch_queue_t queue); +#endif + +typedef void (*dispatch_queue_finalizer_function_t)(void *, dispatch_queue_t); + +__BEGIN_DECLS + +/*! + * @function dispatch_queue_attr_create + * + * @abstract + * Creates a new dispatch queue attribute structure. These attributes may be + * provided at creation time to modify the default behavior of the queue. + * + * @discussion + * The values present in this structure are copied to newly created queues. + * The same attribute structure may be provided to multiple calls to + * dispatch_queue_create() but only the values in the structure at the time the + * call is made will be used. + * + * @result + * The new dispatch queue attribute structure, initialized to default values. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_create(void); + +/*! + * @function dispatch_queue_attr_set_priority + * + * @abstract + * Set the priority level for a dispatch queue. + * + * @discussion + * Priority levels may be: + * - DISPATCH_QUEUE_PRIORITY_HIGH + * - DISPATCH_QUEUE_PRIORITY_DEFAULT + * - DISPATCH_QUEUE_PRIORITY_LOW + * Queues set to high priority will be processed + * before queues set to default priority or low priority. + * Queues set to low priority will be processed only if all + * high priority and default priority queues are empty. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority); + +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +long +dispatch_queue_attr_set_finalizer( + dispatch_queue_attr_t attr, + dispatch_queue_finalizer_t finalizer); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr, void *context, dispatch_queue_finalizer_function_t finalizer); + +/*! + * @function dispatch_get_concurrent_queue + * + * @abstract + * Returns a well-known global concurrent queue of a given priority level. + * + * @discussion + * Blocks submitted to the returned queue may be invoked concurrently with + * respect to each other. + * + * These queues are useful for performing one-shot asynchronous operations, + * e.g. dispatch_async() to an "anonymous" queue; or for performing parallel + * loops concurrently on multiple processors, e.g. dispatch_apply(). + * + * The dispatch queues returned by this function are managed by the system for + * the lifetime of the application, and need not be retained or released + * directly by the application. Furthermore, dispatch_suspend() and + * dispatch_queue_resume() are not supported on these global queues, and will + * be ignored. + * + * @param priority + * The requested priority level for the queue (default is zero): + * - DISPATCH_QUEUE_PRIORITY_HIGH + * - DISPATCH_QUEUE_PRIORITY_DEFAULT + * - DISPATCH_QUEUE_PRIORITY_LOW + * + * @result + * Returns a concurrent dispatch queue for use with dispatch_async(), + * dispatch_apply(), et al. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t +dispatch_get_concurrent_queue(long priority); + +DISPATCH_PUBLIC_API //DISPATCH_DEPRECATED +void +dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags); + +#ifdef __BLOCKS__ +DISPATCH_PUBLIC_API DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_DEPRECATED +dispatch_item_t +dispatch_call(dispatch_queue_t, dispatch_legacy_block_t work, dispatch_legacy_block_t completion) +__asm__("_dispatch_call2"); +#endif /* __BLOCKS__ */ + +DISPATCH_PUBLIC_API DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_DEPRECATED +dispatch_queue_t +dispatch_queue_get_current(void); + +// Use: dispatch_retain +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_queue_retain(dispatch_queue_t); + +// Use: dispatch_release +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_queue_release(dispatch_queue_t); + +// Use: dispatch_resume +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_queue_resume(dispatch_queue_t); + +// Use: dispatch_release +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_source_release(dispatch_source_t); + +// Use: dispatch_suspend +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_source_suspend(dispatch_source_t); + +// Use: dispatch_resume +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_source_resume(dispatch_source_t); + +// Use: dispatch_release +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_queue_attr_release(dispatch_queue_attr_t); + +// Use: dispatch_release +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_source_attr_release(dispatch_source_attr_t); + +// Use: dispatch_source_get_handle +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_DEPRECATED +sigset_t 
+dispatch_event_get_signals(dispatch_event_t event); + +// Use: dispatch_get_context +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL //DISPATCH_DEPRECATED +void * +dispatch_queue_get_context(dispatch_queue_t queue); + +// Use: dispatch_set_context +DISPATCH_PUBLIC_API DISPATCH_NONNULL1 //DISPATCH_DEPRECATED +void +dispatch_queue_set_context(dispatch_queue_t queue, void *context); + +// Use: dispatch_get_context +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL //DISPATCH_DEPRECATED +void * +dispatch_source_get_context(dispatch_source_t source); + +// Use: dispatch_set_context +DISPATCH_PUBLIC_API DISPATCH_NONNULL1 //DISPATCH_DEPRECATED +void +dispatch_source_set_context(dispatch_source_t source, void * context); + +// Use: dispatch_source_merge_data +DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED +void +dispatch_source_custom_trigger(dispatch_source_t ds); + +// Use: dispatch_source_cancel +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_cancel(dispatch_source_t); + +// Use: dispatch_source_testcancel +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +long +dispatch_testcancel(dispatch_source_t); + +// Use: dispatch_source_set_timer +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +long +dispatch_source_timer_set_time(dispatch_source_t ds, + uint64_t nanoseconds, + uint64_t leeway); + +// Use: dispatch_source_merge_data +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_trigger(dispatch_source_t source, unsigned long value); + +enum { + DISPATCH_ERROR_DOMAIN_NO_ERROR = 0, + DISPATCH_ERROR_DOMAIN_POSIX = 1, + DISPATCH_ERROR_DOMAIN_MACH = 2, +}; + +// Obsolete +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +long +dispatch_source_get_error(dispatch_source_t source, long* error); + +// Use: dispatch_source_get_handle 
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +mach_port_t +dispatch_source_get_machport(dispatch_source_t source); + +// Use: dispatch_source_get_handle +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +pid_t +dispatch_source_get_descriptor(dispatch_source_t source); + +// Use: dispatch_source_get_handle +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +pid_t +dispatch_source_get_pid(dispatch_source_t source); + +// Use: dispatch_source_get_mask +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +uint64_t +dispatch_source_get_flags(dispatch_source_t source); + +// LEGACY: dispatch_event_t == dispatch_source_t +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_source_t +dispatch_event_get_source(dispatch_event_t event); + +// Use: dispatch_source_get_error +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +long +dispatch_event_get_error(dispatch_event_t event, long* error); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +uint64_t +dispatch_event_get_nanoseconds(dispatch_event_t event); + +// Use: dispatch_source_get_handle +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +long +dispatch_event_get_signal(dispatch_event_t event); + +// Use: dispatch_source_get_data +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +uint64_t +dispatch_event_get_flags(dispatch_event_t event); + +// Use: dispatch_source_get_data +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW 
+size_t +dispatch_event_get_bytes_available(dispatch_event_t event); + +// Use: dispatch_source_get_data +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +unsigned long +dispatch_event_get_count(dispatch_event_t event); + +// Obsolete +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_source_attr_t +dispatch_source_attr_create(void); + +// Obsolete +#if defined(__BLOCKS__) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW +dispatch_source_finalizer_t +dispatch_source_attr_get_finalizer(dispatch_source_attr_t attr); +#endif /* __BLOCKS__ */ + +// Obsolete +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_source_attr_t +dispatch_source_attr_copy(dispatch_source_attr_t proto); + +// Obsolete +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +long +dispatch_source_attr_set_finalizer( + dispatch_source_attr_t attr, + dispatch_source_finalizer_t finalizer); +#endif /* __BLOCKS__ */ + +// Obsolete +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_attr_set_finalizer_f( + dispatch_source_attr_t attr, + void *context, + dispatch_source_finalizer_function_t finalizer); + +// Obsolete +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_attr_set_context( + dispatch_source_attr_t attr, + void *context); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) 
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_mig_create( + mach_port_t mport, + size_t max_size, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_mig_callback_t mig_callback); + +enum { + DISPATCH_TIMER_WALL_CLOCK = 0x4, +}; + +enum { + DISPATCH_TIMER_INTERVAL = 0x0, + DISPATCH_TIMER_ONESHOT = 0x1, + DISPATCH_TIMER_ABSOLUTE = 0x3, +}; + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_timer_create( + uint64_t flags, + uint64_t nanoseconds, + uint64_t leeway, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL5 DISPATCH_NONNULL7 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_timer_create_f( + uint64_t flags, + uint64_t nanoseconds, + uint64_t leeway, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_signal_create( + unsigned long signo, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, ...) 
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_signal_create_f( + unsigned long sig, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_read_create( + int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_read_create_f( + int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_write_create( + int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_write_create_f( + int descriptor, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, ...) 
+#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_vnode_create( + int descriptor, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_vnode_create_f( + int descriptor, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_proc_create( + pid_t pid, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_proc_create_f( + pid_t pid, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +enum { + DISPATCH_MACHPORT_DEAD = 0x1, + DISPATCH_MACHPORT_RECV = 0x2, + DISPATCH_MACHPORT_DELETED = 0x4, +}; + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) 
+#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_machport_create( + mach_port_t mport, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_machport_create_f( + mach_port_t mport, + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +enum { + DISPATCH_SOURCE_DATA_ADD = 1, + DISPATCH_SOURCE_DATA_OR, +}; + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_data_create( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_data_create_f( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +enum { + DISPATCH_SOURCE_CUSTOM_ADD = DISPATCH_SOURCE_DATA_ADD, + DISPATCH_SOURCE_CUSTOM_OR = DISPATCH_SOURCE_DATA_OR, +}; + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) 
+#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_custom_create( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_event_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_custom_create_f( + unsigned long behavior, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_event_handler_function_t handler); + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, ...) +#if defined(__BLOCKS__) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 +dispatch_source_t +dispatch_source_vfs_create( + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + dispatch_source_handler_t handler); +#endif /* __BLOCKS__ */ + +// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, ...) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 +dispatch_source_t +dispatch_source_vfs_create_f( + uint64_t flags, + dispatch_source_attr_t attr, + dispatch_queue_t queue, + void *h_context, + dispatch_source_handler_function_t handler); + +/* + * Raw Mach message support from MIG source. + * + * It is possible to use the following callback style with the MIG source to + * obtain the raw mach message (and send no reply) similar to CFMachPort. + * (For more specific CFMachPort compatibility, see below). + * + * void handle_mach_msg(mach_msg_header *msg) { ... } + * ... + * DISPATCH_MACHPORT_CALLBACK_DECL(mig_compat_callback, handle_mach_msg); + * ... 
+ * mig = dispatch_source_mig_create(mp, MY_MAX_MSG_SIZE, NULL, + * queue, mig_compat_callback); + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +boolean_t +_dispatch_machport_callback(mach_msg_header_t *msg, mach_msg_header_t *reply, void (*callback)(mach_msg_header_t *)); + +#define DISPATCH_MACHPORT_CALLBACK_DECL(new_callback, existing_callback) \ +__private_extern__ boolean_t \ +new_callback(mach_msg_header_t *msg, mach_msg_header_t *reply) \ +{ return _dispatch_machport_callback(msg, reply, existing_callback); } + +/* + * CFMachPort compatibility. + * + * It is possible to use existing CFMachPort callbacks with dispatch mig sources + * by declaring the following shim and using the shim as the mig server callback + * to dispatch_source_mig_create(). + * The CFMachPortRef "port" parameter of the CFMachPortCallBack will be NULL. + * If mach_port_set_context() is used, that value will be passed into the "info" + * parameter of the CFMachPortCallBack. + * + * DISPATCH_CFMACHPORT_CALLBACK_DECL(mig_callback, MyCFMachPortCallBack); + * + * ... + * { + * kr = mach_port_set_context(mach_task_self(), mp, (mach_vm_address_t)context); + * mig = dispatch_source_mig_create(mp, MY_MAX_MSG_SIZE, NULL, + * queue, mig_callback); + */ +struct __CFMachPort; + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +boolean_t +_dispatch_CFMachPortCallBack(mach_msg_header_t *msg, mach_msg_header_t *reply, void (*callback)(struct __CFMachPort *, void *msg, signed long size, void *)); + +#define DISPATCH_CFMACHPORT_CALLBACK_DECL(new_callback, existing_callback) \ +__private_extern__ boolean_t \ +new_callback(mach_msg_header_t *msg, mach_msg_header_t *reply) \ +{ return _dispatch_CFMachPortCallBack(msg, reply, existing_callback); } + +__END_DECLS + +#endif diff --git a/src/object.c b/src/object.c new file mode 100644 index 000000000..8746495bc --- /dev/null +++ b/src/object.c @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + + +void +dispatch_debug(dispatch_object_t dou, const char *msg, ...) +{ + va_list ap; + + va_start(ap, msg); + + dispatch_debugv(dou._do, msg, ap); + + va_end(ap); +} + +void +dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +{ + char buf[4096]; + size_t offs; + + if (dou._do && dou._do->do_vtable->do_debug) { + offs = dx_debug(dou._do, buf, sizeof(buf)); + } else { + offs = snprintf(buf, sizeof(buf), "NULL vtable slot"); + } + + snprintf(buf + offs, sizeof(buf) - offs, ": %s", msg); + + _dispatch_logv(buf, ap); +} + +void +dispatch_retain(dispatch_object_t dou) +{ + if (dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; // global object + } + if ((dispatch_atomic_inc(&dou._do->do_xref_cnt) - 1) == 0) { + DISPATCH_CLIENT_CRASH("Resurrection of an object"); + } +} + +void +_dispatch_retain(dispatch_object_t dou) +{ + if (dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; // global object + } + if ((dispatch_atomic_inc(&dou._do->do_ref_cnt) - 1) == 0) { + DISPATCH_CLIENT_CRASH("Resurrection of an object"); + } +} + +void +dispatch_release(dispatch_object_t dou) +{ + typeof(dou._do->do_xref_cnt) oldval; + + if (dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; + } + + oldval = dispatch_atomic_dec(&dou._do->do_xref_cnt) + 1; + + if 
(fastpath(oldval > 1)) { + return; + } + if (oldval == 1) { +#ifndef DISPATCH_NO_LEGACY + if (dou._do->do_vtable == (void*)&_dispatch_source_kevent_vtable) { + return _dispatch_source_legacy_xref_release(dou._ds); + } +#endif + if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH("Release of a suspended object"); + } + return _dispatch_release(dou._do); + } + DISPATCH_CLIENT_CRASH("Over-release of an object"); +} + +void +_dispatch_dispose(dispatch_object_t dou) +{ + dispatch_queue_t tq = dou._do->do_targetq; + dispatch_function_t func = dou._do->do_finalizer; + void *ctxt = dou._do->do_ctxt; + + dou._do->do_vtable = (void *)0x200; + + free(dou._do); + + if (func && ctxt) { + dispatch_async_f(tq, ctxt, func); + } + _dispatch_release(tq); +} + +void +_dispatch_release(dispatch_object_t dou) +{ + typeof(dou._do->do_ref_cnt) oldval; + + if (dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; // global object + } + + oldval = dispatch_atomic_dec(&dou._do->do_ref_cnt) + 1; + + if (fastpath(oldval > 1)) { + return; + } + if (oldval == 1) { + if (dou._do->do_next != DISPATCH_OBJECT_LISTLESS) { + DISPATCH_CRASH("release while enqueued"); + } + if (dou._do->do_xref_cnt) { + DISPATCH_CRASH("release while external references exist"); + } + + return dx_dispose(dou._do); + } + DISPATCH_CRASH("over-release"); +} + +void * +dispatch_get_context(dispatch_object_t dou) +{ + return dou._do->do_ctxt; +} + +void +dispatch_set_context(dispatch_object_t dou, void *context) +{ + if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT) { + dou._do->do_ctxt = context; + } +} + +void +dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) +{ + dou._do->do_finalizer = finalizer; +} + +void +dispatch_suspend(dispatch_object_t dou) +{ + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; + } + dispatch_atomic_add(&dou._do->do_suspend_cnt, 
DISPATCH_OBJECT_SUSPEND_INTERVAL); +} + +void +dispatch_resume(dispatch_object_t dou) +{ + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; + } + switch (dispatch_atomic_sub(&dou._do->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL) + DISPATCH_OBJECT_SUSPEND_INTERVAL) { + case DISPATCH_OBJECT_SUSPEND_INTERVAL: + _dispatch_wakeup(dou._do); + break; + case 0: + DISPATCH_CLIENT_CRASH("Over-resume of an object"); + break; + default: + break; + } +} + +size_t +dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) +{ + return snprintf(buf, bufsiz, "refcnt = 0x%x, suspend_cnt = 0x%x, ", + dou._do->do_ref_cnt, dou._do->do_suspend_cnt); +} diff --git a/src/object.h b/src/object.h new file mode 100644 index 000000000..febc960dc --- /dev/null +++ b/src/object.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_OBJECT__ +#define __DISPATCH_OBJECT__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! + * @function dispatch_debug + * + * @abstract + * Programmatically log debug information about a dispatch object. + * + * @param object + * The object to introspect. 
+ * + * @param message + * The message to log above and beyond the introspection. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) +void +dispatch_debug(dispatch_object_t object, const char *message, ...); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) +void +dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); + +/*! + * @function dispatch_retain + * + * @abstract + * Increment the reference count of a dispatch object. + * + * @discussion + * Calls to dispatch_retain() must be balanced with calls to + * dispatch_release(). + * + * @param object + * The object to retain. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_retain(dispatch_object_t object); + +/*! + * @function dispatch_release + * + * @abstract + * Decrement the reference count of a dispatch object. + * + * @discussion + * A dispatch object is asynchronously deallocated once all references are + * released (i.e. the reference count becomes zero). The system does not + * guarantee that a given client is the last or only reference to a given + * object. + * + * @param object + * The object to release. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_release(dispatch_object_t object); + +/*! + * @function dispatch_get_context + * + * @abstract + * Returns the application defined context of the object. + * + * @param object + * The result of passing NULL in this parameter is undefined. + * + * @result + * The context of the object; may be NULL. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +void * +dispatch_get_context(dispatch_object_t object); + +/*! + * @function dispatch_set_context + * + * @abstract + * Associates an application defined context with the object. + * + * @param object + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The new client defined context for the object. This may be NULL. + * + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW //DISPATCH_NONNULL1 +void +dispatch_set_context(dispatch_object_t object, void *context); + +/*! + * @function dispatch_set_finalizer_f + * + * @abstract + * Set the finalizer function for a dispatch object. + * + * @param + * The dispatch object to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param + * The finalizer function pointer. + * + * @discussion + * A dispatch object's finalizer will be invoked on the object's target queue + * after all references to the object have been released. This finalizer may be + * used by the application to release any resources associated with the object, + * such as freeing the object's context. + * The context parameter passed to the finalizer function is the current + * context of the dispatch object at the time the finalizer call is made. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW //DISPATCH_NONNULL1 +void +dispatch_set_finalizer_f(dispatch_object_t object, + dispatch_function_t finalizer); + +/*! + * @function dispatch_suspend + * + * @abstract + * Suspends the invocation of blocks on a dispatch object. + * + * @discussion + * A suspended object will not invoke any blocks associated with it. The + * suspension of an object will occur after any running block associated with + * the object completes. + * + * Calls to dispatch_suspend() must be balanced with calls + * to dispatch_resume(). 
+ * + * @param object + * The object to be suspended. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_suspend(dispatch_object_t object); + +/*! + * @function dispatch_resume + * + * @abstract + * Resumes the invocation of blocks on a dispatch object. + * + * @param object + * The object to be resumed. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_resume(dispatch_object_t object); + +__END_DECLS + +#endif diff --git a/src/object_internal.h b/src/object_internal.h new file mode 100644 index 000000000..cc048be70 --- /dev/null +++ b/src/object_internal.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_OBJECT_INTERNAL__ +#define __DISPATCH_OBJECT_INTERNAL__ + +enum { + _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations + _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues + _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources + _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores + _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attribute structures + + DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, + + DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE | _DISPATCH_ATTR_TYPE, + + DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + + DISPATCH_SEMAPHORE_TYPE = _DISPATCH_SEMAPHORE_TYPE, + + DISPATCH_SOURCE_ATTR_TYPE = _DISPATCH_SOURCE_TYPE | _DISPATCH_ATTR_TYPE, + + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, +}; + +#define DISPATCH_VTABLE_HEADER(x) \ + unsigned long const do_type; \ + const char *const do_kind; \ + size_t (*const do_debug)(struct x *, char *, size_t); \ + struct dispatch_queue_s *(*const do_invoke)(struct x *); \ + bool (*const do_probe)(struct x *); \ + void (*const do_dispose)(struct x *) + +#define dx_type(x) (x)->do_vtable->do_type +#define dx_kind(x) (x)->do_vtable->do_kind +#define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) +#define dx_dispose(x) (x)->do_vtable->do_dispose(x) +#define dx_invoke(x) (x)->do_vtable->do_invoke(x) +#define dx_probe(x) (x)->do_vtable->do_probe(x) + +#define DISPATCH_STRUCT_HEADER(x, y) \ + const struct y *do_vtable; \ + struct x *volatile do_next; \ + unsigned int do_ref_cnt; \ + unsigned int do_xref_cnt; \ + unsigned int do_suspend_cnt; \ + struct dispatch_queue_s *do_targetq; \ + void *do_ctxt; \ + void *do_finalizer + +#define DISPATCH_OBJECT_GLOBAL_REFCNT (~0u) +#define DISPATCH_OBJECT_SUSPEND_LOCK 1u // "word and bit" must be a power of two to be safely subtracted +#define 
DISPATCH_OBJECT_SUSPEND_INTERVAL 2u +#define DISPATCH_OBJECT_SUSPENDED(x) ((x)->do_suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL) +#ifdef __LP64__ +// the bottom nibble must not be zero, the rest of the bits should be random +// we sign extend the 64-bit version so that a better instruction encoding is generated on Intel +#define DISPATCH_OBJECT_LISTLESS ((void *)0xffffffff89abcdef) +#else +#define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) +#endif + +#define _dispatch_trysuspend(x) __sync_bool_compare_and_swap(&(x)->do_suspend_cnt, 0, DISPATCH_OBJECT_SUSPEND_INTERVAL) +// _dispatch_source_invoke() relies on this testing the whole suspend count +// word, not just the lock bit. In other words, no point taking the lock +// if the source is suspended or canceled. +#define _dispatch_trylock(x) dispatch_atomic_cmpxchg(&(x)->do_suspend_cnt, 0, DISPATCH_OBJECT_SUSPEND_LOCK) + +struct dispatch_object_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_object_s); +}; + +struct dispatch_object_s { + DISPATCH_STRUCT_HEADER(dispatch_object_s, dispatch_object_vtable_s); +}; + +size_t dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); + +void _dispatch_retain(dispatch_object_t dou); +void _dispatch_release(dispatch_object_t dou); +void _dispatch_dispose(dispatch_object_t dou); +dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); + +#endif diff --git a/src/once.c b/src/once.c new file mode 100644 index 000000000..9046c065e --- /dev/null +++ b/src/once.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#undef dispatch_once +#undef dispatch_once_f + +#ifdef __BLOCKS__ +void +dispatch_once(dispatch_once_t *val, void (^block)(void)) +{ + struct Block_basic *bb = (void *)block; + + dispatch_once_f(val, block, (void *)bb->Block_invoke); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_once_f(dispatch_once_t *val, void *ctxt, void (*func)(void *)) +{ + volatile long *vval = val; + + if (dispatch_atomic_cmpxchg(val, 0l, 1l)) { + func(ctxt); + + // The next barrier must be long and strong. + // + // The scenario: SMP systems with weakly ordered memory models + // and aggressive out-of-order instruction execution. + // + // The problem: + // + // The dispatch_once*() wrapper macro causes the callee's + // instruction stream to look like this (pseudo-RISC): + // + // load r5, pred-addr + // cmpi r5, -1 + // beq 1f + // call dispatch_once*() + // 1f: + // load r6, data-addr + // + // May be re-ordered like so: + // + // load r6, data-addr + // load r5, pred-addr + // cmpi r5, -1 + // beq 1f + // call dispatch_once*() + // 1f: + // + // Normally, a barrier on the read side is used to workaround + // the weakly ordered memory model. But barriers are expensive + // and we only need to synchronize once! After func(ctxt) + // completes, the predicate will be marked as "done" and the + // branch predictor will correctly skip the call to + // dispatch_once*(). + // + // A far faster alternative solution: Defeat the speculative + // read-ahead of peer CPUs. 
+ // + // Modern architectures will throw away speculative results + // once a branch mis-prediction occurs. Therefore, if we can + // ensure that the predicate is not marked as being complete + // until long after the last store by func(ctxt), then we have + // defeated the read-ahead of peer CPUs. + // + // In other words, the last "store" by func(ctxt) must complete + // and then N cycles must elapse before ~0l is stored to *val. + // The value of N is whatever is sufficient to defeat the + // read-ahead mechanism of peer CPUs. + // + // On some CPUs, the most fully synchronizing instruction might + // need to be issued. + + dispatch_atomic_barrier(); + *val = ~0l; + } else { + do { + _dispatch_hardware_pause(); + } while (*vval != ~0l); + + dispatch_atomic_barrier(); + } +} diff --git a/src/once.h b/src/once.h new file mode 100644 index 000000000..8cd25d61b --- /dev/null +++ b/src/once.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_ONCE__ +#define __DISPATCH_ONCE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! + * @typedef dispatch_once_t + * + * @abstract + * A predicate for use with dispatch_once(). It must be initialized to zero. 
+ * Note: static and global variables default to zero. + */ +typedef long dispatch_once_t; + +/*! + * @function dispatch_once + * + * @abstract + * Execute a block once and only once. + * + * @param predicate + * A pointer to a dispatch_once_t that is used to test whether the block has + * completed or not. + * + * @param block + * The block to execute once. + * + * @discussion + * Always call dispatch_once() before using or testing any variables that are + * initialized by the block. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_once(dispatch_once_t *predicate, dispatch_block_t block); +#ifdef __GNUC__ +#define dispatch_once(x, ...) do { if (__builtin_expect(*(x), ~0l) != ~0l) dispatch_once((x), (__VA_ARGS__)); } while (0) +#endif +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_once_f(dispatch_once_t *predicate, void *context, void (*function)(void *)); +#ifdef __GNUC__ +#define dispatch_once_f(x, y, z) do { if (__builtin_expect(*(x), ~0l) != ~0l) dispatch_once_f((x), (y), (z)); } while (0) +#endif + +__END_DECLS + +#endif diff --git a/src/os_shims.h b/src/os_shims.h new file mode 100644 index 000000000..7efd28e75 --- /dev/null +++ b/src/os_shims.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_OS_SHIMS__ +#define __DISPATCH_OS_SHIMS__ + +#include +#include +#include + +__private_extern__ const char *__crashreporter_info__; + +static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; +static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; +static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; +static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY3; +//__PTK_LIBDISPATCH_KEY4 +//__PTK_LIBDISPATCH_KEY5 + + +#define SIMULATE_5491082 1 +#ifndef _PTHREAD_TSD_OFFSET +#define _PTHREAD_TSD_OFFSET 0 +#endif + +static inline void +_dispatch_thread_setspecific(unsigned long k, void *v) +{ +#if defined(SIMULATE_5491082) && defined(__i386__) + asm("movl %1, %%gs:%0" : "=m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET)) : "ri" (v) : "memory"); +#elif defined(SIMULATE_5491082) && defined(__x86_64__) + asm("movq %1, %%gs:%0" : "=m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET)) : "rn" (v) : "memory"); +#else + int res; + if (_pthread_has_direct_tsd()) { + res = _pthread_setspecific_direct(k, v); + } else { + res = pthread_setspecific(k, v); + } + dispatch_assert_zero(res); +#endif +} + +static inline void * +_dispatch_thread_getspecific(unsigned long k) +{ +#if defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__)) + void *rval; + asm("mov %%gs:%1, %0" : "=r" (rval) : "m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET))); + return rval; +#else + if (_pthread_has_direct_tsd()) { + return _pthread_getspecific_direct(k); + } else { + return pthread_getspecific(k); + } +#endif +} + +static 
inline void +_dispatch_thread_key_init_np(unsigned long k, void (*d)(void *)) +{ + dispatch_assert_zero(pthread_key_init_np((int)k, d)); +} + +#define _dispatch_thread_self pthread_self + + +#if DISPATCH_PERF_MON + +#if defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__)) +#ifdef __LP64__ +#define _dispatch_workitem_inc() asm("incq %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") +#define _dispatch_workitem_dec() asm("decq %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") +#else +#define _dispatch_workitem_inc() asm("incl %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") +#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") +#endif +#else +static inline void +_dispatch_workitem_inc(void) +{ + unsigned long cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); +} +static inline void +_dispatch_workitem_dec(void) +{ + unsigned long cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt); +} +#endif + +// C99 doesn't define flsll() or ffsll() +#ifdef __LP64__ +#define flsll(x) flsl(x) +#else +static inline unsigned int +flsll(uint64_t val) +{ + union { + struct { +#ifdef __BIG_ENDIAN__ + unsigned int hi, low; +#else + unsigned int low, hi; +#endif + } words; + uint64_t word; + } _bucket = { + .word = val, + }; + if (_bucket.words.hi) { + return fls(_bucket.words.hi) + 32; + } + return fls(_bucket.words.low); +} +#endif + +#else +#define _dispatch_workitem_inc() +#define _dispatch_workitem_dec() +#endif // DISPATCH_PERF_MON + +#endif diff --git a/src/private.h b/src/private.h new file mode 100644 index 000000000..8d817fe9f 
--- /dev/null +++ b/src/private.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_PRIVATE__ +#define __DISPATCH_PRIVATE__ + +#include +#include +#include +#include +#include +#include +#include + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#include_next + +// Workaround +#ifndef __DISPATCH_PUBLIC__ +#include "/usr/include/dispatch/dispatch.h" +#endif + +#ifndef __DISPATCH_INDIRECT__ +#define __DISPATCH_INDIRECT__ +#endif + +#include +#include +#include + +#ifndef DISPATCH_NO_LEGACY +#include +#endif + +#undef __DISPATCH_INDIRECT__ + +#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ + +/* LEGACY: Use DISPATCH_API_VERSION */ +#define LIBDISPATCH_VERSION DISPATCH_API_VERSION + +__BEGIN_DECLS + +DISPATCH_NOTHROW +void +libdispatch_init(void); + +#define DISPATCH_COCOA_COMPAT 1 +#if DISPATCH_COCOA_COMPAT + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW +mach_port_t +_dispatch_get_main_queue_port_4CF(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW +void +_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); + 
+__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern void (*dispatch_begin_thread_4GC)(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern void (*dispatch_end_thread_4GC)(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern void *(*_dispatch_begin_NSAutoReleasePool)(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern void (*_dispatch_end_NSAutoReleasePool)(void *); + +#endif + +/* pthreads magic */ + +DISPATCH_NOTHROW void dispatch_atfork_prepare(void); +DISPATCH_NOTHROW void dispatch_atfork_parent(void); +DISPATCH_NOTHROW void dispatch_atfork_child(void); +DISPATCH_NOTHROW void dispatch_init_pthread(pthread_t); + +/* + * Extract the context pointer from a mach message trailer. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +void * +dispatch_mach_msg_get_context(mach_msg_header_t *msg); + +__END_DECLS + +#endif diff --git a/src/protocol.defs b/src/protocol.defs new file mode 100644 index 000000000..e6bd40044 --- /dev/null +++ b/src/protocol.defs @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ */ + +#include +#include + +// '64' is used to align with Mach notifications and so that we don't fight with the notify symbols in Libsystem +subsystem libdispatch_internal_protocol 64; + +serverprefix _dispatch_; +userprefix _dispatch_send_; + +skip; /* was MACH_NOTIFY_FIRST: 64 */ + +/* MACH_NOTIFY_PORT_DELETED: 65 */ +simpleroutine +mach_notify_port_deleted( + _notify : mach_port_move_send_once_t; + _name : mach_port_name_t +); + +skip; /* was MACH_NOTIFY_MSG_ACCEPTED: 66 */ + +skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 67 */ + +skip; /* was NOTIFY_RECEIVE_RIGHTS: 68 */ + +/* MACH_NOTIFY_PORT_DESTROYED: 69 */ +simpleroutine +mach_notify_port_destroyed( + _notify : mach_port_move_send_once_t; + _rights : mach_port_move_receive_t +); + +/* MACH_NOTIFY_NO_SENDERS: 70 */ +simpleroutine +mach_notify_no_senders( + _notify : mach_port_move_send_once_t; + _mscnt : mach_port_mscount_t +); + +/* MACH_NOTIFY_SEND_ONCE: 71 */ +simpleroutine +mach_notify_send_once( + _notify : mach_port_move_send_once_t +); + +/* MACH_NOTIFY_DEAD_NAME: 72 */ +simpleroutine +mach_notify_dead_name( + _notify : mach_port_move_send_once_t; + _name : mach_port_name_t +); + +/* highly unlikely additional Mach notifications */ +skip; +skip; +skip; +skip; +skip; + +simpleroutine +wakeup_main_thread( + _port : mach_port_t; + WaitTime _waitTimeout : natural_t +); + +simpleroutine +consume_send_once_right( + _port : mach_port_move_send_once_t +); diff --git a/src/queue.c b/src/queue.c new file mode 100644 index 000000000..a3e89360f --- /dev/null +++ b/src/queue.c @@ -0,0 +1,2080 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include "protocol.h" + +void +dummy_function(void) +{ +} + +long +dummy_function_r0(void) +{ + return 0; +} + +static bool _dispatch_select_workaround; +static fd_set _dispatch_rfds; +static fd_set _dispatch_wfds; +static void *_dispatch_rfd_ptrs[FD_SETSIZE]; +static void *_dispatch_wfd_ptrs[FD_SETSIZE]; + + +static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, +}; + +static struct dispatch_queue_s _dispatch_root_queues[]; + +static inline dispatch_queue_t +_dispatch_get_root_queue(long priority, bool overcommit) +{ + if (overcommit) switch (priority) { + case DISPATCH_QUEUE_PRIORITY_LOW: + 
return &_dispatch_root_queues[1]; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: + return &_dispatch_root_queues[3]; + case DISPATCH_QUEUE_PRIORITY_HIGH: + return &_dispatch_root_queues[5]; + } + switch (priority) { + case DISPATCH_QUEUE_PRIORITY_LOW: + return &_dispatch_root_queues[0]; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: + return &_dispatch_root_queues[2]; + case DISPATCH_QUEUE_PRIORITY_HIGH: + return &_dispatch_root_queues[4]; + default: + return NULL; + } +} + +#ifdef __BLOCKS__ +dispatch_block_t +_dispatch_Block_copy(dispatch_block_t db) +{ + dispatch_block_t rval; + + while (!(rval = Block_copy(db))) { + sleep(1); + } + + return rval; +} +#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) + +void +_dispatch_call_block_and_release(void *block) +{ + void (^b)(void) = block; + b(); + Block_release(b); +} + +void +_dispatch_call_block_and_release2(void *block, void *ctxt) +{ + void (^b)(void*) = block; + b(ctxt); + Block_release(b); +} + +#endif /* __BLOCKS__ */ + +struct dispatch_queue_attr_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s); +}; + +struct dispatch_queue_attr_s { + DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s); + + // Public: + int qa_priority; + void* finalizer_ctxt; + dispatch_queue_finalizer_function_t finalizer_func; + + // Private: + unsigned long qa_flags; +}; + +static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); + +#define _dispatch_queue_trylock(dq) dispatch_atomic_cmpxchg(&(dq)->dq_running, 0, 1) +static inline void _dispatch_queue_unlock(dispatch_queue_t dq); +static void _dispatch_queue_invoke(dispatch_queue_t dq); +static void _dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq); +static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq); +static struct dispatch_object_s *_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq); + +static bool _dispatch_program_is_probably_callback_driven; + +#if DISPATCH_COCOA_COMPAT +void 
(*dispatch_begin_thread_4GC)(void) = dummy_function; +void (*dispatch_end_thread_4GC)(void) = dummy_function; +void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function; +void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function; +static void _dispatch_queue_wakeup_main(void); + +static dispatch_once_t _dispatch_main_q_port_pred; +static bool main_q_is_draining; +static mach_port_t main_q_port; +#endif + +static void _dispatch_cache_cleanup2(void *value); +static void _dispatch_force_cache_cleanup(void); + +static const struct dispatch_queue_vtable_s _dispatch_queue_vtable = { + .do_type = DISPATCH_QUEUE_TYPE, + .do_kind = "queue", + .do_dispose = _dispatch_queue_dispose, + .do_invoke = (void *)dummy_function_r0, + .do_probe = (void *)dummy_function_r0, + .do_debug = dispatch_queue_debug, +}; + +static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = { + .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, + .do_kind = "global-queue", + .do_debug = dispatch_queue_debug, + .do_probe = _dispatch_queue_wakeup_global, +}; + +#define MAX_THREAD_COUNT 255 + +struct dispatch_root_queue_context_s { + pthread_workqueue_t dgq_kworkqueue; + uint32_t dgq_pending; + uint32_t dgq_thread_pool_size; + dispatch_semaphore_t dgq_thread_mediator; +}; + +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2) +static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { + { + .dgq_thread_mediator = &_dispatch_thread_mediator[0], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, + { + .dgq_thread_mediator = &_dispatch_thread_mediator[1], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, + { + .dgq_thread_mediator = &_dispatch_thread_mediator[2], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, + { + .dgq_thread_mediator = &_dispatch_thread_mediator[3], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, + { + .dgq_thread_mediator = &_dispatch_thread_mediator[4], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, + { + 
.dgq_thread_mediator = &_dispatch_thread_mediator[5], + .dgq_thread_pool_size = MAX_THREAD_COUNT, + }, +}; + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +// dq_running is set to 2 so that barrier operations go through the slow path +static struct dispatch_queue_s _dispatch_root_queues[] = { + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[0], + + .dq_label = "com.apple.root.low-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 4, + }, + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[1], + + .dq_label = "com.apple.root.low-overcommit-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 5, + }, + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[2], + + .dq_label = "com.apple.root.default-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 6, + }, + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[3], + + .dq_label = "com.apple.root.default-overcommit-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 7, + }, + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = 
DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[4], + + .dq_label = "com.apple.root.high-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 8, + }, + { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[5], + + .dq_label = "com.apple.root.high-overcommit-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 9, + }, +}; + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +struct dispatch_queue_s _dispatch_main_q = { + .do_vtable = &_dispatch_queue_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT / 2], + + .dq_label = "com.apple.main-thread", + .dq_running = 1, + .dq_width = 1, + .dq_serialnum = 1, +}; + +#if DISPATCH_PERF_MON +static OSSpinLock _dispatch_stats_lock; +static size_t _dispatch_bad_ratio; +static struct { + uint64_t time_total; + uint64_t count_total; + uint64_t thread_total; +} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set +static void _dispatch_queue_merge_stats(uint64_t start); +#endif + +static void *_dispatch_worker_thread(void *context); +static void _dispatch_worker_thread2(void *context); + +malloc_zone_t *_dispatch_ccache_zone; + +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dispatch_continuation_t prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); + dc->do_next = prev_dc; + _dispatch_thread_setspecific(dispatch_cache_key, dc); +} + +static inline void +_dispatch_continuation_pop(dispatch_object_t dou) +{ + dispatch_continuation_t dc = dou._dc; + dispatch_group_t dg; + + if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { + return 
_dispatch_queue_invoke(dou._dq); + } + + // Add the item back to the cache before calling the function. This + // allows the 'hot' continuation to be used for a quick callback. + // + // The ccache version is per-thread. + // Therefore, the object has not been reused yet. + // This generates better assembly. + if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { + _dispatch_continuation_free(dc); + } + if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) { + dg = dc->dc_group; + } else { + dg = NULL; + } + dc->dc_func(dc->dc_ctxt); + if (dg) { + dispatch_group_leave(dg); + _dispatch_release(dg); + } +} + +struct dispatch_object_s * +_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) +{ + struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; + + // The mediator value acts both as a "lock" and a signal + head = dispatch_atomic_xchg(&dq->dq_items_head, mediator); + + if (slowpath(head == NULL)) { + // The first xchg on the tail will tell the enqueueing thread that it + // is safe to blindly write out to the head pointer. A cmpxchg honors + // the algorithm. + dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL); + _dispatch_debug("no work on global work queue"); + return NULL; + } + + if (slowpath(head == mediator)) { + // This thread lost the race for ownership of the queue. + // + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. + _dispatch_debug("Contention on queue: %p", dq); + _dispatch_queue_wakeup_global(dq); +#if DISPATCH_PERF_MON + dispatch_atomic_inc(&_dispatch_bad_ratio); +#endif + return NULL; + } + + // Restore the head pointer to a sane value before returning. + // If 'next' is NULL, then this item _might_ be the last item. 
+ next = fastpath(head->do_next); + + if (slowpath(!next)) { + dq->dq_items_head = NULL; + + if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) { + // both head and tail are NULL now + goto out; + } + + // There must be a next item now. This thread won't wait long. + while (!(next = head->do_next)) { + _dispatch_hardware_pause(); + } + } + + dq->dq_items_head = next; + _dispatch_queue_wakeup_global(dq); +out: + return head; +} + +dispatch_queue_t +dispatch_get_current_queue(void) +{ + return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); +} + +#undef dispatch_get_main_queue +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +dispatch_queue_t dispatch_get_main_queue(void); + +dispatch_queue_t +dispatch_get_main_queue(void) +{ + return &_dispatch_main_q; +} +#define dispatch_get_main_queue() (&_dispatch_main_q) + +struct _dispatch_hw_config_s _dispatch_hw_config; + +static void +_dispatch_queue_set_width_init(void) +{ + size_t valsz = sizeof(uint32_t); + + errno = 0; + sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active, &valsz, NULL, 0); + dispatch_assume_zero(errno); + dispatch_assume(valsz == sizeof(uint32_t)); + + errno = 0; + sysctlbyname("hw.logicalcpu_max", &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0); + dispatch_assume_zero(errno); + dispatch_assume(valsz == sizeof(uint32_t)); + + errno = 0; + sysctlbyname("hw.physicalcpu_max", &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 0); + dispatch_assume_zero(errno); + dispatch_assume(valsz == sizeof(uint32_t)); +} + +void +dispatch_queue_set_width(dispatch_queue_t dq, long width) +{ + int w = (int)width; // intentional truncation + uint32_t tmp; + + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; + } + if (w == 1 || w == 0) { + dq->dq_width = 1; + return; + } + if (w > 0) { + tmp = w; + } else switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_hw_config.cc_max_physical; + break; + case 
DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_hw_config.cc_max_active; + break; + default: + // fall through + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + tmp = _dispatch_hw_config.cc_max_logical; + break; + } + // multiply by two since the running count is inc/dec by two (the low bit == barrier) + dq->dq_width = tmp * 2; + + // XXX if the queue has items and the width is increased, we should try to wake the queue +} + +// skip zero +// 1 - main_q +// 2 - mgr_q +// 3 - _unused_ +// 4,5,6,7,8,9 - global queues +// we use 'xadd' on Intel, so the initial value == next assigned +static unsigned long _dispatch_queue_serial_numbers = 10; + +// Note to later developers: ensure that any initialization changes are +// made for statically allocated queues (i.e. _dispatch_main_q). +inline void +_dispatch_queue_init(dispatch_queue_t dq) +{ + dq->do_vtable = &_dispatch_queue_vtable; + dq->do_next = DISPATCH_OBJECT_LISTLESS; + dq->do_ref_cnt = 1; + dq->do_xref_cnt = 1; + dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->dq_running = 0; + dq->dq_width = 1; + dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; +} + +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +{ + dispatch_queue_t dq; + size_t label_len; + + if (!label) { + label = ""; + } + + label_len = strlen(label); + if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) { + label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1); + } + + // XXX switch to malloc() + dq = calloc(1ul, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE + label_len + 1); + if (slowpath(!dq)) { + return dq; + } + + _dispatch_queue_init(dq); + strcpy(dq->dq_label, label); + +#ifndef DISPATCH_NO_LEGACY + if (slowpath(attr)) { + dq->do_targetq = _dispatch_get_root_queue(attr->qa_priority, attr->qa_flags & DISPATCH_QUEUE_OVERCOMMIT); + dq->dq_finalizer_ctxt = attr->finalizer_ctxt; + dq->dq_finalizer_func = attr->finalizer_func; +#ifdef __BLOCKS__ + if 
(attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { + // if finalizer_ctxt is a Block, retain it. + dq->dq_finalizer_ctxt = Block_copy(dq->dq_finalizer_ctxt); + if (!(dq->dq_finalizer_ctxt)) { + goto out_bad; + } + } +#endif + } +#endif + + return dq; + +out_bad: + free(dq); + return NULL; +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +void +_dispatch_queue_dispose(dispatch_queue_t dq) +{ + if (slowpath(dq == _dispatch_queue_get_current())) { + DISPATCH_CRASH("Release of a queue by itself"); + } + if (slowpath(dq->dq_items_tail)) { + DISPATCH_CRASH("Release of a queue while items are enqueued"); + } + +#ifndef DISPATCH_NO_LEGACY + if (dq->dq_finalizer_func) { + dq->dq_finalizer_func(dq->dq_finalizer_ctxt, dq); + } +#endif + + // trash the tail queue so that use after free will crash + dq->dq_items_tail = (void *)0x200; + + _dispatch_dispose(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func) +{ + dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap()); + + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = context; + + _dispatch_queue_push(dq, dc); +} + +#ifdef __BLOCKS__ +void +dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) +{ + dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t func) +{ + dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + + if (!dc) { + return _dispatch_barrier_async_f_slow(dq, context, func); + } + + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = context; + + _dispatch_queue_push(dq, dc); +} + +DISPATCH_NOINLINE +static void 
+_dispatch_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func) +{ + dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap()); + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dc->dc_func = func; + dc->dc_ctxt = context; + + _dispatch_queue_push(dq, dc); +} + +#ifdef __BLOCKS__ +void +dispatch_async(dispatch_queue_t dq, void (^work)(void)) +{ + dispatch_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + + // unlike dispatch_sync_f(), we do NOT need to check the queue width, + // the "drain" function will do this test + + if (!dc) { + return _dispatch_async_f_slow(dq, ctxt, func); + } + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + + _dispatch_queue_push(dq, dc); +} + +struct dispatch_barrier_sync_slow2_s { + dispatch_function_t dbss2_func; + dispatch_function_t dbss2_ctxt; + dispatch_semaphore_t dbss2_sema; +}; + +static void +_dispatch_barrier_sync_f_slow_invoke(void *ctxt) +{ + struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt; + + dbss2->dbss2_func(dbss2->dbss2_ctxt); + dispatch_semaphore_signal(dbss2->dbss2_sema); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + struct dispatch_barrier_sync_slow2_s dbss2 = { + .dbss2_func = func, + .dbss2_ctxt = ctxt, + .dbss2_sema = _dispatch_get_thread_semaphore(), + }; + struct dispatch_barrier_sync_slow_s { + DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s); + } dbss = { + .do_vtable = (void *)DISPATCH_OBJ_BARRIER_BIT, + .dc_func = _dispatch_barrier_sync_f_slow_invoke, + .dc_ctxt = &dbss2, + }; + + _dispatch_queue_push(dq, (void *)&dbss); + + while (dispatch_semaphore_wait(dbss2.dbss2_sema, dispatch_time(0, 
3ull * NSEC_PER_SEC))) { + if (DISPATCH_OBJECT_SUSPENDED(dq)) { + continue; + } + if (_dispatch_queue_trylock(dq)) { + _dispatch_queue_drain(dq); + _dispatch_queue_unlock(dq); + } + } + _dispatch_put_thread_semaphore(dbss2.dbss2_sema); +} + +#ifdef __BLOCKS__ +void +dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) +{ + struct Block_basic *bb = (void *)work; + + dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + + // 1) ensure that this thread hasn't enqueued anything ahead of this call + // 2) the queue is not suspended + // 3) the queue is not weird + if (slowpath(dq->dq_items_tail) + || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) + || slowpath(!_dispatch_queue_trylock(dq))) { + return _dispatch_barrier_sync_f_slow(dq, ctxt, func); + } + + _dispatch_thread_setspecific(dispatch_queue_key, dq); + func(ctxt); + _dispatch_workitem_inc(); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_queue_unlock(dq); +} + +static void +_dispatch_sync_f_slow2(void *ctxt) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_atomic_add(&dq->dq_running, 2); + dispatch_semaphore_signal(ctxt); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_f_slow(dispatch_queue_t dq) +{ + // the global root queues do not need strict ordering + if (dq->do_targetq == NULL) { + dispatch_atomic_add(&dq->dq_running, 2); + return; + } + + struct dispatch_sync_slow_s { + DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s); + } dss = { + .do_vtable = NULL, + .dc_func = _dispatch_sync_f_slow2, + .dc_ctxt = _dispatch_get_thread_semaphore(), + }; + + // XXX FIXME -- concurrent queues can be come serial again + _dispatch_queue_push(dq, (void *)&dss); + + dispatch_semaphore_wait(dss.dc_ctxt, DISPATCH_TIME_FOREVER); + 
_dispatch_put_thread_semaphore(dss.dc_ctxt); +} + +#ifdef __BLOCKS__ +void +dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +{ + struct Block_basic *bb = (void *)work; + dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + typeof(dq->dq_running) prev_cnt; + dispatch_queue_t old_dq; + + if (dq->dq_width == 1) { + return dispatch_barrier_sync_f(dq, ctxt, func); + } + + // 1) ensure that this thread hasn't enqueued anything ahead of this call + // 2) the queue is not suspended + if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { + _dispatch_sync_f_slow(dq); + } else { + prev_cnt = dispatch_atomic_add(&dq->dq_running, 2) - 2; + + if (slowpath(prev_cnt & 1)) { + if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) { + _dispatch_wakeup(dq); + } + _dispatch_sync_f_slow(dq); + } + } + + old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + func(ctxt); + _dispatch_workitem_inc(); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + + if (slowpath(dispatch_atomic_sub(&dq->dq_running, 2) == 0)) { + _dispatch_wakeup(dq); + } +} + +const char * +dispatch_queue_get_label(dispatch_queue_t dq) +{ + return dq->dq_label; +} + +#if DISPATCH_COCOA_COMPAT +static void +_dispatch_main_q_port_init(void *ctxt __attribute__((unused))) +{ + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, MACH_MSG_TYPE_MAKE_SEND); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + + _dispatch_program_is_probably_callback_driven = true; + _dispatch_safe_fork = false; +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +DISPATCH_NOINLINE +static 
void +_dispatch_queue_set_mainq_drain_state(bool arg) +{ + main_q_is_draining = arg; +} +#endif + +void +dispatch_main(void) +{ + if (pthread_main_np()) { + _dispatch_program_is_probably_callback_driven = true; + pthread_exit(NULL); + DISPATCH_CRASH("pthread_exit() returned"); + } + DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread"); +} + +static void +_dispatch_sigsuspend(void *ctxt __attribute__((unused))) +{ + static const sigset_t mask; + + for (;;) { + sigsuspend(&mask); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_cleanup2(void) +{ + dispatch_atomic_dec(&_dispatch_main_q.dq_running); + + if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { + _dispatch_wakeup(&_dispatch_main_q); + } + + // overload the "probably" variable to mean that dispatch_main() or + // similar non-POSIX API was called + // this has to run before the DISPATCH_COCOA_COMPAT below + if (_dispatch_program_is_probably_callback_driven) { + dispatch_async_f(_dispatch_get_root_queue(0, 0), NULL, _dispatch_sigsuspend); + sleep(1); // workaround 6778970 + } + +#if DISPATCH_COCOA_COMPAT + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); + + mach_port_t mp = main_q_port; + kern_return_t kr; + + main_q_port = 0; + + if (mp) { + kr = mach_port_deallocate(mach_task_self(), mp); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +#endif +} + +dispatch_queue_t +dispatch_get_concurrent_queue(long pri) +{ + if (pri > 0) { + pri = DISPATCH_QUEUE_PRIORITY_HIGH; + } else if (pri < 0) { + pri = DISPATCH_QUEUE_PRIORITY_LOW; + } + return _dispatch_get_root_queue(pri, false); +} + +static void +_dispatch_queue_cleanup(void *ctxt) +{ + if (ctxt == &_dispatch_main_q) { + return _dispatch_queue_cleanup2(); + } + // POSIX defines that destructors are only called if 
'ctxt' is non-null + DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); +} + +dispatch_queue_t +dispatch_get_global_queue(long priority, unsigned long flags) +{ + if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) { + return NULL; + } + return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT); +} + +#define countof(x) (sizeof(x) / sizeof(x[0])) +void +libdispatch_init(void) +{ + dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 3); + dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 6); + + dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH); + dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT); + dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT); + dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); + + _dispatch_thread_key_init_np(dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_init_np(dispatch_sema4_key, (void (*)(void *))dispatch_release); // use the extern release + _dispatch_thread_key_init_np(dispatch_cache_key, _dispatch_cache_cleanup2); +#if DISPATCH_PERF_MON + _dispatch_thread_key_init_np(dispatch_bcounter_key, NULL); +#endif + + _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + + _dispatch_queue_set_width_init(); +} + +void +_dispatch_queue_unlock(dispatch_queue_t dq) +{ + if (slowpath(dispatch_atomic_dec(&dq->dq_running))) { + return; + } + + _dispatch_wakeup(dq); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +dispatch_queue_t +_dispatch_wakeup(dispatch_object_t dou) +{ + dispatch_queue_t tq; + + if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { + return NULL; + } + if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) { + return NULL; + } + + if (!_dispatch_trylock(dou._do)) { +#if DISPATCH_COCOA_COMPAT + if (dou._dq == &_dispatch_main_q) { + _dispatch_queue_wakeup_main(); + } +#endif + return NULL; + } + 
_dispatch_retain(dou._do); + tq = dou._do->do_targetq; + _dispatch_queue_push(tq, dou._do); + return tq; // libdispatch doesn't need this, but the Instrument DTrace probe does +} + +#if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE +void +_dispatch_queue_wakeup_main(void) +{ + kern_return_t kr; + + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); + + kr = _dispatch_send_wakeup_main_thread(main_q_port, 0); + + switch (kr) { + case MACH_SEND_TIMEOUT: + case MACH_SEND_TIMED_OUT: + case MACH_SEND_INVALID_DEST: + break; + default: + dispatch_assume_zero(kr); + break; + } + + _dispatch_safe_fork = false; +} +#endif + +static inline int +_dispatch_rootq2wq_pri(long idx) +{ +#ifdef WORKQ_DEFAULT_PRIOQUEUE + switch (idx) { + case 0: + case 1: + return WORKQ_LOW_PRIOQUEUE; + case 2: + case 3: + default: + return WORKQ_DEFAULT_PRIOQUEUE; + case 4: + case 5: + return WORKQ_HIGH_PRIOQUEUE; + } +#else + return pri; +#endif +} + +static void +_dispatch_root_queues_init(void *context __attribute__((unused))) +{ + bool disable_wq = getenv("LIBDISPATCH_DISABLE_KWQ"); + pthread_workqueue_attr_t pwq_attr; + kern_return_t kr; + int i, r; + + r = pthread_workqueue_attr_init_np(&pwq_attr); + dispatch_assume_zero(r); + + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, _dispatch_rootq2wq_pri(i)); + dispatch_assume_zero(r); + r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1); + dispatch_assume_zero(r); +// some software hangs if the non-overcommitting queues do not overcommit when threads block +#if 0 + if (!(i & 1)) { + dispatch_root_queue_contexts[i].dgq_thread_pool_size = _dispatch_hw_config.cc_max_active; + } +#endif + + r = 0; + if (disable_wq || (r = pthread_workqueue_create_np(&_dispatch_root_queue_contexts[i].dgq_kworkqueue, &pwq_attr))) { + if (r != ENOTSUP) { + dispatch_assume_zero(r); + } + // override the default FIFO behavior for the pool semaphores + kr = 
semaphore_create(mach_task_self(), &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + dispatch_assume(_dispatch_thread_mediator[i].dsema_port); + } else { + dispatch_assume(_dispatch_root_queue_contexts[i].dgq_kworkqueue); + } + } + + r = pthread_workqueue_attr_destroy_np(&pwq_attr); + dispatch_assume_zero(r); +} + +bool +_dispatch_queue_wakeup_global(dispatch_queue_t dq) +{ + static dispatch_once_t pred; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + pthread_workitem_handle_t wh; + unsigned int gen_cnt; + pthread_t pthr; + int r, t_count; + + if (!dq->dq_items_tail) { + return false; + } + + _dispatch_safe_fork = false; + + dispatch_debug_queue(dq, __PRETTY_FUNCTION__); + + dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); + + if (qc->dgq_kworkqueue) { + if (dispatch_atomic_cmpxchg(&qc->dgq_pending, 0, 1)) { + _dispatch_debug("requesting new worker thread"); + + r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread2, dq, &wh, &gen_cnt); + dispatch_assume_zero(r); + } else { + _dispatch_debug("work thread request still pending on global queue: %p", dq); + } + goto out; + } + + if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { + goto out; + } + + do { + t_count = qc->dgq_thread_pool_size; + if (!t_count) { + _dispatch_debug("The thread pool is full: %p", dq); + goto out; + } + } while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1)); + + while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) { + if (r != EAGAIN) { + dispatch_assume_zero(r); + } + sleep(1); + } + r = pthread_detach(pthr); + dispatch_assume_zero(r); + +out: + return false; +} + +void +_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) +{ +#if DISPATCH_PERF_MON + uint64_t start = mach_absolute_time(); +#endif + _dispatch_queue_drain(dq); +#if DISPATCH_PERF_MON + _dispatch_queue_merge_stats(start); +#endif + 
_dispatch_force_cache_cleanup(); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +DISPATCH_NOINLINE +void +_dispatch_queue_invoke(dispatch_queue_t dq) +{ + dispatch_queue_t tq = dq->do_targetq; + + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && fastpath(_dispatch_queue_trylock(dq))) { + _dispatch_queue_drain(dq); + if (tq == dq->do_targetq) { + tq = dx_invoke(dq); + } else { + tq = dq->do_targetq; + } + // We do not need to check the result. + // When the suspend-count lock is dropped, then the check will happen. + dispatch_atomic_dec(&dq->dq_running); + if (tq) { + return _dispatch_queue_push(tq, dq); + } + } + + dq->do_next = DISPATCH_OBJECT_LISTLESS; + if (dispatch_atomic_sub(&dq->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { + if (dq->dq_running == 0) { + _dispatch_wakeup(dq); // verify that the queue is idle + } + } + _dispatch_release(dq); // added when the queue is put on the list +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +static void +_dispatch_set_target_queue2(void *ctxt) +{ + dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(); + + prev_dq = dq->do_targetq; + dq->do_targetq = ctxt; + _dispatch_release(prev_dq); +} + +void +dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) +{ + if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; + } + // NOTE: we test for NULL target queues internally to detect root queues + // therefore, if the retain crashes due to a bad input, that is OK + _dispatch_retain(dq); + dispatch_barrier_async_f(dou._dq, dq, _dispatch_set_target_queue2); +} + +static void +_dispatch_async_f_redirect2(void *_ctxt) +{ + struct dispatch_continuation_s *dc = _ctxt; + struct dispatch_continuation_s *other_dc = dc->dc_data[1]; + dispatch_queue_t old_dq, dq = dc->dc_data[0]; + + old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + 
_dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_continuation_pop(other_dc); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + + if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) { + _dispatch_wakeup(dq); + } + _dispatch_release(dq); +} + +static void +_dispatch_async_f_redirect(dispatch_queue_t dq, struct dispatch_object_s *other_dc) +{ + dispatch_continuation_t dc = (void *)other_dc; + dispatch_queue_t root_dq = dq; + + if (dc->dc_func == _dispatch_sync_f_slow2) { + return dc->dc_func(dc->dc_ctxt); + } + + dispatch_atomic_add(&dq->dq_running, 2); + _dispatch_retain(dq); + + dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap(); + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dc->dc_func = _dispatch_async_f_redirect2; + dc->dc_ctxt = dc; + dc->dc_data[0] = dq; + dc->dc_data[1] = other_dc; + + do { + root_dq = root_dq->do_targetq; + } while (root_dq->do_targetq); + + _dispatch_queue_push(root_dq, dc); +} + + +void +_dispatch_queue_drain(dispatch_queue_t dq) +{ + dispatch_queue_t orig_tq, old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + + orig_tq = dq->do_targetq; + + _dispatch_thread_setspecific(dispatch_queue_key, dq); + + while (dq->dq_items_tail) { + while (!fastpath(dq->dq_items_head)) { + _dispatch_hardware_pause(); + } + + dc = dq->dq_items_head; + dq->dq_items_head = NULL; + + do { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ do { + next_dc = fastpath(dc->do_next); + } while (!next_dc && !dispatch_atomic_cmpxchg(&dq->dq_items_tail, dc, NULL)); + if (DISPATCH_OBJECT_SUSPENDED(dq)) { + goto out; + } + if (dq->dq_running > dq->dq_width) { + goto out; + } + if (orig_tq != dq->do_targetq) { + goto out; + } + if (fastpath(dq->dq_width == 1)) { + _dispatch_continuation_pop(dc); + _dispatch_workitem_inc(); + } else if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { + if (dq->dq_running > 1) { + goto out; + } + _dispatch_continuation_pop(dc); + _dispatch_workitem_inc(); + } else { + _dispatch_async_f_redirect(dq, dc); + } + } while ((dc = next_dc)); + } + +out: + // if this is not a complete drain, we must undo some things + if (slowpath(dc)) { + // 'dc' must NOT be "popped" + // 'dc' might be the last item + if (next_dc || dispatch_atomic_cmpxchg(&dq->dq_items_tail, NULL, dc)) { + dq->dq_items_head = dc; + } else { + while (!(next_dc = dq->dq_items_head)) { + _dispatch_hardware_pause(); + } + dq->dq_items_head = dc; + dc->do_next = next_dc; + } + } + + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +void * +_dispatch_worker_thread(void *context) +{ + dispatch_queue_t dq = context; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + sigset_t mask; + int r; + + // workaround tweaks the kernel workqueue does for us + r = sigfillset(&mask); + dispatch_assume_zero(r); + r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); + dispatch_assume_zero(r); + + do { + _dispatch_worker_thread2(context); + // we use 65 seconds in case there are any timers that run once a minute + } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); + + dispatch_atomic_inc(&qc->dgq_thread_pool_size); + if (dq->dq_items_tail) { + _dispatch_queue_wakeup_global(dq); + } + + return NULL; +} + +// 6618342 Contact the team that owns the Instrument DTrace 
probe before renaming this symbol +void +_dispatch_worker_thread2(void *context) +{ + struct dispatch_object_s *item; + dispatch_queue_t dq = context; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + + if (_dispatch_thread_getspecific(dispatch_queue_key)) { + DISPATCH_CRASH("Premature thread recycling"); + } + + _dispatch_thread_setspecific(dispatch_queue_key, dq); + qc->dgq_pending = 0; + +#if DISPATCH_COCOA_COMPAT + // ensure that high-level memory management techniques do not leak/crash + dispatch_begin_thread_4GC(); + void *pool = _dispatch_begin_NSAutoReleasePool(); +#endif + +#if DISPATCH_PERF_MON + uint64_t start = mach_absolute_time(); +#endif + while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { + _dispatch_continuation_pop(item); + } +#if DISPATCH_PERF_MON + _dispatch_queue_merge_stats(start); +#endif + +#if DISPATCH_COCOA_COMPAT + _dispatch_end_NSAutoReleasePool(pool); + dispatch_end_thread_4GC(); +#endif + + _dispatch_thread_setspecific(dispatch_queue_key, NULL); + + _dispatch_force_cache_cleanup(); +} + +#if DISPATCH_PERF_MON +void +_dispatch_queue_merge_stats(uint64_t start) +{ + uint64_t avg, delta = mach_absolute_time() - start; + unsigned long count, bucket; + + count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); + + if (count) { + avg = delta / count; + bucket = flsll(avg); + } else { + bucket = 0; + } + + // 64-bit counters on 32-bit require a lock or a queue + OSSpinLockLock(&_dispatch_stats_lock); + + _dispatch_stats[bucket].time_total += delta; + _dispatch_stats[bucket].count_total += count; + _dispatch_stats[bucket].thread_total++; + + OSSpinLockUnlock(&_dispatch_stats_lock); +} +#endif + +size_t +dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + return snprintf(buf, bufsiz, "parent = %p ", dq->do_targetq); +} + +size_t +dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + size_t offset = 
0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dq->dq_label, dq); + offset += dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += snprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +#if DISPATCH_DEBUG +void +dispatch_debug_queue(dispatch_queue_t dq, const char* str) { + if (fastpath(dq)) { + dispatch_debug(dq, "%s", str); + } else { + _dispatch_log("queue[NULL]: %s", str); + } +} +#endif + +#if DISPATCH_COCOA_COMPAT +void +_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg __attribute__((unused))) +{ + if (main_q_is_draining) { + return; + } + _dispatch_queue_set_mainq_drain_state(true); + _dispatch_queue_serial_drain_till_empty(&_dispatch_main_q); + _dispatch_queue_set_mainq_drain_state(false); +} + +mach_port_t +_dispatch_get_main_queue_port_4CF(void) +{ + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); + return main_q_port; +} +#endif + +static void +dispatch_queue_attr_dispose(dispatch_queue_attr_t attr) +{ + dispatch_queue_attr_set_finalizer(attr, NULL); + _dispatch_dispose(attr); +} + +static const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = { + .do_type = DISPATCH_QUEUE_ATTR_TYPE, + .do_kind = "queue-attr", + .do_dispose = dispatch_queue_attr_dispose, +}; + +dispatch_queue_attr_t +dispatch_queue_attr_create(void) +{ + dispatch_queue_attr_t a = calloc(1, sizeof(struct dispatch_queue_attr_s)); + + if (a) { + a->do_vtable = &dispatch_queue_attr_vtable; + a->do_next = DISPATCH_OBJECT_LISTLESS; + a->do_ref_cnt = 1; + a->do_xref_cnt = 1; + a->do_targetq = _dispatch_get_root_queue(0, 0); + a->qa_flags = DISPATCH_QUEUE_OVERCOMMIT; + } + return a; +} + +void +dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags) +{ + dispatch_assert_zero(flags & ~DISPATCH_QUEUE_FLAGS_MASK); + attr->qa_flags = (unsigned long)flags & DISPATCH_QUEUE_FLAGS_MASK; +} + +void 
+dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority) +{ + dispatch_debug_assert(attr, "NULL pointer"); + dispatch_debug_assert(priority <= DISPATCH_QUEUE_PRIORITY_HIGH && priority >= DISPATCH_QUEUE_PRIORITY_LOW, "Invalid priority"); + + if (priority > 0) { + priority = DISPATCH_QUEUE_PRIORITY_HIGH; + } else if (priority < 0) { + priority = DISPATCH_QUEUE_PRIORITY_LOW; + } + + attr->qa_priority = priority; +} + +void +dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr, + void *context, dispatch_queue_finalizer_function_t finalizer) +{ +#ifdef __BLOCKS__ + if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { + Block_release(attr->finalizer_ctxt); + } +#endif + attr->finalizer_ctxt = context; + attr->finalizer_func = finalizer; +} + +#ifdef __BLOCKS__ +long +dispatch_queue_attr_set_finalizer(dispatch_queue_attr_t attr, + dispatch_queue_finalizer_t finalizer) +{ + void *ctxt; + dispatch_queue_finalizer_function_t func; + + if (finalizer) { + if (!(ctxt = Block_copy(finalizer))) { + return 1; + } + func = (void *)_dispatch_call_block_and_release2; + } else { + ctxt = NULL; + func = NULL; + } + + dispatch_queue_attr_set_finalizer_f(attr, ctxt, func); + + return 0; +} +#endif + +static void +_dispatch_ccache_init(void *context __attribute__((unused))) +{ + _dispatch_ccache_zone = malloc_create_zone(0, 0); + dispatch_assert(_dispatch_ccache_zone); + malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); +} + +dispatch_continuation_t +_dispatch_continuation_alloc_from_heap(void) +{ + static dispatch_once_t pred; + dispatch_continuation_t dc; + + dispatch_once_f(&pred, NULL, _dispatch_ccache_init); + + while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { + sleep(1); + } + + return dc; +} + +void +_dispatch_force_cache_cleanup(void) +{ + dispatch_continuation_t dc = _dispatch_thread_getspecific(dispatch_cache_key); + if (dc) { + 
_dispatch_thread_setspecific(dispatch_cache_key, NULL);
+		_dispatch_cache_cleanup2(dc);
+	}
+}
+
+// Free a whole thread-local chain of cached continuations back to the
+// dedicated malloc zone. Kept out of line (DISPATCH_NOINLINE) so the common
+// "cache empty" path in _dispatch_force_cache_cleanup() stays small.
+DISPATCH_NOINLINE
+void
+_dispatch_cache_cleanup2(void *value)
+{
+	dispatch_continuation_t dc, next_dc = value;
+
+	while ((dc = next_dc)) {
+		next_dc = dc->do_next;
+		malloc_zone_free(_dispatch_ccache_zone, dc);
+	}
+}
+
+// OS build string (kern.osversion), filled in lazily for _dispatch_bug() logs.
+static char _dispatch_build[16];
+
+// dispatch_once_f() callback: read kern.osversion into _dispatch_build.
+// NOTE(review): the sysctl() return value is unchecked here, unlike the
+// dispatch_assume_zero() convention used elsewhere in this file; on failure
+// the buffer simply stays zeroed, which only degrades the log prefix.
+static void
+_dispatch_bug_init(void *context __attribute__((unused)))
+{
+	int mib[] = { CTL_KERN, KERN_OSVERSION };
+	size_t bufsz = sizeof(_dispatch_build);
+
+	sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0);
+}
+
+// Report an internal inconsistency (backs the dispatch_assume*() macros).
+// Rate-limited: only the first failure per call site (return address) is
+// logged, to avoid flooding the log from a hot path.
+void
+_dispatch_bug(size_t line, long val)
+{
+	static dispatch_once_t pred;
+	static void *last_seen;
+	void *ra = __builtin_return_address(0);
+
+	dispatch_once_f(&pred, NULL, _dispatch_bug_init);
+	if (last_seen != ra) {
+		last_seen = ra;
+		_dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, line, val);
+	}
+}
+
+// Fatal variant of _dispatch_bug(): log the failure, then abort the process.
+void
+_dispatch_abort(size_t line, long val)
+{
+	_dispatch_bug(line, val);
+	abort();
+}
+
+// printf-style logging front end; forwards to _dispatch_logv().
+void
+_dispatch_log(const char *msg, ...)
+{
+	va_list ap;
+
+	va_start(ap, msg);
+
+	_dispatch_logv(msg, ap);
+
+	va_end(ap);
+}
+
+// Core logging routine. Debug builds append to /var/tmp/libdispatch.<pid>.log
+// (the FILE* is installed once via an atomic cmpxchg so concurrent first
+// callers race safely); release builds forward to syslog(3).
+void
+_dispatch_logv(const char *msg, va_list ap)
+{
+#if DISPATCH_DEBUG
+	static FILE *logfile, *tmp;
+	char newbuf[strlen(msg) + 2];	// room for the appended '\n' plus NUL
+	char path[PATH_MAX];
+
+	sprintf(newbuf, "%s\n", msg);
+
+	if (!logfile) {
+		snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid());
+		tmp = fopen(path, "a");
+		assert(tmp);
+		// Only one thread gets to publish its FILE*; losers close theirs.
+		if (!dispatch_atomic_cmpxchg(&logfile, NULL, tmp)) {
+			fclose(tmp);
+		} else {
+			struct timeval tv;
+			gettimeofday(&tv, NULL);
+			fprintf(logfile, "=== log file opened for %s[%u] at %ld.%06u ===\n",
+					getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec);
+		}
+	}
+	vfprintf(logfile, newbuf, ap);
+	fflush(logfile);
+#else
+	vsyslog(LOG_NOTICE, msg, ap);
+#endif
+}
+
+// pthread_sigmask() wrapper that first removes the signals the kernel cannot
+// deliver on an arbitrary thread (see the radar below) so that worker threads
+// may block everything else.
+int
+_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset)
+{
+	int r;
+
+	/* Workaround: 6269619 Not all signals can be delivered on any thread */
+
+	r = sigdelset(set, SIGILL);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGTRAP);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGEMT);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGFPE);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGBUS);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGSEGV);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGSYS);
+	dispatch_assume_zero(r);
+	r = sigdelset(set, SIGPIPE);
+	dispatch_assume_zero(r);
+
+	return pthread_sigmask(how, set, oset);
+}
+
+// True until dispatch creates state (worker threads, kqueue) that fork()
+// cannot preserve; cleared elsewhere in this file when that happens.
+bool _dispatch_safe_fork = true;
+
+// pthread_atfork() handlers: nothing to do before the fork or in the parent.
+void
+dispatch_atfork_prepare(void)
+{
+}
+
+void
+dispatch_atfork_parent(void)
+{
+}
+
+// In the child of an unsafe fork, poison every queue's head/tail pointers so
+// that any later use of dispatch in the child crashes deterministically
+// (dereferencing 0x100) instead of silently corrupting inherited state.
+void
+dispatch_atfork_child(void)
+{
+	void *crash = (void *)0x100;
+	size_t i;
+
+	if (_dispatch_safe_fork) {
+		return;
+	}
+
+	_dispatch_main_q.dq_items_head = crash;
+	_dispatch_main_q.dq_items_tail = crash;
+
+	_dispatch_mgr_q.dq_items_head = crash;
+	_dispatch_mgr_q.dq_items_tail = crash;
+
+	for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
+		_dispatch_root_queues[i].dq_items_head = crash;
+
_dispatch_root_queues[i].dq_items_tail = crash; + } +} + +void +dispatch_init_pthread(pthread_t pthr __attribute__((unused))) +{ +} + +static int _dispatch_kq; + +static void +_dispatch_get_kq_init(void *context __attribute__((unused))) +{ + static const struct kevent kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + }; + + _dispatch_kq = kqueue(); + _dispatch_safe_fork = false; + // in case we fall back to select() + FD_SET(_dispatch_kq, &_dispatch_rfds); + + if (_dispatch_kq == -1) { + dispatch_assert_zero(errno); + } + + dispatch_assume_zero(kevent(_dispatch_kq, &kev, 1, NULL, 0, NULL)); + + _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); +} + +static int +_dispatch_get_kq(void) +{ + static dispatch_once_t pred; + + dispatch_once_f(&pred, NULL, _dispatch_get_kq_init); + + return _dispatch_kq; +} + +static void +_dispatch_mgr_thread2(struct kevent *kev, size_t cnt) +{ + size_t i; + + for (i = 0; i < cnt; i++) { + // EVFILT_USER isn't used by sources + if (kev[i].filter == EVFILT_USER) { + // If _dispatch_mgr_thread2() ever is changed to return to the + // caller, then this should become _dispatch_queue_drain() + _dispatch_queue_serial_drain_till_empty(&_dispatch_mgr_q); + } else { + _dispatch_source_drain_kevent(&kev[i]); + } + } +} + +static dispatch_queue_t +_dispatch_mgr_invoke(dispatch_queue_t dq) +{ + static const struct timespec timeout_immediately = { 0, 0 }; + struct timespec timeout; + const struct timespec *timeoutp; + struct timeval sel_timeout, *sel_timeoutp; + fd_set tmp_rfds, tmp_wfds; + struct kevent kev[1]; + int k_cnt, k_err, i, r; + + _dispatch_thread_setspecific(dispatch_queue_key, dq); + + for (;;) { + _dispatch_run_timers(); + + timeoutp = _dispatch_get_next_timer_fire(&timeout); + + if (_dispatch_select_workaround) { + FD_COPY(&_dispatch_rfds, &tmp_rfds); + FD_COPY(&_dispatch_wfds, &tmp_wfds); + if (timeoutp) { + sel_timeout.tv_sec = timeoutp->tv_sec; + sel_timeout.tv_usec = 
(typeof(sel_timeout.tv_usec))(timeoutp->tv_nsec / 1000u); + sel_timeoutp = &sel_timeout; + } else { + sel_timeoutp = NULL; + } + + r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, sel_timeoutp); + if (r == -1) { + if (errno != EBADF) { + dispatch_assume_zero(errno); + continue; + } + for (i = 0; i < FD_SETSIZE; i++) { + if (i == _dispatch_kq) { + continue; + } + if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)) { + continue; + } + r = dup(i); + if (r != -1) { + close(r); + } else { + FD_CLR(i, &_dispatch_rfds); + FD_CLR(i, &_dispatch_wfds); + _dispatch_rfd_ptrs[i] = 0; + _dispatch_wfd_ptrs[i] = 0; + } + } + continue; + } + + if (r > 0) { + for (i = 0; i < FD_SETSIZE; i++) { + if (i == _dispatch_kq) { + continue; + } + if (FD_ISSET(i, &tmp_rfds)) { + FD_CLR(i, &_dispatch_rfds); // emulate EV_DISABLE + EV_SET(&kev[0], i, EVFILT_READ, EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, _dispatch_rfd_ptrs[i]); + _dispatch_rfd_ptrs[i] = 0; + _dispatch_mgr_thread2(kev, 1); + } + if (FD_ISSET(i, &tmp_wfds)) { + FD_CLR(i, &_dispatch_wfds); // emulate EV_DISABLE + EV_SET(&kev[0], i, EVFILT_WRITE, EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, _dispatch_wfd_ptrs[i]); + _dispatch_wfd_ptrs[i] = 0; + _dispatch_mgr_thread2(kev, 1); + } + } + } + + timeoutp = &timeout_immediately; + } + + k_cnt = kevent(_dispatch_kq, NULL, 0, kev, sizeof(kev) / sizeof(kev[0]), timeoutp); + k_err = errno; + + switch (k_cnt) { + case -1: + if (k_err == EBADF) { + DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); + } + dispatch_assume_zero(k_err); + continue; + default: + _dispatch_mgr_thread2(kev, (size_t)k_cnt); + // fall through + case 0: + _dispatch_force_cache_cleanup(); + continue; + } + } + + return NULL; +} + +static bool +_dispatch_mgr_wakeup(dispatch_queue_t dq) +{ + static const struct kevent kev = { + .ident = 1, + .filter = EVFILT_USER, +#ifdef EV_TRIGGER + .flags = EV_TRIGGER, +#endif +#ifdef NOTE_TRIGGER + .fflags = NOTE_TRIGGER, +#endif + }; + + _dispatch_debug("waking up the 
_dispatch_mgr_q: %p", dq); + + _dispatch_update_kq(&kev); + + return false; +} + +void +_dispatch_update_kq(const struct kevent *kev) +{ + struct kevent kev_copy = *kev; + kev_copy.flags |= EV_RECEIPT; + + if (kev_copy.flags & EV_DELETE) { + switch (kev_copy.filter) { + case EVFILT_READ: + if (FD_ISSET((int)kev_copy.ident, &_dispatch_rfds)) { + FD_CLR((int)kev_copy.ident, &_dispatch_rfds); + _dispatch_rfd_ptrs[kev_copy.ident] = 0; + return; + } + case EVFILT_WRITE: + if (FD_ISSET((int)kev_copy.ident, &_dispatch_wfds)) { + FD_CLR((int)kev_copy.ident, &_dispatch_wfds); + _dispatch_wfd_ptrs[kev_copy.ident] = 0; + return; + } + default: + break; + } + } + + int rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); + if (rval == -1) { + // If we fail to register with kevents, for other reasons aside from + // changelist elements. + dispatch_assume_zero(errno); + //kev_copy.flags |= EV_ERROR; + //kev_copy.data = error; + return; + } + + // The following select workaround only applies to adding kevents + if (!(kev->flags & EV_ADD)) { + return; + } + + switch (kev_copy.data) { + case 0: + return; + case EBADF: + break; + default: + // If an error occurred while registering with kevent, and it was + // because of a kevent changelist processing && the kevent involved + // either doing a read or write, it would indicate we were trying + // to register a /dev/* port; fall back to select + switch (kev_copy.filter) { + case EVFILT_READ: + _dispatch_select_workaround = true; + FD_SET((int)kev_copy.ident, &_dispatch_rfds); + _dispatch_rfd_ptrs[kev_copy.ident] = kev_copy.udata; + break; + case EVFILT_WRITE: + _dispatch_select_workaround = true; + FD_SET((int)kev_copy.ident, &_dispatch_wfds); + _dispatch_wfd_ptrs[kev_copy.ident] = kev_copy.udata; + break; + default: + _dispatch_source_drain_kevent(&kev_copy); + break; + } + break; + } +} + +static const struct dispatch_queue_vtable_s _dispatch_queue_mgr_vtable = { + .do_type = DISPATCH_QUEUE_MGR_TYPE, + .do_kind = 
"mgr-queue", + .do_invoke = _dispatch_mgr_invoke, + .do_debug = dispatch_queue_debug, + .do_probe = _dispatch_mgr_wakeup, +}; + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +struct dispatch_queue_s _dispatch_mgr_q = { + .do_vtable = &_dispatch_queue_mgr_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT - 1], + + .dq_label = "com.apple.libdispatch-manager", + .dq_width = 1, + .dq_serialnum = 2, +}; + +const struct dispatch_queue_offsets_s dispatch_queue_offsets = { + .dqo_version = 3, + .dqo_label = offsetof(struct dispatch_queue_s, dq_label), + .dqo_label_size = sizeof(_dispatch_main_q.dq_label), + .dqo_flags = 0, + .dqo_flags_size = 0, + .dqo_width = offsetof(struct dispatch_queue_s, dq_width), + .dqo_width_size = sizeof(_dispatch_main_q.dq_width), + .dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum), + .dqo_serialnum_size = sizeof(_dispatch_main_q.dq_serialnum), + .dqo_running = offsetof(struct dispatch_queue_s, dq_running), + .dqo_running_size = sizeof(_dispatch_main_q.dq_running), +}; + +#ifdef __BLOCKS__ +void +dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work) +{ + // test before the copy of the block + if (when == DISPATCH_TIME_FOREVER) { +#if DISPATCH_DEBUG + DISPATCH_CLIENT_CRASH("dispatch_after() called with 'when' == infinity"); +#endif + return; + } + dispatch_after_f(when, queue, _dispatch_Block_copy(work), _dispatch_call_block_and_release); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, void (*func)(void *)) +{ + uint64_t delta; + if (when == DISPATCH_TIME_FOREVER) { +#if DISPATCH_DEBUG + DISPATCH_CLIENT_CRASH("dispatch_after_f() called with 'when' == infinity"); +#endif + return; + } + + // this function can and should be 
optimized to not use a dispatch source +again: + delta = _dispatch_timeout(when); + if (delta == 0) { + return dispatch_async_f(queue, ctxt, func); + } + if (!dispatch_source_timer_create(DISPATCH_TIMER_INTERVAL, delta, 0, NULL, queue, ^(dispatch_source_t ds) { + long err_dom, err_val; + if ((err_dom = dispatch_source_get_error(ds, &err_val))) { + dispatch_assert(err_dom == DISPATCH_ERROR_DOMAIN_POSIX); + dispatch_assert(err_val == ECANCELED); + func(ctxt); + dispatch_release(ds); // MUST NOT be _dispatch_release() + } else { + dispatch_source_cancel(ds); + } + })) { + goto again; + } +} diff --git a/src/queue.h b/src/queue.h new file mode 100644 index 000000000..6b55696bb --- /dev/null +++ b/src/queue.h @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_QUEUE__ +#define __DISPATCH_QUEUE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/*! + * @header + * + * Dispatch is an abstract model for expressing concurrency via simple but + * powerful API. + * + * At the core, dispatch provides serial FIFO queues to which blocks may be + * submitted. Blocks submitted to these dispatch queues are invoked on a pool + * of threads fully managed by the system. 
No guarantee is made regarding + * which thread a block will be invoked on; however, it is guaranteed that only + * one block submitted to the FIFO dispatch queue will be invoked at a time. + * + * When multiple queues have blocks to be processed, the system is free to + * allocate additional threads to invoke the blocks concurrently. When the + * queues become empty, these threads are automatically released. + */ + +/*! + * @typedef dispatch_queue_t + * + * @abstract + * Dispatch queues invoke blocks submitted to them serially in FIFO order. A + * queue will only invoke one block at a time, but independent queues may each + * invoke their blocks concurrently with respect to each other. + * + * @discussion + * Dispatch queues are lightweight objects to which blocks may be submitted. + * The system manages a pool of threads which process dispatch queues and + * invoke blocks submitted to them. + * + * Conceptually a dispatch queue may have its own thread of execution, and + * interaction between queues is highly asynchronous. + * + * Dispatch queues are reference counted via calls to dispatch_retain() and + * dispatch_release(). Pending blocks submitted to a queue also hold a + * reference to the queue until they have finished. Once all references to a + * queue have been released, the queue will be deallocated by the system. + */ +DISPATCH_DECL(dispatch_queue); + +/*! + * @typedef dispatch_queue_attr_t + * + * @abstract + * Attribute and policy extensions for dispatch queues. + */ +DISPATCH_DECL(dispatch_queue_attr); + +/*! + * @typedef dispatch_block_t + * + * @abstract + * The prototype of blocks submitted to dispatch queues, which take no + * arguments and have no return value. + * + * @discussion + * The declaration of a block allocates storage on the stack. 
Therefore, this + * is an invalid construct: + * + * dispatch_block_t block; + * + * if (x) { + * block = ^{ printf("true\n"); }; + * } else { + * block = ^{ printf("false\n"); }; + * } + * block(); // unsafe!!! + * + * What is happening behind the scenes: + * + * if (x) { + * struct Block __tmp_1 = ...; // setup details + * block = &__tmp_1; + * } else { + * struct Block __tmp_2 = ...; // setup details + * block = &__tmp_2; + * } + * + * As the example demonstrates, the address of a stack variable is escaping the + * scope in which it is allocated. That is a classic C bug. + */ +#ifdef __BLOCKS__ +typedef void (^dispatch_block_t)(void); +#endif + +__BEGIN_DECLS + +/*! + * @function dispatch_async + * + * @abstract + * Submits a block for asynchronous execution on a dispatch queue. + * + * @discussion + * The dispatch_async() function is the fundamental mechanism for submitting + * blocks to a dispatch queue. + * + * Calls to dispatch_async() always return immediately after the block has + * been submitted, and never wait for the block to be invoked. + * + * The target queue determines whether the block will be invoked serially or + * concurrently with respect to other blocks submitted to that same queue. + * Serial queues are processed concurrently with with respect to each other. + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The system will hold a reference on the target queue until the block + * has finished. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to submit to the target dispatch queue. This function performs + * Block_copy() and Block_release() on behalf of callers. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_async(dispatch_queue_t queue, dispatch_block_t block); +#endif + +/*! 
+ * @function dispatch_async_f + * + * @abstract + * Submits a function for asynchronous execution on a dispatch queue. + * + * @discussion + * See dispatch_async() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The system will hold a reference on the target queue until the function + * has returned. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_async_f(). + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_async_f(dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! + * @function dispatch_sync + * + * @abstract + * Submits a block for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a block to a dispatch queue like dispatch_async(), however + * dispatch_sync() will not return until the block has finished. + * + * Calls to dispatch_sync() targeting the current queue will result + * in dead-lock. Use of dispatch_sync() is also subject to the same + * multi-party dead-lock problems that may result from the use of a mutex. + * Use of dispatch_async() is preferred. + * + * Unlike dispatch_async(), no retain is performed on the target queue. Because + * calls to this function are synchronous, the dispatch_sync() "borrows" the + * reference of the caller. + * + * As an optimization, dispatch_sync() invokes the block on the current + * thread when possible. + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The result of passing NULL in this parameter is undefined. 
+ * + * @param block + * The block to be invoked on the target dispatch queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); +#endif + +/*! + * @function dispatch_sync_f + * + * @abstract + * Submits a function for synchronous execution on a dispatch queue. + * + * @discussion + * See dispatch_sync() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_sync_f(). + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_sync_f(dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! + * @function dispatch_apply + * + * @abstract + * Submits a block to a dispatch queue for multiple invocations. + * + * @discussion + * Submits a block to a dispatch queue for multiple invocations. This function + * waits for the task block to complete before returning. If the target queue + * is a concurrent queue returned by dispatch_get_concurrent_queue(), the block + * may be invoked concurrently, and it must therefore be reentrant safe. + * + * Each invocation of the block will be passed the current index of iteration. + * + * @param iterations + * The number of iterations to perform. + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The result of passing NULL in this parameter is undefined. 
+ * + * @param block + * The block to be invoked the specified number of iterations. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_apply(size_t iterations, dispatch_queue_t queue, void (^block)(size_t)); +#endif + +/*! + * @function dispatch_apply_f + * + * @abstract + * Submits a function to a dispatch queue for multiple invocations. + * + * @discussion + * See dispatch_apply() for details. + * + * @param iterations + * The number of iterations to perform. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_apply_f(). The second parameter passed to this function is the + * current index of iteration. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +void +dispatch_apply_f(size_t iterations, dispatch_queue_t queue, + void *context, + void (*work)(void *, size_t)); + +/*! + * @function dispatch_get_current_queue + * + * @abstract + * Returns the queue on which the currently executing block is running. + * + * @discussion + * Returns the queue on which the currently executing block is running. + * + * When dispatch_get_current_queue() is called outside of the context of a + * submitted block, it will return the default concurrent queue. + * + * @result + * Returns the current queue. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t +dispatch_get_current_queue(void); + +/*! + * @function dispatch_get_main_queue + * + * @abstract + * Returns the default queue that is bound to the main thread. + * + * @discussion + * In order to invoke blocks submitted to the main queue, the application must + * call dispatch_main(), NSApplicationMain(), or use a CFRunLoop on the main + * thread. + * + * @result + * Returns the main queue. This queue is created automatically on behalf of + * the main thread before main() is called. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern struct dispatch_queue_s _dispatch_main_q; +#define dispatch_get_main_queue() (&_dispatch_main_q) + +/*! + * @enum dispatch_queue_priority_t + * + * @constant DISPATCH_QUEUE_PRIORITY_HIGH + * Items dispatched to the queue will run at high priority, + * i.e. the queue will be scheduled for execution before + * any default priority or low priority queue. + * + * @constant DISPATCH_QUEUE_PRIORITY_DEFAULT + * Items dispatched to the queue will run at the default + * priority, i.e. the queue will be scheduled for execution + * after all high priority queues have been scheduled, but + * before any low priority queues have been scheduled. + * + * @constant DISPATCH_QUEUE_PRIORITY_LOW + * Items dispatched to the queue will run at low priority, + * i.e. the queue will be scheduled for execution after all + * default priority and high priority queues have been + * scheduled. + */ +enum { + DISPATCH_QUEUE_PRIORITY_HIGH = 2, + DISPATCH_QUEUE_PRIORITY_DEFAULT = 0, + DISPATCH_QUEUE_PRIORITY_LOW = -2, +}; + +/*! + * @function dispatch_get_global_queue + * + * @abstract + * Returns a well-known global concurrent queue of a given priority level. + * + * @discussion + * The well-known global concurrent queues may not be modified. 
Calls to + * dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will + * have no effect when used with queues returned by this function. + * + * @param priority + * A priority defined in dispatch_queue_priority_t + * + * @param flags + * Reserved for future use. Passing any value other than zero may result in + * a NULL return value. + * + * @result + * Returns the requested global queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t +dispatch_get_global_queue(long priority, unsigned long flags); + +/*! + * @function dispatch_queue_create + * + * @abstract + * Creates a new dispatch queue to which blocks may be submitted. + * + * @discussion + * Dispatch queues invoke blocks serially in FIFO order. + * + * When the dispatch queue is no longer needed, it should be released + * with dispatch_release(). Note that any pending blocks submitted + * to a queue will hold a reference to that queue. Therefore a queue + * will not be deallocated until all pending blocks have finished. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param attr + * Unused. Pass NULL for now. + * + * @result + * The newly created dispatch queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); + +/*! + * @function dispatch_queue_get_label + * + * @abstract + * Returns the label of the queue that was specified when the + * queue was created. + * + * @param queue + * The result of passing NULL in this parameter is undefined. + * + * @result + * The label of the queue. The result may be NULL. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +const char * +dispatch_queue_get_label(dispatch_queue_t queue); + +/*! + * @function dispatch_set_target_queue + * + * @abstract + * Sets the target queue for the given object. + * + * @discussion + * An object's target queue is responsible for processing the object. + * + * A dispatch queue's priority is inherited by its target queue. Use the + * dispatch_get_global_queue() function to obtain suitable target queue + * of the desired priority. + * + * A dispatch source's target queue specifies where its event handler and + * cancellation handler blocks will be submitted. + * + * The result of calling dispatch_set_target_queue() on any other type of + * dispatch object is undefined. + * + * @param object + * The object to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param queue + * The new target queue for the object. The queue is retained, and the + * previous one, if any, is released. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t queue); + +/*! + * @function dispatch_main + * + * @abstract + * Execute blocks submitted to the main queue. + * + * @discussion + * This function "parks" the main thread and waits for blocks to be submitted + * to the main queue. This function never returns. + * + * Applications that call NSApplicationMain() or CFRunLoopRun() on the + * main thread do not need to call dispatch_main(). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW DISPATCH_NORETURN +void +dispatch_main(void); + +/*! + * @function dispatch_after + * + * @abstract + * Schedule a block for execution on a given queue at a specified time. 
+ * + * @discussion + * Passing DISPATCH_TIME_NOW as the "when" parameter is supported, but not as + * optimal as calling dispatch_async() instead. Passing DISPATCH_TIME_FOREVER + * is undefined. + * + * @param when + * A temporal milestone returned by dispatch_time() or dispatch_walltime(). + * + * @param queue + * A queue to which the given block will be submitted at the specified time. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block of code to execute. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_after(dispatch_time_t when, + dispatch_queue_t queue, + dispatch_block_t block); +#endif + +/*! + * @function dispatch_after_f + * + * @abstract + * Schedule a function for execution on a given queue at a specified time. + * + * @discussion + * See dispatch_after() for details. + * + * @param when + * A temporal milestone returned by dispatch_time() or dispatch_walltime(). + * + * @param queue + * A queue to which the given function will be submitted at the specified time. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_after_f(). + * The result of passing NULL in this parameter is undefined. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +void +dispatch_after_f(dispatch_time_t when, + dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +__END_DECLS + +#endif diff --git a/src/queue_internal.h b/src/queue_internal.h new file mode 100644 index 000000000..05237c202 --- /dev/null +++ b/src/queue_internal.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_QUEUE_INTERNAL__ +#define __DISPATCH_QUEUE_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +// If dc_vtable is less than 127, then the object is a continuation. +// Otherwise, the object has a private layout and memory management rules. The +// first two words must align with normal objects. 
+#define DISPATCH_CONTINUATION_HEADER(x) \ + const void * do_vtable; \ + struct x *volatile do_next; \ + dispatch_function_t dc_func; \ + void * dc_ctxt + +#define DISPATCH_OBJ_ASYNC_BIT 0x1 +#define DISPATCH_OBJ_BARRIER_BIT 0x2 +#define DISPATCH_OBJ_GROUP_BIT 0x4 +// vtables are pointers far away from the low page in memory +#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul) + +struct dispatch_continuation_s { + DISPATCH_CONTINUATION_HEADER(dispatch_continuation_s); + dispatch_group_t dc_group; + void * dc_data[3]; +}; + +typedef struct dispatch_continuation_s *dispatch_continuation_t; + + +struct dispatch_queue_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_queue_s); +}; + +#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 + +#define DISPATCH_QUEUE_HEADER \ + uint32_t dq_running; \ + uint32_t dq_width; \ + struct dispatch_object_s *dq_items_tail; \ + struct dispatch_object_s *volatile dq_items_head; \ + unsigned long dq_serialnum; \ + void *dq_finalizer_ctxt; \ + dispatch_queue_finalizer_function_t dq_finalizer_func + +struct dispatch_queue_s { + DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s); + DISPATCH_QUEUE_HEADER; + char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last +}; + +extern struct dispatch_queue_s _dispatch_mgr_q; + +void _dispatch_queue_init(dispatch_queue_t dq); +void _dispatch_queue_drain(dispatch_queue_t dq); +void _dispatch_queue_dispose(dispatch_queue_t dq); + +__attribute__((always_inline)) +static inline void +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, dispatch_object_t _tail) +{ + struct dispatch_object_s *prev, *head = _head._do, *tail = _tail._do; + + tail->do_next = NULL; + prev = fastpath(dispatch_atomic_xchg(&dq->dq_items_tail, tail)); + if (prev) { + // if we crash here with a value less than 0x1000, then we are at a known bug in client code + // for example, see _dispatch_queue_dispose or _dispatch_atfork_child + prev->do_next = head; + } else { + dq->dq_items_head = 
head; + _dispatch_wakeup(dq); + } +} + +#define _dispatch_queue_push(x, y) _dispatch_queue_push_list((x), (y), (y)) + +#define DISPATCH_QUEUE_PRIORITY_COUNT 3 + +#if DISPATCH_DEBUG +void dispatch_debug_queue(dispatch_queue_t dq, const char* str); +#else +static inline void dispatch_debug_queue(dispatch_queue_t dq __attribute__((unused)), const char* str __attribute__((unused))) {} +#endif + +size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); +size_t dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz); + +static inline dispatch_queue_t +_dispatch_queue_get_current(void) +{ + return _dispatch_thread_getspecific(dispatch_queue_key); +} + +__private_extern__ malloc_zone_t *_dispatch_ccache_zone; +dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); + +static inline dispatch_continuation_t +_dispatch_continuation_alloc_cacheonly(void) +{ + dispatch_continuation_t dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); + } + return dc; +} + +#endif diff --git a/src/queue_private.h b/src/queue_private.h new file mode 100644 index 000000000..85f87c010 --- /dev/null +++ b/src/queue_private.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_QUEUE_PRIVATE__ +#define __DISPATCH_QUEUE_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + + +/*! + * @enum dispatch_queue_flags_t + * + * @constant DISPATCH_QUEUE_OVERCOMMIT + * The queue will create a new thread for invoking blocks, regardless of how + * busy the computer is. + */ +enum { + DISPATCH_QUEUE_OVERCOMMIT = 0x2ull, +}; + +#define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) + +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_sync(dispatch_queue_t queue, dispatch_block_t block); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *context, dispatch_function_t work); + +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t work); + +/*! + * @function dispatch_queue_set_width + * + * @abstract + * Set the width of concurrency for a given queue. The default width of a + * privately allocated queue is one. + * + * @param queue + * The queue to adjust. Passing the main queue, a default concurrent queue or + * any other default queue will be ignored. + * + * @param width + * The new maximum width of concurrency depending on available resources. 
+ * If zero is passed, then the value is promoted to one. + * Negative values are magic values that map to automatic width values. + * Unknown negative values default to DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS. + */ +#define DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS -1 +#define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2 +#define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3 + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_queue_set_width(dispatch_queue_t dq, long width); + + + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_queue_offsets_s { + // always add new fields at the end + const uint16_t dqo_version; + const uint16_t dqo_label; + const uint16_t dqo_label_size; + const uint16_t dqo_flags; + const uint16_t dqo_flags_size; + const uint16_t dqo_serialnum; + const uint16_t dqo_serialnum_size; + const uint16_t dqo_width; + const uint16_t dqo_width_size; + const uint16_t dqo_running; + const uint16_t dqo_running_size; +} dispatch_queue_offsets; + + +__END_DECLS + +#endif diff --git a/src/semaphore.c b/src/semaphore.c new file mode 100644 index 000000000..9e36d4db1 --- /dev/null +++ b/src/semaphore.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +// semaphores are too fundamental to use the dispatch_assume*() macros +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + if (x) { \ + DISPATCH_CRASH("flawed group/semaphore logic"); \ + } \ + } while (0) + +struct dispatch_semaphore_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_semaphore_s); +}; + +static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema); +static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz); +static long _dispatch_group_wake(dispatch_semaphore_t dsema); + +const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = { + .do_type = DISPATCH_SEMAPHORE_TYPE, + .do_kind = "semaphore", + .do_dispose = _dispatch_semaphore_dispose, + .do_debug = _dispatch_semaphore_debug, +}; + +dispatch_semaphore_t +_dispatch_get_thread_semaphore(void) +{ + dispatch_semaphore_t dsema; + + dsema = fastpath(_dispatch_thread_getspecific(dispatch_sema4_key)); + if (!dsema) { + while (!(dsema = dispatch_semaphore_create(0))) { + sleep(1); + } + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return dsema; +} + +void +_dispatch_put_thread_semaphore(dispatch_semaphore_t dsema) +{ + dispatch_semaphore_t old_sema = _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, dsema); + if (old_sema) { + dispatch_release(old_sema); + } +} + +dispatch_group_t +dispatch_group_create(void) +{ + return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX); +} + +dispatch_semaphore_t +dispatch_semaphore_create(long value) +{ + dispatch_semaphore_t dsema; + + // If the internal value is negative, then the absolute of the value is + // equal to the number of waiting threads. Therefore it is bogus to + // initialize the semaphore with a negative value. 
+ if (value < 0) { + return NULL; + } + + dsema = calloc(1, sizeof(struct dispatch_semaphore_s)); + + if (fastpath(dsema)) { + dsema->do_vtable = &_dispatch_semaphore_vtable; + dsema->do_next = DISPATCH_OBJECT_LISTLESS; + dsema->do_ref_cnt = 1; + dsema->do_xref_cnt = 1; + dsema->do_targetq = dispatch_get_global_queue(0, 0); + dsema->dsema_value = value; + dsema->dsema_orig = value; + } + + return dsema; +} + +static void +_dispatch_semaphore_create_port(semaphore_t *s4) +{ + kern_return_t kr; + semaphore_t tmp; + + if (*s4) { + return; + } + + // lazily allocate the semaphore port + + // Someday: + // 1) Switch to a doubly-linked FIFO in user-space. + // 2) User-space timers for the timeout. + // 3) Use the per-thread semaphore port. + + while (dispatch_assume_zero(kr = semaphore_create(mach_task_self(), &tmp, SYNC_POLICY_FIFO, 0))) { + DISPATCH_VERIFY_MIG(kr); + sleep(1); + } + + if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + kr = semaphore_destroy(mach_task_self(), tmp); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + + _dispatch_safe_fork = false; +} + +DISPATCH_NOINLINE +static long +_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) +{ + mach_timespec_t _timeout; + kern_return_t kr; + uint64_t nsec; + long orig; + +again: + // Mach semaphores appear to sometimes spuriously wake up. Therefore, + // we keep a parallel count of the number of times a Mach semaphore is + // signaled. + while ((orig = dsema->dsema_sent_ksignals)) { + if (dispatch_atomic_cmpxchg(&dsema->dsema_sent_ksignals, orig, orig - 1)) { + return 0; + } + } + + _dispatch_semaphore_create_port(&dsema->dsema_port); + + // From xnu/osfmk/kern/sync_sema.c: + // wait_semaphore->count = -1; /* we don't keep an actual count */ + // + // The code above does not match the documentation, and that fact is + // not surprising. The documented semantics are clumsy to use in any + // practical way. 
The above hack effectively tricks the rest of the + // Mach semaphore logic to behave like the libdispatch algorithm. + + switch (timeout) { + default: + do { + // timeout() already calculates relative time left + nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); + } while (kr == KERN_ABORTED); + + if (kr != KERN_OPERATION_TIMED_OUT) { + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + break; + } + // Fall through and try to undo what the fast path did to dsema->dsema_value + case DISPATCH_TIME_NOW: + while ((orig = dsema->dsema_value) < 0) { + if (dispatch_atomic_cmpxchg(&dsema->dsema_value, orig, orig + 1)) { + return KERN_OPERATION_TIMED_OUT; + } + } + // Another thread called semaphore_signal(). + // Fall through and drain the wakeup. + case DISPATCH_TIME_FOREVER: + do { + kr = semaphore_wait(dsema->dsema_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + break; + } + + goto again; +} + +DISPATCH_NOINLINE +void +dispatch_group_enter(dispatch_group_t dg) +{ + dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; +#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) + // This assumes: + // 1) Way too much about the optimizer of GCC. + // 2) There will never be more than LONG_MAX threads. 
+ // Therefore: no overflow detection + asm( +#ifdef __LP64__ + "lock decq %0\n\t" +#else + "lock decl %0\n\t" +#endif + "js 1f\n\t" + "xor %%eax, %%eax\n\t" + "ret\n\t" + "1:" + : "+m" (dsema->dsema_value) + : + : "cc" + ); + _dispatch_semaphore_wait_slow(dsema, DISPATCH_TIME_FOREVER); +#else + dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); +#endif +} + +DISPATCH_NOINLINE +long +dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) +{ +#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) + // This assumes: + // 1) Way too much about the optimizer of GCC. + // 2) There will never be more than LONG_MAX threads. + // Therefore: no overflow detection + asm( +#ifdef __LP64__ + "lock decq %0\n\t" +#else + "lock decl %0\n\t" +#endif + "js 1f\n\t" + "xor %%eax, %%eax\n\t" + "ret\n\t" + "1:" + : "+m" (dsema->dsema_value) + : + : "cc" + ); +#else + if (dispatch_atomic_dec(&dsema->dsema_value) >= 0) { + return 0; + } +#endif + return _dispatch_semaphore_wait_slow(dsema, timeout); +} + +DISPATCH_NOINLINE +static long +_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) +{ + kern_return_t kr; + + _dispatch_semaphore_create_port(&dsema->dsema_port); + + // Before dsema_sent_ksignals is incremented we can rely on the reference + // held by the waiter. However, once this value is incremented the waiter + // may return between the atomic increment and the semaphore_signal(), + // therefore an explicit reference must be held in order to safely access + // dsema after the atomic increment. 
+ _dispatch_retain(dsema); + + dispatch_atomic_inc(&dsema->dsema_sent_ksignals); + + kr = semaphore_signal(dsema->dsema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + + _dispatch_release(dsema); + + return 1; +} + +void +dispatch_group_leave(dispatch_group_t dg) +{ + dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; + + dispatch_semaphore_signal(dsema); + + if (dsema->dsema_value == dsema->dsema_orig) { + _dispatch_group_wake(dsema); + } +} + +DISPATCH_NOINLINE +long +dispatch_semaphore_signal(dispatch_semaphore_t dsema) +{ +#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) + // overflow detection + // this assumes way too much about the optimizer of GCC + asm( +#ifdef __LP64__ + "lock incq %0\n\t" +#else + "lock incl %0\n\t" +#endif + "jo 1f\n\t" + "jle 2f\n\t" + "xor %%eax, %%eax\n\t" + "ret\n\t" + "1:\n\t" + "int $4\n\t" + "2:" + : "+m" (dsema->dsema_value) + : + : "cc" + ); +#else + if (dispatch_atomic_inc(&dsema->dsema_value) > 0) { + return 0; + } +#endif + return _dispatch_semaphore_signal_slow(dsema); +} + +DISPATCH_NOINLINE +long +_dispatch_group_wake(dispatch_semaphore_t dsema) +{ + struct dispatch_sema_notify_s *tmp, *head = dispatch_atomic_xchg(&dsema->dsema_notify_head, NULL); + long rval = dispatch_atomic_xchg(&dsema->dsema_group_waiters, 0); + bool do_rel = head; + long kr; + + // wake any "group" waiter or notify blocks + + if (rval) { + _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + do { + kr = semaphore_signal(dsema->dsema_waiter_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } while (--rval); + } + while (head) { + dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func); + _dispatch_release(head->dsn_queue); + do { + tmp = head->dsn_next; + } while (!tmp && !dispatch_atomic_cmpxchg(&dsema->dsema_notify_tail, head, NULL)); + free(head); + head = tmp; + } + if (do_rel) { + _dispatch_release(dsema); + } + return 0; +} + +DISPATCH_NOINLINE +static long 
+_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) +{ + mach_timespec_t _timeout; + kern_return_t kr; + uint64_t nsec; + long orig; + +again: + // check before we cause another signal to be sent by incrementing dsema->dsema_group_waiters + if (dsema->dsema_value == dsema->dsema_orig) { + return _dispatch_group_wake(dsema); + } + // Mach semaphores appear to sometimes spuriously wake up. Therefore, + // we keep a parallel count of the number of times a Mach semaphore is + // signaled. + dispatch_atomic_inc(&dsema->dsema_group_waiters); + // check the values again in case we need to wake any threads + if (dsema->dsema_value == dsema->dsema_orig) { + return _dispatch_group_wake(dsema); + } + + _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + + // From xnu/osfmk/kern/sync_sema.c: + // wait_semaphore->count = -1; /* we don't keep an actual count */ + // + // The code above does not match the documentation, and that fact is + // not surprising. The documented semantics are clumsy to use in any + // practical way. The above hack effectively tricks the rest of the + // Mach semaphore logic to behave like the libdispatch algorithm. + + switch (timeout) { + default: + do { + nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, _timeout)); + } while (kr == KERN_ABORTED); + if (kr != KERN_OPERATION_TIMED_OUT) { + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + break; + } + // Fall through and try to undo the earlier change to dsema->dsema_group_waiters + case DISPATCH_TIME_NOW: + while ((orig = dsema->dsema_group_waiters)) { + if (dispatch_atomic_cmpxchg(&dsema->dsema_group_waiters, orig, orig - 1)) { + return KERN_OPERATION_TIMED_OUT; + } + } + // Another thread called semaphore_signal(). + // Fall through and drain the wakeup. 
+ case DISPATCH_TIME_FOREVER: + do { + kr = semaphore_wait(dsema->dsema_waiter_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + break; + } + + goto again; +} + +long +dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) +{ + dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; + + if (dsema->dsema_value == dsema->dsema_orig) { + return 0; + } + if (timeout == 0) { + return KERN_OPERATION_TIMED_OUT; + } + return _dispatch_group_wait_slow(dsema, timeout); +} + +#ifdef __BLOCKS__ +void +dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) +{ + dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db), _dispatch_call_block_and_release); +} +#endif + +void +dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) +{ + dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; + struct dispatch_sema_notify_s *dsn, *prev; + + // FIXME -- this should be updated to use the continuation cache + while (!(dsn = malloc(sizeof(*dsn)))) { + sleep(1); + } + + dsn->dsn_next = NULL; + dsn->dsn_queue = dq; + dsn->dsn_ctxt = ctxt; + dsn->dsn_func = func; + _dispatch_retain(dq); + + prev = dispatch_atomic_xchg(&dsema->dsema_notify_tail, dsn); + if (fastpath(prev)) { + prev->dsn_next = dsn; + } else { + _dispatch_retain(dg); + dsema->dsema_notify_head = dsn; + if (dsema->dsema_value == dsema->dsema_orig) { + _dispatch_group_wake(dsema); + } + } +} + +void +_dispatch_semaphore_dispose(dispatch_semaphore_t dsema) +{ + kern_return_t kr; + + if (dsema->dsema_value < dsema->dsema_orig) { + DISPATCH_CLIENT_CRASH("Semaphore/group object deallocated while in use"); + } + + if (dsema->dsema_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + if (dsema->dsema_waiter_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + + _dispatch_dispose(dsema); +} + +size_t 
+_dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz) +{ + size_t offset = 0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema); + offset += dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); + offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, value = %ld, orig = %ld }", + dsema->dsema_port, dsema->dsema_value, dsema->dsema_orig); + return offset; +} + +#ifdef __BLOCKS__ +void +dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) +{ + dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), _dispatch_call_block_and_release); +} +#endif + +DISPATCH_NOINLINE +void +dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) +{ + dispatch_continuation_t dc; + + _dispatch_retain(dg); + dispatch_group_enter(dg); + + dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap(); + + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT|DISPATCH_OBJ_GROUP_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_group = dg; + + _dispatch_queue_push(dq, dc); +} diff --git a/src/semaphore.h b/src/semaphore.h new file mode 100644 index 000000000..882b567b4 --- /dev/null +++ b/src/semaphore.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SEMAPHORE__ +#define __DISPATCH_SEMAPHORE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/*! + * @typedef dispatch_semaphore_t + * + * @abstract + * A counting semaphore. + */ +DISPATCH_DECL(dispatch_semaphore); + +__BEGIN_DECLS + +/*! + * @function dispatch_semaphore_create + * + * @abstract + * Creates new counting semaphore with an initial value. + * + * @discussion + * Passing zero for the value is useful for when two threads need to reconcile + * the completion of a particular event. Passing a value greather than zero is + * useful for managing a finite pool of resources, where the pool size is equal + * to the value. + * + * @param value + * The starting value for the semaphore. Passing a value less than zero will + * cause NULL to be returned. + * + * @result + * The newly created semaphore, or NULL on failure. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NOTHROW +dispatch_semaphore_t +dispatch_semaphore_create(long value); + +/*! + * @function dispatch_semaphore_wait + * + * @abstract + * Wait (decrement) for a semaphore. + * + * @discussion + * Decrement the counting semaphore. If the resulting value is less than zero, + * this function waits in FIFO order for a signal to occur before returning. + * + * @param dsema + * The semaphore. The result of passing NULL in this parameter is undefined. + * + * @param timeout + * When to timeout (see dispatch_time). As a convenience, there are the + * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants. + * + * @result + * Returns zero on success, or non-zero if the timeout occurred. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +long +dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); + +/*! 
+ * @function dispatch_semaphore_signal + * + * @abstract + * Signal (increment) a semaphore. + * + * @discussion + * Increment the counting semaphore. If the previous value was less than zero, + * this function wakes a waiting thread before returning. + * + * @param dsema The counting semaphore. + * The result of passing NULL in this parameter is undefined. + * + * @result + * This function returns non-zero if a thread is woken. Otherwise, zero is + * returned. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +long +dispatch_semaphore_signal(dispatch_semaphore_t dsema); + +__END_DECLS + +#endif /* __DISPATCH_SEMAPHORE__ */ diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h new file mode 100644 index 000000000..3af28c06b --- /dev/null +++ b/src/semaphore_internal.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SEMAPHORE_INTERNAL__ +#define __DISPATCH_SEMAPHORE_INTERNAL__ + +struct dispatch_sema_notify_s { + struct dispatch_sema_notify_s *dsn_next; + dispatch_queue_t dsn_queue; + void *dsn_ctxt; + void (*dsn_func)(void *); +}; + +struct dispatch_semaphore_s { + DISPATCH_STRUCT_HEADER(dispatch_semaphore_s, dispatch_semaphore_vtable_s); + long dsema_value; + long dsema_orig; + size_t dsema_sent_ksignals; + semaphore_t dsema_port; + semaphore_t dsema_waiter_port; + size_t dsema_group_waiters; + struct dispatch_sema_notify_s *dsema_notify_head; + struct dispatch_sema_notify_s *dsema_notify_tail; +}; + +extern const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable; + +#endif diff --git a/src/shims.c b/src/shims.c new file mode 100644 index 000000000..a02d4535e --- /dev/null +++ b/src/shims.c @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +void * +dispatch_mach_msg_get_context(mach_msg_header_t *msg) +{ + mach_msg_context_trailer_t *tp; + void *context = NULL; + + tp = (mach_msg_context_trailer_t *)((uint8_t *)msg + round_msg(msg->msgh_size)); + if (tp->msgh_trailer_size >= (mach_msg_size_t)sizeof(mach_msg_context_trailer_t)) { + context = (void *)(uintptr_t)tp->msgh_context; + } + + return context; +} + +/* + * Raw Mach message support + */ +boolean_t +_dispatch_machport_callback(mach_msg_header_t *msg, mach_msg_header_t *reply, + void (*callback)(mach_msg_header_t *)) +{ + mig_reply_setup(msg, reply); + ((mig_reply_error_t*)reply)->RetCode = MIG_NO_REPLY; + + callback(msg); + + return TRUE; +} + +/* + * CFMachPort compatibility + */ +boolean_t +_dispatch_CFMachPortCallBack(mach_msg_header_t *msg, mach_msg_header_t *reply, + void (*callback)(struct __CFMachPort *, void *msg, signed long size, void *)) +{ + mig_reply_setup(msg, reply); + ((mig_reply_error_t*)reply)->RetCode = MIG_NO_REPLY; + + callback(NULL, msg, msg->msgh_size, dispatch_mach_msg_get_context(msg)); + + return TRUE; +} diff --git a/src/source.c b/src/source.c new file mode 100644 index 000000000..7259b0b0d --- /dev/null +++ b/src/source.c @@ -0,0 +1,1995 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include "protocol.h" +#include "protocolServer.h" +#include + +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_SYSCOUNT (EVFILT_SYSCOUNT + 3) + +#define DISPATCH_TIMER_INDEX_WALL 0 +#define DISPATCH_TIMER_INDEX_MACH 1 +static struct dispatch_kevent_s _dispatch_kevent_timer[] = { + { + .dk_kevent = { + .ident = DISPATCH_TIMER_INDEX_WALL, + .filter = DISPATCH_EVFILT_TIMER, + .udata = &_dispatch_kevent_timer[0], + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_timer[0].dk_sources), + }, + { + .dk_kevent = { + .ident = DISPATCH_TIMER_INDEX_MACH, + .filter = DISPATCH_EVFILT_TIMER, + .udata = &_dispatch_kevent_timer[1], + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_timer[1].dk_sources), + }, +}; +#define DISPATCH_TIMER_COUNT (sizeof _dispatch_kevent_timer / sizeof _dispatch_kevent_timer[0]) + +static struct dispatch_kevent_s _dispatch_kevent_data_or = { + .dk_kevent = { + .filter = DISPATCH_EVFILT_CUSTOM_OR, + .flags = EV_CLEAR, + .udata = &_dispatch_kevent_data_or, + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), +}; +static struct dispatch_kevent_s _dispatch_kevent_data_add = { + .dk_kevent = { + .filter = DISPATCH_EVFILT_CUSTOM_ADD, + .udata = &_dispatch_kevent_data_add, + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), +}; + +#ifndef DISPATCH_NO_LEGACY +struct dispatch_source_attr_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_source_attr_s); +}; + +struct dispatch_source_attr_s { + DISPATCH_STRUCT_HEADER(dispatch_source_attr_s, dispatch_source_attr_vtable_s); + void* finalizer_ctxt; + dispatch_source_finalizer_function_t finalizer_func; + void* context; +}; +#endif /* DISPATCH_NO_LEGACY */ + +#define _dispatch_source_call_block ((void *)-1) +static void 
_dispatch_source_latch_and_call(dispatch_source_t ds); +static void _dispatch_source_cancel_callout(dispatch_source_t ds); +static bool _dispatch_source_probe(dispatch_source_t ds); +static void _dispatch_source_dispose(dispatch_source_t ds); +static void _dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke); +static size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +static size_t dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz); +static dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); + +static void _dispatch_kevent_merge(dispatch_source_t ds); +static void _dispatch_kevent_release(dispatch_source_t ds); +static void _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); +static void _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); +static void _dispatch_kevent_machport_enable(dispatch_kevent_t dk); +static void _dispatch_kevent_machport_disable(dispatch_kevent_t dk); + +static void _dispatch_drain_mach_messages(struct kevent *ke); +static void _dispatch_timer_list_update(dispatch_source_t ds); + +static void +_dispatch_mach_notify_source_init(void *context __attribute__((unused))); + +static const char * +_evfiltstr(short filt) +{ + switch (filt) { +#define _evfilt2(f) case (f): return #f + _evfilt2(EVFILT_READ); + _evfilt2(EVFILT_WRITE); + _evfilt2(EVFILT_AIO); + _evfilt2(EVFILT_VNODE); + _evfilt2(EVFILT_PROC); + _evfilt2(EVFILT_SIGNAL); + _evfilt2(EVFILT_TIMER); + _evfilt2(EVFILT_MACHPORT); + _evfilt2(EVFILT_FS); + _evfilt2(EVFILT_USER); + _evfilt2(EVFILT_SESSION); + + _evfilt2(DISPATCH_EVFILT_TIMER); + _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); + _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); + default: + return "EVFILT_missing"; + } +} + +#define DSL_HASH_SIZE 256u // must be a power of two +#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) + +static TAILQ_HEAD(, dispatch_kevent_s) 
_dispatch_sources[DSL_HASH_SIZE]; + +static dispatch_kevent_t +_dispatch_kevent_find(uintptr_t ident, short filter) +{ + uintptr_t hash = DSL_HASH(filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident); + dispatch_kevent_t dki; + + TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) { + if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) { + break; + } + } + return dki; +} + +static void +_dispatch_kevent_insert(dispatch_kevent_t dk) +{ + uintptr_t ident = dk->dk_kevent.ident; + uintptr_t hash = DSL_HASH(dk->dk_kevent.filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident); + + TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); +} + +void +dispatch_source_cancel(dispatch_source_t ds) +{ +#if DISPATCH_DEBUG + dispatch_debug(ds, __FUNCTION__); +#endif + dispatch_atomic_or(&ds->ds_atomic_flags, DSF_CANCELED); + _dispatch_wakeup(ds); +} + +#ifndef DISPATCH_NO_LEGACY +void +_dispatch_source_legacy_xref_release(dispatch_source_t ds) +{ + if (ds->ds_is_legacy) { + if (!(ds->ds_timer.flags & DISPATCH_TIMER_ONESHOT)) { + dispatch_source_cancel(ds); + } + + // Clients often leave sources suspended at the last release + dispatch_atomic_and(&ds->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK); + } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH("Release of a suspended object"); + } + _dispatch_wakeup(ds); + _dispatch_release(ds); +} +#endif /* DISPATCH_NO_LEGACY */ + +long +dispatch_source_testcancel(dispatch_source_t ds) +{ + return (bool)(ds->ds_atomic_flags & DSF_CANCELED); +} + + +unsigned long +dispatch_source_get_mask(dispatch_source_t ds) +{ + return ds->ds_pending_data_mask; +} + +uintptr_t +dispatch_source_get_handle(dispatch_source_t ds) +{ + return (int)ds->ds_ident_hack; +} + +unsigned long +dispatch_source_get_data(dispatch_source_t ds) +{ + return ds->ds_data; +} + +#if DISPATCH_DEBUG +void +dispatch_debug_kevents(struct kevent* kev, size_t 
count, const char* str) +{ + size_t i; + for (i = 0; i < count; ++i) { + _dispatch_log("kevent[%lu] = { ident = %p, filter = %s, flags = 0x%x, fflags = 0x%x, data = %p, udata = %p }: %s", + i, (void*)kev[i].ident, _evfiltstr(kev[i].filter), kev[i].flags, kev[i].fflags, (void*)kev[i].data, (void*)kev[i].udata, str); + } +} +#endif + +static size_t +_dispatch_source_kevent_debug(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + size_t offset = _dispatch_source_debug(ds, buf, bufsiz); + offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", + ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); + return offset; +} + +static void +_dispatch_source_init_tail_queue_array(void *context __attribute__((unused))) +{ + unsigned int i; + for (i = 0; i < DSL_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_sources[i]); + } + + TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_WALL)], &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_MACH)], &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_or, dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); +} + +// Find existing kevents, and merge any new flags if necessary +void +_dispatch_kevent_merge(dispatch_source_t ds) +{ + static dispatch_once_t pred; + dispatch_kevent_t dk; + typeof(dk->dk_kevent.fflags) new_flags; + bool do_resume = false; + + if (ds->ds_is_installed) { + return; + } + ds->ds_is_installed = true; + + dispatch_once_f(&pred, NULL, _dispatch_source_init_tail_queue_array); + + dk = _dispatch_kevent_find(ds->ds_dkev->dk_kevent.ident, ds->ds_dkev->dk_kevent.filter); + + if (dk) { + // If an existing dispatch kevent is found, check to see if new flags + // need to be added to the existing kevent + new_flags = ~dk->dk_kevent.fflags & ds->ds_dkev->dk_kevent.fflags; + dk->dk_kevent.fflags |= 
ds->ds_dkev->dk_kevent.fflags; + free(ds->ds_dkev); + ds->ds_dkev = dk; + do_resume = new_flags; + } else { + dk = ds->ds_dkev; + _dispatch_kevent_insert(dk); + new_flags = dk->dk_kevent.fflags; + do_resume = true; + } + + TAILQ_INSERT_TAIL(&dk->dk_sources, ds, ds_list); + + // Re-register the kevent with the kernel if new flags were added + // by the dispatch kevent + if (do_resume) { + dk->dk_kevent.flags |= EV_ADD; + _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0); + ds->ds_is_armed = true; + } +} + + +void +_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) +{ + switch (dk->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + // these types not registered with kevent + return; + case EVFILT_MACHPORT: + _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + break; + case EVFILT_PROC: + if (dk->dk_kevent.flags & EV_ONESHOT) { + return; + } + // fall through + default: + _dispatch_update_kq(&dk->dk_kevent); + if (dk->dk_kevent.flags & EV_DISPATCH) { + dk->dk_kevent.flags &= ~EV_ADD; + } + break; + } +} + +dispatch_queue_t +_dispatch_source_invoke(dispatch_source_t ds) +{ + // This function performs all source actions. Each action is responsible + // for verifying that it takes place on the appropriate queue. If the + // current queue is not the correct queue for this action, the correct queue + // will be returned and the invoke will be re-driven on that queue. + + // The order of tests here in invoke and in probe should be consistent. + + dispatch_queue_t dq = _dispatch_queue_get_current(); + + if (!ds->ds_is_installed) { + // The source needs to be installed on the manager queue. + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + _dispatch_kevent_merge(ds); + } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + // The source has been cancelled and needs to be uninstalled from the + // manager queue. 
After uninstallation, the cancellation handler needs + // to be delivered to the target queue. + if (ds->ds_dkev) { + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + _dispatch_kevent_release(ds); + return ds->do_targetq; + } else if (ds->ds_cancel_handler) { + if (dq != ds->do_targetq) { + return ds->do_targetq; + } + } + _dispatch_source_cancel_callout(ds); + } else if (ds->ds_pending_data) { + // The source has pending data to deliver via the event handler callback + // on the target queue. Some sources need to be rearmed on the manager + // queue after event delivery. + if (dq != ds->do_targetq) { + return ds->do_targetq; + } + _dispatch_source_latch_and_call(ds); + if (ds->ds_needs_rearm) { + return &_dispatch_mgr_q; + } + } else if (ds->ds_needs_rearm && !ds->ds_is_armed) { + // The source needs to be rearmed on the manager queue. + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + _dispatch_kevent_resume(ds->ds_dkev, 0, 0); + ds->ds_is_armed = true; + } + + return NULL; +} + +bool +_dispatch_source_probe(dispatch_source_t ds) +{ + // This function determines whether the source needs to be invoked. + // The order of tests here in probe and in invoke should be consistent. + + if (!ds->ds_is_installed) { + // The source needs to be installed on the manager queue. + return true; + } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + // The source needs to be uninstalled from the manager queue, or the + // cancellation handler needs to be delivered to the target queue. + // Note: cancellation assumes installation. + if (ds->ds_dkev || ds->ds_cancel_handler) { + return true; + } + } else if (ds->ds_pending_data) { + // The source has pending data to deliver to the target queue. + return true; + } else if (ds->ds_needs_rearm && !ds->ds_is_armed) { + // The source needs to be rearmed on the manager queue. + return true; + } + // Nothing to do. 
+ return false; +} + +void +_dispatch_source_dispose(dispatch_source_t ds) +{ + _dispatch_queue_dispose((dispatch_queue_t)ds); +} + +static void +_dispatch_kevent_debugger2(void *context, dispatch_source_t unused __attribute__((unused))) +{ + struct sockaddr sa; + socklen_t sa_len = sizeof(sa); + int c, fd = (int)(long)context; + unsigned int i; + dispatch_kevent_t dk; + dispatch_source_t ds; + FILE *debug_stream; + + c = accept(fd, &sa, &sa_len); + if (c == -1) { + if (errno != EAGAIN) { + dispatch_assume_zero(errno); + } + return; + } +#if 0 + int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO + if (r == -1) { + dispatch_assume_zero(errno); + } +#endif + debug_stream = fdopen(c, "a"); + if (!dispatch_assume(debug_stream)) { + close(c); + return; + } + + fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); + fprintf(debug_stream, "Content-type: text/html\r\n"); + fprintf(debug_stream, "Pragma: nocache\r\n"); + fprintf(debug_stream, "\r\n"); + fprintf(debug_stream, "\nPID %u\n\n

    \n", getpid()); + + //fprintf(debug_stream, "DKDKDKDKDKDKDK\n"); + + for (i = 0; i < DSL_HASH_SIZE; i++) { + if (TAILQ_EMPTY(&_dispatch_sources[i])) { + continue; + } + TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { + fprintf(debug_stream, "\t
  • DK %p ident %lu filter %s flags 0x%hx fflags 0x%x data 0x%lx udata %p\n", + dk, dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, + dk->dk_kevent.fflags, dk->dk_kevent.data, dk->dk_kevent.udata); + fprintf(debug_stream, "\t\t
      \n"); + TAILQ_FOREACH(ds, &dk->dk_sources, ds_list) { + fprintf(debug_stream, "\t\t\t
    • DS %p refcnt 0x%x suspend 0x%x data 0x%lx mask 0x%lx flags 0x%x
    • \n", + ds, ds->do_ref_cnt, ds->do_suspend_cnt, ds->ds_pending_data, ds->ds_pending_data_mask, + ds->ds_atomic_flags); + if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { + dispatch_queue_t dq = ds->do_targetq; + fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x suspend 0x%x label: %s\n", dq, dq->do_ref_cnt, dq->do_suspend_cnt, dq->dq_label); + } + } + fprintf(debug_stream, "\t\t
    \n"); + fprintf(debug_stream, "\t
  • \n"); + } + } + fprintf(debug_stream, "
\n\n\n"); + fflush(debug_stream); + fclose(debug_stream); +} + +static void +_dispatch_kevent_debugger(void *context __attribute__((unused))) +{ + union { + struct sockaddr_in sa_in; + struct sockaddr sa; + } sa_u = { + .sa_in = { + .sin_family = AF_INET, + .sin_addr = { htonl(INADDR_LOOPBACK), }, + }, + }; + dispatch_source_t ds; + const char *valstr; + int val, r, fd, sock_opt = 1; + socklen_t slen = sizeof(sa_u); + + if (issetugid()) { + return; + } + valstr = getenv("LIBDISPATCH_DEBUGGER"); + if (!valstr) { + return; + } + val = atoi(valstr); + if (val == 2) { + sa_u.sa_in.sin_addr.s_addr = 0; + } + fd = socket(PF_INET, SOCK_STREAM, 0); + if (fd == -1) { + dispatch_assume_zero(errno); + return; + } + r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, (socklen_t) sizeof sock_opt); + if (r == -1) { + dispatch_assume_zero(errno); + goto out_bad; + } +#if 0 + r = fcntl(fd, F_SETFL, O_NONBLOCK); + if (r == -1) { + dispatch_assume_zero(errno); + goto out_bad; + } +#endif + r = bind(fd, &sa_u.sa, sizeof(sa_u)); + if (r == -1) { + dispatch_assume_zero(errno); + goto out_bad; + } + r = listen(fd, SOMAXCONN); + if (r == -1) { + dispatch_assume_zero(errno); + goto out_bad; + } + r = getsockname(fd, &sa_u.sa, &slen); + if (r == -1) { + dispatch_assume_zero(errno); + goto out_bad; + } + ds = dispatch_source_read_create_f(fd, NULL, &_dispatch_mgr_q, (void *)(long)fd, _dispatch_kevent_debugger2); + if (dispatch_assume(ds)) { + _dispatch_log("LIBDISPATCH: debug port: %hu", ntohs(sa_u.sa_in.sin_port)); + return; + } +out_bad: + close(fd); +} + +void +_dispatch_source_drain_kevent(struct kevent *ke) +{ + static dispatch_once_t pred; + dispatch_kevent_t dk = ke->udata; + dispatch_source_t dsi; + + dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); + + dispatch_debug_kevents(ke, 1, __func__); + + if (ke->filter == EVFILT_MACHPORT) { + return _dispatch_drain_mach_messages(ke); + } + dispatch_assert(dk); + + if (ke->flags & EV_ONESHOT) { + dk->dk_kevent.flags |= 
EV_ONESHOT; + } + + TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + _dispatch_source_merge_kevent(dsi, ke); + } +} + +static void +_dispatch_kevent_dispose(dispatch_kevent_t dk) +{ + uintptr_t key; + + switch (dk->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + // these sources live on statically allocated lists + return; + case EVFILT_MACHPORT: + _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); + break; + case EVFILT_PROC: + if (dk->dk_kevent.flags & EV_ONESHOT) { + break; // implicitly deleted + } + // fall through + default: + if (~dk->dk_kevent.flags & EV_DELETE) { + dk->dk_kevent.flags |= EV_DELETE; + _dispatch_update_kq(&dk->dk_kevent); + } + break; + } + + if (dk->dk_kevent.filter == EVFILT_MACHPORT) { + key = MACH_PORT_INDEX(dk->dk_kevent.ident); + } else { + key = dk->dk_kevent.ident; + } + + TAILQ_REMOVE(&_dispatch_sources[DSL_HASH(key)], dk, dk_list); + free(dk); +} + +void +_dispatch_kevent_release(dispatch_source_t ds) +{ + dispatch_kevent_t dk = ds->ds_dkev; + dispatch_source_t dsi; + uint32_t del_flags, fflags = 0; + + ds->ds_dkev = NULL; + + TAILQ_REMOVE(&dk->dk_sources, ds, ds_list); + + if (TAILQ_EMPTY(&dk->dk_sources)) { + _dispatch_kevent_dispose(dk); + } else { + TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + fflags |= (uint32_t)dsi->ds_pending_data_mask; + } + del_flags = (uint32_t)ds->ds_pending_data_mask & ~fflags; + if (del_flags) { + dk->dk_kevent.flags |= EV_ADD; + dk->dk_kevent.fflags = fflags; + _dispatch_kevent_resume(dk, 0, del_flags); + } + } + + ds->ds_is_armed = false; + ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_release(ds); // the retain is done at creation time +} + +void +_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) +{ + struct kevent fake; + + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + return; + } + + // EVFILT_PROC may fail with ESRCH when the 
process exists but is a zombie. + // We simulate an exit event in this case. + if (ke->flags & EV_ERROR) { + if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { + fake = *ke; + fake.flags &= ~EV_ERROR; + fake.fflags = NOTE_EXIT; + fake.data = 0; + ke = &fake; + } else { + // log the unexpected error + dispatch_assume_zero(ke->data); + return; + } + } + + if (ds->ds_is_level) { + // ke->data is signed and "negative available data" makes no sense + // zero bytes happens when EV_EOF is set + // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file + dispatch_assert(ke->data >= 0l); + ds->ds_pending_data = ~ke->data; + } else if (ds->ds_is_adder) { + dispatch_atomic_add(&ds->ds_pending_data, ke->data); + } else { + dispatch_atomic_or(&ds->ds_pending_data, ke->fflags & ds->ds_pending_data_mask); + } + + // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery + if (ds->ds_needs_rearm) { + ds->ds_is_armed = false; + } + + _dispatch_wakeup(ds); +} + +void +_dispatch_source_latch_and_call(dispatch_source_t ds) +{ + unsigned long prev; + + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + return; + } + prev = dispatch_atomic_xchg(&ds->ds_pending_data, 0); + if (ds->ds_is_level) { + ds->ds_data = ~prev; + } else { + ds->ds_data = prev; + } + if (dispatch_assume(prev)) { + if (ds->ds_handler_func) { + ds->ds_handler_func(ds->ds_handler_ctxt, ds); + } + } +} + +void +_dispatch_source_cancel_callout(dispatch_source_t ds) +{ + ds->ds_pending_data_mask = 0; + ds->ds_pending_data = 0; + ds->ds_data = 0; + +#ifdef __BLOCKS__ + if (ds->ds_handler_is_block) { + Block_release(ds->ds_handler_ctxt); + ds->ds_handler_is_block = false; + ds->ds_handler_func = NULL; + ds->ds_handler_ctxt = NULL; + } +#endif + + if (!ds->ds_cancel_handler) { + return; + } + if (ds->ds_cancel_is_block) { +#ifdef __BLOCKS__ + dispatch_block_t b = ds->ds_cancel_handler; + if (ds->ds_atomic_flags & DSF_CANCELED) { + b(); + } + 
Block_release(ds->ds_cancel_handler); + ds->ds_cancel_is_block = false; +#endif + } else { + dispatch_function_t f = ds->ds_cancel_handler; + if (ds->ds_atomic_flags & DSF_CANCELED) { + f(ds->do_ctxt); + } + } + ds->ds_cancel_handler = NULL; +} + +const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable = { + .do_type = DISPATCH_SOURCE_KEVENT_TYPE, + .do_kind = "kevent-source", + .do_invoke = _dispatch_source_invoke, + .do_dispose = _dispatch_source_dispose, + .do_probe = _dispatch_source_probe, + .do_debug = _dispatch_source_kevent_debug, +}; + +void +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +{ + struct kevent kev = { + .fflags = (typeof(kev.fflags))val, + .data = val, + }; + + dispatch_assert(ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_ADD || + ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_OR); + + _dispatch_source_merge_kevent(ds, &kev); +} + +size_t +dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = ds->do_targetq; + return snprintf(buf, bufsiz, + "target = %s[%p], pending_data = 0x%lx, pending_data_mask = 0x%lx, ", + target ? 
target->dq_label : "", target, + ds->ds_pending_data, ds->ds_pending_data_mask); +} + +size_t +_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(ds), ds); + offset += dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); + offset += dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); + return offset; +} + +#ifndef DISPATCH_NO_LEGACY +static void +dispatch_source_attr_dispose(dispatch_source_attr_t attr) +{ + // release the finalizer block if necessary + dispatch_source_attr_set_finalizer(attr, NULL); + _dispatch_dispose(attr); +} + +static const struct dispatch_source_attr_vtable_s dispatch_source_attr_vtable = { + .do_type = DISPATCH_SOURCE_ATTR_TYPE, + .do_kind = "source-attr", + .do_dispose = dispatch_source_attr_dispose, +}; + +dispatch_source_attr_t +dispatch_source_attr_create(void) +{ + dispatch_source_attr_t rval = calloc(1, sizeof(struct dispatch_source_attr_s)); + + if (rval) { + rval->do_vtable = &dispatch_source_attr_vtable; + rval->do_next = DISPATCH_OBJECT_LISTLESS; + rval->do_targetq = dispatch_get_global_queue(0, 0); + rval->do_ref_cnt = 1; + rval->do_xref_cnt = 1; + } + + return rval; +} + +void +dispatch_source_attr_set_finalizer_f(dispatch_source_attr_t attr, + void *context, dispatch_source_finalizer_function_t finalizer) +{ +#ifdef __BLOCKS__ + if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { + Block_release(attr->finalizer_ctxt); + } +#endif + + attr->finalizer_ctxt = context; + attr->finalizer_func = finalizer; +} + +#ifdef __BLOCKS__ +long +dispatch_source_attr_set_finalizer(dispatch_source_attr_t attr, + dispatch_source_finalizer_t finalizer) +{ + void *ctxt; + dispatch_source_finalizer_function_t func; + + if (finalizer) { + if (!(ctxt = Block_copy(finalizer))) { + return 1; + } + func = (void *)_dispatch_call_block_and_release2; + } else { + ctxt = NULL; + func = NULL; + } + + 
dispatch_source_attr_set_finalizer_f(attr, ctxt, func); + + return 0; +} + +dispatch_source_finalizer_t +dispatch_source_attr_get_finalizer(dispatch_source_attr_t attr) +{ + if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { + return (dispatch_source_finalizer_t)attr->finalizer_ctxt; + } else if (attr->finalizer_func == NULL) { + return NULL; + } else { + abort(); // finalizer is not a block... + } +} +#endif + +void +dispatch_source_attr_set_context(dispatch_source_attr_t attr, void *context) +{ + attr->context = context; +} + +dispatch_source_attr_t +dispatch_source_attr_copy(dispatch_source_attr_t proto) +{ + dispatch_source_attr_t rval = NULL; + + if (proto && (rval = malloc(sizeof(struct dispatch_source_attr_s)))) { + memcpy(rval, proto, sizeof(struct dispatch_source_attr_s)); +#ifdef __BLOCKS__ + if (rval->finalizer_func == (void*)_dispatch_call_block_and_release2) { + rval->finalizer_ctxt = Block_copy(rval->finalizer_ctxt); + } +#endif + } else if (!proto) { + rval = dispatch_source_attr_create(); + } + return rval; +} +#endif /* DISPATCH_NO_LEGACY */ + + +struct dispatch_source_type_s { + struct kevent ke; + uint64_t mask; +}; + +const struct dispatch_source_type_s _dispatch_source_type_timer = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + }, + .mask = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_ONESHOT|DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK, +}; + +const struct dispatch_source_type_s _dispatch_source_type_read = { + .ke = { + .filter = EVFILT_READ, + .flags = EV_DISPATCH, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_write = { + .ke = { + .filter = EVFILT_WRITE, + .flags = EV_DISPATCH, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_proc = { + .ke = { + .filter = EVFILT_PROC, + .flags = EV_CLEAR, + }, + .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_SIGNAL|NOTE_REAP, +}; + +const struct dispatch_source_type_s _dispatch_source_type_signal = { + .ke = { + .filter = EVFILT_SIGNAL, + }, 
+}; + +const struct dispatch_source_type_s _dispatch_source_type_vnode = { + .ke = { + .filter = EVFILT_VNODE, + .flags = EV_CLEAR, + }, + .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|NOTE_RENAME|NOTE_REVOKE|NOTE_NONE, +}; + +const struct dispatch_source_type_s _dispatch_source_type_vfs = { + .ke = { + .filter = EVFILT_FS, + .flags = EV_CLEAR, + }, + .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK|VQ_UPDATE|VQ_VERYLOWDISK, +}; + +const struct dispatch_source_type_s _dispatch_source_type_mach_send = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_DISPATCH, + .fflags = DISPATCH_MACHPORT_DEAD, + }, + .mask = DISPATCH_MACH_SEND_DEAD, +}; + +const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_DISPATCH, + .fflags = DISPATCH_MACHPORT_RECV, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_data_add = { + .ke = { + .filter = DISPATCH_EVFILT_CUSTOM_ADD, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_data_or = { + .ke = { + .filter = DISPATCH_EVFILT_CUSTOM_OR, + .flags = EV_CLEAR, + .fflags = ~0, + }, +}; + +dispatch_source_t +dispatch_source_create(dispatch_source_type_t type, + uintptr_t handle, + unsigned long mask, + dispatch_queue_t q) +{ + const struct kevent *proto_kev = &type->ke; + dispatch_source_t ds = NULL; + dispatch_kevent_t dk = NULL; + + // input validation + if (type == NULL || (mask & ~type->mask)) { + goto out_bad; + } + + switch (type->ke.filter) { + case EVFILT_SIGNAL: + if (handle >= NSIG) { + goto out_bad; + } + break; + case EVFILT_FS: + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_TIMER: + if (handle) { + goto out_bad; + } + break; + default: + break; + } + + ds = calloc(1ul, sizeof(struct dispatch_source_s)); + if (slowpath(!ds)) { + goto out_bad; + } + dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); + if 
(slowpath(!dk)) { + goto out_bad; + } + + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = dk; + TAILQ_INIT(&dk->dk_sources); + + // Initialize as a queue first, then override some settings below. + _dispatch_queue_init((dispatch_queue_t)ds); + strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); + + // Dispatch Object + ds->do_vtable = &_dispatch_source_kevent_vtable; + ds->do_ref_cnt++; // the reference the manger queue holds + ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; + // do_targetq will be retained below, past point of no-return + ds->do_targetq = q; + + // Dispatch Source + ds->ds_ident_hack = dk->dk_kevent.ident; + ds->ds_dkev = dk; + ds->ds_pending_data_mask = dk->dk_kevent.fflags; + if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { + if (proto_kev->filter != EVFILT_MACHPORT) { + ds->ds_is_level = true; + } + ds->ds_needs_rearm = true; + } else if (!(EV_CLEAR & proto_kev->flags)) { + // we cheat and use EV_CLEAR to mean a "flag thingy" + ds->ds_is_adder = true; + } + + // If its a timer source, it needs to be re-armed + if (type->ke.filter == DISPATCH_EVFILT_TIMER) { + ds->ds_needs_rearm = true; + } + + dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); +#if DISPATCH_DEBUG + dispatch_debug(ds, __FUNCTION__); +#endif + + // Some sources require special processing + if (type == DISPATCH_SOURCE_TYPE_MACH_SEND) { + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init); + } else if (type == DISPATCH_SOURCE_TYPE_TIMER) { + ds->ds_timer.flags = mask; + } + + _dispatch_retain(ds->do_targetq); + return ds; + +out_bad: + free(ds); + free(dk); + return NULL; +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +static void +_dispatch_source_set_event_handler2(void *context) +{ + struct Block_layout *bl = context; + + dispatch_source_t ds = 
(dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + + if (ds->ds_handler_is_block && ds->ds_handler_ctxt) { + Block_release(ds->ds_handler_ctxt); + } + ds->ds_handler_func = bl ? (void *)bl->invoke : NULL; + ds->ds_handler_ctxt = bl; + ds->ds_handler_is_block = true; +} + +void +dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler) +{ + dispatch_assert(!ds->ds_is_legacy); + handler = _dispatch_Block_copy(handler); + dispatch_barrier_async_f((dispatch_queue_t)ds, + handler, _dispatch_source_set_event_handler2); +} + +static void +_dispatch_source_set_event_handler_f(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + + if (ds->ds_handler_is_block && ds->ds_handler_ctxt) { + Block_release(ds->ds_handler_ctxt); + } + ds->ds_handler_func = context; + ds->ds_handler_ctxt = ds->do_ctxt; + ds->ds_handler_is_block = false; +} + +void +dispatch_source_set_event_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + dispatch_assert(!ds->ds_is_legacy); + dispatch_barrier_async_f((dispatch_queue_t)ds, + handler, _dispatch_source_set_event_handler_f); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +static void +_dispatch_source_set_cancel_handler2(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + + if (ds->ds_cancel_is_block && ds->ds_cancel_handler) { + Block_release(ds->ds_cancel_handler); + } + ds->ds_cancel_handler = context; + ds->ds_cancel_is_block = true; +} + +void +dispatch_source_set_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + dispatch_assert(!ds->ds_is_legacy); + handler = _dispatch_Block_copy(handler); + dispatch_barrier_async_f((dispatch_queue_t)ds, + 
handler, _dispatch_source_set_cancel_handler2); +} + +static void +_dispatch_source_set_cancel_handler_f(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + + if (ds->ds_cancel_is_block && ds->ds_cancel_handler) { + Block_release(ds->ds_cancel_handler); + } + ds->ds_cancel_handler = context; + ds->ds_cancel_is_block = false; +} + +void +dispatch_source_set_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + dispatch_assert(!ds->ds_is_legacy); + dispatch_barrier_async_f((dispatch_queue_t)ds, + handler, _dispatch_source_set_cancel_handler_f); +} + +#ifndef DISPATCH_NO_LEGACY +// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +dispatch_source_t +_dispatch_source_create2(dispatch_source_t ds, + dispatch_source_attr_t attr, + void *context, + dispatch_source_handler_function_t handler) +{ + if (ds == NULL || handler == NULL) { + return NULL; + } + + ds->ds_is_legacy = true; + + ds->ds_handler_func = handler; + ds->ds_handler_ctxt = context; + + if (attr && attr != DISPATCH_SOURCE_CREATE_SUSPENDED) { + ds->dq_finalizer_ctxt = attr->finalizer_ctxt; + ds->dq_finalizer_func = (typeof(ds->dq_finalizer_func))attr->finalizer_func; + ds->do_ctxt = attr->context; + } +#ifdef __BLOCKS__ + if (ds->dq_finalizer_func == (void*)_dispatch_call_block_and_release2) { + ds->dq_finalizer_ctxt = Block_copy(ds->dq_finalizer_ctxt); + if (!ds->dq_finalizer_ctxt) { + goto out_bad; + } + } + if (handler == _dispatch_source_call_block) { + struct Block_layout *bl = ds->ds_handler_ctxt = Block_copy(context); + if (!ds->ds_handler_ctxt) { + if (ds->dq_finalizer_func == (void*)_dispatch_call_block_and_release2) { + Block_release(ds->dq_finalizer_ctxt); + } + goto out_bad; + } + ds->ds_handler_func = (void *)bl->invoke; + ds->ds_handler_is_block = true; + } + + // all legacy sources get a cancellation event on the normal 
event handler. + dispatch_source_handler_function_t func = ds->ds_handler_func; + dispatch_source_handler_t block = ds->ds_handler_ctxt; + void *ctxt = ds->ds_handler_ctxt; + bool handler_is_block = ds->ds_handler_is_block; + + ds->ds_cancel_is_block = true; + if (handler_is_block) { + ds->ds_cancel_handler = _dispatch_Block_copy(^{ + block(ds); + }); + } else { + ds->ds_cancel_handler = _dispatch_Block_copy(^{ + func(ctxt, ds); + }); + } +#endif + if (attr != DISPATCH_SOURCE_CREATE_SUSPENDED) { + dispatch_resume(ds); + } + + return ds; + +out_bad: + free(ds); + return NULL; +} + +long +dispatch_source_get_error(dispatch_source_t ds, long *err_out) +{ + // 6863892 don't report ECANCELED until kevent is unregistered + if ((ds->ds_atomic_flags & DSF_CANCELED) && !ds->ds_dkev) { + if (err_out) { + *err_out = ECANCELED; + } + return DISPATCH_ERROR_DOMAIN_POSIX; + } else { + return DISPATCH_ERROR_DOMAIN_NO_ERROR; + } +} +#endif /* DISPATCH_NO_LEGACY */ + +// Updates the ordered list of timers based on next fire date for changes to ds. +// Should only be called from the context of _dispatch_mgr_q. +void +_dispatch_timer_list_update(dispatch_source_t ds) +{ + dispatch_source_t dsi = NULL; + int idx; + + dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q); + + // do not reschedule timers unregistered with _dispatch_kevent_release() + if (!ds->ds_dkev) { + return; + } + + // Ensure the source is on the global kevent lists before it is removed and + // readded below. 
+ _dispatch_kevent_merge(ds); + + TAILQ_REMOVE(&ds->ds_dkev->dk_sources, ds, ds_list); + + // change the list if the clock type has changed + if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { + idx = DISPATCH_TIMER_INDEX_WALL; + } else { + idx = DISPATCH_TIMER_INDEX_MACH; + } + ds->ds_dkev = &_dispatch_kevent_timer[idx]; + + if (ds->ds_timer.target) { + TAILQ_FOREACH(dsi, &ds->ds_dkev->dk_sources, ds_list) { + if (dsi->ds_timer.target == 0 || ds->ds_timer.target < dsi->ds_timer.target) { + break; + } + } + } + + if (dsi) { + TAILQ_INSERT_BEFORE(dsi, ds, ds_list); + } else { + TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds, ds_list); + } +} + +static void +_dispatch_run_timers2(unsigned int timer) +{ + dispatch_source_t ds; + uint64_t now, missed; + + if (timer == DISPATCH_TIMER_INDEX_MACH) { + now = mach_absolute_time(); + } else { + now = _dispatch_get_nanoseconds(); + } + + while ((ds = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources))) { + // We may find timers on the wrong list due to a pending update from + // dispatch_source_set_timer. Force an update of the list in that case. + if (timer != ds->ds_ident_hack) { + _dispatch_timer_list_update(ds); + continue; + } + if (!ds->ds_timer.target) { + // no configured timers on the list + break; + } + if (ds->ds_timer.target > now) { + // Done running timers for now. + break; + } + + if (ds->ds_timer.flags & (DISPATCH_TIMER_ONESHOT|DISPATCH_TIMER_ABSOLUTE)) { + dispatch_atomic_inc(&ds->ds_pending_data); + ds->ds_timer.target = 0; + } else { + // Calculate number of missed intervals. 
+ missed = (now - ds->ds_timer.target) / ds->ds_timer.interval; + dispatch_atomic_add(&ds->ds_pending_data, missed + 1); + ds->ds_timer.target += (missed + 1) * ds->ds_timer.interval; + } + + _dispatch_timer_list_update(ds); + _dispatch_wakeup(ds); + } +} + +void +_dispatch_run_timers(void) +{ + unsigned int i; + for (i = 0; i < DISPATCH_TIMER_COUNT; i++) { + _dispatch_run_timers2(i); + } +} + +#if defined(__i386__) || defined(__x86_64__) +// these architectures always return mach_absolute_time() in nanoseconds +#define _dispatch_convert_mach2nano(x) (x) +#define _dispatch_convert_nano2mach(x) (x) +#else +static mach_timebase_info_data_t tbi; +static dispatch_once_t tbi_pred; + +static void +_dispatch_convert_init(void *context __attribute__((unused))) +{ + dispatch_assume_zero(mach_timebase_info(&tbi)); +} + +static uint64_t +_dispatch_convert_mach2nano(uint64_t val) +{ +#ifdef __LP64__ + __uint128_t tmp; +#else + long double tmp; +#endif + + dispatch_once_f(&tbi_pred, NULL, _dispatch_convert_init); + + tmp = val; + tmp *= tbi.numer; + tmp /= tbi.denom; + + return tmp; +} + +static uint64_t +_dispatch_convert_nano2mach(uint64_t val) +{ +#ifdef __LP64__ + __uint128_t tmp; +#else + long double tmp; +#endif + + dispatch_once_f(&tbi_pred, NULL, _dispatch_convert_init); + + tmp = val; + tmp *= tbi.denom; + tmp /= tbi.numer; + + return tmp; +} +#endif + +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_SEC 3153600l +#define FOREVER_NSEC 31536000000000000ull + +struct timespec * +_dispatch_get_next_timer_fire(struct timespec *howsoon) +{ + // + // kevent(2) does not allow large timeouts, so we use a long timeout + // instead (approximately 1 year). + dispatch_source_t ds = NULL; + unsigned int timer; + uint64_t now, delta_tmp, delta = UINT64_MAX; + + // We are looking for the first unsuspended timer which has its target + // time set. Given timers are kept in order, if we hit an timer that's + // unset there's no point in continuing down the list. 
+ for (timer = 0; timer < DISPATCH_TIMER_COUNT; timer++) { + TAILQ_FOREACH(ds, &_dispatch_kevent_timer[timer].dk_sources, ds_list) { + if (!ds->ds_timer.target) { + break; + } + if (DISPATCH_OBJECT_SUSPENDED(ds)) { + ds->ds_is_armed = false; + } else { + break; + } + } + + if (!ds || !ds->ds_timer.target) { + continue; + } + + if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { + now = _dispatch_get_nanoseconds(); + } else { + now = mach_absolute_time(); + } + if (ds->ds_timer.target <= now) { + howsoon->tv_sec = 0; + howsoon->tv_nsec = 0; + return howsoon; + } + + // the subtraction cannot go negative because the previous "if" + // verified that the target is greater than now. + delta_tmp = ds->ds_timer.target - now; + if (!(ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK)) { + delta_tmp = _dispatch_convert_mach2nano(delta_tmp); + } + if (delta_tmp < delta) { + delta = delta_tmp; + } + } + if (slowpath(delta > FOREVER_NSEC)) { + return NULL; + } else { + howsoon->tv_sec = (time_t)(delta / NSEC_PER_SEC); + howsoon->tv_nsec = (long)(delta % NSEC_PER_SEC); + } + return howsoon; +} + +struct dispatch_set_timer_params { + dispatch_source_t ds; + uintptr_t ident; + struct dispatch_timer_source_s values; +}; + +// To be called from the context of the _dispatch_mgr_q +static void +_dispatch_source_set_timer2(void *context) +{ + struct dispatch_set_timer_params *params = context; + dispatch_source_t ds = params->ds; + ds->ds_ident_hack = params->ident; + ds->ds_timer = params->values; + _dispatch_timer_list_update(ds); + dispatch_resume(ds); + dispatch_release(ds); + free(params); +} + +void +dispatch_source_set_timer(dispatch_source_t ds, + dispatch_time_t start, + uint64_t interval, + uint64_t leeway) +{ + struct dispatch_set_timer_params *params; + + // we use zero internally to mean disabled + if (interval == 0) { + interval = 1; + } else if ((int64_t)interval < 0) { + // 6866347 - make sure nanoseconds won't overflow + interval = INT64_MAX; + } + + // Suspend the 
source so that it doesn't fire with pending changes + // The use of suspend/resume requires the external retain/release + dispatch_retain(ds); + dispatch_suspend(ds); + + if (start == DISPATCH_TIME_NOW) { + start = mach_absolute_time(); + } else if (start == DISPATCH_TIME_FOREVER) { + start = INT64_MAX; + } + + while (!(params = malloc(sizeof(struct dispatch_set_timer_params)))) { + sleep(1); + } + + params->ds = ds; + params->values.flags = ds->ds_timer.flags; + + if ((int64_t)start < 0) { + // wall clock + params->ident = DISPATCH_TIMER_INDEX_WALL; + params->values.start = -((int64_t)start); + params->values.target = -((int64_t)start); + params->values.interval = interval; + params->values.leeway = leeway; + params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; + } else { + // mach clock + params->ident = DISPATCH_TIMER_INDEX_MACH; + params->values.start = start; + params->values.target = start; + params->values.interval = _dispatch_convert_nano2mach(interval); + params->values.leeway = _dispatch_convert_nano2mach(leeway); + params->values.flags &= ~DISPATCH_TIMER_WALL_CLOCK; + } + + dispatch_barrier_async_f(&_dispatch_mgr_q, params, _dispatch_source_set_timer2); +} + +#ifndef DISPATCH_NO_LEGACY +// LEGACY +long +dispatch_source_timer_set_time(dispatch_source_t ds, uint64_t nanoseconds, uint64_t leeway) +{ + dispatch_time_t start; + if (nanoseconds == 0) { + nanoseconds = 1; + } + if (ds->ds_timer.flags == (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK)) { + static const struct timespec t0; + start = dispatch_walltime(&t0, nanoseconds); + } else if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { + start = dispatch_walltime(DISPATCH_TIME_NOW, nanoseconds); + } else { + start = dispatch_time(DISPATCH_TIME_NOW, nanoseconds); + } + if (ds->ds_timer.flags & (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_ONESHOT)) { + // 6866347 - make sure nanoseconds won't overflow + nanoseconds = INT64_MAX; // non-repeating (~292 years) + } + dispatch_source_set_timer(ds, start, 
nanoseconds, leeway); + return 0; +} + +// LEGACY +uint64_t +dispatch_event_get_nanoseconds(dispatch_source_t ds) +{ + if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { + return ds->ds_timer.interval; + } else { + return _dispatch_convert_mach2nano(ds->ds_timer.interval); + } +} +#endif /* DISPATCH_NO_LEGACY */ + +static dispatch_source_t _dispatch_mach_notify_source; +static mach_port_t _dispatch_port_set; +static mach_port_t _dispatch_event_port; + +#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) +#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) + +#define _DISPATCH_MACHPORT_HASH_SIZE 32 +#define _DISPATCH_MACHPORT_HASH(x) _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) + +static void _dispatch_port_set_init(void *); +static mach_port_t _dispatch_get_port_set(void); + +void +_dispatch_drain_mach_messages(struct kevent *ke) +{ + dispatch_source_t dsi; + dispatch_kevent_t dk; + struct kevent ke2; + + if (!dispatch_assume(ke->data)) { + return; + } + dk = _dispatch_kevent_find(ke->data, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + return; + } + _dispatch_kevent_machport_disable(dk); // emulate EV_DISPATCH + + EV_SET(&ke2, ke->data, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, DISPATCH_MACHPORT_RECV, 0, dk); + + TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + _dispatch_source_merge_kevent(dsi, &ke2); + } +} + +void +_dispatch_port_set_init(void *context __attribute__((unused))) +{ + struct kevent kev = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD, + }; + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_port_set); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &_dispatch_event_port); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + kr = mach_port_move_member(mach_task_self(), _dispatch_event_port, _dispatch_port_set); + 
DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + + kev.ident = _dispatch_port_set; + + _dispatch_update_kq(&kev); +} + +mach_port_t +_dispatch_get_port_set(void) +{ + static dispatch_once_t pred; + + dispatch_once_f(&pred, NULL, _dispatch_port_set_init); + + return _dispatch_port_set; +} + +void +_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) +{ + mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; + kern_return_t kr; + + if ((new_flags & DISPATCH_MACHPORT_RECV) || (!new_flags && !del_flags && dk->dk_kevent.fflags & DISPATCH_MACHPORT_RECV)) { + _dispatch_kevent_machport_enable(dk); + } + if (new_flags & DISPATCH_MACHPORT_DEAD) { + kr = mach_port_request_notification(mach_task_self(), port, MACH_NOTIFY_DEAD_NAME, 1, + _dispatch_event_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + + switch(kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Supress errors + break; + default: + // Else, we dont expect any errors from mach. Log any errors if we do + if (dispatch_assume_zero(kr)) { + // log the error + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the Mach + // dead-name notification on this port. We should technically cache the + // previous port and message it when the kernel messages our port. Or + // we can just say screw those subsystems and drop the previous port. 
+ // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + } + + if (del_flags & DISPATCH_MACHPORT_RECV) { + _dispatch_kevent_machport_disable(dk); + } + if (del_flags & DISPATCH_MACHPORT_DEAD) { + kr = mach_port_request_notification(mach_task_self(), (mach_port_t)dk->dk_kevent.ident, + MACH_NOTIFY_DEAD_NAME, 1, MACH_PORT_NULL, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } else if (previous) { + // the kernel has not consumed the right yet + dispatch_assume_zero(_dispatch_send_consume_send_once_right(previous)); + } + } + } +} + +void +_dispatch_kevent_machport_enable(dispatch_kevent_t dk) +{ + mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; + kern_return_t kr; + + kr = mach_port_move_member(mach_task_self(), mp, _dispatch_get_port_set()); + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_NAME: +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach receive right 0x%x destroyed prematurely", mp); +#endif + break; + default: + dispatch_assume_zero(kr); + } +} + +void +_dispatch_kevent_machport_disable(dispatch_kevent_t dk) +{ + mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; + kern_return_t kr; + + kr = mach_port_move_member(mach_task_self(), mp, 0); + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_RIGHT: + case KERN_INVALID_NAME: +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach receive right 0x%x destroyed prematurely", mp); +#endif + break; + case 0: + break; + default: + dispatch_assume_zero(kr); + break; + } +} + +#define _DISPATCH_MIN_MSG_SZ (8ul * 1024ul - MAX_TRAILER_SIZE) +#ifndef DISPATCH_NO_LEGACY +dispatch_source_t +dispatch_source_mig_create(mach_port_t mport, size_t max_msg_size, dispatch_source_attr_t attr, + 
dispatch_queue_t dq, dispatch_mig_callback_t mig_callback) +{ + if (max_msg_size < _DISPATCH_MIN_MSG_SZ) { + max_msg_size = _DISPATCH_MIN_MSG_SZ; + } + return dispatch_source_machport_create(mport, DISPATCH_MACHPORT_RECV, attr, dq, + ^(dispatch_source_t ds) { + if (!dispatch_source_get_error(ds, NULL)) { + if (dq->dq_width != 1) { + dispatch_retain(ds); // this is a shim -- use the external retain + dispatch_async(dq, ^{ + dispatch_mig_server(ds, max_msg_size, mig_callback); + dispatch_release(ds); // this is a shim -- use the external release + }); + } else { + dispatch_mig_server(ds, max_msg_size, mig_callback); + } + } + }); +} +#endif /* DISPATCH_NO_LEGACY */ + +static void +_dispatch_mach_notify_source_init(void *context __attribute__((unused))) +{ + size_t maxsz = sizeof(union __RequestUnion___dispatch_send_libdispatch_internal_protocol_subsystem); + + if (sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem) > maxsz) { + maxsz = sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem); + } + + _dispatch_get_port_set(); + + _dispatch_mach_notify_source = dispatch_source_mig_create(_dispatch_event_port, + maxsz, NULL, &_dispatch_mgr_q, libdispatch_internal_protocol_server); + + dispatch_assert(_dispatch_mach_notify_source); +} + +kern_return_t +_dispatch_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)), mach_port_name_t name) +{ + dispatch_source_t dsi; + dispatch_kevent_t dk; + struct kevent kev; + +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x deleted prematurely", name); +#endif + + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dk) { + goto out; + } + + EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH|EV_EOF, DISPATCH_MACHPORT_DELETED, 0, dk); + + TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + _dispatch_source_merge_kevent(dsi, &kev); + // this can never happen again + // this must happen after the merge + // this may be 
racy in the future, but we don't provide a 'setter' API for the mask yet + dsi->ds_pending_data_mask &= ~DISPATCH_MACHPORT_DELETED; + } + + // no more sources have this flag + dk->dk_kevent.fflags &= ~DISPATCH_MACHPORT_DELETED; + +out: + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)), mach_port_t name) +{ + kern_return_t kr; + // this function should never be called + dispatch_assume_zero(name); + kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscnt __attribute__((unused))) +{ + // this function should never be called + dispatch_assume_zero(notify); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_once(mach_port_t notify __attribute__((unused))) +{ + // we only register for dead-name notifications + // some code deallocated our send-once right without consuming it +#if DISPATCH_DEBUG + _dispatch_log("Corruption: An app/library deleted a libdispatch dead-name notification"); +#endif + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_dead_name(mach_port_t notify __attribute__((unused)), mach_port_name_t name) +{ + dispatch_source_t dsi; + dispatch_kevent_t dk; + struct kevent kev; + kern_return_t kr; + + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dk) { + goto out; + } + + EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH|EV_EOF, DISPATCH_MACHPORT_DEAD, 0, dk); + + TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + _dispatch_source_merge_kevent(dsi, &kev); + // this can never happen again + // this must happen after the merge + // this may be racy in the future, but we don't provide a 'setter' API for the mask yet + dsi->ds_pending_data_mask &= ~DISPATCH_MACHPORT_DEAD; + } + + // no more sources have this flag + 
dk->dk_kevent.fflags &= ~DISPATCH_MACHPORT_DEAD; + +out: + // the act of receiving a dead name notification allocates a dead-name right that must be deallocated + kr = mach_port_deallocate(mach_task_self(), name); + DISPATCH_VERIFY_MIG(kr); + //dispatch_assume_zero(kr); + + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_wakeup_main_thread(mach_port_t mp __attribute__((unused))) +{ + // dummy function just to pop out the main thread out of mach_msg() + return 0; +} + +kern_return_t +_dispatch_consume_send_once_right(mach_port_t mp __attribute__((unused))) +{ + // dummy function to consume a send-once right + return 0; +} + +mach_msg_return_t +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) +{ + mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0); + mach_msg_options_t tmp_options = options; + mig_reply_error_t *bufTemp, *bufRequest, *bufReply; + mach_msg_return_t kr = 0; + unsigned int cnt = 1000; // do not stall out serial queues + int demux_success; + + maxmsgsz += MAX_TRAILER_SIZE; + + // XXX FIXME -- allocate these elsewhere + bufRequest = alloca(maxmsgsz); + bufReply = alloca(maxmsgsz); + bufReply->Head.msgh_size = 0; // make CLANG happy + + // XXX FIXME -- change this to not starve out the target queue + for (;;) { + if (DISPATCH_OBJECT_SUSPENDED(ds) || (--cnt == 0)) { + options &= ~MACH_RCV_MSG; + tmp_options &= ~MACH_RCV_MSG; + + if (!(tmp_options & MACH_SEND_MSG)) { + break; + } + } + + kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, + (mach_msg_size_t)maxmsgsz, (mach_port_t)ds->ds_ident_hack, 0, 0); + + tmp_options = options; + + if (slowpath(kr)) { + switch (kr) { + case MACH_SEND_INVALID_DEST: + case MACH_SEND_TIMED_OUT: + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_destroy(&bufReply->Head); + } + break; + case MACH_RCV_TIMED_OUT: + case 
MACH_RCV_INVALID_NAME: + break; + default: + dispatch_assume_zero(kr); + break; + } + break; + } + + if (!(tmp_options & MACH_RCV_MSG)) { + break; + } + + bufTemp = bufRequest; + bufRequest = bufReply; + bufReply = bufTemp; + + demux_success = callback(&bufRequest->Head, &bufReply->Head); + + if (!demux_success) { + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present + if (slowpath(bufReply->RetCode)) { + if (bufReply->RetCode == MIG_NO_REPLY) { + continue; + } + + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } + } + + if (bufReply->Head.msgh_remote_port) { + tmp_options |= MACH_SEND_MSG; + if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) { + tmp_options |= MACH_SEND_TIMEOUT; + } + } + } + + return kr; +} diff --git a/src/source.h b/src/source.h new file mode 100644 index 000000000..867ba86a0 --- /dev/null +++ b/src/source.h @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SOURCE__ +#define __DISPATCH_SOURCE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +#include +#include +#include + +/*! + * @header + * The dispatch framework provides a suite of interfaces for monitoring low- + * level system objects (file descriptors, Mach ports, signals, VFS nodes, etc.) + * for activity and automatically submitting event handler blocks to dispatch + * queues when such activity occurs. + * + * This suite of interfaces is known as the Dispatch Source API. + */ + +/*! + * @typedef dispatch_source_t + * + * @abstract + * Dispatch sources are used to automatically submit event handler blocks to + * dispatch queues in response to external events. + */ +DISPATCH_DECL(dispatch_source); + +/*! + * @typedef dispatch_source_type_t + * + * @abstract + * Constants of this type represent the class of low-level system object that + * is being monitored by the dispatch source. Constants of this type are + * passed as a parameter to dispatch_source_create() and determine how the + * handle argument is interpreted (i.e. as a file descriptor, mach port, + * signal number, process identifer, etc.), and how the mask arugment is + * interpreted. + */ +typedef const struct dispatch_source_type_s *dispatch_source_type_t; + +/*! + * @const DISPATCH_SOURCE_TYPE_DATA_ADD + * @discussion A dispatch source that coalesces data obtained via calls to + * dispatch_source_merge_data(). An ADD is used to coalesce the data. + * The handle is unused (pass zero for now). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_data_add; + +/*! 
+ * @const DISPATCH_SOURCE_TYPE_DATA_OR + * @discussion A dispatch source that coalesces data obtained via calls to + * dispatch_source_merge_data(). A logical OR is used to coalesce the data. + * The handle is unused (pass zero for now). + * The mask is used to perform a logical AND with the value passed to + * dispatch_source_merge_data(). + */ +#define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_data_or; + +/*! + * @const DISPATCH_SOURCE_TYPE_MACH_SEND + * @discussion A dispatch source that monitors a Mach port for dead name + * notifications (send right no longer has any corresponding receive right). + * The handle is a Mach port with a send or send-once right (mach_port_t). + * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. + */ +#define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_mach_send; + +/*! + * @const DISPATCH_SOURCE_TYPE_MACH_RECV + * @discussion A dispatch source that monitors a Mach port for pending messages. + * The handle is a Mach port with a receive right (mach_port_t). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_mach_recv; + +/*! + * @const DISPATCH_SOURCE_TYPE_PROC + * @discussion A dispatch source that monitors an external process for events + * defined by dispatch_source_proc_flags_t. + * The handle is a process identifier (pid_t). + * The mask is a mask of desired events from dispatch_source_proc_flags_t. 
+ */ +#define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_proc; + +/*! + * @const DISPATCH_SOURCE_TYPE_READ + * @discussion A dispatch source that monitors a file descriptor for pending + * bytes available to be read. + * The handle is a file descriptor (int). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_read; + +/*! + * @const DISPATCH_SOURCE_TYPE_SIGNAL + * @discussion A dispatch source that monitors the current process for signals. + * The handle is a signal number (int). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_signal; + +/*! + * @const DISPATCH_SOURCE_TYPE_TIMER + * @discussion A dispatch source that submits the event handler block based + * on a timer. + * The handle is unused (pass zero for now). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_timer; + +/*! + * @const DISPATCH_SOURCE_TYPE_VNODE + * @discussion A dispatch source that monitors a file descriptor for events + * defined by dispatch_source_vnode_flags_t. + * The handle is a file descriptor (int). + * The mask is a mask of desired events from dispatch_source_vnode_flags_t. + */ +#define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_vnode; + +/*! 
+ * @const DISPATCH_SOURCE_TYPE_WRITE + * @discussion A dispatch source that monitors a file descriptor for available + * buffer space to write bytes. + * The handle is a file descriptor (int). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_write; + +/*! + * @enum dispatch_source_mach_send_flags_t + * + * @constant DISPATCH_MACH_SEND_DEAD + * The receive right corresponding to the given send right was destroyed. + */ +enum { + DISPATCH_MACH_SEND_DEAD = 0x1, +}; + +/*! + * @enum dispatch_source_proc_flags_t + * + * @constant DISPATCH_PROC_EXIT + * The process has exited (perhaps cleanly, perhaps not). + * + * @constant DISPATCH_PROC_FORK + * The process has created one or more child processes. + * + * @constant DISPATCH_PROC_EXEC + * The process has become another executable image via + * exec*() or posix_spawn*(). + * + * @constant DISPATCH_PROC_SIGNAL + * A Unix signal was delivered to the process. + */ +enum { + DISPATCH_PROC_EXIT = 0x80000000, + DISPATCH_PROC_FORK = 0x40000000, + DISPATCH_PROC_EXEC = 0x20000000, + DISPATCH_PROC_SIGNAL = 0x08000000, +}; + +/*! + * @enum dispatch_source_vnode_flags_t + * + * @constant DISPATCH_VNODE_DELETE + * The filesystem object was deleted from the namespace. + * + * @constant DISPATCH_VNODE_WRITE + * The filesystem object data changed. + * + * @constant DISPATCH_VNODE_EXTEND + * The filesystem object changed in size. + * + * @constant DISPATCH_VNODE_ATTRIB + * The filesystem object metadata changed. + * + * @constant DISPATCH_VNODE_LINK + * The filesystem object link count changed. + * + * @constant DISPATCH_VNODE_RENAME + * The filesystem object was renamed in the namespace. + * + * @constant DISPATCH_VNODE_REVOKE + * The filesystem object was revoked. 
+ */ +enum { + DISPATCH_VNODE_DELETE = 0x1, + DISPATCH_VNODE_WRITE = 0x2, + DISPATCH_VNODE_EXTEND = 0x4, + DISPATCH_VNODE_ATTRIB = 0x8, + DISPATCH_VNODE_LINK = 0x10, + DISPATCH_VNODE_RENAME = 0x20, + DISPATCH_VNODE_REVOKE = 0x40, +}; + +__BEGIN_DECLS + +/*! + * @function dispatch_source_create + * + * @abstract + * Creates a new dispatch source to monitor low-level system objects and auto- + * matically submit a handler block to a dispatch queue in response to events. + * + * @discussion + * Dispatch sources are not reentrant. Any events received while the dispatch + * source is suspended or while the event handler block is currently executing + * will be coalesced and delivered after the dispatch source is resumed or the + * event handler block has returned. + * + * Dispatch sources are created in a suspended state. After creating the + * source and setting any desired attributes (i.e. the handler, context, etc.), + * a call must be made to dispatch_resume() in order to begin event delivery. + * + * @param type + * Declares the type of the dispatch source. Must be one of the defined + * dispatch_source_type_t constants. + * @param handle + * The underlying system handle to monitor. The interpretation of this argument + * is determined by the constant provided in the type parameter. + * @param mask + * A mask of flags specifying which events are desired. The interpretation of + * this argument is determined by the constant provided in the type parameter. + * @param queue + * The dispatch queue to which the event handler block will be submitted. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_MALLOC DISPATCH_NOTHROW +dispatch_source_t +dispatch_source_create(dispatch_source_type_t type, + uintptr_t handle, + unsigned long mask, + dispatch_queue_t queue); + +/*! + * @function dispatch_source_set_event_handler + * + * @abstract + * Sets the event handler block for the given dispatch source. + * + * @param source + * The dispatch source to modify. 
+ * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The event handler block to submit to the source's target queue. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_event_handler(dispatch_source_t source, + dispatch_block_t handler); +#endif /* __BLOCKS__ */ + +/*! + * @function dispatch_source_set_event_handler_f + * + * @abstract + * Sets the event handler function for the given dispatch source. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The event handler function to submit to the source's target queue. + * The context parameter passed to the event handler function is the current + * context of the dispatch source at the time the handler call is made. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_event_handler_f(dispatch_source_t source, + dispatch_function_t handler); + +/*! + * @function dispatch_source_set_cancel_handler + * + * @abstract + * Sets the cancellation handler block for the given dispatch source. + * + * @discussion + * The cancellation handler (if specified) will be submitted to the source's + * target queue in response to a call to dispatch_source_cancel() once the + * system has released all references to the source's underlying handle and + * the source's event handler block has returned. + * + * IMPORTANT: + * A cancellation handler is required for file descriptor and mach port based + * sources in order to safely close the descriptor or destroy the port. Closing + * the descriptor or port before the cancellation handler may result in a race + * condition. 
If a new descriptor is allocated with the same value as the + * recently closed descriptor while the source's event handler is still running, + * the event handler may read/write data to the wrong descriptor. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The cancellation handler block to submit to the source's target queue. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_cancel_handler(dispatch_source_t source, + dispatch_block_t cancel_handler); +#endif /* __BLOCKS__ */ + +/*! + * @function dispatch_source_set_cancel_handler_f + * + * @abstract + * Sets the cancellation handler function for the given dispatch source. + * + * @discussion + * See dispatch_source_set_cancel_handler() for more details. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The cancellation handler function to submit to the source's target queue. + * The context parameter passed to the event handler function is the current + * context of the dispatch source at the time the handler call is made. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_cancel_handler_f(dispatch_source_t source, + dispatch_function_t cancel_handler); + +/*! + * @function dispatch_source_cancel + * + * @abstract + * Asynchronously cancel the dispatch source, preventing any further invocation + * of its event handler block. + * + * @discussion + * Cancellation prevents any further invocation of the event handler block for + * the specified dispatch source, but does not interrupt an event handler + * block that is already in progress. 
+ * + * The cancellation handler is submitted to the source's target queue once the + * source's event handler has finished, indicating it is now safe to close + * the source's handle (i.e. file descriptor or mach port). + * + * See dispatch_source_set_cancel_handler() for more information. + * + * @param source + * The dispatch source to be canceled. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_cancel(dispatch_source_t source); + +/*! + * @function dispatch_source_testcancel + * + * @abstract + * Tests whether the given dispatch source has been canceled. + * + * @param source + * The dispatch source to be tested. + * The result of passing NULL in this parameter is undefined. + * + * @result + * Non-zero if canceled and zero if not canceled. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +long +dispatch_source_testcancel(dispatch_source_t source); + +/*! + * @function dispatch_source_get_handle + * + * @abstract + * Returns the underlying system handle associated with this dispatch source. + * + * @param source + * The result of passing NULL in this parameter is undefined. 
+ * + * @result + * The return value should be interpreted according to the type of the dispatch + * source, and may be one of the following handles: + * + * DISPATCH_SOURCE_TYPE_DATA_ADD: n/a + * DISPATCH_SOURCE_TYPE_DATA_OR: n/a + * DISPATCH_SOURCE_TYPE_MACH_SEND: mach port (mach_port_t) + * DISPATCH_SOURCE_TYPE_MACH_RECV: mach port (mach_port_t) + * DISPATCH_SOURCE_TYPE_PROC: process identifier (pid_t) + * DISPATCH_SOURCE_TYPE_READ: file descriptor (int) + * DISPATCH_SOURCE_TYPE_SIGNAL: signal number (int) + * DISPATCH_SOURCE_TYPE_TIMER: n/a + * DISPATCH_SOURCE_TYPE_VNODE: file descriptor (int) + * DISPATCH_SOURCE_TYPE_WRITE: file descriptor (int) + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +uintptr_t +dispatch_source_get_handle(dispatch_source_t source); + +/*! + * @function dispatch_source_get_mask + * + * @abstract + * Returns the mask of events monitored by the dispatch source. + * + * @param source + * The result of passing NULL in this parameter is undefined. + * + * @result + * The return value should be interpreted according to the type of the dispatch + * source, and may be one of the following flag sets: + * + * DISPATCH_SOURCE_TYPE_DATA_ADD: n/a + * DISPATCH_SOURCE_TYPE_DATA_OR: n/a + * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t + * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t + * DISPATCH_SOURCE_TYPE_READ: n/a + * DISPATCH_SOURCE_TYPE_SIGNAL: n/a + * DISPATCH_SOURCE_TYPE_TIMER: n/a + * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t + * DISPATCH_SOURCE_TYPE_WRITE: n/a + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +unsigned long +dispatch_source_get_mask(dispatch_source_t source); + +/*! + * @function dispatch_source_get_data + * + * @abstract + * Returns pending data for the dispatch source. 
+ * + * @discussion + * This function is intended to be called from within the event handler block. + * The result of calling this function outside of the event handler callback is + * undefined. + * + * @param source + * The result of passing NULL in this parameter is undefined. + * + * @result + * The return value should be interpreted according to the type of the dispatch + * source, and may be one of the following: + * + * DISPATCH_SOURCE_TYPE_DATA_ADD: application defined data + * DISPATCH_SOURCE_TYPE_DATA_OR: application defined data + * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t + * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t + * DISPATCH_SOURCE_TYPE_READ: estimated bytes available to read + * DISPATCH_SOURCE_TYPE_SIGNAL: number of signals delivered since + * the last handler invocation + * DISPATCH_SOURCE_TYPE_TIMER: number of times the timer has fired + * since the last handler invocation + * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t + * DISPATCH_SOURCE_TYPE_WRITE: estimated buffer space available + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +unsigned long +dispatch_source_get_data(dispatch_source_t source); + +/*! + * @function dispatch_source_merge_data + * + * @abstract + * Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or + * DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its + * target queue. + * + * @param source + * The result of passing NULL in this parameter is undefined. + * + * @param value + * The value to coalesce with the pending data using a logical OR or an ADD + * as specified by the dispatch source type. A value of zero has no effect + * and will not result in the submission of the event handler block. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_merge_data(dispatch_source_t source, unsigned long value); + +/*! + * @function dispatch_source_set_timer + * + * @abstract + * Sets a start time, interval, and leeway value for a timer source. + * + * @discussion + * Calling this function has no effect if the timer source has already been + * canceled. + * + * The start time argument also determines which clock will be used for the + * timer. If the start time is DISPATCH_TIME_NOW or created with + * dispatch_time() then the timer is based on mach_absolute_time(). Otherwise, + * if the start time of the timer is created with dispatch_walltime() then the + * timer is based on gettimeofday(3). + * + * @param start + * The start time of the timer. See dispatch_time() and dispatch_walltime() + * for more information. + * + * @param interval + * The nanosecond interval for the timer. + * + * @param leeway + * A hint given to the system by the application for the amount of leeway, in + * nanoseconds, that the system may defer the timer in order to align with other + * system activity for improved system performance or power consumption. (For + * example, an application might perform a periodic task every 5 minutes, with + * a leeway of up to 30 seconds.) Note that some latency is to be expected for + * all timers even when a leeway value of zero is specified. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_set_timer(dispatch_source_t source, + dispatch_time_t start, + uint64_t interval, + uint64_t leeway); + +__END_DECLS + +#endif diff --git a/src/source_internal.h b/src/source_internal.h new file mode 100644 index 000000000..e7126dbb3 --- /dev/null +++ b/src/source_internal.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SOURCE_INTERNAL__ +#define __DISPATCH_SOURCE_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." 
+#include // for HeaderDoc +#endif + +struct dispatch_source_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_source_s); +}; + +extern const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable; + +struct dispatch_kevent_s { + TAILQ_ENTRY(dispatch_kevent_s) dk_list; + TAILQ_HEAD(, dispatch_source_s) dk_sources; + struct kevent dk_kevent; +}; + +typedef struct dispatch_kevent_s *dispatch_kevent_t; + +struct dispatch_timer_source_s { + uint64_t target; + uint64_t start; + uint64_t interval; + uint64_t leeway; + uint64_t flags; // dispatch_timer_flags_t +}; + +#define DSF_CANCELED 1u // cancellation has been requested + +struct dispatch_source_s { + DISPATCH_STRUCT_HEADER(dispatch_source_s, dispatch_source_vtable_s); + DISPATCH_QUEUE_HEADER; + // Instruments always copies DISPATCH_QUEUE_MIN_LABEL_SIZE, which is 64, + // so the remainder of the structure must be big enough + union { + char _ds_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; + struct { + char dq_label[8]; + dispatch_kevent_t ds_dkev; + + dispatch_source_handler_function_t ds_handler_func; + void *ds_handler_ctxt; + + void *ds_cancel_handler; + + unsigned int ds_is_level:1, + ds_is_adder:1, + ds_is_installed:1, + ds_needs_rearm:1, + ds_is_armed:1, + ds_is_legacy:1, + ds_cancel_is_block:1, + ds_handler_is_block:1; + + unsigned int ds_atomic_flags; + + unsigned long ds_data; + unsigned long ds_pending_data; + unsigned long ds_pending_data_mask; + + TAILQ_ENTRY(dispatch_source_s) ds_list; + + unsigned long ds_ident_hack; + + struct dispatch_timer_source_s ds_timer; + }; + }; +}; + + +void _dispatch_source_legacy_xref_release(dispatch_source_t ds); + +#endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/source_private.h b/src/source_private.h new file mode 100644 index 000000000..9e45cc1bf --- /dev/null +++ b/src/source_private.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SOURCE_PRIVATE__ +#define __DISPATCH_SOURCE_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/*! + * @const DISPATCH_SOURCE_TYPE_VFS + * @discussion Apple-internal dispatch source that monitors for vfs events + * defined by dispatch_vfs_flags_t. + * The handle is a process identifier (pid_t). + */ +#define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +extern const struct dispatch_source_type_s _dispatch_source_type_vfs; + +/*! + * @enum dispatch_source_vfs_flags_t + * + * @constant DISPATCH_VFS_NOTRESP + * Server down. + * + * @constant DISPATCH_VFS_NEEDAUTH + * Server bad auth. + * + * @constant DISPATCH_VFS_LOWDISK + * We're low on space. + * + * @constant DISPATCH_VFS_MOUNT + * New filesystem arrived. + * + * @constant DISPATCH_VFS_UNMOUNT + * Filesystem has left. + * + * @constant DISPATCH_VFS_DEAD + * Filesystem is dead, needs force unmount. + * + * @constant DISPATCH_VFS_ASSIST + * Filesystem needs assistance from external program. 
+ * + * @constant DISPATCH_VFS_NOTRESPLOCK + * Server lockd down. + * + * @constant DISPATCH_VFS_UPDATE + * Filesystem information has changed. + * + * @constant DISPATCH_VFS_VERYLOWDISK + * File system has *very* little disk space left. + */ +enum { + DISPATCH_VFS_NOTRESP = 0x0001, + DISPATCH_VFS_NEEDAUTH = 0x0002, + DISPATCH_VFS_LOWDISK = 0x0004, + DISPATCH_VFS_MOUNT = 0x0008, + DISPATCH_VFS_UNMOUNT = 0x0010, + DISPATCH_VFS_DEAD = 0x0020, + DISPATCH_VFS_ASSIST = 0x0040, + DISPATCH_VFS_NOTRESPLOCK = 0x0080, + DISPATCH_VFS_UPDATE = 0x0100, + DISPATCH_VFS_VERYLOWDISK = 0x0200, +}; + +/*! + * @enum dispatch_source_mach_send_flags_t + * + * @constant DISPATCH_MACH_SEND_DELETED + * The receive right corresponding to the given send right was destroyed. + */ +enum { + DISPATCH_MACH_SEND_DELETED = 0x2, +}; + +/*! + * @enum dispatch_source_proc_flags_t + * + * @constant DISPATCH_PROC_REAP + * The process has been reaped by the parent process via + * wait*(). + */ +enum { + DISPATCH_PROC_REAP = 0x10000000, +}; + +__BEGIN_DECLS + +/*! + * @typedef dispatch_mig_callback_t + * + * @abstract + * The signature of a function that handles Mach message delivery and response. + */ +typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +mach_msg_return_t +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback); + +__END_DECLS + +#endif diff --git a/src/time.c b/src/time.c new file mode 100644 index 000000000..07506f256 --- /dev/null +++ b/src/time.c @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +uint64_t +_dispatch_get_nanoseconds(void) +{ + struct timeval now; + int r = gettimeofday(&now, NULL); + dispatch_assert_zero(r); + dispatch_assert(sizeof(NSEC_PER_SEC) == 8); + dispatch_assert(sizeof(NSEC_PER_USEC) == 8); + return now.tv_sec * NSEC_PER_SEC + now.tv_usec * NSEC_PER_USEC; +} + +#if defined(__i386__) || defined(__x86_64__) +// x86 currently implements mach time in nanoseconds; this is NOT likely to change +#define _dispatch_time_mach2nano(x) (x) +#define _dispatch_time_nano2mach(x) (x) +#else +static struct _dispatch_host_time_data_s { + mach_timebase_info_data_t tbi; + uint64_t safe_numer_math; + dispatch_once_t pred; +} _dispatch_host_time_data; + +static void +_dispatch_get_host_time_init(void *context __attribute__((unused))) +{ + dispatch_assume_zero(mach_timebase_info(&_dispatch_host_time_data.tbi)); + _dispatch_host_time_data.safe_numer_math = DISPATCH_TIME_FOREVER / _dispatch_host_time_data.tbi.numer; +} + +static uint64_t +_dispatch_time_mach2nano(uint64_t nsec) +{ + struct _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; + uint64_t small_tmp = nsec; +#ifdef __LP64__ + __uint128_t big_tmp = nsec; +#else + long double big_tmp = nsec; +#endif + + dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); + + if (slowpath(data->tbi.numer != data->tbi.denom)) { + if (nsec < data->safe_numer_math) { + small_tmp *= data->tbi.numer; + small_tmp /= data->tbi.denom; + } else { + big_tmp *= data->tbi.numer; + big_tmp /= 
data->tbi.denom; + small_tmp = big_tmp; + } + } + return small_tmp; +} + +static int64_t +_dispatch_time_nano2mach(int64_t nsec) +{ + struct _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; +#ifdef __LP64__ + __int128_t big_tmp = nsec; +#else + long double big_tmp = nsec; +#endif + + dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); + + if (fastpath(data->tbi.numer == data->tbi.denom)) { + return nsec; + } + + // Multiply by the inverse to convert nsec to Mach absolute time + big_tmp *= data->tbi.denom; + big_tmp /= data->tbi.numer; + + if (big_tmp > INT64_MAX) { + return INT64_MAX; + } + if (big_tmp < INT64_MIN) { + return INT64_MIN; + } + return big_tmp; +} +#endif + +dispatch_time_t +dispatch_time(dispatch_time_t inval, int64_t delta) +{ + if (inval == DISPATCH_TIME_FOREVER) { + return DISPATCH_TIME_FOREVER; + } + if ((int64_t)inval < 0) { + // wall clock + if (delta >= 0) { + if ((int64_t)(inval -= delta) >= 0) { + return DISPATCH_TIME_FOREVER; // overflow + } + return inval; + } + if ((int64_t)(inval -= delta) >= -1) { + // -1 is special == DISPATCH_TIME_FOREVER == forever + return -2; // underflow + } + return inval; + } + // mach clock + delta = _dispatch_time_nano2mach(delta); + if (inval == 0) { + inval = mach_absolute_time(); + } + if (delta >= 0) { + if ((int64_t)(inval += delta) <= 0) { + return DISPATCH_TIME_FOREVER; // overflow + } + return inval; + } + if ((int64_t)(inval += delta) < 1) { + return 1; // underflow + } + return inval; +} + +dispatch_time_t +dispatch_walltime(const struct timespec *inval, int64_t delta) +{ + int64_t nsec; + + if (inval) { + nsec = inval->tv_sec * 1000000000ull + inval->tv_nsec; + } else { + nsec = _dispatch_get_nanoseconds(); + } + + nsec += delta; + if (nsec <= 1) { + // -1 is special == DISPATCH_TIME_FOREVER == forever + return delta >= 0 ? 
DISPATCH_TIME_FOREVER : (uint64_t)-2ll; + } + + return -nsec; +} + +uint64_t +_dispatch_timeout(dispatch_time_t when) +{ + uint64_t now; + + if (when == DISPATCH_TIME_FOREVER) { + return DISPATCH_TIME_FOREVER; + } + if (when == 0) { + return 0; + } + if ((int64_t)when < 0) { + when = -(int64_t)when; + now = _dispatch_get_nanoseconds(); + return now >= when ? 0 : when - now; + } + now = mach_absolute_time(); + return now >= when ? 0 : _dispatch_time_mach2nano(when - now); +} diff --git a/src/time.h b/src/time.h new file mode 100644 index 000000000..510d6d7b7 --- /dev/null +++ b/src/time.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_TIME__ +#define __DISPATCH_TIME__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +#include + +__BEGIN_DECLS + +struct timespec; + +// 6368156 +#ifdef NSEC_PER_SEC +#undef NSEC_PER_SEC +#endif +#ifdef USEC_PER_SEC +#undef USEC_PER_SEC +#endif +#ifdef NSEC_PER_USEC +#undef NSEC_PER_USEC +#endif +#define NSEC_PER_SEC 1000000000ull +#define USEC_PER_SEC 1000000ull +#define NSEC_PER_USEC 1000ull + +/*! 
+ * @typedef dispatch_time_t + * + * @abstract + * A somewhat abstract representation of time; where zero means "now" and + * DISPATCH_TIME_FOREVER means "infinity" and every value in between is an + * opaque encoding. + */ +typedef uint64_t dispatch_time_t; + +#define DISPATCH_TIME_NOW 0 +#define DISPATCH_TIME_FOREVER (~0ull) + +/*! + * @function dispatch_time + * + * @abstract + * Create dispatch_time_t relative to the default clock or modify an existing + * dispatch_time_t. + * + * @discussion + * On Mac OS X the default clock is based on mach_absolute_time(). + * + * @param when + * An optional dispatch_time_t to add nanoseconds to. If zero is passed, then + * dispatch_time() will use the result of mach_absolute_time(). + * + * @param delta + * Nanoseconds to add. + * + * @result + * A new dispatch_time_t. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW +dispatch_time_t +dispatch_time(dispatch_time_t when, int64_t delta); + +/*! + * @function dispatch_walltime + * + * @abstract + * Create a dispatch_time_t using the wall clock. + * + * @discussion + * On Mac OS X the wall clock is based on gettimeofday(3). + * + * @param when + * A struct timespec to add time to. If NULL is passed, then + * dispatch_walltime() will use the result of gettimeofday(3). + * + * @param delta + * Nanoseconds to add. + * + * @result + * A new dispatch_time_t. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +DISPATCH_NOTHROW +dispatch_time_t +dispatch_walltime(const struct timespec *when, int64_t delta); + +__END_DECLS + +#endif From 4acac767bfb4b990a5dab2533059f65b81d82dbb Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Mon, 15 Aug 2011 15:06:34 +0000 Subject: [PATCH 02/18] libdispatch-187.5 Imported from libdispatch-187.5.tar.gz --- INSTALL | 81 + Makefile.am | 22 + PATCHES | 194 + autogen.sh | 2 + config/config.h | 203 + configure.ac | 266 ++ dispatch/Makefile.am | 19 + {src => dispatch}/base.h | 47 +- dispatch/data.h | 248 ++ {src => dispatch}/dispatch.h | 22 +- {src => dispatch}/group.h | 61 +- dispatch/io.h | 586 +++ {src => dispatch}/object.h | 60 +- {src => dispatch}/once.h | 51 +- {src => dispatch}/queue.h | 431 ++- {src => dispatch}/semaphore.h | 22 +- {src => dispatch}/source.h | 230 +- {src => dispatch}/time.h | 24 +- .../project.pbxproj | 609 --- examples/Dispatch Samples/ReadMe.txt | 93 - examples/Dispatch Samples/apply.c | 123 - examples/Dispatch Samples/nWide.c | 127 - examples/Dispatch Samples/netcat.c | 596 --- examples/Dispatch Samples/proc.c | 209 - examples/Dispatch Samples/readFile.c | 115 - examples/Dispatch Samples/readFileF.c | 117 - examples/Dispatch Samples/timers.c | 85 - examples/DispatchLife/DispatchLife.c | 392 -- .../DispatchLife.xcodeproj/project.pbxproj | 252 -- examples/DispatchLife/DispatchLifeGLView.h | 59 - examples/DispatchLife/DispatchLifeGLView.m | 203 - .../English.lproj/InfoPlist.strings | Bin 92 -> 0 bytes .../English.lproj/MainMenu.nib/designable.nib | 2651 ------------- .../MainMenu.nib/keyedobjects.nib | Bin 19575 -> 0 bytes examples/DispatchLife/Info.plist | 28 - examples/DispatchLife/ReadMe.txt | 37 - examples/DispatchLife/main.m | 49 - .../DispatchWebServer/DispatchWebServer.c | 956 ----- .../project.pbxproj | 203 - examples/DispatchWebServer/ReadMe.txt | 44 - 
libdispatch.xcodeproj/project.pbxproj | 1070 +++++- .../contents.xcworkspacedata | 6 + m4/atomic.m4 | 21 + m4/blocks.m4 | 112 + m4/pkg.m4 | 155 + man/Makefile.am | 89 + man/dispatch.3 | 18 +- man/dispatch_after.3 | 10 +- man/dispatch_api.3 | 2 +- man/dispatch_apply.3 | 33 +- man/dispatch_async.3 | 15 +- man/dispatch_benchmark.3 | 4 + man/dispatch_data_create.3 | 206 + man/dispatch_group_create.3 | 9 +- man/dispatch_io_create.3 | 238 ++ man/dispatch_io_read.3 | 151 + man/dispatch_object.3 | 22 +- man/dispatch_once.3 | 2 + man/dispatch_queue_create.3 | 127 +- man/dispatch_read.3 | 123 + man/dispatch_semaphore_create.3 | 21 +- man/dispatch_source_create.3 | 56 +- man/dispatch_time.3 | 1 + private/Makefile.am | 10 + {src => private}/benchmark.h | 19 +- {src => private}/private.h | 88 +- {src => private}/queue_private.h | 89 +- {src => private}/source_private.h | 62 +- resolver/resolved.h | 26 + resolver/resolver.c | 20 + resolver/resolver.h | 31 + src/Makefile.am | 73 + src/apply.c | 220 +- src/benchmark.c | 35 +- src/data.c | 429 +++ src/data_internal.h | 58 + src/hw_shims.h | 72 - src/init.c | 622 +++ src/internal.h | 404 +- src/io.c | 2155 +++++++++++ src/io_internal.h | 198 + src/legacy.c | 444 --- src/legacy.h | 748 ---- src/object.c | 130 +- src/object_internal.h | 84 +- src/once.c | 93 +- src/os_shims.h | 152 - src/protocol.defs | 49 +- src/provider.d | 42 + src/queue.c | 3387 ++++++++++------- src/queue_internal.h | 158 +- src/semaphore.c | 652 ++-- src/semaphore_internal.h | 27 +- src/shims.c | 65 - src/shims.h | 74 + src/shims/atomic.h | 157 + src/shims/getprogname.h | 37 + src/shims/hw_config.h | 106 + src/shims/malloc_zone.h | 98 + src/shims/perfmon.h | 97 + src/shims/time.h | 108 + src/shims/tsd.h | 104 + src/source.c | 2888 +++++++------- src/source_internal.h | 123 +- src/time.c | 106 +- src/trace.h | 152 + tools/dispatch_trace.d | 76 + xcodeconfig/libdispatch-resolved.xcconfig | 25 + xcodeconfig/libdispatch-resolver.xcconfig | 20 + 
xcodeconfig/libdispatch.xcconfig | 67 + xcodescripts/install-manpages.sh | 107 + xcodescripts/mig-headers.sh | 29 + xcodescripts/postprocess-headers.sh | 21 + xcodescripts/symlink-headers.sh | 29 + 114 files changed, 14474 insertions(+), 12600 deletions(-) create mode 100644 INSTALL create mode 100644 Makefile.am create mode 100644 PATCHES create mode 100644 autogen.sh create mode 100644 config/config.h create mode 100644 configure.ac create mode 100644 dispatch/Makefile.am rename {src => dispatch}/base.h (76%) create mode 100644 dispatch/data.h rename {src => dispatch}/dispatch.h (79%) rename {src => dispatch}/group.h (83%) create mode 100644 dispatch/io.h rename {src => dispatch}/object.h (74%) rename {src => dispatch}/once.h (60%) rename {src => dispatch}/queue.h (51%) rename {src => dispatch}/semaphore.h (87%) rename {src => dispatch}/source.h (75%) rename {src => dispatch}/time.h (84%) delete mode 100644 examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj delete mode 100644 examples/Dispatch Samples/ReadMe.txt delete mode 100644 examples/Dispatch Samples/apply.c delete mode 100644 examples/Dispatch Samples/nWide.c delete mode 100644 examples/Dispatch Samples/netcat.c delete mode 100644 examples/Dispatch Samples/proc.c delete mode 100644 examples/Dispatch Samples/readFile.c delete mode 100644 examples/Dispatch Samples/readFileF.c delete mode 100644 examples/Dispatch Samples/timers.c delete mode 100644 examples/DispatchLife/DispatchLife.c delete mode 100644 examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj delete mode 100644 examples/DispatchLife/DispatchLifeGLView.h delete mode 100644 examples/DispatchLife/DispatchLifeGLView.m delete mode 100644 examples/DispatchLife/English.lproj/InfoPlist.strings delete mode 100644 examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib delete mode 100644 examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib delete mode 100644 examples/DispatchLife/Info.plist delete mode 100644 
examples/DispatchLife/ReadMe.txt delete mode 100644 examples/DispatchLife/main.m delete mode 100644 examples/DispatchWebServer/DispatchWebServer.c delete mode 100644 examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj delete mode 100644 examples/DispatchWebServer/ReadMe.txt create mode 100644 libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 m4/atomic.m4 create mode 100644 m4/blocks.m4 create mode 100644 m4/pkg.m4 create mode 100644 man/Makefile.am create mode 100644 man/dispatch_data_create.3 create mode 100644 man/dispatch_io_create.3 create mode 100644 man/dispatch_io_read.3 create mode 100644 man/dispatch_read.3 create mode 100644 private/Makefile.am rename {src => private}/benchmark.h (91%) rename {src => private}/private.h (51%) rename {src => private}/queue_private.h (52%) rename {src => private}/source_private.h (62%) create mode 100644 resolver/resolved.h create mode 100644 resolver/resolver.c create mode 100644 resolver/resolver.h create mode 100644 src/Makefile.am create mode 100644 src/data.c create mode 100644 src/data_internal.h delete mode 100644 src/hw_shims.h create mode 100644 src/init.c create mode 100644 src/io.c create mode 100644 src/io_internal.h delete mode 100644 src/legacy.c delete mode 100644 src/legacy.h delete mode 100644 src/os_shims.h create mode 100644 src/provider.d delete mode 100644 src/shims.c create mode 100644 src/shims.h create mode 100644 src/shims/atomic.h create mode 100644 src/shims/getprogname.h create mode 100644 src/shims/hw_config.h create mode 100644 src/shims/malloc_zone.h create mode 100644 src/shims/perfmon.h create mode 100644 src/shims/time.h create mode 100644 src/shims/tsd.h create mode 100644 src/trace.h create mode 100755 tools/dispatch_trace.d create mode 100644 xcodeconfig/libdispatch-resolved.xcconfig create mode 100644 xcodeconfig/libdispatch-resolver.xcconfig create mode 100644 xcodeconfig/libdispatch.xcconfig create mode 100755 
xcodescripts/install-manpages.sh create mode 100755 xcodescripts/mig-headers.sh create mode 100755 xcodescripts/postprocess-headers.sh create mode 100755 xcodescripts/symlink-headers.sh diff --git a/INSTALL b/INSTALL new file mode 100644 index 000000000..69fd5a6aa --- /dev/null +++ b/INSTALL @@ -0,0 +1,81 @@ +Grand Central Dispatch (GCD) + +GCD is a concurrent programming framework first shipped with Mac OS X Snow +Leopard. This package is an open source bundling of libdispatch, the core +user space library implementing GCD. At the time of writing, support for +the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow +Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Other +systems are currently unsupported. + + Configuring and installing libdispatch + +GCD is built using autoconf, automake, and libtool, and has a number of +compile-time configuration options that should be reviewed before starting. +An uncustomized install requires: + + sh autogen.sh + ./configure + make + make install + +The following configure options may be of general interest: + +--with-apple-libc-source + + Specify the path to Apple's Libc package, so that appropriate headers + can be found and used. + +--with-apple-libclosure-source + + Specify the path to Apple's Libclosure package, so that appropriate headers + can be found and used. + +--with-apple-xnu-source + + Specify the path to Apple's XNU package, so that appropriate headers + can be found and used. + +--with-blocks-runtime + + On systems where -fblocks is supported, specify an additional library + path in which libBlocksRuntime can be found. This is not required on + Mac OS X, where the Blocks runtime is included in libSystem, but is + required on FreeBSD. 
+ +The following options are likely to only be useful when building libdispatch +on Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: + +--disable-libdispatch-init-constructor + + Do not tag libdispatch's init routine as __constructor, in which case + it must be run manually before libdispatch routines can be called. + For the libdispatch library in /usr/lib/system, the init routine is called + automatically during process start. + +--enable-apple-tsd-optimizations + + Use a non-portable allocation scheme for pthread per-thread data (TSD) + keys when building libdispatch for /usr/lib/system on Mac OS X. This + should not be used on other OS's, or on Mac OS X when building a + stand-alone library. + + Typical configuration commands + +The following command lines create the configuration required to build +libdispatch for /usr/lib/system on Mac OS X Lion: + + sh autogen.sh + ./configure CFLAGS='-arch x86_64 -arch i386' \ + --prefix=/usr --libdir=/usr/lib/system \ + --disable-dependency-tracking --disable-static \ + --disable-libdispatch-init-constructor \ + --enable-apple-tsd-optimizations \ + --with-apple-libc-source=/path/to/10.7.0/Libc-763.11 \ + --with-apple-libclosure-source=/path/to/10.7.0/libclosure-53 \ + --with-apple-xnu-source=/path/to/10.7.0/xnu-1699.22.73 + +Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with +clang and blocks support: + + sh autogen.sh + ./configure CC=clang --with-blocks-runtime=/usr/local/lib diff --git a/Makefile.am b/Makefile.am new file mode 100644 index 000000000..4e3167c2a --- /dev/null +++ b/Makefile.am @@ -0,0 +1,22 @@ +# +# +# + +ACLOCAL_AMFLAGS = -I m4 + +SUBDIRS= \ + dispatch \ + man \ + private \ + src + +EXTRA_DIST= \ + LICENSE \ + PATCHES \ + autogen.sh \ + config/config.h \ + libdispatch.xcodeproj \ + resolver \ + tools \ + xcodeconfig \ + xcodescripts diff --git a/PATCHES b/PATCHES new file mode 100644 index 000000000..4f88387f1 --- /dev/null +++ b/PATCHES @@ -0,0 +1,194 @@ +The 
libdispatch project exists in a parallel open source repository at: + http://svn.macosforge.org/repository/libdispatch/trunk + +Externally committed revisions are periodically synchronized back to the +internal repository (this repository). + +Key: + APPLIED: change set was applied to internal repository. + INTERNAL: change set originated internally (i.e. already applied). + SKIPPED: change set was skipped. + +[ 1] SKIPPED +[ 2] SKIPPED +[ 3] INTERNAL rdar://problem/7148356 +[ 4] APPLIED rdar://problem/7323245 +[ 5] APPLIED rdar://problem/7323245 +[ 6] APPLIED rdar://problem/7323245 +[ 7] APPLIED rdar://problem/7323245 +[ 8] APPLIED rdar://problem/7323245 +[ 9] APPLIED rdar://problem/7323245 +[ 10] APPLIED rdar://problem/7323245 +[ 11] APPLIED rdar://problem/7323245 +[ 12] APPLIED rdar://problem/7323245 +[ 13] SKIPPED +[ 14] APPLIED rdar://problem/7323245 +[ 15] APPLIED rdar://problem/7323245 +[ 16] APPLIED rdar://problem/7323245 +[ 17] APPLIED rdar://problem/7323245 +[ 18] APPLIED rdar://problem/7323245 +[ 19] APPLIED rdar://problem/7323245 +[ 20] APPLIED rdar://problem/7323245 +[ 21] APPLIED rdar://problem/7323245 +[ 22] APPLIED rdar://problem/7323245 +[ 23] APPLIED rdar://problem/7323245 +[ 24] APPLIED rdar://problem/7323245 +[ 25] APPLIED rdar://problem/7323245 +[ 26] APPLIED rdar://problem/7323245 +[ 27] APPLIED rdar://problem/7323245 +[ 28] APPLIED rdar://problem/7323245 +[ 29] APPLIED rdar://problem/7323245 +[ 30] SKIPPED +[ 31] APPLIED rdar://problem/7323245 +[ 32] APPLIED rdar://problem/7323245 +[ 33] APPLIED rdar://problem/7323245 +[ 34] APPLIED rdar://problem/7323245 +[ 35] SKIPPED +[ 36] APPLIED rdar://problem/7323245 +[ 37] APPLIED rdar://problem/7323245 +[ 38] APPLIED rdar://problem/7323245 +[ 39] APPLIED rdar://problem/7323245 +[ 40] APPLIED rdar://problem/7323245 +[ 41] APPLIED rdar://problem/7323245 +[ 42] APPLIED rdar://problem/7323245 +[ 43] APPLIED rdar://problem/7323245 +[ 44] APPLIED rdar://problem/7323245 +[ 45] APPLIED rdar://problem/7323245 
+[ 46] APPLIED rdar://problem/7323245 +[ 47] APPLIED rdar://problem/7323245 +[ 48] APPLIED rdar://problem/7323245 +[ 49] APPLIED rdar://problem/7323245 +[ 50] APPLIED rdar://problem/7323245 +[ 51] APPLIED rdar://problem/7323245 +[ 52] APPLIED rdar://problem/7323245 +[ 53] APPLIED rdar://problem/7323245 +[ 54] APPLIED rdar://problem/7323245 +[ 55] APPLIED rdar://problem/7323245 +[ 56] APPLIED rdar://problem/7323245 +[ 57] APPLIED rdar://problem/7323245 +[ 58] APPLIED rdar://problem/7323245 +[ 59] APPLIED rdar://problem/7323245 +[ 60] APPLIED rdar://problem/7323245 +[ 61] APPLIED rdar://problem/7323245 +[ 62] APPLIED rdar://problem/7323245 +[ 63] APPLIED rdar://problem/7323245 +[ 64] APPLIED rdar://problem/7323245 +[ 65] APPLIED rdar://problem/7323245 +[ 66] APPLIED rdar://problem/7323245 +[ 67] APPLIED rdar://problem/7323245 +[ 68] APPLIED rdar://problem/7323245 +[ 69] APPLIED rdar://problem/7323245 +[ 70] APPLIED rdar://problem/7323245 +[ 71] INTERNAL +[ 72] INTERNAL +[ 73] APPLIED rdar://problem/7531526 +[ 74] APPLIED rdar://problem/7531526 +[ 75] +[ 76] +[ 77] +[ 78] +[ 79] APPLIED rdar://problem/7531526 +[ 80] APPLIED rdar://problem/7531526 +[ 81] APPLIED rdar://problem/7531526 +[ 82] APPLIED rdar://problem/7531526 +[ 83] APPLIED rdar://problem/7531526 +[ 84] APPLIED rdar://problem/7531526 +[ 85] +[ 86] +[ 87] APPLIED rdar://problem/7531526 +[ 88] APPLIED rdar://problem/7531526 +[ 89] APPLIED rdar://problem/7531526 +[ 90] +[ 91] +[ 92] +[ 93] +[ 94] +[ 95] +[ 96] APPLIED rdar://problem/7531526 +[ 97] APPLIED rdar://problem/7531526 +[ 98] +[ 99] +[ 100] +[ 101] +[ 102] +[ 103] APPLIED rdar://problem/7531526 +[ 104] APPLIED rdar://problem/7531526 +[ 105] +[ 106] APPLIED rdar://problem/7531526 +[ 107] SKIPPED +[ 108] SKIPPED +[ 109] SKIPPED +[ 110] SKIPPED +[ 111] SKIPPED +[ 112] APPLIED rdar://problem/7531526 +[ 113] SKIPPED +[ 114] APPLIED rdar://problem/7531526 +[ 115] APPLIED rdar://problem/7531526 +[ 116] APPLIED rdar://problem/7531526 +[ 117] SKIPPED +[ 118] 
APPLIED rdar://problem/7531526 +[ 119] SKIPPED +[ 120] APPLIED rdar://problem/7531526 +[ 121] SKIPPED +[ 122] SKIPPED +[ 123] SKIPPED +[ 124] SKIPPED +[ 125] APPLIED rdar://problem/7531526 +[ 126] SKIPPED +[ 127] APPLIED rdar://problem/7531526 +[ 128] +[ 129] +[ 130] +[ 131] +[ 132] +[ 133] +[ 134] +[ 135] +[ 136] +[ 137] APPLIED rdar://problem/7647055 +[ 138] SKIPPED +[ 139] APPLIED rdar://problem/7531526 +[ 140] APPLIED rdar://problem/7531526 +[ 141] APPLIED rdar://problem/7531526 +[ 142] APPLIED rdar://problem/7531526 +[ 143] +[ 144] APPLIED rdar://problem/7531526 +[ 145] APPLIED rdar://problem/7531526 +[ 146] APPLIED rdar://problem/7531526 +[ 147] +[ 148] +[ 149] +[ 150] +[ 151] APPLIED rdar://problem/7531526 +[ 152] APPLIED rdar://problem/7531526 +[ 153] +[ 154] APPLIED rdar://problem/7531526 +[ 155] +[ 156] +[ 157] APPLIED rdar://problem/7531526 +[ 158] +[ 159] +[ 160] +[ 161] +[ 162] APPLIED rdar://problem/7531526 +[ 163] APPLIED rdar://problem/7531526 +[ 164] +[ 165] +[ 166] APPLIED rdar://problem/7531526 +[ 167] APPLIED rdar://problem/7531526 +[ 168] +[ 169] APPLIED rdar://problem/7531526 +[ 170] APPLIED rdar://problem/7531526 +[ 171] APPLIED rdar://problem/7531526 +[ 172] APPLIED rdar://problem/7531526 +[ 173] APPLIED rdar://problem/7531526 +[ 174] APPLIED rdar://problem/7531526 +[ 175] APPLIED rdar://problem/7531526 +[ 176] APPLIED rdar://problem/7531526 +[ 177] APPLIED rdar://problem/7531526 +[ 178] +[ 179] APPLIED rdar://problem/7531526 +[ 180] APPLIED rdar://problem/7531526 +[ 181] +[ 182] +[ 183] INTERNAL rdar://problem/7581831 diff --git a/autogen.sh b/autogen.sh new file mode 100644 index 000000000..3ebda4225 --- /dev/null +++ b/autogen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +autoreconf -fvi diff --git a/config/config.h b/config/config.h new file mode 100644 index 000000000..040bf21a2 --- /dev/null +++ b/config/config.h @@ -0,0 +1,203 @@ +/* config/config.h. Generated from config.h.in by configure. */ +/* config/config.h.in. 
Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the declaration of `CLOCK_MONOTONIC', and to 0 if + you don't. */ +#define HAVE_DECL_CLOCK_MONOTONIC 0 + +/* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you + don't. */ +#define HAVE_DECL_CLOCK_UPTIME 0 + +/* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you + don't. */ +#define HAVE_DECL_FD_COPY 1 + +/* Define to 1 if you have the declaration of `NOTE_NONE', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_NONE 1 + +/* Define to 1 if you have the declaration of `NOTE_REAP', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_REAP 1 + +/* Define to 1 if you have the declaration of `NOTE_SIGNAL', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_SIGNAL 1 + +/* Define to 1 if you have the declaration of `POSIX_SPAWN_START_SUSPENDED', + and to 0 if you don't. */ +#define HAVE_DECL_POSIX_SPAWN_START_SUSPENDED 1 + +/* Define to 1 if you have the declaration of `program_invocation_short_name', + and to 0 if you don't. */ +#define HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME 0 + +/* Define to 1 if you have the declaration of `SIGEMT', and to 0 if you don't. + */ +#define HAVE_DECL_SIGEMT 1 + +/* Define to 1 if you have the declaration of `VQ_UPDATE', and to 0 if you + don't. */ +#define HAVE_DECL_VQ_UPDATE 1 + +/* Define to 1 if you have the declaration of `VQ_VERYLOWDISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_VERYLOWDISK 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_DLFCN_H 1 + +/* Define to 1 if you have the `getprogname' function. */ +#define HAVE_GETPROGNAME 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_INTTYPES_H 1 + +/* Define if Apple leaks program is present */ +#define HAVE_LEAKS 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_LIBKERN_OSATOMIC_H 1 + +/* Define to 1 if you have the header file. 
*/ +#define HAVE_LIBKERN_OSCROSSENDIAN_H 1 + +/* Define if mach is present */ +#define HAVE_MACH 1 + +/* Define to 1 if you have the `mach_absolute_time' function. */ +#define HAVE_MACH_ABSOLUTE_TIME 1 + +/* Define to 1 if you have the `malloc_create_zone' function. */ +#define HAVE_MALLOC_CREATE_ZONE 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MALLOC_MALLOC_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_MEMORY_H 1 + +/* Define if __builtin_trap marked noreturn */ +#define HAVE_NORETURN_BUILTIN_TRAP 1 + +/* Define to 1 if you have the `pthread_key_init_np' function. */ +#define HAVE_PTHREAD_KEY_INIT_NP 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_MACHDEP_H 1 + +/* Define to 1 if you have the `pthread_main_np' function. */ +#define HAVE_PTHREAD_MAIN_NP 1 + +/* Define to 1 if you have the header file. */ +/* #undef HAVE_PTHREAD_NP_H */ + +/* Define if pthread work queues are present */ +#define HAVE_PTHREAD_WORKQUEUES 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDINT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STDLIB_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRINGS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_STRING_H 1 + +/* Define to 1 if you have the `sysconf' function. */ +#define HAVE_SYSCONF 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_CDEFS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_STAT_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_TYPES_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_TARGETCONDITIONALS_H 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_UNISTD_H 1 + +/* Define to the sub-directory in which libtool stores uninstalled libraries. 
+ */ +#define LT_OBJDIR ".libs/" + +/* Name of package */ +#define PACKAGE "libdispatch" + +/* Define to the address where bug reports for this package should be sent. */ +#define PACKAGE_BUGREPORT "libdispatch@macosforge.org" + +/* Define to the full name of this package. */ +#define PACKAGE_NAME "libdispatch" + +/* Define to the full name and version of this package. */ +#define PACKAGE_STRING "libdispatch 1.1" + +/* Define to the one symbol short name of this package. */ +#define PACKAGE_TARNAME "libdispatch" + +/* Define to the version of this package. */ +#define PACKAGE_VERSION "1.1" + +/* Define to 1 if you have the ANSI C header files. */ +#define STDC_HEADERS 1 + +/* Define to use non-portable pthread TSD optimizations for Mac OS X) */ +#define USE_APPLE_TSD_OPTIMIZATIONS 1 + +/* Define to tag libdispatch_init as a constructor */ +/* #undef USE_LIBDISPATCH_INIT_CONSTRUCTOR */ + +/* Define to use Mach semaphores */ +#define USE_MACH_SEM 1 + +/* Define to use POSIX semaphores */ +/* #undef USE_POSIX_SEM */ + +/* Version number of package */ +#define VERSION "1.1" + +/* Define to 1 if on AIX 3. + System headers sometimes define this. + We just want to avoid a redefinition error message. */ +#ifndef _ALL_SOURCE +/* # undef _ALL_SOURCE */ +#endif + +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +# define _GNU_SOURCE 1 +#endif + +/* Define to 1 if on MINIX. */ +/* #undef _MINIX */ + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +/* #undef _POSIX_1_SOURCE */ + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +/* #undef _POSIX_SOURCE */ + +/* Define if using Darwin $NOCANCEL */ +#define __DARWIN_NON_CANCELABLE 1 + +/* Enable extensions on Solaris. 
*/ +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif diff --git a/configure.ac b/configure.ac new file mode 100644 index 000000000..eeba91bbe --- /dev/null +++ b/configure.ac @@ -0,0 +1,266 @@ +# +# When this file changes, rerun autogen.sh. +# + +AC_PREREQ(2.59) +AC_INIT([libdispatch], [1.1], [libdispatch@macosforge.org], [libdispatch]) +AC_REVISION([$$]) +AC_CONFIG_AUX_DIR(config) +AC_CONFIG_HEADER([config/config.h]) +AC_CONFIG_MACRO_DIR([m4]) +AM_MAINTAINER_MODE + +# +# On Mac OS X, some required header files come from other source packages; +# allow specifying where those are. +# +AC_ARG_WITH([apple-libc-source], + [AS_HELP_STRING([--with-apple-libc-source], + [Specify path to Apple Libc source])], + [apple_libc_source_path=${withval}/pthreads + APPLE_LIBC_SOURCE_PATH=-I$apple_libc_source_path + CPPFLAGS="$CPPFLAGS -I$apple_libc_source_path"], + [APPLE_LIBC_SOURCE_PATH=] +) +AC_SUBST([APPLE_LIBC_SOURCE_PATH]) + +AC_ARG_WITH([apple-libclosure-source], + [AS_HELP_STRING([--with-apple-libclosure-source], + [Specify path to Apple libclosure source])], + [apple_libclosure_source_path=${withval} + APPLE_LIBCLOSURE_SOURCE_PATH=-I$apple_libclosure_source_path + CPPFLAGS="$CPPFLAGS -I$apple_libclosure_source_path"], + [APPLE_LIBCLOSURE_SOURCE_PATH=] +) +AC_SUBST([APPLE_LIBCLOSURE_SOURCE_PATH]) + +AC_ARG_WITH([apple-xnu-source], + [AS_HELP_STRING([--with-apple-xnu-source], + [Specify path to Apple XNU source])], + [apple_xnu_source_path=${withval}/libkern + APPLE_XNU_SOURCE_PATH=-I$apple_xnu_source_path + CPPFLAGS="$CPPFLAGS -I$apple_xnu_source_path" + apple_xnu_source_system_path=${withval}/osfmk + APPLE_XNU_SOURCE_SYSTEM_PATH=$apple_xnu_source_system_path], + [APPLE_XNU_SOURCE_PATH=] +) +AC_SUBST([APPLE_XNU_SOURCE_PATH]) +AC_SUBST([APPLE_XNU_SOURCE_SYSTEM_PATH]) +AM_CONDITIONAL(USE_XNU_SOURCE, [test -n 
"$apple_xnu_source_system_path"]) +AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, + [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], + [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])] +) +AS_IF([test "x$dispatch_cv_system_privateheaders" != "xno"], + [CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/System.framework/PrivateHeaders"] +) + +# +# On Mac OS X, libdispatch_init is automatically invoked during libSystem +# process initialization. On other systems, it is tagged as a library +# constructor to be run automatically by the runtime linker. +# +AC_ARG_ENABLE([libdispatch-init-constructor], + [AS_HELP_STRING([--disable-libdispatch-init-constructor], + [Disable libdispatch_init as a constructor])] +) + +AS_IF([test "x$enable_libdispatch_init_constructor" != "xno"], + [AC_DEFINE(USE_LIBDISPATCH_INIT_CONSTRUCTOR, 1, + [Define to tag libdispatch_init as a constructor])] +) + +# +# On Mac OS X libdispatch can use the non-portable direct pthread TSD functions +# +AC_ARG_ENABLE([apple-tsd-optimizations], + [AS_HELP_STRING([--enable-apple-tsd-optimizations], + [Use non-portable pthread TSD optimizations for Mac OS X.])] +) + +AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], + [AC_DEFINE(USE_APPLE_TSD_OPTIMIZATIONS, 1, + [Define to use non-portable pthread TSD optimizations for Mac OS X)])] +) + +AC_USE_SYSTEM_EXTENSIONS +AC_PROG_CC +AC_PROG_CXX +AC_PROG_INSTALL +AC_PROG_LIBTOOL +AC_PATH_PROGS(MIG, mig) + +AC_PATH_PROG(LEAKS, leaks) +AS_IF([test "x$LEAKS" != "x"], + [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present])] +) + +AM_INIT_AUTOMAKE([foreign]) + +DISPATCH_C_ATOMIC_BUILTINS + +case $dispatch_cv_atomic in + yes) ;; + -march*) MARCH_FLAGS="$dispatch_cv_atomic" + AC_SUBST([MARCH_FLAGS]) ;; + *) AC_MSG_ERROR([No gcc builtin atomic operations available]) ;; +esac + +# +# Find libraries we will need +# +AC_SEARCH_LIBS(clock_gettime, rt)
+AC_SEARCH_LIBS(pthread_create, pthread) + +# +# Prefer native kqueue(2); otherwise use libkqueue if present. +# +AC_CHECK_HEADER(sys/event.h, [], + [PKG_CHECK_MODULES(KQUEUE, libkqueue)] +) + +# +# Checks for header files. +# +AC_HEADER_STDC +AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h]) + +# hack for pthread_machdep.h's #include +AS_IF([test -n "$apple_xnu_source_system_path"], [ + saveCPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I." + ln -fsh "$apple_xnu_source_system_path" System +]) +AC_CHECK_HEADERS([pthread_machdep.h]) +AS_IF([test -n "$apple_xnu_source_system_path"], [ + rm -f System + CPPFLAGS="$saveCPPFLAGS" +]) + +# +# Core Services is tested in one of the GCD regression tests, so test for its +# presence using its header file. +# +AC_CHECK_HEADER([CoreServices/CoreServices.h], + [have_coreservices=true], + [have_coreservices=false] +) +AM_CONDITIONAL(HAVE_CORESERVICES, $have_coreservices) + +# +# We use the availability of mach.h to decide whether to compile in all sorts +# of Machisms, including using Mach ports as event sources, etc. +# +AC_CHECK_HEADER([mach/mach.h], [ + AC_DEFINE(HAVE_MACH, 1, [Define if mach is present]) + AC_DEFINE(__DARWIN_NON_CANCELABLE, 1, [Define if using Darwin $NOCANCEL]) + have_mach=true], + [have_mach=false] +) +AM_CONDITIONAL(USE_MIG, $have_mach) + +# +# We use the availability of pthread_workqueue.h to decide whether to compile +# in support for pthread work queues. +# +AC_CHECK_HEADER([pthread_workqueue.h], + [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])] +) + +# +# Find functions and declarations we care about. 
+# +AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [], + [[#include <time.h>]]) +AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_SIGNAL], [], [], + [[#include <sys/event.h>]]) +AC_CHECK_DECLS([FD_COPY], [], [], [[#include <sys/select.h>]]) +AC_CHECK_DECLS([SIGEMT], [], [], [[#include <signal.h>]]) +AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK], [], [], [[#include <sys/mount.h>]]) +AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include <errno.h>]]) +AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf getprogname]) + +AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], + [have_posix_spawn_start_suspended=true], + [have_posix_spawn_start_suspended=false], + [[#include <spawn.h>]] +) +AM_CONDITIONAL(HAVE_POSIX_SPAWN_START_SUSPENDED, $have_posix_spawn_start_suspended) + +AC_CHECK_FUNC([sem_init], + [have_sem_init=true], + [have_sem_init=false] +) + +# +# We support both Mach semaphores and POSIX semaphores; if the former are +# available, prefer them. +# +AC_MSG_CHECKING([what semaphore type to use]); +AS_IF([test "x$have_mach" = "xtrue"], + [AC_DEFINE(USE_MACH_SEM, 1, [Define to use Mach semaphores]) + AC_MSG_RESULT([Mach semaphores])], + [test "x$have_sem_init" = "xtrue"], + [AC_DEFINE(USE_POSIX_SEM, 1, [Define to use POSIX semaphores]) + AC_MSG_RESULT([POSIX semaphores])], + [AC_MSG_ERROR([no supported semaphore type])] +) + +AC_CHECK_HEADERS([sys/cdefs.h], [], [], + [#ifdef HAVE_SYS_CDEFS_H + #include <sys/cdefs.h> + #endif]) + +DISPATCH_C_BLOCKS + +AC_CACHE_CHECK([for -fvisibility=hidden], [dispatch_cv_cc_visibility_hidden], [ + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fvisibility=hidden" + AC_LINK_IFELSE([AC_LANG_PROGRAM([ + extern __attribute__ ((visibility ("default"))) int foo; int foo;], [foo = 0;])], + [dispatch_cv_cc_visibility_hidden="yes"], [dispatch_cv_cc_visibility_hidden="no"]) + CFLAGS="$saveCFLAGS" +]) +AS_IF([test "x$dispatch_cv_cc_visibility_hidden" != "xno"], [ + VISIBILITY_FLAGS="-fvisibility=hidden" +]) +AC_SUBST([VISIBILITY_FLAGS]) + +AC_CACHE_CHECK([for -momit-leaf-frame-pointer],
[dispatch_cv_cc_omit_leaf_fp], [ + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -momit-leaf-frame-pointer" + AC_LINK_IFELSE([AC_LANG_PROGRAM([ + extern int foo(void); int foo(void) {return 1;}], [foo();])], + [dispatch_cv_cc_omit_leaf_fp="yes"], [dispatch_cv_cc_omit_leaf_fp="no"]) + CFLAGS="$saveCFLAGS" +]) +AS_IF([test "x$dispatch_cv_cc_omit_leaf_fp" != "xno"], [ + OMIT_LEAF_FP_FLAGS="-momit-leaf-frame-pointer" +]) +AC_SUBST([OMIT_LEAF_FP_FLAGS]) + +AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ + saveLDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6" + AC_LINK_IFELSE([AC_LANG_PROGRAM([ + extern int foo; int foo;], [foo = 0;])], + [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) + LDFLAGS="$saveLDFLAGS" +]) +AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" != "xno"]) + +# +# Temporary: some versions of clang do not mark __builtin_trap() as +# __attribute__((__noreturn__)). Detect and add if required. +# +AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM([void __attribute__((__noreturn__)) temp(void) { __builtin_trap(); }], [])], [ + AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn]) + ], []) + +# +# Generate Makefiles. +# +AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile private/Makefile src/Makefile]) +AC_OUTPUT diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am new file mode 100644 index 000000000..5cba7138e --- /dev/null +++ b/dispatch/Makefile.am @@ -0,0 +1,19 @@ +# +# +# + +dispatchdir=$(includedir)/dispatch + +dispatch_HEADERS= \ + base.h \ + data.h \ + dispatch.h \ + group.h \ + io.h \ + object.h \ + once.h \ + queue.h \ + semaphore.h \ + source.h \ + time.h + diff --git a/src/base.h b/dispatch/base.h similarity index 76% rename from src/base.h rename to dispatch/base.h index 3799a9a6e..029e3e0a8 100644 --- a/src/base.h +++ b/dispatch/base.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -47,19 +47,24 @@ typedef union { struct dispatch_source_s *_ds; struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; + struct dispatch_data_s *_ddata; + struct dispatch_io_s *_dchannel; + struct dispatch_operation_s *_doperation; + struct dispatch_disk_s *_ddisk; } dispatch_object_t __attribute__((transparent_union)); #endif typedef void (*dispatch_function_t)(void *); #ifdef __cplusplus -#define DISPATCH_DECL(name) typedef struct name##_s : public dispatch_object_s {} *name##_t; +#define DISPATCH_DECL(name) \ + typedef struct name##_s : public dispatch_object_s {} *name##_t #else /*! 
@parseOnly */ -#define DISPATCH_DECL(name) typedef struct name##_s *name##_t; +#define DISPATCH_DECL(name) typedef struct name##_s *name##_t #endif -#ifdef __GNUC__ +#if __GNUC__ #define DISPATCH_NORETURN __attribute__((__noreturn__)) #define DISPATCH_NOTHROW __attribute__((__nothrow__)) #define DISPATCH_NONNULL1 __attribute__((__nonnull__(1))) @@ -69,7 +74,7 @@ typedef void (*dispatch_function_t)(void *); #define DISPATCH_NONNULL5 __attribute__((__nonnull__(5))) #define DISPATCH_NONNULL6 __attribute__((__nonnull__(6))) #define DISPATCH_NONNULL7 __attribute__((__nonnull__(7))) -#if __clang__ +#if __clang__ && __clang_major__ < 3 // rdar://problem/6857843 #define DISPATCH_NONNULL_ALL #else @@ -77,8 +82,10 @@ typedef void (*dispatch_function_t)(void *); #endif #define DISPATCH_SENTINEL __attribute__((__sentinel__)) #define DISPATCH_PURE __attribute__((__pure__)) +#define DISPATCH_CONST __attribute__((__const__)) #define DISPATCH_WARN_RESULT __attribute__((__warn_unused_result__)) #define DISPATCH_MALLOC __attribute__((__malloc__)) +#define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__)) #else /*! @parseOnly */ #define DISPATCH_NORETURN @@ -105,9 +112,31 @@ typedef void (*dispatch_function_t)(void *); /*! @parseOnly */ #define DISPATCH_PURE /*! @parseOnly */ +#define DISPATCH_CONST +/*! @parseOnly */ #define DISPATCH_WARN_RESULT /*! @parseOnly */ #define DISPATCH_MALLOC +/*! 
@parseOnly */ +#define DISPATCH_ALWAYS_INLINE +#endif + +#if __GNUC__ +#define DISPATCH_EXPORT extern __attribute__((visibility("default"))) +#else +#define DISPATCH_EXPORT extern +#endif + +#if __GNUC__ +#define DISPATCH_INLINE static __inline__ +#else +#define DISPATCH_INLINE static inline +#endif + +#if __GNUC__ +#define DISPATCH_EXPECT(x, v) __builtin_expect((x), (v)) +#else +#define DISPATCH_EXPECT(x, v) (x) #endif #endif diff --git a/dispatch/data.h b/dispatch/data.h new file mode 100644 index 000000000..2222e1bc0 --- /dev/null +++ b/dispatch/data.h @@ -0,0 +1,248 @@ +/* + * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_DATA__ +#define __DISPATCH_DATA__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! @header + * Dispatch data objects describe contiguous or sparse regions of memory that + * may be managed by the system or by the application. + * Dispatch data objects are immutable, any direct access to memory regions + * represented by dispatch objects must not modify that memory. + */ + +/*! + * @typedef dispatch_data_t + * A dispatch object representing memory regions. + */ +DISPATCH_DECL(dispatch_data); + +/*! 
+ * @var dispatch_data_empty + * @discussion The singleton dispatch data object representing a zero-length + * memory region. + */ +#define dispatch_data_empty (&_dispatch_data_empty) +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; + +#ifdef __BLOCKS__ + +/*! + * @const DISPATCH_DATA_DESTRUCTOR_DEFAULT + * @discussion The default destructor for dispatch data objects. + * Used at data object creation to indicate that the supplied buffer should + * be copied into internal storage managed by the system. + */ +#define DISPATCH_DATA_DESTRUCTOR_DEFAULT NULL + +/*! + * @const DISPATCH_DATA_DESTRUCTOR_FREE + * @discussion The destructor for dispatch data objects created from a malloc'd + * buffer. Used at data object creation to indicate that the supplied buffer + * was allocated by the malloc() family and should be destroyed with free(3). + */ +#define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_free; + +/*! + * @function dispatch_data_create + * Creates a dispatch data object from the given contiguous buffer of memory. If + * a non-default destructor is provided, ownership of the buffer remains with + * the caller (i.e. the bytes will not be copied). The last release of the data + * object will result in the invocation of the specified destructor on the + * specified queue to free the buffer. + * + * If the DISPATCH_DATA_DESTRUCTOR_FREE destructor is provided the buffer will + * be freed via free(3) and the queue argument ignored. + * + * If the DISPATCH_DATA_DESTRUCTOR_DEFAULT destructor is provided, data object + * creation will copy the buffer into internal memory managed by the system. + * + * @param buffer A contiguous buffer of data. + * @param size The size of the contiguous buffer of data. + * @param queue The queue to which the destructor should be submitted. 
+ * @param destructor The destructor responsible for freeing the data when it + * is no longer needed. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create(const void *buffer, + size_t size, + dispatch_queue_t queue, + dispatch_block_t destructor); + +/*! + * @function dispatch_data_get_size + * Returns the logical size of the memory region(s) represented by the specified + * dispatch data object. + * + * @param data The dispatch data object to query. + * @result The number of bytes represented by the data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW +size_t +dispatch_data_get_size(dispatch_data_t data); + +/*! + * @function dispatch_data_create_map + * Maps the memory represented by the specified dispatch data object as a single + * contiguous memory region and returns a new data object representing it. + * If non-NULL references to a pointer and a size variable are provided, they + * are filled with the location and extent of that region. These allow direct + * read access to the represented memory, but are only valid until the copy + * object is released. + * + * @param data The dispatch data object to map. + * @param buffer_ptr A pointer to a pointer variable to be filled with the + * location of the mapped contiguous memory region, or + * NULL. + * @param size_ptr A pointer to a size_t variable to be filled with the + * size of the mapped contiguous memory region, or NULL. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_map(dispatch_data_t data, + const void **buffer_ptr, + size_t *size_ptr); + +/*! 
+ * @function dispatch_data_create_concat + * Returns a new dispatch data object representing the concatenation of the + * specified data objects. Those objects may be released by the application + * after the call returns (however, the system might not deallocate the memory + * region(s) described by them until the newly created object has also been + * released). + * + * @param data1 The data object representing the region(s) of memory to place + * at the beginning of the newly created object. + * @param data2 The data object representing the region(s) of memory to place + * at the end of the newly created object. + * @result A newly created object representing the concatenation of the + * data1 and data2 objects. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); + +/*! + * @function dispatch_data_create_subrange + * Returns a new dispatch data object representing a subrange of the specified + * data object, which may be released by the application after the call returns + * (however, the system might not deallocate the memory region(s) described by + * that object until the newly created object has also been released). + * + * @param data The data object representing the region(s) of memory to + * create a subrange of. + * @param offset The offset into the data object where the subrange + * starts. + * @param length The length of the range. + * @result A newly created object representing the specified + * subrange of the data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_subrange(dispatch_data_t data, + size_t offset, + size_t length); + +/*! + * @typedef dispatch_data_applier_t + * A block to be invoked for every contiguous memory region in a data object. 
+ * + * @param region A data object representing the current region. + * @param offset The logical offset of the current region to the start + * of the data object. + * @param buffer The location of the memory for the current region. + * @param size The size of the memory for the current region. + * @result A Boolean indicating whether traversal should continue. + */ +typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, + size_t offset, + const void *buffer, + size_t size); + +/*! + * @function dispatch_data_apply + * Traverse the memory regions represented by the specified dispatch data object + * in logical order and invoke the specified block once for every contiguous + * memory region encountered. + * + * Each invocation of the block is passed a data object representing the current + * region and its logical offset, along with the memory location and extent of + * the region. These allow direct read access to the memory region, but are only + * valid until the passed-in region object is released. Note that the region + * object is released by the system when the block returns, it is the + * responsibility of the application to retain it if the region object or the + * associated memory location are needed after the block returns. + * + * @param data The data object to traverse. + * @param applier The block to be invoked for every contiguous memory + * region in the data object. + * @result A Boolean indicating whether traversal completed + * successfully. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +bool +dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); + +/*! 
+ * @function dispatch_data_copy_region + * Finds the contiguous memory region containing the specified location among + * the regions represented by the specified object and returns a copy of the + * internal dispatch data object representing that region along with its logical + * offset in the specified object. + * + * @param data The dispatch data object to query. + * @param location The logical position in the data object to query. + * @param offset_ptr A pointer to a size_t variable to be filled with the + * logical offset of the returned region object to the + * start of the queried data object. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_copy_region(dispatch_data_t data, + size_t location, + size_t *offset_ptr); + +#endif /* __BLOCKS__ */ + +__END_DECLS + +#endif /* __DISPATCH_DATA__ */ diff --git a/src/dispatch.h b/dispatch/dispatch.h similarity index 79% rename from src/dispatch.h rename to dispatch/dispatch.h index 95331d7a4..2ba2cce38 100644 --- a/src/dispatch.h +++ b/dispatch/dispatch.h @@ -1,34 +1,42 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #ifndef __DISPATCH_PUBLIC__ #define __DISPATCH_PUBLIC__ +#ifdef __APPLE__ #include +#include +#endif #include #include #include #include #include +#include + +#ifndef __OSX_AVAILABLE_STARTING +#define __OSX_AVAILABLE_STARTING(x, y) +#endif -#define DISPATCH_API_VERSION 20090501 +#define DISPATCH_API_VERSION 20110201 #ifndef __DISPATCH_BUILDING_DISPATCH__ @@ -44,6 +52,8 @@ #include #include #include +#include +#include #undef __DISPATCH_INDIRECT__ diff --git a/src/group.h b/dispatch/group.h similarity index 83% rename from src/group.h rename to dispatch/group.h index a2829482b..4e6e11d97 100644 --- a/src/group.h +++ b/dispatch/group.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -40,7 +40,7 @@ __BEGIN_DECLS * * @abstract * Creates new group with which blocks may be associated. - * + * * @discussion * This function creates a new group with which blocks may be associated. * The dispatch group may be used to wait for the completion of the blocks it @@ -49,8 +49,8 @@ __BEGIN_DECLS * @result * The newly created group, or NULL on failure. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_WARN_RESULT +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_group_t dispatch_group_create(void); @@ -78,8 +78,8 @@ dispatch_group_create(void); * The block to perform asynchronously. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, @@ -112,8 +112,8 @@ dispatch_group_async(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_async_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t queue, @@ -124,11 +124,11 @@ dispatch_group_async_f(dispatch_group_t group, * @function dispatch_group_wait * * @abstract - * Wait synchronously for the previously submitted blocks to complete; - * returns if the blocks have not completed within the specified timeout. + * Wait synchronously until all the blocks associated with a group have + * completed or until the specified timeout has elapsed. * * @discussion - * This function waits for the completion of the blocks associated with the + * This function waits for the completion of the blocks associated with the * given dispatch group, and returns after all blocks have completed or when * the specified timeout has elapsed. When a timeout occurs, the group is * restored to its original state. 
@@ -155,8 +155,8 @@ dispatch_group_async_f(dispatch_group_t group, * Returns zero on success (all blocks associated with the group completed * within the specified timeout) or non-zero on error (i.e. timed out). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); @@ -164,8 +164,8 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); * @function dispatch_group_notify * * @abstract - * Schedule a block to be submitted to a queue when a group of previously - * submitted blocks have completed. + * Schedule a block to be submitted to a queue when all the blocks associated + * with a group have completed. * * @discussion * This function schedules a notification block to be submitted to the specified @@ -173,7 +173,7 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); * * If no blocks are associated with the dispatch group (i.e. the group is empty) * then the notification block will be submitted immediately. - * + * * The group will be empty at the time the notification block is submitted to * the target queue. The group may either be released with dispatch_release() * or reused for additional operations. @@ -191,8 +191,8 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); * The block to submit when the group completes. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_notify(dispatch_group_t group, dispatch_queue_t queue, @@ -203,8 +203,8 @@ dispatch_group_notify(dispatch_group_t group, * @function dispatch_group_notify_f * * @abstract - * Schedule a function to be submitted to a queue when a group of previously - * submitted functions have completed. + * Schedule a function to be submitted to a queue when all the blocks + * associated with a group have completed. * * @discussion * See dispatch_group_notify() for details. @@ -221,8 +221,9 @@ dispatch_group_notify(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_notify_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +DISPATCH_NOTHROW void dispatch_group_notify_f(dispatch_group_t group, dispatch_queue_t queue, @@ -244,8 +245,8 @@ dispatch_group_notify_f(dispatch_group_t group, * The dispatch group to update. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_enter(dispatch_group_t group); @@ -263,8 +264,8 @@ dispatch_group_enter(dispatch_group_t group); * The dispatch group to update. * The result of passing NULL in this parameter is undefined. 
 */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_leave(dispatch_group_t group); diff --git a/dispatch/io.h b/dispatch/io.h new file mode 100644 index 000000000..f8fb2ff42 --- /dev/null +++ b/dispatch/io.h @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2009-2010 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_IO__ +#define __DISPATCH_IO__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! @header + * Dispatch I/O provides both stream and random access asynchronous read and + * write operations on file descriptors. One or more dispatch I/O channels may + * be created from a file descriptor as either the DISPATCH_IO_STREAM type or + * DISPATCH_IO_RANDOM type. Once a channel has been created the application may + * schedule asynchronous read and write operations. + * + * The application may set policies on the dispatch I/O channel to indicate the + * desired frequency of I/O handlers for long-running operations. + * + * Dispatch I/O also provides a memory management model for I/O buffers that + * avoids unnecessary copying of data when pipelined between channels. 
Dispatch + * I/O monitors the overall memory pressure and I/O access patterns for the + * application to optimize resource utilization. + */ + +/*! + * @typedef dispatch_fd_t + * Native file descriptor type for the platform. + */ +typedef int dispatch_fd_t; + +#ifdef __BLOCKS__ + +/*! + * @functiongroup Dispatch I/O Convenience API + * Convenience wrappers around the dispatch I/O channel API, with simpler + * callback handler semantics and no explicit management of channel objects. + * File descriptors passed to the convenience API are treated as streams, and + * scheduling multiple operations on one file descriptor via the convenience API + * may incur more overhead than by using the dispatch I/O channel API directly. + */ + +/*! + * @function dispatch_read + * Schedule a read operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued with the data read from the + * file descriptor when the operation has completed or an error occurs. + * + * The data object passed to the handler will be automatically released by the + * system when the handler returns. It is the responsibility of the application + * to retain, concatenate or copy the data object if it is needed after the + * handler returns. + * + * The data object passed to the handler will only contain as much data as is + * currently available from the file descriptor (up to the specified length). + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with a data object of any data + * that could be read successfully. + * + * An invocation of the handler with an error code of zero and an empty data + * object indicates that EOF was reached. + * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. 
It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor from which to read the data. + * @param length The length of data to read from the file descriptor, + * or SIZE_MAX to indicate that all of the data currently + * available from the file descriptor should be read. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param handler The handler to enqueue when data is ready to be + * delivered. + * @param data The data read from the file descriptor. + * @param error An errno condition for the read operation or + * zero if the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +void +dispatch_read(dispatch_fd_t fd, + size_t length, + dispatch_queue_t queue, + void (^handler)(dispatch_data_t data, int error)); + +/*! + * @function dispatch_write + * Schedule a write operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued when the operation has + * completed or an error occurs. + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with the data that could not + * be successfully written. + * + * An invocation of the handler with an error code of zero indicates that the + * data was fully written to the channel. + * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. 
It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor to which to write the data. + * @param data The data object to write to the file descriptor. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param handler The handler to enqueue when the data has been written. + * @param data The data that could not be written to the I/O + * channel, or NULL. + * @param error An errno condition for the write operation or + * zero if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4 +DISPATCH_NOTHROW +void +dispatch_write(dispatch_fd_t fd, + dispatch_data_t data, + dispatch_queue_t queue, + void (^handler)(dispatch_data_t data, int error)); + +/*! + * @functiongroup Dispatch I/O Channel API + */ + +/*! + * @typedef dispatch_io_t + * A dispatch I/O channel represents the asynchronous I/O policy applied to a + * file descriptor. I/O channels are first class dispatch objects and may be + * retained and released, suspended and resumed, etc. + */ +DISPATCH_DECL(dispatch_io); + +/*! + * @typedef dispatch_io_handler_t + * The prototype of I/O handler blocks for dispatch I/O operations. + * + * @param done A flag indicating whether the operation is complete. + * @param data The data object to be handled. + * @param error An errno condition for the operation. + */ +typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, + int error); + +/*! + * @typedef dispatch_io_type_t + * The type of a dispatch I/O channel: + * + * @const DISPATCH_IO_STREAM A dispatch I/O channel representing a stream of + * bytes. 
Read and write operations on a channel of this type are performed + * serially (in order of creation) and read/write data at the file pointer + * position that is current at the time the operation starts executing. + * Operations of different type (read vs. write) may be performed simultaneously. + * Offsets passed to operations on a channel of this type are ignored. + * + * @const DISPATCH_IO_RANDOM A dispatch I/O channel representing a random + * access file. Read and write operations on a channel of this type may be + * performed concurrently and read/write data at the specified offset. Offsets + * are interpreted relative to the file pointer position current at the time the + * I/O channel is created. Attempting to create a channel of this type for a + * file descriptor that is not seekable will result in an error. + */ +#define DISPATCH_IO_STREAM 0 +#define DISPATCH_IO_RANDOM 1 + +typedef unsigned long dispatch_io_type_t; + +/*! + * @function dispatch_io_create + * Create a dispatch I/O channel associated with a file descriptor. The system + * takes control of the file descriptor until the channel is closed, an error + * occurs on the file descriptor or all references to the channel are released. + * At that time the specified cleanup handler will be enqueued and control over + * the file descriptor relinquished. + * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param fd The file descriptor to associate with the I/O channel. + * @param queue The dispatch queue to which the handler should be submitted. 
+ * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create(dispatch_io_type_t type, + dispatch_fd_t fd, + dispatch_queue_t queue, + void (^cleanup_handler)(int error)); + +/*! +* @function dispatch_io_create_with_path +* Create a dispatch I/O channel associated with a path name. The specified +* path, oflag and mode parameters will be passed to open(2) when the first I/O +* operation on the channel is ready to execute and the resulting file +* descriptor will remain open and under the control of the system until the +* channel is closed, an error occurs on the file descriptor or all references +* to the channel are released. At that time the file descriptor will be closed +* and the specified cleanup handler will be enqueued. +* +* @param type The desired type of I/O channel (DISPATCH_IO_STREAM +* or DISPATCH_IO_RANDOM). +* @param path The path to associate with the I/O channel. +* @param oflag The flags to pass to open(2) when opening the file at +* path. +* @param mode The mode to pass to open(2) when creating the file at +* path (i.e. with flag O_CREAT), zero otherwise. +* @param queue The dispatch queue to which the handler should be +* submitted. +* @param cleanup_handler The handler to enqueue when the system +* has closed the file at path. +* @param error An errno condition if control is relinquished +* because channel creation or opening of the +* specified file failed, zero otherwise. +* @result The newly created dispatch I/O channel or NULL if an error +* occurred. 
+*/ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_path(dispatch_io_type_t type, + const char *path, int oflag, mode_t mode, + dispatch_queue_t queue, + void (^cleanup_handler)(int error)); + +/*! + * @function dispatch_io_create_with_io + * Create a new dispatch I/O channel from an existing dispatch I/O channel. + * The new channel inherits the file descriptor or path name associated with + * the existing channel, but not its channel type or policies. + * + * If the existing channel is associated with a file descriptor, control by the + * system over that file descriptor is extended until the new channel is also + * closed, an error occurs on the file descriptor, or all references to both + * channels are released. At that time the specified cleanup handler will be + * enqueued and control over the file descriptor relinquished. + * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param io The existing channel to create the new I/O channel from. + * @param queue The dispatch queue to which the handler should be submitted. + * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor + * (resp. closes the file at path) associated with + * the existing channel. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. 
+ * @result The newly created dispatch I/O channel or NULL if an error + * occurred. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_io(dispatch_io_type_t type, + dispatch_io_t io, + dispatch_queue_t queue, + void (^cleanup_handler)(int error)); + +/*! + * @function dispatch_io_read + * Schedule a read operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data read from the channel is described by the dispatch data object + * passed to the I/O handler. This object will be automatically released by the + * system when the I/O handler returns. It is the responsibility of the + * application to retain, concatenate or copy the data object if it is needed + * after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler block + * has returned. + * + * An invocation of the I/O handler with the done flag set indicates that the + * read operation is complete and that the handler will not be enqueued again. + * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and a NULL data object. + * + * An invocation of the I/O handler with the done flag set, an error code of + * zero and an empty data object indicates that EOF was reached. + * + * @param channel The dispatch I/O channel from which to read the data. + * @param offset The offset relative to the channel position from which + * to start reading (only for DISPATCH_IO_RANDOM). 
+ * @param length The length of data to read from the I/O channel, or + * SIZE_MAX to indicate that data should be read until EOF + * is reached. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param io_handler The I/O handler to enqueue when data is ready to be + * delivered. + * @param done A flag indicating whether the operation is complete. + * @param data An object with the data most recently read from the + * I/O channel as part of this read operation, or NULL. + * @param error An errno condition for the read operation or zero if + * the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5 +DISPATCH_NOTHROW +void +dispatch_io_read(dispatch_io_t channel, + off_t offset, + size_t length, + dispatch_queue_t queue, + dispatch_io_handler_t io_handler); + +/*! + * @function dispatch_io_write + * Schedule a write operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data remaining to be written to the I/O channel is described by the + * dispatch data object passed to the I/O handler. This object will be + * automatically released by the system when the I/O handler returns. It is the + * responsibility of the application to retain, concatenate or copy the data + * object if it is needed after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler block + * has returned. + * + * An invocation of the I/O handler with the done flag set indicates that the + * write operation is complete and that the handler will not be enqueued again. 
+ * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and an object containing the data that could not be + * written. + * + * An invocation of the I/O handler with the done flag set and an error code of + * zero indicates that the data was fully written to the channel. + * + * @param channel The dispatch I/O channel on which to write the data. + * @param offset The offset relative to the channel position from which + * to start writing (only for DISPATCH_IO_RANDOM). + * @param data The data to write to the I/O channel. The data object + * will be retained by the system until the write operation + * is complete. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param io_handler The I/O handler to enqueue when data has been delivered. + * @param done A flag indicating whether the operation is complete. + * @param data An object of the data remaining to be + * written to the I/O channel as part of this write + * operation, or NULL. + * @param error An errno condition for the write operation or zero + * if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 +DISPATCH_NONNULL5 DISPATCH_NOTHROW +void +dispatch_io_write(dispatch_io_t channel, + off_t offset, + dispatch_data_t data, + dispatch_queue_t queue, + dispatch_io_handler_t io_handler); + +/*! + * @typedef dispatch_io_close_flags_t + * The type of flags you can set on a dispatch_io_close() call + * + * @const DISPATCH_IO_STOP Stop outstanding operations on a channel when + * the channel is closed. + */ +#define DISPATCH_IO_STOP 0x1 + +typedef unsigned long dispatch_io_close_flags_t; + +/*! 
+ * @function dispatch_io_close + * Close the specified I/O channel to new read or write operations; scheduling + * operations on a closed channel results in their handler returning an error. + * + * If the DISPATCH_IO_STOP flag is provided, the system will make a best effort + * to interrupt any outstanding read and write operations on the I/O channel, + * otherwise those operations will run to completion normally. + * Partial results of read and write operations may be returned even after a + * channel is closed with the DISPATCH_IO_STOP flag. + * The final invocation of an I/O handler of an interrupted operation will be + * passed an ECANCELED error code, as will the I/O handler of an operation + * scheduled on a closed channel. + * + * @param channel The dispatch I/O channel to close. + * @param flags The flags for the close operation. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); + +/*! + * @function dispatch_io_barrier + * Schedule a barrier operation on the specified I/O channel; all previously + * scheduled operations on the channel will complete before the provided + * barrier block is enqueued onto the global queue determined by the channel's + * target queue, and no subsequently scheduled operations will start until the + * barrier block has returned. + * + * If multiple channels are associated with the same file descriptor, a barrier + * operation scheduled on any of these channels will act as a barrier across all + * channels in question, i.e. all previously scheduled operations on any of the + * channels will complete before the barrier block is enqueued, and no + * operations subsequently scheduled on any of the channels will start until the + * barrier block has returned. 
+ * + * While the barrier block is running, it may safely operate on the channel's + * underlying file descriptor with fsync(2), lseek(2) etc. (but not close(2)). + * + * @param channel The dispatch I/O channel to schedule the barrier operation on. + * @param barrier The barrier block to schedule. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); + +/*! + * @function dispatch_io_get_descriptor + * Returns the file descriptor underlying a dispatch I/O channel. + * + * Will return -1 for a channel closed with dispatch_io_close() and for a + * channel associated with a path name that has not yet been open(2)ed. + * + * If called from a barrier block scheduled on a channel associated with a path + * name that has not yet been open(2)ed, this will trigger the channel open(2) + * operation and return the resulting file descriptor. + * + * @param channel The dispatch I/O channel to query. + * @result The file descriptor underlying the channel, or -1. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_fd_t +dispatch_io_get_descriptor(dispatch_io_t channel); + +/*! + * @function dispatch_io_set_high_water + * Set a high water mark on the I/O channel for all operations. + * + * The system will make a best effort to enqueue I/O handlers with partial + * results as soon as the number of bytes processed by an operation (i.e. read or + * written) reaches the high water mark. + * + * The size of data objects passed to I/O handlers for this channel will never + * exceed the specified high water mark. + * + * The default value for the high water mark is unlimited (i.e. SIZE_MAX). + * + * @param channel The dispatch I/O channel on which to set the policy. + * @param high_water The number of bytes to use as a high water mark. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); + +/*! + * @function dispatch_io_set_low_water + * Set a low water mark on the I/O channel for all operations. + * + * The system will process (i.e. read or write) at least the low water mark + * number of bytes for an operation before enqueueing I/O handlers with partial + * results. + * + * The size of data objects passed to intermediate I/O handler invocations for + * this channel (i.e. excluding the final invocation) will never be smaller than + * the specified low water mark, except if the channel has an interval with the + * DISPATCH_IO_STRICT_INTERVAL flag set or if EOF or an error was encountered. + * + * I/O handlers should be prepared to receive amounts of data significantly + * larger than the low water mark in general. If an I/O handler requires + * intermediate results of fixed size, set both the low and the high water + * mark to that size. + * + * The default value for the low water mark is unspecified, but must be assumed + * to be such that intermediate handler invocations may occur. + * If I/O handler invocations with partial results are not desired, set the + * low water mark to SIZE_MAX. + * + * @param channel The dispatch I/O channel on which to set the policy. + * @param low_water The number of bytes to use as a low water mark. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water); + +/*! + * @typedef dispatch_io_interval_flags_t + * Type of flags to set on dispatch_io_set_interval() + * + * @const DISPATCH_IO_STRICT_INTERVAL Enqueue I/O handlers at a channel's + * interval setting even if the amount of data ready to be delivered is inferior + * to the low water mark (or zero). 
+ */ +#define DISPATCH_IO_STRICT_INTERVAL 0x1 + +typedef unsigned long dispatch_io_interval_flags_t; + +/*! + * @function dispatch_io_set_interval + * Set a nanosecond interval at which I/O handlers are to be enqueued on the + * I/O channel for all operations. + * + * This allows an application to receive periodic feedback on the progress of + * read and write operations, e.g. for the purposes of displaying progress bars. + * + * If the amount of data ready to be delivered to an I/O handler at the interval + * is inferior to the channel low water mark, the handler will only be enqueued + * if the DISPATCH_IO_STRICT_INTERVAL flag is set. + * + * Note that the system may defer enqueueing interval I/O handlers by a small + * unspecified amount of leeway in order to align with other system activity for + * improved system performance or power consumption. + * + * @param channel The dispatch I/O channel on which to set the policy. + * @param interval The interval in nanoseconds at which delivery of the I/O + * handler is desired. + * @param flags Flags indicating desired data delivery behavior at + * interval time. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_io_set_interval(dispatch_io_t channel, + uint64_t interval, + dispatch_io_interval_flags_t flags); + +#endif /* __BLOCKS__ */ + +__END_DECLS + +#endif /* __DISPATCH_IO__ */ diff --git a/src/object.h b/dispatch/object.h similarity index 74% rename from src/object.h rename to dispatch/object.h index febc960dc..2ecf25186 100644 --- a/src/object.h +++ b/dispatch/object.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -34,19 +34,28 @@ __BEGIN_DECLS * @abstract * Programmatically log debug information about a dispatch object. * + * @discussion + * Programmatically log debug information about a dispatch object. By default, + * the log output is sent to syslog at notice level. In the debug version of + * the library, the log output is sent to a file in /var/tmp. + * The log output destination can be configured via the LIBDISPATCH_LOG + * environment variable, valid values are: YES, NO, syslog, stderr, file. + * * @param object * The object to introspect. * * @param message * The message to log above and beyond the introspection. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,0))) void dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); @@ -64,8 +73,8 @@ dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); * The object to retain. * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_retain(dispatch_object_t object); @@ -85,8 +94,8 @@ dispatch_retain(dispatch_object_t object); * The object to release. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_release(dispatch_object_t object); @@ -102,8 +111,9 @@ dispatch_release(dispatch_object_t object); * @result * The context of the object; may be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT +DISPATCH_NOTHROW void * dispatch_get_context(dispatch_object_t object); @@ -120,8 +130,8 @@ dispatch_get_context(dispatch_object_t object); * The new client defined context for the object. This may be NULL. * */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW //DISPATCH_NONNULL1 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 void dispatch_set_context(dispatch_object_t object, void *context); @@ -146,8 +156,8 @@ dispatch_set_context(dispatch_object_t object, void *context); * The context parameter passed to the finalizer function is the current * context of the dispatch object at the time the finalizer call is made. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW //DISPATCH_NONNULL1 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 void dispatch_set_finalizer_f(dispatch_object_t object, dispatch_function_t finalizer); @@ -166,12 +176,12 @@ dispatch_set_finalizer_f(dispatch_object_t object, * Calls to dispatch_suspend() must be balanced with calls * to dispatch_resume(). * - * @param object + * @param object * The object to be suspended. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_suspend(dispatch_object_t object); @@ -181,12 +191,12 @@ dispatch_suspend(dispatch_object_t object); * @abstract * Resumes the invocation of blocks on a dispatch object. * - * @param object + * @param object * The object to be resumed. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); diff --git a/src/once.h b/dispatch/once.h similarity index 60% rename from src/once.h rename to dispatch/once.h index 8cd25d61b..32cf2e8de 100644 --- a/src/once.h +++ b/dispatch/once.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -55,23 +55,42 @@ typedef long dispatch_once_t; * initialized by the block. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block); -#ifdef __GNUC__ -#define dispatch_once(x, ...) do { if (__builtin_expect(*(x), ~0l) != ~0l) dispatch_once((x), (__VA_ARGS__)); } while (0) -#endif -#endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_once_f(dispatch_once_t *predicate, void *context, void (*function)(void *)); -#ifdef __GNUC__ -#define dispatch_once_f(x, y, z) do { if (__builtin_expect(*(x), ~0l) != ~0l) dispatch_once_f((x), (y), (z)); } while (0) +_dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) +{ + if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { + dispatch_once(predicate, block); + } +} +#undef dispatch_once +#define dispatch_once _dispatch_once #endif +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_once_f(dispatch_once_t *predicate, void *context, + dispatch_function_t function); + +DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3 +DISPATCH_NOTHROW +void +_dispatch_once_f(dispatch_once_t *predicate, 
void *context, + dispatch_function_t function) +{ + if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { + dispatch_once_f(predicate, context, function); + } +} +#undef dispatch_once_f +#define dispatch_once_f _dispatch_once_f + __END_DECLS #endif diff --git a/src/queue.h b/dispatch/queue.h similarity index 51% rename from src/queue.h rename to dispatch/queue.h index 6b55696bb..d76777136 100644 --- a/src/queue.h +++ b/dispatch/queue.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -30,7 +30,7 @@ * @header * * Dispatch is an abstract model for expressing concurrency via simple but - * powerful API. + * powerful API. * * At the core, dispatch provides serial FIFO queues to which blocks may be * submitted. Blocks submitted to these dispatch queues are invoked on a pool @@ -70,7 +70,7 @@ DISPATCH_DECL(dispatch_queue); * @typedef dispatch_queue_attr_t * * @abstract - * Attribute and policy extensions for dispatch queues. + * Attribute for dispatch queues. */ DISPATCH_DECL(dispatch_queue_attr); @@ -128,8 +128,8 @@ __BEGIN_DECLS * * The target queue determines whether the block will be invoked serially or * concurrently with respect to other blocks submitted to that same queue. 
- * Serial queues are processed concurrently with with respect to each other. - * + * Serial queues are processed concurrently with respect to each other. + * * @param queue * The target dispatch queue to which the block is submitted. * The system will hold a reference on the target queue until the block @@ -142,8 +142,8 @@ __BEGIN_DECLS * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_async(dispatch_queue_t queue, dispatch_block_t block); #endif @@ -156,7 +156,7 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block); * * @discussion * See dispatch_async() for details. - * + * * @param queue * The target dispatch queue to which the function is submitted. * The system will hold a reference on the target queue until the function @@ -172,8 +172,8 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block); * dispatch_async_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, void *context, @@ -210,8 +210,8 @@ dispatch_async_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); #endif @@ -238,8 +238,8 @@ dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); * dispatch_sync_f(). 
* The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, void *context, @@ -254,9 +254,9 @@ dispatch_sync_f(dispatch_queue_t queue, * @discussion * Submits a block to a dispatch queue for multiple invocations. This function * waits for the task block to complete before returning. If the target queue - * is a concurrent queue returned by dispatch_get_concurrent_queue(), the block - * may be invoked concurrently, and it must therefore be reentrant safe. - * + * is concurrent, the block may be invoked concurrently, and it must therefore + * be reentrant safe. + * * Each invocation of the block will be passed the current index of iteration. * * @param iterations @@ -271,10 +271,11 @@ dispatch_sync_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_apply(size_t iterations, dispatch_queue_t queue, void (^block)(size_t)); +dispatch_apply(size_t iterations, dispatch_queue_t queue, + void (^block)(size_t)); #endif /*! @@ -303,8 +304,8 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, void (^block)(size_t)) * current index of iteration. * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, void *context, @@ -315,18 +316,25 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * * @abstract * Returns the queue on which the currently executing block is running. - * + * * @discussion * Returns the queue on which the currently executing block is running. * * When dispatch_get_current_queue() is called outside of the context of a * submitted block, it will return the default concurrent queue. * + * Recommended for debugging and logging purposes only: + * The code must not make any assumptions about the queue returned, unless it + * is one of the global queues or a queue the code has itself created. + * The code must not assume that synchronous execution onto a queue is safe + * from deadlock if that queue is not the one returned by + * dispatch_get_current_queue(). + * * @result * Returns the current queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); @@ -345,12 +353,13 @@ dispatch_get_current_queue(void); * Returns the main queue. This queue is created automatically on behalf of * the main thread before main() is called. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern struct dispatch_queue_s _dispatch_main_q; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; #define dispatch_get_main_queue() (&_dispatch_main_q) /*! 
- * @enum dispatch_queue_priority_t + * @typedef dispatch_queue_priority_t + * Type of dispatch_queue_priority * * @constant DISPATCH_QUEUE_PRIORITY_HIGH * Items dispatched to the queue will run at high priority, @@ -368,12 +377,20 @@ extern struct dispatch_queue_s _dispatch_main_q; * i.e. the queue will be scheduled for execution after all * default priority and high priority queues have been * scheduled. + * + * @constant DISPATCH_QUEUE_PRIORITY_BACKGROUND + * Items dispatched to the queue will run at background priority, i.e. the queue + * will be scheduled for execution after all higher priority queues have been + * scheduled and the system will run items on this queue on a thread with + * background status as per setpriority(2) (i.e. disk I/O is throttled and the + * thread's scheduling priority is set to lowest value). */ -enum { - DISPATCH_QUEUE_PRIORITY_HIGH = 2, - DISPATCH_QUEUE_PRIORITY_DEFAULT = 0, - DISPATCH_QUEUE_PRIORITY_LOW = -2, -}; +#define DISPATCH_QUEUE_PRIORITY_HIGH 2 +#define DISPATCH_QUEUE_PRIORITY_DEFAULT 0 +#define DISPATCH_QUEUE_PRIORITY_LOW (-2) +#define DISPATCH_QUEUE_PRIORITY_BACKGROUND INT16_MIN + +typedef long dispatch_queue_priority_t; /*! * @function dispatch_get_global_queue @@ -396,10 +413,27 @@ enum { * @result * Returns the requested global queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_get_global_queue(long priority, unsigned long flags); +dispatch_get_global_queue(dispatch_queue_priority_t priority, + unsigned long flags); + +/*! + * @const DISPATCH_QUEUE_SERIAL + * @discussion A dispatch queue that invokes blocks serially in FIFO order. + */ +#define DISPATCH_QUEUE_SERIAL NULL + +/*! 
+ * @const DISPATCH_QUEUE_CONCURRENT + * @discussion A dispatch queue that may invoke blocks concurrently and supports + * barrier blocks submitted with the dispatch barrier API. + */ +#define DISPATCH_QUEUE_CONCURRENT (&_dispatch_queue_attr_concurrent) +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; /*! * @function dispatch_queue_create @@ -408,25 +442,35 @@ dispatch_get_global_queue(long priority, unsigned long flags); * Creates a new dispatch queue to which blocks may be submitted. * * @discussion - * Dispatch queues invoke blocks serially in FIFO order. + * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute + * invoke blocks serially in FIFO order. + * + * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may + * invoke blocks concurrently (similarly to the global concurrent queues, but + * potentially with more overhead), and support barrier blocks submitted with + * the dispatch barrier API, which e.g. enables the implementation of efficient + * reader-writer schemes. + * + * When a dispatch queue is no longer needed, it should be released with + * dispatch_release(). Note that any pending blocks submitted to a queue will + * hold a reference to that queue. Therefore a queue will not be deallocated + * until all pending blocks have finished. * - * When the dispatch queue is no longer needed, it should be released - * with dispatch_release(). Note that any pending blocks submitted - * to a queue will hold a reference to that queue. Therefore a queue - * will not be deallocated until all pending blocks have finished. + * The target queue of a newly created dispatch queue is the default priority + * global concurrent queue. * * @param label * A string label to attach to the queue. * This parameter is optional and may be NULL. * * @param attr - * Unused. Pass NULL for now. + * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT. 
* * @result * The newly created dispatch queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); @@ -443,11 +487,20 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); * @result * The label of the queue. The result may be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT +DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t queue); +/*! + * @const DISPATCH_TARGET_QUEUE_DEFAULT + * @discussion Constant to pass to the dispatch_set_target_queue() and + * dispatch_source_create() functions to indicate that the default target queue + * for the given object type should be used. + */ +#define DISPATCH_TARGET_QUEUE_DEFAULT NULL + /*! * @function dispatch_set_target_queue * @@ -457,27 +510,38 @@ dispatch_queue_get_label(dispatch_queue_t queue); * @discussion * An object's target queue is responsible for processing the object. * - * A dispatch queue's priority is inherited by its target queue. Use the + * A dispatch queue's priority is inherited from its target queue. Use the * dispatch_get_global_queue() function to obtain suitable target queue * of the desired priority. * + * Blocks submitted to a serial queue whose target queue is another serial + * queue will not be invoked concurrently with blocks submitted to the target + * queue or to any other queue with that same target queue. + * + * The result of introducing a cycle into the hierarchy of target queues is + * undefined. 
+ * * A dispatch source's target queue specifies where its event handler and * cancellation handler blocks will be submitted. * - * The result of calling dispatch_set_target_queue() on any other type of - * dispatch object is undefined. + * A dispatch I/O channel's target queue specifies where its I/O + * operations are executed. * - * @param object + * For all other dispatch object types, the only function of the target queue + * is to determine where an object's finalizer function is invoked. + * + * @param object * The object to modify. * The result of passing NULL in this parameter is undefined. * - * @param queue + * @param queue * The new target queue for the object. The queue is retained, and the - * previous one, if any, is released. - * The result of passing NULL in this parameter is undefined. + * previous target queue, if any, is released. + * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue + * to the default target queue for the given object type. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW // DISPATCH_NONNULL1 void dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t queue); @@ -494,8 +558,8 @@ dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t queue); * Applications that call NSApplicationMain() or CFRunLoopRun() on the * main thread do not need to call dispatch_main(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW DISPATCH_NORETURN +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN void dispatch_main(void); @@ -522,8 +586,8 @@ dispatch_main(void); * The result of passing NULL in this parameter is undefined. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, @@ -555,14 +619,251 @@ dispatch_after(dispatch_time_t when, * dispatch_after_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *context, dispatch_function_t work); +/*! + * @functiongroup Dispatch Barrier API + * The dispatch barrier API is a mechanism for submitting barrier blocks to a + * dispatch queue, analogous to the dispatch_async()/dispatch_sync() API. + * It enables the implementation of efficient reader/writer schemes. + * Barrier blocks only behave specially when submitted to queues created with + * the DISPATCH_QUEUE_CONCURRENT attribute; on such a queue, a barrier block + * will not run until all blocks submitted to the queue earlier have completed, + * and any blocks submitted to the queue after a barrier block will not run + * until the barrier block has completed. + * When submitted to a global queue or to a queue not created with the + * DISPATCH_QUEUE_CONCURRENT attribute, barrier blocks behave identically to + * blocks submitted with the dispatch_async()/dispatch_sync() API. + */ + +/*! + * @function dispatch_barrier_async + * + * @abstract + * Submits a barrier block for asynchronous execution on a dispatch queue. + * + * @discussion + * Submits a block to a dispatch queue like dispatch_async(), but marks that + * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues). + * + * See dispatch_async() for details. 
+ * + * @param queue + * The target dispatch queue to which the block is submitted. + * The system will hold a reference on the target queue until the block + * has finished. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to submit to the target dispatch queue. This function performs + * Block_copy() and Block_release() on behalf of callers. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); +#endif + +/*! + * @function dispatch_barrier_async_f + * + * @abstract + * Submits a barrier function for asynchronous execution on a dispatch queue. + * + * @discussion + * Submits a function to a dispatch queue like dispatch_async_f(), but marks + * that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT + * queues). + * + * See dispatch_async_f() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The system will hold a reference on the target queue until the function + * has returned. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_barrier_async_f(). + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_async_f(dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! 
+ * @function dispatch_barrier_sync + * + * @abstract + * Submits a barrier block for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a block to a dispatch queue like dispatch_sync(), but marks that + * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues). + * + * See dispatch_sync() for details. + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to be invoked on the target dispatch queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_sync(dispatch_queue_t queue, dispatch_block_t block); +#endif + +/*! + * @function dispatch_barrier_sync_f + * + * @abstract + * Submits a barrier function for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a function to a dispatch queue like dispatch_sync_f(), but marks that + * function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues). + * + * See dispatch_sync_f() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_barrier_sync_f(). + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_sync_f(dispatch_queue_t queue, + void *context, + dispatch_function_t work); + +/*! 
+ * @functiongroup Dispatch queue-specific contexts + * This API allows different subsystems to associate context to a shared queue + * without risk of collision and to retrieve that context from blocks executing + * on that queue or any of its child queues in the target queue hierarchy. + */ + +/*! + * @function dispatch_queue_set_specific + * + * @abstract + * Associates a subsystem-specific context with a dispatch queue, for a key + * unique to the subsystem. + * + * @discussion + * The specified destructor will be invoked with the context on the default + * priority global concurrent queue when a new context is set for the same key, + * or after all references to the queue have been released. + * + * @param queue + * The dispatch queue to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param key + * The key to set the context for, typically a pointer to a static variable + * specific to the subsystem. Keys are only compared as pointers and never + * dereferenced. Passing a string constant directly is not recommended. + * The NULL key is reserved and attempts to set a context for it are ignored. + * + * @param context + * The new subsystem-specific context for the object. This may be NULL. + * + * @param destructor + * The destructor function pointer. This may be NULL and is ignored if context + * is NULL. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW +void +dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, + void *context, dispatch_function_t destructor); + +/*! + * @function dispatch_queue_get_specific + * + * @abstract + * Returns the subsystem-specific context associated with a dispatch queue, for + * a key unique to the subsystem. + * + * @discussion + * Returns the context for the specified key if it has been set on the specified + * queue. + * + * @param queue + * The dispatch queue to query. 
+ * The result of passing NULL in this parameter is undefined. + * + * @param key + * The key to get the context for, typically a pointer to a static variable + * specific to the subsystem. Keys are only compared as pointers and never + * dereferenced. Passing a string constant directly is not recommended. + * + * @result + * The context for the specified key or NULL if no context was found. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +void * +dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); + +/*! + * @function dispatch_get_specific + * + * @abstract + * Returns the current subsystem-specific context for a key unique to the + * subsystem. + * + * @discussion + * When called from a block executing on a queue, returns the context for the + * specified key if it has been set on the queue, otherwise returns the result + * of dispatch_get_specific() executed on the queue's target queue or NULL + * if the current queue is a global concurrent queue. + * + * @param key + * The key to get the context for, typically a pointer to a static variable + * specific to the subsystem. Keys are only compared as pointers and never + * dereferenced. Passing a string constant directly is not recommended. + * + * @result + * The context for the specified key or NULL if no context was found. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +void * +dispatch_get_specific(const void *key); + __END_DECLS #endif diff --git a/src/semaphore.h b/dispatch/semaphore.h similarity index 87% rename from src/semaphore.h rename to dispatch/semaphore.h index 882b567b4..19b50af58 100644 --- a/src/semaphore.h +++ b/dispatch/semaphore.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -55,8 +55,8 @@ __BEGIN_DECLS * @result * The newly created semaphore, or NULL on failure. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_semaphore_t dispatch_semaphore_create(long value); @@ -80,8 +80,8 @@ dispatch_semaphore_create(long value); * @result * Returns zero on success, or non-zero if the timeout occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); @@ -102,8 +102,8 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); * This function returns non-zero if a thread is woken. Otherwise, zero is * returned. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_signal(dispatch_semaphore_t dsema); diff --git a/src/source.h b/dispatch/source.h similarity index 75% rename from src/source.h rename to dispatch/source.h index 867ba86a0..4c9f601dd 100644 --- a/src/source.h +++ b/dispatch/source.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -26,8 +26,10 @@ #include // for HeaderDoc #endif +#if TARGET_OS_MAC #include #include +#endif #include /*! @@ -70,8 +72,9 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_data_add; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_data_add; /*! * @const DISPATCH_SOURCE_TYPE_DATA_OR @@ -82,8 +85,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_data_add; * dispatch_source_merge_data(). 
*/ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_data_or; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_data_or; /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND @@ -93,8 +97,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_data_or; * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_mach_send; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_mach_send; /*! * @const DISPATCH_SOURCE_TYPE_MACH_RECV @@ -103,8 +108,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_mach_send; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_mach_recv; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_mach_recv; /*! * @const DISPATCH_SOURCE_TYPE_PROC @@ -114,8 +120,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_mach_recv; * The mask is a mask of desired events from dispatch_source_proc_flags_t. */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_proc; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_proc; /*! 
* @const DISPATCH_SOURCE_TYPE_READ @@ -125,8 +132,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_proc; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_read; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_read; /*! * @const DISPATCH_SOURCE_TYPE_SIGNAL @@ -135,8 +143,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_read; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_signal; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_signal; /*! * @const DISPATCH_SOURCE_TYPE_TIMER @@ -146,8 +155,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_signal; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_timer; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_timer; /*! * @const DISPATCH_SOURCE_TYPE_VNODE @@ -157,8 +167,9 @@ extern const struct dispatch_source_type_s _dispatch_source_type_timer; * The mask is a mask of desired events from dispatch_source_vnode_flags_t. 
*/ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_vnode; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_vnode; /*! * @const DISPATCH_SOURCE_TYPE_WRITE @@ -168,21 +179,24 @@ extern const struct dispatch_source_type_s _dispatch_source_type_vnode; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_write; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +const struct dispatch_source_type_s _dispatch_source_type_write; /*! - * @enum dispatch_source_mach_send_flags_t + * @typedef dispatch_source_mach_send_flags_t + * Type of dispatch_source_mach_send flags * * @constant DISPATCH_MACH_SEND_DEAD * The receive right corresponding to the given send right was destroyed. */ -enum { - DISPATCH_MACH_SEND_DEAD = 0x1, -}; +#define DISPATCH_MACH_SEND_DEAD 0x1 + +typedef unsigned long dispatch_source_mach_send_flags_t; /*! - * @enum dispatch_source_proc_flags_t + * @typedef dispatch_source_proc_flags_t + * Type of dispatch_source_proc flags * * @constant DISPATCH_PROC_EXIT * The process has exited (perhaps cleanly, perhaps not). @@ -197,15 +211,16 @@ enum { * @constant DISPATCH_PROC_SIGNAL * A Unix signal was delivered to the process. */ -enum { - DISPATCH_PROC_EXIT = 0x80000000, - DISPATCH_PROC_FORK = 0x40000000, - DISPATCH_PROC_EXEC = 0x20000000, - DISPATCH_PROC_SIGNAL = 0x08000000, -}; +#define DISPATCH_PROC_EXIT 0x80000000 +#define DISPATCH_PROC_FORK 0x40000000 +#define DISPATCH_PROC_EXEC 0x20000000 +#define DISPATCH_PROC_SIGNAL 0x08000000 + +typedef unsigned long dispatch_source_proc_flags_t; /*! 
- * @enum dispatch_source_vnode_flags_t + * @typedef dispatch_source_vnode_flags_t + * Type of dispatch_source_vnode flags * * @constant DISPATCH_VNODE_DELETE * The filesystem object was deleted from the namespace. @@ -228,15 +243,16 @@ enum { * @constant DISPATCH_VNODE_REVOKE * The filesystem object was revoked. */ -enum { - DISPATCH_VNODE_DELETE = 0x1, - DISPATCH_VNODE_WRITE = 0x2, - DISPATCH_VNODE_EXTEND = 0x4, - DISPATCH_VNODE_ATTRIB = 0x8, - DISPATCH_VNODE_LINK = 0x10, - DISPATCH_VNODE_RENAME = 0x20, - DISPATCH_VNODE_REVOKE = 0x40, -}; + +#define DISPATCH_VNODE_DELETE 0x1 +#define DISPATCH_VNODE_WRITE 0x2 +#define DISPATCH_VNODE_EXTEND 0x4 +#define DISPATCH_VNODE_ATTRIB 0x8 +#define DISPATCH_VNODE_LINK 0x10 +#define DISPATCH_VNODE_RENAME 0x20 +#define DISPATCH_VNODE_REVOKE 0x40 + +typedef unsigned long dispatch_source_vnode_flags_t; __BEGIN_DECLS @@ -267,10 +283,12 @@ __BEGIN_DECLS * A mask of flags specifying which events are desired. The interpretation of * this argument is determined by the constant provided in the type parameter. * @param queue - * The dispatch queue to which the event handler block will be submited. + * The dispatch queue to which the event handler block will be submitted. + * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, the source will submit the event + * handler block to the default priority global queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, @@ -286,13 +304,13 @@ dispatch_source_create(dispatch_source_type_t type, * @param source * The dispatch source to modify. * The result of passing NULL in this parameter is undefined. - * + * * @param handler * The event handler block to submit to the source's target queue. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler(dispatch_source_t source, dispatch_block_t handler); @@ -314,8 +332,8 @@ dispatch_source_set_event_handler(dispatch_source_t source, * context of the dispatch source at the time the handler call is made. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler_f(dispatch_source_t source, dispatch_function_t handler); @@ -343,13 +361,13 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, * @param source * The dispatch source to modify. * The result of passing NULL in this parameter is undefined. - * + * * @param handler * The cancellation handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, dispatch_block_t cancel_handler); @@ -373,8 +391,8 @@ dispatch_source_set_cancel_handler(dispatch_source_t source, * The context parameter passed to the event handler function is the current * context of the dispatch source at the time the handler call is made. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, dispatch_function_t cancel_handler); @@ -401,8 +419,8 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t source, * The dispatch source to be canceled. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_cancel(dispatch_source_t source); @@ -419,8 +437,9 @@ dispatch_source_cancel(dispatch_source_t source); * @result * Non-zero if canceled and zero if not canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW long dispatch_source_testcancel(dispatch_source_t source); @@ -443,13 +462,14 @@ dispatch_source_testcancel(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_MACH_RECV: mach port (mach_port_t) * DISPATCH_SOURCE_TYPE_PROC: process identifier (pid_t) * DISPATCH_SOURCE_TYPE_READ: file descriptor (int) - * DISPATCH_SOURCE_TYPE_SIGNAL: signal number (int) + * DISPATCH_SOURCE_TYPE_SIGNAL: signal number (int) * DISPATCH_SOURCE_TYPE_TIMER: n/a * DISPATCH_SOURCE_TYPE_VNODE: file descriptor (int) * DISPATCH_SOURCE_TYPE_WRITE: file descriptor (int) */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW uintptr_t dispatch_source_get_handle(dispatch_source_t source); @@ -477,8 +497,9 @@ 
dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: n/a */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW unsigned long dispatch_source_get_mask(dispatch_source_t source); @@ -513,8 +534,9 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: estimated buffer space available */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW unsigned long dispatch_source_get_data(dispatch_source_t source); @@ -534,8 +556,8 @@ dispatch_source_get_data(dispatch_source_t source); * as specified by the dispatch source type. A value of zero has no effect * and will not result in the submission of the event handler block. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_merge_data(dispatch_source_t source, unsigned long value); @@ -547,14 +569,15 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * * @discussion * Calling this function has no effect if the timer source has already been - * canceled. - * + * canceled. Once this function returns, any pending timer data accumulated + * for the previous timer values has been cleared + * * The start time argument also determines which clock will be used for the * timer. 
If the start time is DISPATCH_TIME_NOW or created with * dispatch_time() then the timer is based on mach_absolute_time(). Otherwise, * if the start time of the timer is created with dispatch_walltime() then the * timer is based on gettimeofday(3). - * + * * @param start * The start time of the timer. See dispatch_time() and dispatch_walltime() * for more information. @@ -570,14 +593,67 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * a leeway of up to 30 seconds.) Note that some latency is to be expected for * all timers even when a leeway value of zero is specified. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_set_timer(dispatch_source_t source, dispatch_time_t start, uint64_t interval, uint64_t leeway); +/*! + * @function dispatch_source_set_registration_handler + * + * @abstract + * Sets the registration handler block for the given dispatch source. + * + * @discussion + * The registration handler (if specified) will be submitted to the source's + * target queue once the corresponding kevent() has been registered with the + * system, following the initial dispatch_resume() of the source. + * + * If a source is already registered when the registration handler is set, the + * registration handler will be invoked immediately. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The registration handler block to submit to the source's target queue. + */ +#ifdef __BLOCKS__ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_registration_handler(dispatch_source_t source, + dispatch_block_t registration_handler); +#endif /* __BLOCKS__ */ + +/*! 
+ * @function dispatch_source_set_registration_handler_f + * + * @abstract + * Sets the registration handler function for the given dispatch source. + * + * @discussion + * See dispatch_source_set_registration_handler() for more details. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The registration handler function to submit to the source's target queue. + * The context parameter passed to the registration handler function is the + * current context of the dispatch source at the time the handler call is made. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_source_set_registration_handler_f(dispatch_source_t source, + dispatch_function_t registration_handler); + __END_DECLS #endif diff --git a/src/time.h b/dispatch/time.h similarity index 84% rename from src/time.h rename to dispatch/time.h index 510d6d7b7..d39578d66 100644 --- a/src/time.h +++ b/dispatch/time.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -42,7 +42,11 @@ struct timespec; #ifdef NSEC_PER_USEC #undef NSEC_PER_USEC #endif +#ifdef NSEC_PER_MSEC +#undef NSEC_PER_MSEC +#endif #define NSEC_PER_SEC 1000000000ull +#define NSEC_PER_MSEC 1000000ull #define USEC_PER_SEC 1000000ull #define NSEC_PER_USEC 1000ull @@ -50,7 +54,7 @@ struct timespec; * @typedef dispatch_time_t * * @abstract - * An somewhat abstract representation of time; where zero means "now" and + * A somewhat abstract representation of time; where zero means "now" and * DISPATCH_TIME_FOREVER means "infinity" and every value in between is an * opaque encoding. */ @@ -79,8 +83,8 @@ typedef uint64_t dispatch_time_t; * @result * A new dispatch_time_t. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_time(dispatch_time_t when, int64_t delta); @@ -103,8 +107,8 @@ dispatch_time(dispatch_time_t when, int64_t delta); * @result * A new dispatch_time_t. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_walltime(const struct timespec *when, int64_t delta); diff --git a/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj b/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj deleted file mode 100644 index 15482f381..000000000 --- a/examples/Dispatch Samples/Dispatch Samples.xcodeproj/project.pbxproj +++ /dev/null @@ -1,609 +0,0 @@ -// !$*UTF8*$! 
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 45; - objects = { - -/* Begin PBXAggregateTarget section */ - 4C96F87F0F8288070051687B /* Samples */ = { - isa = PBXAggregateTarget; - buildConfigurationList = 4C96F88F0F8288290051687B /* Build configuration list for PBXAggregateTarget "Samples" */; - buildPhases = ( - ); - dependencies = ( - 4C96F88B0F82881B0051687B /* PBXTargetDependency */, - 4C96F8890F8288190051687B /* PBXTargetDependency */, - 4C96F8870F8288170051687B /* PBXTargetDependency */, - 4C96F8850F8288140051687B /* PBXTargetDependency */, - 4C96F8830F82880E0051687B /* PBXTargetDependency */, - ); - name = Samples; - productName = Samples; - }; -/* End PBXAggregateTarget section */ - -/* Begin PBXBuildFile section */ - 4CBAB02C0F780242006D97F1 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB02A0F780242006D97F1 /* apply.c */; }; - 4CBAB04C0F7802DA006D97F1 /* netcat.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB04A0F7802DA006D97F1 /* netcat.c */; }; - 4CBAB0530F7802F1006D97F1 /* proc.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0510F7802F1006D97F1 /* proc.c */; }; - 4CBAB0560F780314006D97F1 /* readFile.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0540F780314006D97F1 /* readFile.c */; }; - 4CBAB0590F780327006D97F1 /* timers.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CBAB0570F780327006D97F1 /* timers.c */; }; -/* End PBXBuildFile section */ - -/* Begin PBXContainerItemProxy section */ - 4C96F8820F82880E0051687B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 4CBAB0240F7801C6006D97F1; - remoteInfo = "dispatch-apply"; - }; - 4C96F8840F8288140051687B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 4CBAB0300F780272006D97F1; - remoteInfo = "dispatch-netcat"; - 
}; - 4C96F8860F8288170051687B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 4CBAB0370F78028E006D97F1; - remoteInfo = "dispatch-proc"; - }; - 4C96F8880F8288190051687B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 4CBAB03E0F7802A6006D97F1; - remoteInfo = "dispatch-readFile"; - }; - 4C96F88A0F82881B0051687B /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = 4CBAB0450F7802BA006D97F1; - remoteInfo = "dispatch-timers"; - }; -/* End PBXContainerItemProxy section */ - -/* Begin PBXFileReference section */ - 4CBAB0250F7801C6006D97F1 /* dispatch-apply */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-apply"; sourceTree = BUILT_PRODUCTS_DIR; }; - 4CBAB02A0F780242006D97F1 /* apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = apply.c; sourceTree = ""; }; - 4CBAB0310F780272006D97F1 /* dispatch-netcat */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-netcat"; sourceTree = BUILT_PRODUCTS_DIR; }; - 4CBAB0380F78028E006D97F1 /* dispatch-proc */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-proc"; sourceTree = BUILT_PRODUCTS_DIR; }; - 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "dispatch-readFile"; sourceTree = BUILT_PRODUCTS_DIR; }; - 4CBAB0460F7802BA006D97F1 /* dispatch-timers */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex 
= 0; path = "dispatch-timers"; sourceTree = BUILT_PRODUCTS_DIR; }; - 4CBAB04A0F7802DA006D97F1 /* netcat.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = netcat.c; sourceTree = ""; }; - 4CBAB0510F7802F1006D97F1 /* proc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = proc.c; sourceTree = ""; }; - 4CBAB0540F780314006D97F1 /* readFile.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = readFile.c; sourceTree = ""; }; - 4CBAB0570F780327006D97F1 /* timers.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = timers.c; sourceTree = ""; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 4CBAB0230F7801C6006D97F1 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB02F0F780272006D97F1 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB0360F78028E006D97F1 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB03D0F7802A6006D97F1 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB0440F7802BA006D97F1 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 08FB7794FE84155DC02AAC07 /* Dispatch Samples */ = { - isa = PBXGroup; - children = ( - 08FB7795FE84155DC02AAC07 /* Source */, - C6A0FF2B0290797F04C91782 /* Documentation */, - 1AB674ADFE9D54B511CA2CBB /* Products */, - ); - name = "Dispatch 
Samples"; - sourceTree = ""; - }; - 08FB7795FE84155DC02AAC07 /* Source */ = { - isa = PBXGroup; - children = ( - 4CBAB0570F780327006D97F1 /* timers.c */, - 4CBAB0540F780314006D97F1 /* readFile.c */, - 4CBAB0510F7802F1006D97F1 /* proc.c */, - 4CBAB04A0F7802DA006D97F1 /* netcat.c */, - 4CBAB02A0F780242006D97F1 /* apply.c */, - ); - name = Source; - sourceTree = ""; - }; - 1AB674ADFE9D54B511CA2CBB /* Products */ = { - isa = PBXGroup; - children = ( - 4CBAB0250F7801C6006D97F1 /* dispatch-apply */, - 4CBAB0310F780272006D97F1 /* dispatch-netcat */, - 4CBAB0380F78028E006D97F1 /* dispatch-proc */, - 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */, - 4CBAB0460F7802BA006D97F1 /* dispatch-timers */, - ); - name = Products; - sourceTree = ""; - }; - C6A0FF2B0290797F04C91782 /* Documentation */ = { - isa = PBXGroup; - children = ( - ); - name = Documentation; - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 4CBAB0240F7801C6006D97F1 /* dispatch-apply */ = { - isa = PBXNativeTarget; - buildConfigurationList = 4CBAB0290F7801E5006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-apply" */; - buildPhases = ( - 4CBAB0220F7801C6006D97F1 /* Sources */, - 4CBAB0230F7801C6006D97F1 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "dispatch-apply"; - productName = "dispatch-apply"; - productReference = 4CBAB0250F7801C6006D97F1 /* dispatch-apply */; - productType = "com.apple.product-type.tool"; - }; - 4CBAB0300F780272006D97F1 /* dispatch-netcat */ = { - isa = PBXNativeTarget; - buildConfigurationList = 4CBAB04D0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-netcat" */; - buildPhases = ( - 4CBAB02E0F780272006D97F1 /* Sources */, - 4CBAB02F0F780272006D97F1 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "dispatch-netcat"; - productName = "dispatch-netcat"; - productReference = 4CBAB0310F780272006D97F1 /* dispatch-netcat */; - productType = 
"com.apple.product-type.tool"; - }; - 4CBAB0370F78028E006D97F1 /* dispatch-proc */ = { - isa = PBXNativeTarget; - buildConfigurationList = 4CBAB04E0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-proc" */; - buildPhases = ( - 4CBAB0350F78028E006D97F1 /* Sources */, - 4CBAB0360F78028E006D97F1 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "dispatch-proc"; - productName = "dispatch-proc"; - productReference = 4CBAB0380F78028E006D97F1 /* dispatch-proc */; - productType = "com.apple.product-type.tool"; - }; - 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */ = { - isa = PBXNativeTarget; - buildConfigurationList = 4CBAB04F0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-readFile" */; - buildPhases = ( - 4CBAB03C0F7802A6006D97F1 /* Sources */, - 4CBAB03D0F7802A6006D97F1 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "dispatch-readFile"; - productName = "dispatch-readFile"; - productReference = 4CBAB03F0F7802A6006D97F1 /* dispatch-readFile */; - productType = "com.apple.product-type.tool"; - }; - 4CBAB0450F7802BA006D97F1 /* dispatch-timers */ = { - isa = PBXNativeTarget; - buildConfigurationList = 4CBAB0500F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-timers" */; - buildPhases = ( - 4CBAB0430F7802BA006D97F1 /* Sources */, - 4CBAB0440F7802BA006D97F1 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "dispatch-timers"; - productName = "dispatch-timers"; - productReference = 4CBAB0460F7802BA006D97F1 /* dispatch-timers */; - productType = "com.apple.product-type.tool"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 08FB7793FE84155DC02AAC07 /* Project object */ = { - isa = PBXProject; - buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "Dispatch Samples" */; - compatibilityVersion = "Xcode 3.1"; - hasScannedForEncodings = 1; - mainGroup = 
08FB7794FE84155DC02AAC07 /* Dispatch Samples */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 4C96F87F0F8288070051687B /* Samples */, - 4CBAB0240F7801C6006D97F1 /* dispatch-apply */, - 4CBAB0300F780272006D97F1 /* dispatch-netcat */, - 4CBAB0370F78028E006D97F1 /* dispatch-proc */, - 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */, - 4CBAB0450F7802BA006D97F1 /* dispatch-timers */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXSourcesBuildPhase section */ - 4CBAB0220F7801C6006D97F1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CBAB02C0F780242006D97F1 /* apply.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB02E0F780272006D97F1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CBAB04C0F7802DA006D97F1 /* netcat.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB0350F78028E006D97F1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CBAB0530F7802F1006D97F1 /* proc.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB03C0F7802A6006D97F1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CBAB0560F780314006D97F1 /* readFile.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; - 4CBAB0430F7802BA006D97F1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CBAB0590F780327006D97F1 /* timers.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin PBXTargetDependency section */ - 4C96F8830F82880E0051687B /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 4CBAB0240F7801C6006D97F1 /* dispatch-apply */; - targetProxy = 4C96F8820F82880E0051687B /* PBXContainerItemProxy */; - }; - 4C96F8850F8288140051687B /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - 
target = 4CBAB0300F780272006D97F1 /* dispatch-netcat */; - targetProxy = 4C96F8840F8288140051687B /* PBXContainerItemProxy */; - }; - 4C96F8870F8288170051687B /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 4CBAB0370F78028E006D97F1 /* dispatch-proc */; - targetProxy = 4C96F8860F8288170051687B /* PBXContainerItemProxy */; - }; - 4C96F8890F8288190051687B /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 4CBAB03E0F7802A6006D97F1 /* dispatch-readFile */; - targetProxy = 4C96F8880F8288190051687B /* PBXContainerItemProxy */; - }; - 4C96F88B0F82881B0051687B /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = 4CBAB0450F7802BA006D97F1 /* dispatch-timers */; - targetProxy = 4C96F88A0F82881B0051687B /* PBXContainerItemProxy */; - }; -/* End PBXTargetDependency section */ - -/* Begin XCBuildConfiguration section */ - 1DEB928A08733DD80010E9CD /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; - GCC_C_LANGUAGE_STANDARD = gnu99; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - ONLY_ACTIVE_ARCH = YES; - PREBINDING = NO; - SDKROOT = macosx10.6; - }; - name = Debug; - }; - 1DEB928B08733DD80010E9CD /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; - GCC_C_LANGUAGE_STANDARD = gnu99; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - PREBINDING = NO; - SDKROOT = macosx10.6; - }; - name = Release; - }; - 4C96F8800F8288080051687B /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_OPTIMIZATION_LEVEL = 0; - PRODUCT_NAME = Samples; - }; - name = Debug; - }; - 4C96F8810F8288080051687B /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - PRODUCT_NAME = 
Samples; - ZERO_LINK = NO; - }; - name = Release; - }; - 4CBAB0270F7801C7006D97F1 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-apply"; - }; - name = Debug; - }; - 4CBAB0280F7801C7006D97F1 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-apply"; - ZERO_LINK = NO; - }; - name = Release; - }; - 4CBAB0330F780273006D97F1 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-netcat"; - }; - name = Debug; - }; - 4CBAB0340F780273006D97F1 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-netcat"; - ZERO_LINK = NO; - }; - name = Release; - }; - 4CBAB03A0F78028F006D97F1 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-proc"; - }; - name = Debug; - }; - 
4CBAB03B0F78028F006D97F1 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-proc"; - ZERO_LINK = NO; - }; - name = Release; - }; - 4CBAB0410F7802A7006D97F1 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-readFile"; - }; - name = Debug; - }; - 4CBAB0420F7802A7006D97F1 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-readFile"; - ZERO_LINK = NO; - }; - name = Release; - }; - 4CBAB0480F7802BB006D97F1 /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-timers"; - }; - name = Debug; - }; - 4CBAB0490F7802BB006D97F1 /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = YES; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_FIX_AND_CONTINUE = NO; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PREBINDING = NO; - PRODUCT_NAME = "dispatch-timers"; - ZERO_LINK = NO; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ 
- -/* Begin XCConfigurationList section */ - 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "Dispatch Samples" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 1DEB928A08733DD80010E9CD /* Debug */, - 1DEB928B08733DD80010E9CD /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4C96F88F0F8288290051687B /* Build configuration list for PBXAggregateTarget "Samples" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4C96F8800F8288080051687B /* Debug */, - 4C96F8810F8288080051687B /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4CBAB0290F7801E5006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-apply" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4CBAB0270F7801C7006D97F1 /* Debug */, - 4CBAB0280F7801C7006D97F1 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4CBAB04D0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-netcat" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4CBAB0330F780273006D97F1 /* Debug */, - 4CBAB0340F780273006D97F1 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4CBAB04E0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-proc" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4CBAB03A0F78028F006D97F1 /* Debug */, - 4CBAB03B0F78028F006D97F1 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4CBAB04F0F7802DA006D97F1 /* Build configuration list for PBXNativeTarget "dispatch-readFile" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4CBAB0410F7802A7006D97F1 /* Debug */, - 4CBAB0420F7802A7006D97F1 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 4CBAB0500F7802DA006D97F1 /* 
Build configuration list for PBXNativeTarget "dispatch-timers" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 4CBAB0480F7802BB006D97F1 /* Debug */, - 4CBAB0490F7802BB006D97F1 /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - }; - rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; -} diff --git a/examples/Dispatch Samples/ReadMe.txt b/examples/Dispatch Samples/ReadMe.txt deleted file mode 100644 index 3214db16d..000000000 --- a/examples/Dispatch Samples/ReadMe.txt +++ /dev/null @@ -1,93 +0,0 @@ -### DispatchProcessMonitor ### - -=========================================================================== -DESCRIPTION: - -Sample code showing how to: monitor process, do file and network I/O, -create and manage timers, and use dispatch_apply - -=========================================================================== -BUILD REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -RUNTIME REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -PACKAGING LIST: - -apply.c - dispatch_apply examples -netcat.c - network I/O examples -nWide.c - use of dispatch_semaphore to limit number of in-flight blocks -proc.c - process monitoring example -readFile.c - file I/O examples -readFileF.c - file I/O examples without Blocks -timers.c - create and manage timers - -=========================================================================== -SAMPLE USAGE: - -dispatch-apply - -dispatch-apply takes no arguments. When run it will display some status -messages and timing information. - -dispatch-netcat - -Open two terminal windows. 
In one window run the "server": - -cat ReadMe.txt | dispatch-netcat -l localhost 5050 - -In the other run the "client": - -dispatch-netcat localhost 5050 - -Your server will send the contents of ReadMe.txt to the client, the server -will close it's connection and exit. The client will display whatever -the server sent (the ReadMe.txt file). See the main function in netcat.c -for more options. - -dispatch-nWide - -dispatch-nWide takes no arguments. When run it will display explanatory -text. - -dispatch-proc - -dispatch-proc takes no arguments. When run it will display output from -some processes it runs, and it will display information from the -process lifecycle events dispatch generates. - -dispatch-readFile - -Run dispatch-readFile with a filename as an argument: - -dispatch-readFile ReadMe.txt - -It will read the file 10 (or fewer) bytes at a time and display how many -bytes dispatch thinks are remaining to read. - -dispatch-readFileF - -Exactly the same as dispatch-readFile, but written without the use of Blocks. - -dispatch-timers - -dispatch-timers takes no arguments, running it display timer ticks for -a timer with an initial interval of one second, changing to one half second -after the first three events. It will exit after six events. - -=========================================================================== -CHANGES FROM PREVIOUS VERSIONS: - -Version 1.1 -- Updated to current libdispatch API, and added samples readFileF.c and -nWide.c -Version 1.0 -- First version - -=========================================================================== -Copyright (C) 2009 Apple Inc. All rights reserved. diff --git a/examples/Dispatch Samples/apply.c b/examples/Dispatch Samples/apply.c deleted file mode 100644 index 3eb39a586..000000000 --- a/examples/Dispatch Samples/apply.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. 
- * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include - -#define kIT 10 - -uint64_t elapsed_time; - -void timer_start() { - elapsed_time = mach_absolute_time(); -} - -double timer_milePost() { - static dispatch_once_t justOnce; - static double scale; - - dispatch_once(&justOnce, ^{ - mach_timebase_info_data_t tbi; - mach_timebase_info(&tbi); - scale = tbi.numer; - scale = scale/tbi.denom; - printf("Scale is %10.4f Just computed once courtesy of dispatch_once()\n", scale); - }); - - uint64_t now = mach_absolute_time()-elapsed_time; - double fTotalT = now; - fTotalT = fTotalT * scale; // convert this to nanoseconds... 
- fTotalT = fTotalT / 1000000000.0; - return fTotalT; -} - -int -main(void) -{ - dispatch_queue_t myQueue = dispatch_queue_create("myQueue", NULL); - dispatch_group_t myGroup = dispatch_group_create(); - -// dispatch_apply on a serial queue finishes each block in order so the following code will take a little more than a second - timer_start(); - dispatch_apply(kIT, myQueue, ^(size_t current){ - printf("Block #%ld of %d is being run\n", - current+1, // adjusting the zero based current iteration we get passed in - kIT); - usleep(USEC_PER_SEC/10); - }); - printf("and dispatch_apply( serial queue ) returned after %10.4lf seconds\n",timer_milePost()); - -// dispatch_apply on a concurrent queue returns after all blocks are finished, however it can execute them concurrently with each other -// so this will take quite a bit less time - timer_start(); - dispatch_apply(kIT, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(size_t current){ - printf("Block #%ld of %d is being run\n",current+1, kIT); - usleep(USEC_PER_SEC/10); - }); - printf("and dispatch_apply( concurrent queue) returned after %10.4lf seconds\n",timer_milePost()); - -// To execute all blocks in a dispatch_apply asynchonously, you will need to perform the dispatch_apply -// asynchonously, like this (NOTE the nested dispatch_apply inside of the async block.) 
-// Also note the use of the dispatch_group so that we can ultimatly know when the work is -// all completed - - timer_start(); - dispatch_group_async(myGroup, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ - dispatch_apply(kIT, myQueue, ^(size_t current){ - printf("Block #%ld of %d is being run\n",current+1, kIT); - usleep(USEC_PER_SEC/10); - }); - }); - - printf("and dispatch_group_async( dispatch_apply( )) returned after %10.4lf seconds\n",timer_milePost()); - printf("Now to wait for the dispatch group to finish...\n"); - dispatch_group_wait(myGroup, UINT64_MAX); - printf("and we are done with dispatch_group_async( dispatch_apply( )) after %10.4lf seconds\n",timer_milePost()); - return 0; -} - diff --git a/examples/Dispatch Samples/nWide.c b/examples/Dispatch Samples/nWide.c deleted file mode 100644 index 92914a922..000000000 --- a/examples/Dispatch Samples/nWide.c +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright (c) 2009 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. 
- * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -/* - * nWide.c - * Samples project - * - * Created by Mensch on 5/1/09. - * Copyright 2009 Apple, Inc. All rights reserved. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#import -#include - - -/* - * Demonstrate using dispatch_semaphore to create a concurrent queue that - * allows only a fixed number of blocks to be in flight at any given time - */ - -int main (int argc, const char * argv[]) { - dispatch_group_t mg = dispatch_group_create(); - dispatch_semaphore_t ds; - __block int numRunning = 0; - int qWidth = 5; - int numWorkBlocks = 100; - - if (argc >= 2) { - qWidth = atoi(argv[1]); // use the command 1st line parameter as the queue width - if (qWidth==0) qWidth==1; // protect against bad values - } - - if (argc >=3) { - numWorkBlocks = atoi(argv[2]); // use the 2nd command line parameter as the queue width - if (numWorkBlocks==0) numWorkBlocks==1; // protect against bad values - } - - printf("Starting dispatch semaphore test to simulate a %d wide dispatch queue\n", qWidth ); - ds = dispatch_semaphore_create(qWidth); - - int i; - for (i=0; i -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -// #define DEBUG 1 - -#if DEBUG -#define dlog(a) dispatch_debug(a, #a) -#else -#define dlog(a) do { } while(0) -#endif - 
-void usage(void); -void *run_block(void *); -void setup_fd_relay(int netfd /* bidirectional */, - int infd /* local input */, - int outfd /* local output */, - void (^finalizer_block)(void)); -void doreadwrite(int fd1, int fd2, char *buffer, size_t len); - -#define BUFFER_SIZE 1099 - -int main(int argc, char *argv[]) { - - int ch; - bool use_v4_only = false, use_v6_only = false; - bool debug = false, no_stdin = false; - bool keep_listening = false, do_listen = false; - bool do_loookups = true, verbose = false; - bool do_udp = false, do_bind_ip = false, do_bind_port = false; - const char *hostname, *servname; - int ret; - struct addrinfo hints, *aires, *aires0; - const char *bind_hostname, *bind_servname; - - dispatch_queue_t dq; - dispatch_group_t listen_group = NULL; - - while ((ch = getopt(argc, argv, "46Ddhklnvup:s:")) != -1) { - switch (ch) { - case '4': - use_v4_only = true; - break; - case '6': - use_v6_only = true; - break; - case 'D': - debug = true; - break; - case 'd': - no_stdin = true; - break; - case 'h': - usage(); - break; - case 'k': - keep_listening = true; - break; - case 'l': - do_listen = true; - break; - case 'n': - do_loookups = false; - break; - case 'v': - verbose = true; - break; - case 'u': - do_udp = true; - break; - case 'p': - do_bind_port = true; - bind_servname = optarg; - break; - case 's': - do_bind_ip = true; - bind_hostname = optarg; - break; - case '?': - default: - usage(); - break; - } - } - - argc -= optind; - argv += optind; - - if (use_v4_only && use_v6_only) { - errx(EX_USAGE, "-4 and -6 specified"); - } - - if (keep_listening && !do_listen) { - errx(EX_USAGE, "-k specified but no -l"); - } - - if (do_listen && (do_bind_ip || do_bind_port)) { - errx(EX_USAGE, "-p or -s option with -l"); - } - - if (do_listen) { - if (argc >= 2) { - hostname = argv[0]; - servname = argv[1]; - } else if (argc >= 1) { - hostname = NULL; - servname = argv[0]; - } else { - errx(EX_USAGE, "No service name provided"); - } - } else { - if (argc >= 
2) { - hostname = argv[0]; - servname = argv[1]; - } else { - errx(EX_USAGE, "No hostname and service name provided"); - } - } - - if (do_bind_ip || do_bind_port) { - if (!do_bind_ip) { - bind_hostname = NULL; - } - if (!do_bind_port) { - bind_servname = NULL; - } - } - - openlog(getprogname(), LOG_PERROR|LOG_CONS, LOG_DAEMON); - setlogmask(debug ? LOG_UPTO(LOG_DEBUG) : verbose ? LOG_UPTO(LOG_INFO) : LOG_UPTO(LOG_ERR)); - - dq = dispatch_queue_create("netcat", NULL); - listen_group = dispatch_group_create(); - - bzero(&hints, sizeof(hints)); - hints.ai_family = use_v4_only ? PF_INET : (use_v6_only ? PF_INET6 : PF_UNSPEC); - hints.ai_socktype = do_udp ? SOCK_DGRAM : SOCK_STREAM; - hints.ai_protocol = do_udp ? IPPROTO_UDP : IPPROTO_TCP; - hints.ai_flags = (!do_loookups ? AI_NUMERICHOST | AI_NUMERICSERV : 0) | (do_listen ? AI_PASSIVE : 0); - - ret = getaddrinfo(hostname, servname, &hints, &aires0); - if (ret) { - errx(1, "getaddrinfo(%s, %s): %s", hostname, servname, gai_strerror(ret)); - } - - for (aires = aires0; aires; aires = aires->ai_next) { - if (do_listen) { - // asynchronously set up the socket - dispatch_retain(dq); - dispatch_group_async(listen_group, - dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), - ^{ - int s, val = 1; - dispatch_source_t ds; - - s = socket(aires->ai_family, aires->ai_socktype, aires->ai_protocol); - if (s < 0) { - warn("socket"); - return; - } - - if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)) < 0) { - warn("Could not set SO_REUSEADDR"); - } - - if(setsockopt(s, SOL_SOCKET, SO_REUSEPORT, (const char *)&val, sizeof(val)) < 0) { - warn("Could not set SO_REUSEPORT"); - } - - if(setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof(val)) < 0) { - warn("Could not set SO_NOSIGPIPE"); - } - - if (bind(s, aires->ai_addr, aires->ai_addrlen) < 0) { - warn("bind"); - close(s); - return; - } - - listen(s, 2); - syslog(LOG_DEBUG, "listening on socket %d", s); - ds = 
dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, s, 0, dq); - dispatch_source_set_event_handler(ds, ^{ - // got an incoming connection - int s2, lfd = dispatch_source_get_handle(ds); - dispatch_queue_t listen_queue = dispatch_get_current_queue(); - - // prevent further accept(2)s across multiple sources - dispatch_retain(listen_queue); - dispatch_suspend(listen_queue); - - if (do_udp) { - // lfd is our socket, but let's connect in the reverse - // direction to set up the connection fully - char udpbuf[4]; - struct sockaddr_storage sockin; - socklen_t socklen; - ssize_t peeklen; - int cret; - - socklen = sizeof(sockin); - peeklen = recvfrom(lfd, udpbuf, sizeof(udpbuf), - MSG_PEEK, (struct sockaddr *)&sockin, &socklen); - if (peeklen < 0) { - warn("recvfrom"); - dispatch_resume(listen_queue); - dispatch_release(listen_queue); - return; - } - - cret = connect(lfd, (struct sockaddr *)&sockin, socklen); - if (cret < 0) { - warn("connect"); - dispatch_resume(listen_queue); - dispatch_release(listen_queue); - return; - } - - s2 = lfd; - syslog(LOG_DEBUG, "accepted socket %d", s2); - } else { - s2 = accept(lfd, NULL, NULL); - if (s2 < 0) { - warn("accept"); - dispatch_resume(listen_queue); - dispatch_release(listen_queue); - return; - } - syslog(LOG_DEBUG, "accepted socket %d -> %d", lfd, s2); - } - - - setup_fd_relay(s2, no_stdin ? 
-1 : STDIN_FILENO, STDOUT_FILENO, ^{ - if (!do_udp) { - close(s2); - } - dispatch_resume(listen_queue); - dispatch_release(listen_queue); - if (!keep_listening) { - exit(0); - } - }); - }); - dispatch_resume(ds); - dispatch_release(dq); - }); - } else { - // synchronously try each address to try to connect - __block bool did_connect = false; - - dispatch_sync(dq, ^{ - int s, val = 1; - - s = socket(aires->ai_family, aires->ai_socktype, aires->ai_protocol); - if (s < 0) { - warn("socket"); - return; - } - - if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *)&val, sizeof(val)) < 0) { - warn("Could not set SO_REUSEADDR"); - } - - if(setsockopt(s, SOL_SOCKET, SO_REUSEPORT, (const char *)&val, sizeof(val)) < 0) { - warn("Could not set SO_REUSEPORT"); - } - - if(setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &val, sizeof(val)) < 0) { - warn("Could not set SO_NOSIGPIPE"); - } - - if (do_bind_port || do_bind_ip) { - struct addrinfo bhints, *bind_aires; - int bret; - in_port_t bport; - - bzero(&bhints, sizeof(bhints)); - bhints.ai_family = aires->ai_family; - bhints.ai_socktype = aires->ai_socktype; - bhints.ai_protocol = aires->ai_protocol; - bhints.ai_flags = (do_bind_ip ? AI_NUMERICHOST : 0) | (do_bind_port ? 
AI_NUMERICSERV : 0) | AI_PASSIVE; - - bret = getaddrinfo(bind_hostname, bind_servname, &bhints, &bind_aires); - if (bret) { - warnx("getaddrinfo(%s, %s): %s", bind_hostname, bind_servname, gai_strerror(bret)); - close(s); - freeaddrinfo(bind_aires); - return; - } - - switch(bind_aires->ai_family) { - case PF_INET: - bport = ((struct sockaddr_in *)bind_aires->ai_addr)->sin_port; - break; - case PF_INET6: - bport = ((struct sockaddr_in6 *)bind_aires->ai_addr)->sin6_port; - break; - default: - bport = htons(0); - break; - } - - if (ntohs(bport) > 0 && ntohs(bport) < IPPORT_RESERVED) { - bret = bindresvport_sa(s, (struct sockaddr *)bind_aires->ai_addr); - } else { - bret = bind(s, bind_aires->ai_addr, bind_aires->ai_addrlen); - } - - if (bret < 0) { - warn("bind"); - close(s); - freeaddrinfo(bind_aires); - return; - } - - freeaddrinfo(bind_aires); - } - - if (connect(s, aires->ai_addr, aires->ai_addrlen) < 0) { - syslog(LOG_INFO, "connect to %s port %s (%s) failed: %s", - hostname, - servname, - aires->ai_protocol == IPPROTO_TCP ? "tcp" : aires->ai_protocol == IPPROTO_UDP ? "udp" : "unknown", - strerror(errno)); - close(s); - return; - } - - syslog(LOG_INFO, "Connection to %s %s port [%s] succeeded!", - hostname, - servname, - aires->ai_protocol == IPPROTO_TCP ? "tcp" : aires->ai_protocol == IPPROTO_UDP ? "udp" : "unknown"); - did_connect = true; - - if (do_udp) { - // netcat sends a few bytes to set up the connection - doreadwrite(-1, s, "XXXX", 4); - } - - setup_fd_relay(s, no_stdin ? 
-1 : STDIN_FILENO, STDOUT_FILENO, ^{ - close(s); - exit(0); - }); - }); - - if (did_connect) { - break; - } - } - } - - dispatch_group_wait(listen_group, DISPATCH_TIME_FOREVER); - freeaddrinfo(aires0); - - if (!do_listen && aires == NULL) { - // got to the end of the address list without connecting - exit(1); - } - - dispatch_main(); - - return 0; -} - -void usage(void) -{ - fprintf(stderr, "Usage: %s [-4] [-6] [-D] [-d] [-h] [-k] [-l] [-n] [-v]\n", getprogname()); - fprintf(stderr, " \t[-u] [-p ] [-s ]\n"); - exit(EX_USAGE); -} - -void *run_block(void *arg) -{ - void (^b)(void) = (void (^)(void))arg; - - b(); - - _Block_release(arg); - - return NULL; -} - -/* - * Read up-to as much as is requested, and write - * that to the other fd, taking into account exceptional - * conditions and re-trying - */ -void doreadwrite(int fd1, int fd2, char *buffer, size_t len) { - ssize_t readBytes, writeBytes, totalWriteBytes; - - if (fd1 != -1) { - syslog(LOG_DEBUG, "trying to read %ld bytes from fd %d", len, fd1); - readBytes = read(fd1, buffer, len); - if (readBytes < 0) { - if (errno == EINTR || errno == EAGAIN) { - /* can't do anything now, hope we get called again */ - syslog(LOG_DEBUG, "error read fd %d: %s (%d)", fd1, strerror(errno), errno); - return; - } else { - err(1, "read fd %d", fd1); - } - } else if (readBytes == 0) { - syslog(LOG_DEBUG, "EOF on fd %d", fd1); - return; - } - syslog(LOG_DEBUG, "read %ld bytes from fd %d", readBytes, fd1); - } else { - readBytes = len; - syslog(LOG_DEBUG, "read buffer has %ld bytes", readBytes); - } - - totalWriteBytes = 0; - do { - writeBytes = write(fd2, buffer+totalWriteBytes, readBytes-totalWriteBytes); - if (writeBytes < 0) { - if (errno == EINTR || errno == EAGAIN) { - continue; - } else { - err(1, "write fd %d", fd2); - } - } - syslog(LOG_DEBUG, "wrote %ld bytes to fd %d", writeBytes, fd2); - totalWriteBytes += writeBytes; - - } while (totalWriteBytes < readBytes); - - return; -} - -/* - * We set up dispatch sources for netfd 
and infd. - * Since only one callback is called at a time per-source, - * we don't need any additional serialization, and the network - * and infd could be read from at the same time. - */ -void setup_fd_relay(int netfd /* bidirectional */, - int infd /* local input */, - int outfd /* local output */, - void (^finalizer_block)(void)) -{ - dispatch_source_t netsource = NULL, insource = NULL; - - dispatch_queue_t teardown_queue = dispatch_queue_create("teardown_queue", NULL); - - void (^finalizer_block_copy)(void) = _Block_copy(finalizer_block); // release after calling - void (^cancel_hander)(dispatch_source_t source) = ^(dispatch_source_t source){ - dlog(source); - dlog(teardown_queue); - - /* - * allowing the teardown queue to become runnable will get - * the teardown block scheduled, which will cancel all other - * sources and call the client-supplied finalizer - */ - dispatch_resume(teardown_queue); - dispatch_release(teardown_queue); - }; - void (^event_handler)(dispatch_source_t source, int wfd) = ^(dispatch_source_t source, int wfd) { - int rfd = dispatch_source_get_handle(source); - size_t bytesAvail = dispatch_source_get_data(source); - char *buffer; - - syslog(LOG_DEBUG, "dispatch source %d -> %d has %lu bytes available", - rfd, wfd, bytesAvail); - if (bytesAvail == 0) { - dlog(source); - dispatch_source_cancel(source); - return; - } - buffer = malloc(BUFFER_SIZE); - doreadwrite(rfd,wfd, buffer, MIN(BUFFER_SIZE, bytesAvail+2)); - free(buffer); - }; - - /* - * Suspend this now twice so that neither source can accidentally resume it - * while we're still setting up the teardown block. 
When either source - * gets an EOF, the queue is resumed so that it can teardown the other source - * and call the client-supplied finalizer - */ - dispatch_suspend(teardown_queue); - dispatch_suspend(teardown_queue); - - if (infd != -1) { - dispatch_retain(teardown_queue); // retain so that we can resume in this block later - - dlog(teardown_queue); - - // since the event handler serializes, put this on a concurrent queue - insource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)); - dispatch_source_set_event_handler(insource, ^{ event_handler(insource, netfd); }); - dispatch_source_set_cancel_handler(insource, ^{ cancel_hander(insource); }); - dispatch_resume(insource); - dlog(insource); - } - - dispatch_retain(teardown_queue); // retain so that we can resume in this block later - - dlog(teardown_queue); - - // since the event handler serializes, put this on a concurrent queue - netsource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, netfd, 0, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)); - dispatch_source_set_event_handler(netsource, ^{ event_handler(netsource, outfd); }); - dispatch_source_set_cancel_handler(netsource, ^{ cancel_hander(netsource); }); - dispatch_resume(netsource); - dlog(netsource); - - dispatch_async(teardown_queue, ^{ - syslog(LOG_DEBUG, "Closing connection on fd %d -> %d -> %d", infd, netfd, outfd); - - if (insource) { - dlog(insource); - dispatch_source_cancel(insource); - dispatch_release(insource); // matches initial create - dlog(insource); - } - - dlog(netsource); - dispatch_source_cancel(netsource); - dispatch_release(netsource); // matches initial create - dlog(netsource); - - dlog(teardown_queue); - - finalizer_block_copy(); - _Block_release(finalizer_block_copy); - }); - - /* Resume this once so their either source can do the second resume - * to start the teardown block running - */ - dispatch_resume(teardown_queue); - 
dispatch_release(teardown_queue); // matches initial create - dlog(teardown_queue); -} - diff --git a/examples/Dispatch Samples/proc.c b/examples/Dispatch Samples/proc.c deleted file mode 100644 index 511b42f03..000000000 --- a/examples/Dispatch Samples/proc.c +++ /dev/null @@ -1,209 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. 
APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. - * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -extern char **environ; -dispatch_queue_t qpf; -volatile int exitcount = 0; - -// maximum value for exitcount before we quit -#define proccount 2 - - -struct qp_msg { - FILE *f; - char *str; -}; - -void qpf_puts(void *m_) { - struct qp_msg *m = m_; - fputs(m->str, m->f); - free(m->str); - free(m); -} - -void qfprintf(FILE *f, const char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - struct qp_msg *m = malloc(sizeof(struct qp_msg)); - assert(m); - vasprintf(&m->str, fmt, ap); - m->f = f; - dispatch_async(qpf, ^(void) { qpf_puts(m); }); - va_end(ap); -} - -#define qprintf(fmt...) qfprintf(stdout, ## fmt) - -/* context structure, contains a process id and the - * command line arguments used to launch it. Used to - * provide context info to the block associated - * with a process event source. - */ -struct pinfo { - pid_t pid; - dispatch_source_t source; - char **argv; -}; - -/* pid_finalize() is called when the dispatch source is released. 
- * this block is attached to the attribute that is passed to dispatch_source_proc_create(), - * and is thus associated with the dispatch source. */ -void pid_finalize(struct pinfo *pi) { - qprintf("process %d is done watching %s (%d)\n", getpid(), pi->argv[0], pi->pid); - dispatch_release(pi->source); - if (OSAtomicIncrement32(&exitcount) == proccount) { - qprintf("both processes exited\n"); - dispatch_sync(qpf,^{}); - exit(0); - } -} - - -/* pid_event() is called from a block that is associated with a process event - * source for a specific process id (via dispatch_source_proc_create()). When - * such an event occurs, pid_event() calls dispatch_source_get_context() to - * gain access to the pid and process name that were stored in the context at - * the time the block was attached to the event source. - */ -#define FLAG(X) ((dispatch_source_get_data(src) & DISPATCH_PROC_##X) ? #X" " : "") - -void pid_event(struct pinfo *pi) { - dispatch_source_t src = pi->source; - - qprintf("process %d %s, flags: %x %s%s%s%s\n", pi->pid, pi->argv[0], dispatch_source_get_data(src), FLAG(EXIT), FLAG(FORK), FLAG(EXEC), FLAG(SIGNAL)); - if (dispatch_source_get_data(src) & DISPATCH_PROC_EXIT) { - int s; - waitpid(dispatch_source_get_handle(src), &s, WNOHANG); - qprintf(" %s exit status %d\n", pi->argv[0], s); - dispatch_source_cancel(src); - } -} - -/* proc_start() takes a context pointer (ppi), and a dispatch queue (pq), - * and spawns the process named in ppi->argv[0]. The resulting process id - * is stored in the context (ppi->pid). On successfully spawning the process, - * it creates a dispatch source for the purpose of executing the routine pid_event(pi,ev) - * when certain events (exit, fork, exec, reap, or signal) occur to the process. 
- */ -void proc_start(void *ppi, dispatch_queue_t pq) { - struct pinfo *pi = ppi; - - int rc = posix_spawnp(&pi->pid, pi->argv[0], NULL, NULL, pi->argv, environ); - if (rc) { - int e = errno; - qprintf("Can't spawn %s (rc=%d, e=%d %s)\n", pi->argv[0], rc, e, strerror(e)); - } else { - - dispatch_source_t dsp = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pi->pid, DISPATCH_PROC_EXIT|DISPATCH_PROC_FORK|DISPATCH_PROC_EXEC|DISPATCH_PROC_SIGNAL, pq); - dispatch_source_set_event_handler_f(dsp, (dispatch_function_t)pid_event); - dispatch_source_set_cancel_handler_f(dsp, (dispatch_function_t)pid_finalize); - pi->source = dsp; - dispatch_set_context(dsp, pi); - dispatch_resume(dsp); - - qprintf("process %d spawned %s: %d, watching with event source: %p\n", getpid(), pi->argv[0], pi->pid, dsp); - - } -} - -int main(int argc, char *argv[]) { - struct pinfo pi, pi2, pi3; - struct pinfo *ppi2 = & pi2, *ppi3 = &pi3; - - char *av[] = {argv[0], NULL}; // set up context info (struct pinfo) for this process. - pi.pid = getpid(); - pi.argv = av; - - char *av2[] = {"sleep", "3", NULL}; // set up context info (struct pinfo) for the sleep tool - pi2.argv = av2; - - char *av3[] = {"script", "/tmp/LOG", "banner", "-w80", "!", NULL}; // set up context info (struct pinfo) for the script tool - pi3.argv = av3; - - dispatch_queue_t pq = dispatch_queue_create("PQ", NULL); // create our main processing queue - - qpf = dispatch_queue_create("qprintf", NULL); // create a separate queue for printf - - /* create a dispatch source that will call the routine pid_event(pi,ev) - * when certain events occur to the specified process (pi->pid). The dispatch source is - * associated with the dispatch queue that was created in this routine (pq). This example - * requests the block be executed whenever one of the following events occurs: - * exit, fork, exec, reap, or signal. 
- */ - dispatch_source_t procSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pi.pid, DISPATCH_PROC_EXIT|DISPATCH_PROC_FORK|DISPATCH_PROC_EXEC|DISPATCH_PROC_SIGNAL, pq); - - dispatch_source_set_event_handler_f(procSource, (dispatch_function_t)pid_event); - dispatch_source_set_cancel_handler_f(procSource, (dispatch_function_t)pid_finalize); - pi.source = procSource; - dispatch_set_context(procSource, &pi); - dispatch_resume(procSource); - - /* create a block (which simply calls proc_start()), and dispatch it to the queue. - * proc_start() will spawn the process named by ppiX->argv[0], and set up - * another block (containing a call to pid_event()) on an event source that - * will recieve process events... - */ - dispatch_async(pq, ^(void) { proc_start( ppi2, pq ); }); // launch the sleep tool, and create the process watcher for it - dispatch_async(pq, ^(void) { proc_start( ppi3, pq ); }); // launch the script tool, and create the process watcher for it - - - dispatch_main(); // wait for all the queued and spawned items to finish... -} diff --git a/examples/Dispatch Samples/readFile.c b/examples/Dispatch Samples/readFile.c deleted file mode 100644 index 9c537c50b..000000000 --- a/examples/Dispatch Samples/readFile.c +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. 
- * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -int main(int argc, char* argv[]) -{ - int infd; - dispatch_source_t fileSource; - - if (argc != 2) { - fprintf(stderr, "usage: %s file ...\n", argv[0]); - exit(1); - } - - - infd = open(argv[1], O_RDONLY); - if (infd == -1) { - perror(argv[1]); - exit(1); - } - - if (fcntl(infd, F_SETFL, O_NONBLOCK) != 0) { - perror(argv[1]); - exit(1); - } - - fileSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_queue_create("read source queue",NULL)); - - dispatch_source_set_event_handler( fileSource, ^{ - char buffer[10]; - size_t estimated = dispatch_source_get_data(fileSource); - printf("Estimated bytes available: %ld\n", estimated); - ssize_t actual = read(infd, buffer, sizeof(buffer)); - if (actual == -1) { - if (errno != EAGAIN) { - perror("read"); - exit(-1); - } - } else { - if (estimated>actual) { - printf(" bytes read: %ld\n", actual); - } else { - // end of file has been reached. 
- printf(" last bytes read: %ld\n", actual); - dispatch_source_cancel(fileSource); - } - } - }); - - dispatch_source_set_cancel_handler( fileSource, ^{ - // release all our associated dispatch data structures - dispatch_release(fileSource); - dispatch_release(dispatch_get_current_queue()); - // close the file descriptor because we are done reading it - close(infd); - // and since we have nothing left to do, exit the tool - exit(0); - - }); - - dispatch_resume(fileSource); - - dispatch_main(); - - return 0; -} diff --git a/examples/Dispatch Samples/readFileF.c b/examples/Dispatch Samples/readFileF.c deleted file mode 100644 index 6546714b9..000000000 --- a/examples/Dispatch Samples/readFileF.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. 
may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. - * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include -#include - -#include - - -void readFileData(void* theSource) { - char buffer[10]; - size_t estimated = dispatch_source_get_data(theSource); - printf("Estimated bytes available: %ld\n", estimated); - ssize_t actual = read(dispatch_source_get_handle(theSource), buffer, sizeof(buffer)); - if (actual == -1) { - if (errno != EAGAIN) { - perror("read"); - exit(-1); - } - } else { - if (estimated>actual) { - printf(" bytes read: %ld\n", actual); - } else { - // end of file has been reached. 
- printf(" last bytes read: %ld\n", actual); - dispatch_source_cancel(theSource); - } - } -} - -void cancelSource(void* theSource) { - close(dispatch_source_get_handle(theSource)); - dispatch_release(theSource); - dispatch_release(dispatch_get_current_queue()); - printf("Everything is finished, goodbye.\n"); - exit(0); -} - -int main(int argc, char* argv[]) -{ - int infd; - dispatch_source_t fileSource; - - if (argc != 2) { - fprintf(stderr, "usage: %s file ...\n", argv[0]); - exit(1); - } - - - infd = open(argv[1], O_RDONLY); - if (infd == -1) { - perror(argv[1]); - exit(1); - } - - if (fcntl(infd, F_SETFL, O_NONBLOCK) != 0) { - perror(argv[1]); - exit(1); - } - - fileSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, infd, 0, dispatch_queue_create("read source queue",NULL)); - dispatch_source_set_event_handler_f( fileSource, readFileData); - dispatch_source_set_cancel_handler_f( fileSource, cancelSource); - // setting the context pointer to point to the source itself means the functions will get the source - // as a paremeter, from there they can get all the information they need. - dispatch_set_context(fileSource, fileSource); - dispatch_resume(fileSource); - - dispatch_main(); - - return 0; -} diff --git a/examples/Dispatch Samples/timers.c b/examples/Dispatch Samples/timers.c deleted file mode 100644 index 7dc9f8c0f..000000000 --- a/examples/Dispatch Samples/timers.c +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. 
- * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#include -#include -#include -#include -#include -#include - -#include - -int main(int argc, char* argv[]) -{ - dispatch_source_t theTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_queue_create("timer queue",NULL)); - - __block int i = 0; - - printf("Starting to count by seconds\n"); - - dispatch_source_set_event_handler(theTimer, ^{ - printf("%d\n", ++i); - if (i >= 6) { - printf("i>6\n"); - dispatch_source_cancel(theTimer); - } - if (i == 3) { - printf("switching to half seconds\n"); - dispatch_source_set_timer(theTimer, DISPATCH_TIME_NOW, NSEC_PER_SEC / 2, 0); - } - }); - - dispatch_source_set_cancel_handler(theTimer, ^{ - printf("dispatch source canceled OK\n"); - dispatch_release(theTimer); - exit(0); - }); - - dispatch_source_set_timer(theTimer, dispatch_time(DISPATCH_TIME_NOW,NSEC_PER_SEC) , NSEC_PER_SEC, 0); - - dispatch_resume(theTimer); - dispatch_main(); - - return 0; -} diff --git a/examples/DispatchLife/DispatchLife.c b/examples/DispatchLife/DispatchLife.c deleted file mode 100644 index 0871e4a3c..000000000 --- a/examples/DispatchLife/DispatchLife.c +++ /dev/null @@ -1,392 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. 
- * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ -/*! - @header Life - An asynchronous variation of Conway's Game of Life implemented with - GCD. Like the classic version, the game board consists of a grid of - cells that can live, die or multiply by the following rules[1]: - - 1. Survivals. Every [living cell] with two or three neighboring - [living cells] survives for the next generation. - 2. Deaths. Each [living cell] wiht four or more neighbors dies (is - removed) from overpopulation. Every [living cell] with one - neighbor or none dies from isolation. - 3. Births. Each empty cell adjacent to exactly three neighbors--no - more, no fewer--is a birth cell. A [living cell] is placed on it - at the next move. - - However, unlike the classic version, not all deaths and births occur - simultaneously in a single, synchronous, "move" of the game board. - Instead the rules are applies to each cell independently based on its - observations of the cells around it. - - Each cell is backed by a GCD queue which manages the synchronization - of the cells internal state (living or dead). When a cell's state - changes, a notification in the form of a dispatch_call() of the - cell_needs_update() work function is sent to all adjacent cells so - that the state of those cells may be re-evaluated. 
- - To re-evaluate the state of a cell, a request of the current state of - all adjecent cells is sent in the form of a dispatch_call() of the - _cell_is_alive() work function. The state of the adjacent cells is - returned to the requestor via the _cell_is_alive_callback() completion - callback. Once all outstanding completion callbacks have been - received, the cell updates its state according to the aforementioned - rules. If the application of these rules results in another state - change, the update_cell() notification is once again sent out, - repeating the process. - - Due to the highly asynchronous nature of this implementation, the - simulation's results may differ from the classic version for the same - set of initial conditions. In particular, due to non-deterministic - scheduling factors, the same set of initial condiitions is likely to - produce dramatically different results on subsequent simulations. - - [1] Martin Gardner. "MATHEMATICAL GAMES: The fantastic combinations of - John Conway's new solitaire game 'life'" Scientific American 223 - (October 1970): 120-123. - - @copyright Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
- @updated 2009-03-31 -*/ -//////////////////////////////////////////////////////////////////////////////// - -// Adjustable parameters -unsigned long grid_x_size = 40; -unsigned long grid_y_size = 20; - -int use_curses = 1; - -//////////////////////////////////////////////////////////////////////////////// -#include -#include -#include -#include -#include -#include -#include - -#define CELL_MAX_NEIGHBORS 8 - -struct cell { - dispatch_queue_t q; - int alive; - char display; - - // tracks whether a update_cell() notification arrived while - // an update was already in progress - int needs_update; - int living_neighbors; - int queries_outstanding; - - struct cell* neighbors[CELL_MAX_NEIGHBORS]; - char* label; -} __attribute__((aligned(64))); - -//////////////////////////////////////////////////////////////////////////////// - -/*! @function init_grid - Initializes the grid data structure based on the global variables - grid_x_size and grid_y_size. Must be called before any calls to - cell_set_alive. */ -struct cell* init_grid(size_t grid_x_size, size_t grid_y_size); - -/*! @function init_display - Initializes the display subsystem. Starts a periodic timer to update the - display based on the current contents of the cell grid. 
- */ -void init_display(struct cell* grid); - -//////////////////////////////////////////////////////////////////////////////// - -// Macro to test whether x,y coordinates are within bounds of the grid -#define GRID_VALID(u,v) (((u) >= 0) && ((v) >= 0) && \ - ((u) < grid_x_size) && ((v) < grid_y_size)) -// Macro to translate from 2d grid coordinates to array offest -#define GRID_OFF(u,v) ((v) * grid_x_size + (u)) - -#if !defined(DISPATCH_LIFE_GL) -int main(int argc, char* argv[]) { - - struct ttysize tsz; - int res; - - res = ioctl(STDIN_FILENO, TIOCGWINSZ, &tsz); - if (res == 0) { - grid_x_size = tsz.ts_cols; - grid_y_size = tsz.ts_lines; - } - - int dispflag = 1; - int ch; - - while ((ch = getopt(argc, argv, "x:y:q")) != -1) { - char* endptr; - switch (ch) { - case 'x': - grid_x_size = strtol(optarg, &endptr, 10); - if (grid_x_size < 0 || (endptr && *endptr != 0)) { - fprintf(stderr, "life: invalid x size\n"); - exit(1); - } - break; - case 'y': - grid_y_size = strtol(optarg, &endptr, 10); - if (grid_y_size < 0 || (endptr && *endptr != 0)) { - fprintf(stderr, "life: invalid y size\n"); - exit(1); - } - break; - case 'q': - dispflag = 0; - break; - case '?': - default: - fprintf(stderr, "usage: life [-q] [-x size] [-y size]\n"); - fprintf(stderr, "\t-x: grid x size (default is terminal columns)\n"); - fprintf(stderr, "\t-y: grid y size (default is terminal rows)\n"); - fprintf(stderr, "\t-q: suppress display output\n"); - exit(1); - } - } - - struct cell* grid = init_grid(grid_x_size, grid_y_size); - - if (dispflag) { - init_display(grid); - if (use_curses) { - initscr(); cbreak(); noecho(); - nonl(); - intrflush(stdscr, FALSE); - keypad(stdscr, TRUE); - } - } - - dispatch_main(); - - if (dispflag && use_curses) { - endwin(); - } - - return 0; -} -#endif /* defined(DISPATCH_LIFE_GL) */ - -//////////////////////////////////////////////////////////////////////////////// - -static void cell_set_alive(struct cell*, int alive); - -/*! 
@function update_cell - GCD work function. Begins the update process for a cell by - sending cell_is_alive() messages with cell_is_alive_callback() - completion callbacks to all adjacent cells. If an update is already - in progress, simply sets the needs_update flag of the cell. */ -static void update_cell(struct cell*); - -/*! @function cell_is_alive_callback - GCD completion callback. Receives the result from cell_is_alive. When - all _cell_is_alive_callback() completion callbacks have been received - from an update, recalculates the internal state of the cell. If the - state changes, sends update_cell() to all adjacent cells. */ -static void update_cell_response(struct cell*, int); - -//////////////////////////////////////////////////////////////////////////////// - -void -foreach_neighbor(struct cell* self, void (^action)(struct cell* other)) { - int i; - for (i = 0; i < CELL_MAX_NEIGHBORS; ++i) { - struct cell* other = self->neighbors[i]; - if (other) { - action(other); - } - } -} - - -// Change cell state, update the screen, and update neighbors. -void -cell_set_alive(struct cell* self, int alive) { - if (alive == self->alive) return; // nothing to do - - dispatch_async(self->q, ^{ - self->alive = alive; - self->display = (self->alive) ? '#' : ' '; - - foreach_neighbor(self, ^(struct cell* other) { - dispatch_async(other->q, ^{ update_cell(other); }); - }); - }); -} - -void -update_cell(struct cell* self) { - if (self->queries_outstanding == 0) { - self->needs_update = 0; - self->living_neighbors = 0; - - foreach_neighbor(self, ^(struct cell* other) { - ++self->queries_outstanding; - dispatch_async(other->q, ^{ - dispatch_async(self->q, ^{ update_cell_response(self, other->alive); }); - }); - }); - - // '.' 
indicates the cell is not alive but needs an update - if (!self->alive) self->display = '.'; - } else { - self->needs_update = 1; - } -} - -void -update_cell_response(struct cell* self, int response) { - if (response) ++self->living_neighbors; - --self->queries_outstanding; - - // when all neighbors have replied with their state, - // recalculate our internal state - if (self->queries_outstanding == 0) { - const int living_neighbors = self->living_neighbors; - int alive = self->alive; - - // Conway's Game of Life - if (living_neighbors < 2 || living_neighbors > 3) { - alive = 0; - } else if (living_neighbors == 3) { - alive = 1; - } - - // Notify neighbors of state change - cell_set_alive(self, alive); - - // if a request for an update came in while we were - // already processing one, kick off the next update - if (self->needs_update) { - dispatch_async(self->q, ^{ update_cell(self); }); - } else { - // otherwise clear the '.' character that was - // displayed during the update - if (!self->alive) { - self->display = ' '; - } - } - } -} - -//////////////////////////////////////////////////////////////////////////////// - -struct cell* -init_grid(size_t grid_x_size, size_t grid_y_size) { - struct cell* grid = calloc(sizeof(struct cell),grid_x_size*grid_y_size); - - int i,j; - for (i = 0; i < grid_x_size; ++i) { - for (j = 0; j < grid_y_size; ++j) { - struct cell* ptr = &grid[GRID_OFF(i,j)]; - - asprintf(&ptr->label, "x%dy%d", i, j); - - ptr->q = dispatch_queue_create(ptr->label, NULL); - dispatch_set_target_queue(ptr->q, dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_LOW, 0)); - dispatch_queue_set_context(ptr->q, ptr); - - ptr->neighbors[0] = GRID_VALID(i ,j-1) ? - &grid[GRID_OFF(i ,j-1)] : NULL; // N - ptr->neighbors[1] = GRID_VALID(i+1,j-1) ? - &grid[GRID_OFF(i+1,j-1)] : NULL; // NE - ptr->neighbors[2] = GRID_VALID(i+1,j ) ? - &grid[GRID_OFF(i+1,j )] : NULL; // E - ptr->neighbors[3] = GRID_VALID(i+1,j+1) ? 
- &grid[GRID_OFF(i+1,j+1)] : NULL; // SE - ptr->neighbors[4] = GRID_VALID(i ,j+1) ? - &grid[GRID_OFF(i ,j+1)] : NULL; // S - ptr->neighbors[5] = GRID_VALID(i-1,j+1) ? - &grid[GRID_OFF(i-1,j+1)] : NULL; // SW - ptr->neighbors[6] = GRID_VALID(i-1,j ) ? - &grid[GRID_OFF(i-1,j )] : NULL; // W - ptr->neighbors[7] = GRID_VALID(i-1,j-1) ? - &grid[GRID_OFF(i-1,j-1)] : NULL; // NW - } - } - - srandomdev(); - for (i = 0; i < grid_x_size; ++i) { - for (j = 0; j < grid_y_size; ++j) { - if (random() & 1) { - cell_set_alive(&grid[GRID_OFF(i,j)], 1); - } - } - } - - return grid; -} - -#if defined(DISPATCH_LIFE_GL) -char -get_grid_display_char(struct cell* grid, size_t x, size_t y) { - return grid[GRID_OFF(x,y)].display; -} -#endif /* defined(DISPATCH_LIFE_GL) */ - -#if !defined(DISPATCH_LIFE_GL) -void -init_display(struct cell* grid) -{ - dispatch_source_t timer; - - timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); - dispatch_source_set_timer(dispatch_time(DISPATCH_TIME_NOW, 0). 10000000, 1000); - dispatch_source_set_event_handler(^{ - int x,y; - x = 0; - for (x = 0; x < grid_x_size; ++x) { - for (y = 0; y < grid_y_size; ++y) { - mvaddnstr(y, x, &grid[GRID_OFF(x,y)].display, 1); - } - } - refresh(); - }); - dispatch_resume(timer); -} -#endif /* defined(DISPATCH_LIFE_GL) */ diff --git a/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj b/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj deleted file mode 100644 index 68972782d..000000000 --- a/examples/DispatchLife/DispatchLife.xcodeproj/project.pbxproj +++ /dev/null @@ -1,252 +0,0 @@ -// !$*UTF8*$! 
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 45; - objects = { - -/* Begin PBXBuildFile section */ - 8D11072A0486CEB800E47090 /* MainMenu.nib in Resources */ = {isa = PBXBuildFile; fileRef = 29B97318FDCFA39411CA2CEA /* MainMenu.nib */; }; - 8D11072B0486CEB800E47090 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */; }; - 8D11072D0486CEB800E47090 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 29B97316FDCFA39411CA2CEA /* main.m */; settings = {ATTRIBUTES = (); }; }; - 8D11072F0486CEB800E47090 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */; }; - FC0615200DF53162002BF852 /* DispatchLifeGLView.m in Sources */ = {isa = PBXBuildFile; fileRef = FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */; }; - FC0615450DF535BD002BF852 /* OpenGL.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = FC0615440DF535BD002BF852 /* OpenGL.framework */; }; - FC787BF60DF67AAF009415DA /* DispatchLife.c in Sources */ = {isa = PBXBuildFile; fileRef = FC787BF50DF67AAF009415DA /* DispatchLife.c */; }; -/* End PBXBuildFile section */ - -/* Begin PBXFileReference section */ - 089C165DFE840E0CC02AAC07 /* English */ = {isa = PBXFileReference; fileEncoding = 10; lastKnownFileType = text.plist.strings; name = English; path = English.lproj/InfoPlist.strings; sourceTree = ""; }; - 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Cocoa.framework; path = /System/Library/Frameworks/Cocoa.framework; sourceTree = ""; }; - 29B97316FDCFA39411CA2CEA /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; - 29B97319FDCFA39411CA2CEA /* English */ = {isa = PBXFileReference; lastKnownFileType = wrapper.nib; name = English; path = English.lproj/MainMenu.nib; sourceTree = ""; }; - 29B97324FDCFA39411CA2CEA 
/* AppKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AppKit.framework; path = /System/Library/Frameworks/AppKit.framework; sourceTree = ""; }; - 29B97325FDCFA39411CA2CEA /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = /System/Library/Frameworks/Foundation.framework; sourceTree = ""; }; - 8D1107310486CEB800E47090 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; - 8D1107320486CEB800E47090 /* DispatchLife.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = DispatchLife.app; sourceTree = BUILT_PRODUCTS_DIR; }; - FC06151E0DF53162002BF852 /* DispatchLifeGLView.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DispatchLifeGLView.h; sourceTree = ""; }; - FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = DispatchLifeGLView.m; sourceTree = ""; }; - FC0615440DF535BD002BF852 /* OpenGL.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = OpenGL.framework; path = /System/Library/Frameworks/OpenGL.framework; sourceTree = ""; }; - FC787BF50DF67AAF009415DA /* DispatchLife.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = DispatchLife.c; sourceTree = ""; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 8D11072E0486CEB800E47090 /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 8D11072F0486CEB800E47090 /* Cocoa.framework in Frameworks */, - FC0615450DF535BD002BF852 /* OpenGL.framework in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 
080E96DDFE201D6D7F000001 /* Classes */ = { - isa = PBXGroup; - children = ( - FC06151E0DF53162002BF852 /* DispatchLifeGLView.h */, - FC06151F0DF53162002BF852 /* DispatchLifeGLView.m */, - ); - name = Classes; - sourceTree = ""; - }; - 1058C7A0FEA54F0111CA2CBB /* Linked Frameworks */ = { - isa = PBXGroup; - children = ( - 1058C7A1FEA54F0111CA2CBB /* Cocoa.framework */, - FC0615440DF535BD002BF852 /* OpenGL.framework */, - ); - name = "Linked Frameworks"; - sourceTree = ""; - }; - 1058C7A2FEA54F0111CA2CBB /* Other Frameworks */ = { - isa = PBXGroup; - children = ( - 29B97324FDCFA39411CA2CEA /* AppKit.framework */, - 29B97325FDCFA39411CA2CEA /* Foundation.framework */, - ); - name = "Other Frameworks"; - sourceTree = ""; - }; - 19C28FACFE9D520D11CA2CBB /* Products */ = { - isa = PBXGroup; - children = ( - 8D1107320486CEB800E47090 /* DispatchLife.app */, - ); - name = Products; - sourceTree = ""; - }; - 29B97314FDCFA39411CA2CEA /* DispatchLife */ = { - isa = PBXGroup; - children = ( - 080E96DDFE201D6D7F000001 /* Classes */, - 29B97315FDCFA39411CA2CEA /* Other Sources */, - 29B97317FDCFA39411CA2CEA /* Resources */, - 29B97323FDCFA39411CA2CEA /* Frameworks */, - 19C28FACFE9D520D11CA2CBB /* Products */, - ); - name = DispatchLife; - sourceTree = ""; - }; - 29B97315FDCFA39411CA2CEA /* Other Sources */ = { - isa = PBXGroup; - children = ( - FC787BF50DF67AAF009415DA /* DispatchLife.c */, - 29B97316FDCFA39411CA2CEA /* main.m */, - ); - name = "Other Sources"; - sourceTree = ""; - }; - 29B97317FDCFA39411CA2CEA /* Resources */ = { - isa = PBXGroup; - children = ( - 8D1107310486CEB800E47090 /* Info.plist */, - 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */, - 29B97318FDCFA39411CA2CEA /* MainMenu.nib */, - ); - name = Resources; - sourceTree = ""; - }; - 29B97323FDCFA39411CA2CEA /* Frameworks */ = { - isa = PBXGroup; - children = ( - 1058C7A0FEA54F0111CA2CBB /* Linked Frameworks */, - 1058C7A2FEA54F0111CA2CBB /* Other Frameworks */, - ); - name = Frameworks; - sourceTree = ""; - 
}; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 8D1107260486CEB800E47090 /* DispatchLife */ = { - isa = PBXNativeTarget; - buildConfigurationList = C01FCF4A08A954540054247B /* Build configuration list for PBXNativeTarget "DispatchLife" */; - buildPhases = ( - 8D1107290486CEB800E47090 /* Resources */, - 8D11072C0486CEB800E47090 /* Sources */, - 8D11072E0486CEB800E47090 /* Frameworks */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = DispatchLife; - productInstallPath = "$(HOME)/Applications"; - productName = DispatchLife; - productReference = 8D1107320486CEB800E47090 /* DispatchLife.app */; - productType = "com.apple.product-type.application"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 29B97313FDCFA39411CA2CEA /* Project object */ = { - isa = PBXProject; - buildConfigurationList = C01FCF4E08A954540054247B /* Build configuration list for PBXProject "DispatchLife" */; - compatibilityVersion = "Xcode 3.1"; - hasScannedForEncodings = 1; - mainGroup = 29B97314FDCFA39411CA2CEA /* DispatchLife */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 8D1107260486CEB800E47090 /* DispatchLife */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXResourcesBuildPhase section */ - 8D1107290486CEB800E47090 /* Resources */ = { - isa = PBXResourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 8D11072A0486CEB800E47090 /* MainMenu.nib in Resources */, - 8D11072B0486CEB800E47090 /* InfoPlist.strings in Resources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXResourcesBuildPhase section */ - -/* Begin PBXSourcesBuildPhase section */ - 8D11072C0486CEB800E47090 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 8D11072D0486CEB800E47090 /* main.m in Sources */, - FC0615200DF53162002BF852 /* DispatchLifeGLView.m in Sources */, - FC787BF60DF67AAF009415DA /* DispatchLife.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; 
-/* End PBXSourcesBuildPhase section */ - -/* Begin PBXVariantGroup section */ - 089C165CFE840E0CC02AAC07 /* InfoPlist.strings */ = { - isa = PBXVariantGroup; - children = ( - 089C165DFE840E0CC02AAC07 /* English */, - ); - name = InfoPlist.strings; - sourceTree = ""; - }; - 29B97318FDCFA39411CA2CEA /* MainMenu.nib */ = { - isa = PBXVariantGroup; - children = ( - 29B97319FDCFA39411CA2CEA /* English */, - ); - name = MainMenu.nib; - sourceTree = ""; - }; -/* End PBXVariantGroup section */ - -/* Begin XCBuildConfiguration section */ - C01FCF4C08A954540054247B /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - INFOPLIST_FILE = Info.plist; - PRODUCT_NAME = DispatchLife; - }; - name = Release; - }; - C01FCF5008A954540054247B /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - ARCHS = "$(ARCHS_STANDARD_32_BIT)"; - GCC_C_LANGUAGE_STANDARD = c99; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - OTHER_CFLAGS = ( - "-DDISPATCH_LIFE_GL", - "-fblocks", - ); - PREBINDING = NO; - SDKROOT = ""; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - C01FCF4A08A954540054247B /* Build configuration list for PBXNativeTarget "DispatchLife" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - C01FCF4C08A954540054247B /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - C01FCF4E08A954540054247B /* Build configuration list for PBXProject "DispatchLife" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - C01FCF5008A954540054247B /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - }; - rootObject = 29B97313FDCFA39411CA2CEA /* Project object */; -} diff --git a/examples/DispatchLife/DispatchLifeGLView.h b/examples/DispatchLife/DispatchLifeGLView.h deleted file mode 100644 index 
7ed6bbdd7..000000000 --- a/examples/DispatchLife/DispatchLifeGLView.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2009-2009 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. 
APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. - * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#import - -struct cell; - -// From DispatchLife.c -extern struct cell* init_grid(size_t grid_x_size, size_t grid_y_size); -extern char get_grid_display_char(struct cell* grid, size_t x, size_t y); - -@interface DispatchLifeGLView : NSOpenGLView { - struct cell* grid; - uint32_t* image; -} - -- (void)adjustGLViewBounds; - -@end diff --git a/examples/DispatchLife/DispatchLifeGLView.m b/examples/DispatchLife/DispatchLifeGLView.m deleted file mode 100644 index 5aa843b65..000000000 --- a/examples/DispatchLife/DispatchLifeGLView.m +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright (c) 2009-2009 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. 
- * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#import "DispatchLifeGLView.h" - -#import - -#include -#include -#include - -#include -#include - -extern size_t grid_x_size; -extern size_t grid_y_size; - -@implementation DispatchLifeGLView - -#define CELL_WIDTH 8 -#define CELL_HEIGHT 8 - -- (void)goFullScreen:(NSOpenGLView*)view { - NSOpenGLPixelFormatAttribute attrs[] = - { - NSOpenGLPFAFullScreen, - - NSOpenGLPFAScreenMask, CGDisplayIDToOpenGLDisplayMask(kCGDirectMainDisplay), - - NSOpenGLPFAAccelerated, - NSOpenGLPFANoRecovery, - NSOpenGLPFADoubleBuffer, - 0 - }; - NSOpenGLPixelFormat* pixFmt = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; - - NSOpenGLContext* screen = [[NSOpenGLContext alloc] initWithFormat:pixFmt shareContext:[view openGLContext]]; - - CGDisplayErr err = CGCaptureAllDisplays(); - if (err != CGDisplayNoErr) { - [screen release]; - return; - } - - [screen setFullScreen]; - [screen makeCurrentContext]; - - glClearColor(0.0, 0.0, 0.0, 0.0); - glClear(GL_COLOR_BUFFER_BIT); - [screen flushBuffer]; - glClear(GL_COLOR_BUFFER_BIT); - [screen flushBuffer]; -} - - -- (id)initWithFrame:(NSRect)frame { - NSOpenGLPixelFormatAttribute attrs[] = - { - NSOpenGLPFAAccelerated, - NSOpenGLPFANoRecovery, - NSOpenGLPFADoubleBuffer, - 0 - }; - NSOpenGLPixelFormat* pixFmt = [[NSOpenGLPixelFormat alloc] initWithAttributes:attrs]; - - self = [super initWithFrame:frame pixelFormat:pixFmt]; - if (self) { - - [[self 
openGLContext] makeCurrentContext]; - glPixelStorei(GL_UNPACK_ALIGNMENT, 1); - glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE); - glClearColor(1.0, 1.0, 1.0, 1.0); - glColor4f(1.0, 1.0, 1.0, 1.0); - glEnable(GL_RASTER_POSITION_UNCLIPPED_IBM); - glDisable(GL_DITHER); - - grid_x_size = 128; - grid_y_size = 96; - - self->grid = init_grid(grid_x_size, grid_y_size); - size_t image_size = grid_x_size * grid_y_size * sizeof(uint32_t); - self->image = malloc(image_size); - memset(self->image, 0xFF, image_size); - - [self adjustGLViewBounds]; - - [[NSTimer scheduledTimerWithTimeInterval: (1.0f / 15.0) target: self selector:@selector(drawRect:) userInfo:self repeats:true] retain]; - - } - return self; -} - -- (void)drawRect:(NSRect)rect { - [[self openGLContext] makeCurrentContext]; - - glClear(GL_COLOR_BUFFER_BIT); - - NSRect bounds = [self bounds]; - glRasterPos2f(-bounds.size.width/2, -bounds.size.height/2); - glPixelZoom(bounds.size.width/grid_x_size, bounds.size.height/grid_y_size); - - const int width = grid_x_size; - const int height = grid_y_size; - - int x, y; - for (y = 0; y < height; ++y) { - for (x = 0; x < width; ++x) { - int i = y * width + x; - switch (get_grid_display_char(grid, x, y)) { - case '.': - image[i] = 0xCCCCCCFF; - break; - case '#': - image[i] = 0x000000FF; - break; - case ' ': - image[i] = 0xFFFFFFFF; - break; - default: - image[i] = 0x0000FFFF; - break; - } - } - } - - glDrawPixels(width, height, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8, image); - - glFinish(); - - [[self openGLContext] flushBuffer]; - -} - -- (void)adjustGLViewBounds -{ - [[self openGLContext] makeCurrentContext]; - [[self openGLContext] update]; - - NSRect rect = [self bounds]; - - glViewport(0, 0, (GLint) rect.size.width, (GLint) rect.size.height); - glMatrixMode(GL_PROJECTION); - glLoadIdentity(); - gluOrtho2D(-(rect.size.width/2), rect.size.width/2, -(rect.size.height/2), rect.size.height/2); - glMatrixMode(GL_MODELVIEW); - glLoadIdentity(); - - [self 
setNeedsDisplay:true]; -} - -- (void)update // moved or resized -{ - [super update]; - [self adjustGLViewBounds]; -} - -- (void)reshape // scrolled, moved or resized -{ - [super reshape]; - [self adjustGLViewBounds]; -} - -@end diff --git a/examples/DispatchLife/English.lproj/InfoPlist.strings b/examples/DispatchLife/English.lproj/InfoPlist.strings deleted file mode 100644 index 5e45963c382ba690b781b953a00585212b898ac5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 92 zcmW-XQ3`+{5C!MkQ~2$No+IcIkqMDxWCV8j>LCj|yTg2Mz+o9F%uHlf9u}h9EuK`F a!Y*1dX%G66ZqL#C$|bw0ZoP5@jOGW1ArT7z diff --git a/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib b/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib deleted file mode 100644 index 0cdc4e13a..000000000 --- a/examples/DispatchLife/English.lproj/MainMenu.nib/designable.nib +++ /dev/null @@ -1,2651 +0,0 @@ - - - - 0 - 10A219 - 708 - 994.4 - 404.00 - - com.apple.InterfaceBuilder.CocoaPlugin - 708 - - - YES - - - YES - com.apple.InterfaceBuilder.CocoaPlugin - - - YES - - YES - - - YES - - - - YES - - NSApplication - - - FirstResponder - - - NSApplication - - - AMainMenu - - YES - - - DispatchLife - - 1048576 - 2147483647 - - NSImage - NSMenuCheckmark - - - NSImage - NSMenuMixedState - - submenuAction: - - DispatchLife - - YES - - - About DispatchLife - - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - UHJlZmVyZW5jZXPigKY - , - 1048576 - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Services - - 1048576 - 2147483647 - - - submenuAction: - - Services - - YES - - _NSServicesMenu - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Hide DispatchLife - h - 1048576 - 2147483647 - - - - - - Hide Others - h - 1572864 - 2147483647 - - - - - - Show All - - 1048576 - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Quit DispatchLife - q - 1048576 - 2147483647 - - - - - _NSAppleMenu - - - - - File - - 
1048576 - 2147483647 - - - submenuAction: - - File - - YES - - - New - n - 1048576 - 2147483647 - - - - - - T3BlbuKApg - o - 1048576 - 2147483647 - - - - - - Open Recent - - 1048576 - 2147483647 - - - submenuAction: - - Open Recent - - YES - - - Clear Menu - - 1048576 - 2147483647 - - - - - _NSRecentDocumentsMenu - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Close - w - 1048576 - 2147483647 - - - - - - Save - s - 1048576 - 2147483647 - - - - - - U2F2ZSBBc+KApg - S - 1179648 - 2147483647 - - - - - - Revert to Saved - - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Page Setup... - P - 1179648 - 2147483647 - - - - - - - UHJpbnTigKY - p - 1048576 - 2147483647 - - - - - - - - - Edit - - 1048576 - 2147483647 - - - submenuAction: - - Edit - - YES - - - Undo - z - 1048576 - 2147483647 - - - - - - Redo - Z - 1179648 - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Cut - x - 1048576 - 2147483647 - - - - - - Copy - c - 1048576 - 2147483647 - - - - - - Paste - v - 1048576 - 2147483647 - - - - - - Delete - - 1048576 - 2147483647 - - - - - - Select All - a - 1048576 - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Find - - 1048576 - 2147483647 - - - submenuAction: - - Find - - YES - - - RmluZOKApg - f - 1048576 - 2147483647 - - - 1 - - - - Find Next - g - 1048576 - 2147483647 - - - 2 - - - - Find Previous - G - 1179648 - 2147483647 - - - 3 - - - - Use Selection for Find - e - 1048576 - 2147483647 - - - 7 - - - - Jump to Selection - j - 1048576 - 2147483647 - - - - - - - - - Spelling and Grammar - - 1048576 - 2147483647 - - - submenuAction: - - Spelling and Grammar - - YES - - - U2hvdyBTcGVsbGluZ+KApg - : - 1048576 - 2147483647 - - - - - - Check Spelling - ; - 1048576 - 2147483647 - - - - - - Check Spelling While Typing - - 1048576 - 2147483647 - - - - - - Check Grammar With Spelling - - 1048576 - 2147483647 - - - - - - - - - Substitutions - - 1048576 - 2147483647 - - - submenuAction: - - 
Substitutions - - YES - - - Smart Copy/Paste - f - 1048576 - 2147483647 - - - 1 - - - - Smart Quotes - g - 1048576 - 2147483647 - - - 2 - - - - Smart Links - G - 1179648 - 2147483647 - - - 3 - - - - - - - Speech - - 1048576 - 2147483647 - - - submenuAction: - - Speech - - YES - - - Start Speaking - - 1048576 - 2147483647 - - - - - - Stop Speaking - - 1048576 - 2147483647 - - - - - - - - - - - - Format - - 1048576 - 2147483647 - - - submenuAction: - - Format - - YES - - - Show Fonts - t - 1048576 - 2147483647 - - - - - - Show Colors - C - 1179648 - 2147483647 - - - - - - - - - View - - 1048576 - 2147483647 - - - submenuAction: - - View - - YES - - - Show Toolbar - t - 1572864 - 2147483647 - - - - - - Q3VzdG9taXplIFRvb2xiYXLigKY - - 1048576 - 2147483647 - - - - - - - - - Window - - 1048576 - 2147483647 - - - submenuAction: - - Window - - YES - - - Minimize - m - 1048576 - 2147483647 - - - - - - Zoom - - 1048576 - 2147483647 - - - - - - YES - YES - - - 1048576 - 2147483647 - - - - - - Bring All to Front - - 1048576 - 2147483647 - - - - - _NSWindowsMenu - - - - - Help - - 1048576 - 2147483647 - - - submenuAction: - - Help - - YES - - - DispatchLife Help - ? 
- 1048576 - 2147483647 - - - - - - - - _NSMainMenu - - - 15 - 2 - {{384, 348}, {512, 384}} - 1954021376 - DispatchLife - NSWindow - - {3.40282e+38, 3.40282e+38} - - - 256 - - YES - - - 4415 - {512, 384} - - - DispatchLifeGLView - - - {512, 384} - - - - {{0, 0}, {1280, 1002}} - {3.40282e+38, 3.40282e+38} - - - - - YES - - - performMiniaturize: - - - - 37 - - - - arrangeInFront: - - - - 39 - - - - print: - - - - 86 - - - - runPageLayout: - - - - 87 - - - - clearRecentDocuments: - - - - 127 - - - - orderFrontStandardAboutPanel: - - - - 142 - - - - performClose: - - - - 193 - - - - toggleContinuousSpellChecking: - - - - 222 - - - - undo: - - - - 223 - - - - copy: - - - - 224 - - - - checkSpelling: - - - - 225 - - - - paste: - - - - 226 - - - - stopSpeaking: - - - - 227 - - - - cut: - - - - 228 - - - - showGuessPanel: - - - - 230 - - - - redo: - - - - 231 - - - - selectAll: - - - - 232 - - - - startSpeaking: - - - - 233 - - - - delete: - - - - 235 - - - - performZoom: - - - - 240 - - - - performFindPanelAction: - - - - 241 - - - - centerSelectionInVisibleArea: - - - - 245 - - - - toggleGrammarChecking: - - - - 347 - - - - toggleSmartInsertDelete: - - - - 355 - - - - toggleAutomaticQuoteSubstitution: - - - - 356 - - - - toggleAutomaticLinkDetection: - - - - 357 - - - - showHelp: - - - - 360 - - - - orderFrontColorPanel: - - - - 361 - - - - saveDocument: - - - - 362 - - - - saveDocumentAs: - - - - 363 - - - - revertDocumentToSaved: - - - - 364 - - - - runToolbarCustomizationPalette: - - - - 365 - - - - toggleToolbarShown: - - - - 366 - - - - hide: - - - - 367 - - - - hideOtherApplications: - - - - 368 - - - - terminate: - - - - 369 - - - - unhideAllApplications: - - - - 370 - - - - newDocument: - - - - 373 - - - - openDocument: - - - - 374 - - - - - YES - - 0 - - YES - - - - - - -2 - - - RmlsZSdzIE93bmVyA - - - -1 - - - First Responder - - - -3 - - - Application - - - 29 - - - YES - - - - - - - - - - MainMenu - - - 19 - - - YES - - - - - - 56 - - - YES - - - - - - 103 - - 
- YES - - - - 1 - - - 217 - - - YES - - - - - - 83 - - - YES - - - - - - 81 - - - YES - - - - - - - - - - - - - - - - 75 - - - 3 - - - 80 - - - 8 - - - 78 - - - 6 - - - 72 - - - - - 82 - - - 9 - - - 124 - - - YES - - - - - - 77 - - - 5 - - - 73 - - - 1 - - - 79 - - - 7 - - - 112 - - - 10 - - - 74 - - - 2 - - - 125 - - - YES - - - - - - 126 - - - - - 205 - - - YES - - - - - - - - - - - - - - - - - - 202 - - - - - 198 - - - - - 207 - - - - - 214 - - - - - 199 - - - - - 203 - - - - - 197 - - - - - 206 - - - - - 215 - - - - - 218 - - - YES - - - - - - 216 - - - YES - - - - - - 200 - - - YES - - - - - - - - - 219 - - - - - 201 - - - - - 204 - - - - - 220 - - - YES - - - - - - - - - - 213 - - - - - 210 - - - - - 221 - - - - - 208 - - - - - 209 - - - - - 106 - - - YES - - - - 2 - - - 111 - - - - - 57 - - - YES - - - - - - - - - - - - - - - - 58 - - - - - 134 - - - - - 150 - - - - - 136 - - - 1111 - - - 144 - - - - - 129 - - - 121 - - - 143 - - - - - 236 - - - - - 131 - - - YES - - - - - - 149 - - - - - 145 - - - - - 130 - - - - - 24 - - - YES - - - - - - - - - 92 - - - - - 5 - - - - - 239 - - - - - 23 - - - - - 295 - - - YES - - - - - - 296 - - - YES - - - - - - - 297 - - - - - 298 - - - - - 299 - - - YES - - - - - - 300 - - - YES - - - - - - - 344 - - - - - 345 - - - - - 211 - - - YES - - - - - - 212 - - - YES - - - - - - - 195 - - - - - 196 - - - - - 346 - - - - - 348 - - - YES - - - - - - 349 - - - YES - - - - - - - - 350 - - - - - 351 - - - - - 354 - - - - - 371 - - - YES - - - - - - 372 - - - YES - - - - - - 377 - - - - - - - YES - - YES - 103.IBPluginDependency - 103.ImportedFromIB2 - 106.IBEditorWindowLastContentRect - 106.IBPluginDependency - 106.ImportedFromIB2 - 106.editorWindowContentRectSynchronizationRect - 111.IBPluginDependency - 111.ImportedFromIB2 - 112.IBPluginDependency - 112.ImportedFromIB2 - 124.IBPluginDependency - 124.ImportedFromIB2 - 125.IBPluginDependency - 125.ImportedFromIB2 - 125.editorWindowContentRectSynchronizationRect - 
126.IBPluginDependency - 126.ImportedFromIB2 - 129.IBPluginDependency - 129.ImportedFromIB2 - 130.IBPluginDependency - 130.ImportedFromIB2 - 130.editorWindowContentRectSynchronizationRect - 131.IBPluginDependency - 131.ImportedFromIB2 - 134.IBPluginDependency - 134.ImportedFromIB2 - 136.IBPluginDependency - 136.ImportedFromIB2 - 143.IBPluginDependency - 143.ImportedFromIB2 - 144.IBPluginDependency - 144.ImportedFromIB2 - 145.IBPluginDependency - 145.ImportedFromIB2 - 149.IBPluginDependency - 149.ImportedFromIB2 - 150.IBPluginDependency - 150.ImportedFromIB2 - 19.IBPluginDependency - 19.ImportedFromIB2 - 195.IBPluginDependency - 195.ImportedFromIB2 - 196.IBPluginDependency - 196.ImportedFromIB2 - 197.IBPluginDependency - 197.ImportedFromIB2 - 198.IBPluginDependency - 198.ImportedFromIB2 - 199.IBPluginDependency - 199.ImportedFromIB2 - 200.IBPluginDependency - 200.ImportedFromIB2 - 200.editorWindowContentRectSynchronizationRect - 201.IBPluginDependency - 201.ImportedFromIB2 - 202.IBPluginDependency - 202.ImportedFromIB2 - 203.IBPluginDependency - 203.ImportedFromIB2 - 204.IBPluginDependency - 204.ImportedFromIB2 - 205.IBEditorWindowLastContentRect - 205.IBPluginDependency - 205.ImportedFromIB2 - 205.editorWindowContentRectSynchronizationRect - 206.IBPluginDependency - 206.ImportedFromIB2 - 207.IBPluginDependency - 207.ImportedFromIB2 - 208.IBPluginDependency - 208.ImportedFromIB2 - 209.IBPluginDependency - 209.ImportedFromIB2 - 210.IBPluginDependency - 210.ImportedFromIB2 - 211.IBPluginDependency - 211.ImportedFromIB2 - 212.IBPluginDependency - 212.ImportedFromIB2 - 212.editorWindowContentRectSynchronizationRect - 213.IBPluginDependency - 213.ImportedFromIB2 - 214.IBPluginDependency - 214.ImportedFromIB2 - 215.IBPluginDependency - 215.ImportedFromIB2 - 216.IBPluginDependency - 216.ImportedFromIB2 - 217.IBPluginDependency - 217.ImportedFromIB2 - 218.IBPluginDependency - 218.ImportedFromIB2 - 219.IBPluginDependency - 219.ImportedFromIB2 - 220.IBPluginDependency - 
220.ImportedFromIB2 - 220.editorWindowContentRectSynchronizationRect - 221.IBPluginDependency - 221.ImportedFromIB2 - 23.IBPluginDependency - 23.ImportedFromIB2 - 236.IBPluginDependency - 236.ImportedFromIB2 - 239.IBPluginDependency - 239.ImportedFromIB2 - 24.IBEditorWindowLastContentRect - 24.IBPluginDependency - 24.ImportedFromIB2 - 24.editorWindowContentRectSynchronizationRect - 29.IBEditorWindowLastContentRect - 29.IBPluginDependency - 29.ImportedFromIB2 - 29.WindowOrigin - 29.editorWindowContentRectSynchronizationRect - 295.IBPluginDependency - 296.IBEditorWindowLastContentRect - 296.IBPluginDependency - 296.editorWindowContentRectSynchronizationRect - 297.IBPluginDependency - 298.IBPluginDependency - 299.IBPluginDependency - 300.IBEditorWindowLastContentRect - 300.IBPluginDependency - 300.editorWindowContentRectSynchronizationRect - 344.IBPluginDependency - 345.IBPluginDependency - 346.IBPluginDependency - 346.ImportedFromIB2 - 348.IBPluginDependency - 348.ImportedFromIB2 - 349.IBPluginDependency - 349.ImportedFromIB2 - 349.editorWindowContentRectSynchronizationRect - 350.IBPluginDependency - 350.ImportedFromIB2 - 351.IBPluginDependency - 351.ImportedFromIB2 - 354.IBPluginDependency - 354.ImportedFromIB2 - 371.IBEditorWindowLastContentRect - 371.IBPluginDependency - 371.IBWindowTemplateEditedContentRect - 371.NSWindowTemplate.visibleAtLaunch - 371.editorWindowContentRectSynchronizationRect - 371.windowTemplate.maxSize - 372.IBPluginDependency - 377.IBPluginDependency - 377.IBViewIntegration.shadowBlurRadius - 377.IBViewIntegration.shadowColor - 377.IBViewIntegration.shadowOffsetHeight - 377.IBViewIntegration.shadowOffsetWidth - 5.IBPluginDependency - 5.ImportedFromIB2 - 56.IBPluginDependency - 56.ImportedFromIB2 - 57.IBEditorWindowLastContentRect - 57.IBPluginDependency - 57.ImportedFromIB2 - 57.editorWindowContentRectSynchronizationRect - 58.IBPluginDependency - 58.ImportedFromIB2 - 72.IBPluginDependency - 72.ImportedFromIB2 - 73.IBPluginDependency - 
73.ImportedFromIB2 - 74.IBPluginDependency - 74.ImportedFromIB2 - 75.IBPluginDependency - 75.ImportedFromIB2 - 77.IBPluginDependency - 77.ImportedFromIB2 - 78.IBPluginDependency - 78.ImportedFromIB2 - 79.IBPluginDependency - 79.ImportedFromIB2 - 80.IBPluginDependency - 80.ImportedFromIB2 - 81.IBEditorWindowLastContentRect - 81.IBPluginDependency - 81.ImportedFromIB2 - 81.editorWindowContentRectSynchronizationRect - 82.IBPluginDependency - 82.ImportedFromIB2 - 83.IBPluginDependency - 83.ImportedFromIB2 - 92.IBPluginDependency - 92.ImportedFromIB2 - - - YES - com.apple.InterfaceBuilder.CocoaPlugin - - {{394, 713}, {191, 23}} - com.apple.InterfaceBuilder.CocoaPlugin - - {{596, 852}, {216, 23}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{522, 812}, {146, 23}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{436, 809}, {64, 6}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{608, 612}, {275, 83}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{163, 493}, {240, 243}} - com.apple.InterfaceBuilder.CocoaPlugin - - {{187, 434}, {243, 243}} - 
com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{608, 612}, {167, 43}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{608, 612}, {241, 103}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{323, 663}, {194, 73}} - com.apple.InterfaceBuilder.CocoaPlugin - - {{525, 802}, {197, 73}} - {{0, 736}, {455, 20}} - com.apple.InterfaceBuilder.CocoaPlugin - - {74, 862} - {{6, 978}, {478, 20}} - com.apple.InterfaceBuilder.CocoaPlugin - {{273, 693}, {231, 43}} - com.apple.InterfaceBuilder.CocoaPlugin - {{475, 832}, {234, 43}} - com.apple.InterfaceBuilder.CocoaPlugin - com.apple.InterfaceBuilder.CocoaPlugin - com.apple.InterfaceBuilder.CocoaPlugin - {{207, 693}, {173, 43}} - com.apple.InterfaceBuilder.CocoaPlugin - {{231, 634}, {176, 43}} - com.apple.InterfaceBuilder.CocoaPlugin - com.apple.InterfaceBuilder.CocoaPlugin - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{608, 612}, {215, 63}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{314, 134}, {512, 384}} - com.apple.InterfaceBuilder.CocoaPlugin - {{314, 134}, {512, 384}} - - {{33, 99}, {480, 360}} - {3.40282e+38, 3.40282e+38} - com.apple.InterfaceBuilder.CocoaPlugin - 
com.apple.InterfaceBuilder.CocoaPlugin - - - 3 - MAA - - - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{12, 553}, {220, 183}} - com.apple.InterfaceBuilder.CocoaPlugin - - {{23, 794}, {245, 183}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - {{121, 533}, {196, 203}} - com.apple.InterfaceBuilder.CocoaPlugin - - {{145, 474}, {199, 203}} - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - com.apple.InterfaceBuilder.CocoaPlugin - - - - - YES - - YES - - - YES - - - - - YES - - YES - - - YES - - - - 377 - - - - YES - - DispatchLifeGLView - NSOpenGLView - - IBProjectSource - DispatchLifeGLView.h - - - - - YES - - NSApplication - NSResponder - - IBFrameworkSource - AppKit.framework/Headers/NSApplication.h - - - - NSApplication - - IBFrameworkSource - AppKit.framework/Headers/NSApplicationScripting.h - - - - NSApplication - - IBFrameworkSource - AppKit.framework/Headers/NSColorPanel.h - - - - NSApplication - - IBFrameworkSource - AppKit.framework/Headers/NSHelpManager.h - - - - NSApplication - - IBFrameworkSource - AppKit.framework/Headers/NSPageLayout.h - - - - NSApplication - - IBFrameworkSource - AppKit.framework/Headers/NSUserInterfaceItemSearching.h - - - - NSBrowser - NSControl - - IBFrameworkSource - AppKit.framework/Headers/NSBrowser.h - - - - NSControl - NSView - - IBFrameworkSource - AppKit.framework/Headers/NSControl.h - - - - NSDocument - NSObject - - YES - - YES - printDocument: - revertDocumentToSaved: - runPageLayout: - saveDocument: - saveDocumentAs: - saveDocumentTo: - - - YES - id - id - id - id - id - id - - - - IBFrameworkSource - 
AppKit.framework/Headers/NSDocument.h - - - - NSDocument - - IBFrameworkSource - AppKit.framework/Headers/NSDocumentScripting.h - - - - NSDocumentController - NSObject - - YES - - YES - clearRecentDocuments: - newDocument: - openDocument: - saveAllDocuments: - - - YES - id - id - id - id - - - - IBFrameworkSource - AppKit.framework/Headers/NSDocumentController.h - - - - NSFormatter - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSFormatter.h - - - - NSMatrix - NSControl - - IBFrameworkSource - AppKit.framework/Headers/NSMatrix.h - - - - NSMenu - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSMenu.h - - - - NSMenuItem - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSMenuItem.h - - - - NSMovieView - NSView - - IBFrameworkSource - AppKit.framework/Headers/NSMovieView.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSAccessibility.h - - - - NSObject - - - - NSObject - - - - NSObject - - - - NSObject - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSDictionaryController.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSDragging.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSFontManager.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSFontPanel.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSKeyValueBinding.h - - - - NSObject - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSNibLoading.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSOutlineView.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSPasteboard.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSSavePanel.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSTableView.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSToolbarItem.h - - - - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSView.h - - - - NSObject - - 
IBFrameworkSource - Foundation.framework/Headers/NSArchiver.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSClassDescription.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSError.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSFileManager.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSKeyValueCoding.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSKeyValueObserving.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSKeyedArchiver.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSObject.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSObjectScripting.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSPortCoder.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSRunLoop.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSScriptClassDescription.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSScriptKeyValueCoding.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSScriptObjectSpecifiers.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSScriptWhoseTests.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSThread.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSURL.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSURLConnection.h - - - - NSObject - - IBFrameworkSource - Foundation.framework/Headers/NSURLDownload.h - - - - NSOpenGLView - NSView - - IBFrameworkSource - AppKit.framework/Headers/NSOpenGLView.h - - - - NSResponder - - IBFrameworkSource - AppKit.framework/Headers/NSInterfaceStyle.h - - - - NSResponder - NSObject - - IBFrameworkSource - AppKit.framework/Headers/NSResponder.h - - - - NSTableView - NSControl - - - - NSText - NSView - - IBFrameworkSource - 
AppKit.framework/Headers/NSText.h - - - - NSView - - IBFrameworkSource - AppKit.framework/Headers/NSClipView.h - - - - NSView - - - - NSView - - IBFrameworkSource - AppKit.framework/Headers/NSRulerView.h - - - - NSView - NSResponder - - - - NSWindow - - IBFrameworkSource - AppKit.framework/Headers/NSDrawer.h - - - - NSWindow - NSResponder - - IBFrameworkSource - AppKit.framework/Headers/NSWindow.h - - - - NSWindow - - IBFrameworkSource - AppKit.framework/Headers/NSWindowScripting.h - - - - - 0 - ../DispatchLifeGL.xcodeproj - 3 - - diff --git a/examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib b/examples/DispatchLife/English.lproj/MainMenu.nib/keyedobjects.nib deleted file mode 100644 index 05cdecfbf52574b74d6f15bb75a4b6cfa9159b00..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 19575 zcmb7r2YgfI*Z=d}o7{Vwj@+#7CTY`|Cf!Yw6qLPJX(?sajzC+w>;THVSN28(1OZvf z5(E(tkRKvj5Rkn$$QBh*hQR;aqzx(U@BP1T{b24q-*e7$p0l3k=9U*1R#wHuokAE9 zh(sbJW@?yW%|1Hu@FaN57$e&@=QLy}$@F*b{qUDOTV>ti{1N1nY1Fj>GXd0Vm=V+zdCz zEpSWR7I(+#I0N^Kqh!}-{T3vmUm#8tQ&*Wi!v7(5n_!;|nVJQsh8=i&Ky5&jad zz$@`rcoW`+x8v{e5BMPd2_M3T@iBZ3pT`&QMSK%ZL@kH^gRQ3-KMXmDoY-BlZ&qh=at> z#A)ITah5noTq150zY=$d2gF~*Q{r#p1xb=L$&hZOg!CePNMF*A3?joxJsC|JNFy0f zTFF$h3E7NnPj)7|kX^}ivM>`!Ksd1MJ$N|ur3k!DrzHYD{3d|D(WrjBkC*46_toeMP;HYQMIT>G)y#BG($8~ z^s#8MXpQJw(I(Mm(Js+p(a)kIq7$OiqBElFqKBeKqGzJ#q8DODtPm^3DzTr~UmPZm z6Q_!sh?|OAh&zfqiF=B(#J$A*#f9P`@gQ-9c%*ogc#?Rsc#3$sc!v06@m;DJHIJH4 zeMT*y7E+6-&#A@K5^5>6j9N~8L48TBpjJ|=sMXXO>MLq3wT}9lT2F1DzM(c!-%^{X z&D0j^J8CPnjoMCqPwk-oL+zw~pmtF|QoE@=)Lv>IwVygb9i)Du4pE1xpQ$6%QR*1= z3w4}2L7k*dQKzXh)LH5rb)LFFU8F8im#Hh%Rq7gbow`BYq;65Ssb8r()LrTxb)R}b zJ)|B{zf(`>3i=QFPx=Y{7yXp}oBoG>Mn9)t&@btK=~wh?`VE5^%n%I8h!`A#Iwq6}W5SsT zCX$I_^h`8kV2n%*6U)Rg@k|1f$RshzjEPBM%#4MxGO0`xrYZ9t^FGszY0k7@S~9Je z)=V3wEz^!^&valqGM$*tOc$mr(~U`Ex-;oa2GfJdWO_1LOfRN4(}(HH^ke!n*~|w_ z4wK8|G5L&*8Ndu=3YbEsh#ABbGbKzpQ^^eO5n5VZTzr+4BL=x4335js$P;-XDPoZf 
z$&msnkqUVuALNVtkUt7QfhY*6kp^i|FbY9B6pF%7IEp}#C<^IObe}HiU2;lnl^tz0 zmHoP;w=B-7taO~goi-J4Ye-qeplsH!OM3IXJX>XDVQyh@VO34n!u-n2oZ@O*W$!NO z!ihj2o0YIJ;n zhE@UfY?keko>>Us#kQV>rTJw;1ZM4wJ8^1ph4=e9SaRIT49ydSvh!9ep5sPCp=y~c ztElAhdpw@QiFv|@$Di`JDTlZKp77=f86M72Jbs`1mB;gV!k5Q$c|vspG;TyOC>FG4 z2N8JVL>vihHZO-Dk(VRLd;%q)M3jV*vm8u;_8bSL@c1&6j7J4)Cy~rT+YrF* znbD>=XJ92LQ@CKqVWpfe=f+7mh9x*3&Ykm+R}Dpo^W#u9>pOgSqB+T+O-wS6G-!t> z$0xwCIcem`zO4!?%X6yo3OW@Iuz@liyk)cg!xLkY;u6dWw&+ANJp1-!IVcz9p?qXRIiTwF>fFJ=l*6C|`|NJ3EH5j~w^abU>DA@73gI?j4X%^H zK4O9%oeXeZoF}J}VZ?cJO3u?Uk_D(}DJtZooZ>71*RhZdc2x|+l!nSsIj~Z}v786z zA(Oq&$$^Nh_giVRS-GpYZ956dbC}#{pTwRp|Q?GeNMNUmmuxR1e1=#ThhlVb9AY`+u;ltw$+Bjh_;uFkpn37^1 zIr4whyMxGBG;SFhhfMYvUu$%>lJer5Dx2eh325SSG!ad*%Tdc$x~-}Q=-yW4P~;R; zw4RMXQ_(au9nC;9(Z^^O`UK5JbI@G$DVm4oqtDO+v=A*qpQFWS30jJlq2=fc^d(w> zR-#pCHCltdLTk}F^fg+KHlT0NMlO)kbFo|!XXW1KT5;{UE?hd7#r5ZMxq)0U_aRr! z)o`P@kGP54G;S6*k6XwsS5RcFM`M6@z`GmsMBf*|K*o=TciqS!rQjr?NczR0c2D`orqN!JW(Ug{cex_Nfa~t7~cd67a=f$aT+v zJ#;P{YRk7@=hX7iZ{EC-Lp(v8h70DjTo@N|3V1jS_&=i~=qNgdegR~`!-3fd479JZ zm4M4k;IWB&&f_F7#1<7W`L)f9i?vl{+BOw1+q<1ar+|bJoknNSS@3gUaCTwsu2RQ^ zav{K`oe8@e5eT;|u;mRZ$*CCR82gLp(lT_Z);JtCTt}pi_!WUT7cNkL+?jBacge@B0yKVcaE5{9v!&JCb*qoDJw&V{9gCEzvEO0ro6 zxRn8A6(z#8oT};yxNJRzUe+3s4!r`nH(&>?Y?W1orS@3E5nLj`$DOrXk<*4S#snto zEQpJ@4>y<4V5G4dmY`$UgU9YX4iJXb6Na^UZdrAe*44{ovq5DQ;QZ`b10|Q{=Tzj| zAM2J=YAd$ZDg?7=E0%K>;3&C)La-96us8NW`>~&0Ato0iOaa!+f$X{)ll>JkVoPUKKP4(HwjR!b`wr7rP5{tF06+&|j%SpW6=xKdb9HKz!(46yU~L63 zPTQ{=H~=lU%j)u&n3%L~{VS?Vg-e}sYG8Doa%BQ=7S|boJ2WO&AKVxB1K8{aa&-i# zP6E^bNH8i;Au2^xFg)d%<=_ zRzcE=9t>TZLU zM3vyC1uKM9+q=SkrB23-GW&HW)+)eS!wmqeyoRE##q03bc)b%Z-$hhg9jG&!;LUgo zh`W`?!8{HXB-ZtAS!~Ox&Z@a>LTRnf4R?wpWt~tg18BmUL<$zMs7|L;c0-pqw(+yBc z0i~=KC9S%!zSm6quy>WMqC{}jZ=o&$)D^A@peh=Jx{hz)n+*i=1q-dL6R3|}oUT;` z&~k!-6a=xKxRfAo^#>seKjMZ0+~CIGe#d`cAw=0TjoLWj(BF`{fy_z=LllBMU{#k2 zXmH#p)(gP;mm3LK!y00}#%~BjU?7p?hj!NW=I|7{*}UM-I)_75OCDo z1i%^70Ef^L!9)n5L;Hy^yP=GA5pf*gjCXMXL==b^&E4aka}P3FmX+6}<@LyeA16{A 
zASG~90clcWNXdkWNC6~E1Ek4-G(|w_oo*|JbVV4tw4C0R_7gaEA~XYp7Tm{xFugH^ z)r@pKw4`5JRxVSJ9kQy58wm11pq$3A-I+(B#MYZ zPAhO7ozDQ=?kY1|ftsppJ@f59PQsNyxSCr6gcmhNcqmar3xMd<_8W z1ppSju00(jHi9v1;vR!B{9%JOhag@)h&KS*OSS1*8|WC>1#E6_JURK<2K;=_Z3cce zHWYIev6J|L*ah6|wu|Y?#kau4CV>kGARVep%I(3^K8c{mlU4*Nu@JFr6^>a3*V>oX zHm{mj*m9isKLP$>Zad(A*AQPz93_qszX1M;dTJIDpsj$v&93I|Hc)f#9u@H8Bzqpn zUgUNH*&PkZW)N41tHd>@sk_dz{{Y$AtRSHNw9@u0C@i*VGiu6Vj%Bmzsv$mD(N@xv`x$sR)CdnGOUg*OQ@%PLgpBfVoqT1` z$ZZKLJi~6vl>XzYk6?gy@kWD+mX>A3Wfhfa zEg>;`Tk_d2)^i_Z3@{PLodG6JHDH3sAQQsZO_80 z0;iLQY{M}swRhh&JhCY;@IH4D7&zC6qamA+Lu;BHth%%PD6pnM|S6~0Y8@;=cfmmNkZ%+ z?XeGasmIS1;OD9^GJPG$Y1L&_w#qb^hQSWp=GD+H25ZQ(-+}fdv;eBAg?6)(fe(Oz zT<$h7aHDYsY~%oPATUrU7zSDFWI!;Do4~*=fdQ86<6xmvVdohMgB;r%dULb6quig|T^>j9(6_k3#c^vnHIMzdv0OekhR4xdDvuQ$&0}va zo}13&K<*RnHIJ>_B3CdAP+)ttZf>SKY_LnlRrZa8dW#z$dk||084>BNi{Sb z9dVJn>oN<*LjGAB4Y>Qxf{i>)o`KnLj>im-Ey9d&%V?cnSk=GML8~^vItKm{Dq2U( zAr6z9h!%vJs3r!$P;P_4Tn@v!9n|q1h&G)Y!z}{QCUK8IZ7&235}cw|4Cs1-8015t z_enk?wB%#*cc@1WfT7wOXXIbdyKG+xC9=?+&WHY2`^t1%c}_(RbmBPIEKUo7E!a&k zVz>WB`vv*3KJEVxf%-Zov?8&Hf*fXJosLDG^$4tk6s1m~zh?+3Nn=VA$xzXk#FsEb zR*_YZoU8>K{0r=HBd2I&;u5Pw{-OX;po`~lO;euKd5&HJoK{$?Xq-r>C`=UYGW+X@ z2($lb-R$pYC(;d?1BDRr8z&SaiWSA7{h|cHjfj%#rEdQKp=WkCLW2YEQ7Lqwgdzm| zlonc>T7i6;&E9quSwz-lA}cc0HnEL9dS3M@qL*S@o zg2$+zt9Pz!6uciQS`IF(JNWB!FxPMVci?=H4ZntZxK8@wdcfcw$hXL%OpxA|SI{VA zIVxHPe)L~(O~W`vhR|8E&tTDDSdjvk*o#y2YTh}guvF;EIPMHbMJvfQIt_A;F0FHUE;K)my?A7-(7>kyQmc8Q(T>D2Z z&ZI$p123-?trL9>lVF2A$#Qv-$1DepE}1Y1itVjY8yW!lUIqLsg%w5{5?YvsMjaH; z{Z6#?f5%j#(*+3Z7VRM?ikz!QqC>7-9f)H*R=(A`Dl1)B$%G~UcQoz{2BLkUW9XRZ zIK0MVUm+OyIE>>YDq6xRpuEf$7Udj71k?F%f}Inc|33-VV7P9IZvF2F^aYND{*=vy zo%$0RZ1b_`cj&4IIc@WwdbVj2Rvw%)S`D7O*!GTw99nn@Hi?H~Ib;_ba5y&M0=$si zjoUy^J%fX=4Z%B%OND6PmCJ@84l%uh(;(~fgq326*d4FMo9lQI%bmS_p`OTx2I^a$ zLi%)tWe}~SZQ2-XVsBwyuYr^V2FV*#@=k}bsVu!4GEkit6G`goj}=~!ZK z6s{P;AbLdHCx1lSAcfrp$tL8gb-48{1ML=9iL2ikSl5!?#WMPX#W`RrT`aA^IK$=} zJQ;uDlw)E&IeH6ABu=H7-w~h>K)elD_N_VMX`G11sgNVOG^p*_hIl3#xlGvTV4t_* 
zS&n+81}lkGumcda4xeL-!HTVe(_r{ZJ;H(C65iQa|o~?xh zcP2aqp%JLOgkDDm)Hhm`iZOl-HA)&#se$J*$SF|$2(=9s=)<29p4AHP2q$p#8s1b2 z6oF=QK!am)yYLiNLRMdcCv$h9f>9O$NYskDIGL1 z?dVb!g8rc{guG!H?6A?fR)$bNX4Te)P!~c+ZJDz!-1kmh2s(zkP*9(vFcci}+tN@N ziFb;_u7blq2D!bXIvfTCp->-w=co@iLa|rjC=0n4!f=@&S2`_Tq=*sr)<;!Y9LiW6;egiAgY)up-QPT zs+{_es-P;VDyo_qObwxiQZ>{tYB)858cB_!MpGQcQy)=dsIk;IYCJW8nn+EeCR0$AfrW%;ORsm-4ub$K^c!kjE7~uHZJRZhl(EJD7~p^@JnT7@=f6>4Dz=l{^&M^V{qh~Qqe z!sZ-pWaIT#R9XVG+U$=-HdY=15g!*KE?vN=K90A9U=@{B=WAhyh|A-S*c;WyS&|zQ z-=Sx3rP;elfTVVLwc$RKs4`aeU)11i`U^s{E>wipge!bfEz|bv^&hp%QU8+A;sb)- zE(G6(`l$4HV|<;skuC`ryrFb`qc;DA%PL3E(?+^3>BDuY{qe}UOc|~y99J5B&M};@ zx>h#=Lfs7SxXLZ$q;SdKg^&9Ee7Nc7a|lXE(vQgu2z>^esJ}Ahzo?b=d>} zcU-4c!(DmsK%fi$yUlI5?P@6XT0n5mzE7jp!~7Tj;p;-ZPS1jx--N@sR9Xsj=UuMa=Wp#g3cTQ|dAri;F<5J|j#=wE zpr&3_O8*?>$yPU!_O*7R>;Uwc~D6VeH#Y-oo0S~UaaaD_b4-&A`aIBmL z;JR2CWl#2~-^l$-oGHj3t|xy9`N#-+Li`d6<%zIi_e-=9(!V$;oR<@GpvN>GnxPZP zJ!m;3b4p=zE-VhbPYgicklSGc?;~jG4<#4DR^8obDkN;4Lwd66(oRaG?N^P z3d!A&rZ0m6JQqKLE_8D=4-&*_P}wX&^U1xikT4t)$IsA0G7vTePeU7s?_ulkEa+Kl zp$Vuap2K$FH&B3Q5#3QLT7`F_rO?U#7Pbt3j6Q)yi|Md!_)~HiS_joqcanodb24l! 
zUW?X4D|akf0j2*HXaHbHGdJNiXbx_JW&>5W1T@n)OQBasI<1&Q2@d)JDfslY7f)sofY)B4* zbagaz*R7CdN`-tAlK*&EJGcRh43i)^&xQW`YoVr_$U(A`A}mhGq5oG5JDHEdUS>V) zRNe?Xo?AhCek^oAk3rS>0~ZH{K?mrF(y*_&0CqY*64KCxuqC>^upDBA{mzh6MZi|) zUEF0ziiPC0H*AXD0~?>$3U&4h=yp^L_;PE#+-pJ!`dAy0on|Zv2$53Ew zzpY!+yk6-fmUmm~W@oOHx;R(bOm?z+iK$0g!JR#-@iYI8EV0hRrMo4(VohLka z!jmVwctXk(EKkUILe3Kko=}46v6t8&8IEMTvSl(nnEh3TZR~j&?#+&1&#}kYbao7T zn*B(IBiILQJ2s8I!(L$z$Z&u5kPPRuC)sY?ut^u_xH+Y)|$udxX6x!v-0SVt;3?>`1l?`;!b;vQOFZ>@D^T`;fgM z!$oW++g<^i`Gvn%>>n~*EW^<N$qhz?6?JUEC*oo`~87{DUa;DVY+;tqo(k_^#W7aZNbtmwN6ElPvx{MiW|8}H( z3&*ESt#YbzBvOT}+qKm}PHdnLyuCne`3mnByv*CjV4 zwapBom&b0810Fwl9QHWkam?em#|@9$9(O(NdpzxwSJ(E2% zJ+nOfdFFcNdk*j{@GSBy_N?(7;W^5a_nhiE-E*PmYR|7c*Lkk@{KfNx=ULB7o>x4t zdEW56<@u}UL(ktl|Mhb7a`*D|l6u8?C3vNHHSv1StC?2|uU1}dygGVy_UhwR;8o;R z>{aUZiPs{p#a>@{t@irLYn|76uW!7*_1f&U$7`S0DX;5ZH@$9q-H|dR-xxYM6u9j=%A@WeUS#FiLmUowD$TQ_x@*4SYIVT?{pCF$kpCX?opCMl$ z-z5K0zDK@Ken5U#eqa7r{+Il3`7`+o`M>hl3R6SE6yk`DlRLoDy}PTDsC(OQj$tmDOW0$ z-pUlEMfskxrLwiMt+Ktcqq4KIx3aIYzw!g6O*v3mq8z3ip&X^;luMP%m8+ENl zD8E&1R(_}4t=y|Tro5!QqP(WOp(0gcl|&^~$y5rJO68;SQw6Css!&zDDp8fJN>OF0 zdaJTk`Kke`0#%W!SXHVTt{SNtt>RTPRkKt}RbQ(%s5YuLsZOiTsxGOnt8S`ptL~`o zsUE2QRK4(~z1_Usy*<5SyyLu+y{+C&yx;R~=H0@(m3Ig4PTuL>A9&|_=X($Ep5Q&n zd%E{0-gCS^^`7s&zw~cQ*-$LI(zU98vzC(O#e24pv z^d0Rx)_1(`RNwi&3w#&(F81B!yW97G@6WzReSh&i;d{#WjPGB5D!&lFP`_}$NWb=e z9sRob_3-QI*UPVuUq8QWzkI(B{YLq5ejoXb_50FqrQce=Z~VUX+wAw9-!{MR{SNva z_B-PDi{E9xtA0=Y34f74<`A7T5`6v6Q_*?u_{hRu~??1qQqW>KKPyOfnFYw>t zztew@|3Uvl{y+O4_5a2Hg#T6l8~(Ta@AyCQe;VK!;2#hepbpRmG!JMQ&@P~JK-Yk@ zfb@VK0X+k%1EvQo2v`)bIACeO{(yr4M*>a+oC-J-a4z6Nz@>nD0S^Km1^gaJ2C4(2 z1C4>Pf$@Qvfmwn50&@fN0|x{a1QrDr2i61*4_q9$Ht_4f4S^d2&jy|kyb^db@OI#x zzgMW}>elME>h|hR>aOZEb-KETx~ICAy05yw`U7>I+NK_;E>sUv zm#E9s73wPWVD(V-F!c!aX!S?xaq5ZcDe7tJnd)uoJ?fv;N7X0Q=hPR}m(*9(*VH%E zx75F?|53kCzf!-^5E@$Jth?HFq`lH4invX&!6-&^*!nr47<*w4vH4 zZM4>?jn&3$6Sc|O6m1i2M{R~STbrZJ)7rEHwT0S2+L_u}+E29$wV!L3XqRcf(5}#K z(4N+w)n3wG*WT3L*51+H(>~BX4^{_jgTsRL!G_?N;JDy~;G|$vaM$3p;NHR6!2^P4 
z1b-YnH+VttqTt2BOM{mOe;K?ocy;iu;A6q(g0BbP489$FC-`3QgAiqicSt~pHY6k@ zG$cGEGDII@2#F188qz7GM@Zk0{vjWP7obz?w7T}Xj=FBT z9=e{o-nzcJ{<;rzxjLJ!LdWYS>z3-4>sIJi>(=Vl>o)2(>9**$>b}?gNB4v7uI|3> zvFGXMChr|Goj}~FNWR{8Ozp&h}{ICIG1!09@gThL}O2a-4n;-Uh*z&M1!&Zi^4qFqpHtg%L^dd*pslo!=8n`2>UnebvO>E!d2nw@Z|86a7%cT@b|-;hqnxG9o{y)LwKj~ zF5%t6Yr=*zd5v?OSM0AZvi%5^?5s?{@712AQPsG@W@exxZW=714m=p17#QcZ_ z5sM<0M68V167fUC-iQMcKSdmlco^|p#FL0;5icWNMZAf`kz}M-WJ;tZ^1aBGk*y=! zMRthn6xk)RTV#44cSRnE zJQH~}@^0k)$cK@SBmane5)~Mwj?zU%M(LvrQ87`mQSniUQAtrPql%+Sqbj3@Mh%M^ z88teJj~Wv-E^1=br%}tJ)<%6D^-a_dQ3s=rM4i#|`Z4-(`U(2U`l6IGT(W zN7K=6(eBZn(b8yHv?5v+?Gx=69S|K9t%(kf)lhp(ilJj9F`hAOj3UN6#xEu?MiUbf6BZL06Ahnf#>XVZq{LWb zn#MGXX&KWdrhQDOn65G1V|v77#q^2kACnW4AG084SIq91YcVfksaR=jXlz_;huA@} zGh>&>u8;jb_Gs+&*cY*{;}miFxE664aYb<>;wHu|h}#->D(+g`^LUSVU3^%4RD4Q& z*Z8#fnei**_rza_f095Y_$7oTv`ENJn2@kEVRypygjb2$#MH#LiJcM$Ck{=VlDIf= zY2uE=ONsXr|4u?lq9nhh@TA0~c1amYg-K(RCL~QtT9ULZX+zSXq%%q9lAa}pBu6J3 zl4Fz2$(H0M$*oPzOf5{UOl?fsjn&9lxxa24Kx**icO`a za#Mw=$~4$C)HKXA!Zg~%o5q;NnI@Pfo2Htkn`WA3nP!{jn&z24Gc7cIZdzhmX8OXk z!nDftm1&)6gK48_lWB`-t7*GwhiRv2muZh_pXq?{5}p#7qE9iT#HPfjB&H;%q@-96VWzvn`)mKC>*cEU_%Ntgx)MthKDSY_x2)Y_)uE*=hOFve$CJa>(+t<*4Pj<)r1b z<(%b$<+A0f<%Z>!9L)tH?@Q-K_3bPpi}_ zvns6KR$r^XHPEWD23vL3aBHMhZ#7tBtZ~)^YqB-PYPB}8zGrQ2ZE0<7ZD;LZ?PTq0 zO|zz3Gp$+HKGuHLY-_I7W-YK5S&OY@)(@?f)@thzYmIfdb(EF2jdd+&ndfR%(de8dM`qcWj^;s%TB~rzyUa8Vld1^qaHZ?XiDYZ>% W$JAcP{znjwUuKl!x9 - - - - CFBundleDevelopmentRegion - English - CFBundleExecutable - ${EXECUTABLE_NAME} - CFBundleIconFile - - CFBundleIdentifier - com.apple.example.DispatchLife - CFBundleInfoDictionaryVersion - 6.0 - CFBundleName - ${PRODUCT_NAME} - CFBundlePackageType - APPL - CFBundleSignature - ???? 
- CFBundleVersion - 1.1 - NSMainNibFile - MainMenu - NSPrincipalClass - NSApplication - - diff --git a/examples/DispatchLife/ReadMe.txt b/examples/DispatchLife/ReadMe.txt deleted file mode 100644 index 2beea13e8..000000000 --- a/examples/DispatchLife/ReadMe.txt +++ /dev/null @@ -1,37 +0,0 @@ -### DispatchLife ### - -=========================================================================== -DESCRIPTION: - -The classic game of Life showing use of dispatch queues as lightweight threads (each cell is a queue), and an example of how to avoid overloading a slow queue (OpenGL or curses screen updates) with many requests (cell updates) by using a timer to drive the screen updates and allowing the cells to update as fast as they can. - -=========================================================================== -BUILD REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -RUNTIME REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -PACKAGING LIST: - -DispatchLife.c - Simulation engine using GCD. -DispatchLifeGLView.h - OpenGL view for visualization. -DispatchLifeGLView.m - OpenGL view for visualization. - -=========================================================================== -CHANGES FROM PREVIOUS VERSIONS: - -Version 1.2 -- Updated to use current GCD source API. -Version 1.1 -- Updated to use current GCD API. -- Added OpenGL view for visualization. -Version 1.0 -- First version (WWDC 2008). - -=========================================================================== -Copyright (C) 2008-2009 Apple Inc. All rights reserved. diff --git a/examples/DispatchLife/main.m b/examples/DispatchLife/main.m deleted file mode 100644 index 59eb37af7..000000000 --- a/examples/DispatchLife/main.m +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (c) 2009-2009 Apple Inc. All rights reserved. 
- * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. - * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -#import - -int main(int argc, char *argv[]) -{ - return NSApplicationMain(argc, (const char **) argv); -} diff --git a/examples/DispatchWebServer/DispatchWebServer.c b/examples/DispatchWebServer/DispatchWebServer.c deleted file mode 100644 index d839d3bde..000000000 --- a/examples/DispatchWebServer/DispatchWebServer.c +++ /dev/null @@ -1,956 +0,0 @@ -/* - * Copyright (c) 2008 Apple Inc. All rights reserved. - * - * @APPLE_DTS_LICENSE_HEADER_START@ - * - * IMPORTANT: This Apple software is supplied to you by Apple Computer, Inc. - * ("Apple") in consideration of your agreement to the following terms, and your - * use, installation, modification or redistribution of this Apple software - * constitutes acceptance of these terms. If you do not agree with these terms, - * please do not use, install, modify or redistribute this Apple software. 
- * - * In consideration of your agreement to abide by the following terms, and - * subject to these terms, Apple grants you a personal, non-exclusive license, - * under Apple's copyrights in this original Apple software (the "Apple Software"), - * to use, reproduce, modify and redistribute the Apple Software, with or without - * modifications, in source and/or binary forms; provided that if you redistribute - * the Apple Software in its entirety and without modifications, you must retain - * this notice and the following text and disclaimers in all such redistributions - * of the Apple Software. Neither the name, trademarks, service marks or logos of - * Apple Computer, Inc. may be used to endorse or promote products derived from - * the Apple Software without specific prior written permission from Apple. Except - * as expressly stated in this notice, no other rights or licenses, express or - * implied, are granted by Apple herein, including but not limited to any patent - * rights that may be infringed by your derivative works or by other works in - * which the Apple Software may be incorporated. - * - * The Apple Software is provided by Apple on an "AS IS" basis. APPLE MAKES NO - * WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION THE IMPLIED - * WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND OPERATION ALONE OR IN - * COMBINATION WITH YOUR PRODUCTS. 
- * - * IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE - * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR - * DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF - * CONTRACT, TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF - * APPLE HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * @APPLE_DTS_LICENSE_HEADER_END@ - */ - -/* A tiny web server that does as much stuff the "dispatch way" as it can, like a queue per connection... */ - -/**************************************************************************** -overview of dispatch related operations: - -main() { - have dump_reqs() called every 5 to 6 seconds, and on every SIGINFO - and SIGPIPE - - have accept_cb() called when there are new connections on our port - - have reopen_logfile_when_needed() called whenever our logfile is - renamed, deleted, or forcibly closed -} - -reopen_logfile_when_needed() { - call ourself whenever our logfile is renamed, deleted, or forcibly - closed -} - -accept_cb() { - allocate a new queue to handle network and file I/O, and timers - for a series of HTTP requests coming from a new network connection - - have read_req() called (on the new queue) when there - is network traffic for the new connection - - have req_free(new_req) called when the connection is "done" (no - pending work to be executed on the queue, an no sources left to - generate new work for the queue) -} - -req_free() { - uses dispatch_get_current_queue() and dispatch_async() to call itself - "on the right queue" -} - -read_req() { - If there is a timeout source delete_source() it - - if (we have a whole request) { - make a new dispatch source (req->fd_rd.ds) for the - content file - - have clean up fd, req->fd and req->fd_rd (if - appropriate) when the 
content file source is canceled - - have read_filedata called when the content file is - read to be read - - if we already have a dispatch source for "network - socket ready to be written", enable it. Otherwise - make one, and have write_filedata called when it - time to write to it. - - disable the call to read_req - } - - close the connection if something goes wrong -} - -write_filedata() { - close the connection if anything goes wrong - - if (we have written the whole HTTP document) { - timeout in a little bit, closing the connection if we - haven't received a new command - - enable the call to read_req - } - - if (we have written all the buffered data) { - disable the call to write_filedata() - } -} - -read_filedata() { - if (nothing left to read) { - delete the content file dispatch source - } else { - enable the call to write_filedata() - } -} - -qprintf, qfprintf, qflush - schedule stdio calls on a single queue - -disable_source, enable_source - implements a binary enable/disable on top of dispatch's - counted suspend/resume - -delete_source - cancels the source (this example program uses source - cancelation to schedule any source cleanup it needs, - so "delete" needs a cancel). 
- - ensure the source isn't suspended - - release the reference, which _should_ be the last - reference (this example program never has more - then one reference to a source) - -****************************************************************************/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -char *DOC_BASE = NULL; -char *log_name = NULL; -FILE *logfile = NULL; -char *argv0 = "a.out"; -char *server_port = "8080"; -const int re_request_nmatch = 4; -regex_t re_first_request, re_nth_request, re_accept_deflate, re_host; - - -// qpf is the queue that we schedule our "stdio file I/O", which serves as a lock, -// and orders the output, and also gets it "out of the way" of our main line execution -dispatch_queue_t qpf; - -void qfprintf(FILE *f, const char *fmt, ...) __attribute__((format(printf, 2, 3))); - -void qfprintf(FILE *f, const char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - char *str; - /* We gennerate the formatted string on the same queue (or - thread) that calls qfprintf, that way the values can change - while the fputs call is being sent to the qpf queue, or waiting - for other work to complete ont he qpf queue. */ - - vasprintf(&str, fmt, ap); - dispatch_async(qpf, ^{ fputs(str, f); free(str); }); - if ('*' == *fmt) { - dispatch_sync(qpf, ^{ fflush(f); }); - } - va_end(ap); -} - -void qfflush(FILE *f) { - dispatch_sync(qpf, ^{ fflush(f); }); -} - -void reopen_logfile_when_needed() { - // We don't want to use a fd with a lifetime managed by something else - // because we need to close it inside the cancel handler (see below) - int lf_dup = dup(fileno(logfile)); - FILE **lf = &logfile; - - // We register the vnode callback on the qpf queue since that is where - // we do all our logfile printing. 
(we set up to reopen the logfile - // if the "old one" has been deleted or renamed (or revoked). This - // makes it pretty safe to mv the file to a new name, delay breifly, - // then gzip it. Safer to move the file to a new name, wait for the - // "old" file to reappear, then gzip. Niftier then doing the move, - // sending a SIGHUP to the right process (somehow) and then doing - // as above. Well, maybe it'll never catch on as "the new right - /// thing", but it makes a nifty demo. - dispatch_source_t vn = dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, lf_dup, DISPATCH_VNODE_REVOKE|DISPATCH_VNODE_RENAME|DISPATCH_VNODE_DELETE, qpf); - - dispatch_source_set_event_handler(vn, ^{ - printf("lf_dup is %d (logfile's fileno=%d), closing it\n", lf_dup, fileno(logfile)); - fprintf(logfile, "# flush n' roll!\n"); - dispatch_cancel(vn); - dispatch_release(vn); - fflush(logfile); - *lf = freopen(log_name, "a", logfile); - - // The new logfile has (or may have) a diffrent fd from the old one, so - // we have to register it again - reopen_logfile_when_needed(); - }); - - dispatch_source_set_cancel_handler(vn, ^{ close(lf_dup); }); - - dispatch_resume(vn); -} - -#define qprintf(fmt...) qfprintf(stdout, ## fmt); - -struct buffer { - // Manage a buffer, currently at sz bytes, but will realloc if needed - // The buffer has a part that we read data INTO, and a part that we - // write data OUT OF. - // - // Best use of the space would be a circular buffer (and we would - // use readv/writev and pass around iovec structs), but we use a - // simpler layout: - // data from buf to outof is wasted. From outof to into is - // "ready to write data OUT OF", from into until buf+sz is - // "ready to read data IN TO". 
- size_t sz; - unsigned char *buf; - unsigned char *into, *outof; -}; - -struct request_source { - // libdispatch gives suspension a counting behaviour, we want a simple on/off behaviour, so we use - // this struct to provide track suspensions - dispatch_source_t ds; - bool suspended; -}; - -// The request struct manages an actiave HTTP request/connection. It gets reused for pipelined HTTP clients. -// Every request has it's own queue where all of it's network traffic, and source file I/O as well as -// compression (when requested by the HTTP client) is done. -struct request { - struct sockaddr_in r_addr; - z_stream *deflate; - // cmd_buf holds the HTTP request - char cmd_buf[8196], *cb; - char chunk_num[13], *cnp; // Big enough for 8 digits plus \r\n\r\n\0 - bool needs_zero_chunk; - bool reuse_guard; - short status_number; - size_t chunk_bytes_remaining; - char *q_name; - int req_num; // For debugging - int files_served; // For this socket - dispatch_queue_t q; - // "sd" is the socket descriptor, where the network I/O for this request goes. "fd" is the source file (or -1) - int sd, fd; - // fd_rd is for read events from the source file (say /Users/YOU/Sites/index.html for a GET /index.html request) - // sd_rd is for read events from the network socket (we suspend it after we read an HTTP request header, and - // resume it when we complete a request) - // sd_wr is for write events to the network socket (we suspend it when we have no buffered source data to send, - // and resume it when we have data ready to send) - // timeo is the timeout event waiting for a new client request header. - struct request_source fd_rd, sd_rd, sd_wr, timeo; - uint64_t timeout_at; - struct stat sb; - - // file_b is where we read data from fd into. 
- // For compressed GET requests: - // - data is compressed from file_b into deflate_b - // - data is written to the network socket from deflate_b - // For uncompressed GET requests - // - data is written to the network socket from file_b - // - deflate_b is unused - struct buffer file_b, deflate_b; - - ssize_t total_written; -}; - -void req_free(struct request *req); - -void disable_source(struct request *req, struct request_source *rs) { - // we want a binary suspend state, not a counted state. Our - // suspend flag is "locked" by only being used on req->q, this - // assert makes sure we are in a valid context to write the new - // suspend value. - assert(req->q == dispatch_get_current_queue()); - if (!rs->suspended) { - rs->suspended = true; - dispatch_suspend(rs->ds); - } -} - -void enable_source(struct request *req, struct request_source *rs) { - assert(req->q == dispatch_get_current_queue()); - if (rs->suspended) { - rs->suspended = false; - dispatch_resume(rs->ds); - } -} - -void delete_source(struct request *req, struct request_source *rs) { - assert(req->q == dispatch_get_current_queue()); - if (rs->ds) { - /* sources need to be resumed before they can be deleted - (otherwise an I/O and/or cancel block might be stranded - waiting for a resume that will never come, causing - leaks) */ - - enable_source(req, rs); - dispatch_cancel(rs->ds); - dispatch_release(rs->ds); - } - rs->ds = NULL; - rs->suspended = false; -} - -size_t buf_into_sz(struct buffer *b) { - return (b->buf + b->sz) - b->into; -} - -void buf_need_into(struct buffer *b, size_t cnt) { - // resize buf so into has at least cnt bytes ready to use - size_t sz = buf_into_sz(b); - if (cnt <= sz) { - return; - } - sz = malloc_good_size(cnt - sz + b->sz); - unsigned char *old = b->buf; - // We could special case b->buf == b->into && b->into == b->outof to - // do a free & malloc rather then realloc, but after testing it happens - // only for the 1st use of the buffer, where realloc is the same cost as 
- // malloc anyway. - b->buf = reallocf(b->buf, sz); - assert(b->buf); - b->sz = sz; - b->into = b->buf + (b->into - old); - b->outof = b->buf + (b->outof - old); -} - -void buf_used_into(struct buffer *b, size_t used) { - b->into += used; - assert(b->into <= b->buf + b->sz); -} - -size_t buf_outof_sz(struct buffer *b) { - return b->into - b->outof; -} - -int buf_sprintf(struct buffer *b, char *fmt, ...) __attribute__((format(printf,2,3))); - -int buf_sprintf(struct buffer *b, char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - size_t s = buf_into_sz(b); - int l = vsnprintf((char *)(b->into), s, fmt, ap); - if (l < s) { - buf_used_into(b, l); - } else { - // Reset ap -- vsnprintf has already used it. - va_end(ap); - va_start(ap, fmt); - buf_need_into(b, l); - s = buf_into_sz(b); - l = vsnprintf((char *)(b->into), s, fmt, ap); - assert(l <= s); - buf_used_into(b, l); - } - va_end(ap); - - return l; -} - -void buf_used_outof(struct buffer *b, size_t used) { - b->outof += used; - //assert(b->into <= b->outof); - assert(b->outof <= b->into); - if (b->into == b->outof) { - b->into = b->outof = b->buf; - } -} - -char *buf_debug_str(struct buffer *b) { - char *ret = NULL; - asprintf(&ret, "S%d i#%d o#%d", b->sz, buf_into_sz(b), buf_outof_sz(b)); - return ret; -} - -uint64_t getnanotime() { - struct timeval tv; - gettimeofday(&tv, NULL); - - return tv.tv_sec * NSEC_PER_SEC + tv.tv_usec * NSEC_PER_USEC; -} - -int n_req; -struct request **debug_req; - -void dump_reqs() { - int i = 0; - static last_reported = -1; - - // We want to see the transition into n_req == 0, but we don't need to - // keep seeing it. - if (n_req == 0 && n_req == last_reported) { - return; - } else { - last_reported = n_req; - } - - qprintf("%d actiave requests to dump\n", n_req); - uint64_t now = getnanotime(); - /* Because we iterate over the debug_req array in this queue - ("the main queue"), it has to "own" that array. All manipulation - of the array as a whole will have to be done on this queue. 
*/ - - for(i = 0; i < n_req; i++) { - struct request *req = debug_req[i]; - qprintf("%s sources: fd_rd %p%s, sd_rd %p%s, sd_wr %p%s, timeo %p%s\n", req->q_name, req->fd_rd.ds, req->fd_rd.suspended ? " (SUSPENDED)" : "", req->sd_rd.ds, req->sd_rd.suspended ? " (SUSPENDED)" : "", req->sd_wr.ds, req->sd_wr.suspended ? " (SUSPENDED)" : "", req->timeo.ds, req->timeo.suspended ? " (SUSPENDED)" : ""); - if (req->timeout_at) { - double when = req->timeout_at - now; - when /= NSEC_PER_SEC; - if (when < 0) { - qprintf(" timeout %f seconds ago\n", -when); - } else { - qprintf(" timeout in %f seconds\n", when); - } - } else { - qprintf(" timeout_at not set\n"); - } - char *file_bd = buf_debug_str(&req->file_b), *deflate_bd = buf_debug_str(&req->deflate_b); - qprintf(" file_b %s; deflate_b %s\n cmd_buf used %ld; fd#%d; files_served %d\n", file_bd, deflate_bd, (long)(req->cb - req->cmd_buf), req->fd, req->files_served); - if (req->deflate) { - qprintf(" deflate total in: %ld ", req->deflate->total_in); - } - qprintf("%s total_written %lu, file size %lld\n", req->deflate ? "" : " ", req->total_written, req->sb.st_size); - free(file_bd); - free(deflate_bd); - } -} - -void req_free(struct request *req) { - assert(!req->reuse_guard); - if (dispatch_get_main_queue() != dispatch_get_current_queue()) { - /* dispatch_set_finalizer_f arranges to have us "invoked - asynchronously on req->q's target queue". However, - we want to manipulate the debug_req array in ways - that are unsafe anywhere except the same queue that - dump_reqs runs on (which happens to be the main queue). 
- So if we are running anywhere but the main queue, we - just arrange to be called there */ - - dispatch_async(dispatch_get_main_queue(), ^{ req_free(req); }); - return; - } - - req->reuse_guard = true; - *(req->cb) = '\0'; - qprintf("$$$ req_free %s; fd#%d; buf: %s\n", dispatch_queue_get_label(req->q), req->fd, req->cmd_buf); - assert(req->sd_rd.ds == NULL && req->sd_wr.ds == NULL); - close(req->sd); - assert(req->fd_rd.ds == NULL); - if (req->fd >= 0) close(req->fd); - free(req->file_b.buf); - free(req->deflate_b.buf); - free(req->q_name); - free(req->deflate); - free(req); - - int i; - bool found = false; - for(i = 0; i < n_req; i++) { - if (found) { - debug_req[i -1] = debug_req[i]; - } else { - found = (debug_req[i] == req); - } - } - debug_req = reallocf(debug_req, sizeof(struct request *) * --n_req); - assert(n_req >= 0); -} - -void close_connection(struct request *req) { - qprintf("$$$ close_connection %s, served %d files -- canceling all sources\n", dispatch_queue_get_label(req->q), req->files_served); - delete_source(req, &req->fd_rd); - delete_source(req, &req->sd_rd); - delete_source(req, &req->sd_wr); - delete_source(req, &req->timeo); -} - -// We have some "content data" (either from the file, or from -// compressing the file), and the network socket is ready for us to -// write it -void write_filedata(struct request *req, size_t avail) { - /* We always attempt to write as much data as we have. This - is safe becuase we use non-blocking I/O. It is a good idea - becuase the amount of buffer space that dispatch tells us may - be stale (more space could have opened up, or memory presure - may have caused it to go down). */ - - struct buffer *w_buf = req->deflate ? 
&req->deflate_b : &req->file_b; - ssize_t sz = buf_outof_sz(w_buf); - if (req->deflate) { - struct iovec iov[2]; - if (!req->chunk_bytes_remaining) { - req->chunk_bytes_remaining = sz; - req->needs_zero_chunk = sz != 0; - req->cnp = req->chunk_num; - int n = snprintf(req->chunk_num, sizeof(req->chunk_num), "\r\n%lx\r\n%s", sz, sz ? "" : "\r\n"); - assert(n <= sizeof(req->chunk_num)); - } - iov[0].iov_base = req->cnp; - iov[0].iov_len = req->cnp ? strlen(req->cnp) : 0; - iov[1].iov_base = w_buf->outof; - iov[1].iov_len = (req->chunk_bytes_remaining < sz) ? req->chunk_bytes_remaining : sz; - sz = writev(req->sd, iov, 2); - if (sz > 0) { - if (req->cnp) { - if (sz >= strlen(req->cnp)) { - req->cnp = NULL; - } else { - req->cnp += sz; - } - } - sz -= iov[0].iov_len; - sz = (sz < 0) ? 0 : sz; - req->chunk_bytes_remaining -= sz; - } - } else { - sz = write(req->sd, w_buf->outof, sz); - } - if (sz > 0) { - buf_used_outof(w_buf, sz); - } else if (sz < 0) { - int e = errno; - qprintf("write_filedata %s write error: %d %s\n", dispatch_queue_get_label(req->q), e, strerror(e)); - close_connection(req); - return; - } - - req->total_written += sz; - off_t bytes = req->total_written; - if (req->deflate) { - bytes = req->deflate->total_in - buf_outof_sz(w_buf); - if (req->deflate->total_in < buf_outof_sz(w_buf)) { - bytes = 0; - } - } - if (bytes == req->sb.st_size) { - if (req->needs_zero_chunk && req->deflate && (sz || req->cnp)) { - return; - } - - // We have transfered the file, time to write the log entry. - - // We don't deal with " in the request string, this is an example of how - // to use dispatch, not how to do C string manipulation, eh? 
- size_t rlen = strcspn(req->cmd_buf, "\r\n"); - char tstr[45], astr[45]; - struct tm tm; - time_t clock; - time(&clock); - strftime(tstr, sizeof(tstr), "%d/%b/%Y:%H:%M:%S +0", gmtime_r(&clock, &tm)); - addr2ascii(AF_INET, &req->r_addr.sin_addr, sizeof(struct in_addr), astr); - qfprintf(logfile, "%s - - [%s] \"%.*s\" %hd %zd\n", astr, tstr, (int)rlen, req->cmd_buf, req->status_number, req->total_written); - - int64_t t_offset = 5 * NSEC_PER_SEC + req->files_served * NSEC_PER_SEC / 10; - int64_t timeout_at = req->timeout_at = getnanotime() + t_offset; - - req->timeo.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, req->q); - dispatch_source_set_timer(req->timeo.ds, dispatch_time(DISPATCH_TIME_NOW, t_offset), NSEC_PER_SEC, NSEC_PER_SEC); - dispatch_source_set_event_handler(req->timeo.ds, ^{ - if (req->timeout_at == timeout_at) { - qfprintf(stderr, "$$$ -- timeo fire (delta=%f) -- close connection: q=%s\n", (getnanotime() - (double)timeout_at) / NSEC_PER_SEC, dispatch_queue_get_label(req->q)); - close_connection(req); - } else { - // This happens if the timeout value has been updated, but a pending timeout event manages to race in before the cancel - } - }); - dispatch_resume(req->timeo.ds); - - req->files_served++; - qprintf("$$$ wrote whole file (%s); timeo %p, about to enable %p and close %d, total_written=%zd, this is the %d%s file served\n", dispatch_queue_get_label(req->q), req->timeo.ds, req->sd_rd.ds, req->fd, req->total_written, req->files_served, (1 == req->files_served) ? "st" : (2 == req->files_served) ? "nd" : "th"); - enable_source(req, &req->sd_rd); - if (req->fd_rd.ds) { - delete_source(req, &req->fd_rd); - } - req->cb = req->cmd_buf; - } else { - assert(bytes <= req->sb.st_size); - } - - if (0 == buf_outof_sz(w_buf)) { - // The write buffer is now empty, so we don't need to know when sd is ready for us to write to it. - disable_source(req, &req->sd_wr); - } -} - -// Our "content file" has some data ready for us to read. 
-void read_filedata(struct request *req, size_t avail) { - if (avail == 0) { - delete_source(req, &req->fd_rd); - return; - } - - /* We make sure we can read at least as many bytes as dispatch - says are avilable, but if our buffer is bigger we will read as - much as we have space for. We have the file opened in non-blocking - mode so this is safe. */ - - buf_need_into(&req->file_b, avail); - size_t rsz = buf_into_sz(&req->file_b); - ssize_t sz = read(req->fd, req->file_b.into, rsz); - if (sz >= 0) { - assert(req->sd_wr.ds); - size_t sz0 = buf_outof_sz(&req->file_b); - buf_used_into(&req->file_b, sz); - assert(sz == buf_outof_sz(&req->file_b) - sz0); - } else { - int e = errno; - qprintf("read_filedata %s read error: %d %s\n", dispatch_queue_get_label(req->q), e, strerror(e)); - close_connection(req); - return; - } - if (req->deflate) { - // Note:: deflateBound is "worst case", we could try with any non-zero - // buffer, and alloc more if we get Z_BUF_ERROR... - buf_need_into(&req->deflate_b, deflateBound(req->deflate, buf_outof_sz(&req->file_b))); - req->deflate->next_in = (req->file_b.outof); - size_t o_sz = buf_outof_sz(&req->file_b); - req->deflate->avail_in = o_sz; - req->deflate->next_out = req->deflate_b.into; - size_t i_sz = buf_into_sz(&req->deflate_b); - req->deflate->avail_out = i_sz; - assert(req->deflate->avail_in + req->deflate->total_in <= req->sb.st_size); - // at EOF we want to use Z_FINISH, otherwise we pass Z_NO_FLUSH so we get maximum compression - int rc = deflate(req->deflate, (req->deflate->avail_in + req->deflate->total_in >= req->sb.st_size) ? 
Z_FINISH : Z_NO_FLUSH); - assert(rc == Z_OK || rc == Z_STREAM_END); - buf_used_outof(&req->file_b, o_sz - req->deflate->avail_in); - buf_used_into(&req->deflate_b, i_sz - req->deflate->avail_out); - if (i_sz != req->deflate->avail_out) { - enable_source(req, &req->sd_wr); - } - } else { - enable_source(req, &req->sd_wr); - } -} - -// We are waiting to for an HTTP request (we eitther havn't gotten -// the first request, or pipelneing is on, and we finished a request), -// and there is data to read on the network socket. -void read_req(struct request *req, size_t avail) { - if (req->timeo.ds) { - delete_source(req, &req->timeo); - } - - // -1 to account for the trailing NUL - int s = (sizeof(req->cmd_buf) - (req->cb - req->cmd_buf)) -1; - if (s == 0) { - qprintf("read_req fd#%d command overflow\n", req->sd); - close_connection(req); - return; - } - int rd = read(req->sd, req->cb, s); - if (rd > 0) { - req->cb += rd; - if (req->cb > req->cmd_buf + 4) { - int i; - for(i = -4; i != 0; i++) { - char ch = *(req->cb + i); - if (ch != '\n' && ch != '\r') { - break; - } - } - if (i == 0) { - *(req->cb) = '\0'; - - assert(buf_outof_sz(&req->file_b) == 0); - assert(buf_outof_sz(&req->deflate_b) == 0); - regmatch_t pmatch[re_request_nmatch]; - regex_t *rex = req->files_served ? &re_first_request : &re_nth_request; - int rc = regexec(rex, req->cmd_buf, re_request_nmatch, pmatch, 0); - if (rc) { - char ebuf[1024]; - regerror(rc, rex, ebuf, sizeof(ebuf)); - qprintf("\n$$$ regexec error: %s, ditching request: '%s'\n", ebuf, req->cmd_buf); - close_connection(req); - return; - } else { - if (!strncmp("GET", req->cmd_buf + pmatch[1].rm_so, pmatch[1].rm_eo - pmatch[1].rm_so)) { - rc = regexec(&re_accept_deflate, req->cmd_buf, 0, NULL, 0); - assert(rc == 0 || rc == REG_NOMATCH); - // to disable deflate code: - // rc = REG_NOMATCH; - if (req->deflate) { - deflateEnd(req->deflate); - free(req->deflate); - } - req->deflate = (0 == rc) ? 
calloc(1, sizeof(z_stream)) : NULL; - char path_buf[4096]; - strlcpy(path_buf, DOC_BASE, sizeof(path_buf)); - // WARNING: this doesn't avoid use of .. in the path - // do get outside of DOC_ROOT, a real web server would - // really have to avoid that. - char ch = *(req->cmd_buf + pmatch[2].rm_eo); - *(req->cmd_buf + pmatch[2].rm_eo) = '\0'; - strlcat(path_buf, req->cmd_buf + pmatch[2].rm_so, sizeof(path_buf)); - *(req->cmd_buf + pmatch[2].rm_eo) = ch; - req->fd = open(path_buf, O_RDONLY|O_NONBLOCK); - qprintf("GET req for %s, path: %s, deflate: %p; fd#%d\n", dispatch_queue_get_label(req->q), path_buf, req->deflate, req->fd); - size_t n; - if (req->fd < 0) { - const char *msg = "404 Page not here

You step in the stream,
but the water has moved on.
This page is not here.
"; - req->status_number = 404; - n = buf_sprintf(&req->file_b, "HTTP/1.1 404 Not Found\r\nContent-Length: %zu\r\nExpires: now\r\nServer: %s\r\n\r\n%s", strlen(msg), argv0, msg); - req->sb.st_size = 0; - } else { - rc = fstat(req->fd, &req->sb); - assert(rc >= 0); - if (req->sb.st_mode & S_IFDIR) { - req->status_number = 301; - regmatch_t hmatch[re_request_nmatch]; - rc = regexec(&re_host, req->cmd_buf, re_request_nmatch, hmatch, 0); - assert(rc == 0 || rc == REG_NOMATCH); - if (rc == REG_NOMATCH) { - hmatch[1].rm_so = hmatch[1].rm_eo = 0; - } - n = buf_sprintf(&req->file_b, "HTTP/1.1 301 Redirect\r\nContent-Length: 0\r\nExpires: now\r\nServer: %s\r\nLocation: http://%*.0s/%*.0s/index.html\r\n\r\n", argv0, (int)(hmatch[1].rm_eo - hmatch[1].rm_so), req->cmd_buf + hmatch[1].rm_so, (int)(pmatch[2].rm_eo - pmatch[2].rm_so), req->cmd_buf + pmatch[2].rm_so); - req->sb.st_size = 0; - close(req->fd); - req->fd = -1; - } else { - req->status_number = 200; - if (req->deflate) { - n = buf_sprintf(&req->deflate_b, "HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\nContent-Encoding: deflate\r\nExpires: now\r\nServer: %s\r\n", argv0); - req->chunk_bytes_remaining = buf_outof_sz(&req->deflate_b); - } else { - n = buf_sprintf(req->deflate ? 
&req->deflate_b : &req->file_b, "HTTP/1.1 200 OK\r\nContent-Length: %lld\r\nExpires: now\r\nServer: %s\r\n\r\n", req->sb.st_size, argv0); - } - } - } - - if (req->status_number != 200) { - free(req->deflate); - req->deflate = NULL; - } - - if (req->deflate) { - rc = deflateInit(req->deflate, Z_BEST_COMPRESSION); - assert(rc == Z_OK); - } - - // Cheat: we don't count the header bytes as part of total_written - req->total_written = -buf_outof_sz(&req->file_b); - if (req->fd >= 0) { - req->fd_rd.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, req->fd, 0, req->q); - // Cancelation is async, so we capture the fd and read sources we will want to operate on as the req struct may have moved on to a new set of values - int fd = req->fd; - dispatch_source_t fd_rd = req->fd_rd.ds; - dispatch_source_set_cancel_handler(req->fd_rd.ds, ^{ - close(fd); - if (req->fd == fd) { - req->fd = -1; - } - if (req->fd_rd.ds == fd_rd) { - req->fd_rd.ds = NULL; - } - }); - dispatch_source_set_event_handler(req->fd_rd.ds, ^{ - if (req->fd_rd.ds) { - read_filedata(req, dispatch_source_get_data(req->fd_rd.ds)); - } - }); - dispatch_resume(req->fd_rd.ds); - } else { - req->fd_rd.ds = NULL; - } - - if (req->sd_wr.ds) { - enable_source(req, &req->sd_wr); - } else { - req->sd_wr.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, req->sd, 0, req->q); - dispatch_source_set_event_handler(req->sd_wr.ds, ^{ write_filedata(req, dispatch_source_get_data(req->sd_wr.ds)); }); - dispatch_resume(req->sd_wr.ds); - } - disable_source(req, &req->sd_rd); - } - } - } - } - } else if (rd == 0) { - qprintf("### (%s) read_req fd#%d rd=0 (%s); %d files served\n", dispatch_queue_get_label(req->q), req->sd, (req->cb == req->cmd_buf) ? 
"no final request" : "incomplete request", req->files_served); - close_connection(req); - return; - } else { - int e = errno; - qprintf("reqd_req fd#%d rd=%d err=%d %s\n", req->sd, rd, e, strerror(e)); - close_connection(req); - return; - } -} - -// We have a new connection, allocate a req struct & set up a read event handler -void accept_cb(int fd) { - static int req_num = 0; - struct request *new_req = calloc(1, sizeof(struct request)); - assert(new_req); - new_req->cb = new_req->cmd_buf; - socklen_t r_len = sizeof(new_req->r_addr); - int s = accept(fd, (struct sockaddr *)&(new_req->r_addr), &r_len); - if (s < 0) { - qfprintf(stderr, "accept failure (rc=%d, errno=%d %s)\n", s, errno, strerror(errno)); - return; - } - assert(s >= 0); - new_req->sd = s; - new_req->req_num = req_num; - asprintf(&(new_req->q_name), "req#%d s#%d", req_num++, s); - qprintf("accept_cb fd#%d; made: %s\n", fd, new_req->q_name); - - // All further work for this request will happen "on" new_req->q, - // except the final tear down (see req_free()) - new_req->q = dispatch_queue_create(new_req->q_name, NULL); - dispatch_set_context(new_req->q, new_req); - dispatch_set_finalizer_f(new_req->q, (dispatch_function_t)req_free); - - debug_req = reallocf(debug_req, sizeof(struct request *) * ++n_req); - debug_req[n_req -1] = new_req; - - - new_req->sd_rd.ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, new_req->sd, 0, new_req->q); - dispatch_source_set_event_handler(new_req->sd_rd.ds, ^{ - read_req(new_req, dispatch_source_get_data(new_req->sd_rd.ds)); - }); - - // We want our queue to go away when all of it's sources do, so we - // drop the reference dispatch_queue_create gave us & rely on the - // references each source holds on the queue to keep it alive. 
- dispatch_release(new_req->q); - dispatch_resume(new_req->sd_rd.ds); -} - -int main(int argc, char *argv[]) { - int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); - assert(sock > 0); - int rc; - struct addrinfo ai_hints, *my_addr; - - qpf = dispatch_queue_create("printf", NULL); - - argv0 = basename(argv[0]); - struct passwd *pw = getpwuid(getuid()); - assert(pw); - asprintf(&DOC_BASE, "%s/Sites/", pw->pw_dir); - asprintf(&log_name, "%s/Library/Logs/%s-transfer.log", pw->pw_dir, argv0); - logfile = fopen(log_name, "a"); - reopen_logfile_when_needed(logfile, log_name); - - bzero(&ai_hints, sizeof(ai_hints)); - ai_hints.ai_flags = AI_PASSIVE; - ai_hints.ai_family = PF_INET; - ai_hints.ai_socktype = SOCK_STREAM; - ai_hints.ai_protocol = IPPROTO_TCP; - rc = getaddrinfo(NULL, server_port, &ai_hints, &my_addr); - assert(rc == 0); - - qprintf("Serving content from %s on port %s, logging transfers to %s\n", DOC_BASE, server_port, log_name); - - int yes = 1; - rc = setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); - assert(rc == 0); - yes = 1; - rc = setsockopt(sock, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)); - assert(rc == 0); - - rc = bind(sock, my_addr->ai_addr, my_addr->ai_addr->sa_len); - assert(rc >= 0); - - rc = listen(sock, 25); - assert(rc >= 0); - - rc = regcomp(&re_first_request, "^([A-Z]+)[ \t]+([^ \t\n]+)[ \t]+HTTP/1\\.1[\r\n]+", REG_EXTENDED); - assert(rc == 0); - - rc = regcomp(&re_nth_request, "^([A-Z]+)[ \t]+([^ \t\n]+)([ \t]+HTTP/1\\.1)?[\r\n]+", REG_EXTENDED); - assert(rc == 0); - - rc = regcomp(&re_accept_deflate, "[\r\n]+Accept-Encoding:(.*,)? 
*deflate[,\r\n]+", REG_EXTENDED); - assert(rc == 0); - - rc = regcomp(&re_host, "[\r\n]+Host: *([^ \r\n]+)[ \r\n]+", REG_EXTENDED); - assert(rc == 0); - - dispatch_source_t accept_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, sock, 0, dispatch_get_main_queue()); - dispatch_source_set_event_handler(accept_ds, ^{ accept_cb(sock); }); - assert(accept_ds); - dispatch_resume(accept_ds); - - sigset_t sigs; - sigemptyset(&sigs); - sigaddset(&sigs, SIGINFO); - sigaddset(&sigs, SIGPIPE); - - int s; - for(s = 0; s < NSIG; s++) { - if (sigismember(&sigs, s)) { - dispatch_source_t sig_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, s, 0, dispatch_get_main_queue()); - assert(sig_ds); - dispatch_source_set_event_handler(sig_ds, ^{ dump_reqs(); }); - dispatch_resume(sig_ds); - } - } - - rc = sigprocmask(SIG_BLOCK, &sigs, NULL); - assert(rc == 0); - - dispatch_source_t dump_timer_ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue()); - dispatch_source_set_timer(dump_timer_ds, DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC, NSEC_PER_SEC); - dispatch_source_set_event_handler(dump_timer_ds, ^{ dump_reqs(); }); - dispatch_resume(dump_timer_ds); - - dispatch_main(); - printf("dispatch_main returned\n"); - - return 1; -} diff --git a/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj b/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj deleted file mode 100644 index 444288a7a..000000000 --- a/examples/DispatchWebServer/DispatchWebServer.xcodeproj/project.pbxproj +++ /dev/null @@ -1,203 +0,0 @@ -// !$*UTF8*$! 
-{ - archiveVersion = 1; - classes = { - }; - objectVersion = 45; - objects = { - -/* Begin PBXBuildFile section */ - 4CDA1C1F0F795F5B00E0869E /* DispatchWebServer.c in Sources */ = {isa = PBXBuildFile; fileRef = 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */; }; - 4CDA1C400F79786E00E0869E /* libz.1.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */; }; -/* End PBXBuildFile section */ - -/* Begin PBXCopyFilesBuildPhase section */ - 8DD76FAF0486AB0100D96B5E /* CopyFiles */ = { - isa = PBXCopyFilesBuildPhase; - buildActionMask = 8; - dstPath = /usr/share/man/man1/; - dstSubfolderSpec = 0; - files = ( - ); - runOnlyForDeploymentPostprocessing = 1; - }; -/* End PBXCopyFilesBuildPhase section */ - -/* Begin PBXFileReference section */ - 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = DispatchWebServer.c; sourceTree = ""; }; - 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libz.1.dylib; path = /usr/lib/libz.1.dylib; sourceTree = ""; }; - 8DD76FB20486AB0100D96B5E /* DispatchWebServer */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = DispatchWebServer; sourceTree = BUILT_PRODUCTS_DIR; }; -/* End PBXFileReference section */ - -/* Begin PBXFrameworksBuildPhase section */ - 8DD76FAD0486AB0100D96B5E /* Frameworks */ = { - isa = PBXFrameworksBuildPhase; - buildActionMask = 2147483647; - files = ( - 4CDA1C400F79786E00E0869E /* libz.1.dylib in Frameworks */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXFrameworksBuildPhase section */ - -/* Begin PBXGroup section */ - 08FB7794FE84155DC02AAC07 /* DispatchWebServer */ = { - isa = PBXGroup; - children = ( - 4CDA1C3F0F79786E00E0869E /* libz.1.dylib */, - 08FB7795FE84155DC02AAC07 /* Source */, - C6A0FF2B0290797F04C91782 /* Documentation */, - 
1AB674ADFE9D54B511CA2CBB /* Products */, - ); - name = DispatchWebServer; - sourceTree = ""; - }; - 08FB7795FE84155DC02AAC07 /* Source */ = { - isa = PBXGroup; - children = ( - 4CDA1C1E0F795F5B00E0869E /* DispatchWebServer.c */, - ); - name = Source; - sourceTree = ""; - }; - 1AB674ADFE9D54B511CA2CBB /* Products */ = { - isa = PBXGroup; - children = ( - 8DD76FB20486AB0100D96B5E /* DispatchWebServer */, - ); - name = Products; - sourceTree = ""; - }; - C6A0FF2B0290797F04C91782 /* Documentation */ = { - isa = PBXGroup; - children = ( - ); - name = Documentation; - sourceTree = ""; - }; -/* End PBXGroup section */ - -/* Begin PBXNativeTarget section */ - 8DD76FA90486AB0100D96B5E /* DispatchWebServer */ = { - isa = PBXNativeTarget; - buildConfigurationList = 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "DispatchWebServer" */; - buildPhases = ( - 8DD76FAB0486AB0100D96B5E /* Sources */, - 8DD76FAD0486AB0100D96B5E /* Frameworks */, - 8DD76FAF0486AB0100D96B5E /* CopyFiles */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = DispatchWebServer; - productInstallPath = "$(HOME)/bin"; - productName = DispatchWebServer; - productReference = 8DD76FB20486AB0100D96B5E /* DispatchWebServer */; - productType = "com.apple.product-type.tool"; - }; -/* End PBXNativeTarget section */ - -/* Begin PBXProject section */ - 08FB7793FE84155DC02AAC07 /* Project object */ = { - isa = PBXProject; - buildConfigurationList = 1DEB928908733DD80010E9CD /* Build configuration list for PBXProject "DispatchWebServer" */; - compatibilityVersion = "Xcode 3.1"; - hasScannedForEncodings = 1; - mainGroup = 08FB7794FE84155DC02AAC07 /* DispatchWebServer */; - projectDirPath = ""; - projectRoot = ""; - targets = ( - 8DD76FA90486AB0100D96B5E /* DispatchWebServer */, - ); - }; -/* End PBXProject section */ - -/* Begin PBXSourcesBuildPhase section */ - 8DD76FAB0486AB0100D96B5E /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 
4CDA1C1F0F795F5B00E0869E /* DispatchWebServer.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; -/* End PBXSourcesBuildPhase section */ - -/* Begin XCBuildConfiguration section */ - 1DEB928608733DD80010E9CD /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - COPY_PHASE_STRIP = NO; - GCC_DYNAMIC_NO_PIC = NO; - GCC_ENABLE_FIX_AND_CONTINUE = YES; - GCC_MODEL_TUNING = G5; - GCC_OPTIMIZATION_LEVEL = 0; - INSTALL_PATH = /usr/local/bin; - PRODUCT_NAME = DispatchWebServer; - }; - name = Debug; - }; - 1DEB928708733DD80010E9CD /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_MODEL_TUNING = G5; - INSTALL_PATH = /usr/local/bin; - PRODUCT_NAME = DispatchWebServer; - }; - name = Release; - }; - 1DEB928A08733DD80010E9CD /* Debug */ = { - isa = XCBuildConfiguration; - buildSettings = { - ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; - GCC_C_LANGUAGE_STANDARD = gnu99; - GCC_OPTIMIZATION_LEVEL = 0; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - ONLY_ACTIVE_ARCH = YES; - PREBINDING = NO; - SDKROOT = macosx10.6; - }; - name = Debug; - }; - 1DEB928B08733DD80010E9CD /* Release */ = { - isa = XCBuildConfiguration; - buildSettings = { - ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; - GCC_C_LANGUAGE_STANDARD = gnu99; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - PREBINDING = NO; - SDKROOT = macosx10.6; - }; - name = Release; - }; -/* End XCBuildConfiguration section */ - -/* Begin XCConfigurationList section */ - 1DEB928508733DD80010E9CD /* Build configuration list for PBXNativeTarget "DispatchWebServer" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 1DEB928608733DD80010E9CD /* Debug */, - 1DEB928708733DD80010E9CD /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - 1DEB928908733DD80010E9CD /* Build configuration list 
for PBXProject "DispatchWebServer" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - 1DEB928A08733DD80010E9CD /* Debug */, - 1DEB928B08733DD80010E9CD /* Release */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; -/* End XCConfigurationList section */ - }; - rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; -} diff --git a/examples/DispatchWebServer/ReadMe.txt b/examples/DispatchWebServer/ReadMe.txt deleted file mode 100644 index 4a6359611..000000000 --- a/examples/DispatchWebServer/ReadMe.txt +++ /dev/null @@ -1,44 +0,0 @@ -### DispatchWebServer ### - -=========================================================================== -DESCRIPTION: - -Sample code showing how to: Use dispatch in a real world setting, -schedule file and network I/O, use vnode sources, create and manage -timers. - -=========================================================================== -BUILD REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -RUNTIME REQUIREMENTS: - -Mac OS X version 10.6 Snow Leopard - -=========================================================================== -PACKAGING LIST: - -DispatchWebServer.c - the web server - -=========================================================================== -RUNNING: - -Running the program will start a web server on port 8080, it will read -content from ~/Sites and write ~/Library/Logs/DispatchWebServer-transfer.log -each time complets a request. - -It will write some to stdout when it makes new connections, recieves -requests, completes requests, and when it closes connections. It also -shows the state of each actiave request once evey five seconds and any -time you send a SIGINFO signal to it. 
- -=========================================================================== -CHANGES FROM PREVIOUS VERSIONS: - -Version 1.0 -- First version - -=========================================================================== -Copyright (C) 2009 Apple Inc. All rights reserved. diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 01b065c02..e36948cd1 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -3,12 +3,41 @@ archiveVersion = 1; classes = { }; - objectVersion = 45; + objectVersion = 46; objects = { +/* Begin PBXAggregateTarget section */ + 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 3F3C9356128E637B0042B1F7 /* Build configuration list for PBXAggregateTarget "libdispatch_Sim" */; + buildPhases = ( + ); + dependencies = ( + E4128E4A13B94BCE00ABB2CB /* PBXTargetDependency */, + ); + name = libdispatch_Sim; + productName = libdispatch_Sim; + }; + C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = { + isa = PBXAggregateTarget; + buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */; + buildPhases = ( + ); + dependencies = ( + C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */, + ); + name = libdispatch_tools; + productName = ddt; + }; +/* End PBXAggregateTarget section */ + /* Begin PBXBuildFile section */ - 2EC9C9B80E8809EF00E2499A /* legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = 2EC9C9B70E8809EF00E2499A /* legacy.c */; }; + 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + 5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + 
5AAB45C010D30B79004407EA /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + 5AAB45C410D30CC7004407EA /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 5AAB45C610D30D0C004407EA /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -20,74 +49,227 @@ 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; 9661E56B0F3E7DDF00749F3E /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; 9676A0E10F3E755D00713ADB /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - 96929D840F3EA1020041FF5D /* hw_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* hw_shims.h */; }; - 96929D850F3EA1020041FF5D /* os_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* os_shims.h */; }; + 96929D840F3EA1020041FF5D /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + 96929D850F3EA1020041FF5D /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* 
queue_internal.h */; }; 96A8AA870F41E7A400CD570B /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E417A38412A472C4004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E422A0D512A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E422A0D612A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; 
}; + E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E44EBE5612517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E44EBE5712517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24AC125D57FA0057C971 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24AD125D57FA0057C971 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B994F0F3E85C30006BC96 /* object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24AE125D57FA0057C971 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24AF125D57FA0057C971 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B0125D57FA0057C971 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B1125D57FA0057C971 /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; 
settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B2125D57FA0057C971 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B3125D57FA0057C971 /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B4125D57FA0057C971 /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B5125D57FA0057C971 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = 96032E4C0F5CC8D100241C5F /* time.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E49F24B6125D57FA0057C971 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E49F24B7125D57FA0057C971 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E49F24B8125D57FA0057C971 /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E49F24B9125D57FA0057C971 /* benchmark.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B99350F3E83980006BC96 /* benchmark.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E49F24BA125D57FA0057C971 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; + E49F24BB125D57FA0057C971 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E49F24BC125D57FA0057C971 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 
5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E49F24BE125D57FA0057C971 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E49F24BF125D57FA0057C971 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E49F24C1125D57FA0057C971 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E49F24C2125D57FA0057C971 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E49F24C3125D57FA0057C971 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E49F24C4125D57FA0057C971 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E49F24C5125D57FA0057C971 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E49F24C6125D57FA0057C971 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E49F24C8125D57FA0057C971 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E49F24C9125D57FA0057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E49F24CA125D57FA0057C971 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E49F24CB125D57FA0057C971 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E49F24CC125D57FA0057C971 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E49F24CD125D57FA0057C971 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E49F24CE125D57FA0057C971 /* apply.c in 
Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E49F24CF125D57FA0057C971 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E49F24D0125D57FA0057C971 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E49F24D1125D57FA0057C971 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E49F24D2125D57FA0057C971 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E49F24D3125D57FA0057C971 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E49F24D4125D57FA0057C971 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; + E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; + E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E4EC11AE12514302000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E4EC11B012514302000DDBD1 /* 
once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E4EC11B112514302000DDBD1 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E4EC11B212514302000DDBD1 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E4EC11B312514302000DDBD1 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E4EC11B412514302000DDBD1 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E4EC11B512514302000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E4EC11B712514302000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E4EC11B812514302000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4EC121A12514715000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E4EC121B12514715000DDBD1 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E4EC121C12514715000DDBD1 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E4EC121D12514715000DDBD1 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E4EC121E12514715000DDBD1 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E4EC121F12514715000DDBD1 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E4EC122012514715000DDBD1 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E4EC122112514715000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 
96032E4A0F5CC8C700241C5F /* time.c */; }; + E4EC122312514715000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E4EC122412514715000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + FC1832A6109923C7003403D5 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + FC1832A7109923C7003403D5 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + FC1832A8109923C7003403D5 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; FC5C9C1E0EADABE3006E462D /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; FC7BED990E8361E600161930 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; FC7BED9A0E8361E600161930 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; FC7BED9C0E8361E600161930 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; FC7BED9E0E8361E600161930 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; - FC7BED9F0E8361E600161930 /* legacy.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED900E8361E600161930 /* legacy.h */; settings = {ATTRIBUTES = (Private, ); }; }; FC7BEDA20E8361E600161930 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; FC7BEDA40E8361E600161930 /* protocol.defs in 
Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; FC7BEDA50E8361E600161930 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; - FC7BEDA60E8361E600161930 /* shims.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED970E8361E600161930 /* shims.c */; }; + FC9C70E8105EC9620074F9CA /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; FCEF04800F5661960067401F /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; /* End PBXBuildFile section */ +/* Begin PBXContainerItemProxy section */ + C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = FCFA5AA010D1AE050074F59A; + remoteInfo = ddt; + }; + C927F36810FD7F1A00C5AB8B /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = FCFA5A9F10D1AE050074F59A; + remoteInfo = ddt; + }; + E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D2AAC045055464E500DB518D; + remoteInfo = libdispatch; + }; + E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4EC118F12514302000DDBD1; + remoteInfo = "libdispatch up resolved"; + }; + E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* 
Project object */; + proxyType = 1; + remoteGlobalIDString = E4EC121612514715000DDBD1; + remoteInfo = "libdispatch mp resolved"; + }; +/* End PBXContainerItemProxy section */ + /* Begin PBXFileReference section */ - 2EC9C9B70E8809EF00E2499A /* legacy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = legacy.c; path = src/legacy.c; sourceTree = ""; }; - 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = semaphore_internal.h; path = src/semaphore_internal.h; sourceTree = ""; }; - 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = semaphore.h; path = src/semaphore.h; sourceTree = ""; }; - 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = semaphore.c; path = src/semaphore.c; sourceTree = ""; }; - 72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_source_create.3; path = man/dispatch_source_create.3; sourceTree = ""; }; - 72CC940C0ECCD5720031B751 /* dispatch_object.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_object.3; path = man/dispatch_object.3; sourceTree = ""; }; - 72CC940D0ECCD5720031B751 /* dispatch.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch.3; path = man/dispatch.3; sourceTree = ""; }; - 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = base.h; path = src/base.h; sourceTree = ""; }; - 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = time.c; path = src/time.c; sourceTree = ""; }; - 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = time.h; path = src/time.h; sourceTree = ""; }; - 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_apply.3; path = man/dispatch_apply.3; sourceTree = ""; }; - 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_once.3; path = man/dispatch_once.3; sourceTree = ""; }; - 961B99350F3E83980006BC96 /* benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = benchmark.h; path = src/benchmark.h; sourceTree = ""; }; - 961B994F0F3E85C30006BC96 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = object.h; path = src/object.h; sourceTree = ""; }; - 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_semaphore_create.3; path = man/dispatch_semaphore_create.3; sourceTree = ""; }; - 965CD6340F3E806200D4E28D /* benchmark.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = benchmark.c; path = src/benchmark.c; sourceTree = ""; }; - 965ECC200F3EAB71004DDD89 /* object_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = object_internal.h; path = src/object_internal.h; sourceTree = ""; }; - 9661E56A0F3E7DDF00749F3E /* object.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = object.c; path = src/object.c; sourceTree = ""; }; - 9676A0E00F3E755D00713ADB /* apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = apply.c; path = src/apply.c; sourceTree = ""; }; - 96859A3D0EF71BAD003EB3FB /* dispatch_benchmark.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; 
name = dispatch_benchmark.3; path = man/dispatch_benchmark.3; sourceTree = ""; }; - 96929D820F3EA1020041FF5D /* hw_shims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hw_shims.h; path = src/hw_shims.h; sourceTree = ""; }; - 96929D830F3EA1020041FF5D /* os_shims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = os_shims.h; path = src/os_shims.h; sourceTree = ""; }; - 96929D950F3EA2170041FF5D /* queue_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = queue_internal.h; path = src/queue_internal.h; sourceTree = ""; }; - 96A8AA860F41E7A400CD570B /* source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = source.c; path = src/source.c; sourceTree = ""; }; - 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = queue_private.h; path = src/queue_private.h; sourceTree = ""; }; - 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = once.h; path = src/once.h; sourceTree = ""; }; - 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = once.c; path = src/once.c; sourceTree = ""; }; - D2AAC046055464E500DB518D /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; - FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source_internal.h; path = src/source_internal.h; sourceTree = ""; }; - FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; name = dispatch_queue_create.3; path = man/dispatch_queue_create.3; sourceTree = 
""; }; - FC5C9C1D0EADABE3006E462D /* group.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = group.h; path = src/group.h; sourceTree = ""; }; - FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_after.3; path = man/dispatch_after.3; sourceTree = ""; }; - FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_api.3; path = man/dispatch_api.3; sourceTree = ""; }; - FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_async.3; path = man/dispatch_async.3; sourceTree = ""; }; - FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_group_create.3; path = man/dispatch_group_create.3; sourceTree = ""; }; - FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = dispatch_time.3; path = man/dispatch_time.3; sourceTree = ""; }; - FC7BED8A0E8361E600161930 /* queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = queue.c; path = src/queue.c; sourceTree = ""; }; - FC7BED8B0E8361E600161930 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = queue.h; path = src/queue.h; sourceTree = ""; }; - FC7BED8D0E8361E600161930 /* source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source.h; path = src/source.h; sourceTree = ""; }; - FC7BED8F0E8361E600161930 /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = internal.h; path = src/internal.h; sourceTree = ""; }; - FC7BED900E8361E600161930 /* legacy.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; name = legacy.h; path = src/legacy.h; sourceTree = ""; }; - FC7BED930E8361E600161930 /* private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = private.h; path = src/private.h; sourceTree = ""; }; - FC7BED950E8361E600161930 /* protocol.defs */ = {isa = PBXFileReference; explicitFileType = sourcecode.mig; fileEncoding = 4; name = protocol.defs; path = src/protocol.defs; sourceTree = ""; }; - FC7BED960E8361E600161930 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = dispatch.h; path = src/dispatch.h; sourceTree = ""; }; - FC7BED970E8361E600161930 /* shims.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = shims.c; path = src/shims.c; sourceTree = ""; }; - FCEF047F0F5661960067401F /* source_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = source_private.h; path = src/source_private.h; sourceTree = ""; }; + 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; + 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; + 5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; tabWidth = 8; 
}; + 5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; tabWidth = 8; }; + 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = ""; }; + 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_source_create.3; sourceTree = ""; }; + 72CC940C0ECCD5720031B751 /* dispatch_object.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_object.3; sourceTree = ""; }; + 72CC940D0ECCD5720031B751 /* dispatch.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch.3; sourceTree = ""; }; + 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = ""; }; + 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = time.c; sourceTree = ""; }; + 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_apply.3; sourceTree = ""; }; + 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_once.3; sourceTree = ""; }; + 961B99350F3E83980006BC96 /* benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = benchmark.h; sourceTree = ""; }; + 961B994F0F3E85C30006BC96 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; + 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_semaphore_create.3; sourceTree = ""; }; + 965CD6340F3E806200D4E28D /* benchmark.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = benchmark.c; sourceTree = ""; }; + 965ECC200F3EAB71004DDD89 /* object_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_internal.h; sourceTree = ""; }; + 9661E56A0F3E7DDF00749F3E /* object.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = object.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 9676A0E00F3E755D00713ADB /* apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = apply.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 96859A3D0EF71BAD003EB3FB /* dispatch_benchmark.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_benchmark.3; sourceTree = ""; }; + 96929D820F3EA1020041FF5D /* atomic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = atomic.h; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.objcpp; }; + 96929D830F3EA1020041FF5D /* shims.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shims.h; sourceTree = ""; }; + 96929D950F3EA2170041FF5D /* queue_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; lineEnding = 0; path = queue_internal.h; sourceTree = ""; xcLanguageSpecificationIdentifier = 
xcode.lang.objcpp; }; + 96A8AA860F41E7A400CD570B /* source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = source.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; + 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; + 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; + D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = ""; }; + E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = ""; }; + E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hw_config.h; sourceTree = ""; }; + E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; + E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; path = provider.d; sourceTree = ""; }; + E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; + E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = ""; }; + E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; + E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; + E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; + E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; + E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + E49F251C125D629F0057C971 /* symlink-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "symlink-headers.sh"; sourceTree = ""; }; + E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; + E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; + E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = 
dispatch_data_create.3; sourceTree = ""; }; + E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_create.3; sourceTree = ""; }; + E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_read.3; sourceTree = ""; }; + E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_read.3; sourceTree = ""; }; + E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; + E4BA743A13A8911B0095BDF1 /* malloc_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_zone.h; sourceTree = ""; }; + E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; + E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; + FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; + FC1832A2109923C7003403D5 /* perfmon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perfmon.h; sourceTree = ""; }; + FC1832A3109923C7003403D5 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; + FC1832A4109923C7003403D5 /* tsd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = tsd.h; sourceTree = ""; }; + FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_queue_create.3; sourceTree = ""; }; + FC5C9C1D0EADABE3006E462D /* group.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = group.h; sourceTree = ""; }; + FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_after.3; sourceTree = ""; }; + FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_api.3; sourceTree = ""; }; + FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_async.3; sourceTree = ""; }; + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_group_create.3; sourceTree = ""; }; + FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_time.3; sourceTree = ""; }; + FC7BED8A0E8361E600161930 /* queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = queue.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + FC7BED8B0E8361E600161930 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue.h; sourceTree = ""; }; + FC7BED8D0E8361E600161930 /* source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source.h; sourceTree = ""; }; + FC7BED8F0E8361E600161930 /* internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internal.h; sourceTree = ""; }; + FC7BED930E8361E600161930 /* private.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = private.h; sourceTree = ""; }; + FC7BED950E8361E600161930 /* protocol.defs */ = {isa = PBXFileReference; explicitFileType = sourcecode.mig; fileEncoding = 4; path = protocol.defs; sourceTree = ""; }; + FC7BED960E8361E600161930 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dispatch.h; sourceTree = ""; }; + FC9C70E7105EC9620074F9CA /* config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = config.h; sourceTree = ""; }; + FCEF047F0F5661960067401F /* source_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_private.h; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -98,12 +280,20 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E49F24D5125D57FA0057C971 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ 08FB7794FE84155DC02AAC07 /* libdispatch */ = { isa = PBXGroup; children = ( + E44DB71E11D2FF080074F2AD /* Build Support */, FC7BEDAA0E83625200161930 /* Public Headers */, FC7BEDAF0E83626100161930 /* Private Headers */, FC7BEDB60E8363DC00161930 /* Project Headers */, @@ -111,31 +301,40 @@ C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, ); + indentWidth = 4; name = libdispatch; sourceTree = ""; + tabWidth = 4; + usesTabs = 1; }; 08FB7795FE84155DC02AAC07 /* Source */ = { isa = PBXGroup; children = ( + 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, + 5AAB45BF10D30B79004407EA /* data.c */, + E44EBE3B1251659900645D88 /* init.c */, + 5A27262510F26F1900751FBC /* io.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, - 9676A0E00F3E755D00713ADB /* apply.c 
*/, - FC7BED8A0E8361E600161930 /* queue.c */, 96DF70BD0F38FE3C0074BD99 /* once.c */, - 96032E4A0F5CC8C700241C5F /* time.c */, + FC7BED8A0E8361E600161930 /* queue.c */, 721F5CCE0F15553500FF03A6 /* semaphore.c */, 96A8AA860F41E7A400CD570B /* source.c */, - FC7BED970E8361E600161930 /* shims.c */, - 2EC9C9B70E8809EF00E2499A /* legacy.c */, + 96032E4A0F5CC8C700241C5F /* time.c */, FC7BED950E8361E600161930 /* protocol.defs */, + E43570B8126E93380097AB9F /* provider.d */, ); name = Source; + path = src; sourceTree = ""; }; 1AB674ADFE9D54B511CA2CBB /* Products */ = { isa = PBXGroup; children = ( - D2AAC046055464E500DB518D /* libdispatch.a */, + D2AAC046055464E500DB518D /* libdispatch.dylib */, + E4EC11C312514302000DDBD1 /* libdispatch_up.a */, + E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, + E49F24DF125D57FA0057C971 /* libdispatch.dylib */, ); name = Products; sourceTree = ""; @@ -143,38 +342,118 @@ C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( + 72CC940D0ECCD5720031B751 /* dispatch.3 */, FC678DE80F97E0C300AB5993 /* dispatch_after.3 */, FC678DE90F97E0C300AB5993 /* dispatch_api.3 */, - FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */, - FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */, - FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */, - 72CC940D0ECCD5720031B751 /* dispatch.3 */, 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */, + FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */, 96859A3D0EF71BAD003EB3FB /* dispatch_benchmark.3 */, + E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */, + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */, + E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */, + E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */, 72CC940C0ECCD5720031B751 /* dispatch_object.3 */, 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */, + FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */, + E4BA743813A8900B0095BDF1 /* dispatch_read.3 */, 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */, 72B54F690EB169EB00DBECBA 
/* dispatch_source_create.3 */, - FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */, + FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */, ); name = Documentation; + path = man; + sourceTree = ""; + }; + C927F36010FD7F1000C5AB8B /* Products */ = { + isa = PBXGroup; + children = ( + C927F36710FD7F1000C5AB8B /* ddt */, + ); + name = Products; + sourceTree = ""; + }; + E40041E4125E71150022B135 /* xcodeconfig */ = { + isa = PBXGroup; + children = ( + E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, + E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, + E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, + ); + path = xcodeconfig; + sourceTree = ""; + }; + E44DB71E11D2FF080074F2AD /* Build Support */ = { + isa = PBXGroup; + children = ( + E4BA743413A88D390095BDF1 /* config */, + E40041E4125E71150022B135 /* xcodeconfig */, + E49F259C125D664F0057C971 /* xcodescripts */, + E47D6BCA125F10F70070D91C /* resolver */, + C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, + ); + name = "Build Support"; + sourceTree = ""; + }; + E47D6BCA125F10F70070D91C /* resolver */ = { + isa = PBXGroup; + children = ( + E47D6BB5125F0F800070D91C /* resolved.h */, + E44EBE371251656400645D88 /* resolver.c */, + E44EBE331251654000645D88 /* resolver.h */, + ); + path = resolver; + sourceTree = ""; + }; + E49F259C125D664F0057C971 /* xcodescripts */ = { + isa = PBXGroup; + children = ( + E49F251D125D630A0057C971 /* install-manpages.sh */, + E49F251E125D631D0057C971 /* mig-headers.sh */, + E482F1CD12DBAB590030614D /* postprocess-headers.sh */, + E49F251C125D629F0057C971 /* symlink-headers.sh */, + ); + path = xcodescripts; + sourceTree = ""; + }; + E4BA743413A88D390095BDF1 /* config */ = { + isa = PBXGroup; + children = ( + FC9C70E7105EC9620074F9CA /* config.h */, + ); + path = config; + sourceTree = ""; + }; + FC1832A0109923B3003403D5 /* shims */ = { + isa = PBXGroup; + children = ( + 96929D820F3EA1020041FF5D /* atomic.h */, + E4BA743913A8911B0095BDF1 /* getprogname.h */, + 
E4128ED513BA9A1700ABB2CB /* hw_config.h */, + E4BA743A13A8911B0095BDF1 /* malloc_zone.h */, + FC1832A2109923C7003403D5 /* perfmon.h */, + FC1832A3109923C7003403D5 /* time.h */, + FC1832A4109923C7003403D5 /* tsd.h */, + ); + path = shims; sourceTree = ""; }; FC7BEDAA0E83625200161930 /* Public Headers */ = { isa = PBXGroup; children = ( - 96032E4C0F5CC8D100241C5F /* time.h */, - 721F5C5C0F15520500FF03A6 /* semaphore.h */, - 961B994F0F3E85C30006BC96 /* object.h */, - 96C9553A0F3EAEDD000D2CA4 /* once.h */, - 961B99350F3E83980006BC96 /* benchmark.h */, 72CC942F0ECCD8750031B751 /* base.h */, + 5AAB45C510D30D0C004407EA /* data.h */, FC7BED960E8361E600161930 /* dispatch.h */, + FC5C9C1D0EADABE3006E462D /* group.h */, + 5AAB45C310D30CC7004407EA /* io.h */, + 961B994F0F3E85C30006BC96 /* object.h */, + 96C9553A0F3EAEDD000D2CA4 /* once.h */, FC7BED8B0E8361E600161930 /* queue.h */, + 721F5C5C0F15520500FF03A6 /* semaphore.h */, FC7BED8D0E8361E600161930 /* source.h */, - FC5C9C1D0EADABE3006E462D /* group.h */, + 96032E4C0F5CC8D100241C5F /* time.h */, ); name = "Public Headers"; + path = dispatch; sourceTree = ""; }; FC7BEDAF0E83626100161930 /* Private Headers */ = { @@ -182,24 +461,29 @@ children = ( FC7BED930E8361E600161930 /* private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, - FC7BED900E8361E600161930 /* legacy.h */, + FCEF047F0F5661960067401F /* source_private.h */, + 961B99350F3E83980006BC96 /* benchmark.h */, ); name = "Private Headers"; + path = private; sourceTree = ""; }; FC7BEDB60E8363DC00161930 /* Project Headers */ = { isa = PBXGroup; children = ( + FC7BED8F0E8361E600161930 /* internal.h */, + E4C1ED6E1263E714000D3C8B /* data_internal.h */, + 5A0095A110F274B0000E2A31 /* io_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, - FCEF047F0F5661960067401F /* source_private.h */, - 
96929D820F3EA1020041FF5D /* hw_shims.h */, - 96929D830F3EA1020041FF5D /* os_shims.h */, - FC7BED8F0E8361E600161930 /* internal.h */, + E422A0D412A557B5005E5BDB /* trace.h */, + 96929D830F3EA1020041FF5D /* shims.h */, + FC1832A0109923B3003403D5 /* shims */, ); name = "Project Headers"; + path = src; sourceTree = ""; }; /* End PBXGroup section */ @@ -209,81 +493,157 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( - 72CC94300ECCD8750031B751 /* base.h in Headers */, FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, + 72CC94300ECCD8750031B751 /* base.h in Headers */, + 961B99500F3E85C30006BC96 /* object.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, - FC5C9C1E0EADABE3006E462D /* group.h in Headers */, - FC7BEDA20E8361E600161930 /* private.h in Headers */, - FC7BED9F0E8361E600161930 /* legacy.h in Headers */, - FC7BED9E0E8361E600161930 /* internal.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, - 961B99360F3E83980006BC96 /* benchmark.h in Headers */, - 961B99500F3E85C30006BC96 /* object.h in Headers */, - 96929D840F3EA1020041FF5D /* hw_shims.h in Headers */, - 96929D850F3EA1020041FF5D /* os_shims.h in Headers */, - 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, - 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, + FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, + 5AAB45C410D30CC7004407EA /* io.h in Headers */, + 5AAB45C610D30D0C004407EA /* data.h in Headers */, + 96032E4D0F5CC8D100241C5F /* time.h in Headers */, + FC7BEDA20E8361E600161930 /* private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, - 96032E4D0F5CC8D100241C5F /* time.h in Headers */, - 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, + 961B99360F3E83980006BC96 /* benchmark.h in Headers */, + 
FC7BED9E0E8361E600161930 /* internal.h in Headers */, + 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, + 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, + 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, + E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */, + 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, + FC1832A8109923C7003403D5 /* tsd.h in Headers */, + 96929D840F3EA1020041FF5D /* atomic.h in Headers */, + 96929D850F3EA1020041FF5D /* shims.h in Headers */, + FC1832A7109923C7003403D5 /* time.h in Headers */, + FC1832A6109923C7003403D5 /* perfmon.h in Headers */, + FC9C70E8105EC9620074F9CA /* config.h in Headers */, + E422A0D512A557B5005E5BDB /* trace.h in Headers */, + E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, + E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */, + E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E49F24AA125D57FA0057C971 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, + E49F24AC125D57FA0057C971 /* base.h in Headers */, + E49F24AD125D57FA0057C971 /* object.h in Headers */, + E49F24AE125D57FA0057C971 /* queue.h in Headers */, + E49F24AF125D57FA0057C971 /* source.h in Headers */, + E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, + E49F24B1125D57FA0057C971 /* group.h in Headers */, + E49F24B2125D57FA0057C971 /* once.h in Headers */, + E49F24B3125D57FA0057C971 /* io.h in Headers */, + E49F24B4125D57FA0057C971 /* data.h in Headers */, + E49F24B5125D57FA0057C971 /* time.h in Headers */, + E49F24B6125D57FA0057C971 /* private.h in Headers */, + E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, + E49F24B8125D57FA0057C971 /* source_private.h in Headers */, + E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, + E49F24BA125D57FA0057C971 /* 
internal.h in Headers */, + E49F24BC125D57FA0057C971 /* object_internal.h in Headers */, + E49F24BB125D57FA0057C971 /* queue_internal.h in Headers */, + E49F24BE125D57FA0057C971 /* source_internal.h in Headers */, + E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */, + E4C1ED701263E714000D3C8B /* data_internal.h in Headers */, + E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, + E49F24C1125D57FA0057C971 /* tsd.h in Headers */, + E49F24C2125D57FA0057C971 /* atomic.h in Headers */, + E49F24C3125D57FA0057C971 /* shims.h in Headers */, + E49F24C4125D57FA0057C971 /* time.h in Headers */, + E49F24C5125D57FA0057C971 /* perfmon.h in Headers */, + E49F24C6125D57FA0057C971 /* config.h in Headers */, + E422A0D612A557B5005E5BDB /* trace.h in Headers */, + E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, + E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */, + E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; /* End PBXHeadersBuildPhase section */ -/* Begin PBXLegacyTarget section */ - 721EB4790F69D26F00845379 /* testbots */ = { - isa = PBXLegacyTarget; - buildArgumentsString = testbots; - buildConfigurationList = 721EB4850F69D2A600845379 /* Build configuration list for PBXLegacyTarget "testbots" */; +/* Begin PBXNativeTarget section */ + D2AAC045055464E500DB518D /* libdispatch */ = { + isa = PBXNativeTarget; + buildConfigurationList = 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */; buildPhases = ( + D2AAC043055464E500DB518D /* Headers */, + D2AAC044055464E500DB518D /* Sources */, + D289987405E68DCB004EDB86 /* Frameworks */, + E482F1C512DBAA110030614D /* Postprocess Headers */, + 2EC9C9800E846B5200E2499A /* Symlink Headers */, + 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */, + ); + buildRules = ( ); - buildToolPath = /usr/bin/make; - buildWorkingDirectory = testing; dependencies = ( + E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, + 
E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, ); - name = testbots; - passBuildSettingsInEnvironment = 0; - productName = testbots; + name = libdispatch; + productName = libdispatch; + productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; }; - 7276FCBA0EB10E0F00F7F487 /* test */ = { - isa = PBXLegacyTarget; - buildArgumentsString = test; - buildConfigurationList = 7276FCC80EB10E2300F7F487 /* Build configuration list for PBXLegacyTarget "test" */; + E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = { + isa = PBXNativeTarget; + buildConfigurationList = E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */; buildPhases = ( + E49F24AA125D57FA0057C971 /* Headers */, + E49F24C7125D57FA0057C971 /* Sources */, + E49F24D5125D57FA0057C971 /* Frameworks */, + E4128EB213B9612700ABB2CB /* Postprocess Headers */, + E49F24D6125D57FA0057C971 /* Symlink Headers */, + E49F24D7125D57FA0057C971 /* Install Manpages */, + ); + buildRules = ( ); - buildToolPath = /usr/bin/make; - buildWorkingDirectory = testing; dependencies = ( ); - name = test; - passBuildSettingsInEnvironment = 0; - productName = test; + name = "libdispatch no resolver"; + productName = libdispatch; + productReference = E49F24DF125D57FA0057C971 /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; }; -/* End PBXLegacyTarget section */ - -/* Begin PBXNativeTarget section */ - D2AAC045055464E500DB518D /* libdispatch */ = { + E4EC118F12514302000DDBD1 /* libdispatch up resolved */ = { isa = PBXNativeTarget; - buildConfigurationList = 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */; + buildConfigurationList = E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */; buildPhases = ( - D2AAC043055464E500DB518D /* Headers */, - D2AAC044055464E500DB518D /* Sources */, - 
D289987405E68DCB004EDB86 /* Frameworks */, - 2EC9C9800E846B5200E2499A /* ShellScript */, - 4CED8B9D0EEDF8B600AF99AB /* ShellScript */, + E4EC12141251461A000DDBD1 /* Mig Headers */, + E4EC11AC12514302000DDBD1 /* Sources */, + E4EC121212514613000DDBD1 /* Symlink normal variant */, ); buildRules = ( ); dependencies = ( ); - name = libdispatch; + name = "libdispatch up resolved"; productName = libdispatch; - productReference = D2AAC046055464E500DB518D /* libdispatch.a */; + productReference = E4EC11C312514302000DDBD1 /* libdispatch_up.a */; + productType = "com.apple.product-type.library.static"; + }; + E4EC121612514715000DDBD1 /* libdispatch mp resolved */ = { + isa = PBXNativeTarget; + buildConfigurationList = E4EC122612514715000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch mp resolved" */; + buildPhases = ( + E4EC121712514715000DDBD1 /* Mig Headers */, + E4EC121812514715000DDBD1 /* Sources */, + E4EC122512514715000DDBD1 /* Symlink normal variant */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch mp resolved"; + productName = libdispatch; + productReference = E4EC122D12514715000DDBD1 /* libdispatch_mp.a */; productType = "com.apple.product-type.library.static"; }; /* End PBXNativeTarget section */ @@ -291,46 +651,216 @@ /* Begin PBXProject section */ 08FB7793FE84155DC02AAC07 /* Project object */ = { isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = YES; + LastUpgradeCheck = 0420; + }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; - compatibilityVersion = "Xcode 3.1"; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; hasScannedForEncodings = 1; + knownRegions = ( + English, + Japanese, + French, + German, + ); mainGroup = 08FB7794FE84155DC02AAC07 /* libdispatch */; projectDirPath = ""; + projectReferences = ( + { + ProductGroup = C927F36010FD7F1000C5AB8B /* Products */; + ProjectRef = C927F35F10FD7F1000C5AB8B /* 
ddt.xcodeproj */; + }, + ); projectRoot = ""; targets = ( D2AAC045055464E500DB518D /* libdispatch */, - 7276FCBA0EB10E0F00F7F487 /* test */, - 721EB4790F69D26F00845379 /* testbots */, + E49F24A9125D57FA0057C971 /* libdispatch no resolver */, + E4EC118F12514302000DDBD1 /* libdispatch up resolved */, + E4EC121612514715000DDBD1 /* libdispatch mp resolved */, + 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, + C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, ); }; /* End PBXProject section */ +/* Begin PBXReferenceProxy section */ + C927F36710FD7F1000C5AB8B /* ddt */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = ddt; + remoteRef = C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; +/* End PBXReferenceProxy section */ + /* Begin PBXShellScriptBuildPhase section */ - 2EC9C9800E846B5200E2499A /* ShellScript */ = { + 2EC9C9800E846B5200E2499A /* Symlink Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 12; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/symlink-headers.sh", + ); + name = "Symlink Headers"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; + 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/install-manpages.sh", + ); + name = "Install Manpages"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; + E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + ); + name = "Postprocess Headers"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" "; + showEnvVarsInLog = 0; + }; + E482F1C512DBAA110030614D /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; files = ( ); inputPaths = ( + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", ); + name = "Postprocess Headers"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = "# private.h supersedes dispatch.h where available\nmv \"$DSTROOT\"/usr/local/include/dispatch/private.h \"$DSTROOT\"/usr/local/include/dispatch/dispatch.h\nln -sf dispatch.h \"$DSTROOT\"/usr/local/include/dispatch/private.h\n\n# keep events.h around for a little while\nln -sf ../../../include/dispatch/source.h \"$DSTROOT\"/usr/local/include/dispatch/events.h"; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; + E49F24D6125D57FA0057C971 /* Symlink Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 12; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/symlink-headers.sh", + ); + name = "Symlink Headers"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; }; - 4CED8B9D0EEDF8B600AF99AB /* ShellScript */ = { + E49F24D7125D57FA0057C971 /* Install Manpages */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; files = ( ); inputPaths = ( + "$(SRCROOT)/xcodescripts/install-manpages.sh", ); + name = "Install Manpages"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; - shellPath = /bin/sh; - shellScript = "#!/bin/sh\n\nmkdir -p $DSTROOT/usr/share/man/man3 || true\nmkdir -p $DSTROOT/usr/local/share/man/man3 || true\n\n# Copy man pages\ncd $SRCROOT/man\nBASE_PAGES=\"dispatch.3 dispatch_after.3 dispatch_api.3 dispatch_apply.3 dispatch_async.3 dispatch_group_create.3 dispatch_object.3 dispatch_once.3 dispatch_queue_create.3 dispatch_semaphore_create.3 dispatch_source_create.3 dispatch_time.3\"\n\nPRIVATE_PAGES=\"dispatch_benchmark.3\"\n\ncp ${BASE_PAGES} $DSTROOT/usr/share/man/man3\ncp ${PRIVATE_PAGES} $DSTROOT/usr/local/share/man/man3\n\n# Make hard links (lots of hard links)\n\ncd $DSTROOT/usr/local/share/man/man3\nln -f dispatch_benchmark.3 dispatch_benchmark_f.3\nchown ${INSTALL_OWNER}:${INSTALL_GROUP} $PRIVATE_PAGES\nchmod $INSTALL_MODE_FLAG $PRIVATE_PAGES\n\n\ncd $DSTROOT/usr/share/man/man3\n\nchown ${INSTALL_OWNER}:${INSTALL_GROUP} $BASE_PAGES\nchmod $INSTALL_MODE_FLAG $BASE_PAGES\n\nln -f dispatch_after.3 dispatch_after_f.3\nln -f dispatch_apply.3 dispatch_apply_f.3\nln -f dispatch_once.3 dispatch_once_f.3\n\nfor m in dispatch_async_f dispatch_sync dispatch_sync_f; do\n\tln -f dispatch_async.3 ${m}.3\ndone\n\nfor m in dispatch_group_enter dispatch_group_leave dispatch_group_wait dispatch_group_async dispatch_group_async_f dispatch_group_notify dispatch_group_notify_f; do\n\tln -f dispatch_group_create.3 ${m}.3\ndone\n\nfor m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do\n\tln -f dispatch_object.3 ${m}.3\ndone\n\nfor m in dispatch_semaphore_signal 
dispatch_semaphore_wait; do\n\tln -f dispatch_semaphore_create.3 ${m}.3\ndone\n\nfor m in dispatch_get_current_queue dispatch_main dispatch_get_main_queue dispatch_get_global_queue dispatch_queue_get_label dispatch_set_target_queue; do\n\tln -f dispatch_queue_create.3 ${m}.3\ndone\n\nfor m in dispatch_source_set_event_handler dispatch_source_set_event_handler_f dispatch_source_set_cancel_handler dispatch_source_set_cancel_handler_f dispatch_source_cancel dispatch_source_testcancel dispatch_source_get_handle dispatch_source_get_mask dispatch_source_get_data dispatch_source_merge_data dispatch_source_set_timer; do\n\tln -f dispatch_source_create.3 ${m}.3\ndone\n\nln -f dispatch_time.3 dispatch_walltime.3"; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; + E4EC121212514613000DDBD1 /* Symlink normal variant */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Symlink normal variant"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(PRODUCT_NAME)_normal.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; + E4EC12141251461A000DDBD1 /* Mig Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/xcodescripts/mig-headers.sh", + ); + name = "Mig Headers"; + outputPaths = ( + "$(DERIVED_FILE_DIR)/protocol.h", + "$(DERIVED_FILE_DIR)/protocolServer.h", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_1}\""; + showEnvVarsInLog = 0; + }; + E4EC121712514715000DDBD1 /* Mig Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/xcodescripts/mig-headers.sh", + ); + name = "Mig Headers"; + outputPaths = ( + "$(DERIVED_FILE_DIR)/protocol.h", + "$(DERIVED_FILE_DIR)/protocolServer.h", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_1}\""; + showEnvVarsInLog = 0; + }; + E4EC122512514715000DDBD1 /* Symlink normal variant */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Symlink normal variant"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(PRODUCT_NAME)_normal.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + showEnvVarsInLog = 0; }; /* End PBXShellScriptBuildPhase section */ @@ -339,10 +869,11 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + E43570B9126E93380097AB9F /* provider.d in Sources */, FC7BEDA40E8361E600161930 /* protocol.defs in Sources */, + E49F2499125D48D80057C971 /* resolver.c in Sources */, + E44EBE3E1251659900645D88 /* init.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, - FC7BEDA60E8361E600161930 /* shims.c in Sources */, - 2EC9C9B80E8809EF00E2499A /* legacy.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, @@ -350,112 +881,198 @@ 965CD6350F3E806200D4E28D /* benchmark.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, 96032E4B0F5CC8C700241C5F /* time.c in Sources */, + 5AAB45C010D30B79004407EA /* data.c in Sources */, + 5A27262610F26F1900751FBC /* io.c in Sources */, + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; + E49F24C7125D57FA0057C971 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E43570BA126E93380097AB9F /* provider.d in Sources */, + E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, + E49F24C9125D57FA0057C971 /* resolver.c in Sources */, + E49F24CA125D57FA0057C971 /* init.c in Sources */, + E49F24CB125D57FA0057C971 /* queue.c in Sources */, + E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, + E49F24CD125D57FA0057C971 /* once.c in Sources */, + E49F24CE125D57FA0057C971 /* apply.c in Sources */, + E49F24CF125D57FA0057C971 /* object.c in Sources */, + E49F24D0125D57FA0057C971 /* benchmark.c in Sources */, + E49F24D1125D57FA0057C971 /* source.c in Sources */, + E49F24D2125D57FA0057C971 /* time.c in Sources */, + E49F24D3125D57FA0057C971 /* data.c in Sources */, + E49F24D4125D57FA0057C971 /* io.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4EC11AC12514302000DDBD1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E417A38412A472C4004D659D /* provider.d in Sources */, + E44EBE5412517EBE00645D88 /* protocol.defs in Sources */, + E49F2424125D3C970057C971 /* resolver.c in Sources */, + E44EBE5512517EBE00645D88 /* init.c in Sources */, + E4EC11AE12514302000DDBD1 /* queue.c in Sources */, + E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, + E4EC11B012514302000DDBD1 /* once.c in Sources */, + E4EC11B112514302000DDBD1 /* apply.c in Sources */, + E4EC11B212514302000DDBD1 /* object.c in Sources */, + E4EC11B312514302000DDBD1 /* benchmark.c in Sources */, + E4EC11B412514302000DDBD1 /* source.c in Sources */, + E4EC11B512514302000DDBD1 /* time.c in Sources */, + E4EC11B712514302000DDBD1 /* data.c in Sources */, + E4EC11B812514302000DDBD1 /* io.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4EC121812514715000DDBD1 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 
2147483647; + files = ( + E417A38512A472C5004D659D /* provider.d in Sources */, + E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, + E49F2423125D3C960057C971 /* resolver.c in Sources */, + E44EBE5712517EBE00645D88 /* init.c in Sources */, + E4EC121A12514715000DDBD1 /* queue.c in Sources */, + E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, + E4EC121C12514715000DDBD1 /* once.c in Sources */, + E4EC121D12514715000DDBD1 /* apply.c in Sources */, + E4EC121E12514715000DDBD1 /* object.c in Sources */, + E4EC121F12514715000DDBD1 /* benchmark.c in Sources */, + E4EC122012514715000DDBD1 /* source.c in Sources */, + E4EC122112514715000DDBD1 /* time.c in Sources */, + E4EC122312514715000DDBD1 /* data.c in Sources */, + E4EC122412514715000DDBD1 /* io.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; /* End PBXSourcesBuildPhase section */ +/* Begin PBXTargetDependency section */ + C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = ddt; + targetProxy = C927F36810FD7F1A00C5AB8B /* PBXContainerItemProxy */; + }; + E4128E4A13B94BCE00ABB2CB /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D2AAC045055464E500DB518D /* libdispatch */; + targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; + }; + E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4EC118F12514302000DDBD1 /* libdispatch up resolved */; + targetProxy = E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */; + }; + E47D6ECD125FEBA10070D91C /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; + targetProxy = E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + /* Begin XCBuildConfiguration section */ 1DEB91ED08733DB70010E9CD /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; 
buildSettings = { - COPY_PHASE_STRIP = NO; - CURRENT_PROJECT_VERSION = "$(RC_ProjectSourceVersion)"; - EXECUTABLE_PREFIX = ""; - GCC_CW_ASM_SYNTAX = NO; - GCC_ENABLE_CPP_EXCEPTIONS = NO; - GCC_ENABLE_CPP_RTTI = NO; - GCC_ENABLE_OBJC_EXCEPTIONS = NO; - GCC_OPTIMIZATION_LEVEL = s; - GCC_PREPROCESSOR_DEFINITIONS = "__DARWIN_NON_CANCELABLE=1"; - GENERATE_MASTER_OBJECT_FILE = NO; - INSTALL_PATH = /usr/local/lib/system; - LINK_WITH_STANDARD_LIBRARIES = NO; - OTHER_CFLAGS = ( - "-fno-unwind-tables", - "-fno-exceptions", - "-I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders", - "-fdiagnostics-show-option", - "-fsched-interblock", - "-freorder-blocks", - "-Xarch_x86_64", - "-momit-leaf-frame-pointer", - "-Xarch_i386", - "-momit-leaf-frame-pointer", - ); - OTHER_CFLAGS_debug = "-O0 -fstack-protector -fno-inline -DDISPATCH_DEBUG=1"; - PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch; - PRODUCT_NAME = libdispatch; - PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch; - SEPARATE_STRIP = NO; - VERSIONING_SYSTEM = "apple-generic"; - VERSION_INFO_PREFIX = __; }; name = Release; }; 1DEB91F108733DB70010E9CD /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 3F3C9357128E637B0042B1F7 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { - ALWAYS_SEARCH_USER_PATHS = NO; - ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; - BUILD_VARIANTS = ( - normal, - debug, - profile, - ); - COPY_PHASE_STRIP = NO; - DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; - GCC_ENABLE_PASCAL_STRINGS = NO; - GCC_OPTIMIZATION_LEVEL = s; - GCC_STRICT_ALIASING = YES; - GCC_SYMBOLS_PRIVATE_EXTERN = YES; - GCC_TREAT_WARNINGS_AS_ERRORS = YES; - GCC_WARN_64_TO_32_BIT_CONVERSION = YES; - GCC_WARN_ABOUT_MISSING_NEWLINE = YES; - GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES; - GCC_WARN_ABOUT_RETURN_TYPE = YES; - GCC_WARN_SHADOW = YES; - GCC_WARN_UNUSED_VARIABLE = YES; - 
LINK_WITH_STANDARD_LIBRARIES = YES; - ONLY_ACTIVE_ARCH = NO; - OTHER_CFLAGS = ( - "-I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders", - "-fdiagnostics-show-option", - "-fsched-interblock", - "-freorder-blocks", - "-Xarch_x86_64", - "-momit-leaf-frame-pointer", - "-Xarch_i386", - "-momit-leaf-frame-pointer", - ); - OTHER_CFLAGS_debug = "-O0 -fstack-protector -fno-inline -DDISPATCH_DEBUG=1"; - PREBINDING = NO; - STRIP_INSTALLED_PRODUCT = NO; - WARNING_CFLAGS = ( - "-Wall", - "-Wextra", - "-Waggregate-return", - "-Wfloat-equal", - "-Wpacked", - "-Wmissing-declarations", - "-Wstrict-overflow=4", - "-Wstrict-aliasing=2", - ); }; name = Release; }; - 721EB47A0F69D26F00845379 /* Release */ = { + 3F3C9358128E637B0042B1F7 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Debug; + }; + C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; buildSettings = { }; name = Release; }; - 7276FCBB0EB10E0F00F7F487 /* Release */ = { + C927F35C10FD7F0600C5AB8B /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Debug; + }; + E49F24D9125D57FA0057C971 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { }; name = Release; }; + E49F24DA125D57FA0057C971 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Debug; + }; + E4EB382D1089033000C33AD4 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; + buildSettings = { + BUILD_VARIANTS = debug; + ONLY_ACTIVE_ARCH = YES; + }; + name = Debug; + }; + E4EB382E1089033000C33AD4 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + E4EC11BD12514302000DDBD1 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + buildSettings = 
{ + DISPATCH_RESOLVED_VARIANT = up; + }; + name = Release; + }; + E4EC11BE12514302000DDBD1 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + buildSettings = { + DISPATCH_RESOLVED_VARIANT = up; + }; + name = Debug; + }; + E4EC122712514715000DDBD1 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + buildSettings = { + DISPATCH_RESOLVED_VARIANT = mp; + }; + name = Release; + }; + E4EC122812514715000DDBD1 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + buildSettings = { + DISPATCH_RESOLVED_VARIANT = mp; + }; + name = Debug; + }; /* End XCBuildConfiguration section */ /* Begin XCConfigurationList section */ @@ -463,6 +1080,7 @@ isa = XCConfigurationList; buildConfigurations = ( 1DEB91ED08733DB70010E9CD /* Release */, + E4EB382E1089033000C33AD4 /* Debug */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; @@ -471,22 +1089,52 @@ isa = XCConfigurationList; buildConfigurations = ( 1DEB91F108733DB70010E9CD /* Release */, + E4EB382D1089033000C33AD4 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 3F3C9356128E637B0042B1F7 /* Build configuration list for PBXAggregateTarget "libdispatch_Sim" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3F3C9357128E637B0042B1F7 /* Release */, + 3F3C9358128E637B0042B1F7 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C927F35B10FD7F0600C5AB8B /* Release */, + C927F35C10FD7F0600C5AB8B /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + 
}; + E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E49F24D9125D57FA0057C971 /* Release */, + E49F24DA125D57FA0057C971 /* Debug */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - 721EB4850F69D2A600845379 /* Build configuration list for PBXLegacyTarget "testbots" */ = { + E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( - 721EB47A0F69D26F00845379 /* Release */, + E4EC11BD12514302000DDBD1 /* Release */, + E4EC11BE12514302000DDBD1 /* Debug */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - 7276FCC80EB10E2300F7F487 /* Build configuration list for PBXLegacyTarget "test" */ = { + E4EC122612514715000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch mp resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( - 7276FCBB0EB10E0F00F7F487 /* Release */, + E4EC122712514715000DDBD1 /* Release */, + E4EC122812514715000DDBD1 /* Debug */, ); defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; diff --git a/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 000000000..23ad996c6 --- /dev/null +++ b/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,6 @@ + + + + + diff --git a/m4/atomic.m4 b/m4/atomic.m4 new file mode 100644 index 000000000..ba85004db --- /dev/null +++ b/m4/atomic.m4 @@ -0,0 +1,21 @@ +AC_DEFUN([DISPATCH_C_ATOMIC_BUILTINS], [ +# +# This is a bit subtle: on i386 systems without at least -march=i486 defined, +# certain built-in atomics fall back to depending on undefined symbols if +# their return values are used. 
+# +AC_CACHE_CHECK([for gcc atomic builtins],[dispatch_cv_atomic], +[AC_LINK_IFELSE([AC_LANG_PROGRAM([],[[ +int i, x =0; +i = __sync_add_and_fetch(&x,1); +return x;]])],[dispatch_cv_atomic=yes], + [saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -march=i486" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[[ + int i, x =0; + i = __sync_add_and_fetch(&x,1); + return x;]])],[CFLAGS="$saveCFLAGS" +dispatch_cv_atomic="-march=i486" +])])]) + +]) diff --git a/m4/blocks.m4 b/m4/blocks.m4 new file mode 100644 index 000000000..49ee2a364 --- /dev/null +++ b/m4/blocks.m4 @@ -0,0 +1,112 @@ +AC_DEFUN([DISPATCH_C_BLOCKS], [ +# +# Allow configure to be passed a path to the directory where it should look +# for the Blocks runtime library, if any. +# +AC_ARG_WITH([blocks-runtime], + [AS_HELP_STRING([--with-blocks-runtime], + [Specify path to the blocks runtime])], + [blocks_runtime=${withval} + LIBS="$LIBS -L$blocks_runtime"] +) + +# +# Detect compiler support for Blocks; perhaps someday -fblocks won't be +# required, in which case we'll need to change this. +# +AC_CACHE_CHECK([for C Blocks support], [dispatch_cv_cblocks], [ + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fblocks" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[(void)^{int i; i = 0; }();])], [ + CFLAGS="$saveCFLAGS" + dispatch_cv_cblocks="-fblocks" + ], [ + CFLAGS="$saveCFLAGS" + dispatch_cv_cblocks="no" + ]) +]) + +AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ + CBLOCKS_FLAGS="$dispatch_cv_cblocks" + + # + # It may be necessary to directly link the Blocks runtime on some + # systems, so give it a try if we can't link a C program that uses + # Blocks. We will want to remove this at somepoint, as really -fblocks + # should force that linkage already. 
+ # + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + CFLAGS="$saveCFLAGS" + have_cblocks=true +], [ + CBLOCKS_FLAGS="" + have_cblocks=false +]) +AM_CONDITIONAL(HAVE_CBLOCKS, $have_cblocks) +AC_SUBST([CBLOCKS_FLAGS]) + +# +# Because a different C++ compiler may be specified than C compiler, we have +# to do it again for C++. +# +AC_LANG_PUSH([C++]) +AC_CACHE_CHECK([for C++ Blocks support], [dispatch_cv_cxxblocks], [ + saveCXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS -fblocks" + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[(void)^{int i; i = 0; }();])], [ + CXXFLAGS="$saveCXXFLAGS" + dispatch_cv_cxxblocks="-fblocks" + ], [ + CXXFLAGS="$saveCXXFLAGS" + dispatch_cv_cxxblocks="no" + ]) +]) + +AS_IF([test "x$dispatch_cv_cxxblocks" != "xno"], [ + CXXBLOCKS_FLAGS="$dispatch_cv_cxxblocks" + + saveCXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + CXXFLAGS="$saveCXXFLAGS" + have_cxxblocks=true +], [ + CXXBLOCKS_FLAGS="" + have_cxxblocks=false +]) +AC_LANG_POP([C++]) +AM_CONDITIONAL(HAVE_CXXBLOCKS, $have_cxxblocks) +AC_SUBST([CXXBLOCKS_FLAGS]) +]) diff --git a/m4/pkg.m4 b/m4/pkg.m4 new file mode 100644 index 000000000..a0b9cd45d --- /dev/null +++ b/m4/pkg.m4 @@ -0,0 +1,155 @@ +# pkg.m4 - Macros to locate and utilise pkg-config. 
-*- Autoconf -*- +# +# Copyright © 2004 Scott James Remnant . +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# PKG_PROG_PKG_CONFIG([MIN-VERSION]) +# ---------------------------------- +AC_DEFUN([PKG_PROG_PKG_CONFIG], +[m4_pattern_forbid([^_?PKG_[A-Z_]+$]) +m4_pattern_allow([^PKG_CONFIG(_PATH)?$]) +AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl +if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then + AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) +fi +if test -n "$PKG_CONFIG"; then + _pkg_min_version=m4_default([$1], [0.9.0]) + AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) + if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + PKG_CONFIG="" + fi + +fi[]dnl +])# PKG_PROG_PKG_CONFIG + +# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# +# Check to see whether a particular set of modules exists. Similar +# to PKG_CHECK_MODULES(), but does not set variables or print errors. 
+# +# +# Similar to PKG_CHECK_MODULES, make sure that the first instance of +# this or PKG_CHECK_MODULES is called, or make sure to call +# PKG_CHECK_EXISTS manually +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_EXISTS], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +if test -n "$PKG_CONFIG" && \ + AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then + m4_ifval([$2], [$2], [:]) +m4_ifvaln([$3], [else + $3])dnl +fi]) + + +# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) +# --------------------------------------------- +m4_define([_PKG_CONFIG], +[if test -n "$$1"; then + pkg_cv_[]$1="$$1" + elif test -n "$PKG_CONFIG"; then + PKG_CHECK_EXISTS([$3], + [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`], + [pkg_failed=yes]) + else + pkg_failed=untried +fi[]dnl +])# _PKG_CONFIG + +# _PKG_SHORT_ERRORS_SUPPORTED +# ----------------------------- +AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG]) +if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then + _pkg_short_errors_supported=yes +else + _pkg_short_errors_supported=no +fi[]dnl +])# _PKG_SHORT_ERRORS_SUPPORTED + + +# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], +# [ACTION-IF-NOT-FOUND]) +# +# +# Note that if there is a possibility the first call to +# PKG_CHECK_MODULES might not happen, you should be sure to include an +# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac +# +# +# -------------------------------------------------------------- +AC_DEFUN([PKG_CHECK_MODULES], +[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl +AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl +AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl + +pkg_failed=no +AC_MSG_CHECKING([for $1]) + +_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) +_PKG_CONFIG([$1][_LIBS], [libs], [$2]) + +m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS +and $1[]_LIBS to avoid the need to call pkg-config. 
+See the pkg-config man page for more details.]) + +if test $pkg_failed = yes; then + _PKG_SHORT_ERRORS_SUPPORTED + if test $_pkg_short_errors_supported = yes; then + $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1` + else + $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1` + fi + # Put the nasty error message in config.log where it belongs + echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD + + ifelse([$4], , [AC_MSG_ERROR(dnl +[Package requirements ($2) were not met: + +$$1_PKG_ERRORS + +Consider adjusting the PKG_CONFIG_PATH environment variable if you +installed software in a non-standard prefix. + +_PKG_TEXT +])], + [AC_MSG_RESULT([no]) + $4]) +elif test $pkg_failed = untried; then + ifelse([$4], , [AC_MSG_FAILURE(dnl +[The pkg-config script could not be found or is too old. Make sure it +is in your PATH or set the PKG_CONFIG environment variable to the full +path to pkg-config. + +_PKG_TEXT + +To get pkg-config, see .])], + [$4]) +else + $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS + $1[]_LIBS=$pkg_cv_[]$1[]_LIBS + AC_MSG_RESULT([yes]) + ifelse([$3], , :, [$3]) +fi[]dnl +])# PKG_CHECK_MODULES diff --git a/man/Makefile.am b/man/Makefile.am new file mode 100644 index 000000000..f57453aa4 --- /dev/null +++ b/man/Makefile.am @@ -0,0 +1,89 @@ +# +# +# + +dist_man3_MANS= \ + dispatch.3 \ + dispatch_after.3 \ + dispatch_api.3 \ + dispatch_apply.3 \ + dispatch_async.3 \ + dispatch_data_create.3 \ + dispatch_group_create.3 \ + dispatch_io_create.3 \ + dispatch_io_read.3 \ + dispatch_object.3 \ + dispatch_once.3 \ + dispatch_queue_create.3 \ + dispatch_read.3 \ + dispatch_semaphore_create.3 \ + dispatch_source_create.3 \ + dispatch_time.3 + +EXTRA_DIST= \ + dispatch_benchmark.3 + +# +# Install man page hardlinks. Is there a better way to do this in automake? 
+# + +LN=ln + +install-data-hook: + cd $(DESTDIR)$(mandir)/man3 && \ + $(LN) -f dispatch_after.3 dispatch_after_f.3 && \ + $(LN) -f dispatch_apply.3 dispatch_apply_f.3 && \ + $(LN) -f dispatch_async.3 dispatch_sync.3 && \ + $(LN) -f dispatch_async.3 dispatch_async_f.3 && \ + $(LN) -f dispatch_async.3 dispatch_sync_f.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_enter.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_leave.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_wait.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_notify.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_notify_f.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_async.3 && \ + $(LN) -f dispatch_group_create.3 dispatch_group_async_f.3 && \ + $(LN) -f dispatch_object.3 dispatch_retain.3 && \ + $(LN) -f dispatch_object.3 dispatch_release.3 && \ + $(LN) -f dispatch_object.3 dispatch_suspend.3 && \ + $(LN) -f dispatch_object.3 dispatch_resume.3 && \ + $(LN) -f dispatch_object.3 dispatch_get_context.3 && \ + $(LN) -f dispatch_object.3 dispatch_set_context.3 && \ + $(LN) -f dispatch_object.3 dispatch_set_finalizer_f.3 && \ + $(LN) -f dispatch_once.3 dispatch_once_f.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_queue_get_label.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_get_current_queue.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_get_global_queue.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_get_main_queue.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_main.3 && \ + $(LN) -f dispatch_queue_create.3 dispatch_set_target_queue.3 && \ + $(LN) -f dispatch_semaphore_create.3 dispatch_semaphore_signal.3 && \ + $(LN) -f dispatch_semaphore_create.3 dispatch_semaphore_wait.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler_f.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_cancel_handler.3 && \ + $(LN) -f 
dispatch_source_create.3 dispatch_source_set_cancel_handler_f.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_cancel.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_testcancel.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_get_handle.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_get_mask.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_get_data.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_merge_data.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_timer.3 && \ + $(LN) -f dispatch_time.3 dispatch_walltime.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_create_concat.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_create_subrange.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_create_map.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_apply.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_copy_subrange.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_copy_region.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_get_size.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_copy_subrange.3 && \ + $(LN) -f dispatch_data_create.3 dispatch_data_empty.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_create_with_path.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_set_high_water.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_set_low_water.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_set_interval.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_close.3 && \ + $(LN) -f dispatch_io_read.3 dispatch_io_write.3 && \ + $(LN) -f dispatch_read.3 dispatch_write.3 diff --git a/man/dispatch.3 b/man/dispatch.3 index c3618635b..c55be968f 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch 3 .Os Darwin @@ -27,12 +27,18 @@ for more information. 
The dispatch framework also provides functions to monitor underlying system events and automatically submit event handler blocks to dispatch queues. .Sh SEE ALSO +.Xr dispatch_after 3 , +.Xr dispatch_api 3 , +.Xr dispatch_apply 3 , .Xr dispatch_async 3 , +.Xr dispatch_benchmark 3 , +.Xr dispatch_data_create 3, +.Xr dispatch_group_create 3 , +.Xr dispatch_io_create 3 , +.Xr dispatch_io_read 3 , .Xr dispatch_object 3 , +.Xr dispatch_once 3 , .Xr dispatch_queue_create 3 , -.Xr dispatch_group_create 3 , +.Xr dispatch_semaphore_create 3 , .Xr dispatch_source_create 3 , -.Xr dispatch_benchmark 3 , -.Xr dispatch_time 3 , -.Xr dispatch_apply 3 , -.Xr dispatch_once 3 . +.Xr dispatch_time 3 diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 index 404aefb4a..4c55214da 100644 --- a/man/dispatch_after.3 +++ b/man/dispatch_after.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_after 3 .Os Darwin @@ -13,7 +13,7 @@ .Fc .Ft void .Fo dispatch_after_f -.Fa "dispatch_time_t when" "dispatch_queue_t queue" "void *context" "void (^function)(void *)" +.Fa "dispatch_time_t when" "dispatch_queue_t queue" "void *context" "void (*function)(void *)" .Fc .Sh DESCRIPTION The @@ -35,6 +35,9 @@ or For a more detailed description about submitting blocks to queues, see .Xr dispatch_async 3 . .Sh CAVEATS +.Fn dispatch_after +retains the passed queue. +.Pp Specifying .Vt DISPATCH_TIME_NOW as the @@ -42,16 +45,19 @@ as the parameter is supported, but is not as efficient as calling .Fn dispatch_async . +.Pp The result of passing .Vt DISPATCH_TIME_FOREVER as the .Fa when parameter is undefined. +.Pp .Sh FUNDAMENTALS The .Fn dispatch_after function is a wrapper around .Fn dispatch_after_f . 
.Sh SEE ALSO +.Xr dispatch 3 , .Xr dispatch_async 3 , .Xr dispatch_time 3 diff --git a/man/dispatch_api.3 b/man/dispatch_api.3 index a39fa64d6..912338672 100644 --- a/man/dispatch_api.3 +++ b/man/dispatch_api.3 @@ -22,7 +22,7 @@ which the block will be submitted should immediately precede the block argument .Pp .Bd -literal -offset indent read_async(file, callback_queue, ^{ - printf("received callback.\n"); + printf("received callback.\\n"); }); .Ed .Pp diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 48fb395c5..5a43a0a13 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_apply 3 .Os Darwin @@ -20,7 +20,7 @@ The .Fn dispatch_apply function provides data-level concurrency through a "for (;;)" loop like primitive: .Bd -literal -dispatch_queue_t the_queue = dispatch_get_concurrent_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT); +dispatch_queue_t the_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); size_t iterations = 10; // 'idx' is zero indexed, just like: @@ -47,7 +47,7 @@ Calculating the optimal stride is best left to experimentation. Start with a stride of one and work upwards until the desired performance is achieved (perhaps using a power of two search): .Bd -literal -#define STRIDE 3 +#define STRIDE 3 dispatch_apply(count / STRIDE, queue, ^(size_t idx) { size_t j = idx * STRIDE; @@ -62,6 +62,17 @@ for (i = count - (count % STRIDE); i < count; i++) { printf("%zu\\n", i); } .Ed +.Sh IMPLIED REFERENCES +Synchronous functions within the dispatch framework hold an implied reference +on the target queue. 
In other words, the synchronous function borrows the +reference of the calling function (this is valid because the calling function +is blocked waiting for the result of the synchronous function, and therefore +cannot modify the reference count of the target queue until after the +synchronous function has returned). +.Pp +This is in contrast to asynchronous functions which must retain both the block +and target queue for the duration of the asynchronous operation (as the calling +function may immediately release its interest in these objects). .Sh FUNDAMENTALS Conceptually, .Fn dispatch_apply @@ -74,7 +85,19 @@ The .Fn dispatch_apply function is a wrapper around .Fn dispatch_apply_f . +.Sh CAVEATS +Unlike +.Fn dispatch_async , +a block submitted to +.Fn dispatch_apply +is expected to be either independent or dependent +.Em only +on work already performed in lower-indexed invocations of the block. If +the block's index dependency is non-linear, it is recommended to +use a for-loop around invocations of +.Fn dispatch_async . .Sh SEE ALSO +.Xr dispatch 3 , .Xr dispatch_async 3 , -.Xr dispatch_semaphore_create 3 , -.Xr dispatch_queue_create 3 +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 4b874fb2b..9c09bb2a6 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_async 3 .Os Darwin @@ -115,14 +115,14 @@ async_read(object_t obj, // This is just an example of nested blocks. 
dispatch_retain(destination_queue); - + dispatch_async(obj->queue, ^{ ssize_t r = read(obj->fd, where, bytes); int err = errno; dispatch_async(destination_queue, ^{ reply_block(r, err); - }); + }); dispatch_release(destination_queue); }); } @@ -131,7 +131,7 @@ async_read(object_t obj, While .Fn dispatch_sync can replace a lock, it cannot replace a recursive lock. Unlike locks, queues -support both asynchronous and synchrnous operations, and those operations are +support both asynchronous and synchronous operations, and those operations are ordered by definition. A recursive call to .Fn dispatch_sync causes a simple deadlock as the currently executing block waits for the next @@ -171,7 +171,7 @@ against queue B which runs on thread Y which recursively calls against queue A, which deadlocks both examples. This is bug-for-bug compatible with nontrivial pthread usage. In fact, nontrivial reentrancy is impossible to support in recursive locks once the ultimate level of reentrancy is deployed -(IPC or RPC). +(IPC or RPC). .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference on the target queue. In other words, the synchronous function borrows the @@ -228,7 +228,8 @@ when it is invoked on the target .Fa queue . 
.Pp .Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_apply 3 , .Xr dispatch_once 3 , .Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 , -.Xr dispatch_apply 3 +.Xr dispatch_semaphore_create 3 diff --git a/man/dispatch_benchmark.3 b/man/dispatch_benchmark.3 index 0890aff31..f3e113262 100644 --- a/man/dispatch_benchmark.3 +++ b/man/dispatch_benchmark.3 @@ -11,6 +11,10 @@ .Fo dispatch_benchmark .Fa "size_t count" "void (^block)(void)" .Fc +.Ft uint64_t +.Fo dispatch_benchmark_f +.Fa "size_t count" "void *context" "void (*function)(void *)" +.Fc .Sh DESCRIPTION The .Fn dispatch_benchmark diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 new file mode 100644 index 000000000..96965f2df --- /dev/null +++ b/man/dispatch_data_create.3 @@ -0,0 +1,206 @@ +.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.Dd December 1, 2010 +.Dt dispatch_data_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_data_create , +.Nm dispatch_data_create_concat , +.Nm dispatch_data_create_subrange , +.Nm dispatch_data_create_map , +.Nm dispatch_data_apply , +.Nm dispatch_data_copy_region , +.Nm dispatch_data_get_size +.Nd create and manipulate dispatch data objects +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_data_t +.Fo dispatch_data_create +.Fa "const void* buffer" +.Fa "size_t size" +.Fa "dispatch_queue_t queue" +.Fa "dispatch_block_t destructor" +.Fc +.Ft dispatch_data_t +.Fo dispatch_data_create_concat +.Fa "dispatch_data_t data1" +.Fa "dispatch_data_t data2" +.Fc +.Ft dispatch_data_t +.Fo dispatch_data_create_subrange +.Fa "dispatch_data_t data" +.Fa "size_t offset" +.Fa "size_t length" +.Fc +.Ft dispatch_data_t +.Fo dispatch_data_create_map +.Fa "dispatch_data_t data" +.Fa "const void **buffer_ptr" +.Fa "size_t *size_ptr" +.Fc +.Ft bool +.Fo dispatch_data_apply +.Fa "dispatch_data_t data" +.Fa "bool (^applier)(dispatch_data_t, size_t, const void *, size_t)" +.Fc +.Ft dispatch_data_t +.Fo dispatch_data_copy_region +.Fa "dispatch_data_t data" +.Fa "size_t location" 
+.Fa "size_t *offset_ptr" +.Fc +.Ft size_t +.Fo dispatch_data_get_size +.Fa "dispatch_data_t data" +.Fc +.Vt dispatch_data_t dispatch_data_empty ; +.Sh DESCRIPTION +Dispatch data objects are opaque containers of bytes that represent one or more +regions of memory. They are created either from memory buffers managed by the +application or the system or from other dispatch data objects. Dispatch data +objects are immutable and the memory regions they represent are required to +remain unchanged for the lifetime of all data objects that reference them. +Dispatch data objects avoid copying the represented memory as much as possible. +Multiple data objects can represent the same memory regions or subsections +thereof. +.Sh CREATION +The +.Fn dispatch_data_create +function creates a new dispatch data object of given +.Fa size +from a +.Fa buffer . +The provided +.Fa destructor +block will be submitted to the specified +.Fa queue +when the object reaches the end of its lifecycle, indicating that the system no +longer references the +.Fa buffer . +This allows the application to deallocate +the associated storage. The +.Fa queue +argument is ignored if one of the following predefined destructors is passed: +.Bl -tag -width DISPATCH_DATA_DESTRUCTOR_DEFAULT -compact -offset indent +.It DISPATCH_DATA_DESTRUCTOR_FREE +indicates that the provided buffer can be deallocated with +.Xr free 3 +directly. +.It DISPATCH_DATA_DESTRUCTOR_DEFAULT +indicates that the provided buffer is not managed by the application and should +be copied into memory managed and automatically deallocated by the system. +.El +.Pp +The +.Fn dispatch_data_create_concat +function creates a new data object representing the concatenation of the memory +regions represented by the provided data objects. +.Pp +The +.Fn dispatch_data_create_subrange +function creates a new data object representing the sub-region of the provided +.Fa data +object specified by the +.Fa offset +and +.Fa length +parameters. 
+.Pp +The +.Fn dispatch_data_create_map +function creates a new data object by mapping the memory represented by the +provided +.Fa data +object as a single contiguous memory region (moving or copying memory as +necessary). If the +.Fa buffer_ptr +and +.Fa size_ptr +references are not +.Dv NULL , +they are filled with the location and extent of the contiguous region, allowing +direct read access to the mapped memory. These values are valid only as long as +the newly created object has not been released. +.Sh ACCESS +The +.Fn dispatch_data_apply +function provides read access to represented memory without requiring it to be +mapped as a single contiguous region. It traverses the memory regions +represented by the +.Fa data +argument in logical order, invokes the specified +.Fa applier +block for each region and returns a boolean indicating whether traversal +completed successfully. The +.Fa applier +block is passed the following arguments for each memory region and returns a +boolean indicating whether traversal should continue: +.Bl -tag -width "dispatch_data_t rgn" -compact -offset indent +.It Fa "dispatch_data_t rgn" +data object representing the region +.It Fa "size_t offset" +logical position of the region in +.Fa data +.It Vt "const void *loc" +memory location of the region +.It Vt "size_t size" +extent of the region +.El +The +.Fa rgn +data object is released by the system when the +.Fa applier +block returns. +The associated memory location +.Fa loc +is valid only as long as +.Fa rgn +has not been deallocated; if +.Fa loc +is needed outside of the +.Fa applier +block, the +.Fa rgn +object must be retained in the block. +.Pp +The +.Fn dispatch_data_copy_region +function finds the contiguous memory region containing the logical position +specified by the +.Fa location +argument among the regions represented by the provided +.Fa data +object and returns a newly created copy of the data object representing that +region. 
The variable specified by the +.Fa offset_ptr +argument is filled with the logical position where the returned object starts +in the +.Fa data +object. +.Pp +The +.Fn dispatch_data_get_size +function returns the logical size of the memory region or regions represented +by the provided +.Fa data +object. +.Sh EMPTY DATA OBJECT +The +.Vt dispatch_data_empty +object is the global singleton object representing a zero-length memory region. +It is a valid input to any dispatch_data functions that take data object +parameters. +.Sh MEMORY MODEL +Dispatch data objects are retained and released via calls to +.Fn dispatch_retain +and +.Fn dispatch_release . +Data objects passed as arguments to a dispatch data +.Sy create +or +.Sy copy +function can be released when the function returns. The newly created object +holds implicit references to their constituent memory regions as necessary. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_object 3 , +.Xr dispatch_io_read 3 diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index 5cca4ca8a..1dae0efcf 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -84,7 +84,7 @@ associated with the by submitting the .Fa block to the specified -.Fa queue +.Fa queue once all blocks associated with the .Fa group have completed. @@ -142,8 +142,9 @@ and .Fn dispatch_group_notify_f respectively. .Sh SEE ALSO -.Xr dispatch_object 3 , +.Xr dispatch 3 , .Xr dispatch_async 3 , -.Xr dispatch_time 3 , +.Xr dispatch_object 3 , .Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_time 3 diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 new file mode 100644 index 000000000..90874424c --- /dev/null +++ b/man/dispatch_io_create.3 @@ -0,0 +1,238 @@ +.\" Copyright (c) 2010 Apple Inc. All rights reserved. 
+.Dd December 1, 2010 +.Dt dispatch_io_create 3 +.Os Darwin +.Sh NAME +.Nm dispatch_io_create , +.Nm dispatch_io_create_with_path , +.Nm dispatch_io_close , +.Nm dispatch_io_set_high_water , +.Nm dispatch_io_set_low_water , +.Nm dispatch_io_set_interval +.Nd open, close and configure dispatch I/O channels +.Sh SYNOPSIS +.Fd #include +.Ft dispatch_io_t +.Fo dispatch_io_create +.Fa "dispatch_io_type_t type" +.Fa "int fd" +.Fa "dispatch_queue_t queue" +.Fa "void (^cleanup_handler)(int error)" +.Fc +.Ft dispatch_io_t +.Fo dispatch_io_create_with_path +.Fa "dispatch_io_type_t type" +.Fa "const char *path" +.Fa "int oflag" +.Fa "mode_t mode" +.Fa "dispatch_queue_t queue" +.Fa "void (^cleanup_handler)(int error)" +.Fc +.Ft void +.Fo dispatch_io_close +.Fa "dispatch_io_t channel" +.Fa "dispatch_io_close_flags_t flags" +.Fc +.Ft void +.Fo dispatch_io_set_high_water +.Fa "dispatch_io_t channel" +.Fa "size_t high_water" +.Fc +.Ft void +.Fo dispatch_io_set_low_water +.Fa "dispatch_io_t channel" +.Fa "size_t low_water" +.Fc +.Ft void +.Fo dispatch_io_set_interval +.Fa "dispatch_io_t channel" +.Fa "uint64_t interval" +.Fa "dispatch_io_interval_flags_t flags" +.Fc +.Sh DESCRIPTION +The dispatch I/O framework is an API for asynchronous read and write I/O +operations. It is an application of the ideas and idioms present in the +.Xr dispatch 3 +framework to device I/O. Dispatch I/O enables an application to more easily +avoid blocking I/O operations and allows it to more directly express its I/O +requirements than by using the raw POSIX file API. Dispatch I/O will make a +best effort to optimize how and when asynchronous I/O operations are performed +based on the capabilities of the targeted device. +.Pp +This page provides details on how to create and configure dispatch I/O +channels. Reading from and writing to these channels is covered in the +.Xr dispatch_io_read 3 +page. 
The dispatch I/O framework also provides the convenience functions +.Xr dispatch_read 3 +and +.Xr dispatch_write 3 +for uses that do not require the full functionality provided by I/O channels. +.Sh FUNDAMENTALS +A dispatch I/O channel represents the asynchronous I/O policy applied to a file +descriptor and encapsulates it for the purposes of ownership tracking while +I/O operations are ongoing. +.Sh CHANNEL TYPES +Dispatch I/O channels can have one of the following types: +.Bl -tag -width DISPATCH_IO_STREAM -compact -offset indent +.It DISPATCH_IO_STREAM +channels that represent a stream of bytes and do not support reads and writes +at arbitrary offsets, such as pipes or sockets. Channels of this type perform +read and write operations sequentially at the current file pointer position and +ignore any offset specified. Depending on the underlying file descriptor, read +operations may be performed simultaneously with write operations. +.It DISPATCH_IO_RANDOM +channels that represent random access files on disk. Only supported for +seekable file descriptors and paths. Channels of this type may perform +submitted read and write operations concurrently at the specified offset +(interpreted relative to the position of the file pointer when the channel was +created). +.El +.Sh CHANNEL OPENING AND CLOSING +The +.Fn dispatch_io_create +and +.Fn dispatch_io_create_with_path +functions create a dispatch I/O channel of provided +.Fa type +from a file descriptor +.Fa fd +or a pathname, respectively. They can be thought of as +analogous to the +.Xr fdopen 3 +POSIX function and the +.Xr fopen 3 +function in the standard C library. For a channel created from a +pathname, the provided +.Fa path , +.Fa oflag +and +.Fa mode +parameters will be passed to +.Xr open 2 +when the first I/O operation on the channel is ready to execute. 
The provided +.Fa cleanup_handler +block will be submitted to the specified +.Fa queue +when all I/O operations on the channel have completed and is is closed or +reaches the end of its lifecycle. If an error occurs during channel creation, +the +.Fa cleanup_handler +block will be submitted immediately and passed an +.Fa error +parameter with the POSIX error encountered. After creating a dispatch I/O +channel from a file descriptor, the application must take care not to modify +that file descriptor until the associated +.Fa cleanup_handler +is invoked, see +.Sx "FILEDESCRIPTOR OWNERSHIP" +for details. +.Pp +The +.Fn dispatch_io_close +function closes a dispatch I/O channel to new submissions of I/O operations. If +.Dv DISPATCH_IO_STOP +is passed in the +.Fa flags +parameter, the system will in addition not perform the I/O operations already +submitted to the channel that are still pending and will make a best effort to +interrupt any ongoing operations. Handlers for operations so affected will be +passed the +.Er ECANCELED +error code, along with any partial results. +.Sh CHANNEL CONFIGURATION +Dispatch I/O channels have high-water mark, low-water mark and interval +configuration settings that determine if and when partial results from I/O +operations are delivered via their associated I/O handlers. +.Pp +The +.Fn dispatch_io_set_high_water +and +.Fn dispatch_io_set_low_water +functions configure the water mark settings of a +.Fa channel . +The system will read +or write at least the number of bytes specified by +.Fa low_water +before submitting an I/O handler with partial results, and will make a best +effort to submit an I/O handler as soon as the number of bytes read or written +reaches +.Fa high_water . +.Pp +The +.Fn dispatch_io_set_interval +function configures the time +.Fa interval +at which I/O handlers are submitted (measured in nanoseconds). 
If +.Dv DISPATCH_IO_STRICT_INTERVAL +is passed in the +.Fa flags +parameter, the interval will be strictly observed even if there is an +insufficient amount of data to deliver; otherwise delivery will be skipped for +intervals where the amount of available data is inferior to the channel's +low-water mark. Note that the system may defer enqueueing interval I/O handlers +by a small unspecified amount of leeway in order to align with other system +activity for improved system performance or power consumption. +.Pp +.Sh DATA DELIVERY +The size of data objects passed to I/O handlers for a channel will never be +larger than the high-water mark set on the channel; it will also never be +smaller than the low-water mark, except in the following cases: +.Bl -dash -offset indent -compact +.It +the final handler invocation for an I/O operation +.It +EOF was encountered +.It +the channel has an interval with the +.Dv DISPATCH_IO_STRICT_INTERVAL +flag set +.El +Bear in mind that dispatch I/O channels will typically deliver amounts of data +significantly higher than the low-water mark. The default value for the +low-water mark is unspecified, but must be assumed to allow intermediate +handler invocations. The default value for the high-water mark is +unlimited (i.e.\& +.Dv SIZE_MAX ) . +Channels that require intermediate results of fixed size should have both the +low-water and the high-water mark set to that size. Channels that do not wish +to receive any intermediate results should have the low-water mark set to +.Dv SIZE_MAX . +.Pp +.Sh FILEDESCRIPTOR OWNERSHIP +When an application creates a dispatch I/O channel from a file descriptor with +the +.Fn dispatch_io_create +function, the system takes control of that file descriptor until the channel is +closed, an error occurs on the file descriptor or all references to the channel +are released. 
At that time the channel's cleanup handler will be enqueued and +control over the file descriptor relinquished, making it safe for the +application to +.Xr close 2 +the file descriptor. While a file descriptor is under the control of a dispatch +I/O channel, file descriptor flags such as +.Dv O_NONBLOCK +will be modified by the system on behalf of the application. It is an error for +the application to modify a file descriptor directly while it is under the +control of a dispatch I/O channel, but it may create further I/O channels +from that file descriptor or use the +.Xr dispatch_read 3 +and +.Xr dispatch_write 3 +convenience functions with that file descriptor. If multiple I/O channels have +been created from the same file descriptor, all the associated cleanup handlers +will be submitted together once the last channel has been closed resp.\& all +references to those channels have been released. If convenience functions have +also been used on that file descriptor, submission of their handlers will be +tied to the submission of the channel cleanup handlers as well. +.Sh MEMORY MODEL +Dispatch I/O channel objects are retained and released via calls to +.Fn dispatch_retain +and +.Fn dispatch_release . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_io_read 3 , +.Xr dispatch_object 3 , +.Xr dispatch_read 3 , +.Xr fopen 3 , +.Xr open 2 diff --git a/man/dispatch_io_read.3 b/man/dispatch_io_read.3 new file mode 100644 index 000000000..51c3b1c3e --- /dev/null +++ b/man/dispatch_io_read.3 @@ -0,0 +1,151 @@ +.\" Copyright (c) 2010 Apple Inc. All rights reserved. 
+.Dd December 1, 2010 +.Dt dispatch_io_read 3 +.Os Darwin +.Sh NAME +.Nm dispatch_io_read , +.Nm dispatch_io_write +.Nd submit read and write operations to dispatch I/O channels +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_io_read +.Fa "dispatch_io_t channel" +.Fa "off_t offset" +.Fa "size_t length" +.Fa "dispatch_queue_t queue" +.Fa "void (^handler)(bool done, dispatch_data_t data, int error)" +.Fc +.Ft void +.Fo dispatch_io_write +.Fa "dispatch_io_t channel" +.Fa "off_t offset" +.Fa "dispatch_data_t data" +.Fa "dispatch_queue_t queue" +.Fa "void (^handler)(bool done, dispatch_data_t data, int error)" +.Fc +.Sh DESCRIPTION +The dispatch I/O framework is an API for asynchronous read and write I/O +operations. It is an application of the ideas and idioms present in the +.Xr dispatch 3 +framework to device I/O. Dispatch I/O enables an application to more easily +avoid blocking I/O operations and allows it to more directly express its I/O +requirements than by using the raw POSIX file API. Dispatch I/O will make a +best effort to optimize how and when asynchronous I/O operations are performed +based on the capabilities of the targeted device. +.Pp +This page provides details on how to read from and write to dispatch I/O +channels. Creation and configuration of these channels is covered in the +.Xr dispatch_io_create 3 +page. The dispatch I/O framework also provides the convenience functions +.Xr dispatch_read 3 +and +.Xr dispatch_write 3 +for uses that do not require the full functionality provided by I/O channels. +.Pp +.Sh FUNDAMENTALS +The +.Fn dispatch_io_read +and +.Fn dispatch_io_write +functions are used to perform asynchronous read and write operations on +dispatch I/O channels. They can be thought of as asynchronous versions of the +.Xr fread 3 +and +.Xr fwrite 3 +functions in the standard C library. +.Sh READ OPERATIONS +The +.Fn dispatch_io_read +function schedules an I/O read operation on the specified dispatch I/O +.Va channel . 
+As results from the read operation become available, the provided +.Va handler +block will be submitted to the specified +.Va queue . +The block will be passed a dispatch data object representing the data that has +been read since the handler's previous invocation. +.Pp +The +.Va offset +parameter indicates where the read operation should begin. For a channel of +.Dv DISPATCH_IO_RANDOM +type it is interpreted relative to the position of the file pointer when the +channel was created, for a channel of +.Dv DISPATCH_IO_STREAM +type it is ignored and the read operation will begin at the current file +pointer position. +.Pp +The +.Va length +parameter indicates the number of bytes that should be read from the I/O +channel. Pass +.Dv SIZE_MAX +to keep reading until EOF is encountered (for a channel created from a +disk-based file this happens when reading past the end of the physical file). +.Sh WRITE OPERATIONS +The +.Fn dispatch_io_write +function schedules an I/O write operation on the specified dispatch I/O +.Va channel . +As the write operation progresses, the provided +.Va handler +block will be submitted to the specified +.Va queue . +The block will be passed a dispatch data object representing the data that +remains to be written as part of this I/O operation. +.Pp +The +.Va offset +parameter indicates where the write operation should begin. It is interpreted +as for read operations above. +.Pp +The +.Va data +parameter specifies the location and amount of data to be written, encapsulated +as a dispatch data object. The object is retained by the system until the write +operation is complete. +.Sh I/O HANDLER BLOCKS +Dispatch I/O handler blocks submitted to a channel via the +.Fn dispatch_io_read +or +.Fn dispatch_io_write +functions will be executed one or more times depending on system load and the +channel's configuration settings (see +.Xr dispatch_io_create 3 +for details). 
The handler block need not be reentrant safe, +no new I/O handler instance is submitted until the previously enqueued handler +block has returned. +.Pp +The dispatch +.Va data +object passed to an I/O handler block will be released by the system when the +block returns, if access to the memory buffer it represents is needed outside +of the handler, the handler block must retain the data object or create a new +(e.g.\& concatenated) data object from it (see +.Xr dispatch_data_create 3 +for details). +.Pp +Once an I/O handler block is invoked with the +.Va done +flag set, the associated I/O operation is complete and that handler block will +not be run again. If an unrecoverable error occurs while performing the I/O +operation, the handler block will be submitted with the +.Va done +flag set and the appropriate POSIX error code in the +.Va error +parameter. An invocation of a handler block with the +.Va done +flag set, zero +.Va error +and +.Va data +set to +.Vt dispatch_data_empty +indicates that the I/O operation has encountered EOF. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_data_create 3 , +.Xr dispatch_io_create 3 , +.Xr dispatch_read 3 , +.Xr fread 3 diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index b60831ac7..29c1621b5 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_object 3 .Os Darwin @@ -58,7 +58,7 @@ The invocation of blocks on dispatch queues or dispatch sources may be suspended or resumed with the functions .Fn dispatch_suspend and -.Fn dispatch_resume +.Fn dispatch_resume respectively. The dispatch framework always checks the suspension status before executing a block, but such changes never affect a block during execution (non-preemptive). @@ -69,8 +69,9 @@ a dispatch source is undefined. 
.Pp .Em Important : suspension applies to all aspects of the dispatch object life cycle, including -the finalizer function and cancellation handler. Therefore it is important to -balance calls to +the finalizer function and cancellation handler. Suspending an object causes it +to be retained and resuming an object causes it to be released. Therefore it is +important to balance calls to .Fn dispatch_suspend and .Fn dispatch_resume @@ -79,21 +80,26 @@ released. The result of releasing all references to a dispatch object while in a suspended state is undefined. .Sh CONTEXT POINTERS Dispatch queues and sources support supplemental context pointers. The value of -the context point may be retrieved and updated with +the context pointer may be retrieved and updated with .Fn dispatch_get_context and .Fn dispatch_set_context respectively. The .Fn dispatch_set_finalizer_f -specifies an optional per-object finalizer function to that is invoked -asynchronously when the last reference to the object is released. This gives the +specifies an optional per-object finalizer function that is invoked +asynchronously if the context pointer is not NULL when the last +reference to the object is released. +This gives the application an opportunity to free the context data associated with the object. +The finalizer will be run on the object's target queue. .Pp The result of getting or setting the context of an object that is not a dispatch queue or a dispatch source is undefined. .Sh SEE ALSO +.Xr dispatch 3 , .Xr dispatch_group_create 3 , .Xr dispatch_queue_create 3 , .Xr dispatch_semaphore_create 3 , -.Xr dispatch_source_create 3 +.Xr dispatch_source_create 3 , +.Xr dispatch_set_target_queue 3 diff --git a/man/dispatch_once.3 b/man/dispatch_once.3 index da218968c..2118a23bb 100644 --- a/man/dispatch_once.3 +++ b/man/dispatch_once.3 @@ -42,3 +42,5 @@ The .Fn dispatch_once function is a wrapper around .Fn dispatch_once_f . 
+.Sh SEE ALSO +.Xr dispatch 3 diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index d11c1c1d1..9b3e6a911 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2008 .Dt dispatch_queue_create 3 .Os Darwin @@ -53,11 +53,13 @@ All blocks submitted to dispatch queues are dequeued in FIFO order. By default, queues created with .Fn dispatch_queue_create wait for the previously dequeued block to complete before dequeuing the next -block. This FIFO completion behavior is sometimes simply described as a "serial queue." +block. This FIFO completion behavior is sometimes simply described as a "serial +queue." All memory writes performed by a block dispatched to a serial queue are +guaranteed to be visible to subsequent blocks dispatched to the same queue. Queues are not bound to any specific thread of execution and blocks submitted -to independent queues may execute concurrently. -Queues, like all dispatch objects, are reference counted and newly created -queues have a reference count of one. +to independent queues may execute concurrently. Queues, like all dispatch +objects, are reference counted and newly created queues have a reference count +of one. .Pp The optional .Fa label @@ -80,13 +82,13 @@ argument is reserved for future use and must be NULL. Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and -.Fn dispatch_resume +.Fn dispatch_resume respectively. Suspension is checked prior to block execution and is .Em not preemptive. .Sh MAIN QUEUE -The dispatch framework provides a default serial queue for the application to use. -This queue is accessed via +The dispatch framework provides a default serial queue for the application to +use. This queue is accessed via .Fn dispatch_get_main_queue . 
Programs must call .Fn dispatch_main @@ -98,8 +100,8 @@ section for exceptions.) Unlike the main queue or queues allocated with .Fn dispatch_queue_create , the global concurrent queues schedule blocks as soon as threads become -available (non-FIFO completion order). The global concurrent queues represent -three priority bands: +available (non-FIFO completion order). Four global concurrent queues are +provided, representing the following priority bands: .Bl -bullet -compact -offset indent .It DISPATCH_QUEUE_PRIORITY_HIGH @@ -107,12 +109,26 @@ DISPATCH_QUEUE_PRIORITY_HIGH DISPATCH_QUEUE_PRIORITY_DEFAULT .It DISPATCH_QUEUE_PRIORITY_LOW +.It +DISPATCH_QUEUE_PRIORITY_BACKGROUND .El .Pp -Blocks submitted to the high priority global queue will be invoked before those -submitted to the default or low priority global queues. Blocks submitted to the -low priority global queue will only be invoked if no blocks are pending on the -default or high priority queues. +The priority of a global concurrent queue controls the scheduling priority of +the threads created by the system to invoke the blocks submitted to that queue. +Global queues with lower priority will be scheduled for execution after all +global queues with higher priority have been scheduled. Additionally, items on +the background priority global queue will execute on threads with background +state as described in +.Xr setpriority 2 +(i.e.\& disk I/O is throttled and the thread's scheduling priority is set to +lowest value). +.Pp +Use the +.Fn dispatch_get_global_queue +function to obtain the global queue of given priority. The +.Fa flags +argument is reserved for future use and must be zero. Passing any value other +than zero may result in a NULL return value. .Pp .Sh RETURN VALUES The @@ -131,13 +147,13 @@ function returns the default main queue. .Pp The .Fn dispatch_get_current_queue -function always returns a valid queue. When called from within a block submitted -to a dispatch queue, that queue will be returned. 
If this function is called from -the main thread before +function always returns a valid queue. When called from within a block +submitted to a dispatch queue, that queue will be returned. If this function is +called from the main thread before .Fn dispatch_main is called, then the result of .Fn dispatch_get_main_queue -is returned. Otherwise, the result of +is returned. The result of .Fo dispatch_get_global_queue .Fa DISPATCH_QUEUE_PRIORITY_DEFAULT .Fa 0 @@ -151,55 +167,73 @@ function never returns. The .Fn dispatch_set_target_queue function updates the target queue of the given dispatch object. The target -queue of an object is responsible for processing the object. Currently only -dispatch queues and dispatch sources are supported by this function. The result -of using -.Fn dispatch_set_target_queue -with any other dispatch object type is undefined. +queue of an object is responsible for processing the object. .Pp The new target queue is retained by the given object before the previous target -queue is released. The new target queue will take effect between block -executions, but not in the middle of any existing block executions +queue is released. The new target queue setting will take effect between block +executions on the object, but not in the middle of any existing block executions (non-preemptive). .Pp -The priority of a dispatch queue is inherited by its target queue. +The default target queue of all dispatch objects created by the application is +the default priority global concurrent queue. To reset an object's target queue +to the default, pass the +.Dv DISPATCH_TARGET_QUEUE_DEFAULT +constant to +.Fn dispatch_set_target_queue . +.Pp +The priority of a dispatch queue is inherited from its target queue. In order to change the priority of a queue created with .Fn dispatch_queue_create , use the .Fn dispatch_get_global_queue -function to obtain a target queue of the desired priority. The -.Fa flags -argument is reserved for future use and must be zero. 
Passing any value other -than zero may result in a -.Vt NULL -return value. +function to obtain a target queue of the desired priority. +.Pp +Blocks submitted to a serial queue whose target queue is another serial queue +will not be invoked concurrently with blocks submitted to the target queue or +to any other queue with that same target queue. .Pp The target queue of a dispatch source specifies where its event handler and cancellation handler blocks will be submitted. See .Xr dispatch_source_create 3 for more information about dispatch sources. .Pp -The result of passing the main queue or a global concurrent queue to the first +The target queue of a dispatch I/O channel specifies the priority of the global +queue where its I/O operations are executed. See +.Xr dispatch_io_create 3 +for more information about dispatch I/O channels. +.Pp +For all other dispatch object types, the only function of the target queue is +to determine where an object's finalizer function is invoked. +.Pp +The result of passing the main queue or a global concurrent queue as the first argument of .Fn dispatch_set_target_queue is undefined. .Pp -Directly or indirectly setting the target queue of a dispatch queue to itself is undefined. +Directly or indirectly setting the target queue of a dispatch queue to itself is +undefined. .Sh CAVEATS -Code cannot make any assumptions about the queue returned by -.Fn dispatch_get_current_queue . -The returned queue may have arbitrary policies that may surprise code that tries -to schedule work with the queue. The list of policies includes, but is not -limited to, queue width (i.e. serial vs. concurrent), scheduling priority, -security credential or filesystem configuration. Therefore, +The .Fn dispatch_get_current_queue -.Em MUST -only be used for identity tests or debugging. +function is only recommended for debugging and logging purposes. 
Code must not +make any assumptions about the queue returned, unless it is one of the global +queues or a queue the code has itself created. The returned queue may have +arbitrary policies that may surprise code that tries to schedule work with the +queue. The list of policies includes, but is not limited to, queue width (i.e. +serial vs. concurrent), scheduling priority, security credential or filesystem +configuration. +.Pp +It is equally unsafe for code to assume that synchronous execution onto a queue +is safe from deadlock if that queue is not the one returned by +.Fn dispatch_get_current_queue . .Sh COMPATIBILITY Cocoa applications need not call .Fn dispatch_main . -Blocks submitted to the main queue will be executed as part of the "common modes" -of the application's main NSRunLoop or CFRunLoop. +Blocks submitted to the main queue will be executed as part of the "common +modes" of the application's main NSRunLoop or CFRunLoop. +However, blocks submitted to the main queue in applications using +.Fn dispatch_main +are not guaranteed to execute on the main thread. .Pp The dispatch framework is a pure C level API. As a result, it does not catch exceptions generated by higher level languages such as Objective-C or C++. @@ -299,7 +333,7 @@ Note that in the above example .Va errno is a per-thread variable and must be copied out explicitly as the block may be invoked on different thread of execution than the caller. Another example of -per-thread data that would need to be copied is the use of +per-thread data that would need to be copied is the use of .Fn getpwnam instead of .Fn getpwnam_r . @@ -313,6 +347,7 @@ may persist from the block until back to the caller. Great care should be taken not to accidentally rely on this side-effect. 
.Pp .Sh SEE ALSO -.Xr dispatch_object 3 , +.Xr dispatch 3 , .Xr dispatch_async 3 , +.Xr dispatch_object 3 , .Xr dispatch_source_create 3 diff --git a/man/dispatch_read.3 b/man/dispatch_read.3 new file mode 100644 index 000000000..38e88dea8 --- /dev/null +++ b/man/dispatch_read.3 @@ -0,0 +1,123 @@ +.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.Dd December 1, 2010 +.Dt dispatch_read 3 +.Os Darwin +.Sh NAME +.Nm dispatch_read , +.Nm dispatch_write +.Nd asynchronously read from and write to file descriptors +.Sh SYNOPSIS +.Fd #include +.Ft void +.Fo dispatch_read +.Fa "int fd" +.Fa "size_t length" +.Fa "dispatch_queue_t queue" +.Fa "void (^handler)(dispatch_data_t data, int error)" +.Fc +.Ft void +.Fo dispatch_write +.Fa "int fd" +.Fa "dispatch_data_t data" +.Fa "dispatch_queue_t queue" +.Fa "void (^handler)(dispatch_data_t data, int error)" +.Fc +.Sh DESCRIPTION +The +.Fn dispatch_read +and +.Fn dispatch_write +functions asynchronously read from and write to POSIX file descriptors. They +can be thought of as asynchronous, callback-based versions of the +.Fn fread +and +.Fn fwrite +functions provided by the standard C library. They are convenience functions +based on the +.Xr dispatch_io_read 3 +and +.Xr dispatch_io_write 3 +functions, intended for simple one-shot read or write requests. Multiple +requests on the same file descriptor are better handled with the full underlying +dispatch I/O channel functions. +.Sh BEHAVIOR +The +.Fn dispatch_read +function schedules an asynchronous read operation on the file descriptor +.Va fd . +Once the file descriptor is readable, the system will read as much data as is +currently available, up to the specified +.Va length , +starting at the current file pointer position. The given +.Va handler +block will be submitted to +.Va queue +when the operation completes or an error occurs. The block will be passed a +dispatch +.Va data +object with the result of the read operation. 
If an error occurred while +reading from the file descriptor, the +.Va error +parameter to the block will be set to the appropriate POSIX error code and +.Va data +will contain any data that could be read successfully. If the file pointer +position is at end-of-file, empty +.Va data +and zero +.Va error +will be passed to the handler block. +.Pp +The +.Fn dispatch_write +function schedules an asynchronous write operation on the file descriptor +.Va fd . +The system will attempt to write the entire contents of the provided +.Va data +object to +.Va fd +at the current file pointer position. The given +.Va handler +block will be submitted to +.Va queue +when the operation completes or an error occurs. If the write operation +completed successfully, the +.Va error +parameter to the block will be set to zero, otherwise it will be set to the +appropriate POSIX error code and the +.Va data +parameter will contain any data that could not be written. +.Sh CAVEATS +The +.Va data +object passed to a +.Va handler +block is released by the system when the block returns. If +.Va data +is needed outside of the handler block, it must concatenate, copy, or retain +it. +.Pp +Once an asynchronous read or write operation has been submitted on a file +descriptor +.Va fd , +the system takes control of that file descriptor until the +.Va handler +block is executed. During this time the application must not manipulate +.Va fd +directly, in particular it is only safe to close +.Va fd +from the handler block (or after it has returned). +.Pp +If multiple asynchronous read or write operations are submitted to the same +file descriptor, they will be performed in order, but their handlers will only +be submitted once all operations have completed and control over the file +descriptor has been relinquished. For details on this and on the interaction +with dispatch I/O channels created from the same file descriptor, see +.Sx FILEDESCRIPTOR OWNERSHIP +in +.Xr dispatch_io_create 3 . 
+.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_data_create 3 , +.Xr dispatch_io_create 3 , +.Xr dispatch_io_read 3 , +.Xr fread 3 diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 12506423e..096e0e387 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_semaphore_create 3 .Os Darwin @@ -33,7 +33,8 @@ functions. .Sh COMPLETION SYNCHRONIZATION If the .Fa count -parameter is equal to zero, then the semaphore is useful for synchronizing completion of work. +parameter is equal to zero, then the semaphore is useful for synchronizing +completion of work. For example: .Bd -literal -offset indent sema = dispatch_semaphore_create(0); @@ -50,7 +51,8 @@ dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); .Sh FINITE RESOURCE POOL If the .Fa count -parameter is greater than zero, then the semaphore is useful for managing a finite pool of resources. +parameter is greater than zero, then the semaphore is useful for managing a +finite pool of resources. For example, a library that wants to limit Unix descriptor usage: .Bd -literal -offset indent sema = dispatch_semaphore_create(getdtablesize() / 4); @@ -81,7 +83,8 @@ Otherwise, zero is returned. .Pp The .Fn dispatch_semaphore_wait -function returns zero upon success and non-zero after the timeout expires. If the timeout is DISPATCH_TIME_FOREVER, then +function returns zero upon success and non-zero after the timeout expires. If +the timeout is DISPATCH_TIME_FOREVER, then .Fn dispatch_semaphore_wait waits forever and always returns zero. .Sh MEMORY MODEL @@ -90,6 +93,15 @@ Dispatch semaphores are retained and released via calls to and .Fn dispatch_release . .Sh CAVEATS +Unbalanced dispatch semaphores cannot be released. 
+For a given semaphore, calls to +.Fn dispatch_semaphore_signal +and +.Fn dispatch_semaphore_wait +must be balanced before +.Fn dispatch_release +is called on it. +.Pp Dispatch semaphores are strict counting semaphores. In other words, dispatch semaphores do not saturate at any particular value. Saturation can be achieved through atomic compare-and-swap logic. @@ -111,4 +123,5 @@ saturating_semaphore_wait(dispatch_semaphore_t dsema, int *sent) } .Ed .Sh SEE ALSO +.Xr dispatch 3 , .Xr dispatch_object 3 diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 0a38cd294..1d774a967 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_source_create 3 .Os Darwin @@ -38,7 +38,7 @@ .Fo dispatch_source_cancel .Fa "dispatch_source_t source" .Fc -.Ft void +.Ft long .Fo dispatch_source_testcancel .Fa "dispatch_source_t source" .Fc @@ -67,7 +67,7 @@ .Fa "uint64_t leeway" .Fc .Sh DESCRIPTION -Dispatch event sources may be used to monitor a variety of system objects and +Dispatch event sources may be used to monitor a variety of system objects and events including file descriptors, mach ports, processes, virtual filesystem nodes, signal delivery and timers. .Pp @@ -81,9 +81,17 @@ with calls to .Fn dispatch_retain and .Fn dispatch_release -respectively. Newly created sources are created in a suspended state. After the -source has been configured by setting an event handler, cancellation handler, -context, etc., the source must be activated by a call to +respectively. The +.Fa queue +parameter specifies the target queue of the new source object, it will +be retained by the source object. Pass the +.Dv DISPATCH_TARGET_QUEUE_DEFAULT +constant to use the default target queue (the default priority global +concurrent queue). 
+.Pp +Newly created sources are created in a suspended state. After the source has +been configured by setting an event handler, cancellation handler, context, +etc., the source must be activated by a call to .Fn dispatch_resume before any events will be delivered. .Pp @@ -117,21 +125,21 @@ and .Fa mask arguments to .Fn dispatch_source_create -and the return values of the +and the return values of the .Fn dispatch_source_get_handle , .Fn dispatch_source_get_mask , and -.Fn dispatch_source_get_data +.Fn dispatch_source_get_data functions should be interpreted according to the type of the dispatch source. .Pp -The +The .Fn dispatch_source_get_handle function returns the underlying handle to the dispatch source (i.e. file descriptor, mach port, process identifer, etc.). The result of this function may be cast directly to the underlying type. .Pp -The +The .Fn dispatch_source_get_mask function returns the set of flags that were specified at source creation time via the @@ -175,7 +183,7 @@ block need not be reentrant safe, as it is not resubmitted to the target .Fa queue until any prior invocation for that dispatch source has completed. -When the hander is set, the dispatch source will perform a +When the handler is set, the dispatch source will perform a .Fn Block_copy on the .Fa handler @@ -205,7 +213,7 @@ a cancellation handler is required for file descriptor and mach port based sources in order to safely close the descriptor or destroy the port. Closing the descriptor or port before the cancellation handler has run may result in a race condition: if a new descriptor is allocated with the same value as the recently -cosed descriptor while the source's event handler is still running, the event +closed descriptor while the source's event handler is still running, the event handler may read/write data to the wrong descriptor. .Pp .Sh DISPATCH SOURCE TYPES @@ -216,16 +224,16 @@ the interpretation of their parameters and returned data. 
.Vt DISPATCH_SOURCE_TYPE_DATA_OR .Pp Sources of this type allow applications to manually trigger the source's event -handler via a call to +handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or logic OR (based on the source's type), and the event handler block will be submitted to the source's target queue. The -.Fa mask -and .Fa data -are application defined. These sources have no +is application defined. These sources have no .Fa handle +or +.Fa mask and zero should be used. .Pp .Vt DISPATCH_SOURCE_TYPE_MACH_SEND @@ -268,7 +276,7 @@ is the process identifier (pid_t) of the process to monitor and the may be one or more of the following: .Bl -tag -width "XXDISPATCH_PROC_SIGNAL" -compact -offset indent .It \(bu DISPATCH_PROC_EXIT -The process has exited and is available to +The process has exited and is available to .Xr wait 2 . .It \(bu DISPATCH_PROC_FORK The process has created one or more child processes. @@ -277,7 +285,7 @@ The process has become another executable image via a call to .Xr execve 2 or .Xr posix_spawn 2 . -.It \(bu DISPATCH_PROC_REAP +.It \(bu DISPATCH_PROC_REAP The process status has been collected by its parent process via .Xr wait 2 . .It \(bu DISPATCH_PROC_SIGNAL @@ -309,7 +317,7 @@ will be performed. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O and handle any truncated reads or error conditions that may occur. See -.Xr fnctl 2 +.Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK flag on a file descriptor. @@ -378,7 +386,7 @@ for more information.) The in nanoseconds, specifies the period at which the timer should repeat. All timers will repeat indefinitely until .Fn dispatch_source_cancel -is called. The +is called. 
The .Fa leeway , in nanoseconds, is a hint to the system that it may defer the timer in order to align with other system activity for improved system performance or reduced @@ -387,7 +395,7 @@ every 5 minutes with a leeway of up to 30 seconds.) Note that some latency is to be expected for all timers even when a value of zero is used. .Pp .Em Note : -Under the C language, untyped numbers default to the +Under the C language, untyped numbers default to the .Vt int type. This can lead to truncation bugs when arithmetic operations with other numbers are expected to generate a @@ -404,7 +412,7 @@ as a suffix. For example: Sources of this type monitor the virtual filesystem nodes for state changes. The .Fa handle -is a file descriptor (int) referencing the node to monitor, and +is a file descriptor (int) referencing the node to monitor, and the .Fa mask may be one or more of the following: @@ -423,7 +431,7 @@ The link count on the referenced node has changed .It \(bu DISPATCH_VNODE_RENAME The referenced node was renamed .It \(bu DISPATCH_VNODE_REVOKE -Access to the referenced node was revoked via +Access to the referenced node was revoked via .Xr revoke 2 or the underlying fileystem was unmounted. .El @@ -445,7 +453,7 @@ is unused and should be zero. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O and handle any truncated reads or error conditions that may occur. See -.Xr fnctl 2 +.Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK flag on a file descriptor. diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 06d78e8e4..6d1887315 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -105,6 +105,7 @@ These functions return an abstract value for use with or .Fn dispatch_semaphore_wait . 
.Sh SEE ALSO +.Xr dispatch 3 , .Xr dispatch_after 3 , .Xr dispatch_group_create 3 , .Xr dispatch_semaphore_create 3 diff --git a/private/Makefile.am b/private/Makefile.am new file mode 100644 index 000000000..488ef520e --- /dev/null +++ b/private/Makefile.am @@ -0,0 +1,10 @@ +# +# +# + +noinst_HEADERS= \ + benchmark.h \ + private.h \ + queue_private.h \ + source_private.h + diff --git a/src/benchmark.h b/private/benchmark.h similarity index 91% rename from src/benchmark.h rename to private/benchmark.h index b77af4586..df42a8a33 100644 --- a/src/benchmark.h +++ b/private/benchmark.h @@ -2,19 +2,19 @@ * Copyright (c) 2008-2009 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -64,17 +64,18 @@ __BEGIN_DECLS * 3) Code bound by critical sections may be inferred by retrograde changes in * performance as concurrency is increased. * 3a) Intentional: locks, mutexes, and condition variables. - * 3b) Accidental: unrelated and frequently modified data on the same cache-line. + * 3b) Accidental: unrelated and frequently modified data on the same + * cache-line. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL2 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW uint64_t dispatch_benchmark(size_t count, void (^block)(void)); #endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL3 DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW uint64_t dispatch_benchmark_f(size_t count, void *ctxt, void (*func)(void *)); diff --git a/src/private.h b/private/private.h similarity index 51% rename from src/private.h rename to private/private.h index 8d817fe9f..9bb0e0190 100644 --- a/src/private.h +++ b/private/private.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -27,14 +27,28 @@ #ifndef __DISPATCH_PRIVATE__ #define __DISPATCH_PRIVATE__ +#ifdef __APPLE__ +#include +#endif + +#if TARGET_OS_MAC #include #include #include +#endif +#if HAVE_UNISTD_H #include +#endif +#if HAVE_SYS_CDEFS_H #include -#include +#endif #include +#define DISPATCH_NO_LEGACY 1 +#ifdef DISPATCH_LEGACY // +#error "Dispatch legacy API unavailable." 
+#endif + #ifndef __DISPATCH_BUILDING_DISPATCH__ #include_next @@ -51,10 +65,6 @@ #include #include -#ifndef DISPATCH_NO_LEGACY -#include -#endif - #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ @@ -64,50 +74,72 @@ __BEGIN_DECLS -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); +#if TARGET_OS_MAC #define DISPATCH_COCOA_COMPAT 1 #if DISPATCH_COCOA_COMPAT -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t _dispatch_get_main_queue_port_4CF(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern void (*dispatch_begin_thread_4GC)(void); +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +void (*dispatch_begin_thread_4GC)(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +void (*dispatch_end_thread_4GC)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern void (*dispatch_end_thread_4GC)(void); +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT +void (*dispatch_no_worker_threads_4GC)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern void *(*_dispatch_begin_NSAutoReleasePool)(void); +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +void *(*_dispatch_begin_NSAutoReleasePool)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern void (*_dispatch_end_NSAutoReleasePool)(void *); +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT +void (*_dispatch_end_NSAutoReleasePool)(void *); + +#define _dispatch_time_after_nsec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t)) +#define _dispatch_time_after_usec(t) \ + 
dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_time_after_msec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_time_after_sec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) #endif +#endif /* TARGET_OS_MAC */ /* pthreads magic */ -DISPATCH_NOTHROW void dispatch_atfork_prepare(void); -DISPATCH_NOTHROW void dispatch_atfork_parent(void); -DISPATCH_NOTHROW void dispatch_atfork_child(void); -DISPATCH_NOTHROW void dispatch_init_pthread(pthread_t); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); +#if TARGET_OS_MAC /* * Extract the context pointer from a mach message trailer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL +DISPATCH_NOTHROW void * dispatch_mach_msg_get_context(mach_msg_header_t *msg); +#endif /* TARGET_OS_MAC */ __END_DECLS diff --git a/src/queue_private.h b/private/queue_private.h similarity index 52% rename from src/queue_private.h rename to private/queue_private.h index 85f87c010..5ec36d09c 100644 --- a/src/queue_private.h +++ b/private/queue_private.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -46,42 +46,27 @@ enum { DISPATCH_QUEUE_OVERCOMMIT = 0x2ull, }; -#define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) - -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_barrier_sync(dispatch_queue_t queue, dispatch_block_t block); -#endif - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *context, dispatch_function_t work); - -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); -#endif - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t work); +#define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) /*! * @function dispatch_queue_set_width * * @abstract - * Set the width of concurrency for a given queue. The default width of a - * privately allocated queue is one. + * Set the width of concurrency for a given queue. The width of a serial queue + * is one. + * + * @discussion + * This SPI is DEPRECATED and will be removed in a future release. + * Uses of this SPI to make a queue concurrent by setting its width to LONG_MAX + * should be replaced by passing DISPATCH_QUEUE_CONCURRENT to + * dispatch_queue_create(). + * Uses of this SPI to limit queue concurrency are not recommended and should + * be replaced by alternative mechanisms such as a dispatch semaphore created + * with the desired concurrency width. * * @param queue - * The queue to adjust. Passing the main queue, a default concurrent queue or - * any other default queue will be ignored. 
+ * The queue to adjust. Passing the main queue or a global concurrent queue + * will be ignored. * * @param width * The new maximum width of concurrency depending on available resources. @@ -89,19 +74,43 @@ dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t * Negative values are magic values that map to automatic width values. * Unknown negative values default to DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS. */ -#define DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS -1 +#define DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS -1 #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_queue_set_width(dispatch_queue_t dq, long width); +dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED +/*! + * @function dispatch_set_current_target_queue + * + * @abstract + * Synchronously sets the target queue of the current serial queue. + * + * @discussion + * This SPI is provided for a limited purpose case when calling + * dispatch_set_target_queue() is not sufficient. It works similarly to + * dispatch_set_target_queue() except the target queue of the current queue + * is immediately changed so that pending blocks on the queue will run on the + * new target queue. Calling this from outside of a block executing on a serial + * queue is undefined. + * + * @param queue + * The new target queue for the object. The queue is retained, and the + * previous target queue, if any, is released. + * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue + * to the default target queue for the given object type. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_set_current_target_queue(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_queue_offsets_s { +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end const uint16_t dqo_version; const uint16_t dqo_label; diff --git a/src/source_private.h b/private/source_private.h similarity index 62% rename from src/source_private.h rename to private/source_private.h index 9e45cc1bf..576f64a75 100644 --- a/src/source_private.h +++ b/private/source_private.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -39,8 +39,17 @@ * The handle is a process identifier (pid_t). */ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -extern const struct dispatch_source_type_s _dispatch_source_type_vfs; +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; + +/*! 
+ * @const DISPATCH_SOURCE_TYPE_VM + * @discussion A dispatch source that monitors virtual memory + * The mask is a mask of desired events from dispatch_source_vm_flags_t. + */ +#define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) +__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; /*! * @enum dispatch_source_vfs_flags_t @@ -91,11 +100,17 @@ enum { /*! * @enum dispatch_source_mach_send_flags_t * - * @constant DISPATCH_MACH_SEND_DELETED - * The receive right corresponding to the given send right was destroyed. + * @constant DISPATCH_MACH_SEND_POSSIBLE + * The mach port corresponding to the given send right has space available + * for messages. Delivered only once a mach_msg() to that send right with + * options MACH_SEND_MSG|MACH_SEND_TIMEOUT|MACH_SEND_NOTIFY has returned + * MACH_SEND_TIMED_OUT (and not again until the next such mach_msg() timeout). + * NOTE: The source must have registered the send right for monitoring with the + * system for such a mach_msg() to arm the send-possible notifcation, so + * the initial send attempt must occur from a source registration handler. */ enum { - DISPATCH_MACH_SEND_DELETED = 0x2, + DISPATCH_MACH_SEND_POSSIBLE = 0x8, }; /*! @@ -109,20 +124,39 @@ enum { DISPATCH_PROC_REAP = 0x10000000, }; +/*! + * @enum dispatch_source_vm_flags_t + * + * @constant DISPATCH_VM_PRESSURE + * The VM has experienced memory pressure. + */ + +enum { + DISPATCH_VM_PRESSURE = 0x80000000, +}; + +#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 +#define DISPATCH_VM_PRESSURE DISPATCH_VNODE_ATTRIB +#endif + __BEGIN_DECLS +#if TARGET_OS_MAC /*! * @typedef dispatch_mig_callback_t * * @abstract * The signature of a function that handles Mach message delivery and response. 
*/ -typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); +typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, + mach_msg_header_t *reply); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t -dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback); +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, + dispatch_mig_callback_t callback); +#endif __END_DECLS diff --git a/resolver/resolved.h b/resolver/resolved.h new file mode 100644 index 000000000..bb9a82d59 --- /dev/null +++ b/resolver/resolved.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + diff --git a/resolver/resolver.c b/resolver/resolver.c new file mode 100644 index 000000000..8b390b4a4 --- /dev/null +++ b/resolver/resolver.c @@ -0,0 +1,20 @@ +/* + * Copyright (c) 2010 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + diff --git a/resolver/resolver.h b/resolver/resolver.h new file mode 100644 index 000000000..5b1cd04d9 --- /dev/null +++ b/resolver/resolver.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2010 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_RESOLVERS__ +#define __DISPATCH_RESOLVERS__ + + +#endif diff --git a/src/Makefile.am b/src/Makefile.am new file mode 100644 index 000000000..20b2baa9c --- /dev/null +++ b/src/Makefile.am @@ -0,0 +1,73 @@ +# +# +# + +lib_LTLIBRARIES=libdispatch.la + +libdispatch_la_SOURCES= \ + apply.c \ + benchmark.c \ + data.c \ + init.c \ + io.c \ + object.c \ + once.c \ + queue.c \ + semaphore.c \ + source.c \ + time.c \ + protocol.defs \ + provider.d \ + data_internal.h \ + internal.h \ + io_internal.h \ + object_internal.h \ + queue_internal.h \ + semaphore_internal.h \ + shims.h \ + source_internal.h \ + trace.h \ + shims/atomic.h \ + shims/getprogname.h \ + shims/hw_config.h \ + shims/malloc_zone.h \ + shims/perfmon.h \ + shims/time.h \ + shims/tsd.h + +INCLUDES=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private \ + @APPLE_LIBC_SOURCE_PATH@ @APPLE_LIBCLOSURE_SOURCE_PATH@ @APPLE_XNU_SOURCE_PATH@ + +libdispatch_la_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) +libdispatch_la_CFLAGS+=$(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) + +libdispatch_la_LDFLAGS=-avoid-version + +if HAVE_DARWIN_LD +libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 -Wl,-current_version,$(VERSION) +endif + +CLEANFILES= + +if USE_MIG +BUILT_SOURCES= \ + protocolUser.c \ + protocol.h \ + protocolServer.c \ + protocolServer.h + +nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) +CLEANFILES+=$(BUILT_SOURCES) + +%User.c %.h %Server.c %Server.h: $(abs_srcdir)/%.defs + $(MIG) -user $*User.c -header $*.h \ + -server $*Server.c -sheader $*Server.h $< +endif + +if USE_XNU_SOURCE +# hack for pthread_machdep.h's #include +$(libdispatch_la_OBJECTS): $(abs_srcdir)/System +$(abs_srcdir)/System: + $(LN_S) -fh "@APPLE_XNU_SOURCE_SYSTEM_PATH@" System +CLEANFILES+=System +endif diff --git a/src/apply.c b/src/apply.c index 2c51eb270..9a6343906 100644 --- a/src/apply.c +++ b/src/apply.c @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. 
+ * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" @@ -24,81 +24,155 @@ // local thread to be sufficiently away to avoid cache-line contention with the // busy 'da_index' variable. // -// NOTE: 'char' arrays cause GCC to insert buffer overflow detection logic +// NOTE: 'char' arrays cause GCC to insert buffer overflow detection logic struct dispatch_apply_s { - long _da_pad0[DISPATCH_CACHELINE_SIZE / sizeof(long)]; - void (*da_func)(void *, size_t); - void *da_ctxt; - size_t da_iterations; - size_t da_index; - uint32_t da_thr_cnt; - dispatch_semaphore_t da_sema; - long _da_pad1[DISPATCH_CACHELINE_SIZE / sizeof(long)]; + long _da_pad0[DISPATCH_CACHELINE_SIZE / sizeof(long)]; + void (*da_func)(void *, size_t); + void *da_ctxt; + size_t da_iterations; + size_t da_index; + uint32_t da_thr_cnt; + _dispatch_thread_semaphore_t da_sema; + dispatch_queue_t da_queue; + long _da_pad1[DISPATCH_CACHELINE_SIZE / sizeof(long)]; }; -static void -_dispatch_apply2(void *_ctxt) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_invoke(void *ctxt) { - struct dispatch_apply_s *da = _ctxt; + struct dispatch_apply_s *da = ctxt; size_t const iter = da->da_iterations; typeof(da->da_func) const func = da->da_func; - void *const ctxt = da->da_ctxt; + void *const da_ctxt = da->da_ctxt; size_t idx; 
_dispatch_workitem_dec(); // this unit executes many items + // Make nested dispatch_apply fall into serial case rdar://problem/9294578 + _dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul); // Striding is the responsibility of the caller. - while (fastpath((idx = dispatch_atomic_inc(&da->da_index) - 1) < iter)) { - func(ctxt, idx); + while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) { + _dispatch_client_callout2(da_ctxt, idx, func); _dispatch_workitem_inc(); } + _dispatch_thread_setspecific(dispatch_apply_key, NULL); - if (dispatch_atomic_dec(&da->da_thr_cnt) == 0) { - dispatch_semaphore_signal(da->da_sema); + dispatch_atomic_release_barrier(); + if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) { + _dispatch_thread_semaphore_signal(da->da_sema); } } +DISPATCH_NOINLINE static void -_dispatch_apply_serial(void *context) +_dispatch_apply2(void *ctxt) { - struct dispatch_apply_s *da = context; + _dispatch_apply_invoke(ctxt); +} + +static void +_dispatch_apply3(void *ctxt) +{ + struct dispatch_apply_s *da = ctxt; + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + + _dispatch_thread_setspecific(dispatch_queue_key, da->da_queue); + _dispatch_apply_invoke(ctxt); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +} + +static void +_dispatch_apply_serial(void *ctxt) +{ + struct dispatch_apply_s *da = ctxt; size_t idx = 0; _dispatch_workitem_dec(); // this unit executes many items do { - da->da_func(da->da_ctxt, idx); + _dispatch_client_callout2(da->da_ctxt, idx, da->da_func); _dispatch_workitem_inc(); } while (++idx < da->da_iterations); } -#ifdef __BLOCKS__ -void -dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) +// 256 threads should be good enough for the short to mid term +#define DISPATCH_APPLY_MAX_CPUS 256 + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_f2(dispatch_queue_t dq, struct dispatch_apply_s *da, + dispatch_function_t func) { - struct Block_basic 
*bb = (void *)work; + struct dispatch_apply_dc_s { + DISPATCH_CONTINUATION_HEADER(dispatch_apply_dc_s); + } da_dc[DISPATCH_APPLY_MAX_CPUS]; + size_t i; - dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + for (i = 0; i < da->da_thr_cnt - 1; i++) { + da_dc[i].do_vtable = NULL; + da_dc[i].do_next = &da_dc[i + 1]; + da_dc[i].dc_func = func; + da_dc[i].dc_ctxt = da; + } + + da->da_sema = _dispatch_get_thread_semaphore(); + + _dispatch_queue_push_list(dq, (void *)&da_dc[0], + (void *)&da_dc[da->da_thr_cnt - 2]); + // Call the first element directly + _dispatch_apply2(da); + _dispatch_workitem_inc(); + + _dispatch_thread_semaphore_wait(da->da_sema); + _dispatch_put_thread_semaphore(da->da_sema); } -#endif -// 256 threads should be good enough for the short to mid term -#define DISPATCH_APPLY_MAX_CPUS 256 +static void +_dispatch_apply_redirect(void *ctxt) +{ + struct dispatch_apply_s *da = ctxt; + uint32_t da_width = 2 * (da->da_thr_cnt - 1); + dispatch_queue_t dq = da->da_queue, rq = dq, tq; + + do { + uint32_t running = dispatch_atomic_add2o(rq, dq_running, da_width); + uint32_t width = rq->dq_width; + if (slowpath(running > width)) { + uint32_t excess = width > 1 ? 
running - width : da_width; + for (tq = dq; 1; tq = tq->do_targetq) { + (void)dispatch_atomic_sub2o(tq, dq_running, excess); + if (tq == rq) { + break; + } + } + da_width -= excess; + if (slowpath(!da_width)) { + return _dispatch_apply_serial(da); + } + da->da_thr_cnt -= excess / 2; + } + rq = rq->do_targetq; + } while (slowpath(rq->do_targetq)); + _dispatch_apply_f2(rq, da, _dispatch_apply3); + do { + (void)dispatch_atomic_sub2o(dq, dq_running, da_width); + dq = dq->do_targetq; + } while (slowpath(dq->do_targetq)); +} DISPATCH_NOINLINE void -dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) +dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, + void (*func)(void *, size_t)) { - struct dispatch_apply_dc_s { - DISPATCH_CONTINUATION_HEADER(dispatch_apply_dc_s); - } da_dc[DISPATCH_APPLY_MAX_CPUS]; struct dispatch_apply_s da; - size_t i; da.da_func = func; da.da_ctxt = ctxt; da.da_iterations = iterations; da.da_index = 0; da.da_thr_cnt = _dispatch_hw_config.cc_max_active; + da.da_queue = NULL; if (da.da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { da.da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; @@ -109,46 +183,62 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, void (*func if (iterations < da.da_thr_cnt) { da.da_thr_cnt = (uint32_t)iterations; } - if (slowpath(dq->dq_width <= 2 || da.da_thr_cnt <= 1)) { + if (slowpath(dq->dq_width <= 2) || slowpath(da.da_thr_cnt <= 1) || + slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) { return dispatch_sync_f(dq, &da, _dispatch_apply_serial); } - - for (i = 0; i < da.da_thr_cnt; i++) { - da_dc[i].do_vtable = NULL; - da_dc[i].do_next = &da_dc[i + 1]; - da_dc[i].dc_func = _dispatch_apply2; - da_dc[i].dc_ctxt = &da; + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + if (slowpath(dq->do_targetq)) { + if (slowpath(dq == old_dq)) { + return dispatch_sync_f(dq, &da, _dispatch_apply_serial); + } else { + da.da_queue = dq; + 
return dispatch_sync_f(dq, &da, _dispatch_apply_redirect); + } } + dispatch_atomic_acquire_barrier(); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_apply_f2(dq, &da, _dispatch_apply2); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +} - da.da_sema = _dispatch_get_thread_semaphore(); +#ifdef __BLOCKS__ +#if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE +static void +_dispatch_apply_slow(size_t iterations, dispatch_queue_t dq, + void (^work)(size_t)) +{ + struct Block_basic *bb = (void *)_dispatch_Block_copy((void *)work); + dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + Block_release(bb); +} +#endif - // some queues are easy to borrow and some are not - if (slowpath(dq->do_targetq)) { - _dispatch_queue_push_list(dq, (void *)&da_dc[0], (void *)&da_dc[da.da_thr_cnt - 1]); - } else { - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - // root queues are always concurrent and safe to borrow - _dispatch_queue_push_list(dq, (void *)&da_dc[1], (void *)&da_dc[da.da_thr_cnt - 1]); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - // The first da_dc[] element was explicitly not pushed on to the queue. 
- // We need to either call it like so: - // da_dc[0].dc_func(da_dc[0].dc_ctxt); - // Or, given that we know the 'func' and 'ctxt', we can call it directly: - _dispatch_apply2(&da); - _dispatch_workitem_inc(); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +void +dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) +{ +#if DISPATCH_COCOA_COMPAT + // Under GC, blocks transferred to other threads must be Block_copy()ed + // rdar://problem/7455071 + if (dispatch_begin_thread_4GC) { + return _dispatch_apply_slow(iterations, dq, work); } - dispatch_semaphore_wait(da.da_sema, DISPATCH_TIME_FOREVER); - _dispatch_put_thread_semaphore(da.da_sema); +#endif + struct Block_basic *bb = (void *)work; + dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); } +#endif #if 0 #ifdef __BLOCKS__ void -dispatch_stride(size_t offset, size_t stride, size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) +dispatch_stride(size_t offset, size_t stride, size_t iterations, + dispatch_queue_t dq, void (^work)(size_t)) { struct Block_basic *bb = (void *)work; - dispatch_stride_f(offset, stride, iterations, dq, bb, (void *)bb->Block_invoke); + dispatch_stride_f(offset, stride, iterations, dq, bb, + (void *)bb->Block_invoke); } #endif diff --git a/src/benchmark.c b/src/benchmark.c index fafe90968..246affa2f 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -2,19 +2,19 @@ * Copyright (c) 2008-2009 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -22,7 +22,9 @@ struct __dispatch_benchmark_data_s { +#if HAVE_MACH_ABSOLUTE_TIME mach_timebase_info_data_t tbi; +#endif uint64_t loop_cost; void (*func)(void *); void *ctxt; @@ -38,28 +40,32 @@ _dispatch_benchmark_init(void *context) register void (*f)(void *) = bdata->func; register void *c = bdata->ctxt; register size_t cnt = bdata->count; + size_t i = 0; uint64_t start, delta; -#ifdef __LP64__ +#if defined(__LP64__) __uint128_t lcost; #else long double lcost; #endif +#if HAVE_MACH_ABSOLUTE_TIME kern_return_t kr; - size_t i = 0; kr = mach_timebase_info(&bdata->tbi); dispatch_assert_zero(kr); +#endif - start = mach_absolute_time(); + start = _dispatch_absolute_time(); do { i++; f(c); } while (i < cnt); - delta = mach_absolute_time() - start; + delta = _dispatch_absolute_time() - start; lcost = delta; +#if HAVE_MACH_ABSOLUTE_TIME lcost *= bdata->tbi.numer; lcost /= bdata->tbi.denom; +#endif lcost /= cnt; bdata->loop_cost = lcost; @@ -75,7 +81,8 @@ dispatch_benchmark(size_t count, void (^block)(void)) #endif uint64_t -dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(void *)) +dispatch_benchmark_f(size_t count, register void *ctxt, + register void (*func)(void *)) { static struct __dispatch_benchmark_data_s bdata = { .func = (void *)dummy_function, @@ -83,7 +90,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(vo }; static dispatch_once_t pred; uint64_t ns, start, delta; -#ifdef __LP64__ +#if defined(__LP64__) __uint128_t conversion, big_denom; #else long double conversion, big_denom; @@ -96,16 +103,20 @@ dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(vo return 0; } - start = mach_absolute_time(); + start = _dispatch_absolute_time(); do { i++; func(ctxt); } while (i < count); - delta = mach_absolute_time() - start; + delta = 
_dispatch_absolute_time() - start; conversion = delta; +#if HAVE_MACH_ABSOLUTE_TIME conversion *= bdata.tbi.numer; big_denom = bdata.tbi.denom; +#else + big_denom = delta; +#endif big_denom *= count; conversion /= big_denom; ns = conversion; diff --git a/src/data.c b/src/data.c new file mode 100644 index 000000000..e12565617 --- /dev/null +++ b/src/data.c @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +// Dispatch data objects are dispatch objects with standard retain/release +// memory management. A dispatch data object either points to a number of other +// dispatch data objects or is a leaf data object. A leaf data object contains +// a pointer to represented memory. A composite data object specifies the total +// size of data it represents and list of constituent records. +// +// A leaf data object has a single entry in records[], the object size is the +// same as records[0].length and records[0].from is always 0. In other words, a +// leaf data object always points to a full represented buffer, so a composite +// dispatch data object is needed to represent a subrange of a memory region. 
+ +#define _dispatch_data_retain(x) dispatch_retain(x) +#define _dispatch_data_release(x) dispatch_release(x) + +static void _dispatch_data_dispose(dispatch_data_t data); +static size_t _dispatch_data_debug(dispatch_data_t data, char* buf, + size_t bufsiz); + +#if DISPATCH_DATA_MOVABLE +static const dispatch_block_t _dispatch_data_destructor_unlock = ^{ + DISPATCH_CRASH("unlock destructor called"); +}; +#define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock) +#endif + +const struct dispatch_data_vtable_s _dispatch_data_vtable = { + .do_type = DISPATCH_DATA_TYPE, + .do_kind = "data", + .do_dispose = _dispatch_data_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = _dispatch_data_debug, +}; + +static dispatch_data_t +_dispatch_data_init(size_t n) +{ + dispatch_data_t data = calloc(1ul, sizeof(struct dispatch_data_s) + + n * sizeof(range_record)); + data->num_records = n; + data->do_vtable = &_dispatch_data_vtable; + data->do_xref_cnt = 1; + data->do_ref_cnt = 1; + data->do_targetq = dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + data->do_next = DISPATCH_OBJECT_LISTLESS; + return data; +} + +dispatch_data_t +dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, + dispatch_block_t destructor) +{ + dispatch_data_t data; + if (!buffer || !size) { + // Empty data requested so return the singleton empty object. Call + // destructor immediately in this case to ensure any unused associated + // storage is released. + if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) { + free((void*)buffer); + } else if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT) { + dispatch_async(queue ? 
queue : dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), destructor); + } + return dispatch_data_empty; + } + data = _dispatch_data_init(1); + // Leaf objects always point to the entirety of the memory region + data->leaf = true; + data->size = size; + data->records[0].from = 0; + data->records[0].length = size; + data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { + // The default destructor was provided, indicating the data should be + // copied. + void *data_buf = malloc(size); + if (slowpath(!data_buf)) { + free(data); + return NULL; + } + buffer = memcpy(data_buf, buffer, size); + } else { + if (destructor != DISPATCH_DATA_DESTRUCTOR_FREE) { + data->destructor = Block_copy(destructor); + } +#if DISPATCH_DATA_MOVABLE + // A non-default destructor was provided, indicating the system does not + // own the buffer. Mark the object as locked since the application has + // direct access to the buffer and it cannot be reallocated/moved. 
+ data->locked = 1; +#endif + } + data->records[0].data_object = (void*)buffer; + if (queue) { + _dispatch_retain(queue); + data->do_targetq = queue; + } + return data; +} + +static void +_dispatch_data_dispose(dispatch_data_t dd) +{ + dispatch_block_t destructor = dd->destructor; + if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { + size_t i; + for (i = 0; i < dd->num_records; ++i) { + _dispatch_data_release(dd->records[i].data_object); + } +#if DISPATCH_DATA_MOVABLE + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) { + dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object; + (void)dispatch_atomic_dec2o(data, locked); + _dispatch_data_release(data); +#endif + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) { + free(dd->records[0].data_object); + } else { + dispatch_async_f(dd->do_targetq, destructor, + _dispatch_call_block_and_release); + } + _dispatch_dispose(dd); +} + +static size_t +_dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) +{ + size_t offset = 0; + if (dd->leaf) { + offset += snprintf(&buf[offset], bufsiz - offset, + "leaf: %d, size: %zd, data: %p", dd->leaf, dd->size, + dd->records[0].data_object); + } else { + offset += snprintf(&buf[offset], bufsiz - offset, + "leaf: %d, size: %zd, num_records: %zd", dd->leaf, + dd->size, dd->num_records); + size_t i; + for (i = 0; i < dd->num_records; ++i) { + range_record r = dd->records[i]; + offset += snprintf(&buf[offset], bufsiz - offset, + "records[%zd] from: %zd, length %zd, data_object: %p", i, + r.from, r.length, r.data_object); + } + } + return offset; +} + +size_t +dispatch_data_get_size(dispatch_data_t dd) +{ + return dd->size; +} + +dispatch_data_t +dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) +{ + dispatch_data_t data; + if (!dd1->size) { + _dispatch_data_retain(dd2); + return dd2; + } + if (!dd2->size) { + _dispatch_data_retain(dd1); + return dd1; + } + data = _dispatch_data_init(dd1->num_records + dd2->num_records); + 
data->size = dd1->size + dd2->size; + // Copy the constituent records into the newly created data object + memcpy(data->records, dd1->records, dd1->num_records * + sizeof(range_record)); + memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records * + sizeof(range_record)); + // Reference leaf objects as sub-objects + if (dd1->leaf) { + data->records[0].data_object = dd1; + } + if (dd2->leaf) { + data->records[dd1->num_records].data_object = dd2; + } + size_t i; + for (i = 0; i < data->num_records; ++i) { + _dispatch_data_retain(data->records[i].data_object); + } + return data; +} + +dispatch_data_t +dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, + size_t length) +{ + dispatch_data_t data; + if (offset >= dd->size || !length) { + return dispatch_data_empty; + } else if ((offset + length) > dd->size) { + length = dd->size - offset; + } else if (length == dd->size) { + _dispatch_data_retain(dd); + return dd; + } + if (dd->leaf) { + data = _dispatch_data_init(1); + data->size = length; + data->records[0].from = offset; + data->records[0].length = length; + data->records[0].data_object = dd; + _dispatch_data_retain(dd); + return data; + } + // Subrange of a composite dispatch data object: find the record containing + // the specified offset + data = dispatch_data_empty; + size_t i = 0, bytes_left = length; + while (i < dd->num_records && offset >= dd->records[i].length) { + offset -= dd->records[i++].length; + } + while (i < dd->num_records) { + size_t record_len = dd->records[i].length - offset; + if (record_len > bytes_left) { + record_len = bytes_left; + } + dispatch_data_t subrange = dispatch_data_create_subrange( + dd->records[i].data_object, dd->records[i].from + offset, + record_len); + dispatch_data_t concat = dispatch_data_create_concat(data, subrange); + _dispatch_data_release(data); + _dispatch_data_release(subrange); + data = concat; + bytes_left -= record_len; + if (!bytes_left) { + return data; + } + offset = 0; + i++; + } + 
// Crashing here indicates memory corruption of passed in data object + DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); + return NULL; +} + +// When mapping a leaf object or a subrange of a leaf object, return a direct +// pointer to the represented buffer. For all other data objects, copy the +// represented buffers into a contiguous area. In the future it might +// be possible to relocate the buffers instead (if not marked as locked). +dispatch_data_t +dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, + size_t *size_ptr) +{ + dispatch_data_t data = dd; + void *buffer = NULL; + size_t size = dd->size, offset = 0; + if (!size) { + data = dispatch_data_empty; + goto out; + } + if (!dd->leaf && dd->num_records == 1 && + ((dispatch_data_t)dd->records[0].data_object)->leaf) { + offset = dd->records[0].from; + dd = (dispatch_data_t)(dd->records[0].data_object); + } + if (dd->leaf) { +#if DISPATCH_DATA_MOVABLE + data = _dispatch_data_init(1); + // Make sure the underlying leaf object does not move the backing buffer + (void)dispatch_atomic_inc2o(dd, locked); + data->size = size; + data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; + data->records[0].data_object = dd; + data->records[0].from = offset; + data->records[0].length = size; + _dispatch_data_retain(dd); +#else + _dispatch_data_retain(data); +#endif + buffer = dd->records[0].data_object + offset; + goto out; + } + // Composite data object, copy the represented buffers + buffer = malloc(size); + if (!buffer) { + data = NULL; + size = 0; + goto out; + } + dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, + size_t off, const void* buf, size_t len) { + memcpy(buffer + off, buf, len); + return (bool)true; + }); + data = dispatch_data_create(buffer, size, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); +out: + if (buffer_ptr) { + *buffer_ptr = buffer; + } + if (size_ptr) { + *size_ptr = size; + } + return data; +} + +static bool +_dispatch_data_apply(dispatch_data_t dd, 
size_t offset, size_t from, + size_t size, dispatch_data_applier_t applier) +{ + bool result = true; + dispatch_data_t data = dd; + const void *buffer; + dispatch_assert(dd->size); +#if DISPATCH_DATA_MOVABLE + if (dd->leaf) { + data = _dispatch_data_init(1); + // Make sure the underlying leaf object does not move the backing buffer + (void)dispatch_atomic_inc2o(dd, locked); + data->size = size; + data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; + data->records[0].data_object = dd; + data->records[0].from = from; + data->records[0].length = size; + _dispatch_data_retain(dd); + buffer = dd->records[0].data_object + from; + result = applier(data, offset, buffer, size); + _dispatch_data_release(data); + return result; + } +#else + if (!dd->leaf && dd->num_records == 1 && + ((dispatch_data_t)dd->records[0].data_object)->leaf) { + from = dd->records[0].from; + dd = (dispatch_data_t)(dd->records[0].data_object); + } + if (dd->leaf) { + buffer = dd->records[0].data_object + from; + return applier(data, offset, buffer, size); + } +#endif + size_t i; + for (i = 0; i < dd->num_records && result; ++i) { + result = _dispatch_data_apply(dd->records[i].data_object, + offset, dd->records[i].from, dd->records[i].length, + applier); + offset += dd->records[i].length; + } + return result; +} + +bool +dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier) +{ + if (!dd->size) { + return true; + } + return _dispatch_data_apply(dd, 0, 0, dd->size, applier); +} + +// Returs either a leaf object or an object composed of a single leaf object +dispatch_data_t +dispatch_data_copy_region(dispatch_data_t dd, size_t location, + size_t *offset_ptr) +{ + if (location >= dd->size) { + *offset_ptr = 0; + return dispatch_data_empty; + } + dispatch_data_t data; + size_t size = dd->size, offset = 0, from = 0; + while (true) { + if (dd->leaf) { + _dispatch_data_retain(dd); + *offset_ptr = offset; + if (size == dd->size) { + return dd; + } else { + // Create a new object for the 
requested subrange of the leaf + data = _dispatch_data_init(1); + data->size = size; + data->records[0].from = from; + data->records[0].length = size; + data->records[0].data_object = dd; + return data; + } + } else { + // Find record at the specified location + size_t i, pos; + for (i = 0; i < dd->num_records; ++i) { + pos = offset + dd->records[i].length; + if (location < pos) { + size = dd->records[i].length; + from = dd->records[i].from; + data = (dispatch_data_t)(dd->records[i].data_object); + if (dd->num_records == 1 && data->leaf) { + // Return objects composed of a single leaf node + *offset_ptr = offset; + _dispatch_data_retain(dd); + return dd; + } else { + // Drill down into other objects + dd = data; + break; + } + } else { + offset = pos; + } + } + } + } +} diff --git a/src/data_internal.h b/src/data_internal.h new file mode 100644 index 000000000..314efa752 --- /dev/null +++ b/src/data_internal.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_DATA_INTERNAL__ +#define __DISPATCH_DATA_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +struct dispatch_data_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_data_s); +}; + +extern const struct dispatch_data_vtable_s _dispatch_data_vtable; + +typedef struct range_record_s { + void* data_object; + size_t from; + size_t length; +} range_record; + +struct dispatch_data_s { + DISPATCH_STRUCT_HEADER(dispatch_data_s, dispatch_data_vtable_s); +#if DISPATCH_DATA_MOVABLE + unsigned int locked; +#endif + bool leaf; + dispatch_block_t destructor; + size_t size, num_records; + range_record records[]; +}; + +#endif // __DISPATCH_DATA_INTERNAL__ diff --git a/src/hw_shims.h b/src/hw_shims.h deleted file mode 100644 index b99bf177f..000000000 --- a/src/hw_shims.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. 
- */ - -#ifndef __DISPATCH_HW_SHIMS__ -#define __DISPATCH_HW_SHIMS__ - -/* x86 has a 64 byte cacheline */ -#define DISPATCH_CACHELINE_SIZE 64 -#define ROUND_UP_TO_CACHELINE_SIZE(x) (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1)) -#define ROUND_UP_TO_VECTOR_SIZE(x) (((x) + 15) & ~15) - -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) -// GCC generates suboptimal register pressure -// LLVM does better, but doesn't support tail calls -// 6248590 __sync_*() intrinsics force a gratuitous "lea" instruction, with resulting register pressure -#if 0 && defined(__i386__) || defined(__x86_64__) -#define dispatch_atomic_xchg(p, n) ({ typeof(*(p)) _r; asm("xchg %0, %1" : "=r" (_r) : "m" (*(p)), "0" (n)); _r; }) -#else -#define dispatch_atomic_xchg(p, n) __sync_lock_test_and_set((p), (n)) -#endif -#define dispatch_atomic_cmpxchg(p, o, n) __sync_bool_compare_and_swap((p), (o), (n)) -#define dispatch_atomic_inc(p) __sync_add_and_fetch((p), 1) -#define dispatch_atomic_dec(p) __sync_sub_and_fetch((p), 1) -#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v)) -#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v)) -#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v)) -#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v)) -#if defined(__i386__) || defined(__x86_64__) -/* GCC emits nothing for __sync_synchronize() on i386/x86_64. */ -#define dispatch_atomic_barrier() __asm__ __volatile__("mfence") -#else -#define dispatch_atomic_barrier() __sync_synchronize() -#endif -#else -#error "Please upgrade to GCC 4.2 or newer." 
-#endif - -#if defined(__i386__) || defined(__x86_64__) -#define _dispatch_hardware_pause() asm("pause") -#define _dispatch_debugger() asm("int3") -#else -#define _dispatch_hardware_pause() asm("") -#define _dispatch_debugger() asm("trap") -#endif -// really just a low level abort() -#define _dispatch_hardware_crash() __builtin_trap() - - -#endif diff --git a/src/init.c b/src/init.c new file mode 100644 index 000000000..d72219c92 --- /dev/null +++ b/src/init.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +// Contains exported global data and initialization & other routines that must +// only exist once in the shared library even when resolvers are used. 
+ +#include "internal.h" + +#if HAVE_MACH +#include "protocolServer.h" +#endif + +#pragma mark - +#pragma mark dispatch_init + +#if USE_LIBDISPATCH_INIT_CONSTRUCTOR +DISPATCH_NOTHROW __attribute__((constructor)) +void +_libdispatch_init(void); + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_libdispatch_init(void) +{ + libdispatch_init(); +} +#endif + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_atfork_prepare(void) +{ +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_atfork_parent(void) +{ +} + +void +dummy_function(void) +{ +} + +long +dummy_function_r0(void) +{ + return 0; +} + +#pragma mark - +#pragma mark dispatch_globals + +#if DISPATCH_COCOA_COMPAT +// dispatch_begin_thread_4GC having non-default value triggers GC-only slow +// paths and is checked frequently, testing against NULL is faster than +// comparing for equality with "dummy_function" +void (*dispatch_begin_thread_4GC)(void) = NULL; +void (*dispatch_end_thread_4GC)(void) = dummy_function; +void (*dispatch_no_worker_threads_4GC)(void) = NULL; +void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function; +void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function; +#endif + +struct _dispatch_hw_config_s _dispatch_hw_config; +bool _dispatch_safe_fork = true; + +const struct dispatch_queue_offsets_s dispatch_queue_offsets = { + .dqo_version = 3, + .dqo_label = offsetof(struct dispatch_queue_s, dq_label), + .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), + .dqo_flags = 0, + .dqo_flags_size = 0, + .dqo_width = offsetof(struct dispatch_queue_s, dq_width), + .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width), + .dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum), + .dqo_serialnum_size = sizeof(((dispatch_queue_t)NULL)->dq_serialnum), + .dqo_running = offsetof(struct dispatch_queue_s, dq_running), + .dqo_running_size = sizeof(((dispatch_queue_t)NULL)->dq_running), +}; + +// 6618342 Contact the team that owns the Instrument DTrace probe 
before +// renaming this symbol +DISPATCH_CACHELINE_ALIGN +struct dispatch_queue_s _dispatch_main_q = { +#if !DISPATCH_USE_RESOLVERS + .do_vtable = &_dispatch_queue_vtable, + .do_targetq = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], +#endif + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .dq_label = "com.apple.main-thread", + .dq_running = 1, + .dq_width = 1, + .dq_serialnum = 1, +}; + +const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = { + .do_type = DISPATCH_QUEUE_ATTR_TYPE, + .do_kind = "queue-attr", +}; + +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = { + .do_vtable = &dispatch_queue_attr_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_next = DISPATCH_OBJECT_LISTLESS, +}; + +struct dispatch_data_s _dispatch_data_empty = { +#if !DISPATCH_USE_RESOLVERS + .do_vtable = &_dispatch_data_vtable, +#endif + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_next = DISPATCH_OBJECT_LISTLESS, +}; + +const dispatch_block_t _dispatch_data_destructor_free = ^{ + DISPATCH_CRASH("free destructor called"); +}; + +#pragma mark - +#pragma mark dispatch_log + +static char _dispatch_build[16]; + +static void +_dispatch_bug_init(void *context DISPATCH_UNUSED) +{ +#ifdef __APPLE__ + int mib[] = { CTL_KERN, KERN_OSVERSION }; + size_t bufsz = sizeof(_dispatch_build); + + sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0); +#else + /* + * XXXRW: What to do here for !Mac OS X? 
+ */ + memset(_dispatch_build, 0, sizeof(_dispatch_build)); +#endif +} + +void +_dispatch_bug(size_t line, long val) +{ + static dispatch_once_t pred; + static void *last_seen; + void *ra = __builtin_return_address(0); + + dispatch_once_f(&pred, NULL, _dispatch_bug_init); + if (last_seen != ra) { + last_seen = ra; + _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", + _dispatch_build, (unsigned long)line, val); + } +} + +void +_dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) +{ + static void *last_seen; + void *ra = __builtin_return_address(0); + if (last_seen != ra) { + last_seen = ra; + _dispatch_log("BUG in libdispatch client: %s %s - 0x%x", msg, + mach_error_string(kr), kr); + } +} + +void +_dispatch_abort(size_t line, long val) +{ + _dispatch_bug(line, val); + abort(); +} + +void +_dispatch_log(const char *msg, ...) +{ + va_list ap; + + va_start(ap, msg); + _dispatch_logv(msg, ap); + va_end(ap); +} + +static FILE *dispatch_logfile; +static bool dispatch_log_disabled; + +static void +_dispatch_logv_init(void *context DISPATCH_UNUSED) +{ +#if DISPATCH_DEBUG + bool log_to_file = true; +#else + bool log_to_file = false; +#endif + char *e = getenv("LIBDISPATCH_LOG"); + if (e) { + if (strcmp(e, "YES") == 0) { + // default + } else if (strcmp(e, "NO") == 0) { + dispatch_log_disabled = true; + } else if (strcmp(e, "syslog") == 0) { + log_to_file = false; + } else if (strcmp(e, "file") == 0) { + log_to_file = true; + } else if (strcmp(e, "stderr") == 0) { + log_to_file = true; + dispatch_logfile = stderr; + } + } + if (!dispatch_log_disabled) { + if (log_to_file && !dispatch_logfile) { + char path[PATH_MAX]; + snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", + getpid()); + dispatch_logfile = fopen(path, "a"); + } + if (dispatch_logfile) { + struct timeval tv; + gettimeofday(&tv, NULL); + fprintf(dispatch_logfile, "=== log file opened for %s[%u] at " + "%ld.%06u ===\n", getprogname() ?: "", getpid(), + tv.tv_sec, tv.tv_usec); + 
fflush(dispatch_logfile); + } + } +} + +void +_dispatch_logv(const char *msg, va_list ap) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_logv_init); + + if (slowpath(dispatch_log_disabled)) { + return; + } + if (slowpath(dispatch_logfile)) { + vfprintf(dispatch_logfile, msg, ap); + // TODO: May cause interleaving with another thread's log + fputc('\n', dispatch_logfile); + fflush(dispatch_logfile); + return; + } + vsyslog(LOG_NOTICE, msg, ap); +} + +#pragma mark - +#pragma mark dispatch_debug + +void +dispatch_debug(dispatch_object_t dou, const char *msg, ...) +{ + va_list ap; + + va_start(ap, msg); + dispatch_debugv(dou._do, msg, ap); + va_end(ap); +} + +void +dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +{ + char buf[4096]; + size_t offs; + + if (dou._do && dou._do->do_vtable->do_debug) { + offs = dx_debug(dou._do, buf, sizeof(buf)); + } else { + offs = snprintf(buf, sizeof(buf), "NULL vtable slot"); + } + + snprintf(buf + offs, sizeof(buf) - offs, ": %s", msg); + _dispatch_logv(buf, ap); +} + +#pragma mark - +#pragma mark dispatch_block_t + +#ifdef __BLOCKS__ + +#undef _dispatch_Block_copy +dispatch_block_t +_dispatch_Block_copy(dispatch_block_t db) +{ + dispatch_block_t rval; + + while (!(rval = Block_copy(db))) { + sleep(1); + } + return rval; +} + +void +_dispatch_call_block_and_release(void *block) +{ + void (^b)(void) = block; + b(); + Block_release(b); +} + +#endif // __BLOCKS__ + +#pragma mark - +#pragma mark dispatch_client_callout + +#if DISPATCH_USE_CLIENT_CALLOUT + +#undef _dispatch_client_callout +#undef _dispatch_client_callout2 + +DISPATCH_NOINLINE +void +_dispatch_client_callout(void *ctxt, dispatch_function_t f) +{ + return f(ctxt); +} + +DISPATCH_NOINLINE +void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) +{ + return f(ctxt, i); +} + +#endif + +#pragma mark - +#pragma mark dispatch_source_types + +static void +dispatch_source_type_timer_init(dispatch_source_t ds, + 
dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_refs = calloc(1ul, sizeof(struct dispatch_timer_source_refs_s)); + if (slowpath(!ds->ds_refs)) return; + ds->ds_needs_rearm = true; + ds->ds_is_timer = true; + ds_timer(ds->ds_refs).flags = mask; +} + +const struct dispatch_source_type_s _dispatch_source_type_timer = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + }, + .mask = DISPATCH_TIMER_WALL_CLOCK, + .init = dispatch_source_type_timer_init, +}; + +const struct dispatch_source_type_s _dispatch_source_type_read = { + .ke = { + .filter = EVFILT_READ, + .flags = EV_DISPATCH, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_write = { + .ke = { + .filter = EVFILT_WRITE, + .flags = EV_DISPATCH, + }, +}; + +#if DISPATCH_USE_VM_PRESSURE +#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 +static int _dispatch_ios_simulator_memory_warnings_fd = -1; +static void +_dispatch_ios_simulator_vm_source_init(void *context DISPATCH_UNUSED) +{ + char *e = getenv("IPHONE_SIMULATOR_MEMORY_WARNINGS"); + if (!e) return; + _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); + if (_dispatch_ios_simulator_memory_warnings_fd == -1) { + (void)dispatch_assume_zero(errno); + } +} +static void +dispatch_source_type_vm_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask, + dispatch_queue_t q DISPATCH_UNUSED) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_vm_source_init); + ds->ds_dkev->dk_kevent.ident = (mask & DISPATCH_VM_PRESSURE ? 
+ _dispatch_ios_simulator_memory_warnings_fd : -1); +} + +const struct dispatch_source_type_s _dispatch_source_type_vm = { + .ke = { + .filter = EVFILT_VNODE, + .flags = EV_CLEAR, + }, + .mask = NOTE_ATTRIB, + .init = dispatch_source_type_vm_init, +}; +#else +static void +dispatch_source_type_vm_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_is_level = false; +} + +const struct dispatch_source_type_s _dispatch_source_type_vm = { + .ke = { + .filter = EVFILT_VM, + .flags = EV_DISPATCH, + }, + .mask = NOTE_VM_PRESSURE, + .init = dispatch_source_type_vm_init, +}; +#endif +#endif + +const struct dispatch_source_type_s _dispatch_source_type_proc = { + .ke = { + .filter = EVFILT_PROC, + .flags = EV_CLEAR, + }, + .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC +#if HAVE_DECL_NOTE_SIGNAL + |NOTE_SIGNAL +#endif +#if HAVE_DECL_NOTE_REAP + |NOTE_REAP +#endif + , +}; + +const struct dispatch_source_type_s _dispatch_source_type_signal = { + .ke = { + .filter = EVFILT_SIGNAL, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_vnode = { + .ke = { + .filter = EVFILT_VNODE, + .flags = EV_CLEAR, + }, + .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| + NOTE_RENAME|NOTE_REVOKE +#if HAVE_DECL_NOTE_NONE + |NOTE_NONE +#endif + , +}; + +const struct dispatch_source_type_s _dispatch_source_type_vfs = { + .ke = { + .filter = EVFILT_FS, + .flags = EV_CLEAR, + }, + .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD| + VQ_ASSIST|VQ_NOTRESPLOCK +#if HAVE_DECL_VQ_UPDATE + |VQ_UPDATE +#endif +#if HAVE_DECL_VQ_VERYLOWDISK + |VQ_VERYLOWDISK +#endif + , +}; + +const struct dispatch_source_type_s _dispatch_source_type_data_add = { + .ke = { + .filter = DISPATCH_EVFILT_CUSTOM_ADD, + }, +}; + +const struct dispatch_source_type_s _dispatch_source_type_data_or = { + .ke = { + .filter = 
DISPATCH_EVFILT_CUSTOM_OR, + .flags = EV_CLEAR, + .fflags = ~0, + }, +}; + +#if HAVE_MACH + +static void +dispatch_source_type_mach_send_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, unsigned long mask, + dispatch_queue_t q DISPATCH_UNUSED) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init); + if (!mask) { + // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD + ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD; + ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD; + } +} + +const struct dispatch_source_type_s _dispatch_source_type_mach_send = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_CLEAR, + }, + .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .init = dispatch_source_type_mach_send_init, +}; + +static void +dispatch_source_type_mach_recv_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_is_level = false; +} + +const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_DISPATCH, + .fflags = DISPATCH_MACH_RECV_MESSAGE, + }, + .init = dispatch_source_type_mach_recv_init, +}; + +#pragma mark - +#pragma mark dispatch_mig + +void * +dispatch_mach_msg_get_context(mach_msg_header_t *msg) +{ + mach_msg_context_trailer_t *tp; + void *context = NULL; + + tp = (mach_msg_context_trailer_t *)((uint8_t *)msg + + round_msg(msg->msgh_size)); + if (tp->msgh_trailer_size >= + (mach_msg_size_t)sizeof(mach_msg_context_trailer_t)) { + context = (void *)(uintptr_t)tp->msgh_context; + } + return context; +} + +kern_return_t +_dispatch_wakeup_main_thread(mach_port_t mp DISPATCH_UNUSED) +{ + // dummy function just to pop out the main thread out of mach_msg() + return 0; +} + +kern_return_t 
+_dispatch_consume_send_once_right(mach_port_t mp DISPATCH_UNUSED) +{ + // dummy function to consume a send-once right + return 0; +} + +kern_return_t +_dispatch_mach_notify_port_destroyed(mach_port_t notify DISPATCH_UNUSED, + mach_port_t name) +{ + kern_return_t kr; + // this function should never be called + (void)dispatch_assume_zero(name); + kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE,-1); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_no_senders(mach_port_t notify, + mach_port_mscount_t mscnt DISPATCH_UNUSED) +{ + // this function should never be called + (void)dispatch_assume_zero(notify); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_once(mach_port_t notify DISPATCH_UNUSED) +{ + // we only register for dead-name notifications + // some code deallocated our send-once right without consuming it +#if DISPATCH_DEBUG + _dispatch_log("Corruption: An app/library deleted a libdispatch " + "dead-name notification"); +#endif + return KERN_SUCCESS; +} + + +#endif // HAVE_MACH diff --git a/src/internal.h b/src/internal.h index d55540b0d..24d3a04ea 100644 --- a/src/internal.h +++ b/src/internal.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -27,25 +27,33 @@ #ifndef __DISPATCH_INTERNAL__ #define __DISPATCH_INTERNAL__ +#include + #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ -#include "dispatch.h" -#include "base.h" -#include "time.h" -#include "queue.h" -#include "object.h" -#include "source.h" -#include "group.h" -#include "semaphore.h" -#include "once.h" -#include "benchmark.h" + + +#include +#include + + +#include +#include +#include +#include +#include +#include +#include +#include +#include /* private.h uses #include_next and must be included last to avoid picking * up installed headers. */ #include "queue_private.h" #include "source_private.h" +#include "benchmark.h" #include "private.h" -#include "legacy.h" + /* More #includes at EOF (dependent on the contents of internal.h) ... */ /* The "_debug" library build */ @@ -53,9 +61,25 @@ #define DISPATCH_DEBUG 0 #endif +#ifndef DISPATCH_PROFILE +#define DISPATCH_PROFILE 0 +#endif + +#if DISPATCH_DEBUG && !defined(DISPATCH_USE_CLIENT_CALLOUT) +#define DISPATCH_USE_CLIENT_CALLOUT 1 +#endif + +#if (DISPATCH_DEBUG || DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE) +#define DISPATCH_USE_DTRACE 1 +#endif +#if HAVE_LIBKERN_OSCROSSENDIAN_H #include +#endif +#if HAVE_LIBKERN_OSATOMIC_H #include +#endif +#if HAVE_MACH #include #include #include @@ -70,14 +94,17 @@ #include #include #include +#endif /* HAVE_MACH */ +#if HAVE_MALLOC_MALLOC_H #include +#endif #include #include #include #include #include -#include #include +#include #include #ifdef __BLOCKS__ @@ -88,7 +115,11 @@ #include #include #include +#include #include +#if USE_POSIX_SEM +#include +#endif #include #include #include @@ -97,9 +128,32 @@ #include #include #include +#if HAVE_UNISTD_H #include +#endif -#define DISPATCH_NOINLINE __attribute__((noinline)) +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif +#ifndef __has_include +#define __has_include(x) 0 +#endif +#ifndef __has_feature +#define __has_feature(x) 0 
+#endif +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#define DISPATCH_NOINLINE __attribute__((__noinline__)) +#define DISPATCH_USED __attribute__((__used__)) +#define DISPATCH_UNUSED __attribute__((__unused__)) +#define DISPATCH_WEAK __attribute__((__weak__)) +#if DISPATCH_DEBUG +#define DISPATCH_ALWAYS_INLINE_NDEBUG +#else +#define DISPATCH_ALWAYS_INLINE_NDEBUG __attribute__((__always_inline__)) +#endif // workaround 6368156 #ifdef NSEC_PER_SEC @@ -116,184 +170,256 @@ #define NSEC_PER_USEC 1000ull /* I wish we had __builtin_expect_range() */ -#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) -#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) - -void _dispatch_bug(size_t line, long val) __attribute__((__noinline__)); -void _dispatch_abort(size_t line, long val) __attribute__((__noinline__,__noreturn__)); -void _dispatch_log(const char *msg, ...) __attribute__((__noinline__,__format__(printf,1,2))); -void _dispatch_logv(const char *msg, va_list) __attribute__((__noinline__,__format__(printf,1,0))); +#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) + +DISPATCH_NOINLINE +void _dispatch_bug(size_t line, long val); +DISPATCH_NOINLINE +void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); +DISPATCH_NOINLINE DISPATCH_NORETURN +void _dispatch_abort(size_t line, long val); +DISPATCH_NOINLINE __attribute__((__format__(printf,1,2))) +void _dispatch_log(const char *msg, ...); +DISPATCH_NOINLINE __attribute__((__format__(printf,1,0))) +void _dispatch_logv(const char *msg, va_list); /* - * For reporting bugs within libdispatch when using the "_debug" version of the library. + * For reporting bugs within libdispatch when using the "_debug" version of the + * library. */ -#define dispatch_assert(e) do { \ - if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 
1 : -1] __attribute__((unused)); \ - } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ - _dispatch_abort(__LINE__, (long)_e); \ - } \ - } \ +#define dispatch_assert(e) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ + } else { \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_abort(__LINE__, (long)_e); \ + } \ + } \ } while (0) -/* A lot of API return zero upon success and not-zero on fail. Let's capture and log the non-zero value */ -#define dispatch_assert_zero(e) do { \ - if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(!(e)) ? 1 : -1] __attribute__((unused)); \ - } else { \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && _e) { \ - _dispatch_abort(__LINE__, (long)_e); \ - } \ - } \ +/* + * A lot of API return zero upon success and not-zero on fail. Let's capture + * and log the non-zero value + */ +#define dispatch_assert_zero(e) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? -1 : 1] DISPATCH_UNUSED; \ + } else { \ + typeof(e) _e = slowpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && _e) { \ + _dispatch_abort(__LINE__, (long)_e); \ + } \ + } \ } while (0) /* - * For reporting bugs or impedance mismatches between libdispatch and external subsystems. - * These do NOT abort(), and are always compiled into the product. + * For reporting bugs or impedance mismatches between libdispatch and external + * subsystems. These do NOT abort(), and are always compiled into the product. * * In particular, we wrap all system-calls with assume() macros. */ -#define dispatch_assume(e) ({ \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (!_e) { \ - if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(e) ? 
1 : -1]; \ - (void)__compile_time_assert__; \ - } \ - _dispatch_bug(__LINE__, (long)_e); \ - } \ - _e; \ +#define dispatch_assume(e) ({ \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (!_e) { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? 1 : -1]; \ + (void)__compile_time_assert__; \ + } \ + _dispatch_bug(__LINE__, (long)_e); \ + } \ + _e; \ }) -/* A lot of API return zero upon success and not-zero on fail. Let's capture and log the non-zero value */ -#define dispatch_assume_zero(e) ({ \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (_e) { \ - if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(e) ? -1 : 1]; \ - (void)__compile_time_assert__; \ - } \ - _dispatch_bug(__LINE__, (long)_e); \ - } \ - _e; \ +/* + * A lot of API return zero upon success and not-zero on fail. Let's capture + * and log the non-zero value + */ +#define dispatch_assume_zero(e) ({ \ + typeof(e) _e = slowpath(e); /* always eval 'e' */ \ + if (_e) { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? -1 : 1]; \ + (void)__compile_time_assert__; \ + } \ + _dispatch_bug(__LINE__, (long)_e); \ + } \ + _e; \ }) /* * For reporting bugs in clients when using the "_debug" version of the library. */ -#define dispatch_debug_assert(e, msg, args...) do { \ - if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 1 : -1] __attribute__((unused)); \ - } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ - _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ - abort(); \ - } \ - } \ +#define dispatch_debug_assert(e, msg, args...) do { \ + if (__builtin_constant_p(e)) { \ + char __compile_time_assert__[(bool)(e) ? 
1 : -1] DISPATCH_UNUSED; \ + } else { \ + typeof(e) _e = fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ + abort(); \ + } \ + } \ } while (0) +/* Make sure the debug statments don't get too stale */ +#define _dispatch_debug(x, args...) \ +({ \ + if (DISPATCH_DEBUG) { \ + _dispatch_log("libdispatch: %u\t%p\t" x, __LINE__, \ + (void *)_dispatch_thread_self(), ##args); \ + } \ +}) +#if DISPATCH_DEBUG +#if HAVE_MACH +DISPATCH_NOINLINE DISPATCH_USED +void dispatch_debug_machport(mach_port_t name, const char* str); +#endif +DISPATCH_NOINLINE DISPATCH_USED +void dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str); +#else +static inline void +dispatch_debug_kevents(struct kevent* kev DISPATCH_UNUSED, + size_t count DISPATCH_UNUSED, + const char* str DISPATCH_UNUSED) {} +#endif -#ifdef __BLOCKS__ -dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); -void _dispatch_call_block_and_release(void *block); -void _dispatch_call_block_and_release2(void *block, void *ctxt); -#endif /* __BLOCKS__ */ +#if DISPATCH_USE_CLIENT_CALLOUT -void dummy_function(void); -long dummy_function_r0(void); +DISPATCH_NOTHROW void +_dispatch_client_callout(void *ctxt, dispatch_function_t f); +DISPATCH_NOTHROW void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +#else -/* Make sure the debug statments don't get too stale */ -#define _dispatch_debug(x, args...) 
\ -({ \ - if (DISPATCH_DEBUG) { \ - _dispatch_log("libdispatch: %u\t%p\t" x, __LINE__, _dispatch_thread_self(), ##args); \ - } \ -}) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout(void *ctxt, dispatch_function_t f) +{ + return f(ctxt); +} +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) +{ + return f(ctxt, i); +} -#if DISPATCH_DEBUG -void dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str); -#else -#define dispatch_debug_kevents(x, y, z) #endif -uint64_t _dispatch_get_nanoseconds(void); +#ifdef __BLOCKS__ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout_block(dispatch_block_t b) +{ + struct Block_basic *bb = (void*)b; + return _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke); +} -void _dispatch_source_drain_kevent(struct kevent *); +dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); +#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) +void _dispatch_call_block_and_release(void *block); +#endif /* __BLOCKS__ */ -dispatch_source_t -_dispatch_source_create2(dispatch_source_t ds, - dispatch_source_attr_t attr, - void *context, - dispatch_source_handler_function_t handler); +void dummy_function(void); +long dummy_function_r0(void); + +void _dispatch_source_drain_kevent(struct kevent *); -void _dispatch_update_kq(const struct kevent *); +long _dispatch_update_kq(const struct kevent *); void _dispatch_run_timers(void); // Returns howsoon with updated time value, or NULL if no timers active. 
struct timespec *_dispatch_get_next_timer_fire(struct timespec *howsoon); -dispatch_semaphore_t _dispatch_get_thread_semaphore(void); -void _dispatch_put_thread_semaphore(dispatch_semaphore_t); - bool _dispatch_source_testcancel(dispatch_source_t); uint64_t _dispatch_timeout(dispatch_time_t when); -__private_extern__ bool _dispatch_safe_fork; +extern bool _dispatch_safe_fork; -__private_extern__ struct _dispatch_hw_config_s { +extern struct _dispatch_hw_config_s { uint32_t cc_max_active; uint32_t cc_max_logical; uint32_t cc_max_physical; } _dispatch_hw_config; /* #includes dependent on internal.h */ +#include "shims.h" #include "object_internal.h" -#include "hw_shims.h" -#include "os_shims.h" #include "queue_internal.h" #include "semaphore_internal.h" #include "source_internal.h" +#include "data_internal.h" +#include "io_internal.h" +#include "trace.h" -// MIG_REPLY_MISMATCH means either: -// 1) A signal handler is NOT using async-safe API. See the sigaction(2) man page for more info. -// 2) A hand crafted call to mach_msg*() screwed up. Use MIG. 
-#define DISPATCH_VERIFY_MIG(x) do { \ - if ((x) == MIG_REPLY_MISMATCH) { \ - __crashreporter_info__ = "MIG_REPLY_MISMATCH"; \ - _dispatch_hardware_crash(); \ - } \ - } while (0) +// SnowLeopard and iOS Simulator fallbacks -#if defined(__x86_64__) || defined(__i386__) -// total hack to ensure that return register of a function is not trashed -#define DISPATCH_CRASH(x) do { \ - asm("mov %1, %0" : "=m" (__crashreporter_info__) : "c" ("BUG IN LIBDISPATCH: " x)); \ - _dispatch_hardware_crash(); \ - } while (0) +#if HAVE_PTHREAD_WORKQUEUES +#if !defined(WORKQ_BG_PRIOQUEUE) || \ + (TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070) +#undef WORKQ_BG_PRIOQUEUE +#define WORKQ_BG_PRIOQUEUE WORKQ_LOW_PRIOQUEUE +#endif +#endif // HAVE_PTHREAD_WORKQUEUES -#define DISPATCH_CLIENT_CRASH(x) do { \ - asm("mov %1, %0" : "=m" (__crashreporter_info__) : "c" ("BUG IN CLIENT OF LIBDISPATCH: " x)); \ - _dispatch_hardware_crash(); \ - } while (0) +#if HAVE_MACH +#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || \ + (TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070) +#undef MACH_NOTIFY_SEND_POSSIBLE +#define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME +#endif +#endif // HAVE_MACH -#else +#ifdef EVFILT_VM +#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#undef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE +#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 0 +#endif +#ifndef DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE 1 +#endif +#ifndef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE +#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 1 +#endif +#endif // EVFILT_VM -#define DISPATCH_CRASH(x) do { \ - __crashreporter_info__ = "BUG IN LIBDISPATCH: " x; \ - _dispatch_hardware_crash(); \ - } while (0) +#if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) +#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#undef DISPATCH_USE_SETNOSIGPIPE +#define DISPATCH_USE_SETNOSIGPIPE 0 +#endif +#ifndef DISPATCH_USE_SETNOSIGPIPE +#define 
DISPATCH_USE_SETNOSIGPIPE 1 +#endif +#endif // F_SETNOSIGPIPE -#define DISPATCH_CLIENT_CRASH(x) do { \ - __crashreporter_info__ = "BUG IN CLIENT OF LIBDISPATCH: " x; \ - _dispatch_hardware_crash(); \ - } while (0) +#define _dispatch_set_crash_log_message(x) + +#if HAVE_MACH +// MIG_REPLY_MISMATCH means either: +// 1) A signal handler is NOT using async-safe API. See the sigaction(2) man +// page for more info. +// 2) A hand crafted call to mach_msg*() screwed up. Use MIG. +#define DISPATCH_VERIFY_MIG(x) do { \ + if ((x) == MIG_REPLY_MISMATCH) { \ + _dispatch_set_crash_log_message("MIG_REPLY_MISMATCH"); \ + _dispatch_hardware_crash(); \ + } \ + } while (0) #endif +#define DISPATCH_CRASH(x) do { \ + _dispatch_set_crash_log_message("BUG IN LIBDISPATCH: " x); \ + _dispatch_hardware_crash(); \ + } while (0) + +#define DISPATCH_CLIENT_CRASH(x) do { \ + _dispatch_set_crash_log_message("BUG IN CLIENT OF LIBDISPATCH: " x); \ + _dispatch_hardware_crash(); \ + } while (0) #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/io.c b/src/io.c new file mode 100644 index 000000000..b306054be --- /dev/null +++ b/src/io.c @@ -0,0 +1,2155 @@ +/* + * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +typedef void (^dispatch_fd_entry_init_callback_t)(dispatch_fd_entry_t fd_entry); + +DISPATCH_EXPORT DISPATCH_NOTHROW +void _dispatch_iocntl(uint32_t param, uint64_t value); + +static void _dispatch_io_dispose(dispatch_io_t channel); +static dispatch_operation_t _dispatch_operation_create( + dispatch_op_direction_t direction, dispatch_io_t channel, off_t offset, + size_t length, dispatch_data_t data, dispatch_queue_t queue, + dispatch_io_handler_t handler); +static void _dispatch_operation_dispose(dispatch_operation_t operation); +static void _dispatch_operation_enqueue(dispatch_operation_t op, + dispatch_op_direction_t direction, dispatch_data_t data); +static dispatch_source_t _dispatch_operation_timer(dispatch_queue_t tq, + dispatch_operation_t op); +static inline void _dispatch_fd_entry_retain(dispatch_fd_entry_t fd_entry); +static inline void _dispatch_fd_entry_release(dispatch_fd_entry_t fd_entry); +static void _dispatch_fd_entry_init_async(dispatch_fd_t fd, + dispatch_fd_entry_init_callback_t completion_callback); +static dispatch_fd_entry_t _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, + uintptr_t hash); +static dispatch_fd_entry_t _dispatch_fd_entry_create_with_path( + dispatch_io_path_data_t path_data, dev_t dev, mode_t mode); +static int _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, + dispatch_io_t channel); +static void _dispatch_fd_entry_cleanup_operations(dispatch_fd_entry_t fd_entry, + dispatch_io_t channel); +static void _dispatch_stream_init(dispatch_fd_entry_t fd_entry, + dispatch_queue_t tq); +static void _dispatch_stream_dispose(dispatch_fd_entry_t fd_entry, + dispatch_op_direction_t direction); +static void _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev); +static void _dispatch_disk_dispose(dispatch_disk_t disk); +static void _dispatch_stream_enqueue_operation(dispatch_stream_t stream, + dispatch_operation_t operation, dispatch_data_t data); 
+static void _dispatch_disk_enqueue_operation(dispatch_disk_t dsk, + dispatch_operation_t operation, dispatch_data_t data); +static void _dispatch_stream_cleanup_operations(dispatch_stream_t stream, + dispatch_io_t channel); +static void _dispatch_disk_cleanup_operations(dispatch_disk_t disk, + dispatch_io_t channel); +static void _dispatch_stream_source_handler(void *ctx); +static void _dispatch_stream_handler(void *ctx); +static void _dispatch_disk_handler(void *ctx); +static void _dispatch_disk_perform(void *ctxt); +static void _dispatch_operation_advise(dispatch_operation_t op, + size_t chunk_size); +static int _dispatch_operation_perform(dispatch_operation_t op); +static void _dispatch_operation_deliver_data(dispatch_operation_t op, + dispatch_op_flags_t flags); + +// Macros to wrap syscalls which return -1 on error, and retry on EINTR +#define _dispatch_io_syscall_switch_noerr(_err, _syscall, ...) do { \ + switch (((_err) = (((_syscall) == -1) ? errno : 0))) { \ + case EINTR: continue; \ + __VA_ARGS__ \ + } \ + } while (0) +#define _dispatch_io_syscall_switch(__err, __syscall, ...) 
do { \ + _dispatch_io_syscall_switch_noerr(__err, __syscall, \ + case 0: break; \ + __VA_ARGS__ \ + ); \ + } while (0) +#define _dispatch_io_syscall(__syscall) do { int __err; \ + _dispatch_io_syscall_switch(__err, __syscall); \ + } while (0) + +enum { + DISPATCH_OP_COMPLETE = 1, + DISPATCH_OP_DELIVER, + DISPATCH_OP_DELIVER_AND_COMPLETE, + DISPATCH_OP_COMPLETE_RESUME, + DISPATCH_OP_RESUME, + DISPATCH_OP_ERR, + DISPATCH_OP_FD_ERR, +}; + +#pragma mark - +#pragma mark dispatch_io_vtable + +static const struct dispatch_io_vtable_s _dispatch_io_vtable = { + .do_type = DISPATCH_IO_TYPE, + .do_kind = "channel", + .do_dispose = _dispatch_io_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +}; + +static const struct dispatch_operation_vtable_s _dispatch_operation_vtable = { + .do_type = DISPATCH_OPERATION_TYPE, + .do_kind = "operation", + .do_dispose = _dispatch_operation_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +}; + +static const struct dispatch_disk_vtable_s _dispatch_disk_vtable = { + .do_type = DISPATCH_DISK_TYPE, + .do_kind = "disk", + .do_dispose = _dispatch_disk_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +}; + +#pragma mark - +#pragma mark dispatch_io_hashtables + +#if TARGET_OS_EMBEDDED +#define DIO_HASH_SIZE 64u // must be a power of two +#else +#define DIO_HASH_SIZE 256u // must be a power of two +#endif +#define DIO_HASH(x) ((uintptr_t)((x) & (DIO_HASH_SIZE - 1))) + +// Global hashtable of dev_t -> disk_s mappings +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(, dispatch_disk_s) _dispatch_io_devs[DIO_HASH_SIZE]; +// Global hashtable of fd -> fd_entry_s mappings +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(, dispatch_fd_entry_s) _dispatch_io_fds[DIO_HASH_SIZE]; + +static dispatch_once_t _dispatch_io_devs_lockq_pred; +static dispatch_queue_t _dispatch_io_devs_lockq; 
+static dispatch_queue_t _dispatch_io_fds_lockq; + +static void +_dispatch_io_fds_lockq_init(void *context DISPATCH_UNUSED) +{ + _dispatch_io_fds_lockq = dispatch_queue_create( + "com.apple.libdispatch-io.fd_lockq", NULL); + unsigned int i; + for (i = 0; i < DIO_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_io_fds[i]); + } +} + +static void +_dispatch_io_devs_lockq_init(void *context DISPATCH_UNUSED) +{ + _dispatch_io_devs_lockq = dispatch_queue_create( + "com.apple.libdispatch-io.dev_lockq", NULL); + unsigned int i; + for (i = 0; i < DIO_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_io_devs[i]); + } +} + +#pragma mark - +#pragma mark dispatch_io_defaults + +enum { + DISPATCH_IOCNTL_CHUNK_PAGES = 1, + DISPATCH_IOCNTL_LOW_WATER_CHUNKS, + DISPATCH_IOCNTL_INITIAL_DELIVERY, + DISPATCH_IOCNTL_MAX_PENDING_IO_REQS, +}; + +static struct dispatch_io_defaults_s { + size_t chunk_pages, low_water_chunks, max_pending_io_reqs; + bool initial_delivery; +} dispatch_io_defaults = { + .chunk_pages = DIO_MAX_CHUNK_PAGES, + .low_water_chunks = DIO_DEFAULT_LOW_WATER_CHUNKS, + .max_pending_io_reqs = DIO_MAX_PENDING_IO_REQS, +}; + +#define _dispatch_iocntl_set_default(p, v) do { \ + dispatch_io_defaults.p = (typeof(dispatch_io_defaults.p))(v); \ + } while (0) + +void +_dispatch_iocntl(uint32_t param, uint64_t value) +{ + switch (param) { + case DISPATCH_IOCNTL_CHUNK_PAGES: + _dispatch_iocntl_set_default(chunk_pages, value); + break; + case DISPATCH_IOCNTL_LOW_WATER_CHUNKS: + _dispatch_iocntl_set_default(low_water_chunks, value); + break; + case DISPATCH_IOCNTL_INITIAL_DELIVERY: + _dispatch_iocntl_set_default(initial_delivery, value); + case DISPATCH_IOCNTL_MAX_PENDING_IO_REQS: + _dispatch_iocntl_set_default(max_pending_io_reqs, value); + break; + } +} + +#pragma mark - +#pragma mark dispatch_io_t + +static dispatch_io_t +_dispatch_io_create(dispatch_io_type_t type) +{ + dispatch_io_t channel = calloc(1ul, sizeof(struct dispatch_io_s)); + channel->do_vtable = &_dispatch_io_vtable; + 
channel->do_next = DISPATCH_OBJECT_LISTLESS; + channel->do_ref_cnt = 1; + channel->do_xref_cnt = 1; + channel->do_targetq = _dispatch_get_root_queue(0, true); + channel->params.type = type; + channel->params.high = SIZE_MAX; + channel->params.low = dispatch_io_defaults.low_water_chunks * + dispatch_io_defaults.chunk_pages * PAGE_SIZE; + channel->queue = dispatch_queue_create("com.apple.libdispatch-io.channelq", + NULL); + return channel; +} + +static void +_dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, + dispatch_queue_t queue, int err, void (^cleanup_handler)(int)) +{ + // Enqueue the cleanup handler on the suspended close queue + if (cleanup_handler) { + _dispatch_retain(queue); + dispatch_async(!err ? fd_entry->close_queue : channel->queue, ^{ + dispatch_async(queue, ^{ + _dispatch_io_debug("cleanup handler invoke", -1); + cleanup_handler(err); + }); + _dispatch_release(queue); + }); + } + if (fd_entry) { + channel->fd_entry = fd_entry; + dispatch_retain(fd_entry->barrier_queue); + dispatch_retain(fd_entry->barrier_group); + channel->barrier_queue = fd_entry->barrier_queue; + channel->barrier_group = fd_entry->barrier_group; + } else { + // Still need to create a barrier queue, since all operations go + // through it + channel->barrier_queue = dispatch_queue_create( + "com.apple.libdispatch-io.barrierq", NULL); + channel->barrier_group = dispatch_group_create(); + } +} + +static void +_dispatch_io_dispose(dispatch_io_t channel) +{ + if (channel->fd_entry) { + if (channel->fd_entry->path_data) { + // This modification is safe since path_data->channel is checked + // only on close_queue (which is still suspended at this point) + channel->fd_entry->path_data->channel = NULL; + } + // Cleanup handlers will only run when all channels related to this + // fd are complete + _dispatch_fd_entry_release(channel->fd_entry); + } + if (channel->queue) { + dispatch_release(channel->queue); + } + if (channel->barrier_queue) { + 
dispatch_release(channel->barrier_queue); + } + if (channel->barrier_group) { + dispatch_release(channel->barrier_group); + } + _dispatch_dispose(channel); +} + +static int +_dispatch_io_validate_type(dispatch_io_t channel, mode_t mode) +{ + int err = 0; + if (S_ISDIR(mode)) { + err = EISDIR; + } else if (channel->params.type == DISPATCH_IO_RANDOM && + (S_ISFIFO(mode) || S_ISSOCK(mode))) { + err = ESPIPE; + } + return err; +} + +static int +_dispatch_io_get_error(dispatch_operation_t op, dispatch_io_t channel, + bool ignore_closed) +{ + // On _any_ queue + int err; + if (op) { + channel = op->channel; + } + if (channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED)) { + if (!ignore_closed || channel->atomic_flags & DIO_STOPPED) { + err = ECANCELED; + } else { + err = 0; + } + } else { + err = op ? op->fd_entry->err : channel->err; + } + return err; +} + +#pragma mark - +#pragma mark dispatch_io_channels + +dispatch_io_t +dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, + dispatch_queue_t queue, void (^cleanup_handler)(int)) +{ + if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { + return NULL; + } + _dispatch_io_debug("io create", fd); + dispatch_io_t channel = _dispatch_io_create(type); + channel->fd = fd; + channel->fd_actual = fd; + dispatch_suspend(channel->queue); + _dispatch_retain(queue); + _dispatch_retain(channel); + _dispatch_fd_entry_init_async(fd, ^(dispatch_fd_entry_t fd_entry) { + // On barrier queue + int err = fd_entry->err; + if (!err) { + err = _dispatch_io_validate_type(channel, fd_entry->stat.mode); + } + if (!err && type == DISPATCH_IO_RANDOM) { + off_t f_ptr; + _dispatch_io_syscall_switch_noerr(err, + f_ptr = lseek(fd_entry->fd, 0, SEEK_CUR), + case 0: channel->f_ptr = f_ptr; break; + default: (void)dispatch_assume_zero(err); break; + ); + } + channel->err = err; + _dispatch_fd_entry_retain(fd_entry); + _dispatch_io_init(channel, fd_entry, queue, err, cleanup_handler); + dispatch_resume(channel->queue); + 
_dispatch_release(channel); + _dispatch_release(queue); + }); + return channel; +} + +dispatch_io_t +dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, + int oflag, mode_t mode, dispatch_queue_t queue, + void (^cleanup_handler)(int error)) +{ + if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || + !(path && *path == '/')) { + return NULL; + } + size_t pathlen = strlen(path); + dispatch_io_path_data_t path_data = malloc(sizeof(*path_data) + pathlen+1); + if (!path_data) { + return NULL; + } + _dispatch_io_debug("io create with path %s", -1, path); + dispatch_io_t channel = _dispatch_io_create(type); + channel->fd = -1; + channel->fd_actual = -1; + path_data->channel = channel; + path_data->oflag = oflag; + path_data->mode = mode; + path_data->pathlen = pathlen; + memcpy(path_data->path, path, pathlen + 1); + _dispatch_retain(queue); + _dispatch_retain(channel); + dispatch_async(channel->queue, ^{ + int err = 0; + struct stat st; + _dispatch_io_syscall_switch_noerr(err, + (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW || + (path_data->oflag & O_SYMLINK) == O_SYMLINK ? 
+ lstat(path_data->path, &st) : stat(path_data->path, &st), + case 0: + err = _dispatch_io_validate_type(channel, st.st_mode); + break; + default: + if ((path_data->oflag & O_CREAT) && + (*(path_data->path + path_data->pathlen - 1) != '/')) { + // Check parent directory + char *c = strrchr(path_data->path, '/'); + dispatch_assert(c); + *c = 0; + int perr; + _dispatch_io_syscall_switch_noerr(perr, + stat(path_data->path, &st), + case 0: + // Since the parent directory exists, open() will + // create a regular file after the fd_entry has + // been filled in + st.st_mode = S_IFREG; + err = 0; + break; + ); + *c = '/'; + } + break; + ); + channel->err = err; + if (err) { + free(path_data); + _dispatch_io_init(channel, NULL, queue, err, cleanup_handler); + _dispatch_release(channel); + _dispatch_release(queue); + return; + } + dispatch_suspend(channel->queue); + dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, + _dispatch_io_devs_lockq_init); + dispatch_async(_dispatch_io_devs_lockq, ^{ + dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create_with_path( + path_data, st.st_dev, st.st_mode); + _dispatch_io_init(channel, fd_entry, queue, 0, cleanup_handler); + dispatch_resume(channel->queue); + _dispatch_release(channel); + _dispatch_release(queue); + }); + }); + return channel; +} + +dispatch_io_t +dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, + dispatch_queue_t queue, void (^cleanup_handler)(int error)) +{ + if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { + return NULL; + } + _dispatch_io_debug("io create with io %p", -1, in_channel); + dispatch_io_t channel = _dispatch_io_create(type); + dispatch_suspend(channel->queue); + _dispatch_retain(queue); + _dispatch_retain(channel); + _dispatch_retain(in_channel); + dispatch_async(in_channel->queue, ^{ + int err0 = _dispatch_io_get_error(NULL, in_channel, false); + if (err0) { + channel->err = err0; + _dispatch_io_init(channel, NULL, queue, err0, cleanup_handler); + 
dispatch_resume(channel->queue); + _dispatch_release(channel); + _dispatch_release(in_channel); + _dispatch_release(queue); + return; + } + dispatch_async(in_channel->barrier_queue, ^{ + int err = _dispatch_io_get_error(NULL, in_channel, false); + // If there is no error, the fd_entry for the in_channel is valid. + // Since we are running on in_channel's queue, the fd_entry has been + // fully resolved and will stay valid for the duration of this block + if (!err) { + err = in_channel->err; + if (!err) { + err = in_channel->fd_entry->err; + } + } + if (!err) { + err = _dispatch_io_validate_type(channel, + in_channel->fd_entry->stat.mode); + } + if (!err && type == DISPATCH_IO_RANDOM && in_channel->fd != -1) { + off_t f_ptr; + _dispatch_io_syscall_switch_noerr(err, + f_ptr = lseek(in_channel->fd_entry->fd, 0, SEEK_CUR), + case 0: channel->f_ptr = f_ptr; break; + default: (void)dispatch_assume_zero(err); break; + ); + } + channel->err = err; + if (err) { + _dispatch_io_init(channel, NULL, queue, err, cleanup_handler); + dispatch_resume(channel->queue); + _dispatch_release(channel); + _dispatch_release(in_channel); + _dispatch_release(queue); + return; + } + if (in_channel->fd == -1) { + // in_channel was created from path + channel->fd = -1; + channel->fd_actual = -1; + mode_t mode = in_channel->fd_entry->stat.mode; + dev_t dev = in_channel->fd_entry->stat.dev; + size_t path_data_len = sizeof(struct dispatch_io_path_data_s) + + in_channel->fd_entry->path_data->pathlen + 1; + dispatch_io_path_data_t path_data = malloc(path_data_len); + memcpy(path_data, in_channel->fd_entry->path_data, + path_data_len); + path_data->channel = channel; + // lockq_io_devs is known to already exist + dispatch_async(_dispatch_io_devs_lockq, ^{ + dispatch_fd_entry_t fd_entry; + fd_entry = _dispatch_fd_entry_create_with_path(path_data, + dev, mode); + _dispatch_io_init(channel, fd_entry, queue, 0, + cleanup_handler); + dispatch_resume(channel->queue); + _dispatch_release(channel); + 
				_dispatch_release(queue);
			});
		} else {
			// in_channel has a live fd: share its fd_entry directly.
			// The extra fd_entry retain is balanced when the new channel
			// releases its fd_entry reference.
			dispatch_fd_entry_t fd_entry = in_channel->fd_entry;
			channel->fd = in_channel->fd;
			channel->fd_actual = in_channel->fd_actual;
			_dispatch_fd_entry_retain(fd_entry);
			_dispatch_io_init(channel, fd_entry, queue, 0, cleanup_handler);
			dispatch_resume(channel->queue);
			_dispatch_release(channel);
			_dispatch_release(queue);
		}
		_dispatch_release(in_channel);
	});
	});
	return channel;
}

#pragma mark -
#pragma mark dispatch_io_accessors

// Set the high-water mark for the channel. Runs asynchronously on the
// channel queue; the channel is retained across the async block.
void
dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water)
{
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		_dispatch_io_debug("io set high water", channel->fd);
		// Keep the invariant low <= high by clamping low down first
		if (channel->params.low > high_water) {
			channel->params.low = high_water;
		}
		// A zero high-water mark is normalized to 1 (deliver at least a byte)
		channel->params.high = high_water ? high_water : 1;
		_dispatch_release(channel);
	});
}

// Set the low-water mark for the channel, raising the high-water mark if
// needed to preserve low <= high.
void
dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water)
{
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		_dispatch_io_debug("io set low water", channel->fd);
		if (channel->params.high < low_water) {
			// NOTE(review): the ": 1" arm is unreachable here — the guard
			// (high < low_water, both size_t) implies low_water >= 1.
			// Kept for symmetry with dispatch_io_set_high_water.
			channel->params.high = low_water ? low_water : 1;
		}
		channel->params.low = low_water;
		_dispatch_release(channel);
	});
}

// Set the delivery interval and interval flags (e.g. strict-interval)
// used by the per-operation timer.
void
dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval,
		unsigned long flags)
{
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		_dispatch_io_debug("io set interval", channel->fd);
		channel->params.interval = interval;
		channel->params.interval_flags = flags;
		_dispatch_release(channel);
	});
}

// Retarget the channel; the new queue is retained before the async hop and
// the previous target queue is released on the channel queue.
void
_dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq)
{
	_dispatch_retain(dq);
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		dispatch_queue_t prev_dq = channel->do_targetq;
		channel->do_targetq = dq;
		_dispatch_release(prev_dq);
		_dispatch_release(channel);
	});
}

// Return the channel's descriptor, or -1 if the channel is closed/stopped.
// When called from within an I/O handler (thread-specific key matches this
// channel) a path-based channel is opened on demand.
dispatch_fd_t
dispatch_io_get_descriptor(dispatch_io_t channel)
{
	if (channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED)) {
		return -1;
	}
	dispatch_fd_t fd = channel->fd_actual;
	if (fd == -1 &&
			_dispatch_thread_getspecific(dispatch_io_key) == channel) {
		dispatch_fd_entry_t fd_entry = channel->fd_entry;
		// Best-effort open; fd_actual is updated on success
		(void)_dispatch_fd_entry_open(fd_entry, channel);
	}
	return channel->fd_actual;
}

#pragma mark -
#pragma mark dispatch_io_operations

// Stop the channel: mark it DIO_STOPPED immediately (so new operations fail
// fast), then asynchronously cancel outstanding operations on the barrier
// queue.
static void
_dispatch_io_stop(dispatch_io_t channel)
{
	_dispatch_io_debug("io stop", channel->fd);
	(void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED);
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		dispatch_async(channel->barrier_queue, ^{
			dispatch_fd_entry_t fd_entry = channel->fd_entry;
			if (fd_entry) {
				_dispatch_io_debug("io stop cleanup", channel->fd);
				_dispatch_fd_entry_cleanup_operations(fd_entry, channel);
				channel->fd_entry = NULL;
				_dispatch_fd_entry_release(fd_entry);
			} else if (channel->fd != -1) {
				// Stop after close, need to check if fd_entry still exists
				_dispatch_retain(channel);
				dispatch_async(_dispatch_io_fds_lockq, ^{
					_dispatch_io_debug("io stop after close cleanup",
							channel->fd);
					// On the fds lock queue: look the fd up in the global
					// hash; the entry may already have been torn down
					dispatch_fd_entry_t fdi;
					uintptr_t hash = DIO_HASH(channel->fd);
					TAILQ_FOREACH(fdi, &_dispatch_io_fds[hash], fd_list) {
						if (fdi->fd == channel->fd) {
							_dispatch_fd_entry_cleanup_operations(fdi, channel);
							break;
						}
					}
					_dispatch_release(channel);
				});
			}
			_dispatch_release(channel);
		});
	});
}

// Close the channel. With DISPATCH_IO_STOP this cancels in-flight operations
// via _dispatch_io_stop(); otherwise outstanding operations are allowed to
// drain and the channel's fd_entry reference is dropped on the barrier queue.
void
dispatch_io_close(dispatch_io_t channel, unsigned long flags)
{
	if (flags & DISPATCH_IO_STOP) {
		// Don't stop an already stopped channel
		if (channel->atomic_flags & DIO_STOPPED) {
			return;
		}
		return _dispatch_io_stop(channel);
	}
	// Don't close an already closed or stopped channel
	if (channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED)) {
		return;
	}
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		dispatch_async(channel->barrier_queue, ^{
			_dispatch_io_debug("io close", channel->fd);
			(void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED);
			dispatch_fd_entry_t fd_entry = channel->fd_entry;
			if (fd_entry) {
				// Path-based channels keep fd_entry set so a later
				// stop-after-close can still find it
				if (!fd_entry->path_data) {
					channel->fd_entry = NULL;
				}
				_dispatch_fd_entry_release(fd_entry);
			}
			_dispatch_release(channel);
		});
	});
}

// Execute a barrier block once all currently enqueued operations on the
// channel have completed. The barrier queue is suspended while the group
// notification (and the user block) runs, serializing barriers.
void
dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier)
{
	_dispatch_retain(channel);
	dispatch_async(channel->queue, ^{
		dispatch_queue_t io_q = channel->do_targetq;
		dispatch_queue_t barrier_queue = channel->barrier_queue;
		dispatch_group_t barrier_group = channel->barrier_group;
		dispatch_async(barrier_queue, ^{
			dispatch_suspend(barrier_queue);
			dispatch_group_notify(barrier_group, io_q, ^{
				// Make the channel visible to dispatch_io_get_descriptor()
				// while the user barrier runs
				_dispatch_thread_setspecific(dispatch_io_key, channel);
				barrier();
				_dispatch_thread_setspecific(dispatch_io_key, NULL);
				dispatch_resume(barrier_queue);
				_dispatch_release(channel);
			});
		});
	});
}

// Enqueue an asynchronous read of up to `length` bytes at `offset`,
// delivering results to `handler` on `queue`.
void
dispatch_io_read(dispatch_io_t channel, off_t offset, size_t length,
		dispatch_queue_t queue, dispatch_io_handler_t handler)
{
	_dispatch_retain(channel);
	_dispatch_retain(queue);
	dispatch_async(channel->queue,
^{ + dispatch_operation_t op; + op = _dispatch_operation_create(DOP_DIR_READ, channel, offset, + length, dispatch_data_empty, queue, handler); + if (op) { + dispatch_queue_t barrier_q = channel->barrier_queue; + dispatch_async(barrier_q, ^{ + _dispatch_operation_enqueue(op, DOP_DIR_READ, + dispatch_data_empty); + }); + } + _dispatch_release(channel); + _dispatch_release(queue); + }); +} + +void +dispatch_io_write(dispatch_io_t channel, off_t offset, dispatch_data_t data, + dispatch_queue_t queue, dispatch_io_handler_t handler) +{ + _dispatch_io_data_retain(data); + _dispatch_retain(channel); + _dispatch_retain(queue); + dispatch_async(channel->queue, ^{ + dispatch_operation_t op; + op = _dispatch_operation_create(DOP_DIR_WRITE, channel, offset, + dispatch_data_get_size(data), data, queue, handler); + if (op) { + dispatch_queue_t barrier_q = channel->barrier_queue; + dispatch_async(barrier_q, ^{ + _dispatch_operation_enqueue(op, DOP_DIR_WRITE, data); + _dispatch_io_data_release(data); + }); + } else { + _dispatch_io_data_release(data); + } + _dispatch_release(channel); + _dispatch_release(queue); + }); +} + +void +dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, + void (^handler)(dispatch_data_t, int)) +{ + _dispatch_retain(queue); + _dispatch_fd_entry_init_async(fd, ^(dispatch_fd_entry_t fd_entry) { + // On barrier queue + if (fd_entry->err) { + int err = fd_entry->err; + dispatch_async(queue, ^{ + _dispatch_io_debug("convenience handler invoke", fd); + handler(dispatch_data_empty, err); + }); + _dispatch_release(queue); + return; + } + // Safe to access fd_entry on barrier queue + dispatch_io_t channel = fd_entry->convenience_channel; + if (!channel) { + channel = _dispatch_io_create(DISPATCH_IO_STREAM); + channel->fd = fd; + channel->fd_actual = fd; + channel->fd_entry = fd_entry; + dispatch_retain(fd_entry->barrier_queue); + dispatch_retain(fd_entry->barrier_group); + channel->barrier_queue = fd_entry->barrier_queue; + 
channel->barrier_group = fd_entry->barrier_group; + fd_entry->convenience_channel = channel; + } + __block dispatch_data_t deliver_data = dispatch_data_empty; + __block int err = 0; + dispatch_async(fd_entry->close_queue, ^{ + dispatch_async(queue, ^{ + _dispatch_io_debug("convenience handler invoke", fd); + handler(deliver_data, err); + _dispatch_io_data_release(deliver_data); + }); + _dispatch_release(queue); + }); + dispatch_operation_t op = + _dispatch_operation_create(DOP_DIR_READ, channel, 0, + length, dispatch_data_empty, + _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, + false), ^(bool done, dispatch_data_t data, int error) { + if (data) { + data = dispatch_data_create_concat(deliver_data, data); + _dispatch_io_data_release(deliver_data); + deliver_data = data; + } + if (done) { + err = error; + } + }); + if (op) { + _dispatch_operation_enqueue(op, DOP_DIR_READ, dispatch_data_empty); + } + }); +} + +void +dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, + void (^handler)(dispatch_data_t, int)) +{ + _dispatch_io_data_retain(data); + _dispatch_retain(queue); + _dispatch_fd_entry_init_async(fd, ^(dispatch_fd_entry_t fd_entry) { + // On barrier queue + if (fd_entry->err) { + int err = fd_entry->err; + dispatch_async(queue, ^{ + _dispatch_io_debug("convenience handler invoke", fd); + handler(NULL, err); + }); + _dispatch_release(queue); + return; + } + // Safe to access fd_entry on barrier queue + dispatch_io_t channel = fd_entry->convenience_channel; + if (!channel) { + channel = _dispatch_io_create(DISPATCH_IO_STREAM); + channel->fd = fd; + channel->fd_actual = fd; + channel->fd_entry = fd_entry; + dispatch_retain(fd_entry->barrier_queue); + dispatch_retain(fd_entry->barrier_group); + channel->barrier_queue = fd_entry->barrier_queue; + channel->barrier_group = fd_entry->barrier_group; + fd_entry->convenience_channel = channel; + } + __block dispatch_data_t deliver_data = NULL; + __block int err = 0; + 
dispatch_async(fd_entry->close_queue, ^{ + dispatch_async(queue, ^{ + _dispatch_io_debug("convenience handler invoke", fd); + handler(deliver_data, err); + if (deliver_data) { + _dispatch_io_data_release(deliver_data); + } + }); + _dispatch_release(queue); + }); + dispatch_operation_t op = + _dispatch_operation_create(DOP_DIR_WRITE, channel, 0, + dispatch_data_get_size(data), data, + _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, + false), ^(bool done, dispatch_data_t d, int error) { + if (done) { + if (d) { + _dispatch_io_data_retain(d); + deliver_data = d; + } + err = error; + } + }); + if (op) { + _dispatch_operation_enqueue(op, DOP_DIR_WRITE, data); + } + _dispatch_io_data_release(data); + }); +} + +#pragma mark - +#pragma mark dispatch_operation_t + +static dispatch_operation_t +_dispatch_operation_create(dispatch_op_direction_t direction, + dispatch_io_t channel, off_t offset, size_t length, + dispatch_data_t data, dispatch_queue_t queue, + dispatch_io_handler_t handler) +{ + // On channel queue + dispatch_assert(direction < DOP_DIR_MAX); + _dispatch_io_debug("operation create", channel->fd); +#if DISPATCH_IO_DEBUG + int fd = channel->fd; +#endif + // Safe to call _dispatch_io_get_error() with channel->fd_entry since + // that can only be NULL if atomic_flags are set rdar://problem/8362514 + int err = _dispatch_io_get_error(NULL, channel, false); + if (err || !length) { + _dispatch_io_data_retain(data); + _dispatch_retain(queue); + dispatch_async(channel->barrier_queue, ^{ + dispatch_async(queue, ^{ + dispatch_data_t d = data; + if (direction == DOP_DIR_READ && err) { + d = NULL; + } else if (direction == DOP_DIR_WRITE && !err) { + d = NULL; + } + _dispatch_io_debug("IO handler invoke", fd); + handler(true, d, err); + _dispatch_io_data_release(data); + }); + _dispatch_release(queue); + }); + return NULL; + } + dispatch_operation_t op; + op = calloc(1ul, sizeof(struct dispatch_operation_s)); + op->do_vtable = &_dispatch_operation_vtable; + 
op->do_next = DISPATCH_OBJECT_LISTLESS; + op->do_ref_cnt = 1; + op->do_xref_cnt = 0; // operation object is not exposed externally + op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL); + op->op_q->do_targetq = queue; + _dispatch_retain(queue); + op->active = false; + op->direction = direction; + op->offset = offset + channel->f_ptr; + op->length = length; + op->handler = Block_copy(handler); + _dispatch_retain(channel); + op->channel = channel; + op->params = channel->params; + // Take a snapshot of the priority of the channel queue. The actual I/O + // for this operation will be performed at this priority + dispatch_queue_t targetq = op->channel->do_targetq; + while (fastpath(targetq->do_targetq)) { + targetq = targetq->do_targetq; + } + op->do_targetq = targetq; + return op; +} + +static void +_dispatch_operation_dispose(dispatch_operation_t op) +{ + // Deliver the data if there's any + if (op->fd_entry) { + _dispatch_operation_deliver_data(op, DOP_DONE); + dispatch_group_leave(op->fd_entry->barrier_group); + _dispatch_fd_entry_release(op->fd_entry); + } + if (op->channel) { + _dispatch_release(op->channel); + } + if (op->timer) { + dispatch_release(op->timer); + } + // For write operations, op->buf is owned by op->buf_data + if (op->buf && op->direction == DOP_DIR_READ) { + free(op->buf); + } + if (op->buf_data) { + _dispatch_io_data_release(op->buf_data); + } + if (op->data) { + _dispatch_io_data_release(op->data); + } + if (op->op_q) { + dispatch_release(op->op_q); + } + Block_release(op->handler); + _dispatch_dispose(op); +} + +static void +_dispatch_operation_enqueue(dispatch_operation_t op, + dispatch_op_direction_t direction, dispatch_data_t data) +{ + // Called from the barrier queue + _dispatch_io_data_retain(data); + // If channel is closed or stopped, then call the handler immediately + int err = _dispatch_io_get_error(NULL, op->channel, false); + if (err) { + dispatch_io_handler_t handler = op->handler; + dispatch_async(op->op_q, 
^{ + dispatch_data_t d = data; + if (direction == DOP_DIR_READ && err) { + d = NULL; + } else if (direction == DOP_DIR_WRITE && !err) { + d = NULL; + } + handler(true, d, err); + _dispatch_io_data_release(data); + }); + _dispatch_release(op); + return; + } + // Finish operation init + op->fd_entry = op->channel->fd_entry; + _dispatch_fd_entry_retain(op->fd_entry); + dispatch_group_enter(op->fd_entry->barrier_group); + dispatch_disk_t disk = op->fd_entry->disk; + if (!disk) { + dispatch_stream_t stream = op->fd_entry->streams[direction]; + dispatch_async(stream->dq, ^{ + _dispatch_stream_enqueue_operation(stream, op, data); + _dispatch_io_data_release(data); + }); + } else { + dispatch_async(disk->pick_queue, ^{ + _dispatch_disk_enqueue_operation(disk, op, data); + _dispatch_io_data_release(data); + }); + } +} + +static bool +_dispatch_operation_should_enqueue(dispatch_operation_t op, + dispatch_queue_t tq, dispatch_data_t data) +{ + // On stream queue or disk queue + _dispatch_io_debug("enqueue operation", op->fd_entry->fd); + _dispatch_io_data_retain(data); + op->data = data; + int err = _dispatch_io_get_error(op, NULL, true); + if (err) { + op->err = err; + // Final release + _dispatch_release(op); + return false; + } + if (op->params.interval) { + dispatch_resume(_dispatch_operation_timer(tq, op)); + } + return true; +} + +static dispatch_source_t +_dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) +{ + // On stream queue or pick queue + if (op->timer) { + return op->timer; + } + dispatch_source_t timer = dispatch_source_create( + DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq); + dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, + op->params.interval), op->params.interval, 0); + dispatch_source_set_event_handler(timer, ^{ + // On stream queue or pick queue + if (dispatch_source_testcancel(timer)) { + // Do nothing. 
The operation has already completed + return; + } + dispatch_op_flags_t flags = DOP_DEFAULT; + if (op->params.interval_flags & DISPATCH_IO_STRICT_INTERVAL) { + // Deliver even if there is less data than the low-water mark + flags |= DOP_DELIVER; + } + // If the operation is active, dont deliver data + if ((op->active) && (flags & DOP_DELIVER)) { + op->flags = flags; + } else { + _dispatch_operation_deliver_data(op, flags); + } + }); + op->timer = timer; + return op->timer; +} + +#pragma mark - +#pragma mark dispatch_fd_entry_t + +static inline void +_dispatch_fd_entry_retain(dispatch_fd_entry_t fd_entry) { + dispatch_suspend(fd_entry->close_queue); +} + +static inline void +_dispatch_fd_entry_release(dispatch_fd_entry_t fd_entry) { + dispatch_resume(fd_entry->close_queue); +} + +static void +_dispatch_fd_entry_init_async(dispatch_fd_t fd, + dispatch_fd_entry_init_callback_t completion_callback) +{ + static dispatch_once_t _dispatch_io_fds_lockq_pred; + dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, + _dispatch_io_fds_lockq_init); + dispatch_async(_dispatch_io_fds_lockq, ^{ + _dispatch_io_debug("fd entry init", fd); + dispatch_fd_entry_t fd_entry = NULL; + // Check to see if there is an existing entry for the given fd + uintptr_t hash = DIO_HASH(fd); + TAILQ_FOREACH(fd_entry, &_dispatch_io_fds[hash], fd_list) { + if (fd_entry->fd == fd) { + // Retain the fd_entry to ensure it cannot go away until the + // stat() has completed + _dispatch_fd_entry_retain(fd_entry); + break; + } + } + if (!fd_entry) { + // If we did not find an existing entry, create one + fd_entry = _dispatch_fd_entry_create_with_fd(fd, hash); + } + dispatch_async(fd_entry->barrier_queue, ^{ + _dispatch_io_debug("fd entry init completion", fd); + completion_callback(fd_entry); + // stat() is complete, release reference to fd_entry + _dispatch_fd_entry_release(fd_entry); + }); + }); +} + +static dispatch_fd_entry_t +_dispatch_fd_entry_create(dispatch_queue_t q) +{ + dispatch_fd_entry_t fd_entry; 
+ fd_entry = calloc(1ul, sizeof(struct dispatch_fd_entry_s)); + fd_entry->close_queue = dispatch_queue_create( + "com.apple.libdispatch-io.closeq", NULL); + // Use target queue to ensure that no concurrent lookups are going on when + // the close queue is running + fd_entry->close_queue->do_targetq = q; + _dispatch_retain(q); + // Suspend the cleanup queue until closing + _dispatch_fd_entry_retain(fd_entry); + return fd_entry; +} + +static dispatch_fd_entry_t +_dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) +{ + // On fds lock queue + _dispatch_io_debug("fd entry create", fd); + dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( + _dispatch_io_fds_lockq); + fd_entry->fd = fd; + TAILQ_INSERT_TAIL(&_dispatch_io_fds[hash], fd_entry, fd_list); + fd_entry->barrier_queue = dispatch_queue_create( + "com.apple.libdispatch-io.barrierq", NULL); + fd_entry->barrier_group = dispatch_group_create(); + dispatch_async(fd_entry->barrier_queue, ^{ + _dispatch_io_debug("fd entry stat", fd); + int err, orig_flags, orig_nosigpipe = -1; + struct stat st; + _dispatch_io_syscall_switch(err, + fstat(fd, &st), + default: fd_entry->err = err; return; + ); + fd_entry->stat.dev = st.st_dev; + fd_entry->stat.mode = st.st_mode; + _dispatch_io_syscall_switch(err, + orig_flags = fcntl(fd, F_GETFL), + default: (void)dispatch_assume_zero(err); break; + ); +#if DISPATCH_USE_SETNOSIGPIPE // rdar://problem/4121123 + if (S_ISFIFO(st.st_mode)) { + _dispatch_io_syscall_switch(err, + orig_nosigpipe = fcntl(fd, F_GETNOSIGPIPE), + default: (void)dispatch_assume_zero(err); break; + ); + if (orig_nosigpipe != -1) { + _dispatch_io_syscall_switch(err, + orig_nosigpipe = fcntl(fd, F_SETNOSIGPIPE, 1), + default: + orig_nosigpipe = -1; + (void)dispatch_assume_zero(err); + break; + ); + } + } +#endif + if (S_ISREG(st.st_mode)) { + if (orig_flags != -1) { + _dispatch_io_syscall_switch(err, + fcntl(fd, F_SETFL, orig_flags & ~O_NONBLOCK), + default: + orig_flags = -1; + 
(void)dispatch_assume_zero(err); + break; + ); + } + int32_t dev = major(st.st_dev); + // We have to get the disk on the global dev queue. The + // barrier queue cannot continue until that is complete + dispatch_suspend(fd_entry->barrier_queue); + dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, + _dispatch_io_devs_lockq_init); + dispatch_async(_dispatch_io_devs_lockq, ^{ + _dispatch_disk_init(fd_entry, dev); + dispatch_resume(fd_entry->barrier_queue); + }); + } else { + if (orig_flags != -1) { + _dispatch_io_syscall_switch(err, + fcntl(fd, F_SETFL, orig_flags | O_NONBLOCK), + default: + orig_flags = -1; + (void)dispatch_assume_zero(err); + break; + ); + } + _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, false)); + } + fd_entry->orig_flags = orig_flags; + fd_entry->orig_nosigpipe = orig_nosigpipe; + }); + // This is the first item run when the close queue is resumed, indicating + // that all channels associated with this entry have been closed and that + // all operations associated with this entry have been freed + dispatch_async(fd_entry->close_queue, ^{ + if (!fd_entry->disk) { + _dispatch_io_debug("close queue fd_entry cleanup", fd); + dispatch_op_direction_t dir; + for (dir = 0; dir < DOP_DIR_MAX; dir++) { + _dispatch_stream_dispose(fd_entry, dir); + } + } else { + dispatch_disk_t disk = fd_entry->disk; + dispatch_async(_dispatch_io_devs_lockq, ^{ + _dispatch_release(disk); + }); + } + // Remove this entry from the global fd list + TAILQ_REMOVE(&_dispatch_io_fds[hash], fd_entry, fd_list); + }); + // If there was a source associated with this stream, disposing of the + // source cancels it and suspends the close queue. 
Freeing the fd_entry + // structure must happen after the source cancel handler has finished + dispatch_async(fd_entry->close_queue, ^{ + _dispatch_io_debug("close queue release", fd); + dispatch_release(fd_entry->close_queue); + _dispatch_io_debug("barrier queue release", fd); + dispatch_release(fd_entry->barrier_queue); + _dispatch_io_debug("barrier group release", fd); + dispatch_release(fd_entry->barrier_group); + if (fd_entry->orig_flags != -1) { + _dispatch_io_syscall( + fcntl(fd, F_SETFL, fd_entry->orig_flags) + ); + } +#if DISPATCH_USE_SETNOSIGPIPE // rdar://problem/4121123 + if (fd_entry->orig_nosigpipe != -1) { + _dispatch_io_syscall( + fcntl(fd, F_SETNOSIGPIPE, fd_entry->orig_nosigpipe) + ); + } +#endif + if (fd_entry->convenience_channel) { + fd_entry->convenience_channel->fd_entry = NULL; + dispatch_release(fd_entry->convenience_channel); + } + free(fd_entry); + }); + return fd_entry; +} + +static dispatch_fd_entry_t +_dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, + dev_t dev, mode_t mode) +{ + // On devs lock queue + _dispatch_io_debug("fd entry create with path %s", -1, path_data->path); + dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( + path_data->channel->queue); + if (S_ISREG(mode)) { + _dispatch_disk_init(fd_entry, major(dev)); + } else { + _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, false)); + } + fd_entry->fd = -1; + fd_entry->orig_flags = -1; + fd_entry->path_data = path_data; + fd_entry->stat.dev = dev; + fd_entry->stat.mode = mode; + fd_entry->barrier_queue = dispatch_queue_create( + "com.apple.libdispatch-io.barrierq", NULL); + fd_entry->barrier_group = dispatch_group_create(); + // This is the first item run when the close queue is resumed, indicating + // that the channel associated with this entry has been closed and that + // all operations associated with this entry have been freed + dispatch_async(fd_entry->close_queue, ^{ + _dispatch_io_debug("close 
queue fd_entry cleanup", -1); + if (!fd_entry->disk) { + dispatch_op_direction_t dir; + for (dir = 0; dir < DOP_DIR_MAX; dir++) { + _dispatch_stream_dispose(fd_entry, dir); + } + } + if (fd_entry->fd != -1) { + close(fd_entry->fd); + } + if (fd_entry->path_data->channel) { + // If associated channel has not been released yet, mark it as + // no longer having an fd_entry (for stop after close). + // It is safe to modify channel since we are on close_queue with + // target queue the channel queue + fd_entry->path_data->channel->fd_entry = NULL; + } + }); + dispatch_async(fd_entry->close_queue, ^{ + _dispatch_io_debug("close queue release", -1); + dispatch_release(fd_entry->close_queue); + dispatch_release(fd_entry->barrier_queue); + dispatch_release(fd_entry->barrier_group); + free(fd_entry->path_data); + free(fd_entry); + }); + return fd_entry; +} + +static int +_dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) +{ + if (!(fd_entry->fd == -1 && fd_entry->path_data)) { + return 0; + } + if (fd_entry->err) { + return fd_entry->err; + } + int fd = -1; + int oflag = fd_entry->disk ? 
fd_entry->path_data->oflag & ~O_NONBLOCK : + fd_entry->path_data->oflag | O_NONBLOCK; +open: + fd = open(fd_entry->path_data->path, oflag, fd_entry->path_data->mode); + if (fd == -1) { + int err = errno; + if (err == EINTR) { + goto open; + } + (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err); + return err; + } + if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd)) { + // Lost the race with another open + close(fd); + } else { + channel->fd_actual = fd; + } + return 0; +} + +static void +_dispatch_fd_entry_cleanup_operations(dispatch_fd_entry_t fd_entry, + dispatch_io_t channel) +{ + if (fd_entry->disk) { + if (channel) { + _dispatch_retain(channel); + } + _dispatch_fd_entry_retain(fd_entry); + dispatch_async(fd_entry->disk->pick_queue, ^{ + _dispatch_disk_cleanup_operations(fd_entry->disk, channel); + _dispatch_fd_entry_release(fd_entry); + if (channel) { + _dispatch_release(channel); + } + }); + } else { + dispatch_op_direction_t direction; + for (direction = 0; direction < DOP_DIR_MAX; direction++) { + dispatch_stream_t stream = fd_entry->streams[direction]; + if (!stream) { + continue; + } + if (channel) { + _dispatch_retain(channel); + } + _dispatch_fd_entry_retain(fd_entry); + dispatch_async(stream->dq, ^{ + _dispatch_stream_cleanup_operations(stream, channel); + _dispatch_fd_entry_release(fd_entry); + if (channel) { + _dispatch_release(channel); + } + }); + } + } +} + +#pragma mark - +#pragma mark dispatch_stream_t/dispatch_disk_t + +static void +_dispatch_stream_init(dispatch_fd_entry_t fd_entry, dispatch_queue_t tq) +{ + dispatch_op_direction_t direction; + for (direction = 0; direction < DOP_DIR_MAX; direction++) { + dispatch_stream_t stream; + stream = calloc(1ul, sizeof(struct dispatch_stream_s)); + stream->dq = dispatch_queue_create("com.apple.libdispatch-io.streamq", + NULL); + _dispatch_retain(tq); + stream->dq->do_targetq = tq; + TAILQ_INIT(&stream->operations[DISPATCH_IO_RANDOM]); + TAILQ_INIT(&stream->operations[DISPATCH_IO_STREAM]); + 
fd_entry->streams[direction] = stream; + } +} + +static void +_dispatch_stream_dispose(dispatch_fd_entry_t fd_entry, + dispatch_op_direction_t direction) +{ + // On close queue + dispatch_stream_t stream = fd_entry->streams[direction]; + if (!stream) { + return; + } + dispatch_assert(TAILQ_EMPTY(&stream->operations[DISPATCH_IO_STREAM])); + dispatch_assert(TAILQ_EMPTY(&stream->operations[DISPATCH_IO_RANDOM])); + if (stream->source) { + // Balanced by source cancel handler: + _dispatch_fd_entry_retain(fd_entry); + dispatch_source_cancel(stream->source); + dispatch_resume(stream->source); + dispatch_release(stream->source); + } + dispatch_release(stream->dq); + free(stream); +} + +static void +_dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) +{ + // On devs lock queue + dispatch_disk_t disk; + char label_name[256]; + // Check to see if there is an existing entry for the given device + uintptr_t hash = DIO_HASH(dev); + TAILQ_FOREACH(disk, &_dispatch_io_devs[hash], disk_list) { + if (disk->dev == dev) { + _dispatch_retain(disk); + goto out; + } + } + // Otherwise create a new entry + size_t pending_reqs_depth = dispatch_io_defaults.max_pending_io_reqs; + disk = calloc(1ul, sizeof(struct dispatch_disk_s) + (pending_reqs_depth * + sizeof(dispatch_operation_t))); + disk->do_vtable = &_dispatch_disk_vtable; + disk->do_next = DISPATCH_OBJECT_LISTLESS; + disk->do_ref_cnt = 1; + disk->do_xref_cnt = 0; + disk->advise_list_depth = pending_reqs_depth; + disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, + false); + disk->dev = dev; + TAILQ_INIT(&disk->operations); + disk->cur_rq = TAILQ_FIRST(&disk->operations); + sprintf(label_name, "com.apple.libdispatch-io.deviceq.%d", dev); + disk->pick_queue = dispatch_queue_create(label_name, NULL); + TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); +out: + fd_entry->disk = disk; + TAILQ_INIT(&fd_entry->stream_ops); +} + +static void +_dispatch_disk_dispose(dispatch_disk_t disk) +{ + 
uintptr_t hash = DIO_HASH(disk->dev); + TAILQ_REMOVE(&_dispatch_io_devs[hash], disk, disk_list); + dispatch_assert(TAILQ_EMPTY(&disk->operations)); + size_t i; + for (i=0; iadvise_list_depth; ++i) { + dispatch_assert(!disk->advise_list[i]); + } + dispatch_release(disk->pick_queue); + free(disk); +} + +#pragma mark - +#pragma mark dispatch_stream_operations/dispatch_disk_operations + +static inline bool +_dispatch_stream_operation_avail(dispatch_stream_t stream) +{ + return !(TAILQ_EMPTY(&stream->operations[DISPATCH_IO_RANDOM])) || + !(TAILQ_EMPTY(&stream->operations[DISPATCH_IO_STREAM])); +} + +static void +_dispatch_stream_enqueue_operation(dispatch_stream_t stream, + dispatch_operation_t op, dispatch_data_t data) +{ + if (!_dispatch_operation_should_enqueue(op, stream->dq, data)) { + return; + } + bool no_ops = !_dispatch_stream_operation_avail(stream); + TAILQ_INSERT_TAIL(&stream->operations[op->params.type], op, operation_list); + if (no_ops) { + dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + } +} + +static void +_dispatch_disk_enqueue_operation(dispatch_disk_t disk, dispatch_operation_t op, + dispatch_data_t data) +{ + if (!_dispatch_operation_should_enqueue(op, disk->pick_queue, data)) { + return; + } + if (op->params.type == DISPATCH_IO_STREAM) { + if (TAILQ_EMPTY(&op->fd_entry->stream_ops)) { + TAILQ_INSERT_TAIL(&disk->operations, op, operation_list); + } + TAILQ_INSERT_TAIL(&op->fd_entry->stream_ops, op, stream_list); + } else { + TAILQ_INSERT_TAIL(&disk->operations, op, operation_list); + } + _dispatch_disk_handler(disk); +} + +static void +_dispatch_stream_complete_operation(dispatch_stream_t stream, + dispatch_operation_t op) +{ + // On stream queue + _dispatch_io_debug("complete operation", op->fd_entry->fd); + TAILQ_REMOVE(&stream->operations[op->params.type], op, operation_list); + if (op == stream->op) { + stream->op = NULL; + } + if (op->timer) { + dispatch_source_cancel(op->timer); + } + // Final release will deliver any pending 
data + _dispatch_release(op); +} + +static void +_dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) +{ + // On pick queue + _dispatch_io_debug("complete operation", op->fd_entry->fd); + // Current request is always the last op returned + if (disk->cur_rq == op) { + disk->cur_rq = TAILQ_PREV(op, dispatch_disk_operations_s, + operation_list); + } + if (op->params.type == DISPATCH_IO_STREAM) { + // Check if there are other pending stream operations behind it + dispatch_operation_t op_next = TAILQ_NEXT(op, stream_list); + TAILQ_REMOVE(&op->fd_entry->stream_ops, op, stream_list); + if (op_next) { + TAILQ_INSERT_TAIL(&disk->operations, op_next, operation_list); + } + } + TAILQ_REMOVE(&disk->operations, op, operation_list); + if (op->timer) { + dispatch_source_cancel(op->timer); + } + // Final release will deliver any pending data + _dispatch_release(op); +} + +static dispatch_operation_t +_dispatch_stream_pick_next_operation(dispatch_stream_t stream, + dispatch_operation_t op) +{ + // On stream queue + if (!op) { + // On the first run through, pick the first operation + if (!_dispatch_stream_operation_avail(stream)) { + return op; + } + if (!TAILQ_EMPTY(&stream->operations[DISPATCH_IO_STREAM])) { + op = TAILQ_FIRST(&stream->operations[DISPATCH_IO_STREAM]); + } else if (!TAILQ_EMPTY(&stream->operations[DISPATCH_IO_RANDOM])) { + op = TAILQ_FIRST(&stream->operations[DISPATCH_IO_RANDOM]); + } + return op; + } + if (op->params.type == DISPATCH_IO_STREAM) { + // Stream operations need to be serialized so continue the current + // operation until it is finished + return op; + } + // Get the next random operation (round-robin) + if (op->params.type == DISPATCH_IO_RANDOM) { + op = TAILQ_NEXT(op, operation_list); + if (!op) { + op = TAILQ_FIRST(&stream->operations[DISPATCH_IO_RANDOM]); + } + return op; + } + return NULL; +} + +static dispatch_operation_t +_dispatch_disk_pick_next_operation(dispatch_disk_t disk) +{ + // On pick queue + 
	// Round-robin scan of the disk's operation list starting after cur_rq,
	// skipping operations already in flight (op->active).
	dispatch_operation_t op;
	if (!TAILQ_EMPTY(&disk->operations)) {
		if (disk->cur_rq == NULL) {
			op = TAILQ_FIRST(&disk->operations);
		} else {
			op = disk->cur_rq;
			do {
				op = TAILQ_NEXT(op, operation_list);
				if (!op) {
					// Wrap around to the head of the list
					op = TAILQ_FIRST(&disk->operations);
				}
				// TODO: more involved picking algorithm rdar://problem/8780312
			} while (op->active && op != disk->cur_rq);
		}
		if (!op->active) {
			disk->cur_rq = op;
			return op;
		}
	}
	// Nothing runnable (list empty or every operation active)
	return NULL;
}

// Complete (cancel-and-release) every operation on the stream, or only those
// belonging to `channel` when one is given. Suspends the stream source if it
// was running and no work remains.
static void
_dispatch_stream_cleanup_operations(dispatch_stream_t stream,
		dispatch_io_t channel)
{
	// On stream queue
	dispatch_operation_t op, tmp;
	typeof(*stream->operations) *operations;
	operations = &stream->operations[DISPATCH_IO_RANDOM];
	TAILQ_FOREACH_SAFE(op, operations, operation_list, tmp) {
		if (!channel || op->channel == channel) {
			_dispatch_stream_complete_operation(stream, op);
		}
	}
	operations = &stream->operations[DISPATCH_IO_STREAM];
	TAILQ_FOREACH_SAFE(op, operations, operation_list, tmp) {
		if (!channel || op->channel == channel) {
			_dispatch_stream_complete_operation(stream, op);
		}
	}
	if (stream->source_running && !_dispatch_stream_operation_avail(stream)) {
		dispatch_suspend(stream->source);
		stream->source_running = false;
	}
}

// Disk-side analogue: complete all (or one channel's) pending operations.
static void
_dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel)
{
	// On pick queue
	dispatch_operation_t op, tmp;
	TAILQ_FOREACH_SAFE(op, &disk->operations, operation_list, tmp) {
		if (!channel || op->channel == channel) {
			_dispatch_disk_complete_operation(disk, op);
		}
	}
}

#pragma mark -
#pragma mark dispatch_stream_handler/dispatch_disk_handler

// Lazily create the read/write dispatch source monitoring the stream's fd.
static dispatch_source_t
_dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op)
{
	// On stream queue
	if (stream->source) {
		return stream->source;
	}
	dispatch_fd_t fd = op->fd_entry->fd;
	_dispatch_io_debug("stream source create", fd);
	dispatch_source_t source = NULL;
	if (op->direction == DOP_DIR_READ) {
+ source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, + stream->dq); + } else if (op->direction == DOP_DIR_WRITE) { + source = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, fd, 0, + stream->dq); + } else { + dispatch_assert(op->direction < DOP_DIR_MAX); + return NULL; + } + dispatch_set_context(source, stream); + dispatch_source_set_event_handler_f(source, + _dispatch_stream_source_handler); + // Close queue must not run user cleanup handlers until sources are fully + // unregistered + dispatch_queue_t close_queue = op->fd_entry->close_queue; + dispatch_source_set_cancel_handler(source, ^{ + _dispatch_io_debug("stream source cancel", fd); + dispatch_resume(close_queue); + }); + stream->source = source; + return stream->source; +} + +static void +_dispatch_stream_source_handler(void *ctx) +{ + // On stream queue + dispatch_stream_t stream = (dispatch_stream_t)ctx; + dispatch_suspend(stream->source); + stream->source_running = false; + return _dispatch_stream_handler(stream); +} + +static void +_dispatch_stream_handler(void *ctx) +{ + // On stream queue + dispatch_stream_t stream = (dispatch_stream_t)ctx; + dispatch_operation_t op; +pick: + op = _dispatch_stream_pick_next_operation(stream, stream->op); + if (!op) { + _dispatch_debug("no operation found: stream %p", stream); + return; + } + int err = _dispatch_io_get_error(op, NULL, true); + if (err) { + op->err = err; + _dispatch_stream_complete_operation(stream, op); + goto pick; + } + stream->op = op; + _dispatch_io_debug("stream handler", op->fd_entry->fd); + dispatch_fd_entry_t fd_entry = op->fd_entry; + _dispatch_fd_entry_retain(fd_entry); + // For performance analysis + if (!op->total && dispatch_io_defaults.initial_delivery) { + // Empty delivery to signal the start of the operation + _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_operation_deliver_data(op, DOP_DELIVER); + } + // TODO: perform on the operation target queue to get correct priority + int result = 
_dispatch_operation_perform(op), flags = -1; + switch (result) { + case DISPATCH_OP_DELIVER: + flags = DOP_DEFAULT; + // Fall through + case DISPATCH_OP_DELIVER_AND_COMPLETE: + flags = (flags != DOP_DEFAULT) ? DOP_DELIVER | DOP_NO_EMPTY : + DOP_DEFAULT; + _dispatch_operation_deliver_data(op, flags); + // Fall through + case DISPATCH_OP_COMPLETE: + if (flags != DOP_DEFAULT) { + _dispatch_stream_complete_operation(stream, op); + } + if (_dispatch_stream_operation_avail(stream)) { + dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + } + break; + case DISPATCH_OP_COMPLETE_RESUME: + _dispatch_stream_complete_operation(stream, op); + // Fall through + case DISPATCH_OP_RESUME: + if (_dispatch_stream_operation_avail(stream)) { + stream->source_running = true; + dispatch_resume(_dispatch_stream_source(stream, op)); + } + break; + case DISPATCH_OP_ERR: + _dispatch_stream_cleanup_operations(stream, op->channel); + break; + case DISPATCH_OP_FD_ERR: + _dispatch_fd_entry_retain(fd_entry); + dispatch_async(fd_entry->barrier_queue, ^{ + _dispatch_fd_entry_cleanup_operations(fd_entry, NULL); + _dispatch_fd_entry_release(fd_entry); + }); + break; + default: + break; + } + _dispatch_fd_entry_release(fd_entry); + return; +} + +static void +_dispatch_disk_handler(void *ctx) +{ + // On pick queue + dispatch_disk_t disk = (dispatch_disk_t)ctx; + if (disk->io_active) { + return; + } + _dispatch_io_debug("disk handler", -1); + dispatch_operation_t op; + size_t i = disk->free_idx, j = disk->req_idx; + if (j <= i) { + j += disk->advise_list_depth; + } + while (i <= j) { + if ((!disk->advise_list[i%disk->advise_list_depth]) && + (op = _dispatch_disk_pick_next_operation(disk))) { + int err = _dispatch_io_get_error(op, NULL, true); + if (err) { + op->err = err; + _dispatch_disk_complete_operation(disk, op); + continue; + } + _dispatch_retain(op); + disk->advise_list[i%disk->advise_list_depth] = op; + op->active = true; + } else { + // No more operations to get + break; + } + i++; 
+ } + disk->free_idx = (i%disk->advise_list_depth); + op = disk->advise_list[disk->req_idx]; + if (op) { + disk->io_active = true; + dispatch_async_f(op->do_targetq, disk, _dispatch_disk_perform); + } +} + +static void +_dispatch_disk_perform(void *ctxt) +{ + dispatch_disk_t disk = ctxt; + size_t chunk_size = dispatch_io_defaults.chunk_pages * PAGE_SIZE; + _dispatch_io_debug("disk perform", -1); + dispatch_operation_t op; + size_t i = disk->advise_idx, j = disk->free_idx; + if (j <= i) { + j += disk->advise_list_depth; + } + do { + op = disk->advise_list[i%disk->advise_list_depth]; + if (!op) { + // Nothing more to advise, must be at free_idx + dispatch_assert(i%disk->advise_list_depth == disk->free_idx); + break; + } + if (op->direction == DOP_DIR_WRITE) { + // TODO: preallocate writes ? rdar://problem/9032172 + continue; + } + if (op->fd_entry->fd == -1 && _dispatch_fd_entry_open(op->fd_entry, + op->channel)) { + continue; + } + // For performance analysis + if (!op->total && dispatch_io_defaults.initial_delivery) { + // Empty delivery to signal the start of the operation + _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_operation_deliver_data(op, DOP_DELIVER); + } + // Advise two chunks if the list only has one element and this is the + // first advise on the operation + if ((j-i) == 1 && !disk->advise_list[disk->free_idx] && + !op->advise_offset) { + chunk_size *= 2; + } + _dispatch_operation_advise(op, chunk_size); + } while (++i < j); + disk->advise_idx = i%disk->advise_list_depth; + op = disk->advise_list[disk->req_idx]; + int result = _dispatch_operation_perform(op); + disk->advise_list[disk->req_idx] = NULL; + disk->req_idx = (++disk->req_idx)%disk->advise_list_depth; + dispatch_async(disk->pick_queue, ^{ + switch (result) { + case DISPATCH_OP_DELIVER: + _dispatch_operation_deliver_data(op, DOP_DELIVER); + break; + case DISPATCH_OP_COMPLETE: + _dispatch_disk_complete_operation(disk, op); + break; + case 
DISPATCH_OP_DELIVER_AND_COMPLETE: + _dispatch_operation_deliver_data(op, DOP_DELIVER); + _dispatch_disk_complete_operation(disk, op); + break; + case DISPATCH_OP_ERR: + _dispatch_disk_cleanup_operations(disk, op->channel); + break; + case DISPATCH_OP_FD_ERR: + _dispatch_disk_cleanup_operations(disk, NULL); + break; + default: + dispatch_assert(result); + break; + } + op->active = false; + disk->io_active = false; + _dispatch_disk_handler(disk); + // Balancing the retain in _dispatch_disk_handler. Note that op must be + // released at the very end, since it might hold the last reference to + // the disk + _dispatch_release(op); + }); +} + +#pragma mark - +#pragma mark dispatch_operation_perform + +static void +_dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) +{ + int err; + struct radvisory advise; + // No point in issuing a read advise for the next chunk if we are already + // a chunk ahead from reading the bytes + if (op->advise_offset > (off_t)((op->offset+op->total) + chunk_size + + PAGE_SIZE)) { + return; + } + advise.ra_count = (int)chunk_size; + if (!op->advise_offset) { + op->advise_offset = op->offset; + // If this is the first time through, align the advised range to a + // page boundary + size_t pg_fraction = (size_t)((op->offset + chunk_size) % PAGE_SIZE); + advise.ra_count += (int)(pg_fraction ? 
PAGE_SIZE - pg_fraction : 0); + } + advise.ra_offset = op->advise_offset; + op->advise_offset += advise.ra_count; + _dispatch_io_syscall_switch(err, + fcntl(op->fd_entry->fd, F_RDADVISE, &advise), + // TODO: set disk status on error + default: (void)dispatch_assume_zero(err); break; + ); +} + +static int +_dispatch_operation_perform(dispatch_operation_t op) +{ + int err = _dispatch_io_get_error(op, NULL, true); + if (err) { + goto error; + } + if (!op->buf) { + size_t max_buf_siz = op->params.high; + size_t chunk_siz = dispatch_io_defaults.chunk_pages * PAGE_SIZE; + if (op->direction == DOP_DIR_READ) { + // If necessary, create a buffer for the ongoing operation, large + // enough to fit chunk_pages but at most high-water + size_t data_siz = dispatch_data_get_size(op->data); + if (data_siz) { + dispatch_assert(data_siz < max_buf_siz); + max_buf_siz -= data_siz; + } + if (max_buf_siz > chunk_siz) { + max_buf_siz = chunk_siz; + } + if (op->length < SIZE_MAX) { + op->buf_siz = op->length - op->total; + if (op->buf_siz > max_buf_siz) { + op->buf_siz = max_buf_siz; + } + } else { + op->buf_siz = max_buf_siz; + } + op->buf = valloc(op->buf_siz); + _dispatch_io_debug("buffer allocated", op->fd_entry->fd); + } else if (op->direction == DOP_DIR_WRITE) { + // Always write the first data piece, if that is smaller than a + // chunk, accumulate further data pieces until chunk size is reached + if (chunk_siz > max_buf_siz) { + chunk_siz = max_buf_siz; + } + op->buf_siz = 0; + dispatch_data_apply(op->data, + ^(dispatch_data_t region DISPATCH_UNUSED, + size_t offset DISPATCH_UNUSED, + const void* buf DISPATCH_UNUSED, size_t len) { + size_t siz = op->buf_siz + len; + if (!op->buf_siz || siz <= chunk_siz) { + op->buf_siz = siz; + } + return (bool)(siz < chunk_siz); + }); + if (op->buf_siz > max_buf_siz) { + op->buf_siz = max_buf_siz; + } + dispatch_data_t d; + d = dispatch_data_create_subrange(op->data, 0, op->buf_siz); + op->buf_data = dispatch_data_create_map(d, (const 
void**)&op->buf, + NULL); + _dispatch_io_data_release(d); + _dispatch_io_debug("buffer mapped", op->fd_entry->fd); + } + } + if (op->fd_entry->fd == -1) { + err = _dispatch_fd_entry_open(op->fd_entry, op->channel); + if (err) { + goto error; + } + } + void *buf = op->buf + op->buf_len; + size_t len = op->buf_siz - op->buf_len; + off_t off = op->offset + op->total; + ssize_t processed = -1; +syscall: + if (op->direction == DOP_DIR_READ) { + if (op->params.type == DISPATCH_IO_STREAM) { + processed = read(op->fd_entry->fd, buf, len); + } else if (op->params.type == DISPATCH_IO_RANDOM) { + processed = pread(op->fd_entry->fd, buf, len, off); + } + } else if (op->direction == DOP_DIR_WRITE) { + if (op->params.type == DISPATCH_IO_STREAM) { + processed = write(op->fd_entry->fd, buf, len); + } else if (op->params.type == DISPATCH_IO_RANDOM) { + processed = pwrite(op->fd_entry->fd, buf, len, off); + } + } + // Encountered an error on the file descriptor + if (processed == -1) { + err = errno; + if (err == EINTR) { + goto syscall; + } + goto error; + } + // EOF is indicated by two handler invocations + if (processed == 0) { + _dispatch_io_debug("EOF", op->fd_entry->fd); + return DISPATCH_OP_DELIVER_AND_COMPLETE; + } + op->buf_len += processed; + op->total += processed; + if (op->total == op->length) { + // Finished processing all the bytes requested by the operation + return DISPATCH_OP_COMPLETE; + } else { + // Deliver data only if we satisfy the filters + return DISPATCH_OP_DELIVER; + } +error: + if (err == EAGAIN) { + // For disk based files with blocking I/O we should never get EAGAIN + dispatch_assert(!op->fd_entry->disk); + _dispatch_io_debug("EAGAIN %d", op->fd_entry->fd, err); + if (op->direction == DOP_DIR_READ && op->total && + op->channel == op->fd_entry->convenience_channel) { + // Convenience read with available data completes on EAGAIN + return DISPATCH_OP_COMPLETE_RESUME; + } + return DISPATCH_OP_RESUME; + } + op->err = err; + switch (err) { + case ECANCELED: + 
return DISPATCH_OP_ERR; + case EBADF: + (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err); + return DISPATCH_OP_FD_ERR; + default: + return DISPATCH_OP_COMPLETE; + } +} + +static void +_dispatch_operation_deliver_data(dispatch_operation_t op, + dispatch_op_flags_t flags) +{ + // Either called from stream resp. pick queue or when op is finalized + dispatch_data_t data = NULL; + int err = 0; + size_t undelivered = op->undelivered + op->buf_len; + bool deliver = (flags & (DOP_DELIVER|DOP_DONE)) || + (op->flags & DOP_DELIVER); + op->flags = DOP_DEFAULT; + if (!deliver) { + // Don't deliver data until low water mark has been reached + if (undelivered >= op->params.low) { + deliver = true; + } else if (op->buf_len < op->buf_siz) { + // Request buffer is not yet used up + _dispatch_io_debug("buffer data", op->fd_entry->fd); + return; + } + } else { + err = op->err; + if (!err && (op->channel->atomic_flags & DIO_STOPPED)) { + err = ECANCELED; + op->err = err; + } + } + // Deliver data or buffer used up + if (op->direction == DOP_DIR_READ) { + if (op->buf_len) { + void *buf = op->buf; + data = dispatch_data_create(buf, op->buf_len, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); + op->buf = NULL; + op->buf_len = 0; + dispatch_data_t d = dispatch_data_create_concat(op->data, data); + _dispatch_io_data_release(op->data); + _dispatch_io_data_release(data); + data = d; + } else { + data = op->data; + } + op->data = deliver ? 
dispatch_data_empty : data; + } else if (op->direction == DOP_DIR_WRITE) { + if (deliver) { + data = dispatch_data_create_subrange(op->data, op->buf_len, + op->length); + } + if (op->buf_len == op->buf_siz) { + _dispatch_io_data_release(op->buf_data); + op->buf_data = NULL; + op->buf = NULL; + op->buf_len = 0; + // Trim newly written buffer from head of unwritten data + dispatch_data_t d; + if (deliver) { + _dispatch_io_data_retain(data); + d = data; + } else { + d = dispatch_data_create_subrange(op->data, op->buf_len, + op->length); + } + _dispatch_io_data_release(op->data); + op->data = d; + } + } else { + dispatch_assert(op->direction < DOP_DIR_MAX); + return; + } + if (!deliver || ((flags & DOP_NO_EMPTY) && !dispatch_data_get_size(data))) { + op->undelivered = undelivered; + _dispatch_io_debug("buffer data", op->fd_entry->fd); + return; + } + op->undelivered = 0; + _dispatch_io_debug("deliver data", op->fd_entry->fd); + dispatch_op_direction_t direction = op->direction; + __block dispatch_data_t d = data; + dispatch_io_handler_t handler = op->handler; +#if DISPATCH_IO_DEBUG + int fd = op->fd_entry->fd; +#endif + dispatch_fd_entry_t fd_entry = op->fd_entry; + _dispatch_fd_entry_retain(fd_entry); + dispatch_io_t channel = op->channel; + _dispatch_retain(channel); + // Note that data delivery may occur after the operation is freed + dispatch_async(op->op_q, ^{ + bool done = (flags & DOP_DONE); + if (done) { + if (direction == DOP_DIR_READ && err) { + if (dispatch_data_get_size(d)) { + _dispatch_io_debug("IO handler invoke", fd); + handler(false, d, 0); + } + d = NULL; + } else if (direction == DOP_DIR_WRITE && !err) { + d = NULL; + } + } + _dispatch_io_debug("IO handler invoke", fd); + handler(done, d, err); + _dispatch_release(channel); + _dispatch_fd_entry_release(fd_entry); + _dispatch_io_data_release(data); + }); +} diff --git a/src/io_internal.h b/src/io_internal.h new file mode 100644 index 000000000..c43bd75b3 --- /dev/null +++ b/src/io_internal.h @@ -0,0 
+1,198 @@ +/* + * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_IO_INTERNAL__ +#define __DISPATCH_IO_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." 
+#include // for HeaderDoc +#endif + +#define _DISPATCH_IO_LABEL_SIZE 16 + +#ifndef DISPATCH_IO_DEBUG +#define DISPATCH_IO_DEBUG 0 +#endif + +#if TARGET_OS_EMBEDDED // rdar://problem/9032036 +#define DIO_MAX_CHUNK_PAGES 128u // 512kB chunk size +#else +#define DIO_MAX_CHUNK_PAGES 256u // 1024kB chunk size +#endif + +#define DIO_DEFAULT_LOW_WATER_CHUNKS 1u // default low-water mark +#define DIO_MAX_PENDING_IO_REQS 6u // Pending I/O read advises + +typedef unsigned int dispatch_op_direction_t; +enum { + DOP_DIR_READ = 0, + DOP_DIR_WRITE, + DOP_DIR_MAX, + DOP_DIR_IGNORE = UINT_MAX, +}; + +typedef unsigned int dispatch_op_flags_t; +#define DOP_DEFAULT 0u // check conditions to determine delivery +#define DOP_DELIVER 1u // always deliver operation +#define DOP_DONE 2u // operation is done (implies deliver) +#define DOP_STOP 4u // operation interrupted by chan stop (implies done) +#define DOP_NO_EMPTY 8u // don't deliver empty data + +// dispatch_io_t atomic_flags +#define DIO_CLOSED 1u // channel has been closed +#define DIO_STOPPED 2u // channel has been stopped (implies closed) + +#define _dispatch_io_data_retain(x) dispatch_retain(x) +#define _dispatch_io_data_release(x) dispatch_release(x) + +#if DISPATCH_IO_DEBUG +#define _dispatch_io_debug(msg, fd, args...) \ + _dispatch_debug("fd %d: " msg, (fd), ##args) +#else +#define _dispatch_io_debug(msg, fd, args...) 
+#endif + +DISPATCH_DECL(dispatch_operation); +DISPATCH_DECL(dispatch_disk); + +struct dispatch_stream_s { + dispatch_queue_t dq; + dispatch_source_t source; + dispatch_operation_t op; + bool source_running; + TAILQ_HEAD(, dispatch_operation_s) operations[2]; +}; + +typedef struct dispatch_stream_s *dispatch_stream_t; + +struct dispatch_io_path_data_s { + dispatch_io_t channel; + int oflag; + mode_t mode; + size_t pathlen; + char path[]; +}; + +typedef struct dispatch_io_path_data_s *dispatch_io_path_data_t; + +struct dispatch_stat_s { + dev_t dev; + mode_t mode; +}; + +struct dispatch_disk_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_disk_s); +}; + +struct dispatch_disk_s { + DISPATCH_STRUCT_HEADER(dispatch_disk_s, dispatch_disk_vtable_s); + dev_t dev; + TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations; + dispatch_operation_t cur_rq; + dispatch_queue_t pick_queue; + + size_t free_idx; + size_t req_idx; + size_t advise_idx; + bool io_active; + int err; + TAILQ_ENTRY(dispatch_disk_s) disk_list; + size_t advise_list_depth; + dispatch_operation_t advise_list[]; +}; + +struct dispatch_fd_entry_s { + dispatch_fd_t fd; + dispatch_io_path_data_t path_data; + int orig_flags, orig_nosigpipe, err; + struct dispatch_stat_s stat; + dispatch_stream_t streams[2]; + dispatch_disk_t disk; + dispatch_queue_t close_queue, barrier_queue; + dispatch_group_t barrier_group; + dispatch_io_t convenience_channel; + TAILQ_HEAD(, dispatch_operation_s) stream_ops; + TAILQ_ENTRY(dispatch_fd_entry_s) fd_list; +}; + +typedef struct dispatch_fd_entry_s *dispatch_fd_entry_t; + +typedef struct dispatch_io_param_s { + dispatch_io_type_t type; // STREAM OR RANDOM + size_t low; + size_t high; + uint64_t interval; + unsigned long interval_flags; +} dispatch_io_param_s; + +struct dispatch_operation_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_operation_s); +}; + +struct dispatch_operation_s { + DISPATCH_STRUCT_HEADER(dispatch_operation_s, dispatch_operation_vtable_s); + 
dispatch_queue_t op_q; + dispatch_op_direction_t direction; // READ OR WRITE + dispatch_io_param_s params; + off_t offset; + size_t length; + int err; + dispatch_io_handler_t handler; + dispatch_io_t channel; + dispatch_fd_entry_t fd_entry; + dispatch_source_t timer; + bool active; + int count; + off_t advise_offset; + void* buf; + dispatch_op_flags_t flags; + size_t buf_siz, buf_len, undelivered, total; + dispatch_data_t buf_data, data; + TAILQ_ENTRY(dispatch_operation_s) operation_list; + // the request list in the fd_entry stream_ops + TAILQ_ENTRY(dispatch_operation_s) stream_list; +}; + +struct dispatch_io_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_io_s); +}; + +struct dispatch_io_s { + DISPATCH_STRUCT_HEADER(dispatch_io_s, dispatch_io_vtable_s); + dispatch_queue_t queue, barrier_queue; + dispatch_group_t barrier_group; + dispatch_io_param_s params; + dispatch_fd_entry_t fd_entry; + unsigned int atomic_flags; + dispatch_fd_t fd, fd_actual; + off_t f_ptr; + int err; // contains creation errors only +}; + +void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); + +#endif // __DISPATCH_IO_INTERNAL__ diff --git a/src/legacy.c b/src/legacy.c deleted file mode 100644 index 62329902a..000000000 --- a/src/legacy.c +++ /dev/null @@ -1,444 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#include "internal.h" -#include "legacy.h" - -/* - * LEGACY: This header file describles LEGACY interfaces to libdispatch from an - * earlier revision of the API. These interfaces WILL be removed in the future. - */ - -DISPATCH_PUBLIC_API DISPATCH_NONNULL1 DISPATCH_NONNULL2 -dispatch_item_t -LEGACY_dispatch_call(dispatch_queue_t, dispatch_legacy_block_t work, dispatch_legacy_block_t completion) -__asm__("_dispatch_call2"); - -DISPATCH_PUBLIC_API DISPATCH_PURE DISPATCH_WARN_RESULT -dispatch_queue_t -LEGACY_dispatch_queue_get_current(void) -__asm__("_dispatch_queue_get_current"); - -///////////////////////////////////////////////////////////////////////////// - -dispatch_queue_t -LEGACY_dispatch_queue_get_current(void) -{ - return _dispatch_queue_get_current(); -} - -dispatch_item_t -LEGACY_dispatch_call(dispatch_queue_t dq, - dispatch_legacy_block_t dispatch_block, - dispatch_legacy_block_t callback_block) -{ - dispatch_queue_t lq = _dispatch_queue_get_current() ?: dispatch_get_main_queue(); - dispatch_item_t di; - - di = dispatch_block ? 
calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*di))) : NULL; - - if (!di) { - return di; - } - - if (callback_block) { - dispatch_retain(lq); - } - - dispatch_async(dq, ^{ - dispatch_block(di); - - if (callback_block) { - dispatch_async(lq, ^{ - callback_block(di); - free(di); - dispatch_release(lq); - }); - } else { - free(di); - } - }); - - return di; -} - -sigset_t -dispatch_event_get_signals(dispatch_event_t de) -{ - sigset_t ret; - sigemptyset(&ret); - sigaddset(&ret, (int)dispatch_event_get_signal(de)); - return ret; -} - -void dispatch_cancel(dispatch_source_t ds) { dispatch_source_cancel(ds); } -long dispatch_testcancel(dispatch_source_t ds) { return dispatch_source_testcancel(ds); } - -void dispatch_queue_resume(dispatch_queue_t dq) { dispatch_resume(dq); } -void dispatch_queue_retain(dispatch_queue_t dq) { dispatch_retain(dq); } -void dispatch_queue_release(dispatch_queue_t dq) { dispatch_release(dq); } - -void dispatch_source_suspend(dispatch_source_t ds) { dispatch_suspend(ds); } -void dispatch_source_resume(dispatch_source_t ds) { dispatch_resume(ds); } -void dispatch_source_release(dispatch_source_t ds) { dispatch_release(ds); } - -void dispatch_source_attr_release(dispatch_source_attr_t attr) { dispatch_release(attr); } -void dispatch_queue_attr_release(dispatch_queue_attr_t attr) { dispatch_release(attr); } - -void *dispatch_queue_get_context(dispatch_queue_t dq) { return dispatch_get_context(dq); } -void dispatch_queue_set_context(dispatch_queue_t dq, void *context) { dispatch_set_context(dq, context); } - -void *dispatch_source_get_context(dispatch_source_t ds) { return dispatch_get_context(ds); } -void dispatch_source_set_context(dispatch_source_t ds, void *context) { dispatch_set_context(ds, context); } - -void dispatch_source_custom_trigger(dispatch_source_t ds) { dispatch_source_merge_data(ds, 1); } - -void -dispatch_source_trigger(dispatch_source_t ds, unsigned long val) -{ - dispatch_source_merge_data(ds, val); -} - -int 
dispatch_source_get_descriptor(dispatch_source_t ds) { return (int)dispatch_source_get_handle(ds); } - -pid_t dispatch_source_get_pid(dispatch_source_t ds) { return (pid_t)dispatch_source_get_handle(ds); } - -mach_port_t dispatch_source_get_machport(dispatch_source_t ds) { return (mach_port_t)dispatch_source_get_handle(ds); } - -uint64_t dispatch_source_get_flags(dispatch_source_t ds) { return dispatch_source_get_mask(ds); } - -dispatch_source_t dispatch_event_get_source(dispatch_event_t event) { return event; } - -long dispatch_event_get_error(dispatch_event_t event, long* error) { return dispatch_source_get_error(event, error); } - -uint64_t dispatch_event_get_flags(dispatch_event_t event) { return dispatch_source_get_data(event); } - -size_t dispatch_event_get_bytes_available(dispatch_event_t event) { return (size_t)dispatch_source_get_data(event); } - -unsigned long dispatch_event_get_count(dispatch_event_t event) { return (unsigned long)dispatch_source_get_data(event); } - -long dispatch_event_get_signal(dispatch_event_t event) { return (long)dispatch_source_get_handle(event); } - -dispatch_source_t -dispatch_source_custom_create( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_event_handler_t handler) { - return dispatch_source_data_create(behavior, attr, queue, handler); -} - -dispatch_source_t -dispatch_source_custom_create_f( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_event_handler_function_t handler) { - return dispatch_source_data_create_f(behavior, attr, queue, h_context, handler); -} - -#define _dispatch_source_call_block ((void *)-1) - - - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_timer_create(uint64_t flags, - uint64_t nanoseconds, - uint64_t leeway, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_timer_create_f(flags, nanoseconds, leeway, - attr, q, 
callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_timer_create_f(uint64_t timer_flags, - uint64_t nanoseconds, - uint64_t leeway, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - dispatch_time_t start; - - // 6866347 - make sure nanoseconds won't overflow - if ((int64_t)nanoseconds < 0) { - nanoseconds = INT64_MAX; - } - - if (timer_flags & DISPATCH_TIMER_ONESHOT) { - timer_flags |= DISPATCH_TIMER_WALL_CLOCK; - } - if (timer_flags == (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK)) { - static const struct timespec t0; - start = dispatch_walltime(&t0, nanoseconds); - } else if (timer_flags & DISPATCH_TIMER_WALL_CLOCK) { - start = dispatch_walltime(DISPATCH_TIME_NOW, nanoseconds); - } else { - start = dispatch_time(DISPATCH_TIME_NOW, nanoseconds); - } - if (timer_flags & DISPATCH_TIMER_ONESHOT) { - // 6866347 - make sure nanoseconds won't overflow - nanoseconds = INT64_MAX; // non-repeating (~292 years) - } - - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, (unsigned long)timer_flags, q); - if (!ds) { - return NULL; - } - ds = _dispatch_source_create2(ds, attr, context, callback); - if (!ds) { - return NULL; - } - dispatch_source_set_timer(ds, start, nanoseconds, leeway); - - return ds; -} - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_read_create(int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_read_create_f(descriptor, - attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_read_create_f(int fd, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef 
__BLOCKS__ -dispatch_source_t -dispatch_source_write_create(int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_write_create_f(descriptor, - attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_write_create_f(int fd, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, fd, 0, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_vnode_create(int descriptor, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_vnode_create_f(descriptor, - flags, attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_vnode_create_f(int fd, - uint64_t event_mask, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, fd, (unsigned long)event_mask, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_signal_create(unsigned long sig, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_signal_create_f(sig, - attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_signal_create_f(unsigned long signo, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, signo, 0, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef 
__BLOCKS__ -dispatch_source_t -dispatch_source_proc_create(pid_t pid, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_proc_create_f(pid, - flags, attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_proc_create_f(pid_t pid, - uint64_t event_mask, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, pid, (unsigned long)event_mask, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_vfs_create(uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_vfs_create_f(flags, - attr, q, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t -dispatch_source_vfs_create_f(uint64_t event_mask, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, 0, (unsigned long)event_mask, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_data_create(unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t q, - dispatch_source_handler_t callback) -{ - return dispatch_source_data_create_f(behavior, - attr, q, callback, _dispatch_source_call_block); -} -#endif - -#ifdef __BLOCKS__ -dispatch_source_t -dispatch_source_machport_create(mach_port_t mport, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t dq, - dispatch_source_handler_t callback) -{ - return dispatch_source_machport_create_f(mport, flags, - attr, dq, callback, _dispatch_source_call_block); -} -#endif - -dispatch_source_t 
-dispatch_source_data_create_f(unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t q, - void *context, - dispatch_source_handler_function_t callback) -{ - dispatch_source_t ds; - dispatch_source_type_t type; - switch (behavior) { - case DISPATCH_SOURCE_CUSTOM_ADD: - type = DISPATCH_SOURCE_TYPE_DATA_ADD; - break; - case DISPATCH_SOURCE_CUSTOM_OR: - type = DISPATCH_SOURCE_TYPE_DATA_OR; - break; - default: - return NULL; - } - ds = dispatch_source_create(type, 0, 0, q); - return _dispatch_source_create2(ds, attr, context, callback); -} - -dispatch_source_t -dispatch_source_machport_create_f(mach_port_t mport, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t dq, - void *ctxt, - dispatch_source_handler_function_t func) -{ - dispatch_source_t ds; - dispatch_source_type_t type; - unsigned long newflags = 0; - - if (flags & ~(DISPATCH_MACHPORT_DEAD|DISPATCH_MACHPORT_RECV)) { - return NULL; - } - // XXX DELETED - if (flags & DISPATCH_MACHPORT_DEAD) { - type = DISPATCH_SOURCE_TYPE_MACH_SEND; - newflags |= DISPATCH_MACH_SEND_DEAD; - } else { - type = DISPATCH_SOURCE_TYPE_MACH_RECV; - } - - ds = dispatch_source_create(type, mport, newflags, dq); - return _dispatch_source_create2(ds, attr, ctxt, func); -} - diff --git a/src/legacy.h b/src/legacy.h deleted file mode 100644 index e6bffbc59..000000000 --- a/src/legacy.h +++ /dev/null @@ -1,748 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. - */ - -/* - * LEGACY: This header file describles LEGACY interfaces to libdispatch from an - * earlier revision of the API. These interfaces WILL be removed in the future. - */ - -#ifndef __DISPATCH_LEGACY__ -#define __DISPATCH_LEGACY__ - -#ifndef __DISPATCH_INDIRECT__ -#error "Please #include instead of this file directly." -#include // for HeaderDoc -#endif - -#include - -#define DISPATCH_DEPRECATED __attribute__((deprecated)) -#define DISPATCH_PUBLIC_API __attribute__((visibility("default"))) - -typedef struct dispatch_item_s *dispatch_item_t; - -struct dispatch_item_s { - void * di_objc_isa; /* FIXME -- someday... */ - struct dispatch_item_s *volatile di_next; - dispatch_queue_t di_cback_q; - uint32_t di_flags; - semaphore_t di_semaphore; - void * di_work_func; - void * di_work_ctxt; - void * di_cback_func; - void * di_cback_ctxt; - void * di_ctxt; -}; - -// Use: dispatch_source_t -typedef struct dispatch_source_s *dispatch_event_t; - -// Obsolete -#ifdef __BLOCKS__ -typedef void (^dispatch_legacy_block_t)(dispatch_item_t); -typedef void (^dispatch_queue_deletion_block_t)(dispatch_queue_t queue); -typedef void (^dispatch_source_deletion_t)(dispatch_source_t source); -typedef void (^dispatch_event_callback_t)(dispatch_event_t event); -typedef void (^dispatch_source_handler_t)(dispatch_source_t source); -typedef dispatch_source_handler_t dispatch_event_handler_t; -typedef void (^dispatch_source_finalizer_t)(dispatch_source_t source); -#endif /* __BLOCKS__ */ - -// Obsolete -typedef void (*dispatch_source_handler_function_t)(void *, dispatch_source_t); -typedef void 
(*dispatch_source_finalizer_function_t)(void *, dispatch_source_t); -typedef dispatch_source_handler_function_t dispatch_event_handler_function_t; - -DISPATCH_DECL(dispatch_source_attr); - -#define DISPATCH_SOURCE_CREATE_SUSPENDED ((dispatch_source_attr_t)~0ul) - -#ifdef __BLOCKS__ -typedef void (^dispatch_queue_finalizer_t)(dispatch_queue_t queue); -#endif - -typedef void (*dispatch_queue_finalizer_function_t)(void *, dispatch_queue_t); - -__BEGIN_DECLS - -/*! - * @function dispatch_queue_attr_create - * - * @abstract - * Creates a new dispatch queue attribute structure. These attributes may be - * provided at creation time to modify the default behavior of the queue. - * - * @discussion - * The values present in this structure are copied to newly created queues. - * The same attribute structure may be provided to multiple calls to - * dispatch_queue_create() but only the values in the structure at the time the - * call is made will be used. - * - * @result - * The new dispatch queue attribute structure, initialized to default values. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_attr_t -dispatch_queue_attr_create(void); - -/*! - * @function dispatch_queue_attr_set_priority - * - * @abstract - * Set the priority level for a dispatch queue. - * - * @discussion - * Priority levels may be: - * - DISPATCH_QUEUE_PRIORITY_HIGH - * - DISPATCH_QUEUE_PRIORITY_DEFAULT - * - DISPATCH_QUEUE_PRIORITY_LOW - * Queues set to high priority will be processed - * before queues set to default priority or low priority. - * Queues set to low priority will be processed only if all - * high priority and default priority queues are empty. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -void -dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority); - -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -long -dispatch_queue_attr_set_finalizer( - dispatch_queue_attr_t attr, - dispatch_queue_finalizer_t finalizer); -#endif - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -void -dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr, void *context, dispatch_queue_finalizer_function_t finalizer); - -/*! - * @function dispatch_get_concurrent_queue - * - * @abstract - * Returns a well-known global concurrent queue of a given priority level. - * - * @discussion - * Blocks submitted to the returned queue may be invoked concurrently with - * respect to each other. - * - * These queues are useful for performing one-shot asynchronous operations, - * e.g. dispatch_async() to an "anonymous" queue; or for performing parallel - * loops concurrently on multiple processors, e.g. dispatch_apply(). - * - * The dispatch queues returned by this function are managed by the system for - * the lifetime of the application, and need not be retained or released - * directly by the application. Furthermore, dispatch_suspend() and - * dispatch_queue_resume() are not supported on these global queues, and will - * be ignored. - * - * @param priority - * The requested priority level for the queue (default is zero): - * - DISPATCH_QUEUE_PRIORITY_HIGH - * - DISPATCH_QUEUE_PRIORITY_DEFAULT - * - DISPATCH_QUEUE_PRIORITY_LOW - * - * @result - * Returns a concurrent dispatch queue for use with dispatch_async(), - * dispatch_apply(), et al. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_t -dispatch_get_concurrent_queue(long priority); - -DISPATCH_PUBLIC_API //DISPATCH_DEPRECATED -void -dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags); - -#ifdef __BLOCKS__ -DISPATCH_PUBLIC_API DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_DEPRECATED -dispatch_item_t -dispatch_call(dispatch_queue_t, dispatch_legacy_block_t work, dispatch_legacy_block_t completion) -__asm__("_dispatch_call2"); -#endif /* __BLOCKS__ */ - -DISPATCH_PUBLIC_API DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_DEPRECATED -dispatch_queue_t -dispatch_queue_get_current(void); - -// Use: dispatch_retain -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_queue_retain(dispatch_queue_t); - -// Use: dispatch_release -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_queue_release(dispatch_queue_t); - -// Use: dispatch_resume -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_queue_resume(dispatch_queue_t); - -// Use: dispatch_release -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_source_release(dispatch_source_t); - -// Use: dispatch_suspend -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_source_suspend(dispatch_source_t); - -// Use: dispatch_resume -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_source_resume(dispatch_source_t); - -// Use: dispatch_release -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_queue_attr_release(dispatch_queue_attr_t); - -// Use: dispatch_release -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_source_attr_release(dispatch_source_attr_t); - -// Use: dispatch_source_get_handle -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_DEPRECATED -sigset_t 
-dispatch_event_get_signals(dispatch_event_t event); - -// Use: dispatch_get_context -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL //DISPATCH_DEPRECATED -void * -dispatch_queue_get_context(dispatch_queue_t queue); - -// Use: dispatch_set_context -DISPATCH_PUBLIC_API DISPATCH_NONNULL1 //DISPATCH_DEPRECATED -void -dispatch_queue_set_context(dispatch_queue_t queue, void *context); - -// Use: dispatch_get_context -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL //DISPATCH_DEPRECATED -void * -dispatch_source_get_context(dispatch_source_t source); - -// Use: dispatch_set_context -DISPATCH_PUBLIC_API DISPATCH_NONNULL1 //DISPATCH_DEPRECATED -void -dispatch_source_set_context(dispatch_source_t source, void * context); - -// Use: dispatch_source_merge_data -DISPATCH_PUBLIC_API DISPATCH_NONNULL_ALL DISPATCH_DEPRECATED -void -dispatch_source_custom_trigger(dispatch_source_t ds); - -// Use: dispatch_source_cancel -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_cancel(dispatch_source_t); - -// Use: dispatch_source_testcancel -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long -dispatch_testcancel(dispatch_source_t); - -// Use: dispatch_source_set_timer -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -long -dispatch_source_timer_set_time(dispatch_source_t ds, - uint64_t nanoseconds, - uint64_t leeway); - -// Use: dispatch_source_merge_data -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_source_trigger(dispatch_source_t source, unsigned long value); - -enum { - DISPATCH_ERROR_DOMAIN_NO_ERROR = 0, - DISPATCH_ERROR_DOMAIN_POSIX = 1, - DISPATCH_ERROR_DOMAIN_MACH = 2, -}; - -// Obsolete -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW -long -dispatch_source_get_error(dispatch_source_t source, long* error); - -// Use: dispatch_source_get_handle 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -mach_port_t -dispatch_source_get_machport(dispatch_source_t source); - -// Use: dispatch_source_get_handle -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -pid_t -dispatch_source_get_descriptor(dispatch_source_t source); - -// Use: dispatch_source_get_handle -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -pid_t -dispatch_source_get_pid(dispatch_source_t source); - -// Use: dispatch_source_get_mask -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -uint64_t -dispatch_source_get_flags(dispatch_source_t source); - -// LEGACY: dispatch_event_t == dispatch_source_t -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_source_t -dispatch_event_get_source(dispatch_event_t event); - -// Use: dispatch_source_get_error -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW -long -dispatch_event_get_error(dispatch_event_t event, long* error); - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -uint64_t -dispatch_event_get_nanoseconds(dispatch_event_t event); - -// Use: dispatch_source_get_handle -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -long -dispatch_event_get_signal(dispatch_event_t event); - -// Use: dispatch_source_get_data -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW -uint64_t -dispatch_event_get_flags(dispatch_event_t event); - -// Use: dispatch_source_get_data -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW 
-size_t -dispatch_event_get_bytes_available(dispatch_event_t event); - -// Use: dispatch_source_get_data -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -unsigned long -dispatch_event_get_count(dispatch_event_t event); - -// Obsolete -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_source_attr_t -dispatch_source_attr_create(void); - -// Obsolete -#if defined(__BLOCKS__) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NOTHROW -dispatch_source_finalizer_t -dispatch_source_attr_get_finalizer(dispatch_source_attr_t attr); -#endif /* __BLOCKS__ */ - -// Obsolete -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_source_attr_t -dispatch_source_attr_copy(dispatch_source_attr_t proto); - -// Obsolete -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -long -dispatch_source_attr_set_finalizer( - dispatch_source_attr_t attr, - dispatch_source_finalizer_t finalizer); -#endif /* __BLOCKS__ */ - -// Obsolete -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -void -dispatch_source_attr_set_finalizer_f( - dispatch_source_attr_t attr, - void *context, - dispatch_source_finalizer_function_t finalizer); - -// Obsolete -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_NONNULL1 DISPATCH_NOTHROW -void -dispatch_source_attr_set_context( - dispatch_source_attr_t attr, - void *context); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_mig_create( - mach_port_t mport, - size_t max_size, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_mig_callback_t mig_callback); - -enum { - DISPATCH_TIMER_WALL_CLOCK = 0x4, -}; - -enum { - DISPATCH_TIMER_INTERVAL = 0x0, - DISPATCH_TIMER_ONESHOT = 0x1, - DISPATCH_TIMER_ABSOLUTE = 0x3, -}; - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_timer_create( - uint64_t flags, - uint64_t nanoseconds, - uint64_t leeway, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL5 DISPATCH_NONNULL7 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_timer_create_f( - uint64_t flags, - uint64_t nanoseconds, - uint64_t leeway, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_signal_create( - unsigned long signo, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, ...) 
-__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_signal_create_f( - unsigned long sig, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_read_create( - int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_read_create_f( - int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_write_create( - int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_write_create_f( - int descriptor, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, ...) 
-#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_vnode_create( - int descriptor, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VNODE, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_vnode_create_f( - int descriptor, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_proc_create( - pid_t pid, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_proc_create_f( - pid_t pid, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -enum { - DISPATCH_MACHPORT_DEAD = 0x1, - DISPATCH_MACHPORT_RECV = 0x2, - DISPATCH_MACHPORT_DELETED = 0x4, -}; - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) 
-#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_machport_create( - mach_port_t mport, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_machport_create_f( - mach_port_t mport, - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -enum { - DISPATCH_SOURCE_DATA_ADD = 1, - DISPATCH_SOURCE_DATA_OR, -}; - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) -#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_data_create( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_data_create_f( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -enum { - DISPATCH_SOURCE_CUSTOM_ADD = DISPATCH_SOURCE_DATA_ADD, - DISPATCH_SOURCE_CUSTOM_OR = DISPATCH_SOURCE_DATA_OR, -}; - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) 
-#ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_custom_create( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_event_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA..., ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW -dispatch_source_t -dispatch_source_custom_create_f( - unsigned long behavior, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_event_handler_function_t handler); - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, ...) -#if defined(__BLOCKS__) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL4 -dispatch_source_t -dispatch_source_vfs_create( - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - dispatch_source_handler_t handler); -#endif /* __BLOCKS__ */ - -// Use: dispatch_source_create(DISPATCH_SOURCE_TYPE_VFS, ...) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -DISPATCH_MALLOC DISPATCH_NONNULL3 DISPATCH_NONNULL5 -dispatch_source_t -dispatch_source_vfs_create_f( - uint64_t flags, - dispatch_source_attr_t attr, - dispatch_queue_t queue, - void *h_context, - dispatch_source_handler_function_t handler); - -/* - * Raw Mach message support from MIG source. - * - * It is possible to use the following callback style with the MIG source to - * obtain the raw mach message (and send no reply) similar to CFMachPort. - * (For more specific CFMachPort compatibility, see below). - * - * void handle_mach_msg(mach_msg_header *msg) { ... } - * ... - * DISPATCH_MACHPORT_CALLBACK_DECL(mig_compat_callback, handle_mach_msg); - * ... 
- * mig = dispatch_source_mig_create(mp, MY_MAX_MSG_SIZE, NULL, - * queue, mig_compat_callback); - */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -boolean_t -_dispatch_machport_callback(mach_msg_header_t *msg, mach_msg_header_t *reply, void (*callback)(mach_msg_header_t *)); - -#define DISPATCH_MACHPORT_CALLBACK_DECL(new_callback, existing_callback) \ -__private_extern__ boolean_t \ -new_callback(mach_msg_header_t *msg, mach_msg_header_t *reply) \ -{ return _dispatch_machport_callback(msg, reply, existing_callback); } - -/* - * CFMachPort compatibility. - * - * It is possible to use existing CFMachPort callbacks with dispatch mig sources - * by delcaring the following shim and using the shim as the mig server callback - * to dispatch_source_mig_create(). - * The CFMachPortRef "port" parameter of the CFMachPortCallBack will be NULL. - * If mach_port_set_context() is used, that value will be passed into the "info" - * parameter of the CFMachPortCallBack. - * - * DISPATCH_CFMACHPORT_CALLBACK_DECL(mig_callback, MyCFMachPortCallBack); - * - * ... - * { - * kr = mach_port_set_context(mach_task_self(), mp, (mach_vm_address_t)context); - * mig = dispatch_source_mig_create(mp, MY_MAX_MSG_SIZE, NULL, - * queue, mig_callback); - */ -struct __CFMachPort; - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -boolean_t -_dispatch_CFMachPortCallBack(mach_msg_header_t *msg, mach_msg_header_t *reply, void (*callback)(struct __CFMachPort *, void *msg, signed long size, void *)); - -#define DISPATCH_CFMACHPORT_CALLBACK_DECL(new_callback, existing_callback) \ -__private_extern__ boolean_t \ -new_callback(mach_msg_header_t *msg, mach_msg_header_t *reply) \ -{ return _dispatch_CFMachPortCallBack(msg, reply, existing_callback); } - -__END_DECLS - -#endif diff --git a/src/object.c b/src/object.c index 8746495bc..b84979b90 100644 --- a/src/object.c +++ b/src/object.c @@ -1,62 +1,32 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. 
All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" - -void -dispatch_debug(dispatch_object_t dou, const char *msg, ...) -{ - va_list ap; - - va_start(ap, msg); - - dispatch_debugv(dou._do, msg, ap); - - va_end(ap); -} - -void -dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) -{ - char buf[4096]; - size_t offs; - - if (dou._do && dou._do->do_vtable->do_debug) { - offs = dx_debug(dou._do, buf, sizeof(buf)); - } else { - offs = snprintf(buf, sizeof(buf), "NULL vtable slot"); - } - - snprintf(buf + offs, sizeof(buf) - offs, ": %s", msg); - - _dispatch_logv(buf, ap); -} - void dispatch_retain(dispatch_object_t dou) { - if (dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; // global object } - if ((dispatch_atomic_inc(&dou._do->do_xref_cnt) - 1) == 0) { + if (slowpath((dispatch_atomic_inc2o(dou._do, do_xref_cnt) - 1) == 0)) { DISPATCH_CLIENT_CRASH("Resurrection of an object"); } } @@ -64,10 +34,10 @@ dispatch_retain(dispatch_object_t dou) void _dispatch_retain(dispatch_object_t dou) { - if (dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; // global object } - if ((dispatch_atomic_inc(&dou._do->do_ref_cnt) - 1) == 0) { + if 
(slowpath((dispatch_atomic_inc2o(dou._do, do_ref_cnt) - 1) == 0)) { DISPATCH_CLIENT_CRASH("Resurrection of an object"); } } @@ -75,23 +45,18 @@ _dispatch_retain(dispatch_object_t dou) void dispatch_release(dispatch_object_t dou) { - typeof(dou._do->do_xref_cnt) oldval; - - if (dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; } - oldval = dispatch_atomic_dec(&dou._do->do_xref_cnt) + 1; - - if (fastpath(oldval > 1)) { + unsigned int xref_cnt = dispatch_atomic_dec2o(dou._do, do_xref_cnt) + 1; + if (fastpath(xref_cnt > 1)) { return; } - if (oldval == 1) { -#ifndef DISPATCH_NO_LEGACY + if (fastpath(xref_cnt == 1)) { if (dou._do->do_vtable == (void*)&_dispatch_source_kevent_vtable) { - return _dispatch_source_legacy_xref_release(dou._ds); + return _dispatch_source_xref_release(dou._ds); } -#endif if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { // Arguments for and against this assert are within 6705399 DISPATCH_CLIENT_CRASH("Release of a suspended object"); @@ -121,25 +86,21 @@ _dispatch_dispose(dispatch_object_t dou) void _dispatch_release(dispatch_object_t dou) { - typeof(dou._do->do_ref_cnt) oldval; - - if (dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; // global object } - oldval = dispatch_atomic_dec(&dou._do->do_ref_cnt) + 1; - - if (fastpath(oldval > 1)) { + unsigned int ref_cnt = dispatch_atomic_dec2o(dou._do, do_ref_cnt) + 1; + if (fastpath(ref_cnt > 1)) { return; } - if (oldval == 1) { - if (dou._do->do_next != DISPATCH_OBJECT_LISTLESS) { + if (fastpath(ref_cnt == 1)) { + if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { DISPATCH_CRASH("release while enqueued"); } - if (dou._do->do_xref_cnt) { + if (slowpath(dou._do->do_xref_cnt)) { DISPATCH_CRASH("release while external references exist"); } - return dx_dispose(dou._do); } DISPATCH_CRASH("over-release"); @@ -171,30 
+132,55 @@ dispatch_suspend(dispatch_object_t dou) if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; } - dispatch_atomic_add(&dou._do->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL); + // rdar://8181908 explains why we need to do an internal retain at every + // suspension. + (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL); + _dispatch_retain(dou._do); +} + +DISPATCH_NOINLINE +static void +_dispatch_resume_slow(dispatch_object_t dou) +{ + _dispatch_wakeup(dou._do); + // Balancing the retain() done in suspend() for rdar://8181908 + _dispatch_release(dou._do); } void dispatch_resume(dispatch_object_t dou) { + // Global objects cannot be suspended or resumed. This also has the + // side effect of saturating the suspend count of an object and + // guarding against resuming due to overflow. if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { return; } - switch (dispatch_atomic_sub(&dou._do->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL) + DISPATCH_OBJECT_SUSPEND_INTERVAL) { - case DISPATCH_OBJECT_SUSPEND_INTERVAL: - _dispatch_wakeup(dou._do); - break; - case 0: - DISPATCH_CLIENT_CRASH("Over-resume of an object"); - break; - default: - break; + // Check the previous value of the suspend count. If the previous + // value was a single suspend interval, the object should be resumed. + // If the previous value was less than the suspend interval, the object + // has been over-resumed. 
+ unsigned int suspend_cnt = dispatch_atomic_sub2o(dou._do, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL) + + DISPATCH_OBJECT_SUSPEND_INTERVAL; + if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { + // Balancing the retain() done in suspend() for rdar://8181908 + return _dispatch_release(dou._do); + } + if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) { + return _dispatch_resume_slow(dou); } + DISPATCH_CLIENT_CRASH("Over-resume of an object"); } size_t -dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) +_dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return snprintf(buf, bufsiz, "refcnt = 0x%x, suspend_cnt = 0x%x, ", - dou._do->do_ref_cnt, dou._do->do_suspend_cnt); + return snprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " + "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt, + dou._do->do_ref_cnt, + dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, + dou._do->do_suspend_cnt & 1); } + diff --git a/src/object_internal.h b/src/object_internal.h index cc048be70..0627cfd4f 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -28,32 +28,42 @@ #define __DISPATCH_OBJECT_INTERNAL__ enum { - _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations + _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores - _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attribute structures - + _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node + _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks + _DISPATCH_META_TYPE_MASK = 0xfff0000, // mask for object meta-types + _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attributes + DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, - - DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE | _DISPATCH_ATTR_TYPE, + + DISPATCH_DATA_TYPE = _DISPATCH_NODE_TYPE, + + DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, + DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, + DISPATCH_DISK_TYPE = _DISPATCH_DISK_TYPE, + + DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, DISPATCH_SEMAPHORE_TYPE = _DISPATCH_SEMAPHORE_TYPE, - - DISPATCH_SOURCE_ATTR_TYPE = _DISPATCH_SOURCE_TYPE | _DISPATCH_ATTR_TYPE, - + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, }; -#define DISPATCH_VTABLE_HEADER(x) \ - unsigned long const do_type; \ +#define DISPATCH_VTABLE_HEADER(x) \ + unsigned long const do_type; \ const char *const do_kind; \ - size_t (*const do_debug)(struct x *, char *, size_t); \ - struct dispatch_queue_s *(*const do_invoke)(struct x *); \ + size_t (*const 
do_debug)(struct x *, char *, size_t); \ + struct dispatch_queue_s *(*const do_invoke)(struct x *); \ bool (*const do_probe)(struct x *); \ void (*const do_dispose)(struct x *) @@ -64,34 +74,31 @@ enum { #define dx_invoke(x) (x)->do_vtable->do_invoke(x) #define dx_probe(x) (x)->do_vtable->do_probe(x) -#define DISPATCH_STRUCT_HEADER(x, y) \ - const struct y *do_vtable; \ - struct x *volatile do_next; \ - unsigned int do_ref_cnt; \ - unsigned int do_xref_cnt; \ - unsigned int do_suspend_cnt; \ - struct dispatch_queue_s *do_targetq; \ +#define DISPATCH_STRUCT_HEADER(x, y) \ + const struct y *do_vtable; \ + struct x *volatile do_next; \ + unsigned int do_ref_cnt; \ + unsigned int do_xref_cnt; \ + unsigned int do_suspend_cnt; \ + struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ - void *do_finalizer + void *do_finalizer; -#define DISPATCH_OBJECT_GLOBAL_REFCNT (~0u) -#define DISPATCH_OBJECT_SUSPEND_LOCK 1u // "word and bit" must be a power of two to be safely subtracted +#define DISPATCH_OBJECT_GLOBAL_REFCNT (~0u) +// "word and bit" must be a power of two to be safely subtracted +#define DISPATCH_OBJECT_SUSPEND_LOCK 1u #define DISPATCH_OBJECT_SUSPEND_INTERVAL 2u -#define DISPATCH_OBJECT_SUSPENDED(x) ((x)->do_suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL) +#define DISPATCH_OBJECT_SUSPENDED(x) \ + ((x)->do_suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL) #ifdef __LP64__ // the bottom nibble must not be zero, the rest of the bits should be random -// we sign extend the 64-bit version so that a better instruction encoding is generated on Intel -#define DISPATCH_OBJECT_LISTLESS ((void *)0xffffffff89abcdef) +// we sign extend the 64-bit version so that a better instruction encoding is +// generated on Intel +#define DISPATCH_OBJECT_LISTLESS ((void *)0xffffffff89abcdef) #else -#define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) +#define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif -#define _dispatch_trysuspend(x) 
__sync_bool_compare_and_swap(&(x)->do_suspend_cnt, 0, DISPATCH_OBJECT_SUSPEND_INTERVAL) -// _dispatch_source_invoke() relies on this testing the whole suspend count -// word, not just the lock bit. In other words, no point taking the lock -// if the source is suspended or canceled. -#define _dispatch_trylock(x) dispatch_atomic_cmpxchg(&(x)->do_suspend_cnt, 0, DISPATCH_OBJECT_SUSPEND_LOCK) - struct dispatch_object_vtable_s { DISPATCH_VTABLE_HEADER(dispatch_object_s); }; @@ -100,7 +107,8 @@ struct dispatch_object_s { DISPATCH_STRUCT_HEADER(dispatch_object_s, dispatch_object_vtable_s); }; -size_t dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); +size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, + size_t bufsiz); void _dispatch_retain(dispatch_object_t dou); void _dispatch_release(dispatch_object_t dou); diff --git a/src/once.c b/src/once.c index 9046c065e..ab4a4e887 100644 --- a/src/once.c +++ b/src/once.c @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -23,9 +23,17 @@ #undef dispatch_once #undef dispatch_once_f + +struct _dispatch_once_waiter_s { + volatile struct _dispatch_once_waiter_s *volatile dow_next; + _dispatch_thread_semaphore_t dow_sema; +}; + +#define DISPATCH_ONCE_DONE ((struct _dispatch_once_waiter_s *)~0l) + #ifdef __BLOCKS__ void -dispatch_once(dispatch_once_t *val, void (^block)(void)) +dispatch_once(dispatch_once_t *val, dispatch_block_t block) { struct Block_basic *bb = (void *)block; @@ -35,12 +43,17 @@ dispatch_once(dispatch_once_t *val, void (^block)(void)) DISPATCH_NOINLINE void -dispatch_once_f(dispatch_once_t *val, void *ctxt, void (*func)(void *)) +dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { - volatile long *vval = val; + struct _dispatch_once_waiter_s * volatile *vval = + (struct _dispatch_once_waiter_s**)val; + struct _dispatch_once_waiter_s dow = { NULL, 0 }; + struct _dispatch_once_waiter_s *tail, *tmp; + _dispatch_thread_semaphore_t sema; - if (dispatch_atomic_cmpxchg(val, 0l, 1l)) { - func(ctxt); + if (dispatch_atomic_cmpxchg(vval, NULL, &dow)) { + dispatch_atomic_acquire_barrier(); + _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. 
// @@ -52,25 +65,25 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, void (*func)(void *)) // The dispatch_once*() wrapper macro causes the callee's // instruction stream to look like this (pseudo-RISC): // - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: - // load r6, data-addr + // load r5, pred-addr + // cmpi r5, -1 + // beq 1f + // call dispatch_once*() + // 1f: + // load r6, data-addr // // May be re-ordered like so: // - // load r6, data-addr - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: + // load r6, data-addr + // load r5, pred-addr + // cmpi r5, -1 + // beq 1f + // call dispatch_once*() + // 1f: // // Normally, a barrier on the read side is used to workaround // the weakly ordered memory model. But barriers are expensive - // and we only need to synchronize once! After func(ctxt) + // and we only need to synchronize once! After func(ctxt) // completes, the predicate will be marked as "done" and the // branch predictor will correctly skip the call to // dispatch_once*(). @@ -91,14 +104,32 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, void (*func)(void *)) // // On some CPUs, the most fully synchronizing instruction might // need to be issued. 
- - dispatch_atomic_barrier(); - *val = ~0l; - } else { - do { - _dispatch_hardware_pause(); - } while (*vval != ~0l); - dispatch_atomic_barrier(); + dispatch_atomic_maximally_synchronizing_barrier(); + //dispatch_atomic_release_barrier(); // assumed contained in above + tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE); + tail = &dow; + while (tail != tmp) { + while (!tmp->dow_next) { + _dispatch_hardware_pause(); + } + sema = tmp->dow_sema; + tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next; + _dispatch_thread_semaphore_signal(sema); + } + } else { + dow.dow_sema = _dispatch_get_thread_semaphore(); + for (;;) { + tmp = *vval; + if (tmp == DISPATCH_ONCE_DONE) { + break; + } + dispatch_atomic_store_barrier(); + if (dispatch_atomic_cmpxchg(vval, tmp, &dow)) { + dow.dow_next = tmp; + _dispatch_thread_semaphore_wait(dow.dow_sema); + } + } + _dispatch_put_thread_semaphore(dow.dow_sema); } } diff --git a/src/os_shims.h b/src/os_shims.h deleted file mode 100644 index 7efd28e75..000000000 --- a/src/os_shims.h +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. 
- */ - -#ifndef __DISPATCH_OS_SHIMS__ -#define __DISPATCH_OS_SHIMS__ - -#include -#include -#include - -__private_extern__ const char *__crashreporter_info__; - -static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; -static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; -static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; -static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY3; -//__PTK_LIBDISPATCH_KEY4 -//__PTK_LIBDISPATCH_KEY5 - - -#define SIMULATE_5491082 1 -#ifndef _PTHREAD_TSD_OFFSET -#define _PTHREAD_TSD_OFFSET 0 -#endif - -static inline void -_dispatch_thread_setspecific(unsigned long k, void *v) -{ -#if defined(SIMULATE_5491082) && defined(__i386__) - asm("movl %1, %%gs:%0" : "=m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET)) : "ri" (v) : "memory"); -#elif defined(SIMULATE_5491082) && defined(__x86_64__) - asm("movq %1, %%gs:%0" : "=m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET)) : "rn" (v) : "memory"); -#else - int res; - if (_pthread_has_direct_tsd()) { - res = _pthread_setspecific_direct(k, v); - } else { - res = pthread_setspecific(k, v); - } - dispatch_assert_zero(res); -#endif -} - -static inline void * -_dispatch_thread_getspecific(unsigned long k) -{ -#if defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__)) - void *rval; - asm("mov %%gs:%1, %0" : "=r" (rval) : "m" (*(void **)(k * sizeof(void *) + _PTHREAD_TSD_OFFSET))); - return rval; -#else - if (_pthread_has_direct_tsd()) { - return _pthread_getspecific_direct(k); - } else { - return pthread_getspecific(k); - } -#endif -} - -static inline void -_dispatch_thread_key_init_np(unsigned long k, void (*d)(void *)) -{ - dispatch_assert_zero(pthread_key_init_np((int)k, d)); -} - -#define _dispatch_thread_self pthread_self - - -#if DISPATCH_PERF_MON - -#if defined(SIMULATE_5491082) && (defined(__i386__) || defined(__x86_64__)) -#ifdef __LP64__ -#define _dispatch_workitem_inc() asm("incq %%gs:%0" 
: "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decq %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") -#else -#define _dispatch_workitem_inc() asm("incl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + _PTHREAD_TSD_OFFSET)) :: "cc") -#endif -#else -static inline void -_dispatch_workitem_inc(void) -{ - unsigned long cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); - _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); -} -static inline void -_dispatch_workitem_dec(void) -{ - unsigned long cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); - _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt); -} -#endif - -// C99 doesn't define flsll() or ffsll() -#ifdef __LP64__ -#define flsll(x) flsl(x) -#else -static inline unsigned int -flsll(uint64_t val) -{ - union { - struct { -#ifdef __BIG_ENDIAN__ - unsigned int hi, low; -#else - unsigned int low, hi; -#endif - } words; - uint64_t word; - } _bucket = { - .word = val, - }; - if (_bucket.words.hi) { - return fls(_bucket.words.hi) + 32; - } - return fls(_bucket.words.low); -} -#endif - -#else -#define _dispatch_workitem_inc() -#define _dispatch_workitem_dec() -#endif // DISPATCH_PERF_MON - -#endif diff --git a/src/protocol.defs b/src/protocol.defs index e6bd40044..bf5fe5bce 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -1,45 +1,48 @@ /* - * Copyright (c) 2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - */ #include #include -// '64' is used to align with Mach notifications and so that we don't fight with the notify symbols in Libsystem +// '64' is used to align with Mach notifications and so that we don't fight +// with the notify symbols in Libsystem subsystem libdispatch_internal_protocol 64; serverprefix _dispatch_; userprefix _dispatch_send_; -skip; /* was MACH_NOTIFY_FIRST: 64 */ +skip; /* was MACH_NOTIFY_FIRST: 64 */ /* MACH_NOTIFY_PORT_DELETED: 65 */ simpleroutine mach_notify_port_deleted( - _notify : mach_port_move_send_once_t; - _name : mach_port_name_t + _notify : mach_port_move_send_once_t; + _name : mach_port_name_t ); -skip; /* was MACH_NOTIFY_MSG_ACCEPTED: 66 */ +/* MACH_NOTIFY_SEND_POSSIBLE: 66 */ +simpleroutine +mach_notify_send_possible( + _notify : mach_port_move_send_once_t; + _name : mach_port_name_t +); skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 67 */ @@ -48,28 +51,28 @@ skip; /* was NOTIFY_RECEIVE_RIGHTS: 68 */ /* MACH_NOTIFY_PORT_DESTROYED: 69 */ simpleroutine mach_notify_port_destroyed( - _notify : mach_port_move_send_once_t; - _rights : mach_port_move_receive_t + _notify : mach_port_move_send_once_t; + _rights : mach_port_move_receive_t ); /* MACH_NOTIFY_NO_SENDERS: 70 */ simpleroutine mach_notify_no_senders( - _notify : mach_port_move_send_once_t; - _mscnt : mach_port_mscount_t + _notify : mach_port_move_send_once_t; + _mscnt : mach_port_mscount_t ); /* MACH_NOTIFY_SEND_ONCE: 71 */ simpleroutine mach_notify_send_once( - 
_notify : mach_port_move_send_once_t + _notify : mach_port_move_send_once_t ); /* MACH_NOTIFY_DEAD_NAME: 72 */ simpleroutine mach_notify_dead_name( - _notify : mach_port_move_send_once_t; - _name : mach_port_name_t + _notify : mach_port_move_send_once_t; + _name : mach_port_name_t ); /* highly unlikely additional Mach notifications */ @@ -81,11 +84,11 @@ skip; simpleroutine wakeup_main_thread( - _port : mach_port_t; - WaitTime _waitTimeout : natural_t + _port : mach_port_t; + WaitTime _waitTimeout : natural_t ); simpleroutine consume_send_once_right( - _port : mach_port_move_send_once_t + _port : mach_port_move_send_once_t ); diff --git a/src/provider.d b/src/provider.d new file mode 100644 index 000000000..59fe790d7 --- /dev/null +++ b/src/provider.d @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2010 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +typedef struct dispatch_object_s *dispatch_object_t; +typedef struct dispatch_queue_s *dispatch_queue_t; +typedef void (*dispatch_function_t)(void *); + +provider dispatch { + probe queue__push(dispatch_queue_t queue, const char *label, + dispatch_object_t item, const char *kind, + dispatch_function_t function, void *context); + probe queue__pop(dispatch_queue_t queue, const char *label, + dispatch_object_t item, const char *kind, + dispatch_function_t function, void *context); + probe callout__entry(dispatch_queue_t queue, const char *label, + dispatch_function_t function, void *context); + probe callout__return(dispatch_queue_t queue, const char *label, + dispatch_function_t function, void *context); +}; + +#pragma D attributes Evolving/Evolving/Common provider dispatch provider +#pragma D attributes Private/Private/Common provider dispatch module +#pragma D attributes Private/Private/Common provider dispatch function +#pragma D attributes Evolving/Evolving/Common provider dispatch name +#pragma D attributes Evolving/Evolving/Common provider dispatch args diff --git a/src/queue.c b/src/queue.c index a3e89360f..595bac562 100644 --- a/src/queue.c +++ b/src/queue.c @@ -1,527 +1,538 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" +#if HAVE_MACH #include "protocol.h" +#endif -void -dummy_function(void) -{ -} +#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \ + !defined(DISPATCH_ENABLE_THREAD_POOL) +#define DISPATCH_ENABLE_THREAD_POOL 1 +#endif -long -dummy_function_r0(void) -{ - return 0; -} +static void _dispatch_cache_cleanup(void *value); +static void _dispatch_async_f_redirect(dispatch_queue_t dq, + dispatch_continuation_t dc); +static void _dispatch_queue_cleanup(void *ctxt); +static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq); +static void _dispatch_queue_drain(dispatch_queue_t dq); +static inline _dispatch_thread_semaphore_t + _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); +static void _dispatch_worker_thread2(void *context); +#if DISPATCH_ENABLE_THREAD_POOL +static void *_dispatch_worker_thread(void *context); +static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); +#endif +static bool _dispatch_mgr_wakeup(dispatch_queue_t dq); +static dispatch_queue_t _dispatch_mgr_thread(dispatch_queue_t dq); -static bool _dispatch_select_workaround; -static fd_set _dispatch_rfds; -static fd_set _dispatch_wfds; -static void *_dispatch_rfd_ptrs[FD_SETSIZE]; -static void *_dispatch_wfd_ptrs[FD_SETSIZE]; +#if DISPATCH_COCOA_COMPAT +static unsigned int _dispatch_worker_threads; +static dispatch_once_t _dispatch_main_q_port_pred; +static mach_port_t main_q_port; + +static void _dispatch_main_q_port_init(void *ctxt); +static void _dispatch_queue_wakeup_main(void); +static void _dispatch_main_queue_drain(void); +#endif + +#pragma mark - +#pragma mark dispatch_queue_vtable + +const struct dispatch_queue_vtable_s _dispatch_queue_vtable = { + .do_type = DISPATCH_QUEUE_TYPE, + .do_kind = "queue", + .do_dispose = _dispatch_queue_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = dispatch_queue_debug, +}; + +static const struct dispatch_queue_vtable_s 
_dispatch_queue_root_vtable = { + .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, + .do_kind = "global-queue", + .do_debug = dispatch_queue_debug, + .do_probe = _dispatch_queue_wakeup_global, +}; +static const struct dispatch_queue_vtable_s _dispatch_queue_mgr_vtable = { + .do_type = DISPATCH_QUEUE_MGR_TYPE, + .do_kind = "mgr-queue", + .do_invoke = _dispatch_mgr_thread, + .do_debug = dispatch_queue_debug, + .do_probe = _dispatch_mgr_wakeup, +}; + +#pragma mark - +#pragma mark dispatch_root_queue + +#if HAVE_PTHREAD_WORKQUEUES +static const int _dispatch_root_queue_wq_priorities[] = { + [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = WORKQ_LOW_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = WORKQ_LOW_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = WORKQ_DEFAULT_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = + WORKQ_DEFAULT_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = WORKQ_HIGH_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = WORKQ_HIGH_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = WORKQ_BG_PRIOQUEUE, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = + WORKQ_BG_PRIOQUEUE, +}; +#endif +#if DISPATCH_ENABLE_THREAD_POOL static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { - { + [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, - { + [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, - { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, - { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, - { + [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, - { + [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { + .do_vtable = &_dispatch_semaphore_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_semaphore_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, }; - -static struct dispatch_queue_s _dispatch_root_queues[]; - -static inline dispatch_queue_t -_dispatch_get_root_queue(long priority, bool overcommit) -{ - if (overcommit) switch (priority) { - case DISPATCH_QUEUE_PRIORITY_LOW: - return &_dispatch_root_queues[1]; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - return &_dispatch_root_queues[3]; - case DISPATCH_QUEUE_PRIORITY_HIGH: - return &_dispatch_root_queues[5]; - } - switch (priority) { - case DISPATCH_QUEUE_PRIORITY_LOW: - return &_dispatch_root_queues[0]; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - return &_dispatch_root_queues[2]; - case DISPATCH_QUEUE_PRIORITY_HIGH: - return &_dispatch_root_queues[4]; - default: - return NULL; - } -} - -#ifdef __BLOCKS__ -dispatch_block_t -_dispatch_Block_copy(dispatch_block_t db) -{ - dispatch_block_t rval; - - while (!(rval = Block_copy(db))) { - sleep(1); - } - - return rval; -} -#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) - -void -_dispatch_call_block_and_release(void *block) -{ - void (^b)(void) = block; - b(); - Block_release(b); -} - -void -_dispatch_call_block_and_release2(void *block, void *ctxt) 
-{ - void (^b)(void*) = block; - b(ctxt); - Block_release(b); -} - -#endif /* __BLOCKS__ */ - -struct dispatch_queue_attr_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s); -}; - -struct dispatch_queue_attr_s { - DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s); - - // Public: - int qa_priority; - void* finalizer_ctxt; - dispatch_queue_finalizer_function_t finalizer_func; - - // Private: - unsigned long qa_flags; -}; - -static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); - -#define _dispatch_queue_trylock(dq) dispatch_atomic_cmpxchg(&(dq)->dq_running, 0, 1) -static inline void _dispatch_queue_unlock(dispatch_queue_t dq); -static void _dispatch_queue_invoke(dispatch_queue_t dq); -static void _dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq); -static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static struct dispatch_object_s *_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq); - -static bool _dispatch_program_is_probably_callback_driven; - -#if DISPATCH_COCOA_COMPAT -void (*dispatch_begin_thread_4GC)(void) = dummy_function; -void (*dispatch_end_thread_4GC)(void) = dummy_function; -void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function; -void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function; -static void _dispatch_queue_wakeup_main(void); - -static dispatch_once_t _dispatch_main_q_port_pred; -static bool main_q_is_draining; -static mach_port_t main_q_port; #endif -static void _dispatch_cache_cleanup2(void *value); -static void _dispatch_force_cache_cleanup(void); - -static const struct dispatch_queue_vtable_s _dispatch_queue_vtable = { - .do_type = DISPATCH_QUEUE_TYPE, - .do_kind = "queue", - .do_dispose = _dispatch_queue_dispose, - .do_invoke = (void *)dummy_function_r0, - .do_probe = (void *)dummy_function_r0, - .do_debug = dispatch_queue_debug, -}; - -static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = { - 
.do_type = DISPATCH_QUEUE_GLOBAL_TYPE, - .do_kind = "global-queue", - .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_queue_wakeup_global, -}; - #define MAX_THREAD_COUNT 255 struct dispatch_root_queue_context_s { +#if HAVE_PTHREAD_WORKQUEUES pthread_workqueue_t dgq_kworkqueue; +#endif uint32_t dgq_pending; +#if DISPATCH_ENABLE_THREAD_POOL uint32_t dgq_thread_pool_size; dispatch_semaphore_t dgq_thread_mediator; +#endif }; -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2) static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { - { - .dgq_thread_mediator = &_dispatch_thread_mediator[0], + [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, - { - .dgq_thread_mediator = &_dispatch_thread_mediator[1], + [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, - { - .dgq_thread_mediator = &_dispatch_thread_mediator[2], + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, - { - .dgq_thread_mediator = &_dispatch_thread_mediator[3], + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, - { - .dgq_thread_mediator = &_dispatch_thread_mediator[4], + [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + 
DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, - { - .dgq_thread_mediator = &_dispatch_thread_mediator[5], + [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], + .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif + }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], + .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif + }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_thread_mediator = &_dispatch_thread_mediator[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, +#endif }, }; -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol // dq_running is set to 2 so that barrier operations go through the slow path -static struct dispatch_queue_s _dispatch_root_queues[] = { - { +DISPATCH_CACHELINE_ALIGN +struct dispatch_queue_s _dispatch_root_queues[] = { + [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[0], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], .dq_label = "com.apple.root.low-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 4, }, - { + [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, 
.do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[1], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], .dq_label = "com.apple.root.low-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 5, }, - { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[2], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], .dq_label = "com.apple.root.default-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 6, }, - { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[3], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], .dq_label = "com.apple.root.default-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 7, }, - { + [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[4], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], .dq_label = "com.apple.root.high-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 8, }, - { + [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { .do_vtable = &_dispatch_queue_root_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = 
DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[5], + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], .dq_label = "com.apple.root.high-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 9, }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], + + .dq_label = "com.apple.root.background-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 10, + }, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { + .do_vtable = &_dispatch_queue_root_vtable, + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], + + .dq_label = "com.apple.root.background-overcommit-priority", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 11, + }, }; -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -struct dispatch_queue_s _dispatch_main_q = { - .do_vtable = &_dispatch_queue_vtable, +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +DISPATCH_CACHELINE_ALIGN +struct dispatch_queue_s _dispatch_mgr_q = { + .do_vtable = &_dispatch_queue_mgr_vtable, .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT / 2], + .do_targetq = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.main-thread", - 
.dq_running = 1, + .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, - .dq_serialnum = 1, + .dq_serialnum = 2, }; -#if DISPATCH_PERF_MON -static OSSpinLock _dispatch_stats_lock; -static size_t _dispatch_bad_ratio; -static struct { - uint64_t time_total; - uint64_t count_total; - uint64_t thread_total; -} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set -static void _dispatch_queue_merge_stats(uint64_t start); -#endif +dispatch_queue_t +dispatch_get_global_queue(long priority, unsigned long flags) +{ + if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) { + return NULL; + } + return _dispatch_get_root_queue(priority, + flags & DISPATCH_QUEUE_OVERCOMMIT); +} -static void *_dispatch_worker_thread(void *context); -static void _dispatch_worker_thread2(void *context); +dispatch_queue_t +dispatch_get_current_queue(void) +{ + return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); +} -malloc_zone_t *_dispatch_ccache_zone; +#pragma mark - +#pragma mark dispatch_init -static inline void -_dispatch_continuation_free(dispatch_continuation_t dc) +static void +_dispatch_hw_config_init(void) { - dispatch_continuation_t prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); - dc->do_next = prev_dc; - _dispatch_thread_setspecific(dispatch_cache_key, dc); + _dispatch_hw_config.cc_max_active = _dispatch_get_activecpu(); + _dispatch_hw_config.cc_max_logical = _dispatch_get_logicalcpu_max(); + _dispatch_hw_config.cc_max_physical = _dispatch_get_physicalcpu_max(); } -static inline void -_dispatch_continuation_pop(dispatch_object_t dou) +static inline bool +_dispatch_root_queues_init_workq(void) { - dispatch_continuation_t dc = dou._dc; - dispatch_group_t dg; + bool result = false; +#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_ENABLE_THREAD_POOL + if (slowpath(getenv("LIBDISPATCH_DISABLE_KWQ"))) return result; +#endif + int i, r; + pthread_workqueue_attr_t pwq_attr; + r = pthread_workqueue_attr_init_np(&pwq_attr); + (void)dispatch_assume_zero(r); + 
for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + pthread_workqueue_t pwq = NULL; + const int prio = _dispatch_root_queue_wq_priorities[i]; - if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return _dispatch_queue_invoke(dou._dq); + r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, prio); + (void)dispatch_assume_zero(r); + r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1); + (void)dispatch_assume_zero(r); + r = pthread_workqueue_create_np(&pwq, &pwq_attr); + (void)dispatch_assume_zero(r); + result = result || dispatch_assume(pwq); + _dispatch_root_queue_contexts[i].dgq_kworkqueue = pwq; } + r = pthread_workqueue_attr_destroy_np(&pwq_attr); + (void)dispatch_assume_zero(r); +#endif // HAVE_PTHREAD_WORKQUEUES + return result; +} - // Add the item back to the cache before calling the function. This - // allows the 'hot' continuation to be used for a quick callback. - // - // The ccache version is per-thread. - // Therefore, the object has not been reused yet. - // This generates better assembly. - if ((long)dou._do->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - _dispatch_continuation_free(dc); - } - if ((long)dou._do->do_vtable & DISPATCH_OBJ_GROUP_BIT) { - dg = dc->dc_group; - } else { - dg = NULL; +static inline void +_dispatch_root_queues_init_thread_pool(void) +{ +#if DISPATCH_ENABLE_THREAD_POOL + int i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { +#if TARGET_OS_EMBEDDED + // some software hangs if the non-overcommitting queues do not + // overcommit when threads block. 
Someday, this behavior should apply + // to all platforms + if (!(i & 1)) { + _dispatch_root_queue_contexts[i].dgq_thread_pool_size = + _dispatch_hw_config.cc_max_active; + } +#endif +#if USE_MACH_SEM + // override the default FIFO behavior for the pool semaphores + kern_return_t kr = semaphore_create(mach_task_self(), + &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + (void)dispatch_assume(_dispatch_thread_mediator[i].dsema_port); +#elif USE_POSIX_SEM + /* XXXRW: POSIX semaphores don't support LIFO? */ + int ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0); + (void)dispatch_assume_zero(ret); +#endif } - dc->dc_func(dc->dc_ctxt); - if (dg) { - dispatch_group_leave(dg); - _dispatch_release(dg); +#else + DISPATCH_CRASH("Thread pool creation failed"); +#endif // DISPATCH_ENABLE_THREAD_POOL +} + +static void +_dispatch_root_queues_init(void *context DISPATCH_UNUSED) +{ + if (!_dispatch_root_queues_init_workq()) { + _dispatch_root_queues_init_thread_pool(); } + } -struct dispatch_object_s * -_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) +#define countof(x) (sizeof(x) / sizeof(x[0])) + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +libdispatch_init(void) { - struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; + dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 4); + dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 8); + + dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == + -DISPATCH_QUEUE_PRIORITY_HIGH); + dispatch_assert(countof(_dispatch_root_queues) == + DISPATCH_ROOT_QUEUE_COUNT); + dispatch_assert(countof(_dispatch_root_queue_contexts) == + DISPATCH_ROOT_QUEUE_COUNT); +#if HAVE_PTHREAD_WORKQUEUES + dispatch_assert(countof(_dispatch_root_queue_wq_priorities) == + DISPATCH_ROOT_QUEUE_COUNT); +#endif +#if DISPATCH_ENABLE_THREAD_POOL + dispatch_assert(countof(_dispatch_thread_mediator) == + DISPATCH_ROOT_QUEUE_COUNT); +#endif + dispatch_assert(sizeof(struct 
dispatch_source_s) == + sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); +#if DISPATCH_DEBUG + dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE + == 0); +#endif - // The mediator value acts both as a "lock" and a signal - head = dispatch_atomic_xchg(&dq->dq_items_head, mediator); + _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_create(&dispatch_sema4_key, + (void (*)(void *))_dispatch_thread_semaphore_dispose); + _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); + _dispatch_thread_key_create(&dispatch_io_key, NULL); + _dispatch_thread_key_create(&dispatch_apply_key, NULL); +#if DISPATCH_PERF_MON + _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); +#endif - if (slowpath(head == NULL)) { - // The first xchg on the tail will tell the enqueueing thread that it - // is safe to blindly write out to the head pointer. A cmpxchg honors - // the algorithm. - dispatch_atomic_cmpxchg(&dq->dq_items_head, mediator, NULL); - _dispatch_debug("no work on global work queue"); - return NULL; - } +#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 + _dispatch_main_q.do_vtable = &_dispatch_queue_vtable; + _dispatch_main_q.do_targetq = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]; + _dispatch_data_empty.do_vtable = &_dispatch_data_vtable; +#endif - if (slowpath(head == mediator)) { - // This thread lost the race for ownership of the queue. - // - // The ratio of work to libdispatch overhead must be bad. This - // scenario implies that there are too many threads in the pool. - // Create a new pending thread and then exit this thread. - // The kernel will grant a new thread when the load subsides. 
- _dispatch_debug("Contention on queue: %p", dq); - _dispatch_queue_wakeup_global(dq); -#if DISPATCH_PERF_MON - dispatch_atomic_inc(&_dispatch_bad_ratio); -#endif - return NULL; - } - - // Restore the head pointer to a sane value before returning. - // If 'next' is NULL, then this item _might_ be the last item. - next = fastpath(head->do_next); - - if (slowpath(!next)) { - dq->dq_items_head = NULL; - - if (dispatch_atomic_cmpxchg(&dq->dq_items_tail, head, NULL)) { - // both head and tail are NULL now - goto out; - } - - // There must be a next item now. This thread won't wait long. - while (!(next = head->do_next)) { - _dispatch_hardware_pause(); - } - } - - dq->dq_items_head = next; - _dispatch_queue_wakeup_global(dq); -out: - return head; -} - -dispatch_queue_t -dispatch_get_current_queue(void) -{ - return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); -} - -#undef dispatch_get_main_queue -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_NA) -dispatch_queue_t dispatch_get_main_queue(void); - -dispatch_queue_t -dispatch_get_main_queue(void) -{ - return &_dispatch_main_q; -} -#define dispatch_get_main_queue() (&_dispatch_main_q) - -struct _dispatch_hw_config_s _dispatch_hw_config; - -static void -_dispatch_queue_set_width_init(void) -{ - size_t valsz = sizeof(uint32_t); - - errno = 0; - sysctlbyname("hw.activecpu", &_dispatch_hw_config.cc_max_active, &valsz, NULL, 0); - dispatch_assume_zero(errno); - dispatch_assume(valsz == sizeof(uint32_t)); + _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); - errno = 0; - sysctlbyname("hw.logicalcpu_max", &_dispatch_hw_config.cc_max_logical, &valsz, NULL, 0); - dispatch_assume_zero(errno); - dispatch_assume(valsz == sizeof(uint32_t)); +#if DISPATCH_USE_PTHREAD_ATFORK + (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, + dispatch_atfork_parent, dispatch_atfork_child)); +#endif - errno = 0; - sysctlbyname("hw.physicalcpu_max", &_dispatch_hw_config.cc_max_physical, &valsz, NULL, 
0); - dispatch_assume_zero(errno); - dispatch_assume(valsz == sizeof(uint32_t)); + _dispatch_hw_config_init(); } +DISPATCH_EXPORT DISPATCH_NOTHROW void -dispatch_queue_set_width(dispatch_queue_t dq, long width) +dispatch_atfork_child(void) { - int w = (int)width; // intentional truncation - uint32_t tmp; + void *crash = (void *)0x100; + size_t i; - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - return; - } - if (w == 1 || w == 0) { - dq->dq_width = 1; + if (_dispatch_safe_fork) { return; } - if (w > 0) { - tmp = w; - } else switch (w) { - case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = _dispatch_hw_config.cc_max_physical; - break; - case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = _dispatch_hw_config.cc_max_active; - break; - default: - // fall through - case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - tmp = _dispatch_hw_config.cc_max_logical; - break; - } - // multiply by two since the running count is inc/dec by two (the low bit == barrier) - dq->dq_width = tmp * 2; - // XXX if the queue has items and the width is increased, we should try to wake the queue + _dispatch_main_q.dq_items_head = crash; + _dispatch_main_q.dq_items_tail = crash; + + _dispatch_mgr_q.dq_items_head = crash; + _dispatch_mgr_q.dq_items_tail = crash; + + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_root_queues[i].dq_items_head = crash; + _dispatch_root_queues[i].dq_items_tail = crash; + } } +#pragma mark - +#pragma mark dispatch_queue_t + // skip zero // 1 - main_q // 2 - mgr_q // 3 - _unused_ -// 4,5,6,7,8,9 - global queues +// 4,5,6,7,8,9,10,11 - global queues // we use 'xadd' on Intel, so the initial value == next assigned -static unsigned long _dispatch_queue_serial_numbers = 10; - -// Note to later developers: ensure that any initialization changes are -// made for statically allocated queues (i.e. _dispatch_main_q). 
-inline void -_dispatch_queue_init(dispatch_queue_t dq) -{ - dq->do_vtable = &_dispatch_queue_vtable; - dq->do_next = DISPATCH_OBJECT_LISTLESS; - dq->do_ref_cnt = 1; - dq->do_xref_cnt = 1; - dq->do_targetq = _dispatch_get_root_queue(0, true); - dq->dq_running = 0; - dq->dq_width = 1; - dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; -} +unsigned long _dispatch_queue_serial_numbers = 12; dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) @@ -539,7 +550,9 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) } // XXX switch to malloc() - dq = calloc(1ul, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE + label_len + 1); + dq = calloc(1ul, sizeof(struct dispatch_queue_s) - + DISPATCH_QUEUE_MIN_LABEL_SIZE - DISPATCH_QUEUE_CACHELINE_PAD + + label_len + 1); if (slowpath(!dq)) { return dq; } @@ -547,31 +560,20 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) _dispatch_queue_init(dq); strcpy(dq->dq_label, label); -#ifndef DISPATCH_NO_LEGACY - if (slowpath(attr)) { - dq->do_targetq = _dispatch_get_root_queue(attr->qa_priority, attr->qa_flags & DISPATCH_QUEUE_OVERCOMMIT); - dq->dq_finalizer_ctxt = attr->finalizer_ctxt; - dq->dq_finalizer_func = attr->finalizer_func; -#ifdef __BLOCKS__ - if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { - // if finalizer_ctxt is a Block, retain it. 
- dq->dq_finalizer_ctxt = Block_copy(dq->dq_finalizer_ctxt); - if (!(dq->dq_finalizer_ctxt)) { - goto out_bad; - } - } -#endif + if (fastpath(!attr)) { + return dq; + } + if (fastpath(attr == DISPATCH_QUEUE_CONCURRENT)) { + dq->dq_width = UINT32_MAX; + dq->do_targetq = _dispatch_get_root_queue(0, false); + } else { + dispatch_debug_assert(!attr, "Invalid attribute"); } -#endif - return dq; - -out_bad: - free(dq); - return NULL; } -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol void _dispatch_queue_dispose(dispatch_queue_t dq) { @@ -582,409 +584,1247 @@ _dispatch_queue_dispose(dispatch_queue_t dq) DISPATCH_CRASH("Release of a queue while items are enqueued"); } -#ifndef DISPATCH_NO_LEGACY - if (dq->dq_finalizer_func) { - dq->dq_finalizer_func(dq->dq_finalizer_ctxt, dq); - } -#endif - // trash the tail queue so that use after free will crash dq->dq_items_tail = (void *)0x200; + dispatch_queue_t dqsq = dispatch_atomic_xchg2o(dq, dq_specific_q, + (void *)0x200); + if (dqsq) { + _dispatch_release(dqsq); + } + _dispatch_dispose(dq); } -DISPATCH_NOINLINE -static void -_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func) +const char * +dispatch_queue_get_label(dispatch_queue_t dq) { - dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap()); - - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = context; - - _dispatch_queue_push(dq, dc); + return dq->dq_label; } -#ifdef __BLOCKS__ -void -dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) +static void +_dispatch_queue_set_width2(void *ctxt) { - dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release); + int w = (int)(intptr_t)ctxt; // intentional truncation + uint32_t tmp; + dispatch_queue_t dq = 
_dispatch_queue_get_current(); + + if (w == 1 || w == 0) { + dq->dq_width = 1; + return; + } + if (w > 0) { + tmp = w; + } else switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_hw_config.cc_max_physical; + break; + case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_hw_config.cc_max_active; + break; + default: + // fall through + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + tmp = _dispatch_hw_config.cc_max_logical; + break; + } + // multiply by two since the running count is inc/dec by two + // (the low bit == barrier) + dq->dq_width = tmp * 2; } -#endif -DISPATCH_NOINLINE void -dispatch_barrier_async_f(dispatch_queue_t dq, void *context, dispatch_function_t func) +dispatch_queue_set_width(dispatch_queue_t dq, long width) { - dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - - if (!dc) { - return _dispatch_barrier_async_f_slow(dq, context, func); + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; } - - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = context; - - _dispatch_queue_push(dq, dc); + dispatch_barrier_async_f(dq, (void*)(intptr_t)width, + _dispatch_queue_set_width2); } -DISPATCH_NOINLINE +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol static void -_dispatch_async_f_slow(dispatch_queue_t dq, void *context, dispatch_function_t func) +_dispatch_set_target_queue2(void *ctxt) { - dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_from_heap()); - - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = context; + dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(); - _dispatch_queue_push(dq, dc); + prev_dq = dq->do_targetq; + dq->do_targetq = ctxt; + _dispatch_release(prev_dq); } -#ifdef __BLOCKS__ void -dispatch_async(dispatch_queue_t dq, void (^work)(void)) 
+dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) { - dispatch_async_f(dq, _dispatch_Block_copy(work), _dispatch_call_block_and_release); + dispatch_queue_t prev_dq; + unsigned long type; + + if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + return; + } + type = dx_type(dou._do) & _DISPATCH_META_TYPE_MASK; + if (slowpath(!dq)) { + bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && + slowpath(dou._dq->dq_width > 1)); + dq = _dispatch_get_root_queue(0, !is_concurrent_q); + } + // TODO: put into the vtable + switch(type) { + case _DISPATCH_QUEUE_TYPE: + case _DISPATCH_SOURCE_TYPE: + _dispatch_retain(dq); + return dispatch_barrier_async_f(dou._dq, dq, + _dispatch_set_target_queue2); + case _DISPATCH_IO_TYPE: + return _dispatch_io_set_target_queue(dou._dchannel, dq); + default: + _dispatch_retain(dq); + dispatch_atomic_store_barrier(); + prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq); + if (prev_dq) _dispatch_release(prev_dq); + return; + } } -#endif -DISPATCH_NOINLINE void -dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +dispatch_set_current_target_queue(dispatch_queue_t dq) { - dispatch_continuation_t dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + dispatch_queue_t queue = _dispatch_queue_get_current(); - // unlike dispatch_sync_f(), we do NOT need to check the queue width, - // the "drain" function will do this test - - if (!dc) { - return _dispatch_async_f_slow(dq, ctxt, func); + if (slowpath(!queue)) { + DISPATCH_CLIENT_CRASH("SPI not called from a queue"); + } + if (slowpath(queue->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_CLIENT_CRASH("SPI not supported on this queue"); } + if (slowpath(queue->dq_width != 1)) { + DISPATCH_CLIENT_CRASH("SPI not called from a serial queue"); + } + if (slowpath(!dq)) { + dq = _dispatch_get_root_queue(0, true); + } + _dispatch_retain(dq); + _dispatch_set_target_queue2(dq); +} + +#pragma mark - +#pragma mark 
dispatch_queue_specific + +struct dispatch_queue_specific_queue_s { + DISPATCH_STRUCT_HEADER(dispatch_queue_specific_queue_s, + dispatch_queue_specific_queue_vtable_s); + DISPATCH_QUEUE_HEADER; + union { + char _dqsq_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; + struct { + char dq_label[16]; + TAILQ_HEAD(dispatch_queue_specific_head_s, + dispatch_queue_specific_s) dqsq_contexts; + }; + }; +}; +DISPATCH_DECL(dispatch_queue_specific_queue); - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; +static void +_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); - _dispatch_queue_push(dq, dc); -} +struct dispatch_queue_specific_queue_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_queue_specific_queue_s); +}; -struct dispatch_barrier_sync_slow2_s { - dispatch_function_t dbss2_func; - dispatch_function_t dbss2_ctxt; - dispatch_semaphore_t dbss2_sema; +static const struct dispatch_queue_specific_queue_vtable_s + _dispatch_queue_specific_queue_vtable = { + .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, + .do_kind = "queue-context", + .do_dispose = _dispatch_queue_specific_queue_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dispatch_queue_debug, }; +struct dispatch_queue_specific_s { + const void *dqs_key; + void *dqs_ctxt; + dispatch_function_t dqs_destructor; + TAILQ_ENTRY(dispatch_queue_specific_s) dqs_list; +}; +DISPATCH_DECL(dispatch_queue_specific); + static void -_dispatch_barrier_sync_f_slow_invoke(void *ctxt) +_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) { - struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt; + dispatch_queue_specific_t dqs, tmp; - dbss2->dbss2_func(dbss2->dbss2_ctxt); - dispatch_semaphore_signal(dbss2->dbss2_sema); + TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) { + if (dqs->dqs_destructor) { + dispatch_async_f(_dispatch_get_root_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt, + 
dqs->dqs_destructor); + } + free(dqs); + } + _dispatch_queue_dispose((dispatch_queue_t)dqsq); } -DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +_dispatch_queue_init_specific(dispatch_queue_t dq) { - struct dispatch_barrier_sync_slow2_s dbss2 = { - .dbss2_func = func, - .dbss2_ctxt = ctxt, - .dbss2_sema = _dispatch_get_thread_semaphore(), - }; - struct dispatch_barrier_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s); - } dbss = { - .do_vtable = (void *)DISPATCH_OBJ_BARRIER_BIT, - .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dbss2, - }; - - _dispatch_queue_push(dq, (void *)&dbss); + dispatch_queue_specific_queue_t dqsq; - while (dispatch_semaphore_wait(dbss2.dbss2_sema, dispatch_time(0, 3ull * NSEC_PER_SEC))) { - if (DISPATCH_OBJECT_SUSPENDED(dq)) { - continue; - } - if (_dispatch_queue_trylock(dq)) { - _dispatch_queue_drain(dq); - _dispatch_queue_unlock(dq); - } + dqsq = calloc(1ul, sizeof(struct dispatch_queue_specific_queue_s)); + _dispatch_queue_init((dispatch_queue_t)dqsq); + dqsq->do_vtable = &_dispatch_queue_specific_queue_vtable; + dqsq->do_xref_cnt = 0; + dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, + true); + dqsq->dq_width = UINT32_MAX; + strlcpy(dqsq->dq_label, "queue-specific", sizeof(dqsq->dq_label)); + TAILQ_INIT(&dqsq->dqsq_contexts); + dispatch_atomic_store_barrier(); + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, dqsq))) { + _dispatch_release((dispatch_queue_t)dqsq); } - _dispatch_put_thread_semaphore(dbss2.dbss2_sema); } -#ifdef __BLOCKS__ -void -dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) -{ - struct Block_basic *bb = (void *)work; - - dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); +static void +_dispatch_queue_set_specific(void *ctxt) +{ + dispatch_queue_specific_t dqs, dqsn = ctxt; + dispatch_queue_specific_queue_t dqsq = + 
(dispatch_queue_specific_queue_t)_dispatch_queue_get_current(); + + TAILQ_FOREACH(dqs, &dqsq->dqsq_contexts, dqs_list) { + if (dqs->dqs_key == dqsn->dqs_key) { + // Destroy previous context for existing key + if (dqs->dqs_destructor) { + dispatch_async_f(_dispatch_get_root_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt, + dqs->dqs_destructor); + } + if (dqsn->dqs_ctxt) { + // Copy new context for existing key + dqs->dqs_ctxt = dqsn->dqs_ctxt; + dqs->dqs_destructor = dqsn->dqs_destructor; + } else { + // Remove context storage for existing key + TAILQ_REMOVE(&dqsq->dqsq_contexts, dqs, dqs_list); + free(dqs); + } + return free(dqsn); + } + } + // Insert context storage for new key + TAILQ_INSERT_TAIL(&dqsq->dqsq_contexts, dqsn, dqs_list); } -#endif DISPATCH_NOINLINE void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, + void *ctxt, dispatch_function_t destructor) { - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - // 3) the queue is not weird - if (slowpath(dq->dq_items_tail) - || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) - || slowpath(!_dispatch_queue_trylock(dq))) { - return _dispatch_barrier_sync_f_slow(dq, ctxt, func); + if (slowpath(!key)) { + return; } + dispatch_queue_specific_t dqs; - _dispatch_thread_setspecific(dispatch_queue_key, dq); - func(ctxt); - _dispatch_workitem_inc(); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); - _dispatch_queue_unlock(dq); + dqs = calloc(1, sizeof(struct dispatch_queue_specific_s)); + dqs->dqs_key = key; + dqs->dqs_ctxt = ctxt; + dqs->dqs_destructor = destructor; + if (slowpath(!dq->dq_specific_q)) { + _dispatch_queue_init_specific(dq); + } + dispatch_barrier_async_f(dq->dq_specific_q, dqs, + _dispatch_queue_set_specific); } static void 
-_dispatch_sync_f_slow2(void *ctxt) -{ - dispatch_queue_t dq = _dispatch_queue_get_current(); - dispatch_atomic_add(&dq->dq_running, 2); - dispatch_semaphore_signal(ctxt); +_dispatch_queue_get_specific(void *ctxt) +{ + void **ctxtp = ctxt; + void *key = *ctxtp; + dispatch_queue_specific_queue_t dqsq = + (dispatch_queue_specific_queue_t)_dispatch_queue_get_current(); + dispatch_queue_specific_t dqs; + + TAILQ_FOREACH(dqs, &dqsq->dqsq_contexts, dqs_list) { + if (dqs->dqs_key == key) { + *ctxtp = dqs->dqs_ctxt; + return; + } + } + *ctxtp = NULL; } DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow(dispatch_queue_t dq) +void * +dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) { - // the global root queues do not need strict ordering - if (dq->do_targetq == NULL) { - dispatch_atomic_add(&dq->dq_running, 2); - return; + if (slowpath(!key)) { + return NULL; + } + void *ctxt = NULL; + + if (fastpath(dq->dq_specific_q)) { + ctxt = (void *)key; + dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); + } + return ctxt; +} + +DISPATCH_NOINLINE +void * +dispatch_get_specific(const void *key) +{ + if (slowpath(!key)) { + return NULL; + } + void *ctxt = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + + while (slowpath(dq)) { + if (slowpath(dq->dq_specific_q)) { + ctxt = (void *)key; + dispatch_sync_f(dq->dq_specific_q, &ctxt, + _dispatch_queue_get_specific); + if (ctxt) break; + } + dq = dq->do_targetq; + } + return ctxt; +} + +#pragma mark - +#pragma mark dispatch_queue_debug + +size_t +_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = dq->do_targetq; + return snprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " + "running = 0x%x, barrier = %d ", target ? 
target->dq_label : "", + target, dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); +} + +size_t +dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dq->dq_label, dq); + offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += snprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +#if DISPATCH_DEBUG +void +dispatch_debug_queue(dispatch_queue_t dq, const char* str) { + if (fastpath(dq)) { + dispatch_debug(dq, "%s", str); + } else { + _dispatch_log("queue[NULL]: %s", str); + } +} +#endif + +#if DISPATCH_PERF_MON +static OSSpinLock _dispatch_stats_lock; +static size_t _dispatch_bad_ratio; +static struct { + uint64_t time_total; + uint64_t count_total; + uint64_t thread_total; +} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set + +static void +_dispatch_queue_merge_stats(uint64_t start) +{ + uint64_t avg, delta = _dispatch_absolute_time() - start; + unsigned long count, bucket; + + count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); + + if (count) { + avg = delta / count; + bucket = flsll(avg); + } else { + bucket = 0; + } + + // 64-bit counters on 32-bit require a lock or a queue + OSSpinLockLock(&_dispatch_stats_lock); + + _dispatch_stats[bucket].time_total += delta; + _dispatch_stats[bucket].count_total += count; + _dispatch_stats[bucket].thread_total++; + + OSSpinLockUnlock(&_dispatch_stats_lock); +} +#endif + +#pragma mark - +#pragma mark dispatch_continuation_t + +static malloc_zone_t *_dispatch_ccache_zone; + +static void +_dispatch_ccache_init(void *context DISPATCH_UNUSED) +{ + _dispatch_ccache_zone = malloc_create_zone(0, 0); + dispatch_assert(_dispatch_ccache_zone); + malloc_set_zone_name(_dispatch_ccache_zone, 
"DispatchContinuations"); +} + +static dispatch_continuation_t +_dispatch_continuation_alloc_from_heap(void) +{ + static dispatch_once_t pred; + dispatch_continuation_t dc; + + dispatch_once_f(&pred, NULL, _dispatch_ccache_init); + + while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, + ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { + sleep(1); + } + + return dc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_alloc_cacheonly(void) +{ + dispatch_continuation_t dc; + dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); + } + return dc; +} + +static void +_dispatch_force_cache_cleanup(void) +{ + dispatch_continuation_t dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, NULL); + _dispatch_cache_cleanup(dc); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_cache_cleanup(void *value) +{ + dispatch_continuation_t dc, next_dc = value; + + while ((dc = next_dc)) { + next_dc = dc->do_next; + malloc_zone_free(_dispatch_ccache_zone, dc); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dispatch_continuation_t prev_dc; + prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); + dc->do_next = prev_dc; + _dispatch_thread_setspecific(dispatch_cache_key, dc); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) +{ + dispatch_continuation_t dc = dou._dc; + + _dispatch_trace_continuation_pop(dq, dou); + (void)dispatch_atomic_add2o(dq, dq_running, 2); + if (!DISPATCH_OBJ_IS_VTABLE(dc) && + (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { + dispatch_atomic_barrier(); + _dispatch_thread_semaphore_signal( + (_dispatch_thread_semaphore_t)dc->dc_ctxt); + } else { + _dispatch_async_f_redirect(dq, dc); + } +} + 
+DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_continuation_pop(dispatch_object_t dou) +{ + dispatch_continuation_t dc = dou._dc; + dispatch_group_t dg; + + _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); + if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { + return _dispatch_queue_invoke(dou._dq); + } + + // Add the item back to the cache before calling the function. This + // allows the 'hot' continuation to be used for a quick callback. + // + // The ccache version is per-thread. + // Therefore, the object has not been reused yet. + // This generates better assembly. + if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { + _dispatch_continuation_free(dc); + } + if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { + dg = dc->dc_group; + } else { + dg = NULL; + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + if (dg) { + dispatch_group_leave(dg); + _dispatch_release(dg); + } +} + +#pragma mark - +#pragma mark dispatch_barrier_async + +DISPATCH_NOINLINE +static void +_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + + _dispatch_queue_push(dq, dc); +} + +DISPATCH_NOINLINE +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc; + + dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + if (!dc) { + return _dispatch_barrier_async_f_slow(dq, ctxt, func); + } + + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + + _dispatch_queue_push(dq, dc); +} + +#ifdef __BLOCKS__ +void +dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) +{ + dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), + _dispatch_call_block_and_release); +} +#endif + 
+#pragma mark - +#pragma mark dispatch_async + +static void +_dispatch_async_f_redirect_invoke(void *_ctxt) +{ + struct dispatch_continuation_s *dc = _ctxt; + struct dispatch_continuation_s *other_dc = dc->dc_data[1]; + dispatch_queue_t old_dq, dq = dc->dc_data[0], rq; + + old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_continuation_pop(other_dc); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + + rq = dq->do_targetq; + while (slowpath(rq->do_targetq) && rq != old_dq) { + if (dispatch_atomic_sub2o(rq, dq_running, 2) == 0) { + _dispatch_wakeup(rq); + } + rq = rq->do_targetq; + } + + if (dispatch_atomic_sub2o(dq, dq_running, 2) == 0) { + _dispatch_wakeup(dq); + } + _dispatch_release(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_f2_slow(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + _dispatch_wakeup(dq); + _dispatch_queue_push(dq, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_f_redirect(dispatch_queue_t dq, + dispatch_continuation_t other_dc) +{ + dispatch_continuation_t dc; + dispatch_queue_t rq; + + _dispatch_retain(dq); + + dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + if (!dc) { + dc = _dispatch_continuation_alloc_from_heap(); + } + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dc->dc_func = _dispatch_async_f_redirect_invoke; + dc->dc_ctxt = dc; + dc->dc_data[0] = dq; + dc->dc_data[1] = other_dc; + + // Find the queue to redirect to + rq = dq->do_targetq; + while (slowpath(rq->do_targetq)) { + uint32_t running; + + if (slowpath(rq->dq_items_tail) || + slowpath(DISPATCH_OBJECT_SUSPENDED(rq)) || + slowpath(rq->dq_width == 1)) { + break; + } + running = dispatch_atomic_add2o(rq, dq_running, 2) - 2; + if (slowpath(running & 1) || slowpath(running + 2 > rq->dq_width)) { + if (slowpath(dispatch_atomic_sub2o(rq, dq_running, 2) == 0)) { + return _dispatch_async_f2_slow(rq, dc); + } + break; + } + rq = rq->do_targetq; + } + 
_dispatch_queue_push(rq, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + uint32_t running; + bool locked; + + do { + if (slowpath(dq->dq_items_tail) + || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { + break; + } + running = dispatch_atomic_add2o(dq, dq_running, 2); + if (slowpath(running > dq->dq_width)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + return _dispatch_async_f2_slow(dq, dc); + } + break; + } + locked = running & 1; + if (fastpath(!locked)) { + return _dispatch_async_f_redirect(dq, dc); + } + locked = dispatch_atomic_sub2o(dq, dq_running, 2) & 1; + // We might get lucky and find that the barrier has ended by now + } while (!locked); + + _dispatch_queue_push(dq, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + + // No fastpath/slowpath hint because we simply don't know + if (dq->do_targetq) { + return _dispatch_async_f2(dq, dc); } - struct dispatch_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s); - } dss = { - .do_vtable = NULL, - .dc_func = _dispatch_sync_f_slow2, - .dc_ctxt = _dispatch_get_thread_semaphore(), - }; + _dispatch_queue_push(dq, dc); +} - // XXX FIXME -- concurrent queues can be come serial again - _dispatch_queue_push(dq, (void *)&dss); +DISPATCH_NOINLINE +void +dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + dispatch_continuation_t dc; + + // No fastpath/slowpath hint because we simply don't know + if (dq->dq_width == 1) { + return dispatch_barrier_async_f(dq, ctxt, func); + } + + dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + if (!dc) { + return _dispatch_async_f_slow(dq, ctxt, func); + } + + dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; 
+ dc->dc_func = func; + dc->dc_ctxt = ctxt; + + // No fastpath/slowpath hint because we simply don't know + if (dq->do_targetq) { + return _dispatch_async_f2(dq, dc); + } - dispatch_semaphore_wait(dss.dc_ctxt, DISPATCH_TIME_FOREVER); - _dispatch_put_thread_semaphore(dss.dc_ctxt); + _dispatch_queue_push(dq, dc); } #ifdef __BLOCKS__ void -dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +dispatch_async(dispatch_queue_t dq, void (^work)(void)) { - struct Block_basic *bb = (void *)work; - dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_async_f(dq, _dispatch_Block_copy(work), + _dispatch_call_block_and_release); } #endif +#pragma mark - +#pragma mark dispatch_group_async + DISPATCH_NOINLINE void -dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - typeof(dq->dq_running) prev_cnt; - dispatch_queue_t old_dq; + dispatch_continuation_t dc; - if (dq->dq_width == 1) { - return dispatch_barrier_sync_f(dq, ctxt, func); + _dispatch_retain(dg); + dispatch_group_enter(dg); + + dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + if (!dc) { + dc = _dispatch_continuation_alloc_from_heap(); } - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { - _dispatch_sync_f_slow(dq); - } else { - prev_cnt = dispatch_atomic_add(&dq->dq_running, 2) - 2; + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_group = dg; - if (slowpath(prev_cnt & 1)) { - if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) { - _dispatch_wakeup(dq); - } - _dispatch_sync_f_slow(dq); - } + // No fastpath/slowpath hint because we simply don't know + if (dq->dq_width != 1 && dq->do_targetq) { + return _dispatch_async_f2(dq, dc); } - old_dq = 
_dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_queue_push(dq, dc); +} + +#ifdef __BLOCKS__ +void +dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_block_t db) +{ + dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), + _dispatch_call_block_and_release); +} +#endif + +#pragma mark - +#pragma mark dispatch_function_invoke + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); - func(ctxt); + _dispatch_client_callout(ctxt, func); _dispatch_workitem_inc(); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +} - if (slowpath(dispatch_atomic_sub(&dq->dq_running, 2) == 0)) { - _dispatch_wakeup(dq); - } +struct dispatch_function_recurse_s { + dispatch_queue_t dfr_dq; + void* dfr_ctxt; + dispatch_function_t dfr_func; +}; + +static void +_dispatch_function_recurse_invoke(void *ctxt) +{ + struct dispatch_function_recurse_s *dfr = ctxt; + _dispatch_function_invoke(dfr->dfr_dq, dfr->dfr_ctxt, dfr->dfr_func); } -const char * -dispatch_queue_get_label(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - return dq->dq_label; + struct dispatch_function_recurse_s dfr = { + .dfr_dq = dq, + .dfr_func = func, + .dfr_ctxt = ctxt, + }; + dispatch_sync_f(dq->do_targetq, &dfr, _dispatch_function_recurse_invoke); } +#pragma mark - +#pragma mark dispatch_barrier_sync + +struct dispatch_barrier_sync_slow_s { + DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s); +}; + +struct dispatch_barrier_sync_slow2_s { + dispatch_queue_t dbss2_dq; #if DISPATCH_COCOA_COMPAT + dispatch_function_t dbss2_func; + void *dbss2_ctxt; +#endif + _dispatch_thread_semaphore_t dbss2_sema; +}; + static void -_dispatch_main_q_port_init(void *ctxt 
__attribute__((unused))) +_dispatch_barrier_sync_f_slow_invoke(void *ctxt) { - kern_return_t kr; + struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt; - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, MACH_MSG_TYPE_MAKE_SEND); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); + dispatch_assert(dbss2->dbss2_dq == _dispatch_queue_get_current()); +#if DISPATCH_COCOA_COMPAT + // When the main queue is bound to the main thread + if (dbss2->dbss2_dq == &_dispatch_main_q && pthread_main_np()) { + dbss2->dbss2_func(dbss2->dbss2_ctxt); + dbss2->dbss2_func = NULL; + dispatch_atomic_barrier(); + _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); + return; + } +#endif + (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL); + // rdar://9032024 running lock must be held until sync_f_slow returns + (void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); + dispatch_atomic_barrier(); + _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); +} - _dispatch_program_is_probably_callback_driven = true; - _dispatch_safe_fork = false; +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + // It's preferred to execute synchronous blocks on the current thread + // due to thread-local side effects, garbage collection, etc. 
However, + // blocks submitted to the main thread MUST be run on the main thread + + struct dispatch_barrier_sync_slow2_s dbss2 = { + .dbss2_dq = dq, +#if DISPATCH_COCOA_COMPAT + .dbss2_func = func, + .dbss2_ctxt = ctxt, +#endif + .dbss2_sema = _dispatch_get_thread_semaphore(), + }; + struct dispatch_barrier_sync_slow_s dbss = { + .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | + DISPATCH_OBJ_SYNC_SLOW_BIT), + .dc_func = _dispatch_barrier_sync_f_slow_invoke, + .dc_ctxt = &dbss2, + }; + _dispatch_queue_push(dq, (void *)&dbss); + + _dispatch_thread_semaphore_wait(dbss2.dbss2_sema); + _dispatch_put_thread_semaphore(dbss2.dbss2_sema); + +#if DISPATCH_COCOA_COMPAT + // Main queue bound to main thread + if (dbss2.dbss2_func == NULL) { + return; + } +#endif + dispatch_atomic_acquire_barrier(); + if (slowpath(dq->do_targetq) && slowpath(dq->do_targetq->do_targetq)) { + _dispatch_function_recurse(dq, ctxt, func); + } else { + _dispatch_function_invoke(dq, ctxt, func); + } + dispatch_atomic_release_barrier(); + if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL)) { + // rdar://problem/8290662 "lock transfer" + // ensure drain of current barrier sync has finished + while (slowpath(dq->dq_running > 2)) { + _dispatch_hardware_pause(); + } + _dispatch_thread_semaphore_t sema; + sema = _dispatch_queue_drain_one_barrier_sync(dq); + if (sema) { + _dispatch_thread_semaphore_signal(sema); + return; + } + } + (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + _dispatch_wakeup(dq); + } } -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol DISPATCH_NOINLINE static void -_dispatch_queue_set_mainq_drain_state(bool arg) +_dispatch_barrier_sync_f2(dispatch_queue_t dq) +{ + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { + // rdar://problem/8290662 "lock transfer" + _dispatch_thread_semaphore_t sema; + sema = 
_dispatch_queue_drain_one_barrier_sync(dq); + if (sema) { + (void)dispatch_atomic_add2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL); + // rdar://9032024 running lock must be held until sync_f_slow + // returns: increment by 2 and decrement by 1 + (void)dispatch_atomic_inc2o(dq, dq_running); + _dispatch_thread_semaphore_signal(sema); + return; + } + } + if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - main_q_is_draining = arg; + dispatch_atomic_acquire_barrier(); + _dispatch_function_invoke(dq, ctxt, func); + dispatch_atomic_release_barrier(); + if (slowpath(dq->dq_items_tail)) { + return _dispatch_barrier_sync_f2(dq); + } + if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_atomic_acquire_barrier(); + _dispatch_function_recurse(dq, ctxt, func); + dispatch_atomic_release_barrier(); + if (slowpath(dq->dq_items_tail)) { + return _dispatch_barrier_sync_f2(dq); + } + if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + _dispatch_wakeup(dq); + } } -#endif +DISPATCH_NOINLINE void -dispatch_main(void) +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - if (pthread_main_np()) { - _dispatch_program_is_probably_callback_driven = true; - pthread_exit(NULL); - DISPATCH_CRASH("pthread_exit() returned"); + // 1) ensure that this thread hasn't enqueued anything ahead of this call + // 2) the queue is not suspended + if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ + return _dispatch_barrier_sync_f_slow(dq, ctxt, func); } - DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread"); + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, 
dq_running, 0, 1))) { + // global queues and main queue bound to main thread always falls into + // the slow case + return _dispatch_barrier_sync_f_slow(dq, ctxt, func); + } + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_barrier_sync_f_recurse(dq, ctxt, func); + } + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } +#ifdef __BLOCKS__ +#if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE static void -_dispatch_sigsuspend(void *ctxt __attribute__((unused))) +_dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) { - static const sigset_t mask; + // Blocks submitted to the main queue MUST be run on the main thread, + // therefore under GC we must Block_copy in order to notify the thread-local + // garbage collector that the objects are transferring to the main thread + // rdar://problem/7176237&7181849&7458685 + if (dispatch_begin_thread_4GC) { + dispatch_block_t block = _dispatch_Block_copy(work); + return dispatch_barrier_sync_f(dq, block, + _dispatch_call_block_and_release); + } + struct Block_basic *bb = (void *)work; + dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); +} +#endif - for (;;) { - sigsuspend(&mask); +void +dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) +{ +#if DISPATCH_COCOA_COMPAT + if (slowpath(dq == &_dispatch_main_q)) { + return _dispatch_barrier_sync_slow(dq, work); } +#endif + struct Block_basic *bb = (void *)work; + dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); } +#endif + +#pragma mark - +#pragma mark dispatch_sync DISPATCH_NOINLINE static void -_dispatch_queue_cleanup2(void) +_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_atomic_dec(&_dispatch_main_q.dq_running); + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + struct dispatch_sync_slow_s { + DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s); + } dss = { + .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, + .dc_ctxt = (void*)sema, 
+ }; + _dispatch_queue_push(dq, (void *)&dss); - if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { - _dispatch_wakeup(&_dispatch_main_q); + _dispatch_thread_semaphore_wait(sema); + _dispatch_put_thread_semaphore(sema); + + if (slowpath(dq->do_targetq->do_targetq)) { + _dispatch_function_recurse(dq, ctxt, func); + } else { + _dispatch_function_invoke(dq, ctxt, func); + } + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + _dispatch_wakeup(dq); } +} - // overload the "probably" variable to mean that dispatch_main() or - // similar non-POSIX API was called - // this has to run before the DISPATCH_COCOA_COMPAT below - if (_dispatch_program_is_probably_callback_driven) { - dispatch_async_f(_dispatch_get_root_queue(0, 0), NULL, _dispatch_sigsuspend); - sleep(1); // workaround 6778970 +DISPATCH_NOINLINE +static void +_dispatch_sync_f_slow2(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + _dispatch_wakeup(dq); } + _dispatch_sync_f_slow(dq, ctxt, func); +} -#if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); +DISPATCH_NOINLINE +static void +_dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_function_invoke(dq, ctxt, func); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + _dispatch_wakeup(dq); + } +} - mach_port_t mp = main_q_port; - kern_return_t kr; +DISPATCH_NOINLINE +static void +_dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_function_recurse(dq, ctxt, func); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + _dispatch_wakeup(dq); + } +} - main_q_port = 0; +DISPATCH_NOINLINE +static void +_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + // 1) ensure that this thread hasn't enqueued anything ahead of this call 
+ // 2) the queue is not suspended + if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ + return _dispatch_sync_f_slow(dq, ctxt, func); + } + if (slowpath(dispatch_atomic_add2o(dq, dq_running, 2) & 1)) { + return _dispatch_sync_f_slow2(dq, ctxt, func); + } + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_sync_f_recurse(dq, ctxt, func); + } + _dispatch_sync_f_invoke(dq, ctxt, func); +} - if (mp) { - kr = mach_port_deallocate(mach_task_self(), mp); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); +DISPATCH_NOINLINE +void +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + if (fastpath(dq->dq_width == 1)) { + return dispatch_barrier_sync_f(dq, ctxt, func); } -#endif + if (slowpath(!dq->do_targetq)) { + // the global root queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2); + return _dispatch_sync_f_invoke(dq, ctxt, func); + } + _dispatch_sync_f2(dq, ctxt, func); +} + +#ifdef __BLOCKS__ +#if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE +static void +_dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) +{ + // Blocks submitted to the main queue MUST be run on the main thread, + // therefore under GC we must Block_copy in order to notify the thread-local + // garbage collector that the objects are transferring to the main thread + // rdar://problem/7176237&7181849&7458685 + if (dispatch_begin_thread_4GC) { + dispatch_block_t block = _dispatch_Block_copy(work); + return dispatch_sync_f(dq, block, _dispatch_call_block_and_release); + } + struct Block_basic *bb = (void *)work; + dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); } +#endif -dispatch_queue_t -dispatch_get_concurrent_queue(long pri) +void +dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { - if (pri > 0) { - pri = DISPATCH_QUEUE_PRIORITY_HIGH; - } 
else if (pri < 0) { - pri = DISPATCH_QUEUE_PRIORITY_LOW; +#if DISPATCH_COCOA_COMPAT + if (slowpath(dq == &_dispatch_main_q)) { + return _dispatch_sync_slow(dq, work); } - return _dispatch_get_root_queue(pri, false); +#endif + struct Block_basic *bb = (void *)work; + dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); } +#endif + +#pragma mark - +#pragma mark dispatch_after + +struct _dispatch_after_time_s { + void *datc_ctxt; + void (*datc_func)(void *); + dispatch_source_t ds; +}; static void -_dispatch_queue_cleanup(void *ctxt) +_dispatch_after_timer_callback(void *ctxt) { - if (ctxt == &_dispatch_main_q) { - return _dispatch_queue_cleanup2(); - } - // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); -} + struct _dispatch_after_time_s *datc = ctxt; -dispatch_queue_t -dispatch_get_global_queue(long priority, unsigned long flags) -{ - if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) { - return NULL; - } - return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT); + dispatch_assert(datc->datc_func); + _dispatch_client_callout(datc->datc_ctxt, datc->datc_func); + + dispatch_source_t ds = datc->ds; + free(datc); + + dispatch_source_cancel(ds); // Needed until 7287561 gets integrated + dispatch_release(ds); } -#define countof(x) (sizeof(x) / sizeof(x[0])) +DISPATCH_NOINLINE void -libdispatch_init(void) +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, + dispatch_function_t func) { - dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 3); - dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 6); - - dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH); - dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT); - dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT); - dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); + uint64_t 
delta; + struct _dispatch_after_time_s *datc = NULL; + dispatch_source_t ds; - _dispatch_thread_key_init_np(dispatch_queue_key, _dispatch_queue_cleanup); - _dispatch_thread_key_init_np(dispatch_sema4_key, (void (*)(void *))dispatch_release); // use the extern release - _dispatch_thread_key_init_np(dispatch_cache_key, _dispatch_cache_cleanup2); -#if DISPATCH_PERF_MON - _dispatch_thread_key_init_np(dispatch_bcounter_key, NULL); + if (when == DISPATCH_TIME_FOREVER) { +#if DISPATCH_DEBUG + DISPATCH_CLIENT_CRASH( + "dispatch_after_f() called with 'when' == infinity"); #endif + return; + } - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + // this function can and should be optimized to not use a dispatch source + delta = _dispatch_timeout(when); + if (delta == 0) { + return dispatch_async_f(queue, ctxt, func); + } + // on successful creation, source owns malloc-ed context (which it frees in + // the event handler) + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + dispatch_assert(ds); + + datc = malloc(sizeof(*datc)); + dispatch_assert(datc); + datc->datc_ctxt = ctxt; + datc->datc_func = func; + datc->ds = ds; - _dispatch_queue_set_width_init(); + dispatch_set_context(ds, datc); + dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback); + dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, 0); + dispatch_resume(ds); } +#ifdef __BLOCKS__ void -_dispatch_queue_unlock(dispatch_queue_t dq) +dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + dispatch_block_t work) { - if (slowpath(dispatch_atomic_dec(&dq->dq_running))) { + // test before the copy of the block + if (when == DISPATCH_TIME_FOREVER) { +#if DISPATCH_DEBUG + DISPATCH_CLIENT_CRASH( + "dispatch_after() called with 'when' == infinity"); +#endif return; } + dispatch_after_f(when, queue, _dispatch_Block_copy(work), + _dispatch_call_block_and_release); +} +#endif + +#pragma mark - +#pragma mark dispatch_wakeup +DISPATCH_NOINLINE +void 
+_dispatch_queue_push_list_slow(dispatch_queue_t dq, + struct dispatch_object_s *obj) +{ + // The queue must be retained before dq_items_head is written in order + // to ensure that the reference is still valid when _dispatch_wakeup is + // called. Otherwise, if preempted between the assignment to + // dq_items_head and _dispatch_wakeup, the blocks submitted to the + // queue may release the last reference to the queue when invoked by + // _dispatch_queue_drain. + _dispatch_retain(dq); + dq->dq_items_head = obj; _dispatch_wakeup(dq); + _dispatch_release(dq); } -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou) { @@ -997,7 +1837,11 @@ _dispatch_wakeup(dispatch_object_t dou) return NULL; } - if (!_dispatch_trylock(dou._do)) { + // _dispatch_source_invoke() relies on this testing the whole suspend count + // word, not just the lock bit. In other words, no point taking the lock + // if the source is suspended or canceled. 
+ if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, + DISPATCH_OBJECT_SUSPEND_LOCK)) { #if DISPATCH_COCOA_COMPAT if (dou._dq == &_dispatch_main_q) { _dispatch_queue_wakeup_main(); @@ -1008,7 +1852,8 @@ _dispatch_wakeup(dispatch_object_t dou) _dispatch_retain(dou._do); tq = dou._do->do_targetq; _dispatch_queue_push(tq, dou._do); - return tq; // libdispatch doesn't need this, but the Instrument DTrace probe does + return tq; // libdispatch does not need this, but the Instrument DTrace + // probe does } #if DISPATCH_COCOA_COMPAT @@ -1018,7 +1863,8 @@ _dispatch_queue_wakeup_main(void) { kern_return_t kr; - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, + _dispatch_main_q_port_init); kr = _dispatch_send_wakeup_main_thread(main_q_port, 0); @@ -1028,7 +1874,7 @@ _dispatch_queue_wakeup_main(void) case MACH_SEND_INVALID_DEST: break; default: - dispatch_assume_zero(kr); + (void)dispatch_assume_zero(kr); break; } @@ -1036,78 +1882,12 @@ _dispatch_queue_wakeup_main(void) } #endif -static inline int -_dispatch_rootq2wq_pri(long idx) -{ -#ifdef WORKQ_DEFAULT_PRIOQUEUE - switch (idx) { - case 0: - case 1: - return WORKQ_LOW_PRIOQUEUE; - case 2: - case 3: - default: - return WORKQ_DEFAULT_PRIOQUEUE; - case 4: - case 5: - return WORKQ_HIGH_PRIOQUEUE; - } -#else - return pri; -#endif -} - -static void -_dispatch_root_queues_init(void *context __attribute__((unused))) -{ - bool disable_wq = getenv("LIBDISPATCH_DISABLE_KWQ"); - pthread_workqueue_attr_t pwq_attr; - kern_return_t kr; - int i, r; - - r = pthread_workqueue_attr_init_np(&pwq_attr); - dispatch_assume_zero(r); - - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, _dispatch_rootq2wq_pri(i)); - dispatch_assume_zero(r); - r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1); - dispatch_assume_zero(r); -// some software hangs if the non-overcommitting queues do not 
overcommit when threads block -#if 0 - if (!(i & 1)) { - dispatch_root_queue_contexts[i].dgq_thread_pool_size = _dispatch_hw_config.cc_max_active; - } -#endif - - r = 0; - if (disable_wq || (r = pthread_workqueue_create_np(&_dispatch_root_queue_contexts[i].dgq_kworkqueue, &pwq_attr))) { - if (r != ENOTSUP) { - dispatch_assume_zero(r); - } - // override the default FIFO behavior for the pool semaphores - kr = semaphore_create(mach_task_self(), &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - dispatch_assume(_dispatch_thread_mediator[i].dsema_port); - } else { - dispatch_assume(_dispatch_root_queue_contexts[i].dgq_kworkqueue); - } - } - - r = pthread_workqueue_attr_destroy_np(&pwq_attr); - dispatch_assume_zero(r); -} - -bool +static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq) { static dispatch_once_t pred; struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - pthread_workitem_handle_t wh; - unsigned int gen_cnt; - pthread_t pthr; - int r, t_count; + int r; if (!dq->dq_items_tail) { return false; @@ -1119,200 +1899,146 @@ _dispatch_queue_wakeup_global(dispatch_queue_t dq) dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); - if (qc->dgq_kworkqueue) { - if (dispatch_atomic_cmpxchg(&qc->dgq_pending, 0, 1)) { +#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_ENABLE_THREAD_POOL + if (qc->dgq_kworkqueue) +#endif + { + if (dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, 1)) { + pthread_workitem_handle_t wh; + unsigned int gen_cnt; _dispatch_debug("requesting new worker thread"); - r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread2, dq, &wh, &gen_cnt); - dispatch_assume_zero(r); + r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, + _dispatch_worker_thread2, dq, &wh, &gen_cnt); + (void)dispatch_assume_zero(r); } else { - _dispatch_debug("work thread request still pending on global queue: %p", dq); + _dispatch_debug("work thread request still pending on global " + 
"queue: %p", dq); } goto out; } - +#endif // HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_ENABLE_THREAD_POOL if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { goto out; } + pthread_t pthr; + int t_count; do { t_count = qc->dgq_thread_pool_size; if (!t_count) { _dispatch_debug("The thread pool is full: %p", dq); goto out; } - } while (!dispatch_atomic_cmpxchg(&qc->dgq_thread_pool_size, t_count, t_count - 1)); + } while (!dispatch_atomic_cmpxchg2o(qc, dgq_thread_pool_size, t_count, + t_count - 1)); while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) { if (r != EAGAIN) { - dispatch_assume_zero(r); + (void)dispatch_assume_zero(r); } sleep(1); } r = pthread_detach(pthr); - dispatch_assume_zero(r); + (void)dispatch_assume_zero(r); +#endif // DISPATCH_ENABLE_THREAD_POOL out: return false; } -void -_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) -{ -#if DISPATCH_PERF_MON - uint64_t start = mach_absolute_time(); -#endif - _dispatch_queue_drain(dq); -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif - _dispatch_force_cache_cleanup(); -} +#pragma mark - +#pragma mark dispatch_queue_drain -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol DISPATCH_NOINLINE void _dispatch_queue_invoke(dispatch_queue_t dq) { - dispatch_queue_t tq = dq->do_targetq; - - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && fastpath(_dispatch_queue_trylock(dq))) { + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && + fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { + dispatch_atomic_acquire_barrier(); + dispatch_queue_t otq = dq->do_targetq, tq = NULL; _dispatch_queue_drain(dq); - if (tq == dq->do_targetq) { + if (dq->do_vtable->do_invoke) { + // Assume that object invoke checks it is executing on correct queue tq = dx_invoke(dq); - } else { + } else if (slowpath(otq != dq->do_targetq)) { + // An item on 
the queue changed the target queue tq = dq->do_targetq; } // We do not need to check the result. // When the suspend-count lock is dropped, then the check will happen. - dispatch_atomic_dec(&dq->dq_running); + dispatch_atomic_release_barrier(); + (void)dispatch_atomic_dec2o(dq, dq_running); if (tq) { return _dispatch_queue_push(tq, dq); } } dq->do_next = DISPATCH_OBJECT_LISTLESS; - if (dispatch_atomic_sub(&dq->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { + if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK)) { if (dq->dq_running == 0) { - _dispatch_wakeup(dq); // verify that the queue is idle + _dispatch_wakeup(dq); // verify that the queue is idle } } - _dispatch_release(dq); // added when the queue is put on the list -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -static void -_dispatch_set_target_queue2(void *ctxt) -{ - dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(); - - prev_dq = dq->do_targetq; - dq->do_targetq = ctxt; - _dispatch_release(prev_dq); -} - -void -dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) -{ - if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - return; - } - // NOTE: we test for NULL target queues internally to detect root queues - // therefore, if the retain crashes due to a bad input, that is OK - _dispatch_retain(dq); - dispatch_barrier_async_f(dou._dq, dq, _dispatch_set_target_queue2); -} - -static void -_dispatch_async_f_redirect2(void *_ctxt) -{ - struct dispatch_continuation_s *dc = _ctxt; - struct dispatch_continuation_s *other_dc = dc->dc_data[1]; - dispatch_queue_t old_dq, dq = dc->dc_data[0]; - - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - _dispatch_continuation_pop(other_dc); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); - - if (dispatch_atomic_sub(&dq->dq_running, 2) == 0) { - _dispatch_wakeup(dq); - 
} - _dispatch_release(dq); + _dispatch_release(dq); // added when the queue is put on the list } static void -_dispatch_async_f_redirect(dispatch_queue_t dq, struct dispatch_object_s *other_dc) -{ - dispatch_continuation_t dc = (void *)other_dc; - dispatch_queue_t root_dq = dq; - - if (dc->dc_func == _dispatch_sync_f_slow2) { - return dc->dc_func(dc->dc_ctxt); - } - - dispatch_atomic_add(&dq->dq_running, 2); - _dispatch_retain(dq); - - dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap(); - - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = _dispatch_async_f_redirect2; - dc->dc_ctxt = dc; - dc->dc_data[0] = dq; - dc->dc_data[1] = other_dc; - - do { - root_dq = root_dq->do_targetq; - } while (root_dq->do_targetq); - - _dispatch_queue_push(root_dq, dc); -} - - -void _dispatch_queue_drain(dispatch_queue_t dq) { - dispatch_queue_t orig_tq, old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + dispatch_queue_t orig_tq, old_dq; + old_dq = _dispatch_thread_getspecific(dispatch_queue_key); struct dispatch_object_s *dc = NULL, *next_dc = NULL; + // Continue draining sources after target queue change rdar://8928171 + bool check_tq = (dx_type(dq) != DISPATCH_SOURCE_KEVENT_TYPE); + orig_tq = dq->do_targetq; _dispatch_thread_setspecific(dispatch_queue_key, dq); + //dispatch_debug_queue(dq, __PRETTY_FUNCTION__); while (dq->dq_items_tail) { - while (!fastpath(dq->dq_items_head)) { + while (!(dc = fastpath(dq->dq_items_head))) { _dispatch_hardware_pause(); } - - dc = dq->dq_items_head; dq->dq_items_head = NULL; - do { - // Enqueue is TIGHTLY controlled, we won't wait long. - do { - next_dc = fastpath(dc->do_next); - } while (!next_dc && !dispatch_atomic_cmpxchg(&dq->dq_items_tail, dc, NULL)); + next_dc = fastpath(dc->do_next); + if (!next_dc && + !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ while (!(next_dc = fastpath(dc->do_next))) { + _dispatch_hardware_pause(); + } + } if (DISPATCH_OBJECT_SUSPENDED(dq)) { goto out; } if (dq->dq_running > dq->dq_width) { goto out; } - if (orig_tq != dq->do_targetq) { + if (slowpath(orig_tq != dq->do_targetq) && check_tq) { goto out; } if (fastpath(dq->dq_width == 1)) { _dispatch_continuation_pop(dc); _dispatch_workitem_inc(); - } else if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { + } else if (!DISPATCH_OBJ_IS_VTABLE(dc) && + (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { if (dq->dq_running > 1) { goto out; } _dispatch_continuation_pop(dc); _dispatch_workitem_inc(); } else { - _dispatch_async_f_redirect(dq, dc); + _dispatch_continuation_redirect(dq, dc); } } while ((dc = next_dc)); } @@ -1322,56 +2048,193 @@ _dispatch_queue_drain(dispatch_queue_t dq) if (slowpath(dc)) { // 'dc' must NOT be "popped" // 'dc' might be the last item - if (next_dc || dispatch_atomic_cmpxchg(&dq->dq_items_tail, NULL, dc)) { - dq->dq_items_head = dc; - } else { - while (!(next_dc = dq->dq_items_head)) { + if (!next_dc && + !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, NULL, dc)) { + // wait for enqueue slow path to finish + while (!(next_dc = fastpath(dq->dq_items_head))) { _dispatch_hardware_pause(); } - dq->dq_items_head = dc; dc->do_next = next_dc; } + dq->dq_items_head = dc; } _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -void * -_dispatch_worker_thread(void *context) +static void +_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) { - dispatch_queue_t dq = context; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - sigset_t mask; - int r; +#if DISPATCH_PERF_MON + uint64_t start = _dispatch_absolute_time(); +#endif + _dispatch_queue_drain(dq); +#if DISPATCH_PERF_MON + _dispatch_queue_merge_stats(start); +#endif + _dispatch_force_cache_cleanup(); +} - // workaround tweaks the kernel workqueue does 
for us - r = sigfillset(&mask); - dispatch_assume_zero(r); - r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); - dispatch_assume_zero(r); +#if DISPATCH_COCOA_COMPAT +void +_dispatch_main_queue_drain(void) +{ + dispatch_queue_t dq = &_dispatch_main_q; + if (!dq->dq_items_tail) { + return; + } + struct dispatch_main_queue_drain_marker_s { + DISPATCH_CONTINUATION_HEADER(dispatch_main_queue_drain_marker_s); + } marker = { + .do_vtable = NULL, + }; + struct dispatch_object_s *dmarker = (void*)▮ + _dispatch_queue_push_notrace(dq, dmarker); + +#if DISPATCH_PERF_MON + uint64_t start = _dispatch_absolute_time(); +#endif + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + while (dq->dq_items_tail) { + while (!(dc = fastpath(dq->dq_items_head))) { + _dispatch_hardware_pause(); + } + dq->dq_items_head = NULL; + do { + next_dc = fastpath(dc->do_next); + if (!next_dc && + !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ while (!(next_dc = fastpath(dc->do_next))) { + _dispatch_hardware_pause(); + } + } + if (dc == dmarker) { + if (next_dc) { + dq->dq_items_head = next_dc; + _dispatch_queue_wakeup_main(); + } + goto out; + } + _dispatch_continuation_pop(dc); + _dispatch_workitem_inc(); + } while ((dc = next_dc)); + } + dispatch_assert(dc); // did not encounter marker + +out: + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +#if DISPATCH_PERF_MON + _dispatch_queue_merge_stats(start); +#endif + _dispatch_force_cache_cleanup(); +} +#endif + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline _dispatch_thread_semaphore_t +_dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) +{ + // rdar://problem/8290662 "lock transfer" + struct dispatch_object_s *dc, *next_dc; + + // queue is locked, or suspended and not being drained + dc = dq->dq_items_head; + if (slowpath(!dc) || DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) { + return 0; + } + // dequeue dc, it is a barrier sync + next_dc = fastpath(dc->do_next); + dq->dq_items_head = next_dc; + if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ while (!(next_dc = fastpath(dc->do_next))) { + _dispatch_hardware_pause(); + } + dq->dq_items_head = next_dc; + } + _dispatch_trace_continuation_pop(dq, dc); + _dispatch_workitem_inc(); + + struct dispatch_barrier_sync_slow_s *dbssp = (void *)dc; + struct dispatch_barrier_sync_slow2_s *dbss2p = dbssp->dc_ctxt; + return dbss2p->dbss2_sema; +} + +static struct dispatch_object_s * +_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) +{ + struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; + + // The mediator value acts both as a "lock" and a signal + head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator); + + if (slowpath(head == NULL)) { + // The first xchg on the tail will tell the enqueueing thread that it + // is safe to blindly write out to the head pointer. A cmpxchg honors + // the algorithm. + (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL); + _dispatch_debug("no work on global work queue"); + return NULL; + } + + if (slowpath(head == mediator)) { + // This thread lost the race for ownership of the queue. + // + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. + _dispatch_debug("Contention on queue: %p", dq); + _dispatch_queue_wakeup_global(dq); +#if DISPATCH_PERF_MON + dispatch_atomic_inc(&_dispatch_bad_ratio); +#endif + return NULL; + } + + // Restore the head pointer to a sane value before returning. + // If 'next' is NULL, then this item _might_ be the last item. 
+ next = fastpath(head->do_next); - do { - _dispatch_worker_thread2(context); - // we use 65 seconds in case there are any timers that run once a minute - } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); + if (slowpath(!next)) { + dq->dq_items_head = NULL; - dispatch_atomic_inc(&qc->dgq_thread_pool_size); - if (dq->dq_items_tail) { - _dispatch_queue_wakeup_global(dq); + if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL)) { + // both head and tail are NULL now + goto out; + } + + // There must be a next item now. This thread won't wait long. + while (!(next = head->do_next)) { + _dispatch_hardware_pause(); + } } - return NULL; + dq->dq_items_head = next; + _dispatch_queue_wakeup_global(dq); +out: + return head; } -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -void +#pragma mark - +#pragma mark dispatch_worker_thread + +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void _dispatch_worker_thread2(void *context) { struct dispatch_object_s *item; dispatch_queue_t dq = context; struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + if (_dispatch_thread_getspecific(dispatch_queue_key)) { DISPATCH_CRASH("Premature thread recycling"); } @@ -1380,13 +2243,16 @@ _dispatch_worker_thread2(void *context) qc->dgq_pending = 0; #if DISPATCH_COCOA_COMPAT + (void)dispatch_atomic_inc(&_dispatch_worker_threads); // ensure that high-level memory management techniques do not leak/crash - dispatch_begin_thread_4GC(); + if (dispatch_begin_thread_4GC) { + dispatch_begin_thread_4GC(); + } void *pool = _dispatch_begin_NSAutoReleasePool(); #endif #if DISPATCH_PERF_MON - uint64_t start = mach_absolute_time(); + uint64_t start = _dispatch_absolute_time(); #endif while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { _dispatch_continuation_pop(item); @@ -1398,366 +2264,238 @@ _dispatch_worker_thread2(void 
*context) #if DISPATCH_COCOA_COMPAT _dispatch_end_NSAutoReleasePool(pool); dispatch_end_thread_4GC(); + if (!dispatch_atomic_dec(&_dispatch_worker_threads) && + dispatch_no_worker_threads_4GC) { + dispatch_no_worker_threads_4GC(); + } #endif _dispatch_thread_setspecific(dispatch_queue_key, NULL); _dispatch_force_cache_cleanup(); -} - -#if DISPATCH_PERF_MON -void -_dispatch_queue_merge_stats(uint64_t start) -{ - uint64_t avg, delta = mach_absolute_time() - start; - unsigned long count, bucket; - - count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key); - _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - - if (count) { - avg = delta / count; - bucket = flsll(avg); - } else { - bucket = 0; - } - - // 64-bit counters on 32-bit require a lock or a queue - OSSpinLockLock(&_dispatch_stats_lock); - - _dispatch_stats[bucket].time_total += delta; - _dispatch_stats[bucket].count_total += count; - _dispatch_stats[bucket].thread_total++; - OSSpinLockUnlock(&_dispatch_stats_lock); } -#endif -size_t -dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) +#if DISPATCH_ENABLE_THREAD_POOL +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void * +_dispatch_worker_thread(void *context) { - return snprintf(buf, bufsiz, "parent = %p ", dq->do_targetq); -} + dispatch_queue_t dq = context; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + sigset_t mask; + int r; -size_t -dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dq->dq_label, dq); - offset += dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += snprintf(&buf[offset], bufsiz - offset, "}"); - return offset; -} + // workaround tweaks the kernel workqueue does for us + r = sigfillset(&mask); + (void)dispatch_assume_zero(r); 
+ r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); + (void)dispatch_assume_zero(r); -#if DISPATCH_DEBUG -void -dispatch_debug_queue(dispatch_queue_t dq, const char* str) { - if (fastpath(dq)) { - dispatch_debug(dq, "%s", str); - } else { - _dispatch_log("queue[NULL]: %s", str); - } -} -#endif + do { + _dispatch_worker_thread2(context); + // we use 65 seconds in case there are any timers that run once a minute + } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, + dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); -#if DISPATCH_COCOA_COMPAT -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg __attribute__((unused))) -{ - if (main_q_is_draining) { - return; + (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size); + if (dq->dq_items_tail) { + _dispatch_queue_wakeup_global(dq); } - _dispatch_queue_set_mainq_drain_state(true); - _dispatch_queue_serial_drain_till_empty(&_dispatch_main_q); - _dispatch_queue_set_mainq_drain_state(false); -} - -mach_port_t -_dispatch_get_main_queue_port_4CF(void) -{ - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); - return main_q_port; -} -#endif -static void -dispatch_queue_attr_dispose(dispatch_queue_attr_t attr) -{ - dispatch_queue_attr_set_finalizer(attr, NULL); - _dispatch_dispose(attr); + return NULL; } -static const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = { - .do_type = DISPATCH_QUEUE_ATTR_TYPE, - .do_kind = "queue-attr", - .do_dispose = dispatch_queue_attr_dispose, -}; - -dispatch_queue_attr_t -dispatch_queue_attr_create(void) +int +_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) { - dispatch_queue_attr_t a = calloc(1, sizeof(struct dispatch_queue_attr_s)); - - if (a) { - a->do_vtable = &dispatch_queue_attr_vtable; - a->do_next = DISPATCH_OBJECT_LISTLESS; - a->do_ref_cnt = 1; - a->do_xref_cnt = 1; - a->do_targetq = _dispatch_get_root_queue(0, 0); - a->qa_flags = DISPATCH_QUEUE_OVERCOMMIT; - } - return a; -} + int r; -void 
-dispatch_queue_attr_set_flags(dispatch_queue_attr_t attr, uint64_t flags) -{ - dispatch_assert_zero(flags & ~DISPATCH_QUEUE_FLAGS_MASK); - attr->qa_flags = (unsigned long)flags & DISPATCH_QUEUE_FLAGS_MASK; -} - -void -dispatch_queue_attr_set_priority(dispatch_queue_attr_t attr, int priority) -{ - dispatch_debug_assert(attr, "NULL pointer"); - dispatch_debug_assert(priority <= DISPATCH_QUEUE_PRIORITY_HIGH && priority >= DISPATCH_QUEUE_PRIORITY_LOW, "Invalid priority"); + /* Workaround: 6269619 Not all signals can be delivered on any thread */ - if (priority > 0) { - priority = DISPATCH_QUEUE_PRIORITY_HIGH; - } else if (priority < 0) { - priority = DISPATCH_QUEUE_PRIORITY_LOW; - } + r = sigdelset(set, SIGILL); + (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGTRAP); + (void)dispatch_assume_zero(r); +#if HAVE_DECL_SIGEMT + r = sigdelset(set, SIGEMT); + (void)dispatch_assume_zero(r); +#endif + r = sigdelset(set, SIGFPE); + (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGBUS); + (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGSEGV); + (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGSYS); + (void)dispatch_assume_zero(r); + r = sigdelset(set, SIGPIPE); + (void)dispatch_assume_zero(r); - attr->qa_priority = priority; + return pthread_sigmask(how, set, oset); } - -void -dispatch_queue_attr_set_finalizer_f(dispatch_queue_attr_t attr, - void *context, dispatch_queue_finalizer_function_t finalizer) -{ -#ifdef __BLOCKS__ - if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { - Block_release(attr->finalizer_ctxt); - } #endif - attr->finalizer_ctxt = context; - attr->finalizer_func = finalizer; -} - -#ifdef __BLOCKS__ -long -dispatch_queue_attr_set_finalizer(dispatch_queue_attr_t attr, - dispatch_queue_finalizer_t finalizer) -{ - void *ctxt; - dispatch_queue_finalizer_function_t func; - - if (finalizer) { - if (!(ctxt = Block_copy(finalizer))) { - return 1; - } - func = (void *)_dispatch_call_block_and_release2; - } else { - ctxt = 
NULL; - func = NULL; - } - dispatch_queue_attr_set_finalizer_f(attr, ctxt, func); +#pragma mark - +#pragma mark dispatch_main_queue - return 0; -} -#endif +static bool _dispatch_program_is_probably_callback_driven; +#if DISPATCH_COCOA_COMPAT static void -_dispatch_ccache_init(void *context __attribute__((unused))) -{ - _dispatch_ccache_zone = malloc_create_zone(0, 0); - dispatch_assert(_dispatch_ccache_zone); - malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); -} - -dispatch_continuation_t -_dispatch_continuation_alloc_from_heap(void) +_dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED) { - static dispatch_once_t pred; - dispatch_continuation_t dc; - - dispatch_once_f(&pred, NULL, _dispatch_ccache_init); - - while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { - sleep(1); - } + kern_return_t kr; - return dc; -} + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &main_q_port); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, + MACH_MSG_TYPE_MAKE_SEND); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); -void -_dispatch_force_cache_cleanup(void) -{ - dispatch_continuation_t dc = _dispatch_thread_getspecific(dispatch_cache_key); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, NULL); - _dispatch_cache_cleanup2(dc); - } + _dispatch_program_is_probably_callback_driven = true; + _dispatch_safe_fork = false; } -DISPATCH_NOINLINE -void -_dispatch_cache_cleanup2(void *value) +mach_port_t +_dispatch_get_main_queue_port_4CF(void) { - dispatch_continuation_t dc, next_dc = value; - - while ((dc = next_dc)) { - next_dc = dc->do_next; - malloc_zone_free(_dispatch_ccache_zone, dc); - } + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, + _dispatch_main_q_port_init); + return main_q_port; } -static char _dispatch_build[16]; +static bool main_q_is_draining; +// 6618342 
Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +DISPATCH_NOINLINE static void -_dispatch_bug_init(void *context __attribute__((unused))) +_dispatch_queue_set_mainq_drain_state(bool arg) { - int mib[] = { CTL_KERN, KERN_OSVERSION }; - size_t bufsz = sizeof(_dispatch_build); - - sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0); + main_q_is_draining = arg; } void -_dispatch_bug(size_t line, long val) +_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg DISPATCH_UNUSED) { - static dispatch_once_t pred; - static void *last_seen; - void *ra = __builtin_return_address(0); - - dispatch_once_f(&pred, NULL, _dispatch_bug_init); - if (last_seen != ra) { - last_seen = ra; - _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, line, val); + if (main_q_is_draining) { + return; } + _dispatch_queue_set_mainq_drain_state(true); + _dispatch_main_queue_drain(); + _dispatch_queue_set_mainq_drain_state(false); } -void -_dispatch_abort(size_t line, long val) -{ - _dispatch_bug(line, val); - abort(); -} - -void -_dispatch_log(const char *msg, ...) 
-{ - va_list ap; - - va_start(ap, msg); - - _dispatch_logv(msg, ap); - - va_end(ap); -} +#endif void -_dispatch_logv(const char *msg, va_list ap) +dispatch_main(void) { -#if DISPATCH_DEBUG - static FILE *logfile, *tmp; - char newbuf[strlen(msg) + 2]; - char path[PATH_MAX]; - - sprintf(newbuf, "%s\n", msg); - - if (!logfile) { - snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid()); - tmp = fopen(path, "a"); - assert(tmp); - if (!dispatch_atomic_cmpxchg(&logfile, NULL, tmp)) { - fclose(tmp); - } else { - struct timeval tv; - gettimeofday(&tv, NULL); - fprintf(logfile, "=== log file opened for %s[%u] at %ld.%06u ===\n", - getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec); - } +#if HAVE_PTHREAD_MAIN_NP + if (pthread_main_np()) { +#endif + _dispatch_program_is_probably_callback_driven = true; + pthread_exit(NULL); + DISPATCH_CRASH("pthread_exit() returned"); +#if HAVE_PTHREAD_MAIN_NP } - vfprintf(logfile, newbuf, ap); - fflush(logfile); -#else - vsyslog(LOG_NOTICE, msg, ap); + DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread"); #endif } -int -_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_sigsuspend(void) { - int r; - - /* Workaround: 6269619 Not all signals can be delivered on any thread */ - - r = sigdelset(set, SIGILL); - dispatch_assume_zero(r); - r = sigdelset(set, SIGTRAP); - dispatch_assume_zero(r); - r = sigdelset(set, SIGEMT); - dispatch_assume_zero(r); - r = sigdelset(set, SIGFPE); - dispatch_assume_zero(r); - r = sigdelset(set, SIGBUS); - dispatch_assume_zero(r); - r = sigdelset(set, SIGSEGV); - dispatch_assume_zero(r); - r = sigdelset(set, SIGSYS); - dispatch_assume_zero(r); - r = sigdelset(set, SIGPIPE); - dispatch_assume_zero(r); + static const sigset_t mask; - return pthread_sigmask(how, set, oset); +#if DISPATCH_COCOA_COMPAT + // Do not count the signal handling thread as a worker thread + 
(void)dispatch_atomic_dec(&_dispatch_worker_threads); +#endif + for (;;) { + sigsuspend(&mask); + } } -bool _dispatch_safe_fork = true; - -void -dispatch_atfork_prepare(void) +DISPATCH_NORETURN +static void +_dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) { + // never returns, so burn bridges behind us + _dispatch_clear_stack(0); + _dispatch_sigsuspend(); } -void -dispatch_atfork_parent(void) +DISPATCH_NOINLINE +static void +_dispatch_queue_cleanup2(void) { -} + (void)dispatch_atomic_dec(&_dispatch_main_q.dq_running); -void -dispatch_atfork_child(void) -{ - void *crash = (void *)0x100; - size_t i; + if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { + _dispatch_wakeup(&_dispatch_main_q); + } - if (_dispatch_safe_fork) { - return; + // overload the "probably" variable to mean that dispatch_main() or + // similar non-POSIX API was called + // this has to run before the DISPATCH_COCOA_COMPAT below + if (_dispatch_program_is_probably_callback_driven) { + dispatch_async_f(_dispatch_get_root_queue(0, false), NULL, + _dispatch_sig_thread); + sleep(1); // workaround 6778970 } - _dispatch_main_q.dq_items_head = crash; - _dispatch_main_q.dq_items_tail = crash; +#if DISPATCH_COCOA_COMPAT + dispatch_once_f(&_dispatch_main_q_port_pred, NULL, + _dispatch_main_q_port_init); - _dispatch_mgr_q.dq_items_head = crash; - _dispatch_mgr_q.dq_items_tail = crash; + mach_port_t mp = main_q_port; + kern_return_t kr; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - _dispatch_root_queues[i].dq_items_head = crash; - _dispatch_root_queues[i].dq_items_tail = crash; + main_q_port = 0; + + if (mp) { + kr = mach_port_deallocate(mach_task_self(), mp); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, + -1); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); } +#endif } -void -dispatch_init_pthread(pthread_t pthr __attribute__((unused))) +static void 
+_dispatch_queue_cleanup(void *ctxt) { + if (ctxt == &_dispatch_main_q) { + return _dispatch_queue_cleanup2(); + } + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); } +#pragma mark - +#pragma mark dispatch_manager_queue + +static unsigned int _dispatch_select_workaround; +static fd_set _dispatch_rfds; +static fd_set _dispatch_wfds; +static void **_dispatch_rfd_ptrs; +static void **_dispatch_wfd_ptrs; + static int _dispatch_kq; static void -_dispatch_get_kq_init(void *context __attribute__((unused))) +_dispatch_get_kq_init(void *context DISPATCH_UNUSED) { static const struct kevent kev = { .ident = 1, @@ -1766,15 +2504,18 @@ _dispatch_get_kq_init(void *context __attribute__((unused))) }; _dispatch_kq = kqueue(); + _dispatch_safe_fork = false; - // in case we fall back to select() - FD_SET(_dispatch_kq, &_dispatch_rfds); if (_dispatch_kq == -1) { - dispatch_assert_zero(errno); + DISPATCH_CLIENT_CRASH("kqueue() create failed: " + "probably out of file descriptors"); + } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { + // in case we fall back to select() + FD_SET(_dispatch_kq, &_dispatch_rfds); } - dispatch_assume_zero(kevent(_dispatch_kq, &kev, 1, NULL, 0, NULL)); + (void)dispatch_assume_zero(kevent(_dispatch_kq, &kev, 1, NULL, 0, NULL)); _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); } @@ -1789,6 +2530,120 @@ _dispatch_get_kq(void) return _dispatch_kq; } +long +_dispatch_update_kq(const struct kevent *kev) +{ + struct kevent kev_copy = *kev; + // This ensures we don't get a pending kevent back while registering + // a new kevent + kev_copy.flags |= EV_RECEIPT; + + if (_dispatch_select_workaround && (kev_copy.flags & EV_DELETE)) { + // Only executed on manager queue + switch (kev_copy.filter) { + case EVFILT_READ: + if (kev_copy.ident < FD_SETSIZE && + FD_ISSET((int)kev_copy.ident, &_dispatch_rfds)) { + FD_CLR((int)kev_copy.ident, 
&_dispatch_rfds); + _dispatch_rfd_ptrs[kev_copy.ident] = 0; + (void)dispatch_atomic_dec(&_dispatch_select_workaround); + return 0; + } + break; + case EVFILT_WRITE: + if (kev_copy.ident < FD_SETSIZE && + FD_ISSET((int)kev_copy.ident, &_dispatch_wfds)) { + FD_CLR((int)kev_copy.ident, &_dispatch_wfds); + _dispatch_wfd_ptrs[kev_copy.ident] = 0; + (void)dispatch_atomic_dec(&_dispatch_select_workaround); + return 0; + } + break; + default: + break; + } + } + + int rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); + if (rval == -1) { + // If we fail to register with kevents, for other reasons aside from + // changelist elements. + (void)dispatch_assume_zero(errno); + //kev_copy.flags |= EV_ERROR; + //kev_copy.data = error; + return errno; + } + + // The following select workaround only applies to adding kevents + if ((kev->flags & (EV_DISABLE|EV_DELETE)) || + !(kev->flags & (EV_ADD|EV_ENABLE))) { + return 0; + } + + // Only executed on manager queue + switch (kev_copy.data) { + case 0: + return 0; + case EBADF: + break; + default: + // If an error occurred while registering with kevent, and it was + // because of a kevent changelist processing && the kevent involved + // either doing a read or write, it would indicate we were trying + // to register a /dev/* port; fall back to select + switch (kev_copy.filter) { + case EVFILT_READ: + if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { + if (!_dispatch_rfd_ptrs) { + _dispatch_rfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); + } + _dispatch_rfd_ptrs[kev_copy.ident] = kev_copy.udata; + FD_SET((int)kev_copy.ident, &_dispatch_rfds); + (void)dispatch_atomic_inc(&_dispatch_select_workaround); + _dispatch_debug("select workaround used to read fd %d: 0x%lx", + (int)kev_copy.ident, (long)kev_copy.data); + return 0; + } + break; + case EVFILT_WRITE: + if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { + if (!_dispatch_wfd_ptrs) { + _dispatch_wfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); + } + 
_dispatch_wfd_ptrs[kev_copy.ident] = kev_copy.udata; + FD_SET((int)kev_copy.ident, &_dispatch_wfds); + (void)dispatch_atomic_inc(&_dispatch_select_workaround); + _dispatch_debug("select workaround used to write fd %d: 0x%lx", + (int)kev_copy.ident, (long)kev_copy.data); + return 0; + } + break; + default: + // kevent error, _dispatch_source_merge_kevent() will handle it + _dispatch_source_drain_kevent(&kev_copy); + break; + } + break; + } + return kev_copy.data; +} + +static bool +_dispatch_mgr_wakeup(dispatch_queue_t dq) +{ + static const struct kevent kev = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + }; + + _dispatch_debug("waking up the _dispatch_mgr_q: %p", dq); + + _dispatch_update_kq(&kev); + + return false; +} + static void _dispatch_mgr_thread2(struct kevent *kev, size_t cnt) { @@ -1797,17 +2652,42 @@ _dispatch_mgr_thread2(struct kevent *kev, size_t cnt) for (i = 0; i < cnt; i++) { // EVFILT_USER isn't used by sources if (kev[i].filter == EVFILT_USER) { - // If _dispatch_mgr_thread2() ever is changed to return to the - // caller, then this should become _dispatch_queue_drain() - _dispatch_queue_serial_drain_till_empty(&_dispatch_mgr_q); + // If _dispatch_mgr_thread2() ever is changed to return to the + // caller, then this should become _dispatch_queue_drain() + _dispatch_queue_serial_drain_till_empty(&_dispatch_mgr_q); } else { _dispatch_source_drain_kevent(&kev[i]); } - } + } } -static dispatch_queue_t -_dispatch_mgr_invoke(dispatch_queue_t dq) +#if DISPATCH_USE_VM_PRESSURE && DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE +// VM Pressure source for malloc +static dispatch_source_t _dispatch_malloc_vm_pressure_source; + +static void +_dispatch_malloc_vm_pressure_handler(void *context DISPATCH_UNUSED) +{ + malloc_zone_pressure_relief(0,0); +} + +static void +_dispatch_malloc_vm_pressure_setup(void) +{ + _dispatch_malloc_vm_pressure_source = dispatch_source_create( + DISPATCH_SOURCE_TYPE_VM, 0, DISPATCH_VM_PRESSURE, + 
_dispatch_get_root_queue(0, true)); + dispatch_source_set_event_handler_f(_dispatch_malloc_vm_pressure_source, + _dispatch_malloc_vm_pressure_handler); + dispatch_resume(_dispatch_malloc_vm_pressure_source); +} +#else +#define _dispatch_malloc_vm_pressure_setup() +#endif + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_mgr_invoke(void) { static const struct timespec timeout_immediately = { 0, 0 }; struct timespec timeout; @@ -1815,84 +2695,111 @@ _dispatch_mgr_invoke(dispatch_queue_t dq) struct timeval sel_timeout, *sel_timeoutp; fd_set tmp_rfds, tmp_wfds; struct kevent kev[1]; - int k_cnt, k_err, i, r; + int k_cnt, err, i, r; - _dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); +#if DISPATCH_COCOA_COMPAT + // Do not count the manager thread as a worker thread + (void)dispatch_atomic_dec(&_dispatch_worker_threads); +#endif + _dispatch_malloc_vm_pressure_setup(); for (;;) { _dispatch_run_timers(); timeoutp = _dispatch_get_next_timer_fire(&timeout); - + if (_dispatch_select_workaround) { FD_COPY(&_dispatch_rfds, &tmp_rfds); FD_COPY(&_dispatch_wfds, &tmp_wfds); if (timeoutp) { sel_timeout.tv_sec = timeoutp->tv_sec; - sel_timeout.tv_usec = (typeof(sel_timeout.tv_usec))(timeoutp->tv_nsec / 1000u); + sel_timeout.tv_usec = (typeof(sel_timeout.tv_usec)) + (timeoutp->tv_nsec / 1000u); sel_timeoutp = &sel_timeout; } else { sel_timeoutp = NULL; } - + r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, sel_timeoutp); if (r == -1) { - if (errno != EBADF) { - dispatch_assume_zero(errno); + err = errno; + if (err != EBADF) { + if (err != EINTR) { + (void)dispatch_assume_zero(err); + } continue; } for (i = 0; i < FD_SETSIZE; i++) { if (i == _dispatch_kq) { continue; } - if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)) { + if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, + &_dispatch_wfds)) { continue; } r = dup(i); if (r != -1) { close(r); } else { - FD_CLR(i, 
&_dispatch_rfds); - FD_CLR(i, &_dispatch_wfds); - _dispatch_rfd_ptrs[i] = 0; - _dispatch_wfd_ptrs[i] = 0; + if (FD_ISSET(i, &_dispatch_rfds)) { + FD_CLR(i, &_dispatch_rfds); + _dispatch_rfd_ptrs[i] = 0; + (void)dispatch_atomic_dec( + &_dispatch_select_workaround); + } + if (FD_ISSET(i, &_dispatch_wfds)) { + FD_CLR(i, &_dispatch_wfds); + _dispatch_wfd_ptrs[i] = 0; + (void)dispatch_atomic_dec( + &_dispatch_select_workaround); + } } } continue; } - + if (r > 0) { for (i = 0; i < FD_SETSIZE; i++) { if (i == _dispatch_kq) { continue; } if (FD_ISSET(i, &tmp_rfds)) { - FD_CLR(i, &_dispatch_rfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_READ, EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, _dispatch_rfd_ptrs[i]); + FD_CLR(i, &_dispatch_rfds); // emulate EV_DISABLE + EV_SET(&kev[0], i, EVFILT_READ, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_rfd_ptrs[i]); _dispatch_rfd_ptrs[i] = 0; + (void)dispatch_atomic_dec(&_dispatch_select_workaround); _dispatch_mgr_thread2(kev, 1); } if (FD_ISSET(i, &tmp_wfds)) { - FD_CLR(i, &_dispatch_wfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_WRITE, EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, _dispatch_wfd_ptrs[i]); + FD_CLR(i, &_dispatch_wfds); // emulate EV_DISABLE + EV_SET(&kev[0], i, EVFILT_WRITE, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_wfd_ptrs[i]); _dispatch_wfd_ptrs[i] = 0; + (void)dispatch_atomic_dec(&_dispatch_select_workaround); _dispatch_mgr_thread2(kev, 1); } } } - + timeoutp = &timeout_immediately; } - - k_cnt = kevent(_dispatch_kq, NULL, 0, kev, sizeof(kev) / sizeof(kev[0]), timeoutp); - k_err = errno; + + k_cnt = kevent(_dispatch_kq, NULL, 0, kev, sizeof(kev) / sizeof(kev[0]), + timeoutp); + err = errno; switch (k_cnt) { case -1: - if (k_err == EBADF) { + if (err == EBADF) { DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); } - dispatch_assume_zero(k_err); + if (err != EINTR) { + (void)dispatch_assume_zero(err); + } continue; default: _dispatch_mgr_thread2(kev, (size_t)k_cnt); @@ -1902,179 +2809,13 @@ 
_dispatch_mgr_invoke(dispatch_queue_t dq) continue; } } - - return NULL; -} - -static bool -_dispatch_mgr_wakeup(dispatch_queue_t dq) -{ - static const struct kevent kev = { - .ident = 1, - .filter = EVFILT_USER, -#ifdef EV_TRIGGER - .flags = EV_TRIGGER, -#endif -#ifdef NOTE_TRIGGER - .fflags = NOTE_TRIGGER, -#endif - }; - - _dispatch_debug("waking up the _dispatch_mgr_q: %p", dq); - - _dispatch_update_kq(&kev); - - return false; -} - -void -_dispatch_update_kq(const struct kevent *kev) -{ - struct kevent kev_copy = *kev; - kev_copy.flags |= EV_RECEIPT; - - if (kev_copy.flags & EV_DELETE) { - switch (kev_copy.filter) { - case EVFILT_READ: - if (FD_ISSET((int)kev_copy.ident, &_dispatch_rfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev_copy.ident] = 0; - return; - } - case EVFILT_WRITE: - if (FD_ISSET((int)kev_copy.ident, &_dispatch_wfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev_copy.ident] = 0; - return; - } - default: - break; - } - } - - int rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); - if (rval == -1) { - // If we fail to register with kevents, for other reasons aside from - // changelist elements. 
- dispatch_assume_zero(errno); - //kev_copy.flags |= EV_ERROR; - //kev_copy.data = error; - return; - } - - // The following select workaround only applies to adding kevents - if (!(kev->flags & EV_ADD)) { - return; - } - - switch (kev_copy.data) { - case 0: - return; - case EBADF: - break; - default: - // If an error occurred while registering with kevent, and it was - // because of a kevent changelist processing && the kevent involved - // either doing a read or write, it would indicate we were trying - // to register a /dev/* port; fall back to select - switch (kev_copy.filter) { - case EVFILT_READ: - _dispatch_select_workaround = true; - FD_SET((int)kev_copy.ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev_copy.ident] = kev_copy.udata; - break; - case EVFILT_WRITE: - _dispatch_select_workaround = true; - FD_SET((int)kev_copy.ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev_copy.ident] = kev_copy.udata; - break; - default: - _dispatch_source_drain_kevent(&kev_copy); - break; - } - break; - } -} - -static const struct dispatch_queue_vtable_s _dispatch_queue_mgr_vtable = { - .do_type = DISPATCH_QUEUE_MGR_TYPE, - .do_kind = "mgr-queue", - .do_invoke = _dispatch_mgr_invoke, - .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_mgr_wakeup, -}; - -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -struct dispatch_queue_s _dispatch_mgr_q = { - .do_vtable = &_dispatch_queue_mgr_vtable, - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_targetq = &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_COUNT - 1], - - .dq_label = "com.apple.libdispatch-manager", - .dq_width = 1, - .dq_serialnum = 2, -}; - -const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 3, - .dqo_label = offsetof(struct dispatch_queue_s, dq_label), - .dqo_label_size = sizeof(_dispatch_main_q.dq_label), - .dqo_flags = 0, - 
.dqo_flags_size = 0, - .dqo_width = offsetof(struct dispatch_queue_s, dq_width), - .dqo_width_size = sizeof(_dispatch_main_q.dq_width), - .dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum), - .dqo_serialnum_size = sizeof(_dispatch_main_q.dq_serialnum), - .dqo_running = offsetof(struct dispatch_queue_s, dq_running), - .dqo_running_size = sizeof(_dispatch_main_q.dq_running), -}; - -#ifdef __BLOCKS__ -void -dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work) -{ - // test before the copy of the block - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH("dispatch_after() called with 'when' == infinity"); -#endif - return; - } - dispatch_after_f(when, queue, _dispatch_Block_copy(work), _dispatch_call_block_and_release); } -#endif -DISPATCH_NOINLINE -void -dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, void (*func)(void *)) +DISPATCH_NORETURN +static dispatch_queue_t +_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) { - uint64_t delta; - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH("dispatch_after_f() called with 'when' == infinity"); -#endif - return; - } - - // this function can and should be optimized to not use a dispatch source -again: - delta = _dispatch_timeout(when); - if (delta == 0) { - return dispatch_async_f(queue, ctxt, func); - } - if (!dispatch_source_timer_create(DISPATCH_TIMER_INTERVAL, delta, 0, NULL, queue, ^(dispatch_source_t ds) { - long err_dom, err_val; - if ((err_dom = dispatch_source_get_error(ds, &err_val))) { - dispatch_assert(err_dom == DISPATCH_ERROR_DOMAIN_POSIX); - dispatch_assert(err_val == ECANCELED); - func(ctxt); - dispatch_release(ds); // MUST NOT be _dispatch_release() - } else { - dispatch_source_cancel(ds); - } - })) { - goto again; - } + // never returns, so burn bridges behind us & clear stack 2k ahead + _dispatch_clear_stack(2048); + _dispatch_mgr_invoke(); } diff --git 
a/src/queue_internal.h b/src/queue_internal.h index 05237c202..479ae6006 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -35,102 +35,176 @@ // If dc_vtable is less than 127, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // first two words must align with normal objects. 
-#define DISPATCH_CONTINUATION_HEADER(x) \ - const void * do_vtable; \ - struct x *volatile do_next; \ - dispatch_function_t dc_func; \ - void * dc_ctxt +#define DISPATCH_CONTINUATION_HEADER(x) \ + const void *do_vtable; \ + struct x *volatile do_next; \ + dispatch_function_t dc_func; \ + void *dc_ctxt -#define DISPATCH_OBJ_ASYNC_BIT 0x1 +#define DISPATCH_OBJ_ASYNC_BIT 0x1 #define DISPATCH_OBJ_BARRIER_BIT 0x2 -#define DISPATCH_OBJ_GROUP_BIT 0x4 +#define DISPATCH_OBJ_GROUP_BIT 0x4 +#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 // vtables are pointers far away from the low page in memory -#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul) +#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul) struct dispatch_continuation_s { DISPATCH_CONTINUATION_HEADER(dispatch_continuation_s); - dispatch_group_t dc_group; - void * dc_data[3]; + dispatch_group_t dc_group; + void *dc_data[3]; }; typedef struct dispatch_continuation_s *dispatch_continuation_t; +struct dispatch_queue_attr_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s); +}; + +struct dispatch_queue_attr_s { + DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s); +}; struct dispatch_queue_vtable_s { DISPATCH_VTABLE_HEADER(dispatch_queue_s); }; -#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 +#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 + +#ifdef __LP64__ +#define DISPATCH_QUEUE_CACHELINE_PAD 32 +#else +#define DISPATCH_QUEUE_CACHELINE_PAD 8 +#endif #define DISPATCH_QUEUE_HEADER \ - uint32_t dq_running; \ + uint32_t volatile dq_running; \ uint32_t dq_width; \ - struct dispatch_object_s *dq_items_tail; \ + struct dispatch_object_s *volatile dq_items_tail; \ struct dispatch_object_s *volatile dq_items_head; \ unsigned long dq_serialnum; \ - void *dq_finalizer_ctxt; \ - dispatch_queue_finalizer_function_t dq_finalizer_func + dispatch_queue_t dq_specific_q; struct dispatch_queue_s { DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s); 
DISPATCH_QUEUE_HEADER; - char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last + char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last + char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only }; extern struct dispatch_queue_s _dispatch_mgr_q; -void _dispatch_queue_init(dispatch_queue_t dq); -void _dispatch_queue_drain(dispatch_queue_t dq); void _dispatch_queue_dispose(dispatch_queue_t dq); +void _dispatch_queue_invoke(dispatch_queue_t dq); +void _dispatch_queue_push_list_slow(dispatch_queue_t dq, + struct dispatch_object_s *obj); -__attribute__((always_inline)) +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, dispatch_object_t _tail) +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail) { struct dispatch_object_s *prev, *head = _head._do, *tail = _tail._do; tail->do_next = NULL; - prev = fastpath(dispatch_atomic_xchg(&dq->dq_items_tail, tail)); + dispatch_atomic_store_barrier(); + prev = fastpath(dispatch_atomic_xchg2o(dq, dq_items_tail, tail)); if (prev) { - // if we crash here with a value less than 0x1000, then we are at a known bug in client code - // for example, see _dispatch_queue_dispose or _dispatch_atfork_child + // if we crash here with a value less than 0x1000, then we are at a + // known bug in client code for example, see _dispatch_queue_dispose + // or _dispatch_atfork_child prev->do_next = head; } else { - dq->dq_items_head = head; - _dispatch_wakeup(dq); + _dispatch_queue_push_list_slow(dq, head); } } #define _dispatch_queue_push(x, y) _dispatch_queue_push_list((x), (y), (y)) -#define DISPATCH_QUEUE_PRIORITY_COUNT 3 - #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); #else -static inline void dispatch_debug_queue(dispatch_queue_t dq __attribute__((unused)), const char* str __attribute__((unused))) {} +static inline void dispatch_debug_queue(dispatch_queue_t dq 
DISPATCH_UNUSED, + const char* str DISPATCH_UNUSED) {} #endif size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); -size_t dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz); +size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, + size_t bufsiz); +DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t _dispatch_queue_get_current(void) { return _dispatch_thread_getspecific(dispatch_queue_key); } -__private_extern__ malloc_zone_t *_dispatch_ccache_zone; -dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); +#define DISPATCH_QUEUE_PRIORITY_COUNT 4 +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2) + +// overcommit priority index values need bit 1 set +enum { + DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0, + DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY, +}; + +extern const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable; +extern const struct dispatch_queue_vtable_s _dispatch_queue_vtable; +extern unsigned long _dispatch_queue_serial_numbers; +extern struct dispatch_queue_s _dispatch_root_queues[]; -static inline dispatch_continuation_t -_dispatch_continuation_alloc_cacheonly(void) +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue(long priority, bool overcommit) { - dispatch_continuation_t dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); + if (overcommit) switch (priority) { + case DISPATCH_QUEUE_PRIORITY_LOW: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: + 
return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_HIGH: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; + } + switch (priority) { + case DISPATCH_QUEUE_PRIORITY_LOW: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_HIGH: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY]; + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; + default: + return NULL; } - return dc; +} + +// Note to later developers: ensure that any initialization changes are +// made for statically allocated queues (i.e. _dispatch_main_q). +static inline void +_dispatch_queue_init(dispatch_queue_t dq) +{ + dq->do_vtable = &_dispatch_queue_vtable; + dq->do_next = DISPATCH_OBJECT_LISTLESS; + dq->do_ref_cnt = 1; + dq->do_xref_cnt = 1; + // Default target queue is overcommit! + dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->dq_running = 0; + dq->dq_width = 1; + dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; } #endif diff --git a/src/semaphore.c b/src/semaphore.c index 9e36d4db1..29585bdf5 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -1,40 +1,55 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" // semaphores are too fundamental to use the dispatch_assume*() macros -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (x) { \ - DISPATCH_CRASH("flawed group/semaphore logic"); \ - } \ +#if USE_MACH_SEM +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + if (slowpath(x)) { \ + DISPATCH_CRASH("flawed group/semaphore logic"); \ + } \ + } while (0) +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (slowpath((x) == -1)) { \ + DISPATCH_CRASH("flawed group/semaphore logic"); \ + } \ } while (0) +#endif -struct dispatch_semaphore_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_semaphore_s); -}; +DISPATCH_WEAK // rdar://problem/8503746 +long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema); -static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz); +static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, + size_t bufsiz); static long _dispatch_group_wake(dispatch_semaphore_t dsema); +#pragma mark - +#pragma mark dispatch_semaphore_t + +struct dispatch_semaphore_vtable_s { + DISPATCH_VTABLE_HEADER(dispatch_semaphore_s); +}; + const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = { .do_type = DISPATCH_SEMAPHORE_TYPE, .do_kind = "semaphore", @@ -42,64 +57,39 @@ const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = { .do_debug = _dispatch_semaphore_debug, }; -dispatch_semaphore_t 
-_dispatch_get_thread_semaphore(void) -{ - dispatch_semaphore_t dsema; - - dsema = fastpath(_dispatch_thread_getspecific(dispatch_sema4_key)); - if (!dsema) { - while (!(dsema = dispatch_semaphore_create(0))) { - sleep(1); - } - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return dsema; -} - -void -_dispatch_put_thread_semaphore(dispatch_semaphore_t dsema) -{ - dispatch_semaphore_t old_sema = _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, dsema); - if (old_sema) { - dispatch_release(old_sema); - } -} - -dispatch_group_t -dispatch_group_create(void) -{ - return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX); -} - dispatch_semaphore_t dispatch_semaphore_create(long value) { dispatch_semaphore_t dsema; - + // If the internal value is negative, then the absolute of the value is // equal to the number of waiting threads. Therefore it is bogus to // initialize the semaphore with a negative value. if (value < 0) { return NULL; } - + dsema = calloc(1, sizeof(struct dispatch_semaphore_s)); - + if (fastpath(dsema)) { dsema->do_vtable = &_dispatch_semaphore_vtable; dsema->do_next = DISPATCH_OBJECT_LISTLESS; dsema->do_ref_cnt = 1; dsema->do_xref_cnt = 1; - dsema->do_targetq = dispatch_get_global_queue(0, 0); + dsema->do_targetq = dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); dsema->dsema_value = value; dsema->dsema_orig = value; +#if USE_POSIX_SEM + int ret = sem_init(&dsema->dsema_sem, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif } - + return dsema; } +#if USE_MACH_SEM static void _dispatch_semaphore_create_port(semaphore_t *s4) { @@ -109,19 +99,20 @@ _dispatch_semaphore_create_port(semaphore_t *s4) if (*s4) { return; } - + // lazily allocate the semaphore port - + // Someday: // 1) Switch to a doubly-linked FIFO in user-space. // 2) User-space timers for the timeout. // 3) Use the per-thread semaphore port. 
- - while (dispatch_assume_zero(kr = semaphore_create(mach_task_self(), &tmp, SYNC_POLICY_FIFO, 0))) { + + while ((kr = semaphore_create(mach_task_self(), &tmp, + SYNC_POLICY_FIFO, 0))) { DISPATCH_VERIFY_MIG(kr); sleep(1); } - + if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -129,30 +120,117 @@ _dispatch_semaphore_create_port(semaphore_t *s4) _dispatch_safe_fork = false; } +#endif + +static void +_dispatch_semaphore_dispose(dispatch_semaphore_t dsema) +{ + if (dsema->dsema_value < dsema->dsema_orig) { + DISPATCH_CLIENT_CRASH( + "Semaphore/group object deallocated while in use"); + } + +#if USE_MACH_SEM + kern_return_t kr; + if (dsema->dsema_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + if (dsema->dsema_waiter_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } +#elif USE_POSIX_SEM + int ret = sem_destroy(&dsema->dsema_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif + + _dispatch_dispose(dsema); +} + +static size_t +_dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz) +{ + size_t offset = 0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dsema), dsema); + offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); +#if USE_MACH_SEM + offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + dsema->dsema_port); +#endif + offset += snprintf(&buf[offset], bufsiz - offset, + "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); + return offset; +} + +DISPATCH_NOINLINE +long +_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) +{ + // Before dsema_sent_ksignals is incremented we can rely on the reference + // held by the waiter. 
However, once this value is incremented the waiter + // may return between the atomic increment and the semaphore_signal(), + // therefore an explicit reference must be held in order to safely access + // dsema after the atomic increment. + _dispatch_retain(dsema); + + (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals); + +#if USE_MACH_SEM + _dispatch_semaphore_create_port(&dsema->dsema_port); + kern_return_t kr = semaphore_signal(dsema->dsema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM + int ret = sem_post(&dsema->dsema_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif + + _dispatch_release(dsema); + return 1; +} + +long +dispatch_semaphore_signal(dispatch_semaphore_t dsema) +{ + dispatch_atomic_release_barrier(); + long value = dispatch_atomic_inc2o(dsema, dsema_value); + if (fastpath(value > 0)) { + return 0; + } + if (slowpath(value == LONG_MIN)) { + DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave() or " + "dispatch_semaphore_signal()"); + } + return _dispatch_semaphore_signal_slow(dsema); +} DISPATCH_NOINLINE static long -_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) +_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, + dispatch_time_t timeout) { - mach_timespec_t _timeout; - kern_return_t kr; - uint64_t nsec; long orig; - + again: - // Mach semaphores appear to sometimes spuriously wake up. Therefore, + // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is - // signaled. + // signaled (6880961). 
while ((orig = dsema->dsema_sent_ksignals)) { - if (dispatch_atomic_cmpxchg(&dsema->dsema_sent_ksignals, orig, orig - 1)) { + if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig, + orig - 1)) { return 0; } } +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; + _dispatch_semaphore_create_port(&dsema->dsema_port); // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ + // wait_semaphore->count = -1; /* we don't keep an actual count */ // // The code above does not match the documentation, and that fact is // not surprising. The documented semantics are clumsy to use in any @@ -162,8 +240,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeou switch (timeout) { default: do { - // timeout() already calculates relative time left - nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); @@ -173,10 +250,11 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeou DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo what the fast path did to dsema->dsema_value + // Fall through and try to undo what the fast path did to + // dsema->dsema_value case DISPATCH_TIME_NOW: while ((orig = dsema->dsema_value) < 0) { - if (dispatch_atomic_cmpxchg(&dsema->dsema_value, orig, orig + 1)) { + if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { return KERN_OPERATION_TIMED_OUT; } } @@ -189,222 +267,186 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeou DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } +#elif USE_POSIX_SEM + struct timespec _timeout; + int ret; - goto again; -} + switch (timeout) { + default: + do { + uint64_t nsec = _dispatch_timeout(timeout); + 
_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); + } while (ret == -1 && errno == EINTR); -DISPATCH_NOINLINE -void -dispatch_group_enter(dispatch_group_t dg) -{ - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; -#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) - // This assumes: - // 1) Way too much about the optimizer of GCC. - // 2) There will never be more than LONG_MAX threads. - // Therefore: no overflow detection - asm( -#ifdef __LP64__ - "lock decq %0\n\t" -#else - "lock decl %0\n\t" -#endif - "js 1f\n\t" - "xor %%eax, %%eax\n\t" - "ret\n\t" - "1:" - : "+m" (dsema->dsema_value) - : - : "cc" - ); - _dispatch_semaphore_wait_slow(dsema, DISPATCH_TIME_FOREVER); -#else - dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); + if (ret == -1 && errno != ETIMEDOUT) { + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + break; + } + // Fall through and try to undo what the fast path did to + // dsema->dsema_value + case DISPATCH_TIME_NOW: + while ((orig = dsema->dsema_value) < 0) { + if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { + errno = ETIMEDOUT; + return -1; + } + } + // Another thread called semaphore_signal(). + // Fall through and drain the wakeup. + case DISPATCH_TIME_FOREVER: + do { + ret = sem_wait(&dsema->dsema_sem); + } while (ret != 0); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + break; + } #endif + + goto again; } -DISPATCH_NOINLINE long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { -#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) - // This assumes: - // 1) Way too much about the optimizer of GCC. - // 2) There will never be more than LONG_MAX threads. 
- // Therefore: no overflow detection - asm( -#ifdef __LP64__ - "lock decq %0\n\t" -#else - "lock decl %0\n\t" -#endif - "js 1f\n\t" - "xor %%eax, %%eax\n\t" - "ret\n\t" - "1:" - : "+m" (dsema->dsema_value) - : - : "cc" - ); -#else - if (dispatch_atomic_dec(&dsema->dsema_value) >= 0) { + long value = dispatch_atomic_dec2o(dsema, dsema_value); + dispatch_atomic_acquire_barrier(); + if (fastpath(value >= 0)) { return 0; } -#endif return _dispatch_semaphore_wait_slow(dsema, timeout); } -DISPATCH_NOINLINE -static long -_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) -{ - kern_return_t kr; - - _dispatch_semaphore_create_port(&dsema->dsema_port); - - // Before dsema_sent_ksignals is incremented we can rely on the reference - // held by the waiter. However, once this value is incremented the waiter - // may return between the atomic increment and the semaphore_signal(), - // therefore an explicit reference must be held in order to safely access - // dsema after the atomic increment. 
- _dispatch_retain(dsema); - - dispatch_atomic_inc(&dsema->dsema_sent_ksignals); - - kr = semaphore_signal(dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#pragma mark - +#pragma mark dispatch_group_t - _dispatch_release(dsema); - - return 1; +dispatch_group_t +dispatch_group_create(void) +{ + return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX); } void -dispatch_group_leave(dispatch_group_t dg) +dispatch_group_enter(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - dispatch_semaphore_signal(dsema); - - if (dsema->dsema_value == dsema->dsema_orig) { - _dispatch_group_wake(dsema); - } -} - -DISPATCH_NOINLINE -long -dispatch_semaphore_signal(dispatch_semaphore_t dsema) -{ -#if defined(__OPTIMIZE__) && defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__)) - // overflow detection - // this assumes way too much about the optimizer of GCC - asm( -#ifdef __LP64__ - "lock incq %0\n\t" -#else - "lock incl %0\n\t" -#endif - "jo 1f\n\t" - "jle 2f\n\t" - "xor %%eax, %%eax\n\t" - "ret\n\t" - "1:\n\t" - "int $4\n\t" - "2:" - : "+m" (dsema->dsema_value) - : - : "cc" - ); -#else - if (dispatch_atomic_inc(&dsema->dsema_value) > 0) { - return 0; - } -#endif - return _dispatch_semaphore_signal_slow(dsema); + (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); } DISPATCH_NOINLINE -long +static long _dispatch_group_wake(dispatch_semaphore_t dsema) { - struct dispatch_sema_notify_s *tmp, *head = dispatch_atomic_xchg(&dsema->dsema_notify_head, NULL); - long rval = dispatch_atomic_xchg(&dsema->dsema_group_waiters, 0); - bool do_rel = head; - long kr; + struct dispatch_sema_notify_s *next, *head, *tail = NULL; + long rval; - // wake any "group" waiter or notify blocks - + head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL); + if (head) { + // snapshot before anything is notified/woken + tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL); + } + rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0); 
if (rval) { + // wake group waiters +#if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); do { - kr = semaphore_signal(dsema->dsema_waiter_port); + kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } while (--rval); - } - while (head) { - dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func); - _dispatch_release(head->dsn_queue); +#elif USE_POSIX_SEM do { - tmp = head->dsn_next; - } while (!tmp && !dispatch_atomic_cmpxchg(&dsema->dsema_notify_tail, head, NULL)); - free(head); - head = tmp; + int ret = sem_post(&dsema->dsema_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + } while (--rval); +#endif } - if (do_rel) { + if (head) { + // async group notify blocks + do { + dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func); + _dispatch_release(head->dsn_queue); + next = fastpath(head->dsn_next); + if (!next && head != tail) { + while (!(next = fastpath(head->dsn_next))) { + _dispatch_hardware_pause(); + } + } + free(head); + } while ((head = next)); _dispatch_release(dsema); } return 0; } +void +dispatch_group_leave(dispatch_group_t dg) +{ + dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; + + dispatch_semaphore_signal(dsema); + if (dsema->dsema_value == dsema->dsema_orig) { + (void)_dispatch_group_wake(dsema); + } +} + DISPATCH_NOINLINE static long _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - mach_timespec_t _timeout; - kern_return_t kr; - uint64_t nsec; long orig; - + again: - // check before we cause another signal to be sent by incrementing dsema->dsema_group_waiters + // check before we cause another signal to be sent by incrementing + // dsema->dsema_group_waiters if (dsema->dsema_value == dsema->dsema_orig) { return _dispatch_group_wake(dsema); } - // Mach semaphores appear to sometimes spuriously wake up. Therefore, + // Mach semaphores appear to sometimes spuriously wake up. 
Therefore, // we keep a parallel count of the number of times a Mach semaphore is - // signaled. - dispatch_atomic_inc(&dsema->dsema_group_waiters); + // signaled (6880961). + (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters); // check the values again in case we need to wake any threads if (dsema->dsema_value == dsema->dsema_orig) { return _dispatch_group_wake(dsema); } +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; + _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); - + // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ + // wait_semaphore->count = -1; /* we don't keep an actual count */ // // The code above does not match the documentation, and that fact is // not surprising. The documented semantics are clumsy to use in any // practical way. The above hack effectively tricks the rest of the // Mach semaphore logic to behave like the libdispatch algorithm. - + switch (timeout) { default: do { - nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, _timeout)); + kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, + _timeout)); } while (kr == KERN_ABORTED); + if (kr != KERN_OPERATION_TIMED_OUT) { DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo the earlier change to dsema->dsema_group_waiters + // Fall through and try to undo the earlier change to + // dsema->dsema_group_waiters case DISPATCH_TIME_NOW: while ((orig = dsema->dsema_group_waiters)) { - if (dispatch_atomic_cmpxchg(&dsema->dsema_group_waiters, orig, orig - 1)) { + if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, + orig - 1)) { return KERN_OPERATION_TIMED_OUT; } } @@ -417,6 +459,43 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, 
dispatch_time_t timeout) DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } +#elif USE_POSIX_SEM + struct timespec _timeout; + int ret; + + switch (timeout) { + default: + do { + uint64_t nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); + } while (ret == -1 && errno == EINTR); + + if (!(ret == -1 && errno == ETIMEDOUT)) { + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + break; + } + // Fall through and try to undo the earlier change to + // dsema->dsema_group_waiters + case DISPATCH_TIME_NOW: + while ((orig = dsema->dsema_group_waiters)) { + if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, + orig - 1)) { + errno = ETIMEDOUT; + return -1; + } + } + // Another thread called semaphore_signal(). + // Fall through and drain the wakeup. + case DISPATCH_TIME_FOREVER: + do { + ret = sem_wait(&dsema->dsema_sem); + } while (ret == -1 && errno == EINTR); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + break; + } +#endif goto again; } @@ -430,37 +509,35 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return 0; } if (timeout == 0) { +#if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; +#elif USE_POSIX_SEM + errno = ETIMEDOUT; + return (-1); +#endif } return _dispatch_group_wait_slow(dsema, timeout); } -#ifdef __BLOCKS__ -void -dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) -{ - dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db), _dispatch_call_block_and_release); -} -#endif - +DISPATCH_NOINLINE void -dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) +dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + void (*func)(void *)) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; struct dispatch_sema_notify_s *dsn, *prev; // FIXME -- this should be updated to use the 
continuation cache - while (!(dsn = malloc(sizeof(*dsn)))) { + while (!(dsn = calloc(1, sizeof(*dsn)))) { sleep(1); } - dsn->dsn_next = NULL; dsn->dsn_queue = dq; dsn->dsn_ctxt = ctxt; dsn->dsn_func = func; _dispatch_retain(dq); - - prev = dispatch_atomic_xchg(&dsema->dsema_notify_tail, dsn); + dispatch_atomic_store_barrier(); + prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn); if (fastpath(prev)) { prev->dsn_next = dsn; } else { @@ -472,61 +549,108 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, vo } } +#ifdef __BLOCKS__ void -_dispatch_semaphore_dispose(dispatch_semaphore_t dsema) +dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_block_t db) { + dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db), + _dispatch_call_block_and_release); +} +#endif + +#pragma mark - +#pragma mark _dispatch_thread_semaphore_t + +DISPATCH_NOINLINE +static _dispatch_thread_semaphore_t +_dispatch_thread_semaphore_create(void) +{ +#if USE_MACH_SEM + semaphore_t s4; kern_return_t kr; - - if (dsema->dsema_value < dsema->dsema_orig) { - DISPATCH_CLIENT_CRASH("Semaphore/group object deallocated while in use"); - } - - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } - if (dsema->dsema_waiter_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); + while (slowpath(kr = semaphore_create(mach_task_self(), &s4, + SYNC_POLICY_FIFO, 0))) { + DISPATCH_VERIFY_MIG(kr); + sleep(1); } - - _dispatch_dispose(dsema); + return s4; +#elif USE_POSIX_SEM + sem_t s4; + int ret = sem_init(&s4, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + return s4; +#endif } -size_t -_dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz) +DISPATCH_NOINLINE +void +_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) { - size_t offset = 0; - offset += snprintf(&buf[offset], 
bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema); - offset += dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); - offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, value = %ld, orig = %ld }", - dsema->dsema_port, dsema->dsema_value, dsema->dsema_orig); - return offset; +#if USE_MACH_SEM + semaphore_t s4 = (semaphore_t)sema; + kern_return_t kr = semaphore_destroy(mach_task_self(), s4); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM + sem_t s4 = (sem_t)sema; + int ret = sem_destroy(&s4); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif } -#ifdef __BLOCKS__ void -dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) +_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) { - dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), _dispatch_call_block_and_release); -} +#if USE_MACH_SEM + semaphore_t s4 = (semaphore_t)sema; + kern_return_t kr = semaphore_signal(s4); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM + sem_t s4 = (sem_t)sema; + int ret = sem_post(&s4); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); #endif +} -DISPATCH_NOINLINE void -dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) +_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) { - dispatch_continuation_t dc; - - _dispatch_retain(dg); - dispatch_group_enter(dg); - - dc = _dispatch_continuation_alloc_cacheonly() ?: _dispatch_continuation_alloc_from_heap(); +#if USE_MACH_SEM + semaphore_t s4 = (semaphore_t)sema; + kern_return_t kr; + do { + kr = semaphore_wait(s4); + } while (slowpath(kr == KERN_ABORTED)); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM + sem_t s4 = (sem_t)sema; + int ret; + do { + ret = sem_wait(&s4); + } while (slowpath(ret != 0)); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif +} - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT|DISPATCH_OBJ_GROUP_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; - dc->dc_group = dg; 
+_dispatch_thread_semaphore_t +_dispatch_get_thread_semaphore(void) +{ + _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (slowpath(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} - _dispatch_queue_push(dq, dc); +void +_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) +{ + _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); + if (slowpath(old_sema)) { + return _dispatch_thread_semaphore_dispose(old_sema); + } } diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 3af28c06b..e5b319e91 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
- * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -28,7 +28,7 @@ #define __DISPATCH_SEMAPHORE_INTERNAL__ struct dispatch_sema_notify_s { - struct dispatch_sema_notify_s *dsn_next; + struct dispatch_sema_notify_s *volatile dsn_next; dispatch_queue_t dsn_queue; void *dsn_ctxt; void (*dsn_func)(void *); @@ -39,8 +39,16 @@ struct dispatch_semaphore_s { long dsema_value; long dsema_orig; size_t dsema_sent_ksignals; +#if USE_MACH_SEM && USE_POSIX_SEM +#error "Too many supported semaphore types" +#elif USE_MACH_SEM semaphore_t dsema_port; semaphore_t dsema_waiter_port; +#elif USE_POSIX_SEM + sem_t dsema_sem; +#else +#error "No supported semaphore type" +#endif size_t dsema_group_waiters; struct dispatch_sema_notify_s *dsema_notify_head; struct dispatch_sema_notify_s *dsema_notify_tail; @@ -48,4 +56,11 @@ struct dispatch_semaphore_s { extern const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable; +typedef uintptr_t _dispatch_thread_semaphore_t; +_dispatch_thread_semaphore_t _dispatch_get_thread_semaphore(void); +void _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t); +void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t); +void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t); +void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); + #endif diff --git a/src/shims.c b/src/shims.c deleted file mode 100644 index a02d4535e..000000000 --- a/src/shims.c +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#include "internal.h" - -void * -dispatch_mach_msg_get_context(mach_msg_header_t *msg) -{ - mach_msg_context_trailer_t *tp; - void *context = NULL; - - tp = (mach_msg_context_trailer_t *)((uint8_t *)msg + round_msg(msg->msgh_size)); - if (tp->msgh_trailer_size >= (mach_msg_size_t)sizeof(mach_msg_context_trailer_t)) { - context = (void *)(uintptr_t)tp->msgh_context; - } - - return context; -} - -/* - * Raw Mach message support - */ -boolean_t -_dispatch_machport_callback(mach_msg_header_t *msg, mach_msg_header_t *reply, - void (*callback)(mach_msg_header_t *)) -{ - mig_reply_setup(msg, reply); - ((mig_reply_error_t*)reply)->RetCode = MIG_NO_REPLY; - - callback(msg); - - return TRUE; -} - -/* - * CFMachPort compatibility - */ -boolean_t -_dispatch_CFMachPortCallBack(mach_msg_header_t *msg, mach_msg_header_t *reply, - void (*callback)(struct __CFMachPort *, void *msg, signed long size, void *)) -{ - mig_reply_setup(msg, reply); - ((mig_reply_error_t*)reply)->RetCode = MIG_NO_REPLY; - - callback(NULL, msg, msg->msgh_size, dispatch_mach_msg_get_context(msg)); - - return TRUE; -} diff --git a/src/shims.h b/src/shims.h new file mode 100644 index 000000000..73322bea6 --- /dev/null +++ b/src/shims.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_OS_SHIMS__ +#define __DISPATCH_OS_SHIMS__ + +#include +#if HAVE_PTHREAD_WORKQUEUES +#include +#endif +#if HAVE_PTHREAD_NP_H +#include +#endif + +#if !HAVE_DECL_FD_COPY +#define FD_COPY(f, t) (void)(*(t) = *(f)) +#endif + +#if !HAVE_NORETURN_BUILTIN_TRAP +/* + * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not + * marked noreturn, leading to a build error as dispatch_main() *is* marked + * noreturn. Mask by marking __builtin_trap() as noreturn locally. + */ +DISPATCH_NORETURN +void __builtin_trap(void); +#endif + +#include "shims/atomic.h" +#include "shims/tsd.h" +#include "shims/hw_config.h" +#include "shims/perfmon.h" + +#include "shims/getprogname.h" +#include "shims/malloc_zone.h" +#include "shims/time.h" + +#ifdef __APPLE__ +// Clear the stack before calling long-running thread-handler functions that +// never return (and don't take arguments), to facilitate leak detection and +// provide cleaner backtraces. +#define _dispatch_clear_stack(s) do { \ + void *a[(s)/sizeof(void*) ? 
(s)/sizeof(void*) : 1]; \ + a[0] = pthread_get_stackaddr_np(pthread_self()); \ + bzero((void*)&a[1], a[0] - (void*)&a[1]); \ + } while (0) +#else +#define _dispatch_clear_stack(s) +#endif + +#endif diff --git a/src/shims/atomic.h b/src/shims/atomic.h new file mode 100644 index 000000000..fbc11717f --- /dev/null +++ b/src/shims/atomic.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_ATOMIC__ +#define __DISPATCH_SHIMS_ATOMIC__ + +/* x86 & cortex-a8 have a 64 byte cacheline */ +#define DISPATCH_CACHELINE_SIZE 64 +#define ROUND_UP_TO_CACHELINE_SIZE(x) \ + (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1)) +#define ROUND_UP_TO_VECTOR_SIZE(x) \ + (((x) + 15) & ~15) +#define DISPATCH_CACHELINE_ALIGN \ + __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) + +#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) + +#define _dispatch_atomic_barrier() __sync_synchronize() +// see comment in dispatch_once.c +#define dispatch_atomic_maximally_synchronizing_barrier() \ + _dispatch_atomic_barrier() +// assume atomic builtins provide barriers +#define dispatch_atomic_barrier() +#define dispatch_atomic_acquire_barrier() +#define dispatch_atomic_release_barrier() +#define dispatch_atomic_store_barrier() + +#define _dispatch_hardware_pause() asm("") +#define _dispatch_debugger() asm("trap") + +#define dispatch_atomic_cmpxchg(p, e, n) \ + __sync_bool_compare_and_swap((p), (e), (n)) +#if __has_builtin(__sync_swap) +#define dispatch_atomic_xchg(p, n) \ + ((typeof(*(p)))__sync_swap((p), (n))) +#else +#define dispatch_atomic_xchg(p, n) \ + ((typeof(*(p)))__sync_lock_test_and_set((p), (n))) +#endif +#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v)) +#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v)) +#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v)) +#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v)) + +#define dispatch_atomic_inc(p) dispatch_atomic_add((p), 1) +#define dispatch_atomic_dec(p) dispatch_atomic_sub((p), 1) +// really just a low level abort() +#define _dispatch_hardware_crash() __builtin_trap() + +#define dispatch_atomic_cmpxchg2o(p, f, e, n) \ + dispatch_atomic_cmpxchg(&(p)->f, (e), (n)) +#define dispatch_atomic_xchg2o(p, f, n) \ + dispatch_atomic_xchg(&(p)->f, (n)) +#define dispatch_atomic_add2o(p, f, v) \ + dispatch_atomic_add(&(p)->f, 
(v)) +#define dispatch_atomic_sub2o(p, f, v) \ + dispatch_atomic_sub(&(p)->f, (v)) +#define dispatch_atomic_or2o(p, f, v) \ + dispatch_atomic_or(&(p)->f, (v)) +#define dispatch_atomic_and2o(p, f, v) \ + dispatch_atomic_and(&(p)->f, (v)) +#define dispatch_atomic_inc2o(p, f) \ + dispatch_atomic_add2o((p), f, 1) +#define dispatch_atomic_dec2o(p, f) \ + dispatch_atomic_sub2o((p), f, 1) + +#else +#error "Please upgrade to GCC 4.2 or newer." +#endif + +#if defined(__x86_64__) || defined(__i386__) + +// GCC emits nothing for __sync_synchronize() on x86_64 & i386 +#undef _dispatch_atomic_barrier +#define _dispatch_atomic_barrier() \ + __asm__ __volatile__( \ + "mfence" \ + : : : "memory") +#undef dispatch_atomic_maximally_synchronizing_barrier +#ifdef __LP64__ +#define dispatch_atomic_maximally_synchronizing_barrier() \ + do { unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \ + ); } while(0) +#else +#ifdef __llvm__ +#define dispatch_atomic_maximally_synchronizing_barrier() \ + do { unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \ + ); } while(0) +#else // gcc does not allow inline i386 asm to clobber ebx +#define dispatch_atomic_maximally_synchronizing_barrier() \ + do { unsigned long _clbr; __asm__ __volatile__( \ + "pushl %%ebx\n\t" \ + "cpuid\n\t" \ + "popl %%ebx" \ + : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory" \ + ); } while(0) +#endif +#endif +#undef _dispatch_hardware_pause +#define _dispatch_hardware_pause() asm("pause") +#undef _dispatch_debugger +#define _dispatch_debugger() asm("int3") + +#elif defined(__ppc__) || defined(__ppc64__) + +// GCC emits "sync" for __sync_synchronize() on ppc & ppc64 +#undef _dispatch_atomic_barrier +#ifdef __LP64__ +#define _dispatch_atomic_barrier() \ + __asm__ __volatile__( \ + "isync\n\t" \ + "lwsync" + : : : "memory") +#else +#define _dispatch_atomic_barrier() \ + __asm__ 
__volatile__( \ + "isync\n\t" \ + "eieio" \ + : : : "memory") +#endif +#undef dispatch_atomic_maximally_synchronizing_barrier +#define dispatch_atomic_maximally_synchronizing_barrier() \ + __asm__ __volatile__( \ + "sync" \ + : : : "memory") + +#endif + + +#endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/getprogname.h b/src/shims/getprogname.h new file mode 100644 index 000000000..74aba1318 --- /dev/null +++ b/src/shims/getprogname.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2009-2010 Mark Heily + * All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SHIMS_GETPROGNAME__ +#define __DISPATCH_SHIMS_GETPROGNAME__ + +#if !HAVE_GETPROGNAME +static inline char * +getprogname(void) +{ +# if HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME + return program_invocation_short_name; +# else +# error getprogname(3) is not available on this platform +# endif +} +#endif /* HAVE_GETPROGNAME */ + +#endif /* __DISPATCH_SHIMS_GETPROGNAME__ */ diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h new file mode 100644 index 000000000..2d9975910 --- /dev/null +++ b/src/shims/hw_config.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SHIMS_HW_CONFIG__ +#define __DISPATCH_SHIMS_HW_CONFIG__ + +#if defined(__APPLE__) +#define DISPATCH_SYSCTL_LOGICAL_CPUS "hw.logicalcpu_max" +#define DISPATCH_SYSCTL_PHYSICAL_CPUS "hw.physicalcpu_max" +#define DISPATCH_SYSCTL_ACTIVE_CPUS "hw.activecpu" +#elif defined(__FreeBSD__) +#define DISPATCH_SYSCTL_LOGICAL_CPUS "kern.smp.cpus" +#define DISPATCH_SYSCTL_PHYSICAL_CPUS "kern.smp.cpus" +#define DISPATCH_SYSCTL_ACTIVE_CPUS "kern.smp.cpus" +#endif + +static inline uint32_t +_dispatch_get_logicalcpu_max() +{ + uint32_t val = 1; +#if defined(_COMM_PAGE_LOGICAL_CPUS) + uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS; + val = (uint32_t)*u8val; +#elif defined(DISPATCH_SYSCTL_LOGICAL_CPUS) + size_t valsz = sizeof(val); + int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS, + &val, &valsz, NULL, 0); + (void)dispatch_assume_zero(ret); + (void)dispatch_assume(valsz == sizeof(uint32_t)); +#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) + int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); + val = ret < 0 ? 
1 : ret; +#else +#warning "no supported way to query logical CPU count" +#endif + return val; +} + +static inline uint32_t +_dispatch_get_physicalcpu_max() +{ + uint32_t val = 1; +#if defined(_COMM_PAGE_PHYSICAL_CPUS) + uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS; + val = (uint32_t)*u8val; +#elif defined(DISPATCH_SYSCTL_PHYSICAL_CPUS) + size_t valsz = sizeof(val); + int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS, + &val, &valsz, NULL, 0); + (void)dispatch_assume_zero(ret); + (void)dispatch_assume(valsz == sizeof(uint32_t)); +#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) + int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); + val = ret < 0 ? 1 : ret; +#else +#warning "no supported way to query physical CPU count" +#endif + return val; +} + +static inline uint32_t +_dispatch_get_activecpu() +{ + uint32_t val = 1; +#if defined(_COMM_PAGE_ACTIVE_CPUS) + uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_ACTIVE_CPUS; + val = (uint32_t)*u8val; +#elif defined(DISPATCH_SYSCTL_ACTIVE_CPUS) + size_t valsz = sizeof(val); + int ret = sysctlbyname(DISPATCH_SYSCTL_ACTIVE_CPUS, + &val, &valsz, NULL, 0); + (void)dispatch_assume_zero(ret); + (void)dispatch_assume(valsz == sizeof(uint32_t)); +#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) + int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); + val = ret < 0 ? 1 : ret; +#else +#warning "no supported way to query active CPU count" +#endif + return val; +} + +#endif /* __DISPATCH_SHIMS_HW_CONFIG__ */ diff --git a/src/shims/malloc_zone.h b/src/shims/malloc_zone.h new file mode 100644 index 000000000..3975b4feb --- /dev/null +++ b/src/shims/malloc_zone.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2009 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SHIMS_MALLOC_ZONE__ +#define __DISPATCH_SHIMS_MALLOC_ZONE__ + +#include + +#include + +/* + * Implement malloc zones as a simple wrapper around malloc(3) on systems + * that don't support them. + */ +#if !HAVE_MALLOC_CREATE_ZONE +typedef void * malloc_zone_t; + +static inline malloc_zone_t * +malloc_create_zone(size_t start_size, unsigned flags) +{ + + return ((void *)(-1)); +} + +static inline void +malloc_destroy_zone(malloc_zone_t *zone) +{ + +} + +static inline malloc_zone_t * +malloc_default_zone(void) +{ + + return ((void *)(-1)); +} + +static inline malloc_zone_t * +malloc_zone_from_ptr(const void *ptr) +{ + + return ((void *)(-1)); +} + +static inline void * +malloc_zone_malloc(malloc_zone_t *zone, size_t size) +{ + + return (malloc(size)); +} + +static inline void * +malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) +{ + + return (calloc(num_items, size)); +} + +static inline void * +malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) +{ + + return (realloc(ptr, size)); +} + +static inline void +malloc_zone_free(malloc_zone_t *zone, void *ptr) +{ + + free(ptr); +} + +static inline void +malloc_set_zone_name(malloc_zone_t *zone, const char *name) +{ + + /* No-op. */ +} +#endif + +#endif /* __DISPATCH_SHIMS_MALLOC_ZONE__ */ diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h new file mode 100644 index 000000000..bf5eb2808 --- /dev/null +++ b/src/shims/perfmon.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2008-2009 Apple Inc. 
All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SHIMS_PERFMON__ +#define __DISPATCH_SHIMS_PERFMON__ + +#if DISPATCH_PERF_MON + +#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ + (defined(__i386__) || defined(__x86_64__)) +#ifdef __LP64__ +#define _dispatch_workitem_inc() asm("incq %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ + _PTHREAD_TSD_OFFSET)) :: "cc") +#define _dispatch_workitem_dec() asm("decq %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ + _PTHREAD_TSD_OFFSET)) :: "cc") +#else +#define _dispatch_workitem_inc() asm("incl %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ + _PTHREAD_TSD_OFFSET)) :: "cc") +#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \ + (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ + _PTHREAD_TSD_OFFSET)) :: "cc") +#endif +#else /* !USE_APPLE_TSD_OPTIMIZATIONS */ +static inline void +_dispatch_workitem_inc(void) +{ + unsigned long cnt; + cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, 
(void *)++cnt); +} +static inline void +_dispatch_workitem_dec(void) +{ + unsigned long cnt; + cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt); +} +#endif /* USE_APPLE_TSD_OPTIMIZATIONS */ + +// C99 doesn't define flsll() or ffsll() +#ifdef __LP64__ +#define flsll(x) flsl(x) +#else +static inline unsigned int +flsll(uint64_t val) +{ + union { + struct { +#ifdef __BIG_ENDIAN__ + unsigned int hi, low; +#else + unsigned int low, hi; +#endif + } words; + uint64_t word; + } _bucket = { + .word = val, + }; + if (_bucket.words.hi) { + return fls(_bucket.words.hi) + 32; + } + return fls(_bucket.words.low); +} +#endif + +#else +#define _dispatch_workitem_inc() +#define _dispatch_workitem_dec() +#endif // DISPATCH_PERF_MON + +#endif diff --git a/src/shims/time.h b/src/shims/time.h new file mode 100644 index 000000000..9ae9160ca --- /dev/null +++ b/src/shims/time.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_TIME__ +#define __DISPATCH_SHIMS_TIME__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#endif + +uint64_t _dispatch_get_nanoseconds(void); + +#if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME +// x86 currently implements mach time in nanoseconds +// this is NOT likely to change +#define _dispatch_time_mach2nano(x) ({x;}) +#define _dispatch_time_nano2mach(x) ({x;}) +#else +typedef struct _dispatch_host_time_data_s { + long double frac; + bool ratio_1_to_1; + dispatch_once_t pred; +} _dispatch_host_time_data_s; +extern _dispatch_host_time_data_s _dispatch_host_time_data; +void _dispatch_get_host_time_init(void *context); + +static inline uint64_t +_dispatch_time_mach2nano(uint64_t machtime) +{ + _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; + dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); + + return machtime * data->frac; +} + +static inline int64_t +_dispatch_time_nano2mach(int64_t nsec) +{ + _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; + dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); + + if (slowpath(_dispatch_host_time_data.ratio_1_to_1)) { + return nsec; + } + + long double big_tmp = nsec; + + // Divide by tbi.numer/tbi.denom to convert nsec to Mach absolute time + big_tmp /= data->frac; + + // Clamp to a 64bit signed int + if (slowpath(big_tmp > INT64_MAX)) { + return INT64_MAX; + } + if (slowpath(big_tmp < INT64_MIN)) { + return INT64_MIN; + } + return big_tmp; +} +#endif + +static inline uint64_t +_dispatch_absolute_time(void) +{ +#if !HAVE_MACH_ABSOLUTE_TIME + struct timespec ts; + int ret; + +#if HAVE_DECL_CLOCK_UPTIME + ret = clock_gettime(CLOCK_UPTIME, &ts); +#elif HAVE_DECL_CLOCK_MONOTONIC + ret = clock_gettime(CLOCK_MONOTONIC, &ts); +#else +#error "clock_gettime: no supported absolute time clock" +#endif + (void)dispatch_assume_zero(ret); + + /* XXXRW: Some kind of overflow 
detection needed? */ + return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); +#else + return mach_absolute_time(); +#endif +} + +#endif diff --git a/src/shims/tsd.h b/src/shims/tsd.h new file mode 100644 index 000000000..b8c6640b7 --- /dev/null +++ b/src/shims/tsd.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_TSD__ +#define __DISPATCH_SHIMS_TSD__ + +#if HAVE_PTHREAD_MACHDEP_H +#include +#endif + +#define DISPATCH_TSD_INLINE DISPATCH_ALWAYS_INLINE_NDEBUG + +#if USE_APPLE_TSD_OPTIMIZATIONS && HAVE_PTHREAD_KEY_INIT_NP && \ + !defined(DISPATCH_USE_DIRECT_TSD) +#define DISPATCH_USE_DIRECT_TSD 1 +#endif + +#if DISPATCH_USE_DIRECT_TSD +static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; +static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; +static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; +static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; +static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5; +//__PTK_LIBDISPATCH_KEY5 + +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) +{ + dispatch_assert_zero(pthread_key_init_np((int)*k, d)); +} +#else +pthread_key_t dispatch_queue_key; +pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_cache_key; +pthread_key_t dispatch_io_key; +pthread_key_t dispatch_apply_key; +pthread_key_t dispatch_bcounter_key; + +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) +{ + dispatch_assert_zero(pthread_key_create(k, d)); +} +#endif + +#if DISPATCH_USE_TSD_BASE && !DISPATCH_DEBUG +#else // DISPATCH_USE_TSD_BASE +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_setspecific(pthread_key_t k, void *v) +{ +#if DISPATCH_USE_DIRECT_TSD + if (_pthread_has_direct_tsd()) { + (void)_pthread_setspecific_direct(k, v); + return; + } +#endif + dispatch_assert_zero(pthread_setspecific(k, v)); +} + +DISPATCH_TSD_INLINE +static inline void * +_dispatch_thread_getspecific(pthread_key_t k) +{ +#if DISPATCH_USE_DIRECT_TSD + if (_pthread_has_direct_tsd()) { + return _pthread_getspecific_direct(k); + } +#endif + return pthread_getspecific(k); 
+} +#endif // DISPATCH_USE_TSD_BASE + +#define _dispatch_thread_self (uintptr_t)pthread_self + +#undef DISPATCH_TSD_INLINE + +#endif diff --git a/src/source.c b/src/source.c index 7259b0b0d..cf612aacf 100644 --- a/src/source.c +++ b/src/source.c @@ -1,189 +1,203 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" +#if HAVE_MACH #include "protocol.h" #include "protocolServer.h" +#endif #include -#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_SYSCOUNT (EVFILT_SYSCOUNT + 3) - -#define DISPATCH_TIMER_INDEX_WALL 0 -#define DISPATCH_TIMER_INDEX_MACH 1 -static struct dispatch_kevent_s _dispatch_kevent_timer[] = { - { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_WALL, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[0], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_timer[0].dk_sources), - }, - { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_MACH, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[1], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_timer[1].dk_sources), - }, -}; -#define DISPATCH_TIMER_COUNT (sizeof 
_dispatch_kevent_timer / sizeof _dispatch_kevent_timer[0]) - -static struct dispatch_kevent_s _dispatch_kevent_data_or = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, - .udata = &_dispatch_kevent_data_or, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), -}; -static struct dispatch_kevent_s _dispatch_kevent_data_add = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - .udata = &_dispatch_kevent_data_add, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), -}; - -#ifndef DISPATCH_NO_LEGACY -struct dispatch_source_attr_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_source_attr_s); -}; - -struct dispatch_source_attr_s { - DISPATCH_STRUCT_HEADER(dispatch_source_attr_s, dispatch_source_attr_vtable_s); - void* finalizer_ctxt; - dispatch_source_finalizer_function_t finalizer_func; - void* context; -}; -#endif /* DISPATCH_NO_LEGACY */ - -#define _dispatch_source_call_block ((void *)-1) -static void _dispatch_source_latch_and_call(dispatch_source_t ds); -static void _dispatch_source_cancel_callout(dispatch_source_t ds); -static bool _dispatch_source_probe(dispatch_source_t ds); static void _dispatch_source_dispose(dispatch_source_t ds); -static void _dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke); -static size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); -static size_t dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz); static dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); - -static void _dispatch_kevent_merge(dispatch_source_t ds); -static void _dispatch_kevent_release(dispatch_source_t ds); -static void _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static void _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static void _dispatch_kevent_machport_enable(dispatch_kevent_t dk); -static 
void _dispatch_kevent_machport_disable(dispatch_kevent_t dk); - -static void _dispatch_drain_mach_messages(struct kevent *ke); +static bool _dispatch_source_probe(dispatch_source_t ds); +static void _dispatch_source_merge_kevent(dispatch_source_t ds, + const struct kevent *ke); +static void _dispatch_kevent_register(dispatch_source_t ds); +static void _dispatch_kevent_unregister(dispatch_source_t ds); +static bool _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags); +static inline void _dispatch_source_timer_init(void); static void _dispatch_timer_list_update(dispatch_source_t ds); +static inline unsigned long _dispatch_source_timer_data( + dispatch_source_refs_t dr, unsigned long prev); +#if HAVE_MACH +static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags); +static void _dispatch_drain_mach_messages(struct kevent *ke); +#endif +static size_t _dispatch_source_kevent_debug(dispatch_source_t ds, + char* buf, size_t bufsiz); +#if DISPATCH_DEBUG +static void _dispatch_kevent_debugger(void *context); +#endif -static void -_dispatch_mach_notify_source_init(void *context __attribute__((unused))); +#pragma mark - +#pragma mark dispatch_source_t -static const char * -_evfiltstr(short filt) +const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable = { + .do_type = DISPATCH_SOURCE_KEVENT_TYPE, + .do_kind = "kevent-source", + .do_invoke = _dispatch_source_invoke, + .do_dispose = _dispatch_source_dispose, + .do_probe = _dispatch_source_probe, + .do_debug = _dispatch_source_kevent_debug, +}; + +dispatch_source_t +dispatch_source_create(dispatch_source_type_t type, + uintptr_t handle, + unsigned long mask, + dispatch_queue_t q) { - switch (filt) { -#define _evfilt2(f) case (f): return #f - _evfilt2(EVFILT_READ); - _evfilt2(EVFILT_WRITE); - _evfilt2(EVFILT_AIO); - _evfilt2(EVFILT_VNODE); - _evfilt2(EVFILT_PROC); - _evfilt2(EVFILT_SIGNAL); - _evfilt2(EVFILT_TIMER); - 
_evfilt2(EVFILT_MACHPORT); - _evfilt2(EVFILT_FS); - _evfilt2(EVFILT_USER); - _evfilt2(EVFILT_SESSION); + const struct kevent *proto_kev = &type->ke; + dispatch_source_t ds = NULL; + dispatch_kevent_t dk = NULL; - _evfilt2(DISPATCH_EVFILT_TIMER); - _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); - _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); + // input validation + if (type == NULL || (mask & ~type->mask)) { + goto out_bad; + } + + switch (type->ke.filter) { + case EVFILT_SIGNAL: + if (handle >= NSIG) { + goto out_bad; + } + break; + case EVFILT_FS: +#if DISPATCH_USE_VM_PRESSURE + case EVFILT_VM: +#endif + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_TIMER: + if (handle) { + goto out_bad; + } + break; default: - return "EVFILT_missing"; + break; } -} -#define DSL_HASH_SIZE 256u // must be a power of two -#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) + ds = calloc(1ul, sizeof(struct dispatch_source_s)); + if (slowpath(!ds)) { + goto out_bad; + } + dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); + if (slowpath(!dk)) { + goto out_bad; + } -static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = dk; + TAILQ_INIT(&dk->dk_sources); -static dispatch_kevent_t -_dispatch_kevent_find(uintptr_t ident, short filter) -{ - uintptr_t hash = DSL_HASH(filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident); - dispatch_kevent_t dki; + // Initialize as a queue first, then override some settings below. 
+ _dispatch_queue_init((dispatch_queue_t)ds); + strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); - TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) { - if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) { - break; + // Dispatch Object + ds->do_vtable = &_dispatch_source_kevent_vtable; + ds->do_ref_cnt++; // the reference the manger queue holds + ds->do_ref_cnt++; // since source is created suspended + ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; + // The initial target queue is the manager queue, in order to get + // the source installed. + ds->do_targetq = &_dispatch_mgr_q; + + // Dispatch Source + ds->ds_ident_hack = dk->dk_kevent.ident; + ds->ds_dkev = dk; + ds->ds_pending_data_mask = dk->dk_kevent.fflags; + if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { + ds->ds_is_level = true; + ds->ds_needs_rearm = true; + } else if (!(EV_CLEAR & proto_kev->flags)) { + // we cheat and use EV_CLEAR to mean a "flag thingy" + ds->ds_is_adder = true; + } + + // Some sources require special processing + if (type->init != NULL) { + type->init(ds, type, handle, mask, q); + } + if (fastpath(!ds->ds_refs)) { + ds->ds_refs = calloc(1ul, sizeof(struct dispatch_source_refs_s)); + if (slowpath(!ds->ds_refs)) { + goto out_bad; } } - return dki; + ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); + dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); + + // First item on the queue sets the user-specified target queue + dispatch_set_target_queue(ds, q); +#if DISPATCH_DEBUG + dispatch_debug(ds, "%s", __FUNCTION__); +#endif + return ds; + +out_bad: + free(ds); + free(dk); + return NULL; } static void -_dispatch_kevent_insert(dispatch_kevent_t dk) +_dispatch_source_dispose(dispatch_source_t ds) { - uintptr_t ident = dk->dk_kevent.ident; - uintptr_t hash = DSL_HASH(dk->dk_kevent.filter == EVFILT_MACHPORT ? 
MACH_PORT_INDEX(ident) : ident); - - TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); + free(ds->ds_refs); + _dispatch_queue_dispose((dispatch_queue_t)ds); } void -dispatch_source_cancel(dispatch_source_t ds) +_dispatch_source_xref_release(dispatch_source_t ds) { -#if DISPATCH_DEBUG - dispatch_debug(ds, __FUNCTION__); -#endif - dispatch_atomic_or(&ds->ds_atomic_flags, DSF_CANCELED); + if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH("Release of a suspended object"); + } _dispatch_wakeup(ds); + _dispatch_release(ds); } -#ifndef DISPATCH_NO_LEGACY void -_dispatch_source_legacy_xref_release(dispatch_source_t ds) +dispatch_source_cancel(dispatch_source_t ds) { - if (ds->ds_is_legacy) { - if (!(ds->ds_timer.flags & DISPATCH_TIMER_ONESHOT)) { - dispatch_source_cancel(ds); - } +#if DISPATCH_DEBUG + dispatch_debug(ds, "%s", __FUNCTION__); +#endif + // Right after we set the cancel flag, someone else + // could potentially invoke the source, do the cancelation, + // unregister the source, and deallocate it. 
We would + // need to therefore retain/release before setting the bit - // Clients often leave sources suspended at the last release - dispatch_atomic_and(&ds->do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK); - } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH("Release of a suspended object"); - } + _dispatch_retain(ds); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED); _dispatch_wakeup(ds); _dispatch_release(ds); } -#endif /* DISPATCH_NO_LEGACY */ long dispatch_source_testcancel(dispatch_source_t ds) @@ -210,113 +224,291 @@ dispatch_source_get_data(dispatch_source_t ds) return ds->ds_data; } -#if DISPATCH_DEBUG void -dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str) +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - size_t i; - for (i = 0; i < count; ++i) { - _dispatch_log("kevent[%lu] = { ident = %p, filter = %s, flags = 0x%x, fflags = 0x%x, data = %p, udata = %p }: %s", - i, (void*)kev[i].ident, _evfiltstr(kev[i].filter), kev[i].flags, kev[i].fflags, (void*)kev[i].data, (void*)kev[i].udata, str); + struct kevent kev = { + .fflags = (typeof(kev.fflags))val, + .data = val, + }; + + dispatch_assert( + ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_ADD || + ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_OR); + + _dispatch_source_merge_kevent(ds, &kev); +} + +#pragma mark - +#pragma mark dispatch_source_handler + +#ifdef __BLOCKS__ +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void +_dispatch_source_set_event_handler2(void *context) +{ + struct Block_layout *bl = context; + + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; + + if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { + 
Block_release(dr->ds_handler_ctxt); } + dr->ds_handler_func = bl ? (void *)bl->invoke : NULL; + dr->ds_handler_ctxt = bl; + ds->ds_handler_is_block = true; } -#endif -static size_t -_dispatch_source_kevent_debug(dispatch_source_t ds, char* buf, size_t bufsiz) +void +dispatch_source_set_event_handler(dispatch_source_t ds, + dispatch_block_t handler) { - size_t offset = _dispatch_source_debug(ds, buf, bufsiz); - offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", - ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); - return offset; + handler = _dispatch_Block_copy(handler); + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_event_handler2); } +#endif /* __BLOCKS__ */ static void -_dispatch_source_init_tail_queue_array(void *context __attribute__((unused))) +_dispatch_source_set_event_handler_f(void *context) { - unsigned int i; - for (i = 0; i < DSL_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_sources[i]); - } + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_WALL)], &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_MACH)], &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_or, dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); +#ifdef __BLOCKS__ + if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { + Block_release(dr->ds_handler_ctxt); + } +#endif + dr->ds_handler_func = context; + dr->ds_handler_ctxt = ds->do_ctxt; + ds->ds_handler_is_block = false; } -// Find existing kevents, and merge any new flags if necessary void -_dispatch_kevent_merge(dispatch_source_t ds) 
+dispatch_source_set_event_handler_f(dispatch_source_t ds, + dispatch_function_t handler) { - static dispatch_once_t pred; - dispatch_kevent_t dk; - typeof(dk->dk_kevent.fflags) new_flags; - bool do_resume = false; + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_event_handler_f); +} - if (ds->ds_is_installed) { - return; +#ifdef __BLOCKS__ +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void +_dispatch_source_set_cancel_handler2(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; + + if (ds->ds_cancel_is_block && dr->ds_cancel_handler) { + Block_release(dr->ds_cancel_handler); } - ds->ds_is_installed = true; + dr->ds_cancel_handler = context; + ds->ds_cancel_is_block = true; +} - dispatch_once_f(&pred, NULL, _dispatch_source_init_tail_queue_array); +void +dispatch_source_set_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + handler = _dispatch_Block_copy(handler); + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_cancel_handler2); +} +#endif /* __BLOCKS__ */ - dk = _dispatch_kevent_find(ds->ds_dkev->dk_kevent.ident, ds->ds_dkev->dk_kevent.filter); - - if (dk) { - // If an existing dispatch kevent is found, check to see if new flags - // need to be added to the existing kevent - new_flags = ~dk->dk_kevent.fflags & ds->ds_dkev->dk_kevent.fflags; - dk->dk_kevent.fflags |= ds->ds_dkev->dk_kevent.fflags; - free(ds->ds_dkev); - ds->ds_dkev = dk; - do_resume = new_flags; - } else { - dk = ds->ds_dkev; - _dispatch_kevent_insert(dk); - new_flags = dk->dk_kevent.fflags; - do_resume = true; +static void +_dispatch_source_set_cancel_handler_f(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == 
&_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; + +#ifdef __BLOCKS__ + if (ds->ds_cancel_is_block && dr->ds_cancel_handler) { + Block_release(dr->ds_cancel_handler); } +#endif + dr->ds_cancel_handler = context; + ds->ds_cancel_is_block = false; +} - TAILQ_INSERT_TAIL(&dk->dk_sources, ds, ds_list); +void +dispatch_source_set_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_cancel_handler_f); +} - // Re-register the kevent with the kernel if new flags were added - // by the dispatch kevent - if (do_resume) { - dk->dk_kevent.flags |= EV_ADD; - _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0); - ds->ds_is_armed = true; +#ifdef __BLOCKS__ +static void +_dispatch_source_set_registration_handler2(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; + + if (ds->ds_registration_is_block && dr->ds_registration_handler) { + Block_release(dr->ds_registration_handler); } + dr->ds_registration_handler = context; + ds->ds_registration_is_block = true; +} + +void +dispatch_source_set_registration_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + handler = _dispatch_Block_copy(handler); + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_registration_handler2); } +#endif /* __BLOCKS__ */ +static void +_dispatch_source_set_registration_handler_f(void *context) +{ + dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_source_refs_t dr = ds->ds_refs; + +#ifdef __BLOCKS__ + if (ds->ds_registration_is_block && dr->ds_registration_handler) { + Block_release(dr->ds_registration_handler); + } +#endif + dr->ds_registration_handler = context; + 
ds->ds_registration_is_block = false; +} void -_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) +dispatch_source_set_registration_handler_f(dispatch_source_t ds, + dispatch_function_t handler) { - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - // these types not registered with kevent + dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_source_set_registration_handler_f); +} + +#pragma mark - +#pragma mark dispatch_source_invoke + +static void +_dispatch_source_registration_callout(dispatch_source_t ds) +{ + dispatch_source_refs_t dr = ds->ds_refs; + + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + // no registration callout if source is canceled rdar://problem/8955246 +#ifdef __BLOCKS__ + if (ds->ds_registration_is_block) { + Block_release(dr->ds_registration_handler); + } + } else if (ds->ds_registration_is_block) { + dispatch_block_t b = dr->ds_registration_handler; + _dispatch_client_callout_block(b); + Block_release(dr->ds_registration_handler); +#endif + } else { + dispatch_function_t f = dr->ds_registration_handler; + _dispatch_client_callout(ds->do_ctxt, f); + } + ds->ds_registration_is_block = false; + dr->ds_registration_handler = NULL; +} + +static void +_dispatch_source_cancel_callout(dispatch_source_t ds) +{ + dispatch_source_refs_t dr = ds->ds_refs; + + ds->ds_pending_data_mask = 0; + ds->ds_pending_data = 0; + ds->ds_data = 0; + +#ifdef __BLOCKS__ + if (ds->ds_handler_is_block) { + Block_release(dr->ds_handler_ctxt); + ds->ds_handler_is_block = false; + dr->ds_handler_func = NULL; + dr->ds_handler_ctxt = NULL; + } + if (ds->ds_registration_is_block) { + Block_release(dr->ds_registration_handler); + ds->ds_registration_is_block = false; + dr->ds_registration_handler = NULL; + } +#endif + + if (!dr->ds_cancel_handler) { return; - case EVFILT_MACHPORT: - _dispatch_kevent_machport_resume(dk, 
new_flags, del_flags); - break; - case EVFILT_PROC: - if (dk->dk_kevent.flags & EV_ONESHOT) { - return; + } + if (ds->ds_cancel_is_block) { +#ifdef __BLOCKS__ + dispatch_block_t b = dr->ds_cancel_handler; + if (ds->ds_atomic_flags & DSF_CANCELED) { + _dispatch_client_callout_block(b); } - // fall through - default: - _dispatch_update_kq(&dk->dk_kevent); - if (dk->dk_kevent.flags & EV_DISPATCH) { - dk->dk_kevent.flags &= ~EV_ADD; + Block_release(dr->ds_cancel_handler); + ds->ds_cancel_is_block = false; +#endif + } else { + dispatch_function_t f = dr->ds_cancel_handler; + if (ds->ds_atomic_flags & DSF_CANCELED) { + _dispatch_client_callout(ds->do_ctxt, f); + } + } + dr->ds_cancel_handler = NULL; +} + +static void +_dispatch_source_latch_and_call(dispatch_source_t ds) +{ + unsigned long prev; + + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + return; + } + dispatch_source_refs_t dr = ds->ds_refs; + prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0); + if (ds->ds_is_level) { + ds->ds_data = ~prev; + } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { + ds->ds_data = _dispatch_source_timer_data(dr, prev); + } else { + ds->ds_data = prev; + } + if (dispatch_assume(prev) && dr->ds_handler_func) { + _dispatch_client_callout(dr->ds_handler_ctxt, dr->ds_handler_func); + } +} + +static void +_dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) +{ + switch (ds->ds_dkev->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + // called on manager queue only + return _dispatch_timer_list_update(ds); + case EVFILT_MACHPORT: + if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { + new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH } break; } + if (_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { + _dispatch_kevent_unregister(ds); + } } -dispatch_queue_t +static dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds) { // This function performs all source actions. 
Each action is responsible @@ -325,15 +517,36 @@ _dispatch_source_invoke(dispatch_source_t ds) // will be returned and the invoke will be re-driven on that queue. // The order of tests here in invoke and in probe should be consistent. - + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_source_refs_t dr = ds->ds_refs; if (!ds->ds_is_installed) { // The source needs to be installed on the manager queue. if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_merge(ds); + _dispatch_kevent_register(ds); + if (dr->ds_registration_handler) { + return ds->do_targetq; + } + if (slowpath(ds->do_xref_cnt == 0)) { + return &_dispatch_mgr_q; // rdar://problem/9558246 + } + } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { + // Source suspended by an item drained from the source queue. + return NULL; + } else if (dr->ds_registration_handler) { + // The source has been registered and the registration handler needs + // to be delivered on the target queue. + if (dq != ds->do_targetq) { + return ds->do_targetq; + } + // clears ds_registration_handler + _dispatch_source_registration_callout(ds); + if (slowpath(ds->do_xref_cnt == 0)) { + return &_dispatch_mgr_q; // rdar://problem/9558246 + } } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { // The source has been cancelled and needs to be uninstalled from the // manager queue. 
After uninstallation, the cancellation handler needs @@ -342,13 +555,13 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_release(ds); + _dispatch_kevent_unregister(ds); return ds->do_targetq; - } else if (ds->ds_cancel_handler) { + } else if (dr->ds_cancel_handler) { if (dq != ds->do_targetq) { return ds->do_targetq; } - } + } _dispatch_source_cancel_callout(ds); } else if (ds->ds_pending_data) { // The source has pending data to deliver via the event handler callback @@ -361,38 +574,42 @@ _dispatch_source_invoke(dispatch_source_t ds) if (ds->ds_needs_rearm) { return &_dispatch_mgr_q; } - } else if (ds->ds_needs_rearm && !ds->ds_is_armed) { + } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { // The source needs to be rearmed on the manager queue. if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_resume(ds->ds_dkev, 0, 0); - ds->ds_is_armed = true; + _dispatch_source_kevent_resume(ds, 0); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); } return NULL; } -bool +static bool _dispatch_source_probe(dispatch_source_t ds) { // This function determines whether the source needs to be invoked. // The order of tests here in probe and in invoke should be consistent. + dispatch_source_refs_t dr = ds->ds_refs; if (!ds->ds_is_installed) { // The source needs to be installed on the manager queue. return true; + } else if (dr->ds_registration_handler) { + // The registration handler needs to be delivered to the target queue. + return true; } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { // The source needs to be uninstalled from the manager queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. 
- if (ds->ds_dkev || ds->ds_cancel_handler) { + if (ds->ds_dkev || dr->ds_cancel_handler) { return true; } } else if (ds->ds_pending_data) { // The source has pending data to deliver to the target queue. return true; - } else if (ds->ds_needs_rearm && !ds->ds_is_armed) { + } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { // The source needs to be rearmed on the manager queue. return true; } @@ -400,175 +617,253 @@ _dispatch_source_probe(dispatch_source_t ds) return false; } -void -_dispatch_source_dispose(dispatch_source_t ds) -{ - _dispatch_queue_dispose((dispatch_queue_t)ds); -} +#pragma mark - +#pragma mark dispatch_source_kevent static void -_dispatch_kevent_debugger2(void *context, dispatch_source_t unused __attribute__((unused))) +_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) { - struct sockaddr sa; - socklen_t sa_len = sizeof(sa); - int c, fd = (int)(long)context; - unsigned int i; - dispatch_kevent_t dk; - dispatch_source_t ds; - FILE *debug_stream; + struct kevent fake; - c = accept(fd, &sa, &sa_len); - if (c == -1) { - if (errno != EAGAIN) { - dispatch_assume_zero(errno); - } + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { return; } -#if 0 - int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO - if (r == -1) { - dispatch_assume_zero(errno); - } + + // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie + // . As a workaround, we simulate an exit event for + // any EVFILT_PROC with an invalid pid . 
+ if (ke->flags & EV_ERROR) { + if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { + fake = *ke; + fake.flags &= ~EV_ERROR; + fake.fflags = NOTE_EXIT; + fake.data = 0; + ke = &fake; +#if DISPATCH_USE_VM_PRESSURE + } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { + // Memory pressure kevent is not supported on all platforms + // + return; #endif - debug_stream = fdopen(c, "a"); - if (!dispatch_assume(debug_stream)) { - close(c); - return; + } else { + // log the unexpected error + (void)dispatch_assume_zero(ke->data); + return; + } } - fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); - fprintf(debug_stream, "Content-type: text/html\r\n"); - fprintf(debug_stream, "Pragma: nocache\r\n"); - fprintf(debug_stream, "\r\n"); - fprintf(debug_stream, "\nPID %u\n\n

    \n", getpid()); - - //fprintf(debug_stream, "DKDKDKDKDKDKDK\n"); + if (ds->ds_is_level) { + // ke->data is signed and "negative available data" makes no sense + // zero bytes happens when EV_EOF is set + // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file + dispatch_assert(ke->data >= 0l); + ds->ds_pending_data = ~ke->data; + } else if (ds->ds_is_adder) { + (void)dispatch_atomic_add2o(ds, ds_pending_data, ke->data); + } else if (ke->fflags & ds->ds_pending_data_mask) { + (void)dispatch_atomic_or2o(ds, ds_pending_data, + ke->fflags & ds->ds_pending_data_mask); + } - for (i = 0; i < DSL_HASH_SIZE; i++) { - if (TAILQ_EMPTY(&_dispatch_sources[i])) { - continue; - } - TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { - fprintf(debug_stream, "\t
  • DK %p ident %lu filter %s flags 0x%hx fflags 0x%x data 0x%lx udata %p\n", - dk, dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, - dk->dk_kevent.fflags, dk->dk_kevent.data, dk->dk_kevent.udata); - fprintf(debug_stream, "\t\t
      \n"); - TAILQ_FOREACH(ds, &dk->dk_sources, ds_list) { - fprintf(debug_stream, "\t\t\t
    • DS %p refcnt 0x%x suspend 0x%x data 0x%lx mask 0x%lx flags 0x%x
    • \n", - ds, ds->do_ref_cnt, ds->do_suspend_cnt, ds->ds_pending_data, ds->ds_pending_data_mask, - ds->ds_atomic_flags); - if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { - dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x suspend 0x%x label: %s\n", dq, dq->do_ref_cnt, dq->do_suspend_cnt, dq->dq_label); - } - } - fprintf(debug_stream, "\t\t
    \n"); - fprintf(debug_stream, "\t
  • \n"); - } + // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery + if (ds->ds_needs_rearm) { + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); } - fprintf(debug_stream, "
\n\n\n"); - fflush(debug_stream); - fclose(debug_stream); + + _dispatch_wakeup(ds); } -static void -_dispatch_kevent_debugger(void *context __attribute__((unused))) +void +_dispatch_source_drain_kevent(struct kevent *ke) { - union { - struct sockaddr_in sa_in; - struct sockaddr sa; - } sa_u = { - .sa_in = { - .sin_family = AF_INET, - .sin_addr = { htonl(INADDR_LOOPBACK), }, - }, - }; - dispatch_source_t ds; - const char *valstr; - int val, r, fd, sock_opt = 1; - socklen_t slen = sizeof(sa_u); + dispatch_kevent_t dk = ke->udata; + dispatch_source_refs_t dri; - if (issetugid()) { - return; - } - valstr = getenv("LIBDISPATCH_DEBUGGER"); - if (!valstr) { - return; +#if DISPATCH_DEBUG + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); +#endif + + dispatch_debug_kevents(ke, 1, __func__); + +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT) { + return _dispatch_drain_mach_messages(ke); } - val = atoi(valstr); - if (val == 2) { - sa_u.sa_in.sin_addr.s_addr = 0; +#endif + dispatch_assert(dk); + + if (ke->flags & EV_ONESHOT) { + dk->dk_kevent.flags |= EV_ONESHOT; } - fd = socket(PF_INET, SOCK_STREAM, 0); - if (fd == -1) { - dispatch_assume_zero(errno); - return; - } - r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, (socklen_t) sizeof sock_opt); - if (r == -1) { - dispatch_assume_zero(errno); - goto out_bad; - } -#if 0 - r = fcntl(fd, F_SETFL, O_NONBLOCK); - if (r == -1) { - dispatch_assume_zero(errno); - goto out_bad; + + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } +} + +#pragma mark - +#pragma mark dispatch_kevent_t + +static struct dispatch_kevent_s _dispatch_kevent_data_or = { + .dk_kevent = { + .filter = DISPATCH_EVFILT_CUSTOM_OR, + .flags = EV_CLEAR, + .udata = &_dispatch_kevent_data_or, + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), +}; +static struct dispatch_kevent_s _dispatch_kevent_data_add = { + 
.dk_kevent = { + .filter = DISPATCH_EVFILT_CUSTOM_ADD, + .udata = &_dispatch_kevent_data_add, + }, + .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), +}; + +#if TARGET_OS_EMBEDDED +#define DSL_HASH_SIZE 64u // must be a power of two +#else +#define DSL_HASH_SIZE 256u // must be a power of two #endif - r = bind(fd, &sa_u.sa, sizeof(sa_u)); - if (r == -1) { - dispatch_assume_zero(errno); - goto out_bad; - } - r = listen(fd, SOMAXCONN); - if (r == -1) { - dispatch_assume_zero(errno); - goto out_bad; - } - r = getsockname(fd, &sa_u.sa, &slen); - if (r == -1) { - dispatch_assume_zero(errno); - goto out_bad; +#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) + +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; + +static dispatch_once_t __dispatch_kevent_init_pred; + +static void +_dispatch_kevent_init(void *context DISPATCH_UNUSED) +{ + unsigned int i; + for (i = 0; i < DSL_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_sources[i]); } - ds = dispatch_source_read_create_f(fd, NULL, &_dispatch_mgr_q, (void *)(long)fd, _dispatch_kevent_debugger2); - if (dispatch_assume(ds)) { - _dispatch_log("LIBDISPATCH: debug port: %hu", ntohs(sa_u.sa_in.sin_port)); - return; + + TAILQ_INSERT_TAIL(&_dispatch_sources[0], + &_dispatch_kevent_data_or, dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[0], + &_dispatch_kevent_data_add, dk_list); + + _dispatch_source_timer_init(); +} + +static inline uintptr_t +_dispatch_kevent_hash(uintptr_t ident, short filter) +{ + uintptr_t value; +#if HAVE_MACH + value = (filter == EVFILT_MACHPORT ? 
MACH_PORT_INDEX(ident) : ident); +#else + value = ident; +#endif + return DSL_HASH(value); +} + +static dispatch_kevent_t +_dispatch_kevent_find(uintptr_t ident, short filter) +{ + uintptr_t hash = _dispatch_kevent_hash(ident, filter); + dispatch_kevent_t dki; + + TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) { + if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) { + break; + } } -out_bad: - close(fd); + return dki; } -void -_dispatch_source_drain_kevent(struct kevent *ke) +static void +_dispatch_kevent_insert(dispatch_kevent_t dk) { - static dispatch_once_t pred; - dispatch_kevent_t dk = ke->udata; - dispatch_source_t dsi; + uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, + dk->dk_kevent.filter); - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); + TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); +} - dispatch_debug_kevents(ke, 1, __func__); +// Find existing kevents, and merge any new flags if necessary +static void +_dispatch_kevent_register(dispatch_source_t ds) +{ + dispatch_kevent_t dk; + typeof(dk->dk_kevent.fflags) new_flags; + bool do_resume = false; - if (ke->filter == EVFILT_MACHPORT) { - return _dispatch_drain_mach_messages(ke); + if (ds->ds_is_installed) { + return; } - dispatch_assert(dk); + ds->ds_is_installed = true; - if (ke->flags & EV_ONESHOT) { - dk->dk_kevent.flags |= EV_ONESHOT; + dispatch_once_f(&__dispatch_kevent_init_pred, + NULL, _dispatch_kevent_init); + + dk = _dispatch_kevent_find(ds->ds_dkev->dk_kevent.ident, + ds->ds_dkev->dk_kevent.filter); + + if (dk) { + // If an existing dispatch kevent is found, check to see if new flags + // need to be added to the existing kevent + new_flags = ~dk->dk_kevent.fflags & ds->ds_dkev->dk_kevent.fflags; + dk->dk_kevent.fflags |= ds->ds_dkev->dk_kevent.fflags; + free(ds->ds_dkev); + ds->ds_dkev = dk; + do_resume = new_flags; + } else { + dk = ds->ds_dkev; + _dispatch_kevent_insert(dk); + new_flags = dk->dk_kevent.fflags; + do_resume = true; } - 
TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { - _dispatch_source_merge_kevent(dsi, ke); + TAILQ_INSERT_TAIL(&dk->dk_sources, ds->ds_refs, dr_list); + + // Re-register the kevent with the kernel if new flags were added + // by the dispatch kevent + if (do_resume) { + dk->dk_kevent.flags |= EV_ADD; + } + if (do_resume || ds->ds_needs_rearm) { + _dispatch_source_kevent_resume(ds, new_flags); + } + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); +} + +static bool +_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + long r; + switch (dk->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + // these types not registered with kevent + return 0; +#if HAVE_MACH + case EVFILT_MACHPORT: + return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); +#endif + case EVFILT_PROC: + if (dk->dk_kevent.flags & EV_ONESHOT) { + return 0; + } + // fall through + default: + r = _dispatch_update_kq(&dk->dk_kevent); + if (dk->dk_kevent.flags & EV_DISPATCH) { + dk->dk_kevent.flags &= ~EV_ADD; + } + return r; } } static void _dispatch_kevent_dispose(dispatch_kevent_t dk) { - uintptr_t key; + uintptr_t hash; switch (dk->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: @@ -576,12 +871,14 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) case DISPATCH_EVFILT_CUSTOM_OR: // these sources live on statically allocated lists return; +#if HAVE_MACH case EVFILT_MACHPORT: _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); break; +#endif case EVFILT_PROC: if (dk->dk_kevent.flags & EV_ONESHOT) { - break; // implicitly deleted + break; // implicitly deleted } // fall through default: @@ -592,31 +889,28 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) break; } - if (dk->dk_kevent.filter == EVFILT_MACHPORT) { - key = MACH_PORT_INDEX(dk->dk_kevent.ident); - } else { - key = dk->dk_kevent.ident; - } - - TAILQ_REMOVE(&_dispatch_sources[DSL_HASH(key)], dk, dk_list); + hash 
= _dispatch_kevent_hash(dk->dk_kevent.ident, + dk->dk_kevent.filter); + TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); free(dk); } -void -_dispatch_kevent_release(dispatch_source_t ds) +static void +_dispatch_kevent_unregister(dispatch_source_t ds) { dispatch_kevent_t dk = ds->ds_dkev; - dispatch_source_t dsi; + dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; ds->ds_dkev = NULL; - TAILQ_REMOVE(&dk->dk_sources, ds, ds_list); + TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); if (TAILQ_EMPTY(&dk->dk_sources)) { _dispatch_kevent_dispose(dk); } else { - TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + dispatch_source_t dsi = _dispatch_source_from_refs(dri); fflags |= (uint32_t)dsi->ds_pending_data_mask; } del_flags = (uint32_t)ds->ds_pending_data_mask & ~fflags; @@ -627,714 +921,175 @@ _dispatch_kevent_release(dispatch_source_t ds) } } - ds->ds_is_armed = false; - ds->ds_needs_rearm = false; // re-arm is pointless and bad now - _dispatch_release(ds); // the retain is done at creation time -} - -void -_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) -{ - struct kevent fake; - - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { - return; - } - - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie. - // We simulate an exit event in this case. 
- if (ke->flags & EV_ERROR) { - if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - fake = *ke; - fake.flags &= ~EV_ERROR; - fake.fflags = NOTE_EXIT; - fake.data = 0; - ke = &fake; - } else { - // log the unexpected error - dispatch_assume_zero(ke->data); - return; - } - } - - if (ds->ds_is_level) { - // ke->data is signed and "negative available data" makes no sense - // zero bytes happens when EV_EOF is set - // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file - dispatch_assert(ke->data >= 0l); - ds->ds_pending_data = ~ke->data; - } else if (ds->ds_is_adder) { - dispatch_atomic_add(&ds->ds_pending_data, ke->data); - } else { - dispatch_atomic_or(&ds->ds_pending_data, ke->fflags & ds->ds_pending_data_mask); - } - - // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery - if (ds->ds_needs_rearm) { - ds->ds_is_armed = false; - } - - _dispatch_wakeup(ds); -} - -void -_dispatch_source_latch_and_call(dispatch_source_t ds) -{ - unsigned long prev; - - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { - return; - } - prev = dispatch_atomic_xchg(&ds->ds_pending_data, 0); - if (ds->ds_is_level) { - ds->ds_data = ~prev; - } else { - ds->ds_data = prev; - } - if (dispatch_assume(prev)) { - if (ds->ds_handler_func) { - ds->ds_handler_func(ds->ds_handler_ctxt, ds); - } - } -} - -void -_dispatch_source_cancel_callout(dispatch_source_t ds) -{ - ds->ds_pending_data_mask = 0; - ds->ds_pending_data = 0; - ds->ds_data = 0; - -#ifdef __BLOCKS__ - if (ds->ds_handler_is_block) { - Block_release(ds->ds_handler_ctxt); - ds->ds_handler_is_block = false; - ds->ds_handler_func = NULL; - ds->ds_handler_ctxt = NULL; - } -#endif - - if (!ds->ds_cancel_handler) { - return; - } - if (ds->ds_cancel_is_block) { -#ifdef __BLOCKS__ - dispatch_block_t b = ds->ds_cancel_handler; - if (ds->ds_atomic_flags & DSF_CANCELED) { - b(); - } - Block_release(ds->ds_cancel_handler); - ds->ds_cancel_is_block = false; -#endif - } else { - 
dispatch_function_t f = ds->ds_cancel_handler; - if (ds->ds_atomic_flags & DSF_CANCELED) { - f(ds->do_ctxt); - } - } - ds->ds_cancel_handler = NULL; -} - -const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable = { - .do_type = DISPATCH_SOURCE_KEVENT_TYPE, - .do_kind = "kevent-source", - .do_invoke = _dispatch_source_invoke, - .do_dispose = _dispatch_source_dispose, - .do_probe = _dispatch_source_probe, - .do_debug = _dispatch_source_kevent_debug, -}; - -void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) -{ - struct kevent kev = { - .fflags = (typeof(kev.fflags))val, - .data = val, - }; - - dispatch_assert(ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_ADD || - ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_OR); - - _dispatch_source_merge_kevent(ds, &kev); -} - -size_t -dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - dispatch_queue_t target = ds->do_targetq; - return snprintf(buf, bufsiz, - "target = %s[%p], pending_data = 0x%lx, pending_data_mask = 0x%lx, ", - target ? 
target->dq_label : "", target, - ds->ds_pending_data, ds->ds_pending_data_mask); -} - -size_t -_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(ds), ds); - offset += dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); - offset += dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); - return offset; -} - -#ifndef DISPATCH_NO_LEGACY -static void -dispatch_source_attr_dispose(dispatch_source_attr_t attr) -{ - // release the finalizer block if necessary - dispatch_source_attr_set_finalizer(attr, NULL); - _dispatch_dispose(attr); -} - -static const struct dispatch_source_attr_vtable_s dispatch_source_attr_vtable = { - .do_type = DISPATCH_SOURCE_ATTR_TYPE, - .do_kind = "source-attr", - .do_dispose = dispatch_source_attr_dispose, -}; - -dispatch_source_attr_t -dispatch_source_attr_create(void) -{ - dispatch_source_attr_t rval = calloc(1, sizeof(struct dispatch_source_attr_s)); - - if (rval) { - rval->do_vtable = &dispatch_source_attr_vtable; - rval->do_next = DISPATCH_OBJECT_LISTLESS; - rval->do_targetq = dispatch_get_global_queue(0, 0); - rval->do_ref_cnt = 1; - rval->do_xref_cnt = 1; - } - - return rval; -} - -void -dispatch_source_attr_set_finalizer_f(dispatch_source_attr_t attr, - void *context, dispatch_source_finalizer_function_t finalizer) -{ -#ifdef __BLOCKS__ - if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { - Block_release(attr->finalizer_ctxt); - } -#endif - - attr->finalizer_ctxt = context; - attr->finalizer_func = finalizer; -} - -#ifdef __BLOCKS__ -long -dispatch_source_attr_set_finalizer(dispatch_source_attr_t attr, - dispatch_source_finalizer_t finalizer) -{ - void *ctxt; - dispatch_source_finalizer_function_t func; - - if (finalizer) { - if (!(ctxt = Block_copy(finalizer))) { - return 1; - } - func = (void *)_dispatch_call_block_and_release2; - } else { - ctxt = NULL; - func = NULL; - } - - 
dispatch_source_attr_set_finalizer_f(attr, ctxt, func); - - return 0; -} - -dispatch_source_finalizer_t -dispatch_source_attr_get_finalizer(dispatch_source_attr_t attr) -{ - if (attr->finalizer_func == (void*)_dispatch_call_block_and_release2) { - return (dispatch_source_finalizer_t)attr->finalizer_ctxt; - } else if (attr->finalizer_func == NULL) { - return NULL; - } else { - abort(); // finalizer is not a block... - } + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); + ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_release(ds); // the retain is done at creation time } -#endif - -void -dispatch_source_attr_set_context(dispatch_source_attr_t attr, void *context) -{ - attr->context = context; -} - -dispatch_source_attr_t -dispatch_source_attr_copy(dispatch_source_attr_t proto) -{ - dispatch_source_attr_t rval = NULL; - - if (proto && (rval = malloc(sizeof(struct dispatch_source_attr_s)))) { - memcpy(rval, proto, sizeof(struct dispatch_source_attr_s)); -#ifdef __BLOCKS__ - if (rval->finalizer_func == (void*)_dispatch_call_block_and_release2) { - rval->finalizer_ctxt = Block_copy(rval->finalizer_ctxt); - } -#endif - } else if (!proto) { - rval = dispatch_source_attr_create(); - } - return rval; -} -#endif /* DISPATCH_NO_LEGACY */ - - -struct dispatch_source_type_s { - struct kevent ke; - uint64_t mask; -}; - -const struct dispatch_source_type_s _dispatch_source_type_timer = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - }, - .mask = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_ONESHOT|DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK, -}; -const struct dispatch_source_type_s _dispatch_source_type_read = { - .ke = { - .filter = EVFILT_READ, - .flags = EV_DISPATCH, - }, -}; - -const struct dispatch_source_type_s _dispatch_source_type_write = { - .ke = { - .filter = EVFILT_WRITE, - .flags = EV_DISPATCH, - }, -}; - -const struct dispatch_source_type_s _dispatch_source_type_proc = { - .ke = { - .filter = EVFILT_PROC, - .flags = 
EV_CLEAR, - }, - .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_SIGNAL|NOTE_REAP, -}; - -const struct dispatch_source_type_s _dispatch_source_type_signal = { - .ke = { - .filter = EVFILT_SIGNAL, - }, -}; - -const struct dispatch_source_type_s _dispatch_source_type_vnode = { - .ke = { - .filter = EVFILT_VNODE, - .flags = EV_CLEAR, - }, - .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK|NOTE_RENAME|NOTE_REVOKE|NOTE_NONE, -}; +#pragma mark - +#pragma mark dispatch_timer -const struct dispatch_source_type_s _dispatch_source_type_vfs = { - .ke = { - .filter = EVFILT_FS, - .flags = EV_CLEAR, - }, - .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK|VQ_UPDATE|VQ_VERYLOWDISK, -}; - -const struct dispatch_source_type_s _dispatch_source_type_mach_send = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_DISPATCH, - .fflags = DISPATCH_MACHPORT_DEAD, - }, - .mask = DISPATCH_MACH_SEND_DEAD, -}; - -const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_DISPATCH, - .fflags = DISPATCH_MACHPORT_RECV, +DISPATCH_CACHELINE_ALIGN +static struct dispatch_kevent_s _dispatch_kevent_timer[] = { + [DISPATCH_TIMER_INDEX_WALL] = { + .dk_kevent = { + .ident = DISPATCH_TIMER_INDEX_WALL, + .filter = DISPATCH_EVFILT_TIMER, + .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], + }, + .dk_sources = TAILQ_HEAD_INITIALIZER( + _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL].dk_sources), }, -}; - -const struct dispatch_source_type_s _dispatch_source_type_data_add = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, + [DISPATCH_TIMER_INDEX_MACH] = { + .dk_kevent = { + .ident = DISPATCH_TIMER_INDEX_MACH, + .filter = DISPATCH_EVFILT_TIMER, + .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], + }, + .dk_sources = TAILQ_HEAD_INITIALIZER( + _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH].dk_sources), }, -}; - -const struct dispatch_source_type_s 
_dispatch_source_type_data_or = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, - .fflags = ~0, + [DISPATCH_TIMER_INDEX_DISARM] = { + .dk_kevent = { + .ident = DISPATCH_TIMER_INDEX_DISARM, + .filter = DISPATCH_EVFILT_TIMER, + .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], + }, + .dk_sources = TAILQ_HEAD_INITIALIZER( + _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM].dk_sources), }, -}; - -dispatch_source_t -dispatch_source_create(dispatch_source_type_t type, - uintptr_t handle, - unsigned long mask, - dispatch_queue_t q) -{ - const struct kevent *proto_kev = &type->ke; - dispatch_source_t ds = NULL; - dispatch_kevent_t dk = NULL; - - // input validation - if (type == NULL || (mask & ~type->mask)) { - goto out_bad; - } - - switch (type->ke.filter) { - case EVFILT_SIGNAL: - if (handle >= NSIG) { - goto out_bad; - } - break; - case EVFILT_FS: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_TIMER: - if (handle) { - goto out_bad; - } - break; - default: - break; - } - - ds = calloc(1ul, sizeof(struct dispatch_source_s)); - if (slowpath(!ds)) { - goto out_bad; - } - dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); - if (slowpath(!dk)) { - goto out_bad; - } - - dk->dk_kevent = *proto_kev; - dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = dk; - TAILQ_INIT(&dk->dk_sources); - - // Initialize as a queue first, then override some settings below. 
- _dispatch_queue_init((dispatch_queue_t)ds); - strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); - - // Dispatch Object - ds->do_vtable = &_dispatch_source_kevent_vtable; - ds->do_ref_cnt++; // the reference the manger queue holds - ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - // do_targetq will be retained below, past point of no-return - ds->do_targetq = q; - - // Dispatch Source - ds->ds_ident_hack = dk->dk_kevent.ident; - ds->ds_dkev = dk; - ds->ds_pending_data_mask = dk->dk_kevent.fflags; - if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { - if (proto_kev->filter != EVFILT_MACHPORT) { - ds->ds_is_level = true; - } - ds->ds_needs_rearm = true; - } else if (!(EV_CLEAR & proto_kev->flags)) { - // we cheat and use EV_CLEAR to mean a "flag thingy" - ds->ds_is_adder = true; - } - - // If its a timer source, it needs to be re-armed - if (type->ke.filter == DISPATCH_EVFILT_TIMER) { - ds->ds_needs_rearm = true; - } - - dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); -#if DISPATCH_DEBUG - dispatch_debug(ds, __FUNCTION__); -#endif - - // Some sources require special processing - if (type == DISPATCH_SOURCE_TYPE_MACH_SEND) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init); - } else if (type == DISPATCH_SOURCE_TYPE_TIMER) { - ds->ds_timer.flags = mask; - } - - _dispatch_retain(ds->do_targetq); - return ds; - -out_bad: - free(ds); - free(dk); - return NULL; -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -static void -_dispatch_source_set_event_handler2(void *context) -{ - struct Block_layout *bl = context; - - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); - - if (ds->ds_handler_is_block && ds->ds_handler_ctxt) { - Block_release(ds->ds_handler_ctxt); - } - ds->ds_handler_func = bl ? 
(void *)bl->invoke : NULL; - ds->ds_handler_ctxt = bl; - ds->ds_handler_is_block = true; -} - -void -dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler) -{ - dispatch_assert(!ds->ds_is_legacy); - handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, - handler, _dispatch_source_set_event_handler2); -} - -static void -_dispatch_source_set_event_handler_f(void *context) -{ - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); - - if (ds->ds_handler_is_block && ds->ds_handler_ctxt) { - Block_release(ds->ds_handler_ctxt); - } - ds->ds_handler_func = context; - ds->ds_handler_ctxt = ds->do_ctxt; - ds->ds_handler_is_block = false; -} - -void -dispatch_source_set_event_handler_f(dispatch_source_t ds, - dispatch_function_t handler) -{ - dispatch_assert(!ds->ds_is_legacy); - dispatch_barrier_async_f((dispatch_queue_t)ds, - handler, _dispatch_source_set_event_handler_f); -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -static void -_dispatch_source_set_cancel_handler2(void *context) -{ - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); - - if (ds->ds_cancel_is_block && ds->ds_cancel_handler) { - Block_release(ds->ds_cancel_handler); - } - ds->ds_cancel_handler = context; - ds->ds_cancel_is_block = true; -} - -void -dispatch_source_set_cancel_handler(dispatch_source_t ds, - dispatch_block_t handler) -{ - dispatch_assert(!ds->ds_is_legacy); - handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, - handler, _dispatch_source_set_cancel_handler2); -} - -static void -_dispatch_source_set_cancel_handler_f(void *context) -{ - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == 
&_dispatch_source_kevent_vtable); - - if (ds->ds_cancel_is_block && ds->ds_cancel_handler) { - Block_release(ds->ds_cancel_handler); - } - ds->ds_cancel_handler = context; - ds->ds_cancel_is_block = false; -} +}; +// Don't count disarmed timer list +#define DISPATCH_TIMER_COUNT ((sizeof(_dispatch_kevent_timer) \ + / sizeof(_dispatch_kevent_timer[0])) - 1) -void -dispatch_source_set_cancel_handler_f(dispatch_source_t ds, - dispatch_function_t handler) +static inline void +_dispatch_source_timer_init(void) { - dispatch_assert(!ds->ds_is_legacy); - dispatch_barrier_async_f((dispatch_queue_t)ds, - handler, _dispatch_source_set_cancel_handler_f); + TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_WALL)], + &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_MACH)], + &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], dk_list); + TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_DISARM)], + &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], dk_list); } -#ifndef DISPATCH_NO_LEGACY -// 6618342 Contact the team that owns the Instrument DTrace probe before renaming this symbol -dispatch_source_t -_dispatch_source_create2(dispatch_source_t ds, - dispatch_source_attr_t attr, - void *context, - dispatch_source_handler_function_t handler) +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_source_timer_idx(dispatch_source_refs_t dr) { - if (ds == NULL || handler == NULL) { - return NULL; - } - - ds->ds_is_legacy = true; - - ds->ds_handler_func = handler; - ds->ds_handler_ctxt = context; - - if (attr && attr != DISPATCH_SOURCE_CREATE_SUSPENDED) { - ds->dq_finalizer_ctxt = attr->finalizer_ctxt; - ds->dq_finalizer_func = (typeof(ds->dq_finalizer_func))attr->finalizer_func; - ds->do_ctxt = attr->context; - } -#ifdef __BLOCKS__ - if (ds->dq_finalizer_func == (void*)_dispatch_call_block_and_release2) { - ds->dq_finalizer_ctxt = 
Block_copy(ds->dq_finalizer_ctxt); - if (!ds->dq_finalizer_ctxt) { - goto out_bad; - } - } - if (handler == _dispatch_source_call_block) { - struct Block_layout *bl = ds->ds_handler_ctxt = Block_copy(context); - if (!ds->ds_handler_ctxt) { - if (ds->dq_finalizer_func == (void*)_dispatch_call_block_and_release2) { - Block_release(ds->dq_finalizer_ctxt); - } - goto out_bad; - } - ds->ds_handler_func = (void *)bl->invoke; - ds->ds_handler_is_block = true; - } + return ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK ? + DISPATCH_TIMER_INDEX_WALL : DISPATCH_TIMER_INDEX_MACH; +} - // all legacy sources get a cancellation event on the normal event handler. - dispatch_source_handler_function_t func = ds->ds_handler_func; - dispatch_source_handler_t block = ds->ds_handler_ctxt; - void *ctxt = ds->ds_handler_ctxt; - bool handler_is_block = ds->ds_handler_is_block; - - ds->ds_cancel_is_block = true; - if (handler_is_block) { - ds->ds_cancel_handler = _dispatch_Block_copy(^{ - block(ds); - }); - } else { - ds->ds_cancel_handler = _dispatch_Block_copy(^{ - func(ctxt, ds); - }); - } -#endif - if (attr != DISPATCH_SOURCE_CREATE_SUSPENDED) { - dispatch_resume(ds); +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_source_timer_now2(unsigned int timer) +{ + switch (timer) { + case DISPATCH_TIMER_INDEX_MACH: + return _dispatch_absolute_time(); + case DISPATCH_TIMER_INDEX_WALL: + return _dispatch_get_nanoseconds(); + default: + DISPATCH_CRASH("Invalid timer"); } - - return ds; - -out_bad: - free(ds); - return NULL; } -long -dispatch_source_get_error(dispatch_source_t ds, long *err_out) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_source_timer_now(dispatch_source_refs_t dr) { - // 6863892 don't report ECANCELED until kevent is unregistered - if ((ds->ds_atomic_flags & DSF_CANCELED) && !ds->ds_dkev) { - if (err_out) { - *err_out = ECANCELED; - } - return DISPATCH_ERROR_DOMAIN_POSIX; - } else { - return DISPATCH_ERROR_DOMAIN_NO_ERROR; - } + return 
_dispatch_source_timer_now2(_dispatch_source_timer_idx(dr)); } -#endif /* DISPATCH_NO_LEGACY */ // Updates the ordered list of timers based on next fire date for changes to ds. // Should only be called from the context of _dispatch_mgr_q. -void +static void _dispatch_timer_list_update(dispatch_source_t ds) { - dispatch_source_t dsi = NULL; - int idx; - + dispatch_source_refs_t dr = ds->ds_refs, dri = NULL; + dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q); - // do not reschedule timers unregistered with _dispatch_kevent_release() + // do not reschedule timers unregistered with _dispatch_kevent_unregister() if (!ds->ds_dkev) { return; } // Ensure the source is on the global kevent lists before it is removed and // readded below. - _dispatch_kevent_merge(ds); - - TAILQ_REMOVE(&ds->ds_dkev->dk_sources, ds, ds_list); + _dispatch_kevent_register(ds); + + TAILQ_REMOVE(&ds->ds_dkev->dk_sources, dr, dr_list); + + // Move timers that are disabled, suspended or have missed intervals to the + // disarmed list, rearm after resume resp. 
source invoke will reenable them + if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || + ds->ds_pending_data) { + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); + ds->ds_dkev = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM]; + TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, (dispatch_source_refs_t)dr, + dr_list); + return; + } // change the list if the clock type has changed - if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { - idx = DISPATCH_TIMER_INDEX_WALL; - } else { - idx = DISPATCH_TIMER_INDEX_MACH; - } - ds->ds_dkev = &_dispatch_kevent_timer[idx]; + ds->ds_dkev = &_dispatch_kevent_timer[_dispatch_source_timer_idx(dr)]; - if (ds->ds_timer.target) { - TAILQ_FOREACH(dsi, &ds->ds_dkev->dk_sources, ds_list) { - if (dsi->ds_timer.target == 0 || ds->ds_timer.target < dsi->ds_timer.target) { - break; - } + TAILQ_FOREACH(dri, &ds->ds_dkev->dk_sources, dr_list) { + if (ds_timer(dri).target == 0 || + ds_timer(dr).target < ds_timer(dri).target) { + break; } } - - if (dsi) { - TAILQ_INSERT_BEFORE(dsi, ds, ds_list); + + if (dri) { + TAILQ_INSERT_BEFORE(dri, dr, dr_list); } else { - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds, ds_list); + TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, dr, dr_list); } } -static void +static inline void _dispatch_run_timers2(unsigned int timer) { + dispatch_source_refs_t dr; dispatch_source_t ds; uint64_t now, missed; - if (timer == DISPATCH_TIMER_INDEX_MACH) { - now = mach_absolute_time(); - } else { - now = _dispatch_get_nanoseconds(); - } - - while ((ds = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources))) { + now = _dispatch_source_timer_now2(timer); + while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources))) { + ds = _dispatch_source_from_refs(dr); // We may find timers on the wrong list due to a pending update from // dispatch_source_set_timer. Force an update of the list in that case. 
if (timer != ds->ds_ident_hack) { _dispatch_timer_list_update(ds); continue; } - if (!ds->ds_timer.target) { + if (!ds_timer(dr).target) { // no configured timers on the list break; } - if (ds->ds_timer.target > now) { + if (ds_timer(dr).target > now) { // Done running timers for now. break; } - - if (ds->ds_timer.flags & (DISPATCH_TIMER_ONESHOT|DISPATCH_TIMER_ABSOLUTE)) { - dispatch_atomic_inc(&ds->ds_pending_data); - ds->ds_timer.target = 0; - } else { - // Calculate number of missed intervals. - missed = (now - ds->ds_timer.target) / ds->ds_timer.interval; - dispatch_atomic_add(&ds->ds_pending_data, missed + 1); - ds->ds_timer.target += (missed + 1) * ds->ds_timer.interval; + // Remove timers that are suspended or have missed intervals from the + // list, rearm after resume resp. source invoke will reenable them + if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { + _dispatch_timer_list_update(ds); + continue; } - + // Calculate number of missed intervals. + missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; + if (++missed > INT_MAX) { + missed = INT_MAX; + } + ds_timer(dr).target += missed * ds_timer(dr).interval; _dispatch_timer_list_update(ds); + ds_timer(dr).last_fire = now; + (void)dispatch_atomic_add2o(ds, ds_pending_data, (int)missed); _dispatch_wakeup(ds); } } @@ -1342,65 +1097,32 @@ _dispatch_run_timers2(unsigned int timer) void _dispatch_run_timers(void) { + dispatch_once_f(&__dispatch_kevent_init_pred, + NULL, _dispatch_kevent_init); + unsigned int i; for (i = 0; i < DISPATCH_TIMER_COUNT; i++) { - _dispatch_run_timers2(i); + if (!TAILQ_EMPTY(&_dispatch_kevent_timer[i].dk_sources)) { + _dispatch_run_timers2(i); + } } } -#if defined(__i386__) || defined(__x86_64__) -// these architectures always return mach_absolute_time() in nanoseconds -#define _dispatch_convert_mach2nano(x) (x) -#define _dispatch_convert_nano2mach(x) (x) -#else -static mach_timebase_info_data_t tbi; -static dispatch_once_t tbi_pred; - -static void 
-_dispatch_convert_init(void *context __attribute__((unused))) -{ - dispatch_assume_zero(mach_timebase_info(&tbi)); -} - -static uint64_t -_dispatch_convert_mach2nano(uint64_t val) -{ -#ifdef __LP64__ - __uint128_t tmp; -#else - long double tmp; -#endif - - dispatch_once_f(&tbi_pred, NULL, _dispatch_convert_init); - - tmp = val; - tmp *= tbi.numer; - tmp /= tbi.denom; - - return tmp; -} - -static uint64_t -_dispatch_convert_nano2mach(uint64_t val) +static inline unsigned long +_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) { -#ifdef __LP64__ - __uint128_t tmp; -#else - long double tmp; -#endif - - dispatch_once_f(&tbi_pred, NULL, _dispatch_convert_init); - - tmp = val; - tmp *= tbi.denom; - tmp /= tbi.numer; - - return tmp; + // calculate the number of intervals since last fire + unsigned long data, missed; + uint64_t now = _dispatch_source_timer_now(dr); + missed = (unsigned long)((now - ds_timer(dr).last_fire) / + ds_timer(dr).interval); + // correct for missed intervals already delivered last time + data = prev - ds_timer(dr).missed + missed; + ds_timer(dr).missed = missed; + return data; } -#endif // approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_SEC 3153600l #define FOREVER_NSEC 31536000000000000ull struct timespec * @@ -1409,45 +1131,28 @@ _dispatch_get_next_timer_fire(struct timespec *howsoon) // // kevent(2) does not allow large timeouts, so we use a long timeout // instead (approximately 1 year). - dispatch_source_t ds = NULL; + dispatch_source_refs_t dr = NULL; unsigned int timer; uint64_t now, delta_tmp, delta = UINT64_MAX; - // We are looking for the first unsuspended timer which has its target - // time set. Given timers are kept in order, if we hit an timer that's - // unset there's no point in continuing down the list. 
for (timer = 0; timer < DISPATCH_TIMER_COUNT; timer++) { - TAILQ_FOREACH(ds, &_dispatch_kevent_timer[timer].dk_sources, ds_list) { - if (!ds->ds_timer.target) { - break; - } - if (DISPATCH_OBJECT_SUSPENDED(ds)) { - ds->ds_is_armed = false; - } else { - break; - } - } - - if (!ds || !ds->ds_timer.target) { + // Timers are kept in order, first one will fire next + dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources); + if (!dr || !ds_timer(dr).target) { + // Empty list or disabled timer continue; } - - if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { - now = _dispatch_get_nanoseconds(); - } else { - now = mach_absolute_time(); - } - if (ds->ds_timer.target <= now) { + now = _dispatch_source_timer_now(dr); + if (ds_timer(dr).target <= now) { howsoon->tv_sec = 0; howsoon->tv_nsec = 0; return howsoon; } - // the subtraction cannot go negative because the previous "if" // verified that the target is greater than now. - delta_tmp = ds->ds_timer.target - now; - if (!(ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK)) { - delta_tmp = _dispatch_convert_mach2nano(delta_tmp); + delta_tmp = ds_timer(dr).target - now; + if (!(ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK)) { + delta_tmp = _dispatch_time_mach2nano(delta_tmp); } if (delta_tmp < delta) { delta = delta_tmp; @@ -1468,28 +1173,45 @@ struct dispatch_set_timer_params { struct dispatch_timer_source_s values; }; -// To be called from the context of the _dispatch_mgr_q static void -_dispatch_source_set_timer2(void *context) +_dispatch_source_set_timer3(void *context) { + // Called on the _dispatch_mgr_q struct dispatch_set_timer_params *params = context; dispatch_source_t ds = params->ds; ds->ds_ident_hack = params->ident; - ds->ds_timer = params->values; + ds_timer(ds->ds_refs) = params->values; + // Clear any pending data that might have accumulated on + // older timer params + ds->ds_pending_data = 0; _dispatch_timer_list_update(ds); dispatch_resume(ds); dispatch_release(ds); free(params); } +static void 
+_dispatch_source_set_timer2(void *context) +{ + // Called on the source queue + struct dispatch_set_timer_params *params = context; + dispatch_suspend(params->ds); + dispatch_barrier_async_f(&_dispatch_mgr_q, params, + _dispatch_source_set_timer3); +} + void dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, uint64_t interval, uint64_t leeway) { + if (slowpath(!ds->ds_is_timer)) { + DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); + } + struct dispatch_set_timer_params *params; - + // we use zero internally to mean disabled if (interval == 0) { interval = 1; @@ -1497,121 +1219,89 @@ dispatch_source_set_timer(dispatch_source_t ds, // 6866347 - make sure nanoseconds won't overflow interval = INT64_MAX; } + if ((int64_t)leeway < 0) { + leeway = INT64_MAX; + } - // Suspend the source so that it doesn't fire with pending changes - // The use of suspend/resume requires the external retain/release - dispatch_retain(ds); - dispatch_suspend(ds); - if (start == DISPATCH_TIME_NOW) { - start = mach_absolute_time(); + start = _dispatch_absolute_time(); } else if (start == DISPATCH_TIME_FOREVER) { start = INT64_MAX; } - while (!(params = malloc(sizeof(struct dispatch_set_timer_params)))) { + while (!(params = calloc(1ul, sizeof(struct dispatch_set_timer_params)))) { sleep(1); } params->ds = ds; - params->values.flags = ds->ds_timer.flags; + params->values.flags = ds_timer(ds->ds_refs).flags; if ((int64_t)start < 0) { // wall clock params->ident = DISPATCH_TIMER_INDEX_WALL; - params->values.start = -((int64_t)start); params->values.target = -((int64_t)start); params->values.interval = interval; params->values.leeway = leeway; params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; } else { - // mach clock + // absolute clock params->ident = DISPATCH_TIMER_INDEX_MACH; - params->values.start = start; params->values.target = start; - params->values.interval = _dispatch_convert_nano2mach(interval); - params->values.leeway = 
_dispatch_convert_nano2mach(leeway); - params->values.flags &= ~DISPATCH_TIMER_WALL_CLOCK; - } - - dispatch_barrier_async_f(&_dispatch_mgr_q, params, _dispatch_source_set_timer2); -} - -#ifndef DISPATCH_NO_LEGACY -// LEGACY -long -dispatch_source_timer_set_time(dispatch_source_t ds, uint64_t nanoseconds, uint64_t leeway) -{ - dispatch_time_t start; - if (nanoseconds == 0) { - nanoseconds = 1; - } - if (ds->ds_timer.flags == (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_WALL_CLOCK)) { - static const struct timespec t0; - start = dispatch_walltime(&t0, nanoseconds); - } else if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { - start = dispatch_walltime(DISPATCH_TIME_NOW, nanoseconds); - } else { - start = dispatch_time(DISPATCH_TIME_NOW, nanoseconds); - } - if (ds->ds_timer.flags & (DISPATCH_TIMER_ABSOLUTE|DISPATCH_TIMER_ONESHOT)) { - // 6866347 - make sure nanoseconds won't overflow - nanoseconds = INT64_MAX; // non-repeating (~292 years) - } - dispatch_source_set_timer(ds, start, nanoseconds, leeway); - return 0; -} + params->values.interval = _dispatch_time_nano2mach(interval); + + // rdar://problem/7287561 interval must be at least one in + // in order to avoid later division by zero when calculating + // the missed interval count. 
(NOTE: the wall clock's + // interval is already "fixed" to be 1 or more) + if (params->values.interval < 1) { + params->values.interval = 1; + } -// LEGACY -uint64_t -dispatch_event_get_nanoseconds(dispatch_source_t ds) -{ - if (ds->ds_timer.flags & DISPATCH_TIMER_WALL_CLOCK) { - return ds->ds_timer.interval; - } else { - return _dispatch_convert_mach2nano(ds->ds_timer.interval); + params->values.leeway = _dispatch_time_nano2mach(leeway); + params->values.flags &= ~DISPATCH_TIMER_WALL_CLOCK; } + // Suspend the source so that it doesn't fire with pending changes + // The use of suspend/resume requires the external retain/release + dispatch_retain(ds); + dispatch_barrier_async_f((dispatch_queue_t)ds, params, + _dispatch_source_set_timer2); } -#endif /* DISPATCH_NO_LEGACY */ -static dispatch_source_t _dispatch_mach_notify_source; -static mach_port_t _dispatch_port_set; -static mach_port_t _dispatch_event_port; +#pragma mark - +#pragma mark dispatch_mach -#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) -#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? 
(MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) +#if HAVE_MACH -#define _DISPATCH_MACHPORT_HASH_SIZE 32 -#define _DISPATCH_MACHPORT_HASH(x) _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) +#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG +#define _dispatch_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_debug_machport(name) +#endif -static void _dispatch_port_set_init(void *); -static mach_port_t _dispatch_get_port_set(void); +// Flags for all notifications that are registered/unregistered when a +// send-possible notification is requested/delivered +#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ + DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) -void -_dispatch_drain_mach_messages(struct kevent *ke) -{ - dispatch_source_t dsi; - dispatch_kevent_t dk; - struct kevent ke2; +#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) +#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? \ + (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) - if (!dispatch_assume(ke->data)) { - return; - } - dk = _dispatch_kevent_find(ke->data, EVFILT_MACHPORT); - if (!dispatch_assume(dk)) { - return; - } - _dispatch_kevent_machport_disable(dk); // emulate EV_DISPATCH +#define _DISPATCH_MACHPORT_HASH_SIZE 32 +#define _DISPATCH_MACHPORT_HASH(x) \ + _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) - EV_SET(&ke2, ke->data, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, DISPATCH_MACHPORT_RECV, 0, dk); +static dispatch_source_t _dispatch_mach_notify_source; +static mach_port_t _dispatch_port_set; +static mach_port_t _dispatch_event_port; - TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { - _dispatch_source_merge_kevent(dsi, &ke2); - } -} +static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags, uint32_t mask, + mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); -void -_dispatch_port_set_init(void *context 
__attribute__((unused))) +static void +_dispatch_port_set_init(void *context DISPATCH_UNUSED) { struct kevent kev = { .filter = EVFILT_MACHPORT, @@ -1619,22 +1309,39 @@ _dispatch_port_set_init(void *context __attribute__((unused))) }; kern_return_t kr; - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_port_set); + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, + &_dispatch_port_set); DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &_dispatch_event_port); + if (kr) { + _dispatch_bug_mach_client( + "_dispatch_port_set_init: mach_port_allocate() failed", kr); + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create port set"); + } + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_event_port); DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - kr = mach_port_move_member(mach_task_self(), _dispatch_event_port, _dispatch_port_set); + if (kr) { + _dispatch_bug_mach_client( + "_dispatch_port_set_init: mach_port_allocate() failed", kr); + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create receive right"); + } + kr = mach_port_move_member(mach_task_self(), _dispatch_event_port, + _dispatch_port_set); DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); + if (kr) { + _dispatch_bug_mach_client( + "_dispatch_port_set_init: mach_port_move_member() failed", kr); + DISPATCH_CLIENT_CRASH("mach_port_move_member() failed"); + } kev.ident = _dispatch_port_set; _dispatch_update_kq(&kev); } -mach_port_t +static mach_port_t _dispatch_get_port_set(void) { static dispatch_once_t pred; @@ -1644,284 +1351,334 @@ _dispatch_get_port_set(void) return _dispatch_port_set; } -void -_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) -{ - mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; - - if ((new_flags & DISPATCH_MACHPORT_RECV) 
|| (!new_flags && !del_flags && dk->dk_kevent.fflags & DISPATCH_MACHPORT_RECV)) { - _dispatch_kevent_machport_enable(dk); - } - if (new_flags & DISPATCH_MACHPORT_DEAD) { - kr = mach_port_request_notification(mach_task_self(), port, MACH_NOTIFY_DEAD_NAME, 1, - _dispatch_event_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - - switch(kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - // Supress errors - break; - default: - // Else, we dont expect any errors from mach. Log any errors if we do - if (dispatch_assume_zero(kr)) { - // log the error - } else if (dispatch_assume_zero(previous)) { - // Another subsystem has beat libdispatch to requesting the Mach - // dead-name notification on this port. We should technically cache the - // previous port and message it when the kernel messages our port. Or - // we can just say screw those subsystems and drop the previous port. - // They should adopt libdispatch :-P - kr = mach_port_deallocate(mach_task_self(), previous); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - } - } - } - - if (del_flags & DISPATCH_MACHPORT_RECV) { - _dispatch_kevent_machport_disable(dk); - } - if (del_flags & DISPATCH_MACHPORT_DEAD) { - kr = mach_port_request_notification(mach_task_self(), (mach_port_t)dk->dk_kevent.ident, - MACH_NOTIFY_DEAD_NAME, 1, MACH_PORT_NULL, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - switch (kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - case KERN_INVALID_ARGUMENT: - break; - default: - if (dispatch_assume_zero(kr)) { - // log the error - } else if (previous) { - // the kernel has not consumed the right yet - dispatch_assume_zero(_dispatch_send_consume_send_once_right(previous)); - } - } - } -} - -void +static kern_return_t _dispatch_kevent_machport_enable(dispatch_kevent_t dk) { mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; kern_return_t kr; + _dispatch_debug_machport(mp); kr = mach_port_move_member(mach_task_self(), mp, 
_dispatch_get_port_set()); - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_NAME: + if (slowpath(kr)) { + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_NAME: #if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed prematurely", mp); + _dispatch_log("Corruption: Mach receive right 0x%x destroyed " + "prematurely", mp); #endif - break; - default: - dispatch_assume_zero(kr); + break; + case KERN_INVALID_RIGHT: + _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " + "mach_port_move_member() failed ", kr); + break; + default: + (void)dispatch_assume_zero(kr); + break; + } } + return kr; } -void +static void _dispatch_kevent_machport_disable(dispatch_kevent_t dk) { mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; kern_return_t kr; + _dispatch_debug_machport(mp); kr = mach_port_move_member(mach_task_self(), mp, 0); - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_RIGHT: - case KERN_INVALID_NAME: + if (slowpath(kr)) { + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_RIGHT: + case KERN_INVALID_NAME: #if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed prematurely", mp); + _dispatch_log("Corruption: Mach receive right 0x%x destroyed " + "prematurely", mp); #endif - break; - case 0: - break; - default: - dispatch_assume_zero(kr); - break; + break; + default: + (void)dispatch_assume_zero(kr); + break; + } } } -#define _DISPATCH_MIN_MSG_SZ (8ul * 1024ul - MAX_TRAILER_SIZE) -#ifndef DISPATCH_NO_LEGACY -dispatch_source_t -dispatch_source_mig_create(mach_port_t mport, size_t max_msg_size, dispatch_source_attr_t attr, - dispatch_queue_t dq, dispatch_mig_callback_t mig_callback) -{ - if (max_msg_size < _DISPATCH_MIN_MSG_SZ) { - max_msg_size = _DISPATCH_MIN_MSG_SZ; - } - return dispatch_source_machport_create(mport, DISPATCH_MACHPORT_RECV, attr, dq, - ^(dispatch_source_t ds) { - if (!dispatch_source_get_error(ds, NULL)) { - if (dq->dq_width != 1) { - 
dispatch_retain(ds); // this is a shim -- use the external retain - dispatch_async(dq, ^{ - dispatch_mig_server(ds, max_msg_size, mig_callback); - dispatch_release(ds); // this is a shim -- use the external release - }); - } else { - dispatch_mig_server(ds, max_msg_size, mig_callback); - } - } - }); +kern_return_t +_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr_recv = 0, kr_sp = 0; + + dispatch_assert_zero(new_flags & del_flags); + if (new_flags & DISPATCH_MACH_RECV_MESSAGE) { + kr_recv = _dispatch_kevent_machport_enable(dk); + } else if (del_flags & DISPATCH_MACH_RECV_MESSAGE) { + _dispatch_kevent_machport_disable(dk); + } + if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || + (del_flags & _DISPATCH_MACH_SP_FLAGS)) { + // Requesting a (delayed) non-sync send-possible notification + // registers for both immediate dead-name notification and delayed-arm + // send-possible notification for the port. + // The send-possible notification is armed when a mach_msg() with the + // the MACH_SEND_NOTIFY to the port times out. + // If send-possible is unavailable, fall back to immediate dead-name + // registration rdar://problem/2527840&9008724 + kr_sp = _dispatch_mach_notify_update(dk, new_flags, del_flags, + _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, + MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 1 : 0); + } + + return (kr_recv ? 
kr_recv : kr_sp); } -#endif /* DISPATCH_NO_LEGACY */ -static void -_dispatch_mach_notify_source_init(void *context __attribute__((unused))) +void +_dispatch_drain_mach_messages(struct kevent *ke) { - size_t maxsz = sizeof(union __RequestUnion___dispatch_send_libdispatch_internal_protocol_subsystem); + mach_port_t name = (mach_port_name_t)ke->data; + dispatch_source_refs_t dri; + dispatch_kevent_t dk; + struct kevent kev; - if (sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem) > maxsz) { - maxsz = sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem); + if (!dispatch_assume(name)) { + return; } + _dispatch_debug_machport(name); + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + return; + } + _dispatch_kevent_machport_disable(dk); // emulate EV_DISPATCH - _dispatch_get_port_set(); - - _dispatch_mach_notify_source = dispatch_source_mig_create(_dispatch_event_port, - maxsz, NULL, &_dispatch_mgr_q, libdispatch_internal_protocol_server); + EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, + DISPATCH_MACH_RECV_MESSAGE, 0, dk); - dispatch_assert(_dispatch_mach_notify_source); + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); + } } -kern_return_t -_dispatch_mach_notify_port_deleted(mach_port_t notify __attribute__((unused)), mach_port_name_t name) +static inline void +_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, uint32_t unreg, + bool final) { - dispatch_source_t dsi; + dispatch_source_refs_t dri; dispatch_kevent_t dk; struct kevent kev; -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x deleted prematurely", name); -#endif + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dk) { + return; + } + + // Update notification registration state. 
+ dk->dk_kevent.data &= ~unreg; + if (!final) { + // Re-register for notification before delivery + _dispatch_kevent_resume(dk, flag, 0); + } + + EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE, flag, 0, dk); + + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); + if (final) { + // this can never happen again + // this must happen after the merge + // this may be racy in the future, but we don't provide a 'setter' + // API for the mask yet + _dispatch_source_from_refs(dri)->ds_pending_data_mask &= ~unreg; + } + } + + if (final) { + // no more sources have these flags + dk->dk_kevent.fflags &= ~unreg; + } +} + +static kern_return_t +_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, + mach_port_mscount_t notify_sync) +{ + mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; + typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; + kern_return_t kr, krr = 0; + + // Update notification registration state. + dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; + dk->dk_kevent.data &= ~(del_flags & mask); + + _dispatch_debug_machport(port); + if ((dk->dk_kevent.data & mask) && !(prev & mask)) { + previous = MACH_PORT_NULL; + krr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, _dispatch_event_port, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(krr); + + switch(krr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Supress errors & clear registration state + dk->dk_kevent.data &= ~mask; + break; + default: + // Else, we dont expect any errors from mach. 
Log any errors + if (dispatch_assume_zero(krr)) { + // log the error & clear registration state + dk->dk_kevent.data &= ~mask; + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the + // specified Mach notification on this port. We should + // technically cache the previous port and message it when the + // kernel messages our port. Or we can just say screw those + // subsystems and deallocate the previous port. + // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + previous = MACH_PORT_NULL; + } + } + } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { + previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, MACH_PORT_NULL, + MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dk) { - goto out; + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } + } + } else { + return 0; } - - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH|EV_EOF, DISPATCH_MACHPORT_DELETED, 0, dk); - - TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { - _dispatch_source_merge_kevent(dsi, &kev); - // this can never happen again - // this must happen after the merge - // this may be racy in the future, but we don't provide a 'setter' API for the mask yet - dsi->ds_pending_data_mask &= ~DISPATCH_MACHPORT_DELETED; + if (slowpath(previous)) { + // the kernel has not consumed the send-once right yet + (void)dispatch_assume_zero( + _dispatch_send_consume_send_once_right(previous)); } - - // no more sources have this flag - dk->dk_kevent.fflags &= ~DISPATCH_MACHPORT_DELETED; - -out: - return KERN_SUCCESS; + return krr; } -kern_return_t 
-_dispatch_mach_notify_port_destroyed(mach_port_t notify __attribute__((unused)), mach_port_t name) +static void +_dispatch_mach_notify_source2(void *context) { - kern_return_t kr; - // this function should never be called - dispatch_assume_zero(name); - kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - return KERN_SUCCESS; + dispatch_source_t ds = context; + size_t maxsz = MAX(sizeof(union + __RequestUnion___dispatch_send_libdispatch_internal_protocol_subsystem), + sizeof(union + __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); + + dispatch_mig_server(ds, maxsz, libdispatch_internal_protocol_server); } -kern_return_t -_dispatch_mach_notify_no_senders(mach_port_t notify, mach_port_mscount_t mscnt __attribute__((unused))) +void +_dispatch_mach_notify_source_init(void *context DISPATCH_UNUSED) { - // this function should never be called - dispatch_assume_zero(notify); - return KERN_SUCCESS; + _dispatch_get_port_set(); + + _dispatch_mach_notify_source = dispatch_source_create( + DISPATCH_SOURCE_TYPE_MACH_RECV, _dispatch_event_port, 0, + &_dispatch_mgr_q); + dispatch_assert(_dispatch_mach_notify_source); + dispatch_set_context(_dispatch_mach_notify_source, + _dispatch_mach_notify_source); + dispatch_source_set_event_handler_f(_dispatch_mach_notify_source, + _dispatch_mach_notify_source2); + dispatch_resume(_dispatch_mach_notify_source); } kern_return_t -_dispatch_mach_notify_send_once(mach_port_t notify __attribute__((unused))) +_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) { - // we only register for dead-name notifications - // some code deallocated our send-once right without consuming it #if DISPATCH_DEBUG - _dispatch_log("Corruption: An app/library deleted a libdispatch dead-name notification"); + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " + "deleted prematurely", name); #endif + + 
_dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, + _DISPATCH_MACH_SP_FLAGS, true); + return KERN_SUCCESS; } kern_return_t -_dispatch_mach_notify_dead_name(mach_port_t notify __attribute__((unused)), mach_port_name_t name) +_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) { - dispatch_source_t dsi; - dispatch_kevent_t dk; - struct kevent kev; kern_return_t kr; - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dk) { - goto out; - } - - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH|EV_EOF, DISPATCH_MACHPORT_DEAD, 0, dk); - - TAILQ_FOREACH(dsi, &dk->dk_sources, ds_list) { - _dispatch_source_merge_kevent(dsi, &kev); - // this can never happen again - // this must happen after the merge - // this may be racy in the future, but we don't provide a 'setter' API for the mask yet - dsi->ds_pending_data_mask &= ~DISPATCH_MACHPORT_DEAD; - } - - // no more sources have this flag - dk->dk_kevent.fflags &= ~DISPATCH_MACHPORT_DEAD; +#if DISPATCH_DEBUG + _dispatch_log("machport[0x%08x]: dead-name notification: %s", + name, __func__); +#endif + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, + _DISPATCH_MACH_SP_FLAGS, true); -out: - // the act of receiving a dead name notification allocates a dead-name right that must be deallocated + // the act of receiving a dead name notification allocates a dead-name + // right that must be deallocated kr = mach_port_deallocate(mach_task_self(), name); DISPATCH_VERIFY_MIG(kr); - //dispatch_assume_zero(kr); + //(void)dispatch_assume_zero(kr); return KERN_SUCCESS; } kern_return_t -_dispatch_wakeup_main_thread(mach_port_t mp __attribute__((unused))) +_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) { - // dummy function just to pop out the main thread out of mach_msg() - return 0; -} +#if DISPATCH_DEBUG + _dispatch_log("machport[0x%08x]: 
send-possible notification: %s", + name, __func__); +#endif + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, + _DISPATCH_MACH_SP_FLAGS, false); -kern_return_t -_dispatch_consume_send_once_right(mach_port_t mp __attribute__((unused))) -{ - // dummy function to consume a send-once right - return 0; + return KERN_SUCCESS; } mach_msg_return_t -dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, + dispatch_mig_callback_t callback) { mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0); - mach_msg_options_t tmp_options = options; + mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; mach_msg_return_t kr = 0; - unsigned int cnt = 1000; // do not stall out serial queues + unsigned int cnt = 1000; // do not stall out serial queues int demux_success; - - maxmsgsz += MAX_TRAILER_SIZE; + bool received = false; + size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; // XXX FIXME -- allocate these elsewhere - bufRequest = alloca(maxmsgsz); - bufReply = alloca(maxmsgsz); - bufReply->Head.msgh_size = 0; // make CLANG happy + bufRequest = alloca(rcv_size); + bufReply = alloca(rcv_size); + bufReply->Head.msgh_size = 0; // make CLANG happy + bufRequest->RetCode = 0; +#if DISPATCH_DEBUG + options |= MACH_RCV_LARGE; // rdar://problem/8422992 +#endif + tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { if (DISPATCH_OBJECT_SUSPENDED(ds) || (--cnt == 0)) { @@ -1932,9 +1689,8 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback break; } } - kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, - (mach_msg_size_t)maxmsgsz, (mach_port_t)ds->ds_ident_hack, 0, 0); + (mach_msg_size_t)rcv_size, 
(mach_port_t)ds->ds_ident_hack, 0,0); tmp_options = options; @@ -1947,10 +1703,43 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback } break; case MACH_RCV_TIMED_OUT: + // Don't return an error if a message was sent this time or + // a message was successfully received previously + // rdar://problems/7363620&7791738 + if(bufReply->Head.msgh_remote_port || received) { + kr = MACH_MSG_SUCCESS; + } + break; case MACH_RCV_INVALID_NAME: break; +#if DISPATCH_DEBUG + case MACH_RCV_TOO_LARGE: + // receive messages that are too large and log their id and size + // rdar://problem/8422992 + tmp_options &= ~MACH_RCV_LARGE; + size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; + void *large_buf = malloc(large_size); + if (large_buf) { + rcv_size = large_size; + bufReply = large_buf; + } + if (!mach_msg(&bufReply->Head, tmp_options, 0, + (mach_msg_size_t)rcv_size, + (mach_port_t)ds->ds_ident_hack, 0, 0)) { + _dispatch_log("BUG in libdispatch client: " + "dispatch_mig_server received message larger than " + "requested size %zd: id = 0x%x, size = %d", + maxmsgsz, bufReply->Head.msgh_id, + bufReply->Head.msgh_size); + } + if (large_buf) { + free(large_buf); + } + // fall through +#endif default: - dispatch_assume_zero(kr); + _dispatch_bug_mach_client( + "dispatch_mig_server: mach_msg() failed", kr); break; } break; @@ -1959,6 +1748,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback if (!(tmp_options & MACH_RCV_MSG)) { break; } + received = true; bufTemp = bufRequest; bufRequest = bufReply; @@ -1971,7 +1761,8 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback bufRequest->Head.msgh_remote_port = 0; mach_msg_destroy(&bufRequest->Head); } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode + // is present if 
(slowpath(bufReply->RetCode)) { if (bufReply->RetCode == MIG_NO_REPLY) { continue; @@ -1985,7 +1776,8 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback if (bufReply->Head.msgh_remote_port) { tmp_options |= MACH_SEND_MSG; - if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { tmp_options |= MACH_SEND_TIMEOUT; } } @@ -1993,3 +1785,329 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback return kr; } + +#endif /* HAVE_MACH */ + +#pragma mark - +#pragma mark dispatch_source_debug + +DISPATCH_NOINLINE +static const char * +_evfiltstr(short filt) +{ + switch (filt) { +#define _evfilt2(f) case (f): return #f + _evfilt2(EVFILT_READ); + _evfilt2(EVFILT_WRITE); + _evfilt2(EVFILT_AIO); + _evfilt2(EVFILT_VNODE); + _evfilt2(EVFILT_PROC); + _evfilt2(EVFILT_SIGNAL); + _evfilt2(EVFILT_TIMER); +#ifdef EVFILT_VM + _evfilt2(EVFILT_VM); +#endif +#if HAVE_MACH + _evfilt2(EVFILT_MACHPORT); +#endif + _evfilt2(EVFILT_FS); + _evfilt2(EVFILT_USER); + + _evfilt2(DISPATCH_EVFILT_TIMER); + _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); + _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); + default: + return "EVFILT_missing"; + } +} + +static size_t +_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = ds->do_targetq; + return snprintf(buf, bufsiz, "target = %s[%p], pending_data = 0x%lx, " + "pending_data_mask = 0x%lx, ", + target ? 
target->dq_label : "", target, + ds->ds_pending_data, ds->ds_pending_data_mask); +} + +static size_t +_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + dispatch_source_refs_t dr = ds->ds_refs; + return snprintf(buf, bufsiz, "timer = { target = 0x%llx, " + "last_fire = 0x%llx, interval = 0x%llx, flags = 0x%llx }, ", + ds_timer(dr).target, ds_timer(dr).last_fire, ds_timer(dr).interval, + ds_timer(dr).flags); +} + +static size_t +_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(ds), ds); + offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); + offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); + if (ds->ds_is_timer) { + offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); + } + return offset; +} + +static size_t +_dispatch_source_kevent_debug(dispatch_source_t ds, char* buf, size_t bufsiz) +{ + size_t offset = _dispatch_source_debug(ds, buf, bufsiz); + offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", + ds->ds_dkev ? 
_evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); + return offset; +} + +#if DISPATCH_DEBUG +void +dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str) +{ + size_t i; + for (i = 0; i < count; ++i) { + _dispatch_log("kevent[%lu] = { ident = %p, filter = %s, flags = 0x%x, " + "fflags = 0x%x, data = %p, udata = %p }: %s", + i, (void*)kev[i].ident, _evfiltstr(kev[i].filter), kev[i].flags, + kev[i].fflags, (void*)kev[i].data, (void*)kev[i].udata, str); + } +} + +static void +_dispatch_kevent_debugger2(void *context) +{ + struct sockaddr sa; + socklen_t sa_len = sizeof(sa); + int c, fd = (int)(long)context; + unsigned int i; + dispatch_kevent_t dk; + dispatch_source_t ds; + dispatch_source_refs_t dr; + FILE *debug_stream; + + c = accept(fd, &sa, &sa_len); + if (c == -1) { + if (errno != EAGAIN) { + (void)dispatch_assume_zero(errno); + } + return; + } +#if 0 + int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO + if (r == -1) { + (void)dispatch_assume_zero(errno); + } +#endif + debug_stream = fdopen(c, "a"); + if (!dispatch_assume(debug_stream)) { + close(c); + return; + } + + fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); + fprintf(debug_stream, "Content-type: text/html\r\n"); + fprintf(debug_stream, "Pragma: nocache\r\n"); + fprintf(debug_stream, "\r\n"); + fprintf(debug_stream, "\n"); + fprintf(debug_stream, "PID %u\n", getpid()); + fprintf(debug_stream, "\n
    \n"); + + //fprintf(debug_stream, "DKDKDKDK" + // "DKDKDK\n"); + + for (i = 0; i < DSL_HASH_SIZE; i++) { + if (TAILQ_EMPTY(&_dispatch_sources[i])) { + continue; + } + TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { + fprintf(debug_stream, "\t
  • DK %p ident %lu filter %s flags " + "0x%hx fflags 0x%x data 0x%lx udata %p\n", + dk, (unsigned long)dk->dk_kevent.ident, + _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, + dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, + dk->dk_kevent.udata); + fprintf(debug_stream, "\t\t
      \n"); + TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { + ds = _dispatch_source_from_refs(dr); + fprintf(debug_stream, "\t\t\t
    • DS %p refcnt 0x%x suspend " + "0x%x data 0x%lx mask 0x%lx flags 0x%x
    • \n", + ds, ds->do_ref_cnt, ds->do_suspend_cnt, + ds->ds_pending_data, ds->ds_pending_data_mask, + ds->ds_atomic_flags); + if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { + dispatch_queue_t dq = ds->do_targetq; + fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x suspend " + "0x%x label: %s\n", dq, dq->do_ref_cnt, + dq->do_suspend_cnt, dq->dq_label); + } + } + fprintf(debug_stream, "\t\t
    \n"); + fprintf(debug_stream, "\t
  • \n"); + } + } + fprintf(debug_stream, "
\n\n\n"); + fflush(debug_stream); + fclose(debug_stream); +} + +static void +_dispatch_kevent_debugger2_cancel(void *context) +{ + int ret, fd = (int)(long)context; + + ret = close(fd); + if (ret != -1) { + (void)dispatch_assume_zero(errno); + } +} + +static void +_dispatch_kevent_debugger(void *context DISPATCH_UNUSED) +{ + union { + struct sockaddr_in sa_in; + struct sockaddr sa; + } sa_u = { + .sa_in = { + .sin_family = AF_INET, + .sin_addr = { htonl(INADDR_LOOPBACK), }, + }, + }; + dispatch_source_t ds; + const char *valstr; + int val, r, fd, sock_opt = 1; + socklen_t slen = sizeof(sa_u); + + if (issetugid()) { + return; + } + valstr = getenv("LIBDISPATCH_DEBUGGER"); + if (!valstr) { + return; + } + val = atoi(valstr); + if (val == 2) { + sa_u.sa_in.sin_addr.s_addr = 0; + } + fd = socket(PF_INET, SOCK_STREAM, 0); + if (fd == -1) { + (void)dispatch_assume_zero(errno); + return; + } + r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, + (socklen_t) sizeof sock_opt); + if (r == -1) { + (void)dispatch_assume_zero(errno); + goto out_bad; + } +#if 0 + r = fcntl(fd, F_SETFL, O_NONBLOCK); + if (r == -1) { + (void)dispatch_assume_zero(errno); + goto out_bad; + } +#endif + r = bind(fd, &sa_u.sa, sizeof(sa_u)); + if (r == -1) { + (void)dispatch_assume_zero(errno); + goto out_bad; + } + r = listen(fd, SOMAXCONN); + if (r == -1) { + (void)dispatch_assume_zero(errno); + goto out_bad; + } + r = getsockname(fd, &sa_u.sa, &slen); + if (r == -1) { + (void)dispatch_assume_zero(errno); + goto out_bad; + } + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, + &_dispatch_mgr_q); + if (dispatch_assume(ds)) { + _dispatch_log("LIBDISPATCH: debug port: %hu", + (in_port_t)ntohs(sa_u.sa_in.sin_port)); + + /* ownership of fd transfers to ds */ + dispatch_set_context(ds, (void *)(long)fd); + dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2); + dispatch_source_set_cancel_handler_f(ds, + _dispatch_kevent_debugger2_cancel); + dispatch_resume(ds); + 
+ return; + } +out_bad: + close(fd); +} + +#if HAVE_MACH + +#ifndef MACH_PORT_TYPE_SPREQUEST +#define MACH_PORT_TYPE_SPREQUEST 0x40000000 +#endif + +void +dispatch_debug_machport(mach_port_t name, const char* str) +{ + mach_port_type_t type; + mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0; + unsigned int dnreqs = 0, dnrsiz; + kern_return_t kr = mach_port_type(mach_task_self(), name, &type); + if (kr) { + _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name, + kr, mach_error_string(kr), str); + return; + } + if (type & MACH_PORT_TYPE_SEND) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_SEND, &ns)); + } + if (type & MACH_PORT_TYPE_SEND_ONCE) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_SEND_ONCE, &nso)); + } + if (type & MACH_PORT_TYPE_DEAD_NAME) { + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_DEAD_NAME, &nd)); + } + if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND| + MACH_PORT_TYPE_SEND_ONCE)) { + (void)dispatch_assume_zero(mach_port_dnrequest_info(mach_task_self(), + name, &dnrsiz, &dnreqs)); + } + if (type & MACH_PORT_TYPE_RECEIVE) { + mach_port_status_t status = { .mps_pset = 0, }; + mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; + (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, + MACH_PORT_RIGHT_RECEIVE, &nr)); + (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(), + name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt)); + _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " + "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) " + "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) " + "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs, + type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", + status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N", + status.mps_srights ? 
"Y":"N", status.mps_sorights, + status.mps_qlimit, status.mps_msgcount, status.mps_mscount, + status.mps_seqno, str); + } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE| + MACH_PORT_TYPE_DEAD_NAME)) { + _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " + "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs, + type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str); + } else { + _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type, + str); + } +} + +#endif // HAVE_MACH + +#endif // DISPATCH_DEBUG diff --git a/src/source_internal.h b/src/source_internal.h index e7126dbb3..a44eef7c1 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -1,20 +1,20 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ @@ -32,6 +32,45 @@ #include // for HeaderDoc #endif +// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t +// bit values must not overlap as they share the same kevent fflags ! + +/*! + * @enum dispatch_source_mach_send_flags_t + * + * @constant DISPATCH_MACH_SEND_DELETED + * Port-deleted notification. Disabled for source registration. + */ +enum { + DISPATCH_MACH_SEND_DELETED = 0x4, +}; +/*! 
+ * @enum dispatch_source_mach_recv_flags_t + * + * @constant DISPATCH_MACH_RECV_MESSAGE + * Receive right has pending messages + * + * @constant DISPATCH_MACH_RECV_NO_SENDERS + * Receive right has no more senders. TODO + */ +enum { + DISPATCH_MACH_RECV_MESSAGE = 0x2, + DISPATCH_MACH_RECV_NO_SENDERS = 0x10, +}; + +enum { + DISPATCH_TIMER_WALL_CLOCK = 0x4, +}; + +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 3) + +#define DISPATCH_TIMER_INDEX_WALL 0 +#define DISPATCH_TIMER_INDEX_MACH 1 +#define DISPATCH_TIMER_INDEX_DISARM 2 + struct dispatch_source_vtable_s { DISPATCH_VTABLE_HEADER(dispatch_source_s); }; @@ -40,21 +79,56 @@ extern const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable; struct dispatch_kevent_s { TAILQ_ENTRY(dispatch_kevent_s) dk_list; - TAILQ_HEAD(, dispatch_source_s) dk_sources; + TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; struct kevent dk_kevent; }; typedef struct dispatch_kevent_s *dispatch_kevent_t; +struct dispatch_source_type_s { + struct kevent ke; + uint64_t mask; + void (*init)(dispatch_source_t ds, dispatch_source_type_t type, + uintptr_t handle, unsigned long mask, dispatch_queue_t q); +}; + struct dispatch_timer_source_s { uint64_t target; - uint64_t start; + uint64_t last_fire; uint64_t interval; uint64_t leeway; uint64_t flags; // dispatch_timer_flags_t + unsigned long missed; +}; + +// Source state which may contain references to the source object +// Separately allocated so that 'leaks' can see sources +struct dispatch_source_refs_s { + TAILQ_ENTRY(dispatch_source_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t + dispatch_function_t ds_handler_func; + void *ds_handler_ctxt; + void *ds_cancel_handler; + void *ds_registration_handler; }; +typedef struct dispatch_source_refs_s *dispatch_source_refs_t; + 
+struct dispatch_timer_source_refs_s { + struct dispatch_source_refs_s _ds_refs; + struct dispatch_timer_source_s _ds_timer; +}; + +#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) +#define _dispatch_wref2ptr(ref) ((void*)~(ref)) +#define _dispatch_source_from_refs(dr) \ + ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref)) +#define ds_timer(dr) \ + (((struct dispatch_timer_source_refs_s *)(dr))->_ds_timer) + +// ds_atomic_flags bits #define DSF_CANCELED 1u // cancellation has been requested +#define DSF_ARMED 2u // source is armed struct dispatch_source_s { DISPATCH_STRUCT_HEADER(dispatch_source_s, dispatch_source_vtable_s); @@ -66,37 +140,26 @@ struct dispatch_source_s { struct { char dq_label[8]; dispatch_kevent_t ds_dkev; - - dispatch_source_handler_function_t ds_handler_func; - void *ds_handler_ctxt; - - void *ds_cancel_handler; - - unsigned int ds_is_level:1, - ds_is_adder:1, - ds_is_installed:1, - ds_needs_rearm:1, - ds_is_armed:1, - ds_is_legacy:1, - ds_cancel_is_block:1, - ds_handler_is_block:1; - + dispatch_source_refs_t ds_refs; unsigned int ds_atomic_flags; - + unsigned int + ds_is_level:1, + ds_is_adder:1, + ds_is_installed:1, + ds_needs_rearm:1, + ds_is_timer:1, + ds_cancel_is_block:1, + ds_handler_is_block:1, + ds_registration_is_block:1; unsigned long ds_data; unsigned long ds_pending_data; unsigned long ds_pending_data_mask; - - TAILQ_ENTRY(dispatch_source_s) ds_list; - unsigned long ds_ident_hack; - - struct dispatch_timer_source_s ds_timer; }; }; }; - -void _dispatch_source_legacy_xref_release(dispatch_source_t ds); +void _dispatch_source_xref_release(dispatch_source_t ds); +void _dispatch_mach_notify_source_init(void *context); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/time.c b/src/time.c index 07506f256..4c0285ad9 100644 --- a/src/time.c +++ b/src/time.c @@ -1,26 +1,26 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ - * + * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 - * + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. - * + * * @APPLE_APACHE_LICENSE_HEADER_END@ */ #include "internal.h" -uint64_t +uint64_t _dispatch_get_nanoseconds(void) { struct timeval now; @@ -31,77 +31,17 @@ _dispatch_get_nanoseconds(void) return now.tv_sec * NSEC_PER_SEC + now.tv_usec * NSEC_PER_USEC; } -#if defined(__i386__) || defined(__x86_64__) -// x86 currently implements mach time in nanoseconds; this is NOT likely to change -#define _dispatch_time_mach2nano(x) (x) -#define _dispatch_time_nano2mach(x) (x) -#else -static struct _dispatch_host_time_data_s { - mach_timebase_info_data_t tbi; - uint64_t safe_numer_math; - dispatch_once_t pred; -} _dispatch_host_time_data; - -static void -_dispatch_get_host_time_init(void *context __attribute__((unused))) -{ - dispatch_assume_zero(mach_timebase_info(&_dispatch_host_time_data.tbi)); - _dispatch_host_time_data.safe_numer_math = DISPATCH_TIME_FOREVER / _dispatch_host_time_data.tbi.numer; -} +#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) +DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data; -static uint64_t -_dispatch_time_mach2nano(uint64_t nsec) +void +_dispatch_get_host_time_init(void *context DISPATCH_UNUSED) { - struct _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; - uint64_t small_tmp = nsec; -#ifdef __LP64__ - __uint128_t big_tmp = nsec; -#else - long double big_tmp = nsec; -#endif - - 
dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - - if (slowpath(data->tbi.numer != data->tbi.denom)) { - if (nsec < data->safe_numer_math) { - small_tmp *= data->tbi.numer; - small_tmp /= data->tbi.denom; - } else { - big_tmp *= data->tbi.numer; - big_tmp /= data->tbi.denom; - small_tmp = big_tmp; - } - } - return small_tmp; -} - -static int64_t -_dispatch_time_nano2mach(int64_t nsec) -{ - struct _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; -#ifdef __LP64__ - __int128_t big_tmp = nsec; -#else - long double big_tmp = nsec; -#endif - - dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - - if (fastpath(data->tbi.numer == data->tbi.denom)) { - return nsec; - } - - // Multiply by the inverse to convert nsec to Mach absolute time - big_tmp *= data->tbi.denom; - big_tmp /= data->tbi.numer; - - if (big_tmp > INT64_MAX) { - return INT64_MAX; - } - if (big_tmp < INT64_MIN) { - return INT64_MIN; - } - return big_tmp; + mach_timebase_info_data_t tbi; + (void)dispatch_assume_zero(mach_timebase_info(&tbi)); + _dispatch_host_time_data.frac = tbi.numer; + _dispatch_host_time_data.frac /= tbi.denom; + _dispatch_host_time_data.ratio_1_to_1 = (tbi.numer == tbi.denom); } #endif @@ -115,29 +55,29 @@ dispatch_time(dispatch_time_t inval, int64_t delta) // wall clock if (delta >= 0) { if ((int64_t)(inval -= delta) >= 0) { - return DISPATCH_TIME_FOREVER; // overflow + return DISPATCH_TIME_FOREVER; // overflow } return inval; } if ((int64_t)(inval -= delta) >= -1) { // -1 is special == DISPATCH_TIME_FOREVER == forever - return -2; // underflow + return -2; // underflow } return inval; } // mach clock delta = _dispatch_time_nano2mach(delta); - if (inval == 0) { - inval = mach_absolute_time(); + if (inval == 0) { + inval = _dispatch_absolute_time(); } if (delta >= 0) { if ((int64_t)(inval += delta) <= 0) { - return DISPATCH_TIME_FOREVER; // overflow + return DISPATCH_TIME_FOREVER; // overflow } return inval; } if ((int64_t)(inval += 
delta) < 1) { - return 1; // underflow + return 1; // underflow } return inval; } @@ -146,7 +86,7 @@ dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { int64_t nsec; - + if (inval) { nsec = inval->tv_sec * 1000000000ull + inval->tv_nsec; } else { @@ -178,6 +118,6 @@ _dispatch_timeout(dispatch_time_t when) now = _dispatch_get_nanoseconds(); return now >= when ? 0 : when - now; } - now = mach_absolute_time(); + now = _dispatch_absolute_time(); return now >= when ? 0 : _dispatch_time_mach2nano(when - now); } diff --git a/src/trace.h b/src/trace.h new file mode 100644 index 000000000..0d9bc3dc5 --- /dev/null +++ b/src/trace.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_TRACE__ +#define __DISPATCH_TRACE__ + +#if DISPATCH_USE_DTRACE + +#include "provider.h" + +#define _dispatch_trace_callout(_c, _f, _dcc) do { \ + if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ + slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \ + dispatch_queue_t _dq = _dispatch_queue_get_current(); \ + char *_label = _dq ? 
_dq->dq_label : ""; \ + dispatch_function_t _func = (dispatch_function_t)(_f); \ + void *_ctxt = (_c); \ + DISPATCH_CALLOUT_ENTRY(_dq, _label, _func, _ctxt); \ + _dcc; \ + DISPATCH_CALLOUT_RETURN(_dq, _label, _func, _ctxt); \ + return; \ + } \ + return _dcc; \ + } while (0) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout(void *ctxt, dispatch_function_t f) +{ + _dispatch_trace_callout(ctxt, f == _dispatch_call_block_and_release && + ctxt ? ((struct Block_basic *)ctxt)->Block_invoke : f, + _dispatch_client_callout(ctxt, f)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) +{ + _dispatch_trace_callout(ctxt, f, _dispatch_client_callout2(ctxt, i, f)); +} + +#ifdef __BLOCKS__ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout_block(dispatch_block_t b) +{ + struct Block_basic *bb = (void*)b; + _dispatch_trace_callout(b, bb->Block_invoke, + _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke)); +} +#endif + +#define _dispatch_client_callout _dispatch_trace_client_callout +#define _dispatch_client_callout2 _dispatch_trace_client_callout2 +#define _dispatch_client_callout_block _dispatch_trace_client_callout_block + +#define _dispatch_trace_continuation(_q, _o, _t) do { \ + dispatch_queue_t _dq = (_q); \ + char *_label = _dq ? 
_dq->dq_label : ""; \ + struct dispatch_object_s *_do = (_o); \ + char *_kind; \ + dispatch_function_t _func; \ + void *_ctxt; \ + if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ + _ctxt = _do->do_ctxt; \ + _kind = (char*)dx_kind(_do); \ + if (dx_type(_do) == DISPATCH_SOURCE_KEVENT_TYPE && \ + (_dq) != &_dispatch_mgr_q) { \ + _func = ((dispatch_source_t)_do)->ds_refs->ds_handler_func; \ + } else { \ + _func = (dispatch_function_t)_dispatch_queue_invoke; \ + } \ + } else { \ + struct dispatch_continuation_s *_dc = (void*)(_do); \ + _ctxt = _dc->dc_ctxt; \ + if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ + _kind = "semaphore"; \ + _func = (dispatch_function_t)dispatch_semaphore_signal; \ + } else if (_dc->dc_func == _dispatch_call_block_and_release) { \ + _kind = "block"; \ + _func = ((struct Block_basic *)_dc->dc_ctxt)->Block_invoke;\ + } else { \ + _kind = "function"; \ + _func = _dc->dc_func; \ + } \ + } \ + _t(_dq, _label, _do, _kind, _func, _ctxt); \ + } while (0) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail) +{ + if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + struct dispatch_object_s *dou = _head._do; + do { + _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + } while (dou != _tail._do && (dou = dou->do_next)); + } + _dispatch_queue_push_list(dq, _head, _tail); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) +{ + _dispatch_queue_push_list(dq, dou, dou); +} + +#define _dispatch_queue_push_list _dispatch_trace_queue_push_list + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_continuation_pop(dispatch_queue_t dq, + dispatch_object_t dou) +{ + if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) { + _dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP); + } +} +#else + +#define _dispatch_queue_push_notrace _dispatch_queue_push +#define 
_dispatch_trace_continuation_pop(dq, dou) + +#endif // DISPATCH_USE_DTRACE + +#endif // __DISPATCH_TRACE__ diff --git a/tools/dispatch_trace.d b/tools/dispatch_trace.d new file mode 100755 index 000000000..9059e4ed2 --- /dev/null +++ b/tools/dispatch_trace.d @@ -0,0 +1,76 @@ +#!/usr/sbin/dtrace -Z -s + +/* + * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * Usage: dispatch_dtrace.d -p [pid] + * traced process must have been executed with + * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug + */ + +#pragma D option quiet +#pragma D option bufsize=16m + +BEGIN { + printf("%-8s %-3s %-8s %-35s%-15s%-?s %-43s%-?s %-14s%-?s %s\n", + "Time us", "CPU", "Thread", "Function", "Probe", "Queue", "Label", + "Item", "Kind", "Context", "Symbol"); +} + +dispatch$target:libdispatch_profile.dylib::queue-push, +dispatch$target:libdispatch_debug.dylib::queue-push, +dispatch$target:libdispatch_profile.dylib::queue-pop, +dispatch$target:libdispatch_debug.dylib::queue-pop, +dispatch$target:libdispatch_profile.dylib::callout-entry, +dispatch$target:libdispatch_debug.dylib::callout-entry, +dispatch$target:libdispatch_profile.dylib::callout-return, +dispatch$target:libdispatch_debug.dylib::callout-return /!start/ { + start = walltimestamp; +} + +/* probe queue-push/-pop(dispatch_queue_t queue, const char *label, + * 
dispatch_object_t item, const char *kind, + * dispatch_function_t function, void *context) + */ +dispatch$target:libdispatch_profile.dylib::queue-push, +dispatch$target:libdispatch_debug.dylib::queue-push, +dispatch$target:libdispatch_profile.dylib::queue-pop, +dispatch$target:libdispatch_debug.dylib::queue-pop { + printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s0x%0?p %-14s0x%0?p", + (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, + copyinstr(arg1, 42), arg2, copyinstr(arg3, 13), arg5); + usym(arg4); + printf("\n"); +} + +/* probe callout-entry/-return(dispatch_queue_t queue, const char *label, + * dispatch_function_t function, void *context) + */ +dispatch$target:libdispatch_profile.dylib::callout-entry, +dispatch$target:libdispatch_debug.dylib::callout-entry, +dispatch$target:libdispatch_profile.dylib::callout-return, +dispatch$target:libdispatch_debug.dylib::callout-return { + printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s%-?s %-14s0x%0?p", + (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, + copyinstr(arg1, 42), "", "", arg3); + usym(arg2); + printf("\n"); +} diff --git a/xcodeconfig/libdispatch-resolved.xcconfig b/xcodeconfig/libdispatch-resolved.xcconfig new file mode 100644 index 000000000..70e405f84 --- /dev/null +++ b/xcodeconfig/libdispatch-resolved.xcconfig @@ -0,0 +1,25 @@ +// +// Copyright (c) 2010-2011 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +SUPPORTED_PLATFORMS = iphoneos +PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT) +OTHER_LDFLAGS = +SKIP_INSTALL = YES +VERSIONING_SYSTEM = diff --git a/xcodeconfig/libdispatch-resolver.xcconfig b/xcodeconfig/libdispatch-resolver.xcconfig new file mode 100644 index 000000000..d8abe3dea --- /dev/null +++ b/xcodeconfig/libdispatch-resolver.xcconfig @@ -0,0 +1,20 @@ +// +// Copyright (c) 2010-2011 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig new file mode 100644 index 000000000..e7d44f4f6 --- /dev/null +++ b/xcodeconfig/libdispatch.xcconfig @@ -0,0 +1,67 @@ +// +// Copyright (c) 2010-2011 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +#include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" +SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator +ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS +PRODUCT_NAME = libdispatch +PRODUCT_NAME[sdk=iphonesimulator*] = libdispatch_sim +EXECUTABLE_PREFIX = +LD_DYLIB_INSTALL_NAME = /usr/lib/system/$(EXECUTABLE_NAME) +INSTALL_PATH = /usr/lib/system +INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system +PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch +PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/dispatch +HEADER_SEARCH_PATHS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PROJECT_DIR) +INSTALLHDRS_SCRIPT_PHASE = YES +ALWAYS_SEARCH_USER_PATHS = NO +BUILD_VARIANTS = normal debug profile +ONLY_ACTIVE_ARCH = NO +GCC_VERSION = com.apple.compilers.llvm.clang.1_0 +GCC_STRICT_ALIASING = YES +GCC_SYMBOLS_PRIVATE_EXTERN = YES +GCC_CW_ASM_SYNTAX = NO +GCC_ENABLE_CPP_EXCEPTIONS = NO +GCC_ENABLE_CPP_RTTI = NO +GCC_ENABLE_OBJC_EXCEPTIONS = NO +GCC_ENABLE_PASCAL_STRINGS = NO +GCC_WARN_SHADOW = YES +GCC_WARN_64_TO_32_BIT_CONVERSION = YES +GCC_WARN_ABOUT_RETURN_TYPE = YES +GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES +GCC_WARN_ABOUT_MISSING_NEWLINE = YES +GCC_WARN_UNUSED_VARIABLE = YES +GCC_TREAT_WARNINGS_AS_ERRORS = YES +GCC_OPTIMIZATION_LEVEL = s +GCC_THUMB_SUPPORT[arch=armv6] 
= NO +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 +GCC_PREPROCESSOR_DEFINITIONS[sdk=iphonesimulator*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_LIBDISPATCH_INIT_CONSTRUCTOR=1 DISPATCH_USE_PTHREAD_ATFORK=1 DISPATCH_USE_DIRECT_TSD=0 +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 +OTHER_CFLAGS = -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions -fdiagnostics-show-option -fverbose-asm -momit-leaf-frame-pointer +OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 +OTHER_CFLAGS_profile = -DDISPATCH_PROFILE=1 +GENERATE_PROFILING_CODE = NO +GENERATE_MASTER_OBJECT_FILE = NO +DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) +UMBRELLA_LDFLAGS = -umbrella System +UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh new file mode 100755 index 000000000..2d88a26b0 --- /dev/null +++ b/xcodescripts/install-manpages.sh @@ -0,0 +1,107 @@ +#!/bin/bash -e +# +# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +if [ "$ACTION" = installhdrs ]; then exit 0; fi +if [ "${RC_ProjectName%_Sim}" != "${RC_ProjectName}" ]; then exit 0; fi + +mkdir -p "$DSTROOT"/usr/share/man/man3 || true +mkdir -p "$DSTROOT"/usr/local/share/man/man3 || true + +# Copy man pages +cd "$SRCROOT"/man +BASE_PAGES="dispatch.3 dispatch_after.3 dispatch_api.3 dispatch_apply.3 \ + dispatch_async.3 dispatch_group_create.3 dispatch_object.3 \ + dispatch_once.3 dispatch_queue_create.3 dispatch_semaphore_create.3 \ + dispatch_source_create.3 dispatch_time.3 dispatch_data_create.3 \ + dispatch_io_create.3 dispatch_io_read.3 dispatch_read.3" + +PRIVATE_PAGES="dispatch_benchmark.3" + +cp ${BASE_PAGES} "$DSTROOT"/usr/share/man/man3 +cp ${PRIVATE_PAGES} "$DSTROOT"/usr/local/share/man/man3 + +# Make hard links (lots of hard links) + +cd "$DSTROOT"/usr/local/share/man/man3 +ln -f dispatch_benchmark.3 dispatch_benchmark_f.3 +chown ${INSTALL_OWNER}:${INSTALL_GROUP} $PRIVATE_PAGES +chmod $INSTALL_MODE_FLAG $PRIVATE_PAGES + +cd $DSTROOT/usr/share/man/man3 + +chown ${INSTALL_OWNER}:${INSTALL_GROUP} $BASE_PAGES +chmod $INSTALL_MODE_FLAG $BASE_PAGES + +ln -f dispatch_after.3 dispatch_after_f.3 +ln -f dispatch_apply.3 dispatch_apply_f.3 +ln -f dispatch_once.3 dispatch_once_f.3 + +for m in dispatch_async_f dispatch_sync dispatch_sync_f; do + ln -f dispatch_async.3 ${m}.3 +done + +for m in dispatch_group_enter dispatch_group_leave dispatch_group_wait \ + dispatch_group_async dispatch_group_async_f dispatch_group_notify \ + dispatch_group_notify_f; do + ln -f dispatch_group_create.3 ${m}.3 +done + +for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \ + dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do + ln -f dispatch_object.3 ${m}.3 +done + +for m in dispatch_semaphore_signal dispatch_semaphore_wait; do + ln -f dispatch_semaphore_create.3 ${m}.3 +done + +for m in dispatch_get_current_queue dispatch_main dispatch_get_main_queue \ + 
dispatch_get_global_queue dispatch_queue_get_label \ + dispatch_set_target_queue; do + ln -f dispatch_queue_create.3 ${m}.3 +done + +for m in dispatch_source_set_event_handler dispatch_source_set_event_handler_f \ + dispatch_source_set_cancel_handler dispatch_source_set_cancel_handler_f \ + dispatch_source_cancel dispatch_source_testcancel \ + dispatch_source_get_handle dispatch_source_get_mask \ + dispatch_source_get_data dispatch_source_merge_data \ + dispatch_source_set_timer; do + ln -f dispatch_source_create.3 ${m}.3 +done + +ln -f dispatch_time.3 dispatch_walltime.3 + +for m in dispatch_data_create_concat dispatch_data_create_subrange \ + dispatch_data_create_map dispatch_data_apply \ + dispatch_data_copy_region dispatch_data_get_size; do + ln -f dispatch_data_create.3 ${m}.3 +done + +for m in dispatch_io_create_with_path dispatch_io_set_high_water \ + dispatch_io_set_low_water dispatch_io_set_interval \ + dispatch_io_close; do + ln -f dispatch_io_create.3 ${m}.3 +done + +ln -f dispatch_io_read.3 dispatch_io_write.3 + +ln -f dispatch_read.3 dispatch_write.3 diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh new file mode 100755 index 000000000..3669ec237 --- /dev/null +++ b/xcodescripts/mig-headers.sh @@ -0,0 +1,29 @@ +#!/bin/bash -e +# +# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +export MIGCC="$(xcrun -find cc)" +export MIGCOM="$(xcrun -find migcom)" +export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}" +for a in ${ARCHS}; do + xcrun mig -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ + -sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \ + -server /dev/null "${SCRIPT_INPUT_FILE_0}" +done diff --git a/xcodescripts/postprocess-headers.sh b/xcodescripts/postprocess-headers.sh new file mode 100755 index 000000000..41f466939 --- /dev/null +++ b/xcodescripts/postprocess-headers.sh @@ -0,0 +1,21 @@ +#!/bin/bash -e +# +# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + diff --git a/xcodescripts/symlink-headers.sh b/xcodescripts/symlink-headers.sh new file mode 100755 index 000000000..a062a6f5a --- /dev/null +++ b/xcodescripts/symlink-headers.sh @@ -0,0 +1,29 @@ +#!/bin/bash -e +# +# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +if [ "$DEPLOYMENT_LOCATION" != YES ]; then + DSTROOT="$CONFIGURATION_BUILD_DIR" + [ -L "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h ] && exit +fi + +mv "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h \ + "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/dispatch.h +ln -sf dispatch.h "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h From 558f12eae9d85652a3d8c3951a97af74285604ce Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Fri, 27 Jul 2012 17:24:27 +0000 Subject: [PATCH 03/18] libdispatch-228.18 Imported from libdispatch-228.18.tar.gz --- INSTALL | 40 +- config/config.h | 6 + configure.ac | 99 +- dispatch/base.h | 41 +- dispatch/data.h | 24 +- dispatch/dispatch.h | 4 +- dispatch/group.h | 6 +- dispatch/io.h | 11 +- dispatch/object.h | 82 +- dispatch/queue.h | 20 +- dispatch/semaphore.h | 3 +- dispatch/source.h | 3 +- dispatch/time.h | 14 +- libdispatch.xcodeproj/project.pbxproj | 117 +- man/Makefile.am | 67 +- man/dispatch.3 | 3 +- man/dispatch_data_create.3 | 14 + man/dispatch_group_create.3 | 30 + man/dispatch_io_create.3 | 38 +- man/dispatch_object.3 | 97 +- man/dispatch_queue_create.3 | 22 +- man/dispatch_semaphore_create.3 | 21 - man/dispatch_source_create.3 | 48 +- man/dispatch_time.3 | 2 +- os/object.h | 108 ++ os/object_private.h | 134 +++ private/benchmark.h | 2 +- private/data_private.h | 178 +++ private/dispatch.h | 39 + private/private.h | 23 +- private/queue_private.h | 17 +- private/source_private.h | 53 +- src/Makefile.am | 12 +- 
src/apply.c | 130 ++- src/benchmark.c | 1 - src/data.c | 84 +- src/data_internal.h | 22 +- src/init.c | 390 +++++-- src/internal.h | 95 +- src/io.c | 91 +- src/io_internal.h | 28 +- src/object.c | 199 +++- src/object.m | 286 +++++ src/object_internal.h | 166 ++- src/queue.c | 716 +++++++----- src/queue_internal.h | 198 +++- src/semaphore.c | 69 +- src/semaphore_internal.h | 13 +- src/shims/atomic.h | 8 +- src/shims/tsd.h | 13 +- src/source.c | 98 +- src/source_internal.h | 15 +- src/trace.h | 24 +- src/transform.c | 1015 +++++++++++++++++ xcodeconfig/libdispatch.aliases | 12 + xcodeconfig/libdispatch.order | 40 + xcodeconfig/libdispatch.unexport | 12 + xcodeconfig/libdispatch.xcconfig | 25 +- ...{symlink-headers.sh => install-headers.sh} | 14 +- xcodescripts/install-manpages.sh | 3 +- 60 files changed, 4041 insertions(+), 1104 deletions(-) create mode 100644 os/object.h create mode 100644 os/object_private.h create mode 100644 private/data_private.h create mode 100644 private/dispatch.h create mode 100644 src/object.m create mode 100644 src/transform.c create mode 100644 xcodeconfig/libdispatch.aliases create mode 100644 xcodeconfig/libdispatch.order create mode 100644 xcodeconfig/libdispatch.unexport rename xcodescripts/{symlink-headers.sh => install-headers.sh} (61%) diff --git a/INSTALL b/INSTALL index 69fd5a6aa..bac7e27e8 100644 --- a/INSTALL +++ b/INSTALL @@ -22,8 +22,8 @@ The following configure options may be of general interest: --with-apple-libc-source - Specify the path to Apple's Libc package, so that appropriate headers - can be found and used. + Specify the path to Apple's Libc package, so that appropriate headers can + be found and used. --with-apple-libclosure-source @@ -32,32 +32,31 @@ The following configure options may be of general interest: --with-apple-xnu-source - Specify the path to Apple's XNU package, so that appropriate headers - can be found and used. 
+ Specify the path to Apple's XNU package, so that appropriate headers can be + found and used. --with-blocks-runtime - On systems where -fblocks is supported, specify an additional library - path in which libBlocksRuntime can be found. This is not required on - Mac OS X, where the Blocks runtime is included in libSystem, but is - required on FreeBSD. + On systems where -fblocks is supported, specify an additional library path + in which libBlocksRuntime can be found. This is not required on Mac OS X, + where the Blocks runtime is included in libSystem, but is required on + FreeBSD. -The following options are likely to only be useful when building libdispatch -on Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: +The following options are likely to only be useful when building libdispatch on +Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: --disable-libdispatch-init-constructor - Do not tag libdispatch's init routine as __constructor, in which case - it must be run manually before libdispatch routines can be called. - For the libdispatch library in /usr/lib/system, the init routine is called - automatically during process start. + Do not tag libdispatch's init routine as __constructor, in which case it + must be run manually before libdispatch routines can be called. This is the + default when building on Mac OS X. For /usr/lib/system/libdispatch.dylib + the init routine is called automatically during process start. --enable-apple-tsd-optimizations - Use a non-portable allocation scheme for pthread per-thread data (TSD) - keys when building libdispatch for /usr/lib/system on Mac OS X. This - should not be used on other OS's, or on Mac OS X when building a - stand-alone library. + Use a non-portable allocation scheme for pthread per-thread data (TSD) keys + when building libdispatch for /usr/lib/system on Mac OS X. This should not + be used on other OS's, or on Mac OS X when building a stand-alone library. 
Typical configuration commands @@ -65,17 +64,18 @@ The following command lines create the configuration required to build libdispatch for /usr/lib/system on Mac OS X Lion: sh autogen.sh - ./configure CFLAGS='-arch x86_64 -arch i386' \ + ./configure CFLAGS='-arch x86_64 -arch i386 -g -Os' \ --prefix=/usr --libdir=/usr/lib/system \ --disable-dependency-tracking --disable-static \ - --disable-libdispatch-init-constructor \ --enable-apple-tsd-optimizations \ --with-apple-libc-source=/path/to/10.7.0/Libc-763.11 \ --with-apple-libclosure-source=/path/to/10.7.0/libclosure-53 \ --with-apple-xnu-source=/path/to/10.7.0/xnu-1699.22.73 + make check Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with clang and blocks support: sh autogen.sh ./configure CC=clang --with-blocks-runtime=/usr/local/lib + make check diff --git a/config/config.h b/config/config.h index 040bf21a2..7fe2d6348 100644 --- a/config/config.h +++ b/config/config.h @@ -96,6 +96,9 @@ /* Define if pthread work queues are present */ #define HAVE_PTHREAD_WORKQUEUES 1 +/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ +#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 + /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 @@ -160,6 +163,9 @@ /* Define to use Mach semaphores */ #define USE_MACH_SEM 1 +/* Define to use Objective-C runtime */ +#define USE_OBJC 1 + /* Define to use POSIX semaphores */ /* #undef USE_POSIX_SEM */ diff --git a/configure.ac b/configure.ac index eeba91bbe..9e751e7ed 100644 --- a/configure.ac +++ b/configure.ac @@ -8,45 +8,38 @@ AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) AC_CONFIG_HEADER([config/config.h]) AC_CONFIG_MACRO_DIR([m4]) +ac_clean_files=a.out.dSYM AM_MAINTAINER_MODE +AC_PROG_CC([clang gcc cc]) +AC_PROG_CXX([clang++ g++ c++]) +AC_PROG_OBJC([clang gcc cc]) + # # On Mac OS X, some required header files come from other source packages; # allow specifying where those are. 
# AC_ARG_WITH([apple-libc-source], [AS_HELP_STRING([--with-apple-libc-source], - [Specify path to Apple Libc source])], - [apple_libc_source_path=${withval}/pthreads - APPLE_LIBC_SOURCE_PATH=-I$apple_libc_source_path - CPPFLAGS="$CPPFLAGS -I$apple_libc_source_path"], - [APPLE_LIBC_SOURCE_PATH=] -) -AC_SUBST([APPLE_LIBC_SOURCE_PATH]) + [Specify path to Apple Libc source])], [ + apple_libc_source_pthreads_path=${withval}/pthreads + CPPFLAGS="$CPPFLAGS -I$apple_libc_source_pthreads_path" +]) AC_ARG_WITH([apple-libclosure-source], [AS_HELP_STRING([--with-apple-libclosure-source], - [Specify path to Apple libclosure source])], - [apple_libclosure_source_path=${withval} - APPLE_LIBCLOSURE_SOURCE_PATH=-I$apple_libclosure_source_path - CPPFLAGS="$CPPFLAGS -I$apple_libclosure_source_path"], - [APPLE_LIBCLOSURE_SOURCE_PATH=] -) -AC_SUBST([APPLE_LIBCLOSURE_SOURCE_PATH]) + [Specify path to Apple libclosure source])], [ + apple_libclosure_source_path=${withval} + CPPFLAGS="$CPPFLAGS -I$apple_libclosure_source_path" +]) AC_ARG_WITH([apple-xnu-source], [AS_HELP_STRING([--with-apple-xnu-source], - [Specify path to Apple XNU source])], - [apple_xnu_source_path=${withval}/libkern - APPLE_XNU_SOURCE_PATH=-I$apple_xnu_source_path - CPPFLAGS="$CPPFLAGS -I$apple_xnu_source_path" - apple_xnu_source_system_path=${withval}/osfmk - APPLE_XNU_SOURCE_SYSTEM_PATH=$apple_xnu_source_system_path], - [APPLE_XNU_SOURCE_PATH=] -) -AC_SUBST([APPLE_XNU_SOURCE_PATH]) -AC_SUBST([APPLE_XNU_SOURCE_SYSTEM_PATH]) -AM_CONDITIONAL(USE_XNU_SOURCE, [test -n "$apple_xnu_source_system_path"]) + [Specify path to Apple XNU source])], [ + apple_xnu_source_libkern_path=${withval}/libkern + apple_xnu_source_osfmk_path=${withval}/osfmk + CPPFLAGS="$CPPFLAGS -I$apple_xnu_source_libkern_path" +]) AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], @@ -57,15 +50,16 @@ AS_IF([test 
"x$dispatch_cv_system_privateheaders" != "xno"], ) # -# On Mac OS X, libpispatch_init is automatically invoked during libSystem +# On Mac OS X, libdispatch_init is automatically invoked during libSystem # process initialization. On other systems, it is tagged as a library # constructor to be run by automatically by the runtime linker. # AC_ARG_ENABLE([libdispatch-init-constructor], [AS_HELP_STRING([--disable-libdispatch-init-constructor], - [Disable libdispatch_init as a constructor])] + [Disable libdispatch_init as a constructor])],, + [AS_IF([test -f /usr/lib/system/libdispatch.dylib], + [enable_libdispatch_init_constructor=no])] ) - AS_IF([test "x$enable_libdispatch_init_constructor" != "xno"], [AC_DEFINE(USE_LIBDISPATCH_INIT_CONSTRUCTOR, 1, [Define to tag libdispatch_init as a constructor])] @@ -78,15 +72,12 @@ AC_ARG_ENABLE([apple-tsd-optimizations], [AS_HELP_STRING([--enable-apple-tsd-optimizations], [Use non-portable pthread TSD optimizations for Mac OS X.])] ) - AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], [AC_DEFINE(USE_APPLE_TSD_OPTIMIZATIONS, 1, [Define to use non-portable pthread TSD optimizations for Mac OS X)])] ) AC_USE_SYSTEM_EXTENSIONS -AC_PROG_CC -AC_PROG_CXX AC_PROG_INSTALL AC_PROG_LIBTOOL AC_PATH_PROGS(MIG, mig) @@ -127,26 +118,34 @@ AC_HEADER_STDC AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h]) # hack for pthread_machdep.h's #include -AS_IF([test -n "$apple_xnu_source_system_path"], [ +AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ saveCPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I." 
- ln -fsh "$apple_xnu_source_system_path" System + ln -fsh "$apple_xnu_source_osfmk_path" System ]) AC_CHECK_HEADERS([pthread_machdep.h]) -AS_IF([test -n "$apple_xnu_source_system_path"], [ +AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ rm -f System CPPFLAGS="$saveCPPFLAGS" + AC_CONFIG_COMMANDS([src/System], + [ln -fsh "$apple_xnu_source_osfmk_path" src/System], + [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) ]) # -# Core Services is tested in one of the GCD regression tests, so test for its -# presence using its header file. +# Parts of the testsuite use CoreFoundation and Foundation # -AC_CHECK_HEADER([CoreServices/CoreServices.h], - [have_coreservices=true], - [have_coreservices=false] +AC_CHECK_HEADER([CoreFoundation/CoreFoundation.h], + [have_corefoundation=true], [have_corefoundation=false] ) -AM_CONDITIONAL(HAVE_CORESERVICES, $have_coreservices) +AM_CONDITIONAL(HAVE_COREFOUNDATION, $have_corefoundation) +AC_LANG_PUSH([Objective C]) +AC_CHECK_HEADER([Foundation/Foundation.h], [ + AC_DEFINE(USE_OBJC, 1, [Define to use Objective-C runtime]) + have_foundation=true], [have_foundation=false] +) +AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation) +AC_LANG_POP([Objective C]) # # We use the availability of mach.h to decide whether to compile in all sorts @@ -155,8 +154,7 @@ AM_CONDITIONAL(HAVE_CORESERVICES, $have_coreservices) AC_CHECK_HEADER([mach/mach.h], [ AC_DEFINE(HAVE_MACH, 1, [Define if mach is present]) AC_DEFINE(__DARWIN_NON_CANCELABLE, 1, [Define if using Darwin $NOCANCEL]) - have_mach=true], - [have_mach=false] + have_mach=true], [have_mach=false] ) AM_CONDITIONAL(USE_MIG, $have_mach) @@ -167,6 +165,7 @@ AM_CONDITIONAL(USE_MIG, $have_mach) AC_CHECK_HEADER([pthread_workqueue.h], [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])] ) +AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np]) # # Find functions and declarations we care about. 
@@ -182,15 +181,13 @@ AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include ]]) AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf getprogname]) AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], - [have_posix_spawn_start_suspended=true], - [have_posix_spawn_start_suspended=false], + [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], [[#include ]] ) AM_CONDITIONAL(HAVE_POSIX_SPAWN_START_SUSPENDED, $have_posix_spawn_start_suspended) AC_CHECK_FUNC([sem_init], - [have_sem_init=true], - [have_sem_init=false] + [have_sem_init=true], [have_sem_init=false] ) # @@ -236,7 +233,7 @@ AC_CACHE_CHECK([for -momit-leaf-frame-pointer], [dispatch_cv_cc_omit_leaf_fp], [ CFLAGS="$saveCFLAGS" ]) AS_IF([test "x$dispatch_cv_cc_omit_leaf_fp" != "xno"], [ - OMIT_LEAF_FP_FLAGS="-momit-leaf-frame-pointer" + OMIT_LEAF_FP_FLAGS="-momit-leaf-frame-pointer" ]) AC_SUBST([OMIT_LEAF_FP_FLAGS]) @@ -254,10 +251,10 @@ AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" != "xno"]) # Temporary: some versions of clang do not mark __builtin_trap() as # __attribute__((__noreturn__)). Detect and add if required. # -AC_COMPILE_IFELSE([ - AC_LANG_PROGRAM([void __attribute__((__noreturn__)) temp(void) { __builtin_trap(); }], [])], [ - AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn]) - ], []) +AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM([void __attribute__((__noreturn__)) temp(void) { __builtin_trap(); }], [])], + [AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn])] +) # # Generate Makefiles. diff --git a/dispatch/base.h b/dispatch/base.h index 029e3e0a8..2af340e34 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -25,45 +25,6 @@ #error "Please #include instead of this file directly." #endif -#ifdef __cplusplus -/* - * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ - * aware of type compatibility. 
- */ -typedef struct dispatch_object_s { -private: - dispatch_object_s(); - ~dispatch_object_s(); - dispatch_object_s(const dispatch_object_s &); - void operator=(const dispatch_object_s &); -} *dispatch_object_t; -#else -typedef union { - struct dispatch_object_s *_do; - struct dispatch_continuation_s *_dc; - struct dispatch_queue_s *_dq; - struct dispatch_queue_attr_s *_dqa; - struct dispatch_group_s *_dg; - struct dispatch_source_s *_ds; - struct dispatch_source_attr_s *_dsa; - struct dispatch_semaphore_s *_dsema; - struct dispatch_data_s *_ddata; - struct dispatch_io_s *_dchannel; - struct dispatch_operation_s *_doperation; - struct dispatch_disk_s *_ddisk; -} dispatch_object_t __attribute__((transparent_union)); -#endif - -typedef void (*dispatch_function_t)(void *); - -#ifdef __cplusplus -#define DISPATCH_DECL(name) \ - typedef struct name##_s : public dispatch_object_s {} *name##_t -#else -/*! @parseOnly */ -#define DISPATCH_DECL(name) typedef struct name##_s *name##_t -#endif - #if __GNUC__ #define DISPATCH_NORETURN __attribute__((__noreturn__)) #define DISPATCH_NOTHROW __attribute__((__nothrow__)) @@ -139,4 +100,6 @@ typedef void (*dispatch_function_t)(void *); #define DISPATCH_EXPECT(x, v) (x) #endif +typedef void (*dispatch_function_t)(void *); + #endif diff --git a/dispatch/data.h b/dispatch/data.h index 2222e1bc0..ddba5dcae 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -46,7 +46,8 @@ DISPATCH_DECL(dispatch_data); * @discussion The singleton dispatch data object representing a zero-length * memory region. */ -#define dispatch_data_empty (&_dispatch_data_empty) +#define dispatch_data_empty \ + DISPATCH_GLOBAL_OBJECT(dispatch_data_t, _dispatch_data_empty) __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; @@ -92,7 +93,7 @@ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_free; * @result A newly created dispatch data object. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create(const void *buffer, size_t size, @@ -118,8 +119,10 @@ dispatch_data_get_size(dispatch_data_t data); * contiguous memory region and returns a new data object representing it. * If non-NULL references to a pointer and a size variable are provided, they * are filled with the location and extent of that region. These allow direct - * read access to the represented memory, but are only valid until the copy - * object is released. + * read access to the represented memory, but are only valid until the returned + * object is released. Under ARC, if that object is held in a variable with + * automatic storage, care needs to be taken to ensure that it is not released + * by the compiler before memory access via the pointer has been completed. * * @param data The dispatch data object to map. * @param buffer_ptr A pointer to a pointer variable to be filled with the @@ -130,7 +133,8 @@ dispatch_data_get_size(dispatch_data_t data); * @result A newly created dispatch data object. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_map(dispatch_data_t data, const void **buffer_ptr, @@ -152,7 +156,8 @@ dispatch_data_create_map(dispatch_data_t data, * data1 and data2 objects. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); @@ -172,7 +177,8 @@ dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); * subrange of the data object. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_subrange(dispatch_data_t data, size_t offset, @@ -234,8 +240,8 @@ dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); * @result A newly created dispatch data object. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_copy_region(dispatch_data_t data, size_t location, diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 2ba2cce38..119b413c3 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -31,12 +31,13 @@ #include #include #include +#include #ifndef __OSX_AVAILABLE_STARTING #define __OSX_AVAILABLE_STARTING(x, y) #endif -#define DISPATCH_API_VERSION 20110201 +#define DISPATCH_API_VERSION 20111201 #ifndef __DISPATCH_BUILDING_DISPATCH__ @@ -44,6 +45,7 @@ #define __DISPATCH_INDIRECT__ #endif +#include #include #include #include diff --git a/dispatch/group.h b/dispatch/group.h index 4e6e11d97..88e80871a 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -50,7 +50,8 @@ __BEGIN_DECLS * The newly created group, or NULL on failure. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW dispatch_group_t dispatch_group_create(void); @@ -113,7 +114,8 @@ dispatch_group_async(dispatch_group_t group, * dispatch_group_async_f(). */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 +DISPATCH_NOTHROW void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t queue, diff --git a/dispatch/io.h b/dispatch/io.h index f8fb2ff42..dd83e7dfb 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -220,7 +220,8 @@ typedef unsigned long dispatch_io_type_t; * occurred. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW dispatch_io_t dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, @@ -255,8 +256,8 @@ dispatch_io_create(dispatch_io_type_t type, * occurred. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_NONNULL2 DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, @@ -295,8 +296,8 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * occurred. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t io, diff --git a/dispatch/object.h b/dispatch/object.h index 2ecf25186..bc7257a9c 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,6 +26,76 @@ #include // for HeaderDoc #endif +/*! + * @typedef dispatch_object_t + * + * @abstract + * Abstract base type for all dispatch objects. + * The details of the type definition are language-specific. + * + * @discussion + * Dispatch objects are reference counted via calls to dispatch_retain() and + * dispatch_release(). + */ + +#if OS_OBJECT_USE_OBJC +/* + * By default, dispatch objects are declared as Objective-C types when building + * with an Objective-C compiler. This allows them to participate in ARC, in RR + * management by the Blocks runtime and in leaks checking by the static + * analyzer, and enables them to be added to Cocoa collections. + * See for details. + */ +OS_OBJECT_DECL(dispatch_object); +#define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object) +#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) +#define DISPATCH_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED +DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +_dispatch_object_validate(dispatch_object_t object) { + void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object; + (void)isa; +} +#elif defined(__cplusplus) +/* + * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ + * aware of type compatibility. 
+ */ +typedef struct dispatch_object_s { +private: + dispatch_object_s(); + ~dispatch_object_s(); + dispatch_object_s(const dispatch_object_s &); + void operator=(const dispatch_object_s &); +} *dispatch_object_t; +#define DISPATCH_DECL(name) \ + typedef struct name##_s : public dispatch_object_s {} *name##_t +#define DISPATCH_GLOBAL_OBJECT(type, object) (&(object)) +#define DISPATCH_RETURNS_RETAINED +#else /* Plain C */ +typedef union { + struct _os_object_s *_os_obj; + struct dispatch_object_s *_do; + struct dispatch_continuation_s *_dc; + struct dispatch_queue_s *_dq; + struct dispatch_queue_attr_s *_dqa; + struct dispatch_group_s *_dg; + struct dispatch_source_s *_ds; + struct dispatch_source_attr_s *_dsa; + struct dispatch_semaphore_s *_dsema; + struct dispatch_data_s *_ddata; + struct dispatch_io_s *_dchannel; + struct dispatch_operation_s *_doperation; + struct dispatch_disk_s *_ddisk; +} dispatch_object_t __attribute__((__transparent_union__)); +/*! @parseOnly */ +#define DISPATCH_DECL(name) typedef struct name##_s *name##_t +/*! @parseOnly */ +#define DISPATCH_GLOBAL_OBJECT(t, x) (&(x)) +/*! @parseOnly */ +#define DISPATCH_RETURNS_RETAINED +#endif + __BEGIN_DECLS /*! @@ -77,6 +147,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_retain(dispatch_object_t object); +#if OS_OBJECT_USE_OBJC_RETAIN_RELEASE +#undef dispatch_retain +#define dispatch_retain(object) ({ dispatch_object_t _o = (object); \ + _dispatch_object_validate(_o); (void)[_o retain]; }) +#endif /*! * @function dispatch_release @@ -98,6 +173,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_release(dispatch_object_t object); +#if OS_OBJECT_USE_OBJC_RETAIN_RELEASE +#undef dispatch_release +#define dispatch_release(object) ({ dispatch_object_t _o = (object); \ + _dispatch_object_validate(_o); [_o release]; }) +#endif /*! 
* @function dispatch_get_context diff --git a/dispatch/queue.h b/dispatch/queue.h index d76777136..b8050f923 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -330,6 +330,11 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * from deadlock if that queue is not the one returned by * dispatch_get_current_queue(). * + * When dispatch_get_current_queue() is called on the main thread, it may + * or may not return the same value as dispatch_get_main_queue(). Comparing + * the two is not a valid way to test whether code is executing on the + * main thread. + * * @result * Returns the current queue. */ @@ -355,7 +360,8 @@ dispatch_get_current_queue(void); */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; -#define dispatch_get_main_queue() (&_dispatch_main_q) +#define dispatch_get_main_queue() \ + DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q) /*! * @typedef dispatch_queue_priority_t @@ -430,7 +436,9 @@ dispatch_get_global_queue(dispatch_queue_priority_t priority, * @discussion A dispatch queue that may invoke blocks concurrently and supports * barrier blocks submitted with the dispatch barrier API. */ -#define DISPATCH_QUEUE_CONCURRENT (&_dispatch_queue_attr_concurrent) +#define DISPATCH_QUEUE_CONCURRENT \ + DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \ + _dispatch_queue_attr_concurrent) __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; @@ -470,7 +478,8 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * The newly created dispatch queue. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); @@ -525,7 +534,10 @@ dispatch_queue_get_label(dispatch_queue_t queue); * cancellation handler blocks will be submitted. * * A dispatch I/O channel's target queue specifies where where its I/O - * operations are executed. + * operations are executed. If the channel's target queue's priority is set to + * DISPATCH_QUEUE_PRIORITY_BACKGROUND, then the I/O operations performed by + * dispatch_io_read() or dispatch_io_write() on that queue will be + * throttled when there is I/O contention. * * For all other dispatch object types, the only function of the target queue * is to determine where an object's finalizer function is invoked. diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index 19b50af58..312a2c2e3 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -56,7 +56,8 @@ __BEGIN_DECLS * The newly created semaphore, or NULL on failure. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW dispatch_semaphore_t dispatch_semaphore_create(long value); diff --git a/dispatch/source.h b/dispatch/source.h index 4c9f601dd..e37ecec07 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -288,7 +288,8 @@ __BEGIN_DECLS * handler block to the default priority global queue. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, diff --git a/dispatch/time.h b/dispatch/time.h index d39578d66..e0bc2f63a 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -28,11 +28,11 @@ #include -__BEGIN_DECLS - -struct timespec; +// +#if TARGET_OS_MAC +#include +#endif -// 6368156 #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC #endif @@ -50,6 +50,10 @@ struct timespec; #define USEC_PER_SEC 1000000ull #define NSEC_PER_USEC 1000ull +__BEGIN_DECLS + +struct timespec; + /*! * @typedef dispatch_time_t * @@ -60,7 +64,7 @@ struct timespec; */ typedef uint64_t dispatch_time_t; -#define DISPATCH_TIME_NOW 0 +#define DISPATCH_TIME_NOW (0ull) #define DISPATCH_TIME_FOREVER (~0ull) /*! diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index e36948cd1..59d706e53 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -56,6 +56,11 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C913AC0F143BD34800B78976 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C93D6166143E190F00EB9023 /* 
transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C93D6167143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C9C5F80E143C1771006DC718 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38412A472C4004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -64,11 +69,15 @@ E422A0D612A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E4407FAE143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E4407FAF143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5612517EBE00645D88 /* 
protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5712517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; + E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ -118,6 +127,8 @@ E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; E4EC11AE12514302000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 
E4EC11B012514302000DDBD1 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; @@ -138,6 +149,10 @@ E4EC122112514715000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E4EC122312514715000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E4EC122412514715000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4FC3264145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E4FC3265145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E4FC3266145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E4FC3267145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; FC1832A6109923C7003403D5 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; FC1832A7109923C7003403D5 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; @@ -224,21 +239,27 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; 
}; + C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; + C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = ""; }; E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = ""; }; E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hw_config.h; sourceTree = ""; }; E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; + E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; + E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; path = provider.d; sourceTree = ""; }; E43D93F11097917E004F6A62 /* 
libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; + E4407FAD143CC984003A9E80 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dispatch.h; sourceTree = ""; }; + E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = ""; }; E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; + E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; - E49F251C125D629F0057C971 /* symlink-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "symlink-headers.sh"; sourceTree = ""; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_data_create.3; sourceTree = ""; }; @@ -248,8 +269,11 @@ E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; E4BA743A13A8911B0095BDF1 /* malloc_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_zone.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; + E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; + E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E4FC3263145F46C9002FBDDB /* object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = object.m; sourceTree = ""; }; FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; 
FC1832A2109923C7003403D5 /* perfmon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perfmon.h; sourceTree = ""; }; FC1832A3109923C7003403D5 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; @@ -294,6 +318,7 @@ isa = PBXGroup; children = ( E44DB71E11D2FF080074F2AD /* Build Support */, + E4EB4A2914C35F1800AA0FA9 /* OS Object */, FC7BEDAA0E83625200161930 /* Public Headers */, FC7BEDAF0E83626100161930 /* Private Headers */, FC7BEDB60E8363DC00161930 /* Project Headers */, @@ -316,11 +341,13 @@ E44EBE3B1251659900645D88 /* init.c */, 5A27262510F26F1900751FBC /* io.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, + E4FC3263145F46C9002FBDDB /* object.m */, 96DF70BD0F38FE3C0074BD99 /* once.c */, FC7BED8A0E8361E600161930 /* queue.c */, 721F5CCE0F15553500FF03A6 /* semaphore.c */, 96A8AA860F41E7A400CD570B /* source.c */, 96032E4A0F5CC8C700241C5F /* time.c */, + C9C5F80D143C1771006DC718 /* transform.c */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, ); @@ -378,6 +405,9 @@ E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, + E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, + E448727914C6215D00BB45C2 /* libdispatch.order */, + E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */, ); path = xcodeconfig; sourceTree = ""; @@ -408,9 +438,9 @@ isa = PBXGroup; children = ( E49F251D125D630A0057C971 /* install-manpages.sh */, + E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */, E49F251E125D631D0057C971 /* mig-headers.sh */, E482F1CD12DBAB590030614D /* postprocess-headers.sh */, - E49F251C125D629F0057C971 /* symlink-headers.sh */, ); path = xcodescripts; sourceTree = ""; @@ -423,6 +453,16 @@ path = config; sourceTree = ""; }; + E4EB4A2914C35F1800AA0FA9 /* OS Object */ = { + isa = PBXGroup; + children 
= ( + E4EB4A2614C35ECE00AA0FA9 /* object.h */, + E454569214746F1B00106147 /* object_private.h */, + ); + name = "OS Object"; + path = os; + sourceTree = ""; + }; FC1832A0109923B3003403D5 /* shims */ = { isa = PBXGroup; children = ( @@ -459,10 +499,12 @@ FC7BEDAF0E83626100161930 /* Private Headers */ = { isa = PBXGroup; children = ( + E4407FAD143CC984003A9E80 /* dispatch.h */, FC7BED930E8361E600161930 /* private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, FCEF047F0F5661960067401F /* source_private.h */, 961B99350F3E83980006BC96 /* benchmark.h */, + C913AC0E143BD34800B78976 /* data_private.h */, ); name = "Private Headers"; path = private; @@ -505,6 +547,7 @@ 5AAB45C610D30D0C004407EA /* data.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, + C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, @@ -525,6 +568,9 @@ E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, + E4407FAE143CC984003A9E80 /* dispatch.h in Headers */, + E454569314746F1B00106147 /* object_private.h in Headers */, + E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -564,6 +610,9 @@ E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, + E4407FAF143CC984003A9E80 /* dispatch.h in Headers */, + E454569414746F1B00106147 /* object_private.h in Headers */, + E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -577,8 +626,8 @@ D2AAC043055464E500DB518D /* Headers */, D2AAC044055464E500DB518D /* Sources */, 
D289987405E68DCB004EDB86 /* Frameworks */, + E4EB4A2B14C3720B00AA0FA9 /* Install Headers */, E482F1C512DBAA110030614D /* Postprocess Headers */, - 2EC9C9800E846B5200E2499A /* Symlink Headers */, 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */, ); buildRules = ( @@ -599,8 +648,8 @@ E49F24AA125D57FA0057C971 /* Headers */, E49F24C7125D57FA0057C971 /* Sources */, E49F24D5125D57FA0057C971 /* Frameworks */, + E4EB4A3014C3A14000AA0FA9 /* Install Headers */, E4128EB213B9612700ABB2CB /* Postprocess Headers */, - E49F24D6125D57FA0057C971 /* Symlink Headers */, E49F24D7125D57FA0057C971 /* Install Manpages */, ); buildRules = ( @@ -696,31 +745,31 @@ /* End PBXReferenceProxy section */ /* Begin PBXShellScriptBuildPhase section */ - 2EC9C9800E846B5200E2499A /* Symlink Headers */ = { + 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 12; + buildActionMask = 8; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/symlink-headers.sh", + "$(SRCROOT)/xcodescripts/install-manpages.sh", ); - name = "Symlink Headers"; + name = "Install Manpages"; outputPaths = ( ); - runOnlyForDeploymentPostprocessing = 0; + runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */ = { + E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/install-manpages.sh", + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", ); - name = "Install Manpages"; + name = "Postprocess Headers"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; @@ -728,7 +777,7 @@ shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { + E482F1C512DBAA110030614D /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; files = ( @@ -741,18 +790,18 @@ ); runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" "; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - E482F1C512DBAA110030614D /* Postprocess Headers */ = { + E49F24D7125D57FA0057C971 /* Install Manpages */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(SRCROOT)/xcodescripts/install-manpages.sh", ); - name = "Postprocess Headers"; + name = "Install Manpages"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; @@ -760,34 +809,42 @@ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - E49F24D6125D57FA0057C971 /* Symlink Headers */ = { + E4EB4A2B14C3720B00AA0FA9 /* Install Headers */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 12; + buildActionMask = 2147483647; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/symlink-headers.sh", + "$(SRCROOT)/xcodescripts/install-headers.sh", + "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/object_private.h", ); - name = "Symlink Headers"; + name = "Install Headers"; outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - E49F24D7125D57FA0057C971 /* Install Manpages */ = { + E4EB4A3014C3A14000AA0FA9 /* Install Headers */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 8; + buildActionMask = 2147483647; files = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/install-manpages.sh", + "$(SRCROOT)/xcodescripts/install-headers.sh", + "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/object_private.h", ); - name = "Install Manpages"; + name = "Install Headers"; outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", ); - runOnlyForDeploymentPostprocessing = 1; + runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; @@ -883,6 +940,8 @@ 96032E4B0F5CC8C700241C5F /* time.c in Sources */, 5AAB45C010D30B79004407EA /* data.c in Sources */, 5A27262610F26F1900751FBC /* io.c in Sources */, + C9C5F80E143C1771006DC718 /* transform.c in Sources */, + E4FC3264145F46C9002FBDDB /* object.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -904,6 +963,8 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */, E49F24D3125D57FA0057C971 /* data.c in Sources */, E49F24D4125D57FA0057C971 /* io.c in Sources */, + C93D6165143E190E00EB9023 /* transform.c in Sources */, + E4FC3265145F46C9002FBDDB /* object.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -925,6 +986,8 @@ E4EC11B512514302000DDBD1 /* time.c in Sources */, E4EC11B712514302000DDBD1 /* data.c in Sources */, E4EC11B812514302000DDBD1 /* io.c in Sources */, + C93D6166143E190F00EB9023 /* transform.c in Sources */, + E4FC3266145F46C9002FBDDB /* object.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -946,6 +1009,8 @@ E4EC122112514715000DDBD1 /* time.c in Sources */, E4EC122312514715000DDBD1 /* data.c in Sources */, E4EC122412514715000DDBD1 /* io.c in Sources */, + 
C93D6167143E190F00EB9023 /* transform.c in Sources */, + E4FC3267145F46C9002FBDDB /* object.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/man/Makefile.am b/man/Makefile.am index f57453aa4..7ad94e21b 100644 --- a/man/Makefile.am +++ b/man/Makefile.am @@ -30,7 +30,7 @@ EXTRA_DIST= \ LN=ln install-data-hook: - cd $(DESTDIR)$(mandir)/man3 && \ + cd $(DESTDIR)$(mandir)/man3 && \ $(LN) -f dispatch_after.3 dispatch_after_f.3 && \ $(LN) -f dispatch_apply.3 dispatch_apply_f.3 && \ $(LN) -f dispatch_async.3 dispatch_sync.3 && \ @@ -61,6 +61,8 @@ install-data-hook: $(LN) -f dispatch_semaphore_create.3 dispatch_semaphore_wait.3 && \ $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler.3 && \ $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler_f.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_registration_handler.3 && \ + $(LN) -f dispatch_source_create.3 dispatch_source_set_registration_handler_f.3 && \ $(LN) -f dispatch_source_create.3 dispatch_source_set_cancel_handler.3 && \ $(LN) -f dispatch_source_create.3 dispatch_source_set_cancel_handler_f.3 && \ $(LN) -f dispatch_source_create.3 dispatch_source_cancel.3 && \ @@ -85,5 +87,68 @@ install-data-hook: $(LN) -f dispatch_io_create.3 dispatch_io_set_low_water.3 && \ $(LN) -f dispatch_io_create.3 dispatch_io_set_interval.3 && \ $(LN) -f dispatch_io_create.3 dispatch_io_close.3 && \ + $(LN) -f dispatch_io_create.3 dispatch_io_barrier.3 && \ $(LN) -f dispatch_io_read.3 dispatch_io_write.3 && \ $(LN) -f dispatch_read.3 dispatch_write.3 + +uninstall-hook: + cd $(DESTDIR)$(mandir)/man3 && \ + rm -f dispatch_after_f.3 \ + dispatch_apply_f.3 \ + dispatch_sync.3 \ + dispatch_async_f.3 \ + dispatch_sync_f.3 \ + dispatch_group_enter.3 \ + dispatch_group_leave.3 \ + dispatch_group_wait.3 \ + dispatch_group_notify.3 \ + dispatch_group_notify_f.3 \ + dispatch_group_async.3 \ + dispatch_group_async_f.3 \ + dispatch_retain.3 \ + dispatch_release.3 \ + 
dispatch_suspend.3 \ + dispatch_resume.3 \ + dispatch_get_context.3 \ + dispatch_set_context.3 \ + dispatch_set_finalizer_f.3 \ + dispatch_once_f.3 \ + dispatch_queue_get_label.3 \ + dispatch_get_current_queue.3 \ + dispatch_get_global_queue.3 \ + dispatch_get_main_queue.3 \ + dispatch_main.3 \ + dispatch_set_target_queue.3 \ + dispatch_semaphore_signal.3 \ + dispatch_semaphore_wait.3 \ + dispatch_source_set_event_handler.3 \ + dispatch_source_set_event_handler_f.3 \ + dispatch_source_set_registration_handler.3 \ + dispatch_source_set_registration_handler_f.3 \ + dispatch_source_set_cancel_handler.3 \ + dispatch_source_set_cancel_handler_f.3 \ + dispatch_source_cancel.3 \ + dispatch_source_testcancel.3 \ + dispatch_source_get_handle.3 \ + dispatch_source_get_mask.3 \ + dispatch_source_get_data.3 \ + dispatch_source_merge_data.3 \ + dispatch_source_set_timer.3 \ + dispatch_walltime.3 \ + dispatch_data_create_concat.3 \ + dispatch_data_create_subrange.3 \ + dispatch_data_create_map.3 \ + dispatch_data_apply.3 \ + dispatch_data_copy_subrange.3 \ + dispatch_data_copy_region.3 \ + dispatch_data_get_size.3 \ + dispatch_data_copy_subrange.3 \ + dispatch_data_empty.3 \ + dispatch_io_create_with_path.3 \ + dispatch_io_set_high_water.3 \ + dispatch_io_set_low_water.3 \ + dispatch_io_set_interval.3 \ + dispatch_io_close.3 \ + dispatch_io_barrier.3 \ + dispatch_io_write.3 \ + dispatch_write.3 diff --git a/man/dispatch.3 b/man/dispatch.3 index c55be968f..d25e08392 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -31,8 +31,7 @@ events and automatically submit event handler blocks to dispatch queues. 
.Xr dispatch_api 3 , .Xr dispatch_apply 3 , .Xr dispatch_async 3 , -.Xr dispatch_benchmark 3 , -.Xr dispatch_data_create 3, +.Xr dispatch_data_create 3 , .Xr dispatch_group_create 3 , .Xr dispatch_io_create 3 , .Xr dispatch_io_read 3 , diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index 96965f2df..b941b34b3 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -200,6 +200,20 @@ or .Sy copy function can be released when the function returns. The newly created object holds implicit references to their constituent memory regions as necessary. +.Pp +The functions +.Fn dispatch_data_create_map +and +.Fn dispatch_data_apply +return an interior pointer to represented memory that is only valid as long as +an associated object has not been released. When Objective-C Automated +Reference Counting is enabled, care needs to be taken if that object is held in +a variable with automatic storage. It may need to be annotated with the +.Li objc_precise_lifetime +attribute, or stored in a +.Li __strong +instance variable instead, to ensure that the object is not released +prematurely before memory accesses via the interior pointer have been completed. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_object 3 , diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index 1dae0efcf..4b8063c9d 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -141,6 +141,36 @@ functions are wrappers around and .Fn dispatch_group_notify_f respectively. +.Sh CAVEATS +In order to ensure deterministic behavior, it is recommended to call +.Fn dispatch_group_wait +only once all blocks have been submitted to the group. If it is later +determined that new blocks should be run, it is recommended not to reuse an +already-running group, but to create a new group.
+.Pp +.Fn dispatch_group_wait +returns as soon as there are exactly zero +.Em enqueued or running +blocks associated with a group (more precisely, as soon as every +.Fn dispatch_group_enter +call has been balanced by a +.Fn dispatch_group_leave +call). If one thread waits for a group while another thread submits +new blocks to the group, then the count of associated blocks might +momentarily reach zero before all blocks have been submitted. If this happens, +.Fn dispatch_group_wait +will return too early: some blocks associated with the group have finished, +but some have not yet been submitted or run. +.Pp +However, as a special case, a block associated with a group may submit new +blocks associated with its own group. In this case, the behavior is +deterministic: a waiting thread will +.Em not +wake up until the newly submitted blocks have also finished. +.Pp +All of the foregoing also applies to +.Fn dispath_group_notify +as well, with "block to be submitted" substituted for "waiting thread". .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 90874424c..7af5b6dc1 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -8,7 +8,8 @@ .Nm dispatch_io_close , .Nm dispatch_io_set_high_water , .Nm dispatch_io_set_low_water , -.Nm dispatch_io_set_interval +.Nm dispatch_io_set_interval , +.Nm dispatch_io_barrier .Nd open, close and configure dispatch I/O channels .Sh SYNOPSIS .Fd #include @@ -49,6 +50,11 @@ .Fa "uint64_t interval" .Fa "dispatch_io_interval_flags_t flags" .Fc +.Ft void +.Fo dispatch_io_barrier +.Fa "dispatch_io_t channel" +.Fa "void (^barrier)(void)" +.Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O operations. It is an application of the ideas and idioms present in the @@ -224,6 +230,36 @@ will be submitted together once the last channel has been closed resp.\& all references to those channels have been released. 
If convenience functions have also been used on that file descriptor, submission of their handlers will be tied to the submission of the channel cleanup handlers as well. +.Pp +.Sh BARRIER OPERATIONS +The +.Fn dispatch_io_barrier +function schedules a barrier operation on an I/O channel. The specified barrier +block will be run once, after all current I/O operations (such as +.Xr read 2 or +.Xr write 2 ) +on the underlying +file descriptor have finished. No new I/O operations will start until the +barrier block finishes. +.Pp +The barrier block may operate on the underlying file descriptor with functions +like +.Xr fsync 2 +or +.Xr lseek 2 . +As discussed in the +.Sx FILEDESCRIPTOR OWNERSHIP +section, the barrier block must not +.Xr close 2 +the file descriptor, and if it changes any flags on the file descriptor, it +must restore them before finishing. +.Pp +There is no synchronization between a barrier block and any +.Xr dispatch_io_read 3 +or +.Xr dispatch_io_write 3 +handler blocks; they may be running at the same time. The barrier block itself +is responsible for any required synchronization. .Sh MEMORY MODEL Dispatch I/O channel objects are retained and released via calls to .Fn dispatch_retain diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 29c1621b5..21b3d95db 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -1,5 +1,5 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. -.Dd May 1, 2009 +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. +.Dd March 1, 2012 .Dt dispatch_object 3 .Os Darwin .Sh NAME @@ -39,9 +39,7 @@ .Fc .Sh DESCRIPTION Dispatch objects share functions for coordinating memory management, suspension, -cancellation and context pointers. While all dispatch objects are retainable, -not all objects support suspension, context pointers or finalizers (currently -only queues and sources support these additional interfaces). +cancellation and context pointers. 
.Sh MEMORY MANGEMENT Objects returned by creation functions in the dispatch framework may be uniformly retained and released with the functions @@ -53,13 +51,87 @@ respectively. The dispatch framework does not guarantee that any given client has the last or only reference to a given object. Objects may be retained internally by the system. +.Ss INTEGRATION WITH OBJECTIVE-C +.Bd -filled -offset indent +When building with an Objective-C or Objective-C++ compiler, dispatch objects +are declared as Objective-C types. This results in the following differences +compared to building as plain C/C++: +.Bl -dash +.It +if Objective-C Automated Reference Counting is enabled, dispatch objects are +memory managed by the Objective-C runtime and explicit calls to the +.Fn dispatch_retain +and +.Fn dispatch_release +functions will produce build errors. +.Pp +.Em Note : +when ARC is enabled, care needs to be taken with dispatch API returning an +interior pointer that is only valid as long as an associated object has not +been released. If that object is held in a variable with automatic storage, it +may need to be annotated with the +.Li objc_precise_lifetime +attribute, or stored in a +.Li __strong +instance variable instead, to ensure that the object is not prematurely +released. The functions returning interior pointers are +.Xr dispatch_data_create_map 3 +and +.Xr dispatch_data_apply 3 . +.It +the Blocks runtime automatically retains and releases dispatch objects captured +by blocks upon +.Fn Block_copy +and +.Fn Block_release , +e.g.\& as performed during asynchronous execution of a block via +.Xr dispatch_async 3 . +.Pp +.Em Note : +retain cycles may be encountered if dispatch source objects are captured by +their handler blocks; these cycles can be broken by declaring the captured +object +.Li __weak +or by calling +.Xr dispatch_source_cancel 3 +to cause its handler blocks to be released explicitly. 
+.It +dispatch objects can be added directly to Cocoa collections, and their +lifetime is tracked by the Objective-C static analyzer. +.El +.Pp +Integration of dispatch objects with Objective-C requires targeting Mac\ OS\ X +10.8 or later, and is disabled when building with Objective-C Garbage +Collection or for the legacy Objective-C runtime. It can also be disabled +manually by using compiler options to define the +.Dv OS_OBJECT_USE_OBJC +preprocessor macro to +.Li 0 . +.Ed +.Pp +.Em Important : +When building with a plain C/C++ compiler or when integration with Objective-C +is disabled, dispatch objects are +.Em not +automatically retained and released when captured by a block. Therefore, when a +dispatch object is captured by a block that will be executed asynchronously, +the object must be manually retained and released: +.Pp +.Bd -literal -offset indent +dispatch_retain(object); +dispatch_async(queue, ^{ + do_something_with_object(object); + dispatch_release(object); +}); +.Ed .Sh SUSPENSION The invocation of blocks on dispatch queues or dispatch sources may be suspended or resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. +respectively. Other dispatch objects do not support suspension. +.Pp The dispatch framework always checks the suspension status before executing a block, but such changes never affect a block during execution (non-preemptive). Therefore the suspension of an object is asynchronous, unless it is performed @@ -79,8 +151,8 @@ such that the dispatch object is fully resumed when the last reference is released. The result of releasing all references to a dispatch object while in a suspended state is undefined. .Sh CONTEXT POINTERS -Dispatch queues and sources support supplemental context pointers. The value of -the context pointer may be retrieved and updated with +Dispatch objects support supplemental context pointers. 
The value of the +context pointer may be retrieved and updated with .Fn dispatch_get_context and .Fn dispatch_set_context @@ -93,13 +165,12 @@ reference to the object is released. This gives the application an opportunity to free the context data associated with the object. The finalizer will be run on the object's target queue. -.Pp -The result of getting or setting the context of an object that is not a -dispatch queue or a dispatch source is undefined. .Sh SEE ALSO .Xr dispatch 3 , +.Xr dispatch_async 3 , .Xr dispatch_group_create 3 , .Xr dispatch_queue_create 3 , .Xr dispatch_semaphore_create 3 , -.Xr dispatch_source_create 3 , -.Xr dispatch_set_target_queue 3 +.Xr dispatch_set_target_queue 3 , +.Xr dispatch_source_cancel 3 , +.Xr dispatch_source_create 3 diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 9b3e6a911..b657abfcf 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -153,12 +153,10 @@ called from the main thread before .Fn dispatch_main is called, then the result of .Fn dispatch_get_main_queue -is returned. The result of -.Fo dispatch_get_global_queue -.Fa DISPATCH_QUEUE_PRIORITY_DEFAULT -.Fa 0 -.Fc -will be returned in all other cases. +is returned. In all other cases, the default target queue will be returned. See +the +.Sx CAVEATS +section below. .Pp The .Fn dispatch_main @@ -226,6 +224,15 @@ configuration. It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by .Fn dispatch_get_current_queue . +.Pp +The result of +.Fn dispatch_get_main_queue +may or may not equal the result of +.Fn dispatch_get_current_queue +when called on the main thread. Comparing the two is not a valid way to test +whether code is executing on the main thread. Foundation/AppKit programs should +use [NSThread isMainThread]. POSIX programs may use +.Xr pthread_main_np 3 . .Sh COMPATIBILITY Cocoa applications need not call .Fn dispatch_main . 
@@ -240,8 +247,7 @@ exceptions generated by higher level languages such as Objective-C or C++. Applications .Em MUST catch all exceptions before returning from a block submitted to a dispatch -queue; otherwise the internal data structures of the dispatch framework will be -left in an inconsistent state. +queue; otherwise the process will be terminated with an uncaught exception. .Pp The dispatch framework manages the relationship between dispatch queues and threads of execution. As a result, applications diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 096e0e387..896412b08 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -101,27 +101,6 @@ and must be balanced before .Fn dispatch_release is called on it. -.Pp -Dispatch semaphores are strict counting semaphores. -In other words, dispatch semaphores do not saturate at any particular value. -Saturation can be achieved through atomic compare-and-swap logic. -What follows is a saturating binary semaphore: -.Bd -literal -void -saturating_semaphore_signal(dispatch_semaphore_t dsema, int *sent) -{ - if (__sync_bool_compare_and_swap(sent, 0, 1)) { - dispatch_semaphore_signal(dsema); - } -} - -void -saturating_semaphore_wait(dispatch_semaphore_t dsema, int *sent) -{ - *sent = 0; - dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); -} -.Ed .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_object 3 diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 1d774a967..89e7d514a 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -25,6 +25,16 @@ .Fa "void (*function)(void *)" .Fc .Ft void +.Fo dispatch_source_set_registration_handler +.Fa "dispatch_source_t source" +.Fa "void (^block)(void)" +.Fc +.Ft void +.Fo dispatch_source_set_registration_handler_f +.Fa "dispatch_source_t source" +.Fa "void (*function)(void *)" +.Fc +.Ft void .Fo dispatch_source_set_cancel_handler .Fa "dispatch_source_t source" .Fa "void 
(^block)(void)" @@ -90,7 +100,8 @@ constant to use the default target queue (the default priority global concurrent queue). .Pp Newly created sources are created in a suspended state. After the source has -been configured by setting an event handler, cancellation handler, context, +been configured by setting an event handler, cancellation handler, registration +handler, context, etc., the source must be activated by a call to .Fn dispatch_resume before any events will be delivered. @@ -167,7 +178,11 @@ In order to receive events from the dispatch source, an event handler should be specified via .Fn dispatch_source_set_event_handler . The event handler block is submitted to the source's target queue when the state -of the underlying system handle changes, or when an event occurs. +of the underlying system handle changes, or when an event occurs. If a source +is resumed with no event handler block set, events will be quietly ignored. +If the event handler block is changed while the source is suspended, or from a +block running on a serial queue that is the source's target queue, then the next +event handler invocation will use the new block. .Pp Dispatch sources may be suspended or resumed independently of their target queues using @@ -189,12 +204,39 @@ on the .Fa handler block. .Pp +To unset the event handler, call +.Fn dispatch_source_set_event_handler_f +and pass NULL as +.Fa function . +This unsets the event handler regardless of whether the handler +was a function pointer or a block. Registration and cancellation handlers +(see below) may be unset in the same way, but as noted below, a cancellation +handler may be required. +.Sh REGISTRATION +When +.Fn dispatch_resume +is called on a suspended or newly created source, there may be a brief delay +before the source is ready to receive events from the underlying system handle. +During this delay, the event handler will not be invoked, and events will be +missed. 
+.Pp +Once the dispatch source is registered with the underlying system and is ready +to process all events its optional registration handler will be submitted to +its target queue. This registration handler may be specified via +.Fn dispatch_source_set_registration_handler . +.Pp +The event handler will not be called until the registration handler finishes. +If the source is canceled (see below) before it is registered, +its registration handler will not be called. +.Pp .Sh CANCELLATION The .Fn dispatch_source_cancel function asynchronously cancels the dispatch source, preventing any further invocation of its event handler block. Cancellation does not interrupt a -currently executing handler block (non-preemptive). +currently executing handler block (non-preemptive). If a source is canceled +before the first time it is resumed, its event handler will never be called. +(In this case, note that the source must be resumed before it can be released.) .Pp The .Fn dispatch_source_testcancel diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 6d1887315..cb65dc5fa 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -8,7 +8,7 @@ .Nd Calculate temporal milestones .Sh SYNOPSIS .Fd #include -.Vt static const dispatch_time_t DISPATCH_TIME_NOW = 0 ; +.Vt static const dispatch_time_t DISPATCH_TIME_NOW = 0ull ; .Vt static const dispatch_time_t DISPATCH_TIME_FOREVER = ~0ull ; .Ft dispatch_time_t .Fo dispatch_time diff --git a/os/object.h b/os/object.h new file mode 100644 index 000000000..7d1e5ce85 --- /dev/null +++ b/os/object.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_OBJECT__ +#define __OS_OBJECT__ + +#ifdef __APPLE__ +#include +#endif + +/*! + * @header + * + * @preprocinfo + * By default, libSystem objects such as GCD and XPC objects are declared as + * Objective-C types when building with an Objective-C compiler. This allows + * them to participate in ARC, in RR management by the Blocks runtime and in + * leaks checking by the static analyzer, and enables them to be added to Cocoa + * collections. + * + * NOTE: this requires explicit cancellation of dispatch sources and xpc + * connections whose handler blocks capture the source/connection object, + * resp. ensuring that such captures do not form retain cycles (e.g. by + * declaring the source as __weak). + * + * To opt-out of this default behavior, add -DOS_OBJECT_USE_OBJC=0 to your + * compiler flags. + * + * This mode requires a platform with the modern Objective-C runtime, the + * Objective-C GC compiler option to be disabled, and at least a Mac OS X 10.8 + * deployment target. 
+ */ + +#ifndef OS_OBJECT_HAVE_OBJC_SUPPORT +#if defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \ + __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \ + __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0) +#define OS_OBJECT_HAVE_OBJC_SUPPORT 1 +#else +#define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +#endif +#endif + +#if OS_OBJECT_HAVE_OBJC_SUPPORT +#ifndef OS_OBJECT_USE_OBJC +#define OS_OBJECT_USE_OBJC 1 +#endif +#elif defined(OS_OBJECT_USE_OBJC) && OS_OBJECT_USE_OBJC +/* Unsupported platform for OS_OBJECT_USE_OBJC=1 */ +#undef OS_OBJECT_USE_OBJC +#define OS_OBJECT_USE_OBJC 0 +#else +#define OS_OBJECT_USE_OBJC 0 +#endif + +#if OS_OBJECT_USE_OBJC +#import +#define OS_OBJECT_CLASS(name) OS_##name +#define OS_OBJECT_DECL(name, ...) \ + @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ + @end \ + typedef NSObject *name##_t +#define OS_OBJECT_DECL_SUBCLASS(name, super) \ + OS_OBJECT_DECL(name, ) +#if defined(__has_attribute) && __has_attribute(ns_returns_retained) +#define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) +#else +#define OS_OBJECT_RETURNS_RETAINED +#endif +#if defined(__has_feature) && __has_feature(objc_arc) +#define OS_OBJECT_BRIDGE __bridge +#else +#define OS_OBJECT_BRIDGE +#endif +#ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE +#if defined(__has_feature) && __has_feature(objc_arc) || \ + defined(__clang_analyzer__) +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 +#else +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 +#endif +#endif +#else +/*! @parseOnly */ +#define OS_OBJECT_RETURNS_RETAINED +/*! @parseOnly */ +#define OS_OBJECT_BRIDGE +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 +#endif + +#endif diff --git a/os/object_private.h b/os/object_private.h new file mode 100644 index 000000000..235e0d305 --- /dev/null +++ b/os/object_private.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2011-2012 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __OS_OBJECT_PRIVATE__ +#define __OS_OBJECT_PRIVATE__ + +#include +#include +#include + +#ifndef __OSX_AVAILABLE_STARTING +#define __OSX_AVAILABLE_STARTING(x, y) +#endif + +#if __GNUC__ +#define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) +#define OS_OBJECT_NONNULL __attribute__((__nonnull__)) +#define OS_OBJECT_WARN_RESULT __attribute__((__warn_unused_result__)) +#define OS_OBJECT_MALLOC __attribute__((__malloc__)) +#define OS_OBJECT_EXPORT extern __attribute__((visibility("default"))) +#else +/*! @parseOnly */ +#define OS_OBJECT_NOTHROW +/*! @parseOnly */ +#define OS_OBJECT_NONNULL +/*! @parseOnly */ +#define OS_OBJECT_WARN_RESULT +/*! 
@parseOnly */ +#define OS_OBJECT_MALLOC +#define OS_OBJECT_EXPORT extern +#endif + +#if OS_OBJECT_USE_OBJC && defined(__has_feature) && __has_feature(objc_arc) +#define _OS_OBJECT_OBJC_ARC 1 +#else +#define _OS_OBJECT_OBJC_ARC 0 +#endif + +#define _OS_OBJECT_GLOBAL_REFCNT INT_MAX + +#define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ + isa; /* must be pointer-sized */ \ + int volatile ref_cnt; \ + int volatile xref_cnt + +#if OS_OBJECT_HAVE_OBJC_SUPPORT +// Must match size of compiler-generated OBJC_CLASS structure rdar://10640168 +#define _OS_OBJECT_CLASS_HEADER() \ + void *_os_obj_objc_class_t[5] +#else +#define _OS_OBJECT_CLASS_HEADER() \ + void (*_os_obj_xref_dispose)(_os_object_t); \ + void (*_os_obj_dispose)(_os_object_t) +#endif + +#define OS_OBJECT_CLASS(name) OS_##name + +#if OS_OBJECT_USE_OBJC +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT +@interface OS_OBJECT_CLASS(object) : NSObject +- (void)_xref_dispose; +- (void)_dispose; +@end +typedef OS_OBJECT_CLASS(object) *_os_object_t; +#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + @interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \ + \ + @end +#else +typedef struct _os_object_s *_os_object_t; +#endif + +__BEGIN_DECLS + +#if !_OS_OBJECT_OBJC_ARC + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW +_os_object_t +_os_object_alloc(const void *cls, size_t size); + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +void _os_object_dealloc(_os_object_t object); + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +_os_object_t +_os_object_retain(_os_object_t object); + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +void +_os_object_release(_os_object_t object); + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT 
OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +_os_object_t +_os_object_retain_internal(_os_object_t object); + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +void +_os_object_release_internal(_os_object_t object); + +#endif // !_OS_OBJECT_OBJC_ARC + +__END_DECLS + +#endif diff --git a/private/benchmark.h b/private/benchmark.h index df42a8a33..c6edfe632 100644 --- a/private/benchmark.h +++ b/private/benchmark.h @@ -28,7 +28,7 @@ #define __DISPATCH_BENCHMARK__ #ifndef __DISPATCH_INDIRECT__ -#error "Please #include instead of this file directly." +#error "Please #include instead of this file directly." #include // for HeaderDoc #endif diff --git a/private/data_private.h b/private/data_private.h new file mode 100644 index 000000000..6562b37d3 --- /dev/null +++ b/private/data_private.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_DATA_PRIVATE__ +#define __DISPATCH_DATA_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." 
+#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +#ifdef __BLOCKS__ + +/*! + * @const DISPATCH_DATA_DESTRUCTOR_NONE + * @discussion The destructor for dispatch data objects that require no + * management. This can be used to allow a data object to efficiently + * encapsulate data that should not be copied or freed by the system. + */ +#define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_none; + +/*! + * @const DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE + * @discussion The destructor for dispatch data objects that have been created + * from buffers that require deallocation using vm_deallocate. + */ +#define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ + (_dispatch_data_destructor_vm_deallocate) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_vm_deallocate; + +/*! + * @typedef dispatch_data_format_type_t + * + * @abstract + * Data formats are used to specify the input and output types of data supplied + * to dispatch_data_create_transform. + */ +typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_NONE + * @discussion A data format denoting that the given input or output format is, + * or should be, comprised of raw data bytes with no given encoding. + */ +#define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_none; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_BASE32 + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in Base32 (RFC 4648) format. On input, this format will + * skip whitespace characters. Cannot be used in conjunction with UTF format + * types. 
+ */ +#define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_base32; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_BASE64 + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in Base64 (RFC 4648) format. On input, this format will + * skip whitespace characters. Cannot be used in conjunction with UTF format + * types. + */ +#define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_base64; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_UTF8 + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in UTF-8 format. Is only valid when used in conjunction + * with other UTF format types. + */ +#define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_UTF16LE + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in UTF-16LE format. Is only valid when used in + * conjunction with other UTF format types. + */ +#define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_UTF16BE + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in UTF-16BE format. Is only valid when used in + * conjunction with other UTF format types. 
+ */ +#define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be; + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_UTFANY + * @discussion A data format denoting that dispatch_data_create_transform should + * attempt to automatically detect the input type based on the presence of a + * byte order mark character at the beginning of the data. In the absence of a + * BOM, the data will be assumed to be in UTF-8 format. Only valid as an input + * format. + */ +#define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf_any; + +/*! + * @function dispatch_data_create_transform + * Returns a new dispatch data object after transforming the given data object + * from the supplied format, into the given output format. + * + * @param data + * The data object representing the region(s) of memory to transform. + * @param input_type + * Flags specifying the input format of the source dispatch_data_t + * + * @param output_type + * Flags specifying the expected output format of the resulting transfomation. + * + * @result + * A newly created dispatch data object, dispatch_data_empty if no has been + * produced, or NULL if an error occurred. 
+ */ + +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_with_transform(dispatch_data_t data, + dispatch_data_format_type_t input_type, + dispatch_data_format_type_t output_type); + +#endif /* __BLOCKS__ */ + +__END_DECLS + +#endif // __DISPATCH_DATA_PRIVATE__ diff --git a/private/dispatch.h b/private/dispatch.h new file mode 100644 index 000000000..3f1f37457 --- /dev/null +++ b/private/dispatch.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_PRIVATE_LEGACY__ +#define __DISPATCH_PRIVATE_LEGACY__ + +#define DISPATCH_NO_LEGACY 1 +#ifdef DISPATCH_LEGACY // +#error "Dispatch legacy API unavailable." 
+#endif + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#include_next +#endif + +#endif // __DISPATCH_PRIVATE_LEGACY__ diff --git a/private/private.h b/private/private.h index 9bb0e0190..08a14ce16 100644 --- a/private/private.h +++ b/private/private.h @@ -44,18 +44,8 @@ #endif #include -#define DISPATCH_NO_LEGACY 1 -#ifdef DISPATCH_LEGACY // -#error "Dispatch legacy API unavailable." -#endif - #ifndef __DISPATCH_BUILDING_DISPATCH__ -#include_next - -// Workaround -#ifndef __DISPATCH_PUBLIC__ -#include "/usr/include/dispatch/dispatch.h" -#endif +#include #ifndef __DISPATCH_INDIRECT__ #define __DISPATCH_INDIRECT__ @@ -64,13 +54,16 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ -/* LEGACY: Use DISPATCH_API_VERSION */ -#define LIBDISPATCH_VERSION DISPATCH_API_VERSION +// Check that public and private dispatch headers match +#if DISPATCH_API_VERSION != 20111201 // Keep in sync with +#error "Dispatch header mismatch between /usr/include and /usr/local/include" +#endif __BEGIN_DECLS @@ -112,6 +105,10 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void (*_dispatch_end_NSAutoReleasePool)(void *); +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +bool _dispatch_is_multithreaded(void); + #define _dispatch_time_after_nsec(t) \ dispatch_time(DISPATCH_TIME_NOW, (t)) #define _dispatch_time_after_usec(t) \ diff --git a/private/queue_private.h b/private/queue_private.h index 5ec36d09c..bdfb5b8b9 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -28,7 +28,7 @@ #define __DISPATCH_QUEUE_PRIVATE__ #ifndef __DISPATCH_INDIRECT__ -#error "Please #include instead of this file directly." +#error "Please #include instead of this file directly." #include // for HeaderDoc #endif @@ -125,6 +125,21 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { const uint16_t dqo_running_size; } dispatch_queue_offsets; +/*! 
+ * @function dispatch_flush_continuation_cache + * + * @abstract + * Flushes the current thread's cache of continuation objects, if any. + * + * @discussion + * Warning: this function is subject to change in a future release. + * Please contact the GCD team before using it in your code. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_NA) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_flush_continuation_cache(void); __END_DECLS diff --git a/private/source_private.h b/private/source_private.h index 576f64a75..8de730821 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -28,7 +28,7 @@ #define __DISPATCH_SOURCE_PRIVATE__ #ifndef __DISPATCH_INDIRECT__ -#error "Please #include instead of this file directly." +#error "Please #include instead of this file directly." #include // for HeaderDoc #endif @@ -51,6 +51,57 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; +/*! + * @const DISPATCH_SOURCE_TYPE_SOCK + * @discussion A dispatch source that monitors events on socket state changes. + */ +#define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; + +/*! 
+ * @enum dispatch_source_sock_flags_t + * + * @constant DISPATCH_SOCK_CONNRESET + * Received RST + * + * @constant DISPATCH_SOCK_READCLOSED + * Read side is shutdown + * + * @constant DISPATCH_SOCK_WRITECLOSED + * Write side is shutdown + * + * @constant DISPATCH_SOCK_TIMEOUT + * Timeout: rexmt, keep-alive or persist + * + * @constant DISPATCH_SOCK_NOSRCADDR + * Source address not available + * + * @constant DISPATCH_SOCK_IFDENIED + * Interface denied connection + * + * @constant DISPATCH_SOCK_SUSPEND + * Output queue suspended + * + * @constant DISPATCH_SOCK_RESUME + * Output queue resumed + * + * @constant DISPATCH_SOCK_KEEPALIVE + * TCP Keepalive received + * + */ +enum { + DISPATCH_SOCK_CONNRESET = 0x00000001, + DISPATCH_SOCK_READCLOSED = 0x00000002, + DISPATCH_SOCK_WRITECLOSED = 0x00000004, + DISPATCH_SOCK_TIMEOUT = 0x00000008, + DISPATCH_SOCK_NOSRCADDR = 0x00000010, + DISPATCH_SOCK_IFDENIED = 0x00000020, + DISPATCH_SOCK_SUSPEND = 0x00000040, + DISPATCH_SOCK_RESUME = 0x00000080, + DISPATCH_SOCK_KEEPALIVE = 0x00000100, +}; + /*! 
* @enum dispatch_source_vfs_flags_t * diff --git a/src/Makefile.am b/src/Makefile.am index 20b2baa9c..1af748c52 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -35,8 +35,7 @@ libdispatch_la_SOURCES= \ shims/time.h \ shims/tsd.h -INCLUDES=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private \ - @APPLE_LIBC_SOURCE_PATH@ @APPLE_LIBCLOSURE_SOURCE_PATH@ @APPLE_XNU_SOURCE_PATH@ +INCLUDES=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private libdispatch_la_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) libdispatch_la_CFLAGS+=$(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) @@ -48,6 +47,7 @@ libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 -Wl,-current_version,$(VERS endif CLEANFILES= +DISTCLEANFILES=System if USE_MIG BUILT_SOURCES= \ @@ -63,11 +63,3 @@ CLEANFILES+=$(BUILT_SOURCES) $(MIG) -user $*User.c -header $*.h \ -server $*Server.c -sheader $*Server.h $< endif - -if USE_XNU_SOURCE -# hack for pthread_machdep.h's #include -$(libdispatch_la_OBJECTS): $(abs_srcdir)/System -$(abs_srcdir)/System: - $(LN_S) -fh "@APPLE_XNU_SOURCE_SYSTEM_PATH@" System -CLEANFILES+=System -endif diff --git a/src/apply.c b/src/apply.c index 9a6343906..1a771145c 100644 --- a/src/apply.c +++ b/src/apply.c @@ -17,35 +17,18 @@ * * @APPLE_APACHE_LICENSE_HEADER_END@ */ -#include "internal.h" -// We'd use __attribute__((aligned(x))), but it does not atually increase the -// alignment of stack variables. All we really need is the stack usage of the -// local thread to be sufficiently away to avoid cache-line contention with the -// busy 'da_index' variable. 
-// -// NOTE: 'char' arrays cause GCC to insert buffer overflow detection logic -struct dispatch_apply_s { - long _da_pad0[DISPATCH_CACHELINE_SIZE / sizeof(long)]; - void (*da_func)(void *, size_t); - void *da_ctxt; - size_t da_iterations; - size_t da_index; - uint32_t da_thr_cnt; - _dispatch_thread_semaphore_t da_sema; - dispatch_queue_t da_queue; - long _da_pad1[DISPATCH_CACHELINE_SIZE / sizeof(long)]; -}; +#include "internal.h" DISPATCH_ALWAYS_INLINE static inline void _dispatch_apply_invoke(void *ctxt) { - struct dispatch_apply_s *da = ctxt; + dispatch_apply_t da = ctxt; size_t const iter = da->da_iterations; typeof(da->da_func) const func = da->da_func; void *const da_ctxt = da->da_ctxt; - size_t idx; + size_t idx, done = 0; _dispatch_workitem_dec(); // this unit executes many items @@ -55,13 +38,21 @@ _dispatch_apply_invoke(void *ctxt) while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) { _dispatch_client_callout2(da_ctxt, idx, func); _dispatch_workitem_inc(); + done++; } _dispatch_thread_setspecific(dispatch_apply_key, NULL); dispatch_atomic_release_barrier(); - if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) { + + // The thread that finished the last workitem wakes up the (possibly waiting) + // thread that called dispatch_apply. They could be one and the same. 
+ if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) { _dispatch_thread_semaphore_signal(da->da_sema); } + + if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) { + _dispatch_continuation_free((dispatch_continuation_t)da); + } } DISPATCH_NOINLINE @@ -74,7 +65,7 @@ _dispatch_apply2(void *ctxt) static void _dispatch_apply3(void *ctxt) { - struct dispatch_apply_s *da = ctxt; + dispatch_apply_t da = ctxt; dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, da->da_queue); @@ -85,7 +76,7 @@ _dispatch_apply3(void *ctxt) static void _dispatch_apply_serial(void *ctxt) { - struct dispatch_apply_s *da = ctxt; + dispatch_apply_t da = ctxt; size_t idx = 0; _dispatch_workitem_dec(); // this unit executes many items @@ -93,44 +84,57 @@ _dispatch_apply_serial(void *ctxt) _dispatch_client_callout2(da->da_ctxt, idx, da->da_func); _dispatch_workitem_inc(); } while (++idx < da->da_iterations); + + _dispatch_continuation_free((dispatch_continuation_t)da); } -// 256 threads should be good enough for the short to mid term -#define DISPATCH_APPLY_MAX_CPUS 256 +// 64 threads should be good enough for the short to mid term +#define DISPATCH_APPLY_MAX_CPUS 64 DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_f2(dispatch_queue_t dq, struct dispatch_apply_s *da, +_dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, dispatch_function_t func) { - struct dispatch_apply_dc_s { - DISPATCH_CONTINUATION_HEADER(dispatch_apply_dc_s); - } da_dc[DISPATCH_APPLY_MAX_CPUS]; - size_t i; - - for (i = 0; i < da->da_thr_cnt - 1; i++) { - da_dc[i].do_vtable = NULL; - da_dc[i].do_next = &da_dc[i + 1]; - da_dc[i].dc_func = func; - da_dc[i].dc_ctxt = da; + uint32_t i = 0; + dispatch_continuation_t head = NULL, tail = NULL; + + // The current thread does not need a continuation + uint32_t continuation_cnt = da->da_thr_cnt - 1; + + dispatch_assert(continuation_cnt); + + for (i = 0; i < continuation_cnt; i++) { + 
dispatch_continuation_t next = _dispatch_continuation_alloc(); + next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + next->dc_func = func; + next->dc_ctxt = da; + + next->do_next = head; + head = next; + + if (!tail) { + tail = next; + } } - da->da_sema = _dispatch_get_thread_semaphore(); + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + da->da_sema = sema; - _dispatch_queue_push_list(dq, (void *)&da_dc[0], - (void *)&da_dc[da->da_thr_cnt - 2]); + _dispatch_queue_push_list(dq, head, tail, continuation_cnt); // Call the first element directly _dispatch_apply2(da); _dispatch_workitem_inc(); - _dispatch_thread_semaphore_wait(da->da_sema); - _dispatch_put_thread_semaphore(da->da_sema); + _dispatch_thread_semaphore_wait(sema); + _dispatch_put_thread_semaphore(sema); + } static void _dispatch_apply_redirect(void *ctxt) { - struct dispatch_apply_s *da = ctxt; + dispatch_apply_t da = ctxt; uint32_t da_width = 2 * (da->da_thr_cnt - 1); dispatch_queue_t dq = da->da_queue, rq = dq, tq; @@ -165,40 +169,42 @@ void dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) { - struct dispatch_apply_s da; - - da.da_func = func; - da.da_ctxt = ctxt; - da.da_iterations = iterations; - da.da_index = 0; - da.da_thr_cnt = _dispatch_hw_config.cc_max_active; - da.da_queue = NULL; - - if (da.da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { - da.da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; - } if (slowpath(iterations == 0)) { return; } - if (iterations < da.da_thr_cnt) { - da.da_thr_cnt = (uint32_t)iterations; + + dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); + + da->da_func = func; + da->da_ctxt = ctxt; + da->da_iterations = iterations; + da->da_index = 0; + da->da_thr_cnt = _dispatch_hw_config.cc_max_active; + da->da_done = 0; + da->da_queue = NULL; + + if (da->da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { + da->da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; + } + if (iterations < da->da_thr_cnt) { + da->da_thr_cnt = 
(uint32_t)iterations; } - if (slowpath(dq->dq_width <= 2) || slowpath(da.da_thr_cnt <= 1) || + if (slowpath(dq->dq_width <= 2) || slowpath(da->da_thr_cnt <= 1) || slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) { - return dispatch_sync_f(dq, &da, _dispatch_apply_serial); + return dispatch_sync_f(dq, da, _dispatch_apply_serial); } dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); if (slowpath(dq->do_targetq)) { if (slowpath(dq == old_dq)) { - return dispatch_sync_f(dq, &da, _dispatch_apply_serial); + return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { - da.da_queue = dq; - return dispatch_sync_f(dq, &da, _dispatch_apply_redirect); + da->da_queue = dq; + return dispatch_sync_f(dq, da, _dispatch_apply_redirect); } } dispatch_atomic_acquire_barrier(); _dispatch_thread_setspecific(dispatch_queue_key, dq); - _dispatch_apply_f2(dq, &da, _dispatch_apply2); + _dispatch_apply_f2(dq, da, _dispatch_apply2); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } diff --git a/src/benchmark.c b/src/benchmark.c index 246affa2f..f340b4431 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -20,7 +20,6 @@ #include "internal.h" - struct __dispatch_benchmark_data_s { #if HAVE_MACH_ABSOLUTE_TIME mach_timebase_info_data_t tbi; diff --git a/src/data.c b/src/data.c index e12565617..804896460 100644 --- a/src/data.c +++ b/src/data.c @@ -34,41 +34,66 @@ #define _dispatch_data_retain(x) dispatch_retain(x) #define _dispatch_data_release(x) dispatch_release(x) -static void _dispatch_data_dispose(dispatch_data_t data); -static size_t _dispatch_data_debug(dispatch_data_t data, char* buf, - size_t bufsiz); - #if DISPATCH_DATA_MOVABLE +#if DISPATCH_USE_RESOLVERS && !defined(DISPATCH_RESOLVED_VARIANT) +#error Resolved variant required for movable +#endif static const dispatch_block_t _dispatch_data_destructor_unlock = ^{ DISPATCH_CRASH("unlock destructor called"); }; #define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock) 
#endif -const struct dispatch_data_vtable_s _dispatch_data_vtable = { - .do_type = DISPATCH_DATA_TYPE, - .do_kind = "data", - .do_dispose = _dispatch_data_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = _dispatch_data_debug, +const dispatch_block_t _dispatch_data_destructor_free = ^{ + DISPATCH_CRASH("free destructor called"); +}; + +const dispatch_block_t _dispatch_data_destructor_none = ^{ + DISPATCH_CRASH("none destructor called"); +}; + +const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ + DISPATCH_CRASH("vmdeallocate destructor called"); +}; + +struct dispatch_data_s _dispatch_data_empty = { + .do_vtable = DISPATCH_VTABLE(data), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_next = DISPATCH_OBJECT_LISTLESS, }; static dispatch_data_t _dispatch_data_init(size_t n) { - dispatch_data_t data = calloc(1ul, sizeof(struct dispatch_data_s) + - n * sizeof(range_record)); + dispatch_data_t data = _dispatch_alloc(DISPATCH_VTABLE(data), + sizeof(struct dispatch_data_s) + n * sizeof(range_record)); data->num_records = n; - data->do_vtable = &_dispatch_data_vtable; - data->do_xref_cnt = 1; - data->do_ref_cnt = 1; data->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); data->do_next = DISPATCH_OBJECT_LISTLESS; return data; } +static void +_dispatch_data_destroy_buffer(const void* buffer, size_t size, + dispatch_queue_t queue, dispatch_block_t destructor) +{ + if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) { + free((void*)buffer); + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) { + // do nothing + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) { + vm_deallocate(mach_task_self(), (vm_address_t)buffer, size); + } else { + if (!queue) { + queue = dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + } + dispatch_async_f(queue, destructor, _dispatch_call_block_and_release); + } +} + dispatch_data_t 
dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor) @@ -78,11 +103,9 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, // Empty data requested so return the singleton empty object. Call // destructor immediately in this case to ensure any unused associated // storage is released. - if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) { - free((void*)buffer); - } else if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT) { - dispatch_async(queue ? queue : dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), destructor); + if (destructor) { + _dispatch_data_destroy_buffer(buffer, size, queue, + _dispatch_Block_copy(destructor)); } return dispatch_data_empty; } @@ -92,7 +115,6 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, data->size = size; data->records[0].from = 0; data->records[0].length = size; - data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE; if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { // The default destructor was provided, indicating the data should be // copied. @@ -102,10 +124,9 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, return NULL; } buffer = memcpy(data_buf, buffer, size); + data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE; } else { - if (destructor != DISPATCH_DATA_DESTRUCTOR_FREE) { - data->destructor = Block_copy(destructor); - } + data->destructor = _dispatch_Block_copy(destructor); #if DISPATCH_DATA_MOVABLE // A non-default destructor was provided, indicating the system does not // own the buffer. 
Mark the object as locked since the application has @@ -121,11 +142,11 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, return data; } -static void +void _dispatch_data_dispose(dispatch_data_t dd) { dispatch_block_t destructor = dd->destructor; - if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { + if (destructor == NULL) { size_t i; for (i = 0; i < dd->num_records; ++i) { _dispatch_data_release(dd->records[i].data_object); @@ -136,16 +157,13 @@ _dispatch_data_dispose(dispatch_data_t dd) (void)dispatch_atomic_dec2o(data, locked); _dispatch_data_release(data); #endif - } else if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE) { - free(dd->records[0].data_object); } else { - dispatch_async_f(dd->do_targetq, destructor, - _dispatch_call_block_and_release); + _dispatch_data_destroy_buffer(dd->records[0].data_object, + dd->records[0].length, dd->do_targetq, destructor); } - _dispatch_dispose(dd); } -static size_t +size_t _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) { size_t offset = 0; diff --git a/src/data_internal.h b/src/data_internal.h index 314efa752..2dec5f001 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -32,20 +32,15 @@ #include // for HeaderDoc #endif -struct dispatch_data_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_data_s); -}; - -extern const struct dispatch_data_vtable_s _dispatch_data_vtable; - typedef struct range_record_s { void* data_object; size_t from; size_t length; } range_record; +DISPATCH_CLASS_DECL(data); struct dispatch_data_s { - DISPATCH_STRUCT_HEADER(dispatch_data_s, dispatch_data_vtable_s); + DISPATCH_STRUCT_HEADER(data); #if DISPATCH_DATA_MOVABLE unsigned int locked; #endif @@ -55,4 +50,17 @@ struct dispatch_data_s { range_record records[]; }; +typedef dispatch_data_t (*dispatch_transform_t)(dispatch_data_t data); + +struct dispatch_data_format_type_s { + uint64_t type; + uint64_t input_mask; + uint64_t output_mask; + dispatch_transform_t decode; + dispatch_transform_t 
encode; +}; + +void _dispatch_data_dispose(dispatch_data_t data); +size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); + #endif // __DISPATCH_DATA_INTERNAL__ diff --git a/src/init.c b/src/init.c index d72219c92..8f1456edd 100644 --- a/src/init.c +++ b/src/init.c @@ -70,19 +70,34 @@ dummy_function_r0(void) #pragma mark dispatch_globals #if DISPATCH_COCOA_COMPAT -// dispatch_begin_thread_4GC having non-default value triggers GC-only slow -// paths and is checked frequently, testing against NULL is faster than -// comparing for equality with "dummy_function" -void (*dispatch_begin_thread_4GC)(void) = NULL; -void (*dispatch_end_thread_4GC)(void) = dummy_function; -void (*dispatch_no_worker_threads_4GC)(void) = NULL; -void *(*_dispatch_begin_NSAutoReleasePool)(void) = (void *)dummy_function; -void (*_dispatch_end_NSAutoReleasePool)(void *) = (void *)dummy_function; +void (*dispatch_begin_thread_4GC)(void); +void (*dispatch_end_thread_4GC)(void); +void (*dispatch_no_worker_threads_4GC)(void); +void *(*_dispatch_begin_NSAutoReleasePool)(void); +void (*_dispatch_end_NSAutoReleasePool)(void *); #endif +#if !DISPATCH_USE_DIRECT_TSD +pthread_key_t dispatch_queue_key; +pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_cache_key; +pthread_key_t dispatch_io_key; +pthread_key_t dispatch_apply_key; +#if DISPATCH_PERF_MON +pthread_key_t dispatch_bcounter_key; +#endif +#endif // !DISPATCH_USE_DIRECT_TSD + struct _dispatch_hw_config_s _dispatch_hw_config; bool _dispatch_safe_fork = true; +DISPATCH_NOINLINE +bool +_dispatch_is_multithreaded(void) +{ + return !_dispatch_safe_fork; +} + const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_version = 3, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), @@ -101,8 +116,8 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = { // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_main_q = { + .do_vtable = DISPATCH_VTABLE(queue), #if 
!DISPATCH_USE_RESOLVERS - .do_vtable = &_dispatch_queue_vtable, .do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], #endif @@ -115,38 +130,137 @@ struct dispatch_queue_s _dispatch_main_q = { .dq_serialnum = 1, }; -const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable = { - .do_type = DISPATCH_QUEUE_ATTR_TYPE, - .do_kind = "queue-attr", -}; - struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = { - .do_vtable = &dispatch_queue_attr_vtable, + .do_vtable = DISPATCH_VTABLE(queue_attr), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_next = DISPATCH_OBJECT_LISTLESS, }; -struct dispatch_data_s _dispatch_data_empty = { -#if !DISPATCH_USE_RESOLVERS - .do_vtable = &_dispatch_data_vtable, -#endif - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_next = DISPATCH_OBJECT_LISTLESS, -}; +#pragma mark - +#pragma mark dispatch_vtables + +DISPATCH_VTABLE_INSTANCE(semaphore, + .do_type = DISPATCH_SEMAPHORE_TYPE, + .do_kind = "semaphore", + .do_dispose = _dispatch_semaphore_dispose, + .do_debug = _dispatch_semaphore_debug, +); + +DISPATCH_VTABLE_INSTANCE(group, + .do_type = DISPATCH_GROUP_TYPE, + .do_kind = "group", + .do_dispose = _dispatch_semaphore_dispose, + .do_debug = _dispatch_semaphore_debug, +); + +DISPATCH_VTABLE_INSTANCE(queue, + .do_type = DISPATCH_QUEUE_TYPE, + .do_kind = "queue", + .do_dispose = _dispatch_queue_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = dispatch_queue_debug, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, + .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, + .do_kind = "global-queue", + .do_debug = dispatch_queue_debug, + .do_probe = _dispatch_queue_probe_root, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, + .do_type = DISPATCH_QUEUE_MGR_TYPE, + .do_kind = "mgr-queue", + .do_invoke = _dispatch_mgr_thread, + .do_debug = 
dispatch_queue_debug, + .do_probe = _dispatch_mgr_wakeup, +); + +DISPATCH_VTABLE_INSTANCE(queue_specific_queue, + .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, + .do_kind = "queue-context", + .do_dispose = _dispatch_queue_specific_queue_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dispatch_queue_debug, +); + +DISPATCH_VTABLE_INSTANCE(queue_attr, + .do_type = DISPATCH_QUEUE_ATTR_TYPE, + .do_kind = "queue-attr", +); + +DISPATCH_VTABLE_INSTANCE(source, + .do_type = DISPATCH_SOURCE_KEVENT_TYPE, + .do_kind = "kevent-source", + .do_invoke = _dispatch_source_invoke, + .do_dispose = _dispatch_source_dispose, + .do_probe = _dispatch_source_probe, + .do_debug = _dispatch_source_debug, +); + +DISPATCH_VTABLE_INSTANCE(data, + .do_type = DISPATCH_DATA_TYPE, + .do_kind = "data", + .do_dispose = _dispatch_data_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = _dispatch_data_debug, +); + +DISPATCH_VTABLE_INSTANCE(io, + .do_type = DISPATCH_IO_TYPE, + .do_kind = "channel", + .do_dispose = _dispatch_io_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +); + +DISPATCH_VTABLE_INSTANCE(operation, + .do_type = DISPATCH_OPERATION_TYPE, + .do_kind = "operation", + .do_dispose = _dispatch_operation_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +); + +DISPATCH_VTABLE_INSTANCE(disk, + .do_type = DISPATCH_DISK_TYPE, + .do_kind = "disk", + .do_dispose = _dispatch_disk_dispose, + .do_invoke = NULL, + .do_probe = (void *)dummy_function_r0, + .do_debug = (void *)dummy_function_r0, +); -const dispatch_block_t _dispatch_data_destructor_free = ^{ - DISPATCH_CRASH("free destructor called"); -}; +void +_dispatch_vtable_init(void) +{ +#if USE_OBJC + // ObjC classes and dispatch vtables are co-located via linker order and + // alias files, verify correct layout during initialization rdar://10640168 + 
#define DISPATCH_OBJC_CLASS(name) \ + DISPATCH_CONCAT(OBJC_CLASS_$_,DISPATCH_CLASS(name)) + extern void *DISPATCH_OBJC_CLASS(semaphore); + dispatch_assert((char*)DISPATCH_VTABLE(semaphore) - + (char*)&DISPATCH_OBJC_CLASS(semaphore) == 0); + dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable)) + - (char*)&DISPATCH_OBJC_CLASS(semaphore) == + sizeof(_os_object_class_s)); +#endif +} #pragma mark - -#pragma mark dispatch_log +#pragma mark dispatch_bug static char _dispatch_build[16]; static void -_dispatch_bug_init(void *context DISPATCH_UNUSED) +_dispatch_build_init(void *context DISPATCH_UNUSED) { #ifdef __APPLE__ int mib[] = { CTL_KERN, KERN_OSVERSION }; @@ -161,31 +275,36 @@ _dispatch_bug_init(void *context DISPATCH_UNUSED) #endif } +#define _dispatch_bug_log(msg, ...) do { \ + static void *last_seen; \ + void *ra = __builtin_return_address(0); \ + if (last_seen != ra) { \ + last_seen = ra; \ + _dispatch_log((msg), ##__VA_ARGS__); \ + } \ +} while(0) + void _dispatch_bug(size_t line, long val) { static dispatch_once_t pred; - static void *last_seen; - void *ra = __builtin_return_address(0); - - dispatch_once_f(&pred, NULL, _dispatch_bug_init); - if (last_seen != ra) { - last_seen = ra; - _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", - _dispatch_build, (unsigned long)line, val); - } + + dispatch_once_f(&pred, NULL, _dispatch_build_init); + _dispatch_bug_log("BUG in libdispatch: %s - %lu - 0x%lx", + _dispatch_build, (unsigned long)line, val); +} + +void +_dispatch_bug_client(const char* msg) +{ + _dispatch_bug_log("BUG in libdispatch client: %s", msg); } void _dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) { - static void *last_seen; - void *ra = __builtin_return_address(0); - if (last_seen != ra) { - last_seen = ra; - _dispatch_log("BUG in libdispatch client: %s %s - 0x%x", msg, - mach_error_string(kr), kr); - } + _dispatch_bug_log("BUG in libdispatch client: %s %s - 0x%x", msg, + mach_error_string(kr), kr); } void @@ 
-195,18 +314,12 @@ _dispatch_abort(size_t line, long val) abort(); } -void -_dispatch_log(const char *msg, ...) -{ - va_list ap; - - va_start(ap, msg); - _dispatch_logv(msg, ap); - va_end(ap); -} +#pragma mark - +#pragma mark dispatch_log static FILE *dispatch_logfile; static bool dispatch_log_disabled; +static dispatch_once_t _dispatch_logv_pred; static void _dispatch_logv_init(void *context DISPATCH_UNUSED) @@ -249,38 +362,48 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) } } -void -_dispatch_logv(const char *msg, va_list ap) +DISPATCH_NOINLINE +static void +_dispatch_logv_file(const char *msg, va_list ap) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_logv_init); + char *buf; + size_t len; + + len = vasprintf(&buf, msg, ap); + buf[len++] = '\n'; + fwrite(buf, 1, len, dispatch_logfile); + fflush(dispatch_logfile); + free(buf); +} +static inline void +_dispatch_logv(const char *msg, va_list ap) +{ + dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init); if (slowpath(dispatch_log_disabled)) { return; } if (slowpath(dispatch_logfile)) { - vfprintf(dispatch_logfile, msg, ap); - // TODO: May cause interleaving with another thread's log - fputc('\n', dispatch_logfile); - fflush(dispatch_logfile); - return; + return _dispatch_logv_file(msg, ap); } vsyslog(LOG_NOTICE, msg, ap); } -#pragma mark - -#pragma mark dispatch_debug - +DISPATCH_NOINLINE void -dispatch_debug(dispatch_object_t dou, const char *msg, ...) +_dispatch_log(const char *msg, ...) { va_list ap; va_start(ap, msg); - dispatch_debugv(dou._do, msg, ap); + _dispatch_logv(msg, ap); va_end(ap); } +#pragma mark - +#pragma mark dispatch_debug + +DISPATCH_NOINLINE void dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) { @@ -297,6 +420,17 @@ dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) _dispatch_logv(buf, ap); } +DISPATCH_NOINLINE +void +dispatch_debug(dispatch_object_t dou, const char *msg, ...) 
+{ + va_list ap; + + va_start(ap, msg); + dispatch_debugv(dou._do, msg, ap); + va_end(ap); +} + #pragma mark - #pragma mark dispatch_block_t @@ -308,10 +442,13 @@ _dispatch_Block_copy(dispatch_block_t db) { dispatch_block_t rval; - while (!(rval = Block_copy(db))) { - sleep(1); + if (fastpath(db)) { + while (!fastpath(rval = Block_copy(db))) { + sleep(1); + } + return rval; } - return rval; + DISPATCH_CLIENT_CRASH("NULL was passed where a block should have been"); } void @@ -327,26 +464,116 @@ _dispatch_call_block_and_release(void *block) #pragma mark - #pragma mark dispatch_client_callout -#if DISPATCH_USE_CLIENT_CALLOUT +// Abort on uncaught exceptions thrown from client callouts rdar://8577499 +#if DISPATCH_USE_CLIENT_CALLOUT && (__arm__ || !USE_OBJC) +// On platforms with SjLj exceptions, avoid the SjLj overhead on every callout +// by clearing the unwinder's TSD pointer to the handler stack around callouts -#undef _dispatch_client_callout -#undef _dispatch_client_callout2 +#define _dispatch_get_tsd_base() +#define _dispatch_get_unwind_tsd() (NULL) +#define _dispatch_set_unwind_tsd(u) do {(void)(u);} while (0) +#define _dispatch_free_unwind_tsd() +#undef _dispatch_client_callout DISPATCH_NOINLINE void _dispatch_client_callout(void *ctxt, dispatch_function_t f) { - return f(ctxt); + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + _dispatch_set_unwind_tsd(NULL); + f(ctxt); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); } +#undef _dispatch_client_callout2 DISPATCH_NOINLINE void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { - return f(ctxt, i); + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, i); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); } -#endif +#endif // DISPATCH_USE_CLIENT_CALLOUT + +#pragma mark - +#pragma mark _os_object_t no_objc + +#if !USE_OBJC + +static const _os_object_class_s _os_object_class; + +void 
+_os_object_init(void) +{ + return; +} + +_os_object_t +_os_object_alloc(const void *cls, size_t size) +{ + _os_object_t obj; + dispatch_assert(size >= sizeof(struct _os_object_s)); + if (!cls) cls = &_os_object_class; + while (!fastpath(obj = calloc(1u, size))) { + sleep(1); // Temporary resource shortage + } + obj->os_obj_isa = cls; + return obj; +} + +void +_os_object_dealloc(_os_object_t obj) +{ + *((void *volatile*)&obj->os_obj_isa) = (void *)0x200; + return free(obj); +} + +void +_os_object_xref_dispose(_os_object_t obj) +{ + if (fastpath(obj->os_obj_isa->_os_obj_xref_dispose)) { + return obj->os_obj_isa->_os_obj_xref_dispose(obj); + } + return _os_object_release_internal(obj); +} + +void +_os_object_dispose(_os_object_t obj) +{ + if (fastpath(obj->os_obj_isa->_os_obj_dispose)) { + return obj->os_obj_isa->_os_obj_dispose(obj); + } + return _os_object_dealloc(obj); +} + +#pragma mark - +#pragma mark dispatch_autorelease_pool no_objc + +#if DISPATCH_COCOA_COMPAT + +void *_dispatch_autorelease_pool_push(void) { + void *pool = NULL; + if (_dispatch_begin_NSAutoReleasePool) { + pool = _dispatch_begin_NSAutoReleasePool(); + } + return pool; +} + +void _dispatch_autorelease_pool_pop(void *pool) { + if (_dispatch_end_NSAutoReleasePool) { + _dispatch_end_NSAutoReleasePool(pool); + } +} + +#endif // DISPATCH_COCOA_COMPAT +#endif // !USE_OBJC #pragma mark - #pragma mark dispatch_source_types @@ -552,6 +779,16 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { .init = dispatch_source_type_mach_recv_init, }; +const struct dispatch_source_type_s _dispatch_source_type_sock = { + .ke = { + .filter = EVFILT_SOCK, + .flags = EV_CLEAR, + }, + .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | + NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | + NOTE_RESUME | NOTE_KEEPALIVE, +}; + #pragma mark - #pragma mark dispatch_mig @@ -618,5 +855,4 @@ _dispatch_mach_notify_send_once(mach_port_t notify DISPATCH_UNUSED) return KERN_SUCCESS; } 
- #endif // HAVE_MACH diff --git a/src/internal.h b/src/internal.h index 24d3a04ea..a90f93f8d 100644 --- a/src/internal.h +++ b/src/internal.h @@ -32,14 +32,38 @@ #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ +#ifdef __APPLE__ +#include +#include +#endif + + +#if USE_OBJC && ((!TARGET_IPHONE_SIMULATOR && defined(__i386__)) || \ + (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080)) +// Disable Objective-C support on platforms with legacy objc runtime +#undef USE_OBJC +#define USE_OBJC 0 +#endif + +#if USE_OBJC +#define OS_OBJECT_HAVE_OBJC_SUPPORT 1 +#if __OBJC__ +#define OS_OBJECT_USE_OBJC 1 +#else +#define OS_OBJECT_USE_OBJC 0 +#endif // __OBJC__ +#else +#define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +#endif // USE_OBJC #include #include +#include +#include #include #include -#include #include #include #include @@ -47,15 +71,21 @@ #include #include -/* private.h uses #include_next and must be included last to avoid picking - * up installed headers. */ +/* private.h must be included last to avoid picking up installed headers. */ +#include "object_private.h" #include "queue_private.h" #include "source_private.h" +#include "data_private.h" #include "benchmark.h" #include "private.h" /* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ +// Abort on uncaught exceptions thrown from client callouts rdar://8577499 +#if !defined(DISPATCH_USE_CLIENT_CALLOUT) +#define DISPATCH_USE_CLIENT_CALLOUT 1 +#endif + /* The "_debug" library build */ #ifndef DISPATCH_DEBUG #define DISPATCH_DEBUG 0 @@ -65,10 +95,6 @@ #define DISPATCH_PROFILE 0 #endif -#if DISPATCH_DEBUG && !defined(DISPATCH_USE_CLIENT_CALLOUT) -#define DISPATCH_USE_CLIENT_CALLOUT 1 -#endif - #if (DISPATCH_DEBUG || DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE) #define DISPATCH_USE_DTRACE 1 #endif @@ -154,6 +180,8 @@ #else #define DISPATCH_ALWAYS_INLINE_NDEBUG __attribute__((__always_inline__)) #endif +#define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y) +#define DISPATCH_CONCAT1(x,y) x ## y // workaround 6368156 #ifdef NSEC_PER_SEC @@ -176,13 +204,13 @@ DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); DISPATCH_NOINLINE +void _dispatch_bug_client(const char* msg); +DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); -DISPATCH_NOINLINE __attribute__((__format__(printf,1,2))) +DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); -DISPATCH_NOINLINE __attribute__((__format__(printf,1,0))) -void _dispatch_logv(const char *msg, va_list); /* * For reporting bugs within libdispatch when using the "_debug" version of the @@ -325,6 +353,7 @@ void _dispatch_call_block_and_release(void *block); void dummy_function(void); long dummy_function_r0(void); +void _dispatch_vtable_init(void); void _dispatch_source_drain_kevent(struct kevent *); @@ -333,8 +362,6 @@ void _dispatch_run_timers(void); // Returns howsoon with updated time value, or NULL if no timers active. 
struct timespec *_dispatch_get_next_timer_fire(struct timespec *howsoon); -bool _dispatch_source_testcancel(dispatch_source_t); - uint64_t _dispatch_timeout(dispatch_time_t when); extern bool _dispatch_safe_fork; @@ -347,21 +374,29 @@ extern struct _dispatch_hw_config_s { /* #includes dependent on internal.h */ #include "shims.h" -#include "object_internal.h" -#include "queue_internal.h" -#include "semaphore_internal.h" -#include "source_internal.h" -#include "data_internal.h" -#include "io_internal.h" -#include "trace.h" // SnowLeopard and iOS Simulator fallbacks #if HAVE_PTHREAD_WORKQUEUES -#if !defined(WORKQ_BG_PRIOQUEUE) || \ - (TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070) -#undef WORKQ_BG_PRIOQUEUE -#define WORKQ_BG_PRIOQUEUE WORKQ_LOW_PRIOQUEUE +#ifndef WORKQ_BG_PRIOQUEUE +#define WORKQ_BG_PRIOQUEUE 3 +#endif +#ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT +#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 +#endif +#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#ifndef DISPATCH_NO_BG_PRIORITY +#define DISPATCH_NO_BG_PRIORITY 1 +#endif +#endif +#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 +#ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 +#endif +#endif +#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 +#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 #endif #endif // HAVE_PTHREAD_WORKQUEUES @@ -422,4 +457,18 @@ extern struct _dispatch_hw_config_s { _dispatch_hardware_crash(); \ } while (0) +#define _OS_OBJECT_CLIENT_CRASH(x) do { \ + _dispatch_set_crash_log_message("API MISUSE: " x); \ + _dispatch_hardware_crash(); \ + } while (0) + +/* #includes dependent on internal.h */ +#include "object_internal.h" +#include "semaphore_internal.h" +#include "queue_internal.h" +#include "source_internal.h" +#include "data_internal.h" +#include "io_internal.h" +#include "trace.h" + #endif /* 
__DISPATCH_INTERNAL__ */ diff --git a/src/io.c b/src/io.c index b306054be..4e3601518 100644 --- a/src/io.c +++ b/src/io.c @@ -25,12 +25,10 @@ typedef void (^dispatch_fd_entry_init_callback_t)(dispatch_fd_entry_t fd_entry); DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_iocntl(uint32_t param, uint64_t value); -static void _dispatch_io_dispose(dispatch_io_t channel); static dispatch_operation_t _dispatch_operation_create( dispatch_op_direction_t direction, dispatch_io_t channel, off_t offset, size_t length, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t handler); -static void _dispatch_operation_dispose(dispatch_operation_t operation); static void _dispatch_operation_enqueue(dispatch_operation_t op, dispatch_op_direction_t direction, dispatch_data_t data); static dispatch_source_t _dispatch_operation_timer(dispatch_queue_t tq, @@ -52,7 +50,6 @@ static void _dispatch_stream_init(dispatch_fd_entry_t fd_entry, static void _dispatch_stream_dispose(dispatch_fd_entry_t fd_entry, dispatch_op_direction_t direction); static void _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev); -static void _dispatch_disk_dispose(dispatch_disk_t disk); static void _dispatch_stream_enqueue_operation(dispatch_stream_t stream, dispatch_operation_t operation, dispatch_data_t data); static void _dispatch_disk_enqueue_operation(dispatch_disk_t dsk, @@ -98,35 +95,7 @@ enum { DISPATCH_OP_FD_ERR, }; -#pragma mark - -#pragma mark dispatch_io_vtable - -static const struct dispatch_io_vtable_s _dispatch_io_vtable = { - .do_type = DISPATCH_IO_TYPE, - .do_kind = "channel", - .do_dispose = _dispatch_io_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, -}; - -static const struct dispatch_operation_vtable_s _dispatch_operation_vtable = { - .do_type = DISPATCH_OPERATION_TYPE, - .do_kind = "operation", - .do_dispose = _dispatch_operation_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - 
.do_debug = (void *)dummy_function_r0, -}; - -static const struct dispatch_disk_vtable_s _dispatch_disk_vtable = { - .do_type = DISPATCH_DISK_TYPE, - .do_kind = "disk", - .do_dispose = _dispatch_disk_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, -}; +#define _dispatch_io_Block_copy(x) ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) #pragma mark - #pragma mark dispatch_io_hashtables @@ -218,11 +187,9 @@ _dispatch_iocntl(uint32_t param, uint64_t value) static dispatch_io_t _dispatch_io_create(dispatch_io_type_t type) { - dispatch_io_t channel = calloc(1ul, sizeof(struct dispatch_io_s)); - channel->do_vtable = &_dispatch_io_vtable; + dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io), + sizeof(struct dispatch_io_s)); channel->do_next = DISPATCH_OBJECT_LISTLESS; - channel->do_ref_cnt = 1; - channel->do_xref_cnt = 1; channel->do_targetq = _dispatch_get_root_queue(0, true); channel->params.type = type; channel->params.high = SIZE_MAX; @@ -263,10 +230,10 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, } } -static void +void _dispatch_io_dispose(dispatch_io_t channel) { - if (channel->fd_entry) { + if (channel->fd_entry && !(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { if (channel->fd_entry->path_data) { // This modification is safe since path_data->channel is checked // only on close_queue (which is still suspended at this point) @@ -285,7 +252,6 @@ _dispatch_io_dispose(dispatch_io_t channel) if (channel->barrier_group) { dispatch_release(channel->barrier_group); } - _dispatch_dispose(channel); } static int @@ -625,8 +591,10 @@ _dispatch_io_stop(dispatch_io_t channel) if (fd_entry) { _dispatch_io_debug("io stop cleanup", channel->fd); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); - channel->fd_entry = NULL; - _dispatch_fd_entry_release(fd_entry); + if (!(channel->atomic_flags & DIO_CLOSED)) { + channel->fd_entry = NULL; + 
_dispatch_fd_entry_release(fd_entry); + } } else if (channel->fd != -1) { // Stop after close, need to check if fd_entry still exists _dispatch_retain(channel); @@ -667,9 +635,9 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_io_debug("io close", channel->fd); - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED); - dispatch_fd_entry_t fd_entry = channel->fd_entry; - if (fd_entry) { + if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { + (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED); + dispatch_fd_entry_t fd_entry = channel->fd_entry; if (!fd_entry->path_data) { channel->fd_entry = NULL; } @@ -906,12 +874,10 @@ _dispatch_operation_create(dispatch_op_direction_t direction, }); return NULL; } - dispatch_operation_t op; - op = calloc(1ul, sizeof(struct dispatch_operation_s)); - op->do_vtable = &_dispatch_operation_vtable; + dispatch_operation_t op = _dispatch_alloc(DISPATCH_VTABLE(operation), + sizeof(struct dispatch_operation_s)); op->do_next = DISPATCH_OBJECT_LISTLESS; - op->do_ref_cnt = 1; - op->do_xref_cnt = 0; // operation object is not exposed externally + op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL); op->op_q->do_targetq = queue; _dispatch_retain(queue); @@ -919,7 +885,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, op->direction = direction; op->offset = offset + channel->f_ptr; op->length = length; - op->handler = Block_copy(handler); + op->handler = _dispatch_io_Block_copy(handler); _dispatch_retain(channel); op->channel = channel; op->params = channel->params; @@ -933,7 +899,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, return op; } -static void +void _dispatch_operation_dispose(dispatch_operation_t op) { // Deliver the data if there's any @@ -962,7 +928,6 @@ 
_dispatch_operation_dispose(dispatch_operation_t op) dispatch_release(op->op_q); } Block_release(op->handler); - _dispatch_dispose(op); } static void @@ -1434,12 +1399,11 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) } // Otherwise create a new entry size_t pending_reqs_depth = dispatch_io_defaults.max_pending_io_reqs; - disk = calloc(1ul, sizeof(struct dispatch_disk_s) + (pending_reqs_depth * - sizeof(dispatch_operation_t))); - disk->do_vtable = &_dispatch_disk_vtable; + disk = _dispatch_alloc(DISPATCH_VTABLE(disk), + sizeof(struct dispatch_disk_s) + + (pending_reqs_depth * sizeof(dispatch_operation_t))); disk->do_next = DISPATCH_OBJECT_LISTLESS; - disk->do_ref_cnt = 1; - disk->do_xref_cnt = 0; + disk->do_xref_cnt = -1; disk->advise_list_depth = pending_reqs_depth; disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, false); @@ -1454,7 +1418,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) TAILQ_INIT(&fd_entry->stream_ops); } -static void +void _dispatch_disk_dispose(dispatch_disk_t disk) { uintptr_t hash = DIO_HASH(disk->dev); @@ -1465,7 +1429,6 @@ _dispatch_disk_dispose(dispatch_disk_t disk) dispatch_assert(!disk->advise_list[i]); } dispatch_release(disk->pick_queue); - free(disk); } #pragma mark - @@ -1857,13 +1820,13 @@ _dispatch_disk_perform(void *ctxt) dispatch_async(disk->pick_queue, ^{ switch (result) { case DISPATCH_OP_DELIVER: - _dispatch_operation_deliver_data(op, DOP_DELIVER); + _dispatch_operation_deliver_data(op, DOP_DEFAULT); break; case DISPATCH_OP_COMPLETE: _dispatch_disk_complete_operation(disk, op); break; case DISPATCH_OP_DELIVER_AND_COMPLETE: - _dispatch_operation_deliver_data(op, DOP_DELIVER); + _dispatch_operation_deliver_data(op, DOP_DELIVER | DOP_NO_EMPTY); _dispatch_disk_complete_operation(disk, op); break; case DISPATCH_OP_ERR: @@ -2094,7 +2057,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, data = dispatch_data_create_subrange(op->data, op->buf_len, op->length); } 
- if (op->buf_len == op->buf_siz) { + if (op->buf_data && op->buf_len == op->buf_siz) { _dispatch_io_data_release(op->buf_data); op->buf_data = NULL; op->buf = NULL; @@ -2105,7 +2068,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, _dispatch_io_data_retain(data); d = data; } else { - d = dispatch_data_create_subrange(op->data, op->buf_len, + d = dispatch_data_create_subrange(op->data, op->buf_siz, op->length); } _dispatch_io_data_release(op->data); @@ -2123,7 +2086,6 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, op->undelivered = 0; _dispatch_io_debug("deliver data", op->fd_entry->fd); dispatch_op_direction_t direction = op->direction; - __block dispatch_data_t d = data; dispatch_io_handler_t handler = op->handler; #if DISPATCH_IO_DEBUG int fd = op->fd_entry->fd; @@ -2135,6 +2097,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, // Note that data delivery may occur after the operation is freed dispatch_async(op->op_q, ^{ bool done = (flags & DOP_DONE); + dispatch_data_t d = data; if (done) { if (direction == DOP_DIR_READ && err) { if (dispatch_data_get_size(d)) { diff --git a/src/io_internal.h b/src/io_internal.h index c43bd75b3..dbbb6bf6c 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -76,8 +76,8 @@ typedef unsigned int dispatch_op_flags_t; #define _dispatch_io_debug(msg, fd, args...) 
#endif -DISPATCH_DECL(dispatch_operation); -DISPATCH_DECL(dispatch_disk); +DISPATCH_DECL_INTERNAL(dispatch_operation); +DISPATCH_DECL_INTERNAL(dispatch_disk); struct dispatch_stream_s { dispatch_queue_t dq; @@ -104,12 +104,9 @@ struct dispatch_stat_s { mode_t mode; }; -struct dispatch_disk_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_disk_s); -}; - +DISPATCH_CLASS_DECL(disk); struct dispatch_disk_s { - DISPATCH_STRUCT_HEADER(dispatch_disk_s, dispatch_disk_vtable_s); + DISPATCH_STRUCT_HEADER(disk); dev_t dev; TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations; dispatch_operation_t cur_rq; @@ -149,12 +146,9 @@ typedef struct dispatch_io_param_s { unsigned long interval_flags; } dispatch_io_param_s; -struct dispatch_operation_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_operation_s); -}; - +DISPATCH_CLASS_DECL(operation); struct dispatch_operation_s { - DISPATCH_STRUCT_HEADER(dispatch_operation_s, dispatch_operation_vtable_s); + DISPATCH_STRUCT_HEADER(operation); dispatch_queue_t op_q; dispatch_op_direction_t direction; // READ OR WRITE dispatch_io_param_s params; @@ -177,12 +171,9 @@ struct dispatch_operation_s { TAILQ_ENTRY(dispatch_operation_s) stream_list; }; -struct dispatch_io_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_io_s); -}; - +DISPATCH_CLASS_DECL(io); struct dispatch_io_s { - DISPATCH_STRUCT_HEADER(dispatch_io_s, dispatch_io_vtable_s); + DISPATCH_STRUCT_HEADER(io); dispatch_queue_t queue, barrier_queue; dispatch_group_t barrier_group; dispatch_io_param_s params; @@ -194,5 +185,8 @@ struct dispatch_io_s { }; void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); +void _dispatch_io_dispose(dispatch_io_t channel); +void _dispatch_operation_dispose(dispatch_operation_t operation); +void _dispatch_disk_dispose(dispatch_disk_t disk); #endif // __DISPATCH_IO_INTERNAL__ diff --git a/src/object.c b/src/object.c index b84979b90..7b94c757c 100644 --- a/src/object.c +++ b/src/object.c @@ -20,62 +20,164 @@ #include 
"internal.h" -void -dispatch_retain(dispatch_object_t dou) +#pragma mark - +#pragma mark _os_object_t + +unsigned long +_os_object_retain_count(_os_object_t obj) { - if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - return; // global object + int xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return ULONG_MAX; // global object } - if (slowpath((dispatch_atomic_inc2o(dou._do, do_xref_cnt) - 1) == 0)) { - DISPATCH_CLIENT_CRASH("Resurrection of an object"); + return xref_cnt + 1; +} + +_os_object_t +_os_object_retain_internal(_os_object_t obj) +{ + int ref_cnt = obj->os_obj_ref_cnt; + if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return obj; // global object } + ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt); + if (slowpath(ref_cnt <= 0)) { + DISPATCH_CRASH("Resurrection of an object"); + } + return obj; } void -_dispatch_retain(dispatch_object_t dou) +_os_object_release_internal(_os_object_t obj) { - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + int ref_cnt = obj->os_obj_ref_cnt; + if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return; // global object } - if (slowpath((dispatch_atomic_inc2o(dou._do, do_ref_cnt) - 1) == 0)) { - DISPATCH_CLIENT_CRASH("Resurrection of an object"); + ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt); + if (fastpath(ref_cnt >= 0)) { + return; + } + if (slowpath(ref_cnt < -1)) { + DISPATCH_CRASH("Over-release of an object"); + } +#if DISPATCH_DEBUG + if (slowpath(obj->os_obj_xref_cnt >= 0)) { + DISPATCH_CRASH("Release while external references exist"); + } +#endif + return _os_object_dispose(obj); +} + +_os_object_t +_os_object_retain(_os_object_t obj) +{ + int xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return obj; // global object + } + xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt); + if (slowpath(xref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Resurrection of an 
object"); } + return obj; } void -dispatch_release(dispatch_object_t dou) +_os_object_release(_os_object_t obj) { - if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + int xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return; // global object + } + xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt); + if (fastpath(xref_cnt >= 0)) { return; } + if (slowpath(xref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); + } + return _os_object_xref_dispose(obj); +} - unsigned int xref_cnt = dispatch_atomic_dec2o(dou._do, do_xref_cnt) + 1; - if (fastpath(xref_cnt > 1)) { - return; +bool +_os_object_retain_weak(_os_object_t obj) +{ + int xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return true; // global object } - if (fastpath(xref_cnt == 1)) { - if (dou._do->do_vtable == (void*)&_dispatch_source_kevent_vtable) { - return _dispatch_source_xref_release(dou._ds); - } - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH("Release of a suspended object"); - } - return _dispatch_release(dou._do); +retry: + if (slowpath(xref_cnt == -1)) { + return false; + } + if (slowpath(xref_cnt < -1)) { + goto overrelease; } - DISPATCH_CLIENT_CRASH("Over-release of an object"); + if (slowpath(!dispatch_atomic_cmpxchg2o(obj, os_obj_xref_cnt, xref_cnt, + xref_cnt + 1))) { + xref_cnt = obj->os_obj_xref_cnt; + goto retry; + } + return true; +overrelease: + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); +} + +bool +_os_object_allows_weak_reference(_os_object_t obj) +{ + int xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(xref_cnt == -1)) { + return false; + } + if (slowpath(xref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); + } + return true; +} + +#pragma mark - +#pragma mark dispatch_object_t + +void * +_dispatch_alloc(const void *vtable, size_t size) 
+{ + return _os_object_alloc(vtable, size); } void -_dispatch_dispose(dispatch_object_t dou) +dispatch_retain(dispatch_object_t dou) +{ + (void)_os_object_retain(dou._os_obj); +} + +void +_dispatch_retain(dispatch_object_t dou) +{ + (void)_os_object_retain_internal(dou._os_obj); +} + +void +dispatch_release(dispatch_object_t dou) +{ + _os_object_release(dou._os_obj); +} + +void +_dispatch_release(dispatch_object_t dou) +{ + _os_object_release_internal(dou._os_obj); +} + +static void +_dispatch_dealloc(dispatch_object_t dou) { dispatch_queue_t tq = dou._do->do_targetq; dispatch_function_t func = dou._do->do_finalizer; void *ctxt = dou._do->do_ctxt; - dou._do->do_vtable = (void *)0x200; - - free(dou._do); + _os_object_dealloc(dou._os_obj); if (func && ctxt) { dispatch_async_f(tq, ctxt, func); @@ -84,26 +186,28 @@ _dispatch_dispose(dispatch_object_t dou) } void -_dispatch_release(dispatch_object_t dou) +_dispatch_xref_dispose(dispatch_object_t dou) { - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - return; // global object + if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH("Release of a suspended object"); } - - unsigned int ref_cnt = dispatch_atomic_dec2o(dou._do, do_ref_cnt) + 1; - if (fastpath(ref_cnt > 1)) { - return; +#if !USE_OBJC + if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { + _dispatch_source_xref_dispose(dou._ds); } - if (fastpath(ref_cnt == 1)) { - if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CRASH("release while enqueued"); - } - if (slowpath(dou._do->do_xref_cnt)) { - DISPATCH_CRASH("release while external references exist"); - } - return dx_dispose(dou._do); + return _dispatch_release(dou._os_obj); +#endif +} + +void +_dispatch_dispose(dispatch_object_t dou) +{ + if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { + DISPATCH_CRASH("Release while enqueued"); } - DISPATCH_CRASH("over-release"); + 
dx_dispose(dou._do); + return _dispatch_dealloc(dou); } void * @@ -178,9 +282,8 @@ size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { return snprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " - "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt, - dou._do->do_ref_cnt, + "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1, + dou._do->do_ref_cnt + 1, dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, dou._do->do_suspend_cnt & 1); } - diff --git a/src/object.m b/src/object.m new file mode 100644 index 000000000..ea696228c --- /dev/null +++ b/src/object.m @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2011 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if USE_OBJC + +#if !__OBJC2__ +#error "Cannot build with legacy ObjC runtime" +#endif +#if _OS_OBJECT_OBJC_ARC +#error "Cannot build with ARC" +#endif + +#include +#include +#include + +#pragma mark - +#pragma mark _os_object_gc + +#if __OBJC_GC__ +#include +#include + +static dispatch_once_t _os_object_gc_pred; +static bool _os_object_have_gc; +static malloc_zone_t *_os_object_gc_zone; + +static void +_os_object_gc_init(void *ctxt DISPATCH_UNUSED) +{ + _os_object_have_gc = objc_collectingEnabled(); + if (slowpath(_os_object_have_gc)) { + _os_object_gc_zone = objc_collectableZone(); + } +} + +static _os_object_t +_os_object_make_uncollectable(_os_object_t obj) +{ + dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); + if (slowpath(_os_object_have_gc)) { + auto_zone_retain(_os_object_gc_zone, obj); + } + return obj; +} + +static _os_object_t +_os_object_make_collectable(_os_object_t obj) +{ + dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); + if (slowpath(_os_object_have_gc)) { + auto_zone_release(_os_object_gc_zone, obj); + } + return obj; +} +#else +#define _os_object_make_uncollectable(obj) (obj) +#define _os_object_make_collectable(obj) (obj) +#endif // __OBJC_GC__ + +#pragma mark - +#pragma mark _os_object_t + +void +_os_object_init(void) +{ + return _objc_init(); +} + +_os_object_t +_os_object_alloc(const void *_cls, size_t size) +{ + Class cls = _cls; + _os_object_t obj; + dispatch_assert(size >= sizeof(struct _os_object_s)); + size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); + if (!cls) cls = [OS_OBJECT_CLASS(object) class]; + while (!fastpath(obj = class_createInstance(cls, size))) { + sleep(1); // Temporary resource shortage + } + return _os_object_make_uncollectable(obj); +} + +void +_os_object_dealloc(_os_object_t obj) +{ + [_os_object_make_collectable(obj) dealloc]; +} + +void +_os_object_xref_dispose(_os_object_t obj) +{ + [obj 
_xref_dispose]; +} + +void +_os_object_dispose(_os_object_t obj) +{ + [obj _dispose]; +} + +#pragma mark - +#pragma mark _os_object + +@implementation OS_OBJECT_CLASS(object) + +-(id)retain { + return _os_object_retain(self); +} + +-(oneway void)release { + return _os_object_release(self); +} + +-(NSUInteger)retainCount { + return _os_object_retain_count(self); +} + +-(BOOL)retainWeakReference { + return _os_object_retain_weak(self); +} + +-(BOOL)allowsWeakReference { + return _os_object_allows_weak_reference(self); +} + +- (void)_xref_dispose { + return _os_object_release_internal(self); +} + +- (void)_dispose { + return _os_object_dealloc(self); +} + +@end + +#pragma mark - +#pragma mark _dispatch_object + +#include + +// Force non-lazy class realization rdar://10640168 +#define DISPATCH_OBJC_LOAD() + (void)load {} + +@implementation DISPATCH_CLASS(object) + +- (id)init { + self = [super init]; + [self release]; + self = nil; + return self; +} + +- (void)_xref_dispose { + _dispatch_xref_dispose(self); + [super _xref_dispose]; +} + +- (void)_dispose { + return _dispatch_dispose(self); // calls _os_object_dealloc() +} + +- (NSString *)debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[4096]; + dx_debug((struct dispatch_object_s *)self, buf, sizeof(buf)); + return [nsstring stringWithFormat: + [nsstring stringWithUTF8String:"<%s: %s>"], + class_getName([self class]), buf]; +} + +@end + +@implementation DISPATCH_CLASS(queue) +DISPATCH_OBJC_LOAD() + +- (NSString *)description { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + return [nsstring stringWithFormat: + [nsstring stringWithUTF8String:"<%s: %s[%p]>"], + class_getName([self class]), dispatch_queue_get_label(self), self]; +} + +@end + +@implementation DISPATCH_CLASS(source) +DISPATCH_OBJC_LOAD() + +- (void)_xref_dispose { + _dispatch_source_xref_dispose(self); + [super _xref_dispose]; +} + +@end + +#define 
DISPATCH_CLASS_IMPL(name) \ + @implementation DISPATCH_CLASS(name) \ + DISPATCH_OBJC_LOAD() \ + @end + +DISPATCH_CLASS_IMPL(semaphore) +DISPATCH_CLASS_IMPL(group) +DISPATCH_CLASS_IMPL(queue_root) +DISPATCH_CLASS_IMPL(queue_mgr) +DISPATCH_CLASS_IMPL(queue_specific_queue) +DISPATCH_CLASS_IMPL(queue_attr) +DISPATCH_CLASS_IMPL(io) +DISPATCH_CLASS_IMPL(operation) +DISPATCH_CLASS_IMPL(disk) +DISPATCH_CLASS_IMPL(data) + +#pragma mark - +#pragma mark dispatch_autorelease_pool + +#if DISPATCH_COCOA_COMPAT + +void * +_dispatch_autorelease_pool_push(void) { + return objc_autoreleasePoolPush(); +} + +void +_dispatch_autorelease_pool_pop(void *context) { + return objc_autoreleasePoolPop(context); +} + +#endif // DISPATCH_COCOA_COMPAT + +#pragma mark - +#pragma mark dispatch_client_callout + +// Abort on uncaught exceptions thrown from client callouts rdar://8577499 +#if DISPATCH_USE_CLIENT_CALLOUT && !__arm__ +// On platforms with zero-cost exceptions, use a compiler-generated catch-all +// exception handler. + +DISPATCH_NORETURN extern void objc_terminate(void); + +#undef _dispatch_client_callout +void +_dispatch_client_callout(void *ctxt, dispatch_function_t f) +{ + @try { + return f(ctxt); + } + @catch (...) { + objc_terminate(); + } +} + +#undef _dispatch_client_callout2 +void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) +{ + @try { + return f(ctxt, i); + } + @catch (...) 
{ + objc_terminate(); + } +} + +#endif // DISPATCH_USE_CLIENT_CALLOUT + +#endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index 0627cfd4f..8bb673366 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -27,45 +27,62 @@ #ifndef __DISPATCH_OBJECT_INTERNAL__ #define __DISPATCH_OBJECT_INTERNAL__ -enum { - _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations - _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues - _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources - _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node - _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels - _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - _DISPATCH_META_TYPE_MASK = 0xfff0000, // mask for object meta-types - _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attributes - - DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, - - DISPATCH_DATA_TYPE = _DISPATCH_NODE_TYPE, - - DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, - DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, - DISPATCH_DISK_TYPE = _DISPATCH_DISK_TYPE, - - DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, - - DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, - - DISPATCH_SEMAPHORE_TYPE = _DISPATCH_SEMAPHORE_TYPE, - - DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, -}; +#if OS_OBJECT_USE_OBJC +#define DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) \ + OS_OBJECT_DECL_SUBCLASS(name, super) +#define DISPATCH_DECL_INTERNAL(name) \ + DISPATCH_DECL_INTERNAL_SUBCLASS(name, dispatch_object) +#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) \ + _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) +#else +#define 
DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) DISPATCH_DECL(name) +#define DISPATCH_DECL_INTERNAL(name) DISPATCH_DECL(name) +#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) +#endif // OS_OBJECT_USE_OBJC + +#if USE_OBJC +#define DISPATCH_CLASS(name) OS_OBJECT_CLASS(dispatch_##name) +// ObjC classes and dispatch vtables are co-located via linker order and alias +// files rdar://10640168 +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ + __attribute__((section("__DATA,__objc_data"), used)) \ + static const struct { \ + DISPATCH_VTABLE_HEADER(super); \ + } DISPATCH_CONCAT(_,DISPATCH_CLASS(name##_vtable)) = { \ + __VA_ARGS__ \ + } +#else +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ + const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable = { \ + ._os_obj_xref_dispose = _dispatch_xref_dispose, \ + ._os_obj_dispose = _dispatch_dispose, \ + __VA_ARGS__ \ + } +#endif // USE_OBJC + +#define DISPATCH_SUBCLASS_DECL(name, super) \ + DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, super) \ + struct dispatch_##name##_s; \ + extern const struct dispatch_##name##_vtable_s { \ + _OS_OBJECT_CLASS_HEADER(); \ + DISPATCH_VTABLE_HEADER(name); \ + } _dispatch_##name##_vtable +#define DISPATCH_CLASS_DECL(name) DISPATCH_SUBCLASS_DECL(name, dispatch_object) +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ + DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_##name, dispatch_##super); \ + DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, dispatch_##super) \ + extern const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable +#define DISPATCH_VTABLE_INSTANCE(name, ...) 
\ + DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) +#define DISPATCH_VTABLE(name) &_dispatch_##name##_vtable #define DISPATCH_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - size_t (*const do_debug)(struct x *, char *, size_t); \ - struct dispatch_queue_s *(*const do_invoke)(struct x *); \ - bool (*const do_probe)(struct x *); \ - void (*const do_dispose)(struct x *) + size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ + struct dispatch_queue_s *(*const do_invoke)(struct dispatch_##x##_s *); \ + bool (*const do_probe)(struct dispatch_##x##_s *); \ + void (*const do_dispose)(struct dispatch_##x##_s *) #define dx_type(x) (x)->do_vtable->do_type #define dx_kind(x) (x)->do_vtable->do_kind @@ -74,17 +91,18 @@ enum { #define dx_invoke(x) (x)->do_vtable->do_invoke(x) #define dx_probe(x) (x)->do_vtable->do_probe(x) -#define DISPATCH_STRUCT_HEADER(x, y) \ - const struct y *do_vtable; \ - struct x *volatile do_next; \ - unsigned int do_ref_cnt; \ - unsigned int do_xref_cnt; \ - unsigned int do_suspend_cnt; \ +#define DISPATCH_STRUCT_HEADER(x) \ + _OS_OBJECT_HEADER( \ + const struct dispatch_##x##_vtable_s *do_vtable, \ + do_ref_cnt, \ + do_xref_cnt); \ + struct dispatch_##x##_s *volatile do_next; \ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ - void *do_finalizer; + void *do_finalizer; \ + unsigned int do_suspend_cnt; -#define DISPATCH_OBJECT_GLOBAL_REFCNT (~0u) +#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT // "word and bit" must be a power of two to be safely subtracted #define DISPATCH_OBJECT_SUSPEND_LOCK 1u #define DISPATCH_OBJECT_SUSPEND_INTERVAL 2u @@ -99,20 +117,72 @@ enum { #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif -struct dispatch_object_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_object_s); +enum { + _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations + _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues + _DISPATCH_SOURCE_TYPE = 
0x20000, // meta-type for sources + _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores + _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node + _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks + _DISPATCH_META_TYPE_MASK = 0xfff0000, // mask for object meta-types + _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attributes + + DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, + + DISPATCH_DATA_TYPE = _DISPATCH_NODE_TYPE, + + DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, + DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, + DISPATCH_DISK_TYPE = _DISPATCH_DISK_TYPE, + + DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, + + DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, + + DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, + + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, }; +DISPATCH_SUBCLASS_DECL(object, object); struct dispatch_object_s { - DISPATCH_STRUCT_HEADER(dispatch_object_s, dispatch_object_vtable_s); + DISPATCH_STRUCT_HEADER(object); }; size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); - +void *_dispatch_alloc(const void *vtable, size_t size); void _dispatch_retain(dispatch_object_t dou); void _dispatch_release(dispatch_object_t dou); +void _dispatch_xref_dispose(dispatch_object_t dou); void _dispatch_dispose(dispatch_object_t dou); -dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); +#if DISPATCH_COCOA_COMPAT +void *_dispatch_autorelease_pool_push(void); +void _dispatch_autorelease_pool_pop(void *context); +#endif + +typedef struct _os_object_class_s { + _OS_OBJECT_CLASS_HEADER(); +} 
_os_object_class_s; + +typedef struct _os_object_s { + _OS_OBJECT_HEADER( + const _os_object_class_s *os_obj_isa, + os_obj_ref_cnt, + os_obj_xref_cnt); +} _os_object_s; + +void _os_object_init(void); +unsigned long _os_object_retain_count(_os_object_t obj); +bool _os_object_retain_weak(_os_object_t obj); +bool _os_object_allows_weak_reference(_os_object_t obj); +void _os_object_dispose(_os_object_t obj); +void _os_object_xref_dispose(_os_object_t obj); #endif diff --git a/src/queue.c b/src/queue.c index 595bac562..f01d7f855 100644 --- a/src/queue.c +++ b/src/queue.c @@ -27,22 +27,30 @@ !defined(DISPATCH_ENABLE_THREAD_POOL) #define DISPATCH_ENABLE_THREAD_POOL 1 #endif +#if DISPATCH_ENABLE_THREAD_POOL && !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#define pthread_workqueue_t void* +#endif static void _dispatch_cache_cleanup(void *value); static void _dispatch_async_f_redirect(dispatch_queue_t dq, dispatch_continuation_t dc); static void _dispatch_queue_cleanup(void *ctxt); -static bool _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static void _dispatch_queue_drain(dispatch_queue_t dq); +static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, + unsigned int n); +static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); +static _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_queue_t dq); static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); -static void _dispatch_worker_thread2(void *context); +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +static void _dispatch_worker_thread3(void *context); +#endif +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +static void _dispatch_worker_thread2(int priority, int options, void *context); +#endif #if DISPATCH_ENABLE_THREAD_POOL static void *_dispatch_worker_thread(void *context); static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif -static bool _dispatch_mgr_wakeup(dispatch_queue_t dq); -static dispatch_queue_t 
_dispatch_mgr_thread(dispatch_queue_t dq); #if DISPATCH_COCOA_COMPAT static unsigned int _dispatch_worker_threads; @@ -50,94 +58,52 @@ static dispatch_once_t _dispatch_main_q_port_pred; static mach_port_t main_q_port; static void _dispatch_main_q_port_init(void *ctxt); -static void _dispatch_queue_wakeup_main(void); +static dispatch_queue_t _dispatch_queue_wakeup_main(void); static void _dispatch_main_queue_drain(void); #endif -#pragma mark - -#pragma mark dispatch_queue_vtable - -const struct dispatch_queue_vtable_s _dispatch_queue_vtable = { - .do_type = DISPATCH_QUEUE_TYPE, - .do_kind = "queue", - .do_dispose = _dispatch_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = dispatch_queue_debug, -}; - -static const struct dispatch_queue_vtable_s _dispatch_queue_root_vtable = { - .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, - .do_kind = "global-queue", - .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_queue_wakeup_global, -}; - -static const struct dispatch_queue_vtable_s _dispatch_queue_mgr_vtable = { - .do_type = DISPATCH_QUEUE_MGR_TYPE, - .do_kind = "mgr-queue", - .do_invoke = _dispatch_mgr_thread, - .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_mgr_wakeup, -}; - #pragma mark - #pragma mark dispatch_root_queue -#if HAVE_PTHREAD_WORKQUEUES -static const int _dispatch_root_queue_wq_priorities[] = { - [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = WORKQ_LOW_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = WORKQ_LOW_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = WORKQ_DEFAULT_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = - WORKQ_DEFAULT_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = WORKQ_HIGH_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = WORKQ_HIGH_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = WORKQ_BG_PRIOQUEUE, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = - WORKQ_BG_PRIOQUEUE, -}; -#endif - #if 
DISPATCH_ENABLE_THREAD_POOL static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_semaphore_vtable, + .do_vtable = DISPATCH_VTABLE(semaphore), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, }, @@ -147,73 +113,114 @@ static struct 
dispatch_semaphore_s _dispatch_thread_mediator[] = { #define MAX_THREAD_COUNT 255 struct dispatch_root_queue_context_s { + union { + struct { + unsigned int volatile dgq_pending; #if HAVE_PTHREAD_WORKQUEUES - pthread_workqueue_t dgq_kworkqueue; + int dgq_wq_priority, dgq_wq_options; +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL + pthread_workqueue_t dgq_kworkqueue; #endif - uint32_t dgq_pending; +#endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_ENABLE_THREAD_POOL - uint32_t dgq_thread_pool_size; - dispatch_semaphore_t dgq_thread_mediator; + dispatch_semaphore_t dgq_thread_mediator; + uint32_t dgq_thread_pool_size; #endif + }; + char _dgq_pad[DISPATCH_CACHELINE_SIZE]; + }; }; +DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { - [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, + .dgq_wq_options = 0, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, + .dgq_wq_options = 0, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], .dgq_thread_pool_size = 
MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_options = 0, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = 0, +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { + }}}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif #if 
DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif - }, + }}}, }; // 6618342 Contact the team that owns the Instrument DTrace probe before @@ -222,104 +229,96 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], - .dq_label = "com.apple.root.low-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 4, }, [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.low-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 5, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], - .dq_label = "com.apple.root.default-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 6, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = 
DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.default-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 7, }, [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], - .dq_label = "com.apple.root.high-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 8, }, [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.high-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 9, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], - .dq_label = "com.apple.root.background-priority", .dq_running = 2, .dq_width = UINT32_MAX, .dq_serialnum = 10, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { - .do_vtable = &_dispatch_queue_root_vtable, + .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.background-overcommit-priority", .dq_running = 2, .dq_width = UINT32_MAX, @@ -327,17 +326,41 @@ struct dispatch_queue_s _dispatch_root_queues[] = { }, }; +#if HAVE_PTHREAD_WORKQUEUES +static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { + [WORKQ_LOW_PRIOQUEUE][0] = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], + [WORKQ_LOW_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = + &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], + [WORKQ_DEFAULT_PRIOQUEUE][0] = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], + [WORKQ_DEFAULT_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = + &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], + [WORKQ_HIGH_PRIOQUEUE][0] = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], + [WORKQ_HIGH_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = + &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], + [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], + [WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = + &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], +}; +#endif // HAVE_PTHREAD_WORKQUEUES + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_mgr_q = { - .do_vtable = &_dispatch_queue_mgr_vtable, + .do_vtable = DISPATCH_VTABLE(queue_mgr), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - 
.dq_label = "com.apple.libdispatch-manager", .dq_width = 1, .dq_serialnum = 2, @@ -375,29 +398,62 @@ _dispatch_root_queues_init_workq(void) { bool result = false; #if HAVE_PTHREAD_WORKQUEUES + bool disable_wq = false; #if DISPATCH_ENABLE_THREAD_POOL - if (slowpath(getenv("LIBDISPATCH_DISABLE_KWQ"))) return result; + disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif - int i, r; - pthread_workqueue_attr_t pwq_attr; - r = pthread_workqueue_attr_init_np(&pwq_attr); - (void)dispatch_assume_zero(r); - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - pthread_workqueue_t pwq = NULL; - const int prio = _dispatch_root_queue_wq_priorities[i]; - - r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, prio); - (void)dispatch_assume_zero(r); - r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, i & 1); - (void)dispatch_assume_zero(r); - r = pthread_workqueue_create_np(&pwq, &pwq_attr); + int r; +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + if (!disable_wq) { + r = pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); +#if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK (void)dispatch_assume_zero(r); - result = result || dispatch_assume(pwq); - _dispatch_root_queue_contexts[i].dgq_kworkqueue = pwq; +#endif + result = !r; } - r = pthread_workqueue_attr_destroy_np(&pwq_attr); - (void)dispatch_assume_zero(r); -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL + if (!result) { +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK + pthread_workqueue_attr_t pwq_attr; + if (!disable_wq) { + r = pthread_workqueue_attr_init_np(&pwq_attr); + (void)dispatch_assume_zero(r); + } +#endif + int i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + pthread_workqueue_t pwq = NULL; + struct dispatch_root_queue_context_s *qc = + &_dispatch_root_queue_contexts[i]; +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK + if (!disable_wq +#if DISPATCH_NO_BG_PRIORITY + && 
(qc->dgq_wq_priority != WORKQ_BG_PRIOQUEUE) +#endif + ) { + r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, + qc->dgq_wq_priority); + (void)dispatch_assume_zero(r); + r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, + qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); + (void)dispatch_assume_zero(r); + r = pthread_workqueue_create_np(&pwq, &pwq_attr); + (void)dispatch_assume_zero(r); + result = result || dispatch_assume(pwq); + } +#endif + qc->dgq_kworkqueue = pwq ? pwq : (void*)(~0ul); + } +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK + if (!disable_wq) { + r = pthread_workqueue_attr_destroy_np(&pwq_attr); + (void)dispatch_assume_zero(r); + } +#endif + } +#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL +#endif // HAVE_PTHREAD_WORKQUEUES return result; } @@ -437,6 +493,7 @@ _dispatch_root_queues_init_thread_pool(void) static void _dispatch_root_queues_init(void *context DISPATCH_UNUSED) { + _dispatch_safe_fork = false; if (!_dispatch_root_queues_init_workq()) { _dispatch_root_queues_init_thread_pool(); } @@ -459,19 +516,24 @@ libdispatch_init(void) dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); #if HAVE_PTHREAD_WORKQUEUES - dispatch_assert(countof(_dispatch_root_queue_wq_priorities) == + dispatch_assert(sizeof(_dispatch_wq2root_queues) / + sizeof(_dispatch_wq2root_queues[0][0]) == DISPATCH_ROOT_QUEUE_COUNT); #endif #if DISPATCH_ENABLE_THREAD_POOL dispatch_assert(countof(_dispatch_thread_mediator) == DISPATCH_ROOT_QUEUE_COUNT); #endif + + dispatch_assert(sizeof(struct dispatch_apply_s) <= + ROUND_UP_TO_CACHELINE_SIZE(sizeof( + struct dispatch_continuation_s))); dispatch_assert(sizeof(struct dispatch_source_s) == sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); -#if DISPATCH_DEBUG dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE == 0); -#endif + dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % + DISPATCH_CACHELINE_SIZE == 
0); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); _dispatch_thread_key_create(&dispatch_sema4_key, @@ -484,10 +546,8 @@ libdispatch_init(void) #endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 - _dispatch_main_q.do_vtable = &_dispatch_queue_vtable; _dispatch_main_q.do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]; - _dispatch_data_empty.do_vtable = &_dispatch_data_vtable; #endif _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); @@ -498,6 +558,8 @@ libdispatch_init(void) #endif _dispatch_hw_config_init(); + _dispatch_vtable_init(); + _os_object_init(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -550,12 +612,9 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) } // XXX switch to malloc() - dq = calloc(1ul, sizeof(struct dispatch_queue_s) - - DISPATCH_QUEUE_MIN_LABEL_SIZE - DISPATCH_QUEUE_CACHELINE_PAD + - label_len + 1); - if (slowpath(!dq)) { - return dq; - } + dq = _dispatch_alloc(DISPATCH_VTABLE(queue), + sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE - + DISPATCH_QUEUE_CACHELINE_PAD + label_len + 1); _dispatch_queue_init(dq); strcpy(dq->dq_label, label); @@ -592,8 +651,6 @@ _dispatch_queue_dispose(dispatch_queue_t dq) if (dqsq) { _dispatch_release(dqsq); } - - _dispatch_dispose(dq); } const char * @@ -713,8 +770,7 @@ dispatch_set_current_target_queue(dispatch_queue_t dq) #pragma mark dispatch_queue_specific struct dispatch_queue_specific_queue_s { - DISPATCH_STRUCT_HEADER(dispatch_queue_specific_queue_s, - dispatch_queue_specific_queue_vtable_s); + DISPATCH_STRUCT_HEADER(queue_specific_queue); DISPATCH_QUEUE_HEADER; union { char _dqsq_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; @@ -725,24 +781,6 @@ struct dispatch_queue_specific_queue_s { }; }; }; -DISPATCH_DECL(dispatch_queue_specific_queue); - -static void -_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); - -struct dispatch_queue_specific_queue_vtable_s { - 
DISPATCH_VTABLE_HEADER(dispatch_queue_specific_queue_s); -}; - -static const struct dispatch_queue_specific_queue_vtable_s - _dispatch_queue_specific_queue_vtable = { - .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, - .do_kind = "queue-context", - .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dispatch_queue_debug, -}; struct dispatch_queue_specific_s { const void *dqs_key; @@ -752,7 +790,7 @@ struct dispatch_queue_specific_s { }; DISPATCH_DECL(dispatch_queue_specific); -static void +void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) { dispatch_queue_specific_t dqs, tmp; @@ -773,17 +811,18 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) { dispatch_queue_specific_queue_t dqsq; - dqsq = calloc(1ul, sizeof(struct dispatch_queue_specific_queue_s)); + dqsq = _dispatch_alloc(DISPATCH_VTABLE(queue_specific_queue), + sizeof(struct dispatch_queue_specific_queue_s)); _dispatch_queue_init((dispatch_queue_t)dqsq); - dqsq->do_vtable = &_dispatch_queue_specific_queue_vtable; - dqsq->do_xref_cnt = 0; + dqsq->do_xref_cnt = -1; dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, true); dqsq->dq_width = UINT32_MAX; strlcpy(dqsq->dq_label, "queue-specific", sizeof(dqsq->dq_label)); TAILQ_INIT(&dqsq->dqsq_contexts); dispatch_atomic_store_barrier(); - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, dqsq))) { + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, + (dispatch_queue_t)dqsq))) { _dispatch_release((dispatch_queue_t)dqsq); } } @@ -980,7 +1019,7 @@ _dispatch_ccache_init(void *context DISPATCH_UNUSED) malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); } -static dispatch_continuation_t +dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void) { static dispatch_once_t pred; @@ -988,6 +1027,8 @@ _dispatch_continuation_alloc_from_heap(void) dispatch_once_f(&pred, NULL, 
_dispatch_ccache_init); + // This is also used for allocating struct dispatch_apply_s. If the + // ROUND_UP behavior is changed, adjust the assert in libdispatch_init while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { sleep(1); @@ -996,18 +1037,6 @@ _dispatch_continuation_alloc_from_heap(void) return dc; } -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_continuation_alloc_cacheonly(void) -{ - dispatch_continuation_t dc; - dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); - } - return dc; -} - static void _dispatch_force_cache_cleanup(void) { @@ -1019,6 +1048,13 @@ _dispatch_force_cache_cleanup(void) } } +// rdar://problem/11500155 +void +dispatch_flush_continuation_cache(void) +{ + _dispatch_force_cache_cleanup(); +} + DISPATCH_NOINLINE static void _dispatch_cache_cleanup(void *value) @@ -1031,16 +1067,6 @@ _dispatch_cache_cleanup(void *value) } } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_free(dispatch_continuation_t dc) -{ - dispatch_continuation_t prev_dc; - prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); - dc->do_next = prev_dc; - _dispatch_thread_setspecific(dispatch_cache_key, dc); -} - DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) @@ -1081,7 +1107,7 @@ _dispatch_continuation_pop(dispatch_object_t dou) _dispatch_continuation_free(dc); } if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { - dg = dc->dc_group; + dg = dc->dc_data; } else { dg = NULL; } @@ -1144,8 +1170,8 @@ static void _dispatch_async_f_redirect_invoke(void *_ctxt) { struct dispatch_continuation_s *dc = _ctxt; - struct dispatch_continuation_s *other_dc = dc->dc_data[1]; - dispatch_queue_t old_dq, dq = dc->dc_data[0], rq; + struct dispatch_continuation_s *other_dc = dc->dc_other; + dispatch_queue_t 
old_dq, dq = dc->dc_data, rq; old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); @@ -1184,16 +1210,13 @@ _dispatch_async_f_redirect(dispatch_queue_t dq, _dispatch_retain(dq); - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - dc = _dispatch_continuation_alloc_from_heap(); - } + dc = _dispatch_continuation_alloc(); dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; dc->dc_func = _dispatch_async_f_redirect_invoke; dc->dc_ctxt = dc; - dc->dc_data[0] = dq; - dc->dc_data[1] = other_dc; + dc->dc_data = dq; + dc->dc_other = other_dc; // Find the queue to redirect to rq = dq->do_targetq; @@ -1316,15 +1339,12 @@ dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, _dispatch_retain(dg); dispatch_group_enter(dg); - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - dc = _dispatch_continuation_alloc_from_heap(); - } + dc = _dispatch_continuation_alloc(); dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT); dc->dc_func = func; dc->dc_ctxt = ctxt; - dc->dc_group = dg; + dc->dc_data = dg; // No fastpath/slowpath hint because we simply don't know if (dq->dq_width != 1 && dq->do_targetq) { @@ -1389,7 +1409,7 @@ _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, #pragma mark dispatch_barrier_sync struct dispatch_barrier_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(dispatch_barrier_sync_slow_s); + DISPATCH_CONTINUATION_HEADER(barrier_sync_slow); }; struct dispatch_barrier_sync_slow2_s { @@ -1401,6 +1421,33 @@ struct dispatch_barrier_sync_slow2_s { _dispatch_thread_semaphore_t dbss2_sema; }; +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline _dispatch_thread_semaphore_t +_dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, + bool lock) +{ + dispatch_continuation_t dc = dou._dc; + + if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != + 
(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) { + return 0; + } + _dispatch_trace_continuation_pop(dq, dc); + _dispatch_workitem_inc(); + + struct dispatch_barrier_sync_slow_s *dbssp = (void *)dc; + struct dispatch_barrier_sync_slow2_s *dbss2 = dbssp->dc_ctxt; + if (lock) { + (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL); + // rdar://problem/9032024 running lock must be held until sync_f_slow + // returns + (void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); + } + return dbss2->dbss2_sema ? dbss2->dbss2_sema : MACH_PORT_DEAD; +} + static void _dispatch_barrier_sync_f_slow_invoke(void *ctxt) { @@ -1466,12 +1513,9 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, _dispatch_function_invoke(dq, ctxt, func); } dispatch_atomic_release_barrier(); - if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL)) { + if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && + dq->dq_running == 2) { // rdar://problem/8290662 "lock transfer" - // ensure drain of current barrier sync has finished - while (slowpath(dq->dq_running > 2)) { - _dispatch_hardware_pause(); - } _dispatch_thread_semaphore_t sema; sema = _dispatch_queue_drain_one_barrier_sync(dq); if (sema) { @@ -1604,7 +1648,7 @@ _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); struct dispatch_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(dispatch_sync_slow_s); + DISPATCH_CONTINUATION_HEADER(sync_slow); } dss = { .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, .dc_ctxt = (void*)sema, @@ -1807,8 +1851,8 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, #pragma mark dispatch_wakeup DISPATCH_NOINLINE -void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, +static void +_dispatch_queue_push_list_slow2(dispatch_queue_t dq, struct dispatch_object_s *obj) { // The queue must be retained before 
dq_items_head is written in order @@ -1823,6 +1867,30 @@ _dispatch_queue_push_list_slow(dispatch_queue_t dq, _dispatch_release(dq); } +DISPATCH_NOINLINE +void +_dispatch_queue_push_list_slow(dispatch_queue_t dq, + struct dispatch_object_s *obj, unsigned int n) +{ + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { + dq->dq_items_head = obj; + return _dispatch_queue_wakeup_global2(dq, n); + } + _dispatch_queue_push_list_slow2(dq, obj); +} + +DISPATCH_NOINLINE +void +_dispatch_queue_push_slow(dispatch_queue_t dq, + struct dispatch_object_s *obj) +{ + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { + dq->dq_items_head = obj; + return _dispatch_queue_wakeup_global(dq); + } + _dispatch_queue_push_list_slow2(dq, obj); +} + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol dispatch_queue_t @@ -1844,11 +1912,12 @@ _dispatch_wakeup(dispatch_object_t dou) DISPATCH_OBJECT_SUSPEND_LOCK)) { #if DISPATCH_COCOA_COMPAT if (dou._dq == &_dispatch_main_q) { - _dispatch_queue_wakeup_main(); + return _dispatch_queue_wakeup_main(); } #endif return NULL; } + dispatch_atomic_acquire_barrier(); _dispatch_retain(dou._do); tq = dou._do->do_targetq; _dispatch_queue_push(tq, dou._do); @@ -1858,14 +1927,16 @@ _dispatch_wakeup(dispatch_object_t dou) #if DISPATCH_COCOA_COMPAT DISPATCH_NOINLINE -void +dispatch_queue_t _dispatch_queue_wakeup_main(void) { kern_return_t kr; dispatch_once_f(&_dispatch_main_q_port_pred, NULL, _dispatch_main_q_port_init); - + if (!main_q_port) { + return NULL; + } kr = _dispatch_send_wakeup_main_thread(main_q_port, 0); switch (kr) { @@ -1877,51 +1948,50 @@ _dispatch_queue_wakeup_main(void) (void)dispatch_assume_zero(kr); break; } - - _dispatch_safe_fork = false; + return NULL; } #endif -static bool -_dispatch_queue_wakeup_global(dispatch_queue_t dq) +DISPATCH_NOINLINE +static void +_dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) { static dispatch_once_t pred; struct dispatch_root_queue_context_s *qc 
= dq->do_ctxt; int r; - if (!dq->dq_items_tail) { - return false; - } - - _dispatch_safe_fork = false; - - dispatch_debug_queue(dq, __PRETTY_FUNCTION__); - + dispatch_debug_queue(dq, __func__); dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); #if HAVE_PTHREAD_WORKQUEUES #if DISPATCH_ENABLE_THREAD_POOL - if (qc->dgq_kworkqueue) + if (qc->dgq_kworkqueue != (void*)(~0ul)) #endif { - if (dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, 1)) { + _dispatch_debug("requesting new worker thread"); +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK + if (qc->dgq_kworkqueue) { pthread_workitem_handle_t wh; - unsigned int gen_cnt; - _dispatch_debug("requesting new worker thread"); - - r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, - _dispatch_worker_thread2, dq, &wh, &gen_cnt); - (void)dispatch_assume_zero(r); - } else { - _dispatch_debug("work thread request still pending on global " - "queue: %p", dq); + unsigned int gen_cnt, i = n; + do { + r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, + _dispatch_worker_thread3, dq, &wh, &gen_cnt); + (void)dispatch_assume_zero(r); + } while (--i); + return; } - goto out; +#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, + qc->dgq_wq_options, n); + (void)dispatch_assume_zero(r); +#endif + return; } #endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_ENABLE_THREAD_POOL if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { - goto out; + return; } pthread_t pthr; @@ -1930,7 +2000,7 @@ _dispatch_queue_wakeup_global(dispatch_queue_t dq) t_count = qc->dgq_thread_pool_size; if (!t_count) { _dispatch_debug("The thread pool is full: %p", dq); - goto out; + return; } } while (!dispatch_atomic_cmpxchg2o(qc, dgq_thread_pool_size, t_count, t_count - 1)); @@ -1944,8 +2014,40 @@ _dispatch_queue_wakeup_global(dispatch_queue_t dq) r = pthread_detach(pthr); (void)dispatch_assume_zero(r); #endif // DISPATCH_ENABLE_THREAD_POOL +} -out: +static inline 
void +_dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) +{ + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + + if (!dq->dq_items_tail) { + return; + } +#if HAVE_PTHREAD_WORKQUEUES + if ( +#if DISPATCH_ENABLE_THREAD_POOL + (qc->dgq_kworkqueue != (void*)(~0ul)) && +#endif + !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n)) { + _dispatch_debug("work thread request still pending on global queue: " + "%p", dq); + return; + } +#endif // HAVE_PTHREAD_WORKQUEUES + return _dispatch_queue_wakeup_global_slow(dq, n); +} + +static inline void +_dispatch_queue_wakeup_global(dispatch_queue_t dq) +{ + return _dispatch_queue_wakeup_global2(dq, 1); +} + +bool +_dispatch_queue_probe_root(dispatch_queue_t dq) +{ + _dispatch_queue_wakeup_global2(dq, 1); return false; } @@ -1962,7 +2064,7 @@ _dispatch_queue_invoke(dispatch_queue_t dq) fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { dispatch_atomic_acquire_barrier(); dispatch_queue_t otq = dq->do_targetq, tq = NULL; - _dispatch_queue_drain(dq); + _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); if (dq->do_vtable->do_invoke) { // Assume that object invoke checks it is executing on correct queue tq = dx_invoke(dq); @@ -1974,12 +2076,15 @@ _dispatch_queue_invoke(dispatch_queue_t dq) // When the suspend-count lock is dropped, then the check will happen. 
dispatch_atomic_release_barrier(); (void)dispatch_atomic_dec2o(dq, dq_running); - if (tq) { + if (sema) { + _dispatch_thread_semaphore_signal(sema); + } else if (tq) { return _dispatch_queue_push(tq, dq); } } dq->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_atomic_release_barrier(); if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK)) { if (dq->dq_running == 0) { @@ -1989,12 +2094,13 @@ _dispatch_queue_invoke(dispatch_queue_t dq) _dispatch_release(dq); // added when the queue is put on the list } -static void +static _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_queue_t dq) { dispatch_queue_t orig_tq, old_dq; old_dq = _dispatch_thread_getspecific(dispatch_queue_key); struct dispatch_object_s *dc = NULL, *next_dc = NULL; + _dispatch_thread_semaphore_t sema = 0; // Continue draining sources after target queue change rdar://8928171 bool check_tq = (dx_type(dq) != DISPATCH_SOURCE_KEVENT_TYPE); @@ -2002,7 +2108,7 @@ _dispatch_queue_drain(dispatch_queue_t dq) orig_tq = dq->do_targetq; _dispatch_thread_setspecific(dispatch_queue_key, dq); - //dispatch_debug_queue(dq, __PRETTY_FUNCTION__); + //dispatch_debug_queue(dq, __func__); while (dq->dq_items_tail) { while (!(dc = fastpath(dq->dq_items_head))) { @@ -2027,19 +2133,23 @@ _dispatch_queue_drain(dispatch_queue_t dq) if (slowpath(orig_tq != dq->do_targetq) && check_tq) { goto out; } - if (fastpath(dq->dq_width == 1)) { - _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); - } else if (!DISPATCH_OBJ_IS_VTABLE(dc) && - (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { - if (dq->dq_running > 1) { - goto out; + if (!fastpath(dq->dq_width == 1)) { + if (!DISPATCH_OBJ_IS_VTABLE(dc) && + (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { + if (dq->dq_running > 1) { + goto out; + } + } else { + _dispatch_continuation_redirect(dq, dc); + continue; } - _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); - } else { - _dispatch_continuation_redirect(dq, dc); } + if ((sema = 
_dispatch_barrier_sync_f_pop(dq, dc, true))) { + dc = next_dc; + goto out; + } + _dispatch_continuation_pop(dc); + _dispatch_workitem_inc(); } while ((dc = next_dc)); } @@ -2060,6 +2170,7 @@ _dispatch_queue_drain(dispatch_queue_t dq) } _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + return sema; } static void @@ -2068,7 +2179,11 @@ _dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) #if DISPATCH_PERF_MON uint64_t start = _dispatch_absolute_time(); #endif - _dispatch_queue_drain(dq); + _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); + if (sema) { + dispatch_atomic_barrier(); + _dispatch_thread_semaphore_signal(sema); + } #if DISPATCH_PERF_MON _dispatch_queue_merge_stats(start); #endif @@ -2084,7 +2199,7 @@ _dispatch_main_queue_drain(void) return; } struct dispatch_main_queue_drain_marker_s { - DISPATCH_CONTINUATION_HEADER(dispatch_main_queue_drain_marker_s); + DISPATCH_CONTINUATION_HEADER(main_queue_drain_marker); } marker = { .do_vtable = NULL, }; @@ -2140,12 +2255,11 @@ _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) { // rdar://problem/8290662 "lock transfer" struct dispatch_object_s *dc, *next_dc; + _dispatch_thread_semaphore_t sema; // queue is locked, or suspended and not being drained dc = dq->dq_items_head; - if (slowpath(!dc) || DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) { + if (slowpath(!dc) || !(sema = _dispatch_barrier_sync_f_pop(dq, dc, false))){ return 0; } // dequeue dc, it is a barrier sync @@ -2158,19 +2272,19 @@ _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) } dq->dq_items_head = next_dc; } - _dispatch_trace_continuation_pop(dq, dc); - _dispatch_workitem_inc(); - - struct dispatch_barrier_sync_slow_s *dbssp = (void *)dc; - struct dispatch_barrier_sync_slow2_s *dbss2p = dbssp->dc_ctxt; - return dbss2p->dbss2_sema; + return sema; } +#ifndef 
DISPATCH_HEAD_CONTENTION_SPINS +#define DISPATCH_HEAD_CONTENTION_SPINS 10000 +#endif + static struct dispatch_object_s * _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) { struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; +start: // The mediator value acts both as a "lock" and a signal head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator); @@ -2185,7 +2299,13 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) if (slowpath(head == mediator)) { // This thread lost the race for ownership of the queue. - // + // Spin for a short while in case many threads have started draining at + // once as part of a dispatch_apply + unsigned int i = DISPATCH_HEAD_CONTENTION_SPINS; + do { + _dispatch_hardware_pause(); + if (dq->dq_items_head != mediator) goto start; + } while (--i); // The ratio of work to libdispatch overhead must be bad. This // scenario implies that there are too many threads in the pool. // Create a new pending thread and then exit this thread. 
@@ -2225,22 +2345,18 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) #pragma mark - #pragma mark dispatch_worker_thread -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol static void -_dispatch_worker_thread2(void *context) +_dispatch_worker_thread4(dispatch_queue_t dq) { struct dispatch_object_s *item; - dispatch_queue_t dq = context; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; +#if DISPATCH_DEBUG if (_dispatch_thread_getspecific(dispatch_queue_key)) { DISPATCH_CRASH("Premature thread recycling"); } - +#endif _dispatch_thread_setspecific(dispatch_queue_key, dq); - qc->dgq_pending = 0; #if DISPATCH_COCOA_COMPAT (void)dispatch_atomic_inc(&_dispatch_worker_threads); @@ -2248,8 +2364,8 @@ _dispatch_worker_thread2(void *context) if (dispatch_begin_thread_4GC) { dispatch_begin_thread_4GC(); } - void *pool = _dispatch_begin_NSAutoReleasePool(); -#endif + void *pool = _dispatch_autorelease_pool_push(); +#endif // DISPATCH_COCOA_COMPAT #if DISPATCH_PERF_MON uint64_t start = _dispatch_absolute_time(); @@ -2262,13 +2378,15 @@ _dispatch_worker_thread2(void *context) #endif #if DISPATCH_COCOA_COMPAT - _dispatch_end_NSAutoReleasePool(pool); - dispatch_end_thread_4GC(); + _dispatch_autorelease_pool_pop(pool); + if (dispatch_end_thread_4GC) { + dispatch_end_thread_4GC(); + } if (!dispatch_atomic_dec(&_dispatch_worker_threads) && dispatch_no_worker_threads_4GC) { dispatch_no_worker_threads_4GC(); } -#endif +#endif // DISPATCH_COCOA_COMPAT _dispatch_thread_setspecific(dispatch_queue_key, NULL); @@ -2276,6 +2394,35 @@ _dispatch_worker_thread2(void *context) } +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +static void +_dispatch_worker_thread3(void *context) +{ + dispatch_queue_t dq = context; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + + (void)dispatch_atomic_dec2o(qc, dgq_pending); + _dispatch_worker_thread4(dq); +} +#endif + +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +// 6618342 Contact the 
team that owns the Instrument DTrace probe before +// renaming this symbol +static void +_dispatch_worker_thread2(int priority, int options, + void *context DISPATCH_UNUSED) +{ + dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIOQUEUE); + dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT)); + dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options]; + struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + + (void)dispatch_atomic_dec2o(qc, dgq_pending); + _dispatch_worker_thread4(dq); +} +#endif + #if DISPATCH_ENABLE_THREAD_POOL // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol @@ -2294,7 +2441,7 @@ _dispatch_worker_thread(void *context) (void)dispatch_assume_zero(r); do { - _dispatch_worker_thread2(context); + _dispatch_worker_thread4(dq); // we use 65 seconds in case there are any timers that run once a minute } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); @@ -2348,6 +2495,7 @@ _dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED) { kern_return_t kr; + _dispatch_safe_fork = false; kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &main_q_port); DISPATCH_VERIFY_MIG(kr); @@ -2358,7 +2506,6 @@ _dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED) (void)dispatch_assume_zero(kr); _dispatch_program_is_probably_callback_driven = true; - _dispatch_safe_fork = false; } mach_port_t @@ -2438,7 +2585,8 @@ _dispatch_queue_cleanup2(void) { (void)dispatch_atomic_dec(&_dispatch_main_q.dq_running); - if (dispatch_atomic_sub(&_dispatch_main_q.do_suspend_cnt, + dispatch_atomic_release_barrier(); + if (dispatch_atomic_sub2o(&_dispatch_main_q, do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { _dispatch_wakeup(&_dispatch_main_q); } @@ -2447,7 +2595,7 @@ _dispatch_queue_cleanup2(void) // similar non-POSIX API was called // this has to run before the DISPATCH_COCOA_COMPAT below if (_dispatch_program_is_probably_callback_driven) 
{ - dispatch_async_f(_dispatch_get_root_queue(0, false), NULL, + dispatch_async_f(_dispatch_get_root_queue(0, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } @@ -2503,10 +2651,8 @@ _dispatch_get_kq_init(void *context DISPATCH_UNUSED) .flags = EV_ADD|EV_CLEAR, }; - _dispatch_kq = kqueue(); - _dispatch_safe_fork = false; - + _dispatch_kq = kqueue(); if (_dispatch_kq == -1) { DISPATCH_CLIENT_CRASH("kqueue() create failed: " "probably out of file descriptors"); @@ -2533,6 +2679,7 @@ _dispatch_get_kq(void) long _dispatch_update_kq(const struct kevent *kev) { + int rval; struct kevent kev_copy = *kev; // This ensures we don't get a pending kevent back while registering // a new kevent @@ -2564,14 +2711,25 @@ _dispatch_update_kq(const struct kevent *kev) } } - int rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); +retry: + rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); if (rval == -1) { // If we fail to register with kevents, for other reasons aside from // changelist elements. 
- (void)dispatch_assume_zero(errno); + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + _dispatch_bug_client("Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } //kev_copy.flags |= EV_ERROR; - //kev_copy.data = error; - return errno; + //kev_copy.data = err; + return err; } // The following select workaround only applies to adding kevents @@ -2628,7 +2786,7 @@ _dispatch_update_kq(const struct kevent *kev) return kev_copy.data; } -static bool +bool _dispatch_mgr_wakeup(dispatch_queue_t dq) { static const struct kevent kev = { @@ -2812,7 +2970,7 @@ _dispatch_mgr_invoke(void) } DISPATCH_NORETURN -static dispatch_queue_t +dispatch_queue_t _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) { // never returns, so burn bridges behind us & clear stack 2k ahead diff --git a/src/queue_internal.h b/src/queue_internal.h index 479ae6006..b223ccec2 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -34,12 +34,17 @@ // If dc_vtable is less than 127, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The -// first two words must align with normal objects. +// layout until after 'do_next' must align with normal objects. 
#define DISPATCH_CONTINUATION_HEADER(x) \ - const void *do_vtable; \ - struct x *volatile do_next; \ + _OS_OBJECT_HEADER( \ + const void *do_vtable, \ + do_ref_cnt, \ + do_xref_cnt); \ + struct dispatch_##x##_s *volatile do_next; \ dispatch_function_t dc_func; \ - void *dc_ctxt + void *dc_ctxt; \ + void *dc_data; \ + void *dc_other; #define DISPATCH_OBJ_ASYNC_BIT 0x1 #define DISPATCH_OBJ_BARRIER_BIT 0x2 @@ -49,31 +54,35 @@ #define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul) struct dispatch_continuation_s { - DISPATCH_CONTINUATION_HEADER(dispatch_continuation_s); - dispatch_group_t dc_group; - void *dc_data[3]; + DISPATCH_CONTINUATION_HEADER(continuation); }; typedef struct dispatch_continuation_s *dispatch_continuation_t; -struct dispatch_queue_attr_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_queue_attr_s); +struct dispatch_apply_s { + size_t da_index; + size_t da_iterations; + void (*da_func)(void *, size_t); + void *da_ctxt; + _dispatch_thread_semaphore_t da_sema; + dispatch_queue_t da_queue; + size_t da_done; + uint32_t da_thr_cnt; }; -struct dispatch_queue_attr_s { - DISPATCH_STRUCT_HEADER(dispatch_queue_attr_s, dispatch_queue_attr_vtable_s); -}; +typedef struct dispatch_apply_s *dispatch_apply_t; -struct dispatch_queue_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_queue_s); +DISPATCH_CLASS_DECL(queue_attr); +struct dispatch_queue_attr_s { + DISPATCH_STRUCT_HEADER(queue_attr); }; #define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 #ifdef __LP64__ -#define DISPATCH_QUEUE_CACHELINE_PAD 32 +#define DISPATCH_QUEUE_CACHELINE_PAD (4*sizeof(void*)) #else -#define DISPATCH_QUEUE_CACHELINE_PAD 8 +#define DISPATCH_QUEUE_CACHELINE_PAD (2*sizeof(void*)) #endif #define DISPATCH_QUEUE_HEADER \ @@ -84,41 +93,35 @@ struct dispatch_queue_vtable_s { unsigned long dq_serialnum; \ dispatch_queue_t dq_specific_q; +DISPATCH_CLASS_DECL(queue); struct dispatch_queue_s { - DISPATCH_STRUCT_HEADER(dispatch_queue_s, dispatch_queue_vtable_s); + DISPATCH_STRUCT_HEADER(queue); 
DISPATCH_QUEUE_HEADER; char dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only }; +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); + +DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue); +DISPATCH_CLASS_DECL(queue_specific_queue); + extern struct dispatch_queue_s _dispatch_mgr_q; void _dispatch_queue_dispose(dispatch_queue_t dq); void _dispatch_queue_invoke(dispatch_queue_t dq); void _dispatch_queue_push_list_slow(dispatch_queue_t dq, + struct dispatch_object_s *obj, unsigned int n); +void _dispatch_queue_push_slow(dispatch_queue_t dq, struct dispatch_object_s *obj); - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail) -{ - struct dispatch_object_s *prev, *head = _head._do, *tail = _tail._do; - - tail->do_next = NULL; - dispatch_atomic_store_barrier(); - prev = fastpath(dispatch_atomic_xchg2o(dq, dq_items_tail, tail)); - if (prev) { - // if we crash here with a value less than 0x1000, then we are at a - // known bug in client code for example, see _dispatch_queue_dispose - // or _dispatch_atfork_child - prev->do_next = head; - } else { - _dispatch_queue_push_list_slow(dq, head); - } -} - -#define _dispatch_queue_push(x, y) _dispatch_queue_push_list((x), (y), (y)) +dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); +void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t + dqsq); +bool _dispatch_queue_probe_root(dispatch_queue_t dq); +bool _dispatch_mgr_wakeup(dispatch_queue_t dq); +DISPATCH_NORETURN +dispatch_queue_t _dispatch_mgr_thread(dispatch_queue_t dq); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); @@ -131,13 +134,6 @@ size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, 
char* buf, size_t bufsiz); -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_queue_get_current(void) -{ - return _dispatch_thread_getspecific(dispatch_queue_key); -} - #define DISPATCH_QUEUE_PRIORITY_COUNT 4 #define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2) @@ -153,16 +149,67 @@ enum { DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY, }; -extern const struct dispatch_queue_attr_vtable_s dispatch_queue_attr_vtable; -extern const struct dispatch_queue_vtable_s _dispatch_queue_vtable; extern unsigned long _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; +#if !__OBJC2__ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, + struct dispatch_object_s *tail) +{ + struct dispatch_object_s *prev; + tail->do_next = NULL; + dispatch_atomic_store_barrier(); + prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail); + if (fastpath(prev)) { + // if we crash here with a value less than 0x1000, then we are at a + // known bug in client code for example, see _dispatch_queue_dispose + // or _dispatch_atfork_child + prev->do_next = head; + } + return prev; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, unsigned int n) +{ + struct dispatch_object_s *head = _head._do, *tail = _tail._do; + if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) { + _dispatch_queue_push_list_slow(dq, head, n); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) +{ + struct dispatch_object_s *tail = _tail._do; + if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { + _dispatch_queue_push_slow(dq, tail); + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_get_current(void) +{ + return _dispatch_thread_getspecific(dispatch_queue_key); +} + 
DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_t _dispatch_get_root_queue(long priority, bool overcommit) { if (overcommit) switch (priority) { + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: +#if !DISPATCH_NO_BG_PRIORITY + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; +#endif case DISPATCH_QUEUE_PRIORITY_LOW: return &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY]; @@ -172,20 +219,19 @@ _dispatch_get_root_queue(long priority, bool overcommit) case DISPATCH_QUEUE_PRIORITY_HIGH: return &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; } switch (priority) { + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: +#if !DISPATCH_NO_BG_PRIORITY + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; +#endif case DISPATCH_QUEUE_PRIORITY_LOW: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_DEFAULT: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_HIGH: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; default: return NULL; } @@ -196,10 +242,7 @@ _dispatch_get_root_queue(long priority, bool overcommit) static inline void _dispatch_queue_init(dispatch_queue_t dq) { - dq->do_vtable = &_dispatch_queue_vtable; dq->do_next = DISPATCH_OBJECT_LISTLESS; - dq->do_ref_cnt = 1; - dq->do_xref_cnt = 1; // Default target queue is overcommit! 
dq->do_targetq = _dispatch_get_root_queue(0, true); dq->dq_running = 0; @@ -207,4 +250,45 @@ _dispatch_queue_init(dispatch_queue_t dq) dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; } +dispatch_continuation_t +_dispatch_continuation_alloc_from_heap(void); + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_alloc_cacheonly(void) +{ + dispatch_continuation_t dc; + dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); + } + return dc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_alloc(void) +{ + dispatch_continuation_t dc; + + dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + if(!dc) { + return _dispatch_continuation_alloc_from_heap(); + } + return dc; +} + + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dispatch_continuation_t prev_dc; + prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); + dc->do_next = prev_dc; + _dispatch_thread_setspecific(dispatch_cache_key, dc); +} + +#endif // !__OBJC2__ + #endif diff --git a/src/semaphore.c b/src/semaphore.c index 29585bdf5..d3fd43117 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -38,24 +38,26 @@ DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); -static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema); -static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, - size_t bufsiz); static long _dispatch_group_wake(dispatch_semaphore_t dsema); #pragma mark - #pragma mark dispatch_semaphore_t -struct dispatch_semaphore_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_semaphore_s); -}; +static void +_dispatch_semaphore_init(long value, dispatch_object_t dou) +{ + dispatch_semaphore_t dsema = dou._dsema; -const struct dispatch_semaphore_vtable_s 
_dispatch_semaphore_vtable = { - .do_type = DISPATCH_SEMAPHORE_TYPE, - .do_kind = "semaphore", - .do_dispose = _dispatch_semaphore_dispose, - .do_debug = _dispatch_semaphore_debug, -}; + dsema->do_next = DISPATCH_OBJECT_LISTLESS; + dsema->do_targetq = dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + dsema->dsema_value = value; + dsema->dsema_orig = value; +#if USE_POSIX_SEM + int ret = sem_init(&dsema->dsema_sem, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif +} dispatch_semaphore_t dispatch_semaphore_create(long value) @@ -69,23 +71,9 @@ dispatch_semaphore_create(long value) return NULL; } - dsema = calloc(1, sizeof(struct dispatch_semaphore_s)); - - if (fastpath(dsema)) { - dsema->do_vtable = &_dispatch_semaphore_vtable; - dsema->do_next = DISPATCH_OBJECT_LISTLESS; - dsema->do_ref_cnt = 1; - dsema->do_xref_cnt = 1; - dsema->do_targetq = dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); - dsema->dsema_value = value; - dsema->dsema_orig = value; -#if USE_POSIX_SEM - int ret = sem_init(&dsema->dsema_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#endif - } - + dsema = _dispatch_alloc(DISPATCH_VTABLE(semaphore), + sizeof(struct dispatch_semaphore_s)); + _dispatch_semaphore_init(value, dsema); return dsema; } @@ -99,6 +87,7 @@ _dispatch_semaphore_create_port(semaphore_t *s4) if (*s4) { return; } + _dispatch_safe_fork = false; // lazily allocate the semaphore port @@ -117,14 +106,14 @@ _dispatch_semaphore_create_port(semaphore_t *s4) kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } - - _dispatch_safe_fork = false; } #endif -static void -_dispatch_semaphore_dispose(dispatch_semaphore_t dsema) +void +_dispatch_semaphore_dispose(dispatch_object_t dou) { + dispatch_semaphore_t dsema = dou._dsema; + if (dsema->dsema_value < dsema->dsema_orig) { DISPATCH_CLIENT_CRASH( "Semaphore/group object deallocated while in use"); @@ -144,13 +133,13 @@ _dispatch_semaphore_dispose(dispatch_semaphore_t dsema) 
int ret = sem_destroy(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); #endif - - _dispatch_dispose(dsema); } -static size_t -_dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz) +size_t +_dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) { + dispatch_semaphore_t dsema = dou._dsema; + size_t offset = 0; offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema); @@ -324,7 +313,10 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) dispatch_group_t dispatch_group_create(void) { - return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX); + dispatch_group_t dg = _dispatch_alloc(DISPATCH_VTABLE(group), + sizeof(struct dispatch_semaphore_s)); + _dispatch_semaphore_init(LONG_MAX, dg); + return dg; } void @@ -566,6 +558,7 @@ DISPATCH_NOINLINE static _dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void) { + _dispatch_safe_fork = false; #if USE_MACH_SEM semaphore_t s4; kern_return_t kr; diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index e5b319e91..e27f9342f 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -27,15 +27,18 @@ #ifndef __DISPATCH_SEMAPHORE_INTERNAL__ #define __DISPATCH_SEMAPHORE_INTERNAL__ +struct dispatch_queue_s; + struct dispatch_sema_notify_s { struct dispatch_sema_notify_s *volatile dsn_next; - dispatch_queue_t dsn_queue; + struct dispatch_queue_s *dsn_queue; void *dsn_ctxt; void (*dsn_func)(void *); }; +DISPATCH_CLASS_DECL(semaphore); struct dispatch_semaphore_s { - DISPATCH_STRUCT_HEADER(dispatch_semaphore_s, dispatch_semaphore_vtable_s); + DISPATCH_STRUCT_HEADER(semaphore); long dsema_value; long dsema_orig; size_t dsema_sent_ksignals; @@ -54,7 +57,11 @@ struct dispatch_semaphore_s { struct dispatch_sema_notify_s *dsema_notify_tail; }; -extern const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable; +DISPATCH_CLASS_DECL(group); + +void 
_dispatch_semaphore_dispose(dispatch_object_t dou); +size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, + size_t bufsiz); typedef uintptr_t _dispatch_thread_semaphore_t; _dispatch_thread_semaphore_t _dispatch_get_thread_semaphore(void); diff --git a/src/shims/atomic.h b/src/shims/atomic.h index fbc11717f..a30c89fe6 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -48,8 +48,8 @@ #define dispatch_atomic_release_barrier() #define dispatch_atomic_store_barrier() -#define _dispatch_hardware_pause() asm("") -#define _dispatch_debugger() asm("trap") +#define _dispatch_hardware_pause() __asm__("") +#define _dispatch_debugger() __asm__("trap") #define dispatch_atomic_cmpxchg(p, e, n) \ __sync_bool_compare_and_swap((p), (e), (n)) @@ -124,9 +124,9 @@ #endif #endif #undef _dispatch_hardware_pause -#define _dispatch_hardware_pause() asm("pause") +#define _dispatch_hardware_pause() __asm__("pause") #undef _dispatch_debugger -#define _dispatch_debugger() asm("int3") +#define _dispatch_debugger() __asm__("int3") #elif defined(__ppc__) || defined(__ppc64__) diff --git a/src/shims/tsd.h b/src/shims/tsd.h index b8c6640b7..f300d64b3 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -45,7 +45,6 @@ static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5; -//__PTK_LIBDISPATCH_KEY5 DISPATCH_TSD_INLINE static inline void @@ -54,12 +53,12 @@ _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) dispatch_assert_zero(pthread_key_init_np((int)*k, d)); } #else -pthread_key_t dispatch_queue_key; -pthread_key_t dispatch_sema4_key; -pthread_key_t dispatch_cache_key; -pthread_key_t dispatch_io_key; -pthread_key_t dispatch_apply_key; -pthread_key_t dispatch_bcounter_key; +extern pthread_key_t dispatch_queue_key; +extern 
pthread_key_t dispatch_sema4_key; +extern pthread_key_t dispatch_cache_key; +extern pthread_key_t dispatch_io_key; +extern pthread_key_t dispatch_apply_key; +extern pthread_key_t dispatch_bcounter_key; DISPATCH_TSD_INLINE static inline void diff --git a/src/source.c b/src/source.c index cf612aacf..2b0a9a2a7 100644 --- a/src/source.c +++ b/src/source.c @@ -25,9 +25,6 @@ #endif #include -static void _dispatch_source_dispose(dispatch_source_t ds); -static dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); -static bool _dispatch_source_probe(dispatch_source_t ds); static void _dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke); static void _dispatch_kevent_register(dispatch_source_t ds); @@ -43,8 +40,6 @@ static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); static void _dispatch_drain_mach_messages(struct kevent *ke); #endif -static size_t _dispatch_source_kevent_debug(dispatch_source_t ds, - char* buf, size_t bufsiz); #if DISPATCH_DEBUG static void _dispatch_kevent_debugger(void *context); #endif @@ -52,15 +47,6 @@ static void _dispatch_kevent_debugger(void *context); #pragma mark - #pragma mark dispatch_source_t -const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable = { - .do_type = DISPATCH_SOURCE_KEVENT_TYPE, - .do_kind = "kevent-source", - .do_invoke = _dispatch_source_invoke, - .do_dispose = _dispatch_source_dispose, - .do_probe = _dispatch_source_probe, - .do_debug = _dispatch_source_kevent_debug, -}; - dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, @@ -73,13 +59,13 @@ dispatch_source_create(dispatch_source_type_t type, // input validation if (type == NULL || (mask & ~type->mask)) { - goto out_bad; + return NULL; } switch (type->ke.filter) { case EVFILT_SIGNAL: if (handle >= NSIG) { - goto out_bad; + return NULL; } break; case EVFILT_FS: @@ -90,22 +76,14 @@ dispatch_source_create(dispatch_source_type_t 
type, case DISPATCH_EVFILT_CUSTOM_OR: case DISPATCH_EVFILT_TIMER: if (handle) { - goto out_bad; + return NULL; } break; default: break; } - ds = calloc(1ul, sizeof(struct dispatch_source_s)); - if (slowpath(!ds)) { - goto out_bad; - } dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); - if (slowpath(!dk)) { - goto out_bad; - } - dk->dk_kevent = *proto_kev; dk->dk_kevent.ident = handle; dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; @@ -113,12 +91,13 @@ dispatch_source_create(dispatch_source_type_t type, dk->dk_kevent.udata = dk; TAILQ_INIT(&dk->dk_sources); + ds = _dispatch_alloc(DISPATCH_VTABLE(source), + sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. _dispatch_queue_init((dispatch_queue_t)ds); strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); // Dispatch Object - ds->do_vtable = &_dispatch_source_kevent_vtable; ds->do_ref_cnt++; // the reference the manger queue holds ds->do_ref_cnt++; // since source is created suspended ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; @@ -154,7 +133,7 @@ dispatch_source_create(dispatch_source_type_t type, // First item on the queue sets the user-specified target queue dispatch_set_target_queue(ds, q); #if DISPATCH_DEBUG - dispatch_debug(ds, "%s", __FUNCTION__); + dispatch_debug(ds, "%s", __func__); #endif return ds; @@ -164,7 +143,7 @@ dispatch_source_create(dispatch_source_type_t type, return NULL; } -static void +void _dispatch_source_dispose(dispatch_source_t ds) { free(ds->ds_refs); @@ -172,21 +151,16 @@ _dispatch_source_dispose(dispatch_source_t ds) } void -_dispatch_source_xref_release(dispatch_source_t ds) +_dispatch_source_xref_dispose(dispatch_source_t ds) { - if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH("Release of a suspended object"); - } _dispatch_wakeup(ds); - _dispatch_release(ds); } void dispatch_source_cancel(dispatch_source_t ds) { #if DISPATCH_DEBUG - 
dispatch_debug(ds, "%s", __FUNCTION__); + dispatch_debug(ds, "%s", __func__); #endif // Right after we set the cancel flag, someone else // could potentially invoke the source, do the cancelation, @@ -251,7 +225,7 @@ _dispatch_source_set_event_handler2(void *context) struct Block_layout *bl = context; dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { @@ -276,7 +250,7 @@ static void _dispatch_source_set_event_handler_f(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; #ifdef __BLOCKS__ @@ -304,7 +278,7 @@ static void _dispatch_source_set_cancel_handler2(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; if (ds->ds_cancel_is_block && dr->ds_cancel_handler) { @@ -328,7 +302,7 @@ static void _dispatch_source_set_cancel_handler_f(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; #ifdef __BLOCKS__ @@ -353,7 +327,7 @@ static void _dispatch_source_set_registration_handler2(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); 
dispatch_source_refs_t dr = ds->ds_refs; if (ds->ds_registration_is_block && dr->ds_registration_handler) { @@ -377,7 +351,7 @@ static void _dispatch_source_set_registration_handler_f(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(ds->do_vtable == &_dispatch_source_kevent_vtable); + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; #ifdef __BLOCKS__ @@ -405,7 +379,7 @@ _dispatch_source_registration_callout(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { // no registration callout if source is canceled rdar://problem/8955246 #ifdef __BLOCKS__ if (ds->ds_registration_is_block) { @@ -473,7 +447,7 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) { unsigned long prev; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { return; } dispatch_source_refs_t dr = ds->ds_refs; @@ -508,7 +482,7 @@ _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) } } -static dispatch_queue_t +dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds) { // This function performs all source actions. 
Each action is responsible @@ -530,7 +504,7 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dr->ds_registration_handler) { return ds->do_targetq; } - if (slowpath(ds->do_xref_cnt == 0)) { + if (slowpath(ds->do_xref_cnt == -1)) { return &_dispatch_mgr_q; // rdar://problem/9558246 } } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { @@ -544,10 +518,10 @@ _dispatch_source_invoke(dispatch_source_t ds) } // clears ds_registration_handler _dispatch_source_registration_callout(ds); - if (slowpath(ds->do_xref_cnt == 0)) { + if (slowpath(ds->do_xref_cnt == -1)) { return &_dispatch_mgr_q; // rdar://problem/9558246 } - } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){ // The source has been cancelled and needs to be uninstalled from the // manager queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. @@ -556,8 +530,9 @@ _dispatch_source_invoke(dispatch_source_t ds) return &_dispatch_mgr_q; } _dispatch_kevent_unregister(ds); - return ds->do_targetq; - } else if (dr->ds_cancel_handler) { + } + if (dr->ds_cancel_handler || ds->ds_handler_is_block || + ds->ds_registration_is_block) { if (dq != ds->do_targetq) { return ds->do_targetq; } @@ -586,7 +561,7 @@ _dispatch_source_invoke(dispatch_source_t ds) return NULL; } -static bool +bool _dispatch_source_probe(dispatch_source_t ds) { // This function determines whether the source needs to be invoked. @@ -599,11 +574,15 @@ _dispatch_source_probe(dispatch_source_t ds) } else if (dr->ds_registration_handler) { // The registration handler needs to be delivered to the target queue. 
return true; - } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){ // The source needs to be uninstalled from the manager queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. - if (ds->ds_dkev || dr->ds_cancel_handler) { + if (ds->ds_dkev || dr->ds_cancel_handler +#ifdef __BLOCKS__ + || ds->ds_handler_is_block || ds->ds_registration_is_block +#endif + ) { return true; } } else if (ds->ds_pending_data) { @@ -625,7 +604,7 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) { struct kevent fake; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == 0)) { + if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { return; } @@ -1841,7 +1820,7 @@ _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) ds_timer(dr).flags); } -static size_t +size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) { size_t offset = 0; @@ -1852,13 +1831,6 @@ _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } - return offset; -} - -static size_t -_dispatch_source_kevent_debug(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - size_t offset = _dispatch_source_debug(ds, buf, bufsiz); offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); return offset; @@ -1935,13 +1907,13 @@ _dispatch_kevent_debugger2(void *context) ds = _dispatch_source_from_refs(dr); fprintf(debug_stream, "\t\t\t
  • DS %p refcnt 0x%x suspend " "0x%x data 0x%lx mask 0x%lx flags 0x%x
  • \n", - ds, ds->do_ref_cnt, ds->do_suspend_cnt, + ds, ds->do_ref_cnt + 1, ds->do_suspend_cnt, ds->ds_pending_data, ds->ds_pending_data_mask, ds->ds_atomic_flags); if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { dispatch_queue_t dq = ds->do_targetq; fprintf(debug_stream, "\t\t
    DQ: %p refcnt 0x%x suspend " - "0x%x label: %s\n", dq, dq->do_ref_cnt, + "0x%x label: %s\n", dq, dq->do_ref_cnt + 1, dq->do_suspend_cnt, dq->dq_label); } } diff --git a/src/source_internal.h b/src/source_internal.h index a44eef7c1..c2c706f84 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -71,12 +71,6 @@ enum { #define DISPATCH_TIMER_INDEX_MACH 1 #define DISPATCH_TIMER_INDEX_DISARM 2 -struct dispatch_source_vtable_s { - DISPATCH_VTABLE_HEADER(dispatch_source_s); -}; - -extern const struct dispatch_source_vtable_s _dispatch_source_kevent_vtable; - struct dispatch_kevent_s { TAILQ_ENTRY(dispatch_kevent_s) dk_list; TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; @@ -130,8 +124,9 @@ struct dispatch_timer_source_refs_s { #define DSF_CANCELED 1u // cancellation has been requested #define DSF_ARMED 2u // source is armed +DISPATCH_CLASS_DECL(source); struct dispatch_source_s { - DISPATCH_STRUCT_HEADER(dispatch_source_s, dispatch_source_vtable_s); + DISPATCH_STRUCT_HEADER(source); DISPATCH_QUEUE_HEADER; // Instruments always copies DISPATCH_QUEUE_MIN_LABEL_SIZE, which is 64, // so the remainder of the structure must be big enough @@ -159,7 +154,11 @@ struct dispatch_source_s { }; }; -void _dispatch_source_xref_release(dispatch_source_t ds); +void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_mach_notify_source_init(void *context); +dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); +void _dispatch_source_dispose(dispatch_source_t ds); +bool _dispatch_source_probe(dispatch_source_t ds); +size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/trace.h b/src/trace.h index 0d9bc3dc5..4969cbe1b 100644 --- a/src/trace.h +++ b/src/trace.h @@ -27,7 +27,7 @@ #ifndef __DISPATCH_TRACE__ #define __DISPATCH_TRACE__ -#if DISPATCH_USE_DTRACE +#if DISPATCH_USE_DTRACE && !__OBJC2__ #include "provider.h" @@ -113,7 +113,7 @@ 
_dispatch_trace_client_callout_block(dispatch_block_t b) DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail) + dispatch_object_t _tail, unsigned int n) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _head._do; @@ -121,17 +121,29 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } while (dou != _tail._do && (dou = dou->do_next)); } - _dispatch_queue_push_list(dq, _head, _tail); + _dispatch_queue_push_list(dq, _head, _tail, n); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) +{ + if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + struct dispatch_object_s *dou = _tail._do; + _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + } + _dispatch_queue_push(dq, _tail); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) { - _dispatch_queue_push_list(dq, dou, dou); + _dispatch_queue_push(dq, dou); } #define _dispatch_queue_push_list _dispatch_trace_queue_push_list +#define _dispatch_queue_push _dispatch_trace_queue_push DISPATCH_ALWAYS_INLINE static inline void @@ -145,8 +157,8 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, #else #define _dispatch_queue_push_notrace _dispatch_queue_push -#define _dispatch_trace_continuation_pop(dq, dou) +#define _dispatch_trace_continuation_pop(dq, dou) (void)(dq) -#endif // DISPATCH_USE_DTRACE +#endif // DISPATCH_USE_DTRACE && !__OBJC2__ #endif // __DISPATCH_TRACE__ diff --git a/src/transform.c b/src/transform.c new file mode 100644 index 000000000..775ce4127 --- /dev/null +++ b/src/transform.c @@ -0,0 +1,1015 @@ +/* + * Copyright (c) 2011 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#include + +#if defined(__LITTLE_ENDIAN__) +#define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16LE +#define DISPATCH_DATA_FORMAT_TYPE_UTF16_REV DISPATCH_DATA_FORMAT_TYPE_UTF16BE +#elif defined(__BIG_ENDIAN__) +#define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16BE +#define DISPATCH_DATA_FORMAT_TYPE_UTF16_REV DISPATCH_DATA_FORMAT_TYPE_UTF16LE +#endif + +enum { + _DISPATCH_DATA_FORMAT_NONE = 0x1, + _DISPATCH_DATA_FORMAT_UTF8 = 0x2, + _DISPATCH_DATA_FORMAT_UTF16LE = 0x4, + _DISPATCH_DATA_FORMAT_UTF16BE = 0x8, + _DISPATCH_DATA_FORMAT_UTF_ANY = 0x10, + _DISPATCH_DATA_FORMAT_BASE32 = 0x20, + _DISPATCH_DATA_FORMAT_BASE64 = 0x40, +}; + +#pragma mark - +#pragma mark baseXX tables + +static const char base32_encode_table[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ23456789"; + +static const char base32_decode_table[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, + 27, 28, 29, 30, 31, -1, -1, -1, -1, -1, -2, -1, -1, -1, 0, 1, 2, + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25 +}; +static const ssize_t base32_decode_table_size = sizeof(base32_decode_table) + / 
sizeof(*base32_decode_table); + +static const char base64_encode_table[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +static const char base64_decode_table[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 62, -1, -1, -1, 63, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, -1, -1, -1, -2, -1, -1, -1, 0, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, -1, -1, -1, -1, -1, -1, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 +}; + +static const ssize_t base64_decode_table_size = sizeof(base64_decode_table) + / sizeof(*base64_decode_table); + +#pragma mark - +#pragma mark dispatch_transform_buffer + +typedef struct dispatch_transform_buffer_s { + dispatch_data_t data; + uint8_t *start; + union { + uint8_t *u8; + uint16_t *u16; + } ptr; + size_t size; +} dispatch_transform_buffer_s; + +static size_t +_dispatch_transform_sizet_mul(size_t a, size_t b) +{ + size_t rv = SIZE_MAX; + if (a == 0 || rv/a >= b) { + rv = a * b; + } + return rv; +} + +#define BUFFER_MALLOC_MAX (100*1024*1024) + +static bool +_dispatch_transform_buffer_new(dispatch_transform_buffer_s *buffer, + size_t required, size_t size) +{ + size_t remaining = buffer->size - (buffer->ptr.u8 - buffer->start); + if (required == 0 || remaining < required) { + if (buffer->start) { + if (buffer->ptr.u8 > buffer->start) { + dispatch_data_t _new = dispatch_data_create(buffer->start, + buffer->ptr.u8 - buffer->start, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); + dispatch_data_t _concat = dispatch_data_create_concat( + buffer->data, _new); + dispatch_release(_new); + dispatch_release(buffer->data); + buffer->data = _concat; + } else { + free(buffer->start); + } + } + buffer->size = required + size; + buffer->start = NULL; + if (buffer->size > 0) { + if 
(buffer->size > BUFFER_MALLOC_MAX) { + return false; + } + buffer->start = (uint8_t*)malloc(buffer->size); + if (buffer->start == NULL) { + return false; + } + } + buffer->ptr.u8 = buffer->start; + } + return true; +} + +#pragma mark - +#pragma mark dispatch_transform_helpers + +static dispatch_data_t +_dispatch_data_subrange_map(dispatch_data_t data, const void **ptr, + size_t offset, size_t size) +{ + dispatch_data_t subrange, map = NULL; + + subrange = dispatch_data_create_subrange(data, offset, size); + if (dispatch_data_get_size(subrange) == size) { + map = dispatch_data_create_map(subrange, ptr, NULL); + } + dispatch_release(subrange); + return map; +} + +static dispatch_data_format_type_t +_dispatch_transform_detect_utf(dispatch_data_t data) +{ + const void *p; + dispatch_data_t subrange = _dispatch_data_subrange_map(data, &p, 0, 2); + + if (subrange == NULL) { + return NULL; + } + + const uint16_t ch = *(const uint16_t *)p; + dispatch_data_format_type_t type = DISPATCH_DATA_FORMAT_TYPE_UTF8; + + if (ch == 0xfeff) { + type = DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST; + } else if (ch == 0xfffe) { + type = DISPATCH_DATA_FORMAT_TYPE_UTF16_REV; + } + + dispatch_release(subrange); + + return type; +} + +static uint16_t +_dispatch_transform_swap_to_host(uint16_t x, int32_t byteOrder) +{ + if (byteOrder == OSLittleEndian) { + return OSSwapLittleToHostInt16(x); + } + return OSSwapBigToHostInt16(x); +} + +static uint16_t +_dispatch_transform_swap_from_host(uint16_t x, int32_t byteOrder) +{ + if (byteOrder == OSLittleEndian) { + return OSSwapHostToLittleInt16(x); + } + return OSSwapHostToBigInt16(x); +} + +#pragma mark - +#pragma mark UTF-8 + +static uint8_t +_dispatch_transform_utf8_length(uint8_t byte) +{ + if ((byte & 0x80) == 0) { + return 1; + } else if ((byte & 0xe0) == 0xc0) { + return 2; + } else if ((byte & 0xf0) == 0xe0) { + return 3; + } else if ((byte & 0xf8) == 0xf0) { + return 4; + } + return 0; +} + +static uint32_t 
+_dispatch_transform_read_utf8_sequence(const uint8_t *bytes) +{ + uint32_t wch = 0; + uint8_t seq_length = _dispatch_transform_utf8_length(*bytes); + + switch (seq_length) { + case 4: + wch |= (*bytes & 0x7); + wch <<= 6; + break; + case 3: + wch |= (*bytes & 0xf); + wch <<= 6; + break; + case 2: + wch |= (*bytes & 0x1f); + wch <<= 6; + break; + case 1: + wch = (*bytes & 0x7f); + break; + default: + // Not a utf-8 sequence + break; + } + + bytes++; + seq_length--; + + while (seq_length > 0) { + wch |= (*bytes & 0x3f); + bytes++; + seq_length--; + + if (seq_length > 0) { + wch <<= 6; + } + } + return wch; +} + +#pragma mark - +#pragma mark UTF-16 + +static dispatch_data_t +_dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) +{ + __block size_t skip = 0; + + __block dispatch_transform_buffer_s buffer = { + .data = dispatch_data_empty, + }; + + bool success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, + size_t offset, const void *_buffer, size_t size) { + const uint8_t *src = _buffer; + size_t i; + + if (offset == 0) { + size_t dest_size = 2 + _dispatch_transform_sizet_mul(size, + sizeof(uint16_t)); + if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { + return (bool)false; + } + // Insert BOM + *(buffer.ptr.u16)++ = _dispatch_transform_swap_from_host(0xfeff, + byteOrder); + } + + // Skip is incremented if the previous block read-ahead into our block + if (skip >= size) { + skip -= size; + return (bool)true; + } else if (skip > 0) { + src += skip; + size -= skip; + skip = 0; + } + + for (i = 0; i < size;) { + uint32_t wch = 0; + uint8_t byte_size = _dispatch_transform_utf8_length(*src); + + if (byte_size == 0) { + return (bool)false; + } else if (byte_size + i > size) { + // UTF-8 byte sequence spans over into the next block(s) + const void *p; + dispatch_data_t subrange = _dispatch_data_subrange_map(data, &p, + offset + i, byte_size); + if (subrange == NULL) { + return (bool)false; + } + + wch = 
_dispatch_transform_read_utf8_sequence(p); + skip += byte_size - (size - i); + src += byte_size; + i = size; + + dispatch_release(subrange); + } else { + wch = _dispatch_transform_read_utf8_sequence(src); + src += byte_size; + i += byte_size; + } + + size_t next = _dispatch_transform_sizet_mul(size - i, sizeof(uint16_t)); + if (wch >= 0xd800 && wch < 0xdfff) { + // Illegal range (surrogate pair) + return (bool)false; + } else if (wch >= 0x10000) { + // Surrogate pair + if (!_dispatch_transform_buffer_new(&buffer, 2 * + sizeof(uint16_t), next)) { + return (bool)false; + } + wch -= 0x10000; + *(buffer.ptr.u16)++ = _dispatch_transform_swap_from_host( + ((wch >> 10) & 0x3ff) + 0xd800, byteOrder); + *(buffer.ptr.u16)++ = _dispatch_transform_swap_from_host( + (wch & 0x3ff) + 0xdc00, byteOrder); + } else { + if (!_dispatch_transform_buffer_new(&buffer, 1 * + sizeof(uint16_t), next)) { + return (bool)false; + } + *(buffer.ptr.u16)++ = _dispatch_transform_swap_from_host( + (wch & 0xffff), byteOrder); + } + } + + (void)_dispatch_transform_buffer_new(&buffer, 0, 0); + + return (bool)true; + }); + + if (!success) { + dispatch_release(buffer.data); + return NULL; + } + + return buffer.data; +} + +static dispatch_data_t +_dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) +{ + __block size_t skip = 0; + + __block dispatch_transform_buffer_s buffer = { + .data = dispatch_data_empty, + }; + + bool success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, size_t offset, + const void *_buffer, size_t size) { + const uint16_t *src = _buffer; + + if (offset == 0) { + // Assume first buffer will be mostly single-byte UTF-8 sequences + size_t dest_size = _dispatch_transform_sizet_mul(size, 2) / 3; + if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { + return (bool)false; + } + } + + size_t i = 0, max = size / 2; + + // Skip is incremented if the previous block read-ahead into our block + if (skip >= size) { + skip -= size; + return 
(bool)true; + } else if (skip > 0) { + src = (uint16_t *)(((uint8_t *)src) + skip); + size -= skip; + max = (size / 2); + skip = 0; + } + + // If the buffer is an odd size, allow read ahead into the next region + if ((size % 2) != 0) { + max += 1; + } + + for (i = 0; i < max; i++) { + uint32_t wch = 0; + uint16_t ch; + + if ((i == (max - 1)) && (max > (size / 2))) { + // Last byte of an odd sized range + const void *p; + dispatch_data_t range = _dispatch_data_subrange_map(data, &p, + offset + (i * 2), 2); + if (range == NULL) { + return (bool)false; + } + ch = _dispatch_transform_swap_to_host(*(uint64_t*)p, byteOrder); + dispatch_release(range); + skip += 1; + } else { + ch = _dispatch_transform_swap_to_host(src[i], byteOrder); + } + + if (ch == 0xfffe && offset == 0 && i == 0) { + // Wrong-endian BOM at beginning of data + return (bool)false; + } else if (ch == 0xfeff && offset == 0 && i == 0) { + // Correct-endian BOM, skip it + continue; + } + + if ((ch >= 0xd800) && (ch <= 0xdbff)) { + // Surrogate pair + wch = ((ch - 0xd800) << 10); + if (++i >= max) { + // Surrogate byte isn't in this block + const void *p; + dispatch_data_t range = _dispatch_data_subrange_map(data, + &p, offset + (i * 2), 2); + if (range == NULL) { + return (bool)false; + } + ch = _dispatch_transform_swap_to_host(*(uint16_t *)p, + byteOrder); + dispatch_release(range); + skip += 2; + } else { + ch = _dispatch_transform_swap_to_host(src[i], byteOrder); + } + if (!((ch >= 0xdc00) && (ch <= 0xdfff))) { + return (bool)false; + } + wch = (wch | (ch & 0x3ff)); + wch += 0x10000; + } else if ((ch >= 0xdc00) && (ch <= 0xdfff)) { + return (bool)false; + } else { + wch = ch; + } + + size_t next = _dispatch_transform_sizet_mul(max - i, 2); + if (wch < 0x80) { + if (!_dispatch_transform_buffer_new(&buffer, 1, next)) { + return (bool)false; + } + *(buffer.ptr.u8)++ = (uint8_t)(wch & 0xff); + } else if (wch < 0x800) { + if (!_dispatch_transform_buffer_new(&buffer, 2, next)) { + return (bool)false; + } + 
*(buffer.ptr.u8)++ = (uint8_t)(0xc0 | (wch >> 6)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | (wch & 0x3f)); + } else if (wch < 0x10000) { + if (!_dispatch_transform_buffer_new(&buffer, 3, next)) { + return (bool)false; + } + *(buffer.ptr.u8)++ = (uint8_t)(0xe0 | (wch >> 12)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | ((wch >> 6) & 0x3f)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | (wch & 0x3f)); + } else if (wch < 0x200000) { + if (!_dispatch_transform_buffer_new(&buffer, 4, next)) { + return (bool)false; + } + *(buffer.ptr.u8)++ = (uint8_t)(0xf0 | (wch >> 18)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | ((wch >> 12) & 0x3f)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | ((wch >> 6) & 0x3f)); + *(buffer.ptr.u8)++ = (uint8_t)(0x80 | (wch & 0x3f)); + } + } + + (void)_dispatch_transform_buffer_new(&buffer, 0, 0); + + return (bool)true; + }); + + if (!success) { + dispatch_release(buffer.data); + return NULL; + } + + return buffer.data; +} + +static dispatch_data_t +_dispatch_transform_from_utf16le(dispatch_data_t data) +{ + return _dispatch_transform_from_utf16(data, OSLittleEndian); +} + +static dispatch_data_t +_dispatch_transform_from_utf16be(dispatch_data_t data) +{ + return _dispatch_transform_from_utf16(data, OSBigEndian); +} + +static dispatch_data_t +_dispatch_transform_to_utf16le(dispatch_data_t data) +{ + return _dispatch_transform_to_utf16(data, OSLittleEndian); +} + +static dispatch_data_t +_dispatch_transform_to_utf16be(dispatch_data_t data) +{ + return _dispatch_transform_to_utf16(data, OSBigEndian); +} + +#pragma mark - +#pragma mark base32 + +static dispatch_data_t +_dispatch_transform_from_base32(dispatch_data_t data) +{ + __block uint64_t x = 0, count = 0, pad = 0; + + __block dispatch_data_t rv = dispatch_data_empty; + + bool success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, + DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { + size_t i, dest_size = (size * 5) / 8; + + uint8_t *dest = (uint8_t*)malloc(dest_size * 
sizeof(char)); + uint8_t *ptr = dest; + if (dest == NULL) { + return (bool)false; + } + + const uint8_t *bytes = buffer; + + for (i = 0; i < size; i++) { + if (bytes[i] == '\n' || bytes[i] == '\t' || bytes[i] == ' ') { + continue; + } + + ssize_t index = bytes[i]; + if (index >= base32_decode_table_size || + base32_decode_table[index] == -1) { + free(dest); + return (bool)false; + } + count++; + + char value = base32_decode_table[index]; + if (value == -2) { + value = 0; + pad++; + } + + x <<= 5; + x += value; + + if ((count & 0x7) == 0) { + *ptr++ = (x >> 32) & 0xff; + *ptr++ = (x >> 24) & 0xff; + *ptr++ = (x >> 16) & 0xff; + *ptr++ = (x >> 8) & 0xff; + *ptr++ = x & 0xff; + } + } + + size_t final = (ptr - dest); + switch (pad) { + case 1: + final -= 1; + break; + case 3: + final -= 2; + break; + case 4: + final -= 3; + break; + case 6: + final -= 4; + break; + } + + dispatch_data_t val = dispatch_data_create(dest, final, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); + dispatch_data_t concat = dispatch_data_create_concat(rv, val); + + dispatch_release(val); + dispatch_release(rv); + rv = concat; + + return (bool)true; + }); + + if (!success) { + dispatch_release(rv); + return NULL; + } + + return rv; +} + +static dispatch_data_t +_dispatch_transform_to_base32(dispatch_data_t data) +{ + size_t total = dispatch_data_get_size(data); + __block size_t count = 0; + + size_t dest_size = ((total + 4) * 8) / 5; + dest_size -= dest_size % 8; + + uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + if (dest == NULL) { + return NULL; + } + + __block uint8_t *ptr = dest; + + /* + 0 1 2 3 4 + 8-bit bytes: xxxxxxxx yyyyyyyy zzzzzzzz xxxxxxxx yyyyyyyy + 5-bit chunks: aaaaabbb bbcccccd ddddeeee efffffgg ggghhhhh + */ + + bool success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, + size_t offset, const void *buffer, size_t size) { + const uint8_t *bytes = buffer; + size_t i; + + for (i = 0; i < size; i++, count++) { + uint8_t curr = bytes[i], last = 
0; + + if ((count % 5) != 0) { + if (i == 0) { + const void *p; + dispatch_data_t subrange = _dispatch_data_subrange_map(data, + &p, offset - 1, 1); + if (subrange == NULL) { + return (bool)false; + } + last = *(uint8_t*)p; + dispatch_release(subrange); + } else { + last = bytes[i - 1]; + } + } + + switch (count % 5) { + case 0: + // a + *ptr++ = base32_encode_table[(curr >> 3) & 0x1f]; + break; + case 1: + // b + c + *ptr++ = base32_encode_table[((last << 2)|(curr >> 6)) & 0x1f]; + *ptr++ = base32_encode_table[(curr >> 1) & 0x1f]; + break; + case 2: + // d + *ptr++ = base32_encode_table[((last << 4)|(curr >> 4)) & 0x1f]; + break; + case 3: + // e + f + *ptr++ = base32_encode_table[((last << 1)|(curr >> 7)) & 0x1f]; + *ptr++ = base32_encode_table[(curr >> 2) & 0x1f]; + break; + case 4: + // g + h + *ptr++ = base32_encode_table[((last << 3)|(curr >> 5)) & 0x1f]; + *ptr++ = base32_encode_table[curr & 0x1f]; + break; + } + } + + // Last region, insert padding bytes, if needed + if (offset + size == total) { + switch (count % 5) { + case 0: + break; + case 1: + // b[4:2] + *ptr++ = base32_encode_table[(bytes[size-1] << 2) & 0x1c]; + break; + case 2: + // d[4] + *ptr++ = base32_encode_table[(bytes[size-1] << 4) & 0x10]; + break; + case 3: + // e[4:1] + *ptr++ = base32_encode_table[(bytes[size-1] << 1) & 0x1e]; + break; + case 4: + // g[4:3] + *ptr++ = base32_encode_table[bytes[size-1] & 0x18]; + break; + } + switch (count % 5) { + case 0: + break; + case 1: + *ptr++ = '='; // c + *ptr++ = '='; // d + case 2: + *ptr++ = '='; // e + case 3: + *ptr++ = '='; // f + *ptr++ = '='; // g + case 4: + *ptr++ = '='; // h + break; + } + } + + return (bool)true; + }); + + if (!success) { + free(dest); + return NULL; + } + return dispatch_data_create(dest, dest_size, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); +} + +#pragma mark - +#pragma mark base64 + +static dispatch_data_t +_dispatch_transform_from_base64(dispatch_data_t data) +{ + __block uint64_t x = 0, count = 0; + __block size_t 
pad = 0; + + __block dispatch_data_t rv = dispatch_data_empty; + + bool success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, + DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { + size_t i, dest_size = (size * 3) / 4; + + uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + uint8_t *ptr = dest; + if (dest == NULL) { + return (bool)false; + } + + const uint8_t *bytes = buffer; + + for (i = 0; i < size; i++) { + if (bytes[i] == '\n' || bytes[i] == '\t' || bytes[i] == ' ') { + continue; + } + + ssize_t index = bytes[i]; + if (index >= base64_decode_table_size || + base64_decode_table[index] == -1) { + free(dest); + return (bool)false; + } + count++; + + char value = base64_decode_table[index]; + if (value == -2) { + value = 0; + pad++; + } + + x <<= 6; + x += value; + + if ((count & 0x3) == 0) { + *ptr++ = (x >> 16) & 0xff; + *ptr++ = (x >> 8) & 0xff; + *ptr++ = x & 0xff; + } + } + + size_t final = (ptr - dest); + if (pad > 0) { + // 2 bytes of pad means only had one char in final group + final -= pad; + } + + dispatch_data_t val = dispatch_data_create(dest, final, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); + dispatch_data_t concat = dispatch_data_create_concat(rv, val); + + dispatch_release(val); + dispatch_release(rv); + rv = concat; + + return (bool)true; + }); + + if (!success) { + dispatch_release(rv); + return NULL; + } + + return rv; +} + +static dispatch_data_t +_dispatch_transform_to_base64(dispatch_data_t data) +{ + // RFC 4648 states that we should not linebreak + // http://tools.ietf.org/html/rfc4648 + size_t total = dispatch_data_get_size(data); + __block size_t count = 0; + + size_t dest_size = ((total + 2) * 4) / 3; + dest_size -= dest_size % 4; + + uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + if (dest == NULL) { + return NULL; + } + + __block uint8_t *ptr = dest; + + /* + * 3 8-bit bytes: xxxxxxxx yyyyyyyy zzzzzzzz + * 4 6-bit chunks: aaaaaabb bbbbcccc ccdddddd + */ + + bool 
success = dispatch_data_apply(data, ^( + DISPATCH_UNUSED dispatch_data_t region, + size_t offset, const void *buffer, size_t size) { + const uint8_t *bytes = buffer; + size_t i; + + for (i = 0; i < size; i++, count++) { + uint8_t curr = bytes[i], last = 0; + + if ((count % 3) != 0) { + if (i == 0) { + const void *p; + dispatch_data_t subrange = _dispatch_data_subrange_map(data, + &p, offset - 1, 1); + if (subrange == NULL) { + return (bool)false; + } + last = *(uint8_t*)p; + dispatch_release(subrange); + } else { + last = bytes[i - 1]; + } + } + + switch (count % 3) { + case 0: + *ptr++ = base64_encode_table[(curr >> 2) & 0x3f]; + break; + case 1: + *ptr++ = base64_encode_table[((last << 4)|(curr >> 4)) & 0x3f]; + break; + case 2: + *ptr++ = base64_encode_table[((last << 2)|(curr >> 6)) & 0x3f]; + *ptr++ = base64_encode_table[(curr & 0x3f)]; + break; + } + } + + // Last region, insert padding bytes, if needed + if (offset + size == total) { + switch (count % 3) { + case 0: + break; + case 1: + *ptr++ = base64_encode_table[(bytes[size-1] << 4) & 0x30]; + *ptr++ = '='; + *ptr++ = '='; + break; + case 2: + *ptr++ = base64_encode_table[(bytes[size-1] << 2) & 0x3c]; + *ptr++ = '='; + break; + } + } + + return (bool)true; + }); + + if (!success) { + free(dest); + return NULL; + } + return dispatch_data_create(dest, dest_size, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); +} + +#pragma mark - +#pragma mark dispatch_data_transform + +dispatch_data_t +dispatch_data_create_with_transform(dispatch_data_t data, + dispatch_data_format_type_t input, dispatch_data_format_type_t output) +{ + if (input->type == _DISPATCH_DATA_FORMAT_UTF_ANY) { + input = _dispatch_transform_detect_utf(data); + } + + if ((input->type & ~output->input_mask) != 0) { + return NULL; + } + + if ((output->type & ~input->output_mask) != 0) { + return NULL; + } + + if (dispatch_data_get_size(data) == 0) { + return data; + } + + dispatch_data_t temp1; + if (input->decode) { + temp1 = input->decode(data); + } else { 
+ dispatch_retain(data); + temp1 = data; + } + + if (!temp1) { + return NULL; + } + + dispatch_data_t temp2; + if (output->encode) { + temp2 = output->encode(temp1); + } else { + dispatch_retain(temp1); + temp2 = temp1; + } + + dispatch_release(temp1); + return temp2; +} + +const struct dispatch_data_format_type_s _dispatch_data_format_type_none = { + .type = _DISPATCH_DATA_FORMAT_NONE, + .input_mask = ~0, + .output_mask = ~0, + .decode = NULL, + .encode = NULL, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_base32 = { + .type = _DISPATCH_DATA_FORMAT_BASE32, + .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE64), + .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE64), + .decode = _dispatch_transform_from_base32, + .encode = _dispatch_transform_to_base32, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_base64 = { + .type = _DISPATCH_DATA_FORMAT_BASE64, + .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE64), + .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE64), + .decode = _dispatch_transform_from_base64, + .encode = _dispatch_transform_to_base64, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le = { + .type = _DISPATCH_DATA_FORMAT_UTF16LE, + .input_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + _DISPATCH_DATA_FORMAT_UTF16LE), + .output_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + _DISPATCH_DATA_FORMAT_UTF16LE), + .decode = _dispatch_transform_from_utf16le, + .encode = _dispatch_transform_to_utf16le, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be = { + .type = _DISPATCH_DATA_FORMAT_UTF16BE, + .input_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + 
_DISPATCH_DATA_FORMAT_UTF16LE), + .output_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + _DISPATCH_DATA_FORMAT_UTF16LE), + .decode = _dispatch_transform_from_utf16be, + .encode = _dispatch_transform_to_utf16be, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8 = { + .type = _DISPATCH_DATA_FORMAT_UTF8, + .input_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + _DISPATCH_DATA_FORMAT_UTF16LE), + .output_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | + _DISPATCH_DATA_FORMAT_UTF16LE), + .decode = NULL, + .encode = NULL, +}; + +const struct dispatch_data_format_type_s _dispatch_data_format_type_utf_any = { + .type = _DISPATCH_DATA_FORMAT_UTF_ANY, + .input_mask = 0, + .output_mask = 0, + .decode = NULL, + .encode = NULL, +}; diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases new file mode 100644 index 000000000..aae0bccde --- /dev/null +++ b/xcodeconfig/libdispatch.aliases @@ -0,0 +1,12 @@ +_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable +_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable +_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable +_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable +_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_data __dispatch_data_vtable +_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable +_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable +_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order new file mode 100644 index 000000000..64787b7a4 --- /dev/null +++ b/xcodeconfig/libdispatch.order @@ -0,0 +1,40 @@ +_OBJC_CLASS_$_OS_object 
+_OBJC_METACLASS_$_OS_object +_OBJC_CLASS_$_OS_dispatch_object +_OBJC_METACLASS_$_OS_dispatch_object +_OBJC_CLASS_$_OS_dispatch_semaphore +__OS_dispatch_semaphore_vtable +_OBJC_METACLASS_$_OS_dispatch_semaphore +_OBJC_CLASS_$_OS_dispatch_group +__OS_dispatch_group_vtable +_OBJC_METACLASS_$_OS_dispatch_group +_OBJC_CLASS_$_OS_dispatch_queue +__OS_dispatch_queue_vtable +_OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_CLASS_$_OS_dispatch_queue_root +__OS_dispatch_queue_root_vtable +_OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_CLASS_$_OS_dispatch_queue_mgr +__OS_dispatch_queue_mgr_vtable +_OBJC_METACLASS_$_OS_dispatch_queue_mgr +_OBJC_CLASS_$_OS_dispatch_queue_specific_queue +__OS_dispatch_queue_specific_queue_vtable +_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue +_OBJC_CLASS_$_OS_dispatch_queue_attr +__OS_dispatch_queue_attr_vtable +_OBJC_METACLASS_$_OS_dispatch_queue_attr +_OBJC_CLASS_$_OS_dispatch_source +__OS_dispatch_source_vtable +_OBJC_METACLASS_$_OS_dispatch_source +_OBJC_CLASS_$_OS_dispatch_data +__OS_dispatch_data_vtable +_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_io +__OS_dispatch_io_vtable +_OBJC_METACLASS_$_OS_dispatch_io +_OBJC_CLASS_$_OS_dispatch_operation +__OS_dispatch_operation_vtable +_OBJC_METACLASS_$_OS_dispatch_operation +_OBJC_CLASS_$_OS_dispatch_disk +__OS_dispatch_disk_vtable +_OBJC_METACLASS_$_OS_dispatch_disk diff --git a/xcodeconfig/libdispatch.unexport b/xcodeconfig/libdispatch.unexport new file mode 100644 index 000000000..035bd9c5e --- /dev/null +++ b/xcodeconfig/libdispatch.unexport @@ -0,0 +1,12 @@ +__dispatch_semaphore_vtable +__dispatch_group_vtable +__dispatch_queue_vtable +__dispatch_queue_root_vtable +__dispatch_queue_mgr_vtable +__dispatch_queue_specific_queue_vtable +__dispatch_queue_attr_vtable +__dispatch_source_vtable +__dispatch_data_vtable +__dispatch_io_vtable +__dispatch_operation_vtable +__dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 
e7d44f4f6..e651bfcb0 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -22,7 +22,6 @@ SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS PRODUCT_NAME = libdispatch -PRODUCT_NAME[sdk=iphonesimulator*] = libdispatch_sim EXECUTABLE_PREFIX = LD_DYLIB_INSTALL_NAME = /usr/lib/system/$(EXECUTABLE_NAME) INSTALL_PATH = /usr/lib/system @@ -31,6 +30,10 @@ PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/dispatch PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/dispatch +OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os +OS_PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/os HEADER_SEARCH_PATHS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PROJECT_DIR) INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO @@ -39,10 +42,7 @@ ONLY_ACTIVE_ARCH = NO GCC_VERSION = com.apple.compilers.llvm.clang.1_0 GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES -GCC_CW_ASM_SYNTAX = NO -GCC_ENABLE_CPP_EXCEPTIONS = NO -GCC_ENABLE_CPP_RTTI = NO -GCC_ENABLE_OBJC_EXCEPTIONS = NO +GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported GCC_ENABLE_PASCAL_STRINGS = NO GCC_WARN_SHADOW = YES GCC_WARN_64_TO_32_BIT_CONVERSION = YES @@ -54,14 +54,19 @@ GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s GCC_THUMB_SUPPORT[arch=armv6] = NO GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -GCC_PREPROCESSOR_DEFINITIONS[sdk=iphonesimulator*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_LIBDISPATCH_INIT_CONSTRUCTOR=1 DISPATCH_USE_PTHREAD_ATFORK=1 DISPATCH_USE_DIRECT_TSD=0 WARNING_CFLAGS = -Wall -Wextra -Waggregate-return 
-Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -OTHER_CFLAGS = -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions -fdiagnostics-show-option -fverbose-asm -momit-leaf-frame-pointer +OTHER_CFLAGS = -fdiagnostics-show-option -fverbose-asm +OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions +OTHER_CFLAGS_normal = -momit-leaf-frame-pointer +OTHER_CFLAGS_normal[arch=armv6][sdk=macosx*] = +OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -OTHER_CFLAGS_profile = -DDISPATCH_PROFILE=1 GENERATE_PROFILING_CODE = NO -GENERATE_MASTER_OBJECT_FILE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) UMBRELLA_LDFLAGS = -umbrella System UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) +OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport +OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto +OBJC_LDFLAGS[arch=i386][sdk=macosx*] = +OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) diff --git a/xcodescripts/symlink-headers.sh b/xcodescripts/install-headers.sh similarity index 61% rename from xcodescripts/symlink-headers.sh rename to xcodescripts/install-headers.sh index a062a6f5a..cb5e80495 100755 --- a/xcodescripts/symlink-headers.sh +++ b/xcodescripts/install-headers.sh @@ -1,6 +1,6 @@ #!/bin/bash -e # -# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# Copyright (c) 2012 Apple Inc. All rights reserved. 
# # @APPLE_APACHE_LICENSE_HEADER_START@ # @@ -19,11 +19,11 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # -if [ "$DEPLOYMENT_LOCATION" != YES ]; then - DSTROOT="$CONFIGURATION_BUILD_DIR" - [ -L "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h ] && exit +if [ "${DEPLOYMENT_LOCATION}" != YES ]; then + DSTROOT="${CONFIGURATION_BUILD_DIR}" fi -mv "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h \ - "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/dispatch.h -ln -sf dispatch.h "$DSTROOT$PRIVATE_HEADERS_FOLDER_PATH"/private.h +mkdir -p "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" || true +mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true +cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh index 2d88a26b0..2ea1f6803 100755 --- a/xcodescripts/install-manpages.sh +++ b/xcodescripts/install-manpages.sh @@ -80,6 +80,7 @@ for m in dispatch_get_current_queue dispatch_main dispatch_get_main_queue \ done for m in dispatch_source_set_event_handler dispatch_source_set_event_handler_f \ + dispatch_source_set_registration_handler dispatch_source_set_registration_handler_f \ dispatch_source_set_cancel_handler dispatch_source_set_cancel_handler_f \ dispatch_source_cancel dispatch_source_testcancel \ dispatch_source_get_handle dispatch_source_get_mask \ @@ -98,7 +99,7 @@ done for m in dispatch_io_create_with_path dispatch_io_set_high_water \ dispatch_io_set_low_water dispatch_io_set_interval \ - dispatch_io_close; do + dispatch_io_close dispatch_io_barrier; do ln -f dispatch_io_create.3 ${m}.3 done From 0d35deff14a4a1895eb1a93f11c2ee1226bb3a0a Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Thu, 10 Oct 2013 23:37:40 +0000 Subject: [PATCH 04/18] libdispatch-339.1.9 Imported from libdispatch-339.1.9.tar.gz --- INSTALL | 23 +- Makefile.am | 1 
+ config/config.h | 21 +- configure.ac | 68 +- dispatch/Makefile.am | 1 + dispatch/base.h | 27 +- dispatch/data.h | 36 +- dispatch/dispatch.h | 4 +- dispatch/group.h | 4 +- dispatch/introspection.h | 163 + dispatch/io.h | 94 +- dispatch/object.h | 10 +- dispatch/queue.h | 31 +- dispatch/semaphore.h | 4 +- dispatch/source.h | 165 +- libdispatch.xcodeproj/project.pbxproj | 383 +- .../contents.xcworkspacedata | 6 - man/Makefile.am | 4 - man/dispatch.3 | 2 +- man/dispatch_async.3 | 4 +- man/dispatch_data_create.3 | 2 +- man/dispatch_group_create.3 | 2 +- man/dispatch_io_create.3 | 28 +- man/dispatch_queue_create.3 | 139 +- man/dispatch_semaphore_create.3 | 2 +- man/dispatch_source_create.3 | 124 +- man/dispatch_time.3 | 23 +- os/Makefile.am | 11 + os/object.h | 27 +- os/object_private.h | 11 +- private/Makefile.am | 4 + private/data_private.h | 176 +- private/dispatch.h | 39 - private/introspection_private.h | 727 +++ private/io_private.h | 411 ++ private/mach_private.h | 547 +++ private/private.h | 155 +- private/queue_private.h | 204 +- private/source_private.h | 170 +- resolver/resolved.h | 2 +- resolver/resolver.c | 2 +- src/Makefile.am | 23 +- src/allocator.c | 764 ++++ src/allocator_internal.h | 268 ++ src/apply.c | 165 +- src/benchmark.c | 16 +- src/data.c | 365 +- src/data.m | 177 + src/data_internal.h | 73 +- src/init.c | 441 +- src/internal.h | 335 +- src/introspection.c | 595 +++ src/introspection_internal.h | 119 + src/io.c | 457 +- src/io_internal.h | 26 +- src/object.c | 61 +- src/object.m | 178 +- src/object_internal.h | 104 +- src/once.c | 21 +- src/protocol.defs | 4 +- src/provider.d | 63 +- src/queue.c | 2135 ++++----- src/queue_internal.h | 245 +- src/semaphore.c | 444 +- src/semaphore_internal.h | 61 +- src/shims.h | 25 +- src/shims/atomic.h | 415 +- src/shims/atomic_sfb.h | 115 + src/shims/hw_config.h | 36 +- src/shims/malloc_zone.h | 98 - src/shims/perfmon.h | 26 +- src/shims/time.h | 92 +- src/shims/tsd.h | 37 +- src/source.c | 3916 
+++++++++++++---- src/source_internal.h | 199 +- src/time.c | 72 +- src/trace.h | 178 +- src/transform.c | 159 +- tools/dispatch_timers.d | 89 + tools/dispatch_trace.d | 42 +- .../libdispatch-introspection.xcconfig | 26 + xcodeconfig/libdispatch-static.xcconfig | 25 + xcodeconfig/libdispatch.aliases | 26 +- xcodeconfig/libdispatch.interposable | 28 + xcodeconfig/libdispatch.order | 67 +- xcodeconfig/libdispatch.unexport | 24 +- xcodeconfig/libdispatch.xcconfig | 31 +- xcodeconfig/libdispatch_macosx.aliases | 21 + xcodescripts/install-dtrace.sh | 30 + xcodescripts/install-manpages.sh | 2 +- 90 files changed, 13415 insertions(+), 3361 deletions(-) create mode 100644 dispatch/introspection.h delete mode 100644 libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata create mode 100644 os/Makefile.am delete mode 100644 private/dispatch.h create mode 100644 private/introspection_private.h create mode 100644 private/io_private.h create mode 100644 private/mach_private.h create mode 100644 src/allocator.c create mode 100644 src/allocator_internal.h create mode 100644 src/data.m create mode 100644 src/introspection.c create mode 100644 src/introspection_internal.h create mode 100644 src/shims/atomic_sfb.h delete mode 100644 src/shims/malloc_zone.h create mode 100755 tools/dispatch_timers.d create mode 100644 xcodeconfig/libdispatch-introspection.xcconfig create mode 100644 xcodeconfig/libdispatch-static.xcconfig create mode 100644 xcodeconfig/libdispatch.interposable create mode 100644 xcodeconfig/libdispatch_macosx.aliases create mode 100644 xcodescripts/install-dtrace.sh diff --git a/INSTALL b/INSTALL index bac7e27e8..faf66d231 100644 --- a/INSTALL +++ b/INSTALL @@ -45,6 +45,16 @@ The following configure options may be of general interest: The following options are likely to only be useful when building libdispatch on Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: +--with-apple-objc4-source + + Specify the path to Apple's objc4 package, so 
that appropriate headers can + be found and used. + +--with-apple-libauto-source + + Specify the path to Apple's libauto package, so that appropriate headers + can be found and used. + --disable-libdispatch-init-constructor Do not tag libdispatch's init routine as __constructor, in which case it @@ -61,16 +71,19 @@ Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: Typical configuration commands The following command lines create the configuration required to build -libdispatch for /usr/lib/system on Mac OS X Lion: +libdispatch for /usr/lib/system on OS X MountainLion: sh autogen.sh - ./configure CFLAGS='-arch x86_64 -arch i386 -g -Os' \ + cflags='-arch x86_64 -arch i386 -g -Os' + ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ --prefix=/usr --libdir=/usr/lib/system \ --disable-dependency-tracking --disable-static \ --enable-apple-tsd-optimizations \ - --with-apple-libc-source=/path/to/10.7.0/Libc-763.11 \ - --with-apple-libclosure-source=/path/to/10.7.0/libclosure-53 \ - --with-apple-xnu-source=/path/to/10.7.0/xnu-1699.22.73 + --with-apple-libc-source=/path/to/10.8.0/Libc-825.24 \ + --with-apple-libclosure-source=/path/to/10.8.0/libclosure-59 \ + --with-apple-xnu-source=/path/to/10.8.0/xnu-2050.7.9 \ + --with-apple-objc4-source=/path/to/10.8.0/objc4-532 \ + --with-apple-libauto-source=/path/to/10.8.0/libauto-185.1 make check Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with diff --git a/Makefile.am b/Makefile.am index 4e3167c2a..72f432242 100644 --- a/Makefile.am +++ b/Makefile.am @@ -7,6 +7,7 @@ ACLOCAL_AMFLAGS = -I m4 SUBDIRS= \ dispatch \ man \ + os \ private \ src diff --git a/config/config.h b/config/config.h index 7fe2d6348..d2ad0ffa6 100644 --- a/config/config.h +++ b/config/config.h @@ -48,6 +48,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 +/* Define to 1 if you have the header file. 
*/ +#define HAVE_FCNTL_H 1 + /* Define to 1 if you have the `getprogname' function. */ #define HAVE_GETPROGNAME 1 @@ -63,6 +66,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_LIBKERN_OSCROSSENDIAN_H 1 +/* Define to 1 if you have the header file. */ +#define HAVE_LIBPROC_INTERNAL_H 1 + /* Define if mach is present */ #define HAVE_MACH 1 @@ -81,6 +87,9 @@ /* Define if __builtin_trap marked noreturn */ #define HAVE_NORETURN_BUILTIN_TRAP 1 +/* Define if you have the Objective-C runtime */ +#define HAVE_OBJC 1 + /* Define to 1 if you have the `pthread_key_init_np' function. */ #define HAVE_PTHREAD_KEY_INIT_NP 1 @@ -117,6 +126,9 @@ /* Define to 1 if you have the header file. */ #define HAVE_SYS_CDEFS_H 1 +/* Define to 1 if you have the header file. */ +#define HAVE_SYS_GUARDED_H 1 + /* Define to 1 if you have the header file. */ #define HAVE_SYS_STAT_H 1 @@ -143,13 +155,13 @@ #define PACKAGE_NAME "libdispatch" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "libdispatch 1.1" +#define PACKAGE_STRING "libdispatch 1.2" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "libdispatch" /* Define to the version of this package. */ -#define PACKAGE_VERSION "1.1" +#define PACKAGE_VERSION "1.2" /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 @@ -163,14 +175,11 @@ /* Define to use Mach semaphores */ #define USE_MACH_SEM 1 -/* Define to use Objective-C runtime */ -#define USE_OBJC 1 - /* Define to use POSIX semaphores */ /* #undef USE_POSIX_SEM */ /* Version number of package */ -#define VERSION "1.1" +#define VERSION "1.2" /* Define to 1 if on AIX 3. System headers sometimes define this. 
diff --git a/configure.ac b/configure.ac index 9e751e7ed..223084c61 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ # AC_PREREQ(2.59) -AC_INIT([libdispatch], [1.1], [libdispatch@macosforge.org], [libdispatch]) +AC_INIT([libdispatch], [1.2], [libdispatch@macosforge.org], [libdispatch]) AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) AC_CONFIG_HEADER([config/config.h]) @@ -37,8 +37,22 @@ AC_ARG_WITH([apple-xnu-source], [AS_HELP_STRING([--with-apple-xnu-source], [Specify path to Apple XNU source])], [ apple_xnu_source_libkern_path=${withval}/libkern + apple_xnu_source_bsd_path=${withval}/bsd apple_xnu_source_osfmk_path=${withval}/osfmk - CPPFLAGS="$CPPFLAGS -I$apple_xnu_source_libkern_path" + CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path" +]) + +AC_ARG_WITH([apple-objc4-source], + [AS_HELP_STRING([--with-apple-objc4-source], + [Specify path to Apple objc4 source])], [ + apple_objc4_source_runtime_path=${withval}/runtime +]) + +AC_ARG_WITH([apple-libauto-source], + [AS_HELP_STRING([--with-apple-libauto-source], + [Specify path to Apple libauto source])], [ + apple_libauto_source_path=${withval} + CPPFLAGS="$CPPFLAGS -I$apple_libauto_source_path" ]) AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, @@ -78,17 +92,16 @@ AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], ) AC_USE_SYSTEM_EXTENSIONS +AM_INIT_AUTOMAKE([foreign no-dependencies]) +LT_INIT([disable-static]) + AC_PROG_INSTALL -AC_PROG_LIBTOOL AC_PATH_PROGS(MIG, mig) - AC_PATH_PROG(LEAKS, leaks) AS_IF([test "x$LEAKS" != "x"], [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present])] ) -AM_INIT_AUTOMAKE([foreign]) - DISPATCH_C_ATOMIC_BUILTINS case $dispatch_cv_atomic in @@ -115,7 +128,7 @@ AC_CHECK_HEADER(sys/event.h, [], # Checks for header files. 
# AC_HEADER_STDC -AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h]) +AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h libproc_internal.h]) # hack for pthread_machdep.h's #include AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ @@ -131,20 +144,43 @@ AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ [ln -fsh "$apple_xnu_source_osfmk_path" src/System], [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) ]) +# hack for xnu/bsd/sys/event.h EVFILT_SOCK declaration +AS_IF([test -n "$apple_xnu_source_bsd_path"], [ + CPPFLAGS="$CPPFLAGS -DPRIVATE=1" +]) # -# Parts of the testsuite use CoreFoundation and Foundation +# Check for CoreFoundation, Foundation and objc # AC_CHECK_HEADER([CoreFoundation/CoreFoundation.h], [have_corefoundation=true], [have_corefoundation=false] ) AM_CONDITIONAL(HAVE_COREFOUNDATION, $have_corefoundation) + AC_LANG_PUSH([Objective C]) -AC_CHECK_HEADER([Foundation/Foundation.h], [ - AC_DEFINE(USE_OBJC, 1, [Define to use Objective-C runtime]) - have_foundation=true], [have_foundation=false] +AC_CHECK_HEADER([Foundation/Foundation.h], + [have_foundation=true], [have_foundation=false] ) AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation) +# hack for objc4/runtime/objc-internal.h +AS_IF([test -n "$apple_objc4_source_runtime_path"], [ + saveCPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS -I." 
+ ln -fsh "$apple_objc4_source_runtime_path" objc +]) +AC_CHECK_HEADER([objc/objc-internal.h], [ + AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime]) + have_objc=true], [have_objc=false], + [#include ] +) +AS_IF([test -n "$apple_objc4_source_runtime_path"], [ + rm -f objc + CPPFLAGS="$saveCPPFLAGS" + AC_CONFIG_COMMANDS([src/objc], + [ln -fsh "$apple_objc4_source_runtime_path" src/objc], + [apple_objc4_source_runtime_path="$apple_objc4_source_runtime_path"]) +]) +AM_CONDITIONAL(USE_OBJC, $have_objc) AC_LANG_POP([Objective C]) # @@ -152,9 +188,9 @@ AC_LANG_POP([Objective C]) # of Machisms, including using Mach ports as event sources, etc. # AC_CHECK_HEADER([mach/mach.h], [ - AC_DEFINE(HAVE_MACH, 1, [Define if mach is present]) - AC_DEFINE(__DARWIN_NON_CANCELABLE, 1, [Define if using Darwin $NOCANCEL]) - have_mach=true], [have_mach=false] + AC_DEFINE(HAVE_MACH, 1, [Define if mach is present]) + AC_DEFINE(__DARWIN_NON_CANCELABLE, 1, [Define if using Darwin $NOCANCEL]) + have_mach=true], [have_mach=false] ) AM_CONDITIONAL(USE_MIG, $have_mach) @@ -239,7 +275,7 @@ AC_SUBST([OMIT_LEAF_FP_FLAGS]) AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ saveLDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6" + LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6 -dead_strip" AC_LINK_IFELSE([AC_LANG_PROGRAM([ extern int foo; int foo;], [foo = 0;])], [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) @@ -259,5 +295,5 @@ AC_COMPILE_IFELSE( # # Generate Makefiles. 
# -AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile private/Makefile src/Makefile]) +AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile]) AC_OUTPUT diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am index 5cba7138e..6dc850b21 100644 --- a/dispatch/Makefile.am +++ b/dispatch/Makefile.am @@ -9,6 +9,7 @@ dispatch_HEADERS= \ data.h \ dispatch.h \ group.h \ + introspection.h \ io.h \ object.h \ once.h \ diff --git a/dispatch/base.h b/dispatch/base.h index 2af340e34..af17ccf53 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -82,7 +82,17 @@ #define DISPATCH_ALWAYS_INLINE #endif -#if __GNUC__ + +#if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ + defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" extern __declspec(dllexport) +#elif TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) +#define DISPATCH_EXPORT extern __declspec(dllexport) +#elif TARGET_OS_WIN32 && defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" extern __declspec(dllimport) +#elif TARGET_OS_WIN32 +#define DISPATCH_EXPORT extern __declspec(dllimport) +#elif __GNUC__ #define DISPATCH_EXPORT extern __attribute__((visibility("default"))) #else #define DISPATCH_EXPORT extern @@ -100,6 +110,19 @@ #define DISPATCH_EXPECT(x, v) (x) #endif +#if defined(__has_feature) +#if __has_feature(objc_fixed_enum) +#define DISPATCH_ENUM(name, type, ...) \ + typedef enum : type { __VA_ARGS__ } name##_t +#else +#define DISPATCH_ENUM(name, type, ...) \ + enum { __VA_ARGS__ }; typedef type name##_t +#endif +#else +#define DISPATCH_ENUM(name, type, ...) 
\ + enum { __VA_ARGS__ }; typedef type name##_t +#endif + typedef void (*dispatch_function_t)(void *); #endif diff --git a/dispatch/data.h b/dispatch/data.h index ddba5dcae..d65658478 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -51,8 +51,6 @@ DISPATCH_DECL(dispatch_data); __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; -#ifdef __BLOCKS__ - /*! * @const DISPATCH_DATA_DESTRUCTOR_DEFAULT * @discussion The default destructor for dispatch data objects. @@ -61,6 +59,21 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; */ #define DISPATCH_DATA_DESTRUCTOR_DEFAULT NULL +#ifdef __BLOCKS__ +#if !TARGET_OS_WIN32 +/*! @parseOnly */ +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_##name +#else +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT dispatch_block_t _dispatch_data_destructor_##name +#endif +#else +#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ + DISPATCH_EXPORT const dispatch_function_t \ + _dispatch_data_destructor_##name +#endif /* __BLOCKS__ */ + /*! * @const DISPATCH_DATA_DESTRUCTOR_FREE * @discussion The destructor for dispatch data objects created from a malloc'd @@ -69,8 +82,18 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; */ #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_free; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); +/*! + * @const DISPATCH_DATA_DESTRUCTOR_MUNMAP + * @discussion The destructor for dispatch data objects that have been created + * from buffers that require deallocation with munmap(2). 
+ */ +#define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap) +__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); + +#ifdef __BLOCKS__ /*! * @function dispatch_data_create * Creates a dispatch data object from the given contiguous buffer of memory. If @@ -99,6 +122,7 @@ dispatch_data_create(const void *buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor); +#endif /* __BLOCKS__ */ /*! * @function dispatch_data_get_size @@ -184,6 +208,7 @@ dispatch_data_create_subrange(dispatch_data_t data, size_t offset, size_t length); +#ifdef __BLOCKS__ /*! * @typedef dispatch_data_applier_t * A block to be invoked for every contiguous memory region in a data object. @@ -224,6 +249,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); +#endif /* __BLOCKS__ */ /*! * @function dispatch_data_copy_region @@ -247,8 +273,6 @@ dispatch_data_copy_region(dispatch_data_t data, size_t location, size_t *offset_ptr); -#endif /* __BLOCKS__ */ - __END_DECLS #endif /* __DISPATCH_DATA__ */ diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 119b413c3..cb5af230d 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -37,7 +37,7 @@ #define __OSX_AVAILABLE_STARTING(x, y) #endif -#define DISPATCH_API_VERSION 20111201 +#define DISPATCH_API_VERSION 20130520 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/group.h b/dispatch/group.h index 88e80871a..77420c123 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -138,7 +138,7 @@ dispatch_group_async_f(dispatch_group_t group, * This function will return immediately if there are no blocks associated * with the dispatch group (i.e. the group is empty). * - * The result of calling this function from mulitple threads simultaneously + * The result of calling this function from multiple threads simultaneously * with the same dispatch group is undefined. * * After the successful return of this function, the dispatch group is empty. diff --git a/dispatch/introspection.h b/dispatch/introspection.h new file mode 100644 index 000000000..9e9634118 --- /dev/null +++ b/dispatch/introspection.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_INTROSPECTION__ +#define __DISPATCH_INTROSPECTION__ + +#include + +/*! + * @header + * + * @abstract + * Interposable introspection hooks for libdispatch. + * + * @discussion + * These hooks are only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + */ + +__BEGIN_DECLS + +/*! + * @function dispatch_introspection_hook_queue_create + * + * @abstract + * Interposable hook function called when a dispatch queue was created. 
+ * + * @param queue + * The newly created dispatch queue. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_create(dispatch_queue_t queue); + +/*! + * @function dispatch_introspection_hook_queue_destroy + * + * @abstract + * Interposable hook function called when a dispatch queue is about to be + * destroyed. + * + * @param queue + * The dispatch queue about to be destroyed. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); + +/*! + * @function dispatch_introspection_hook_queue_item_enqueue + * + * @abstract + * Interposable hook function called when an item is about to be enqueued onto + * a dispatch queue. + * + * @param queue + * The dispatch queue enqueued onto. + * + * @param item + * The object about to be enqueued. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, + dispatch_object_t item); + +/*! + * @function dispatch_introspection_hook_queue_item_dequeue + * + * @abstract + * Interposable hook function called when an item was dequeued from a dispatch + * queue. + * + * @param queue + * The dispatch queue dequeued from. + * + * @param item + * The dequeued object. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, + dispatch_object_t item); + +/*! + * @function dispatch_introspection_hook_queue_callout_begin + * + * @abstract + * Interposable hook function called when a client function is about to be + * called out to on a dispatch queue. + * + * @param queue + * The dispatch queue the callout is performed on. + * + * @param context + * The context parameter passed to the function. For a callout to a block, + * this is a pointer to the block object. 
+ * + * @param function + * The client function about to be called out to. For a callout to a block, + * this is the block object's invoke function. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, + void *context, dispatch_function_t function); + +/*! + * @function dispatch_introspection_hook_queue_callout_end + * + * @abstract + * Interposable hook function called after a client function has returned from + * a callout on a dispatch queue. + * + * @param queue + * The dispatch queue the callout was performed on. + * + * @param context + * The context parameter passed to the function. For a callout to a block, + * this is a pointer to the block object. + * + * @param function + * The client function that was called out to. For a callout to a block, + * this is the block object's invoke function. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, + void *context, dispatch_function_t function); + +__END_DECLS + +#endif diff --git a/dispatch/io.h b/dispatch/io.h index dd83e7dfb..569dbdb19 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2010 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -50,8 +50,6 @@ __BEGIN_DECLS */ typedef int dispatch_fd_t; -#ifdef __BLOCKS__ - /*! * @functiongroup Dispatch I/O Convenience API * Convenience wrappers around the dispatch I/O channel API, with simpler @@ -61,6 +59,7 @@ typedef int dispatch_fd_t; * may incur more overhead than by using the dispatch I/O channel API directly. */ +#ifdef __BLOCKS__ /*! 
* @function dispatch_read * Schedule a read operation for asynchronous execution on the specified file @@ -147,6 +146,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, void (^handler)(dispatch_data_t data, int error)); +#endif /* __BLOCKS__ */ /*! * @functiongroup Dispatch I/O Channel API @@ -160,17 +160,6 @@ dispatch_write(dispatch_fd_t fd, */ DISPATCH_DECL(dispatch_io); -/*! - * @typedef dispatch_io_handler_t - * The prototype of I/O handler blocks for dispatch I/O operations. - * - * @param done A flag indicating whether the operation is complete. - * @param data The data object to be handled. - * @param error An errno condition for the operation. - */ -typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, - int error); - /*! * @typedef dispatch_io_type_t * The type of a dispatch I/O channel: @@ -194,6 +183,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, typedef unsigned long dispatch_io_type_t; +#ifdef __BLOCKS__ /*! * @function dispatch_io_create * Create a dispatch I/O channel associated with a file descriptor. The system @@ -217,7 +207,7 @@ typedef unsigned long dispatch_io_type_t; * @param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error - * occurred. + * occurred (invalid type specified). */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT @@ -229,32 +219,32 @@ dispatch_io_create(dispatch_io_type_t type, void (^cleanup_handler)(int error)); /*! -* @function dispatch_io_create_with_path -* Create a dispatch I/O channel associated with a path name. 
The specified -* path, oflag and mode parameters will be passed to open(2) when the first I/O -* operation on the channel is ready to execute and the resulting file -* descriptor will remain open and under the control of the system until the -* channel is closed, an error occurs on the file descriptor or all references -* to the channel are released. At that time the file descriptor will be closed -* and the specified cleanup handler will be enqueued. -* -* @param type The desired type of I/O channel (DISPATCH_IO_STREAM -* or DISPATCH_IO_RANDOM). -* @param path The path to associate with the I/O channel. -* @param oflag The flags to pass to open(2) when opening the file at -* path. -* @param mode The mode to pass to open(2) when creating the file at -* path (i.e. with flag O_CREAT), zero otherwise. -* @param queue The dispatch queue to which the handler should be -* submitted. -* @param cleanup_handler The handler to enqueue when the system -* has closed the file at path. -* @param error An errno condition if control is relinquished -* because channel creation or opening of the -* specified file failed, zero otherwise. -* @result The newly created dispatch I/O channel or NULL if an error -* occurred. -*/ + * @function dispatch_io_create_with_path + * Create a dispatch I/O channel associated with a path name. The specified + * path, oflag and mode parameters will be passed to open(2) when the first I/O + * operation on the channel is ready to execute and the resulting file + * descriptor will remain open and under the control of the system until the + * channel is closed, an error occurs on the file descriptor or all references + * to the channel are released. At that time the file descriptor will be closed + * and the specified cleanup handler will be enqueued. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param path The absolute path to associate with the I/O channel. 
+ * @param oflag The flags to pass to open(2) when opening the file at + * path. + * @param mode The mode to pass to open(2) when creating the file at + * path (i.e. with flag O_CREAT), zero otherwise. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param cleanup_handler The handler to enqueue when the system + * has closed the file at path. + * @param error An errno condition if control is relinquished + * because channel creation or opening of the + * specified file failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type or non-absolute path specified). + */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -293,7 +283,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * @param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error - * occurred. + * occurred (invalid type specified). */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED @@ -304,6 +294,17 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_queue_t queue, void (^cleanup_handler)(int error)); +/*! + * @typedef dispatch_io_handler_t + * The prototype of I/O handler blocks for dispatch I/O operations. + * + * @param done A flag indicating whether the operation is complete. + * @param data The data object to be handled. + * @param error An errno condition for the operation. + */ +typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, + int error); + /*! 
* @function dispatch_io_read * Schedule a read operation for asynchronous execution on the specified I/O @@ -408,6 +409,7 @@ dispatch_io_write(dispatch_io_t channel, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t io_handler); +#endif /* __BLOCKS__ */ /*! * @typedef dispatch_io_close_flags_t @@ -442,6 +444,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); +#ifdef __BLOCKS__ /*! * @function dispatch_io_barrier * Schedule a barrier operation on the specified I/O channel; all previously @@ -460,13 +463,14 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); * While the barrier block is running, it may safely operate on the channel's * underlying file descriptor with fsync(2), lseek(2) etc. (but not close(2)). * - * @param channel The dispatch I/O channel to close. - * @param barrier The flags for the close operation. + * @param channel The dispatch I/O channel to schedule the barrier on. + * @param barrier The barrier block. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); +#endif /* __BLOCKS__ */ /*! 
* @function dispatch_io_get_descriptor @@ -580,8 +584,6 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, dispatch_io_interval_flags_t flags); -#endif /* __BLOCKS__ */ - __END_DECLS #endif /* __DISPATCH_IO__ */ diff --git a/dispatch/object.h b/dispatch/object.h index bc7257a9c..4ae0ab626 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -81,6 +81,9 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + struct dispatch_mach_msg_s *_dmsg; + struct dispatch_timer_aggregate_s *_dta; struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; struct dispatch_data_s *_ddata; @@ -111,19 +114,22 @@ __BEGIN_DECLS * The log output destination can be configured via the LIBDISPATCH_LOG * environment variable, valid values are: YES, NO, syslog, stderr, file. * + * This function is deprecated and will be removed in a future release. + * Objective-C callers may use -debugDescription instead. + * * @param object * The object to introspect. * * @param message * The message to log above and beyond the introspection. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) void diff --git a/dispatch/queue.h b/dispatch/queue.h index b8050f923..9090676d8 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -272,7 +272,7 @@ dispatch_sync_f(dispatch_queue_t queue, */ #ifdef __BLOCKS__ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, void (^block)(size_t)); @@ -305,7 +305,7 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, void *context, @@ -335,10 +335,12 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * the two is not a valid way to test whether code is executing on the * main thread. * + * This function is deprecated and will be removed in a future release. + * * @result * Returns the current queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); @@ -483,22 +485,31 @@ DISPATCH_NOTHROW dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); +/*! + * @const DISPATCH_CURRENT_QUEUE_LABEL + * @discussion Constant to pass to the dispatch_queue_get_label() function to + * retrieve the label of the current queue. + */ +#define DISPATCH_CURRENT_QUEUE_LABEL NULL + /*! * @function dispatch_queue_get_label * * @abstract - * Returns the label of the queue that was specified when the - * queue was created. + * Returns the label of the given queue, as specified when the queue was + * created, or the empty string if a NULL label was specified. 
+ * + * Passing DISPATCH_CURRENT_QUEUE_LABEL will return the label of the current + * queue. * * @param queue - * The result of passing NULL in this parameter is undefined. + * The queue to query, or DISPATCH_CURRENT_QUEUE_LABEL. * * @result - * The label of the queue. The result may be NULL. + * The label of the queue. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t queue); diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index 312a2c2e3..8f68407d7 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -69,7 +69,7 @@ dispatch_semaphore_create(long value); * * @discussion * Decrement the counting semaphore. If the resulting value is less than zero, - * this function waits in FIFO order for a signal to occur before returning. + * this function waits for a signal to occur before returning. * * @param dsema * The semaphore. The result of passing NULL in this parameter is undefined. diff --git a/dispatch/source.h b/dispatch/source.h index e37ecec07..ebbf8b95a 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -30,7 +30,10 @@ #include #include #endif + +#if !TARGET_OS_WIN32 #include +#endif /*! * @header @@ -64,6 +67,16 @@ DISPATCH_DECL(dispatch_source); */ typedef const struct dispatch_source_type_s *dispatch_source_type_t; +#if !TARGET_OS_WIN32 +/*! 
@parseOnly */ +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_source_type_s \ + _dispatch_source_type_##name +#else +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name +#endif + /*! * @const DISPATCH_SOURCE_TYPE_DATA_ADD * @discussion A dispatch source that coalesces data obtained via calls to @@ -73,21 +86,18 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_data_add; +DISPATCH_SOURCE_TYPE_DECL(data_add); /*! * @const DISPATCH_SOURCE_TYPE_DATA_OR * @discussion A dispatch source that coalesces data obtained via calls to - * dispatch_source_merge_data(). A logical OR is used to coalesce the data. + * dispatch_source_merge_data(). A bitwise OR is used to coalesce the data. * The handle is unused (pass zero for now). - * The mask is used to perform a logical AND with the value passed to - * dispatch_source_merge_data(). + * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_data_or; +DISPATCH_SOURCE_TYPE_DECL(data_or); /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND @@ -98,8 +108,7 @@ const struct dispatch_source_type_s _dispatch_source_type_data_or; */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_mach_send; +DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! 
* @const DISPATCH_SOURCE_TYPE_MACH_RECV @@ -109,8 +118,20 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_send; */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_mach_recv; +DISPATCH_SOURCE_TYPE_DECL(mach_recv); + +/*! + * @const DISPATCH_SOURCE_TYPE_MEMORYPRESSURE + * @discussion A dispatch source that monitors the system for changes in + * memory pressure condition. + * The handle is unused (pass zero for now). + * The mask is a mask of desired events from + * dispatch_source_memorypressure_flags_t. + */ +#define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ + (&_dispatch_source_type_memorypressure) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA) +DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! * @const DISPATCH_SOURCE_TYPE_PROC @@ -121,8 +142,7 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_recv; */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_proc; +DISPATCH_SOURCE_TYPE_DECL(proc); /*! * @const DISPATCH_SOURCE_TYPE_READ @@ -133,8 +153,7 @@ const struct dispatch_source_type_s _dispatch_source_type_proc; */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_read; +DISPATCH_SOURCE_TYPE_DECL(read); /*! * @const DISPATCH_SOURCE_TYPE_SIGNAL @@ -144,20 +163,18 @@ const struct dispatch_source_type_s _dispatch_source_type_read; */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_signal; +DISPATCH_SOURCE_TYPE_DECL(signal); /*! 
* @const DISPATCH_SOURCE_TYPE_TIMER * @discussion A dispatch source that submits the event handler block based * on a timer. * The handle is unused (pass zero for now). - * The mask is unused (pass zero for now). + * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_timer; +DISPATCH_SOURCE_TYPE_DECL(timer); /*! * @const DISPATCH_SOURCE_TYPE_VNODE @@ -168,8 +185,7 @@ const struct dispatch_source_type_s _dispatch_source_type_timer; */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_vnode; +DISPATCH_SOURCE_TYPE_DECL(vnode); /*! * @const DISPATCH_SOURCE_TYPE_WRITE @@ -180,8 +196,7 @@ const struct dispatch_source_type_s _dispatch_source_type_vnode; */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -const struct dispatch_source_type_s _dispatch_source_type_write; +DISPATCH_SOURCE_TYPE_DECL(write); /*! * @typedef dispatch_source_mach_send_flags_t @@ -194,6 +209,36 @@ const struct dispatch_source_type_s _dispatch_source_type_write; typedef unsigned long dispatch_source_mach_send_flags_t; +/*! + * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags + * + * @constant DISPATCH_MEMORYPRESSURE_NORMAL + * The system memory pressure condition has returned to normal. + * + * @constant DISPATCH_MEMORYPRESSURE_WARN + * The system memory pressure condition has changed to warning. + * + * @constant DISPATCH_MEMORYPRESSURE_CRITICAL + * The system memory pressure condition has changed to critical. 
+ * + * @discussion + * Elevated memory pressure is a system-wide condition that applications + * registered for this source should react to by changing their future memory + * use behavior, e.g. by reducing cache sizes of newly initiated operations + * until memory pressure returns back to normal. + * NOTE: applications should NOT traverse and discard existing caches for past + * operations when the system memory pressure enters an elevated state, as that + * is likely to trigger VM operations that will further aggravate system memory + * pressure. + */ + +#define DISPATCH_MEMORYPRESSURE_NORMAL 0x01 +#define DISPATCH_MEMORYPRESSURE_WARN 0x02 +#define DISPATCH_MEMORYPRESSURE_CRITICAL 0x04 + +typedef unsigned long dispatch_source_memorypressure_flags_t; + /*! * @typedef dispatch_source_proc_flags_t * Type of dispatch_source_proc flags @@ -254,6 +299,26 @@ typedef unsigned long dispatch_source_proc_flags_t; typedef unsigned long dispatch_source_vnode_flags_t; +/*! + * @typedef dispatch_source_timer_flags_t + * Type of dispatch_source_timer flags + * + * @constant DISPATCH_TIMER_STRICT + * Specifies that the system should make a best effort to strictly observe the + * leeway value specified for the timer via dispatch_source_set_timer(), even + * if that value is smaller than the default leeway value that would be applied + * to the timer otherwise. A minimal amount of leeway will be applied to the + * timer even if this flag is specified. + * + * CAUTION: Use of this flag may override power-saving techniques employed by + * the system and cause higher power consumption, so it must be used with care + * and only when absolutely necessary. + */ + +#define DISPATCH_TIMER_STRICT 0x1 + +typedef unsigned long dispatch_source_timer_flags_t; + __BEGIN_DECLS /*! 
@@ -461,6 +526,7 @@ dispatch_source_testcancel(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: mach port (mach_port_t) * DISPATCH_SOURCE_TYPE_MACH_RECV: mach port (mach_port_t) + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE n/a * DISPATCH_SOURCE_TYPE_PROC: process identifier (pid_t) * DISPATCH_SOURCE_TYPE_READ: file descriptor (int) * DISPATCH_SOURCE_TYPE_SIGNAL: signal number (int) @@ -491,10 +557,11 @@ dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: n/a * DISPATCH_SOURCE_TYPE_SIGNAL: n/a - * DISPATCH_SOURCE_TYPE_TIMER: n/a + * DISPATCH_SOURCE_TYPE_TIMER: dispatch_source_timer_flags_t * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: n/a */ @@ -526,6 +593,7 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: application defined data * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: estimated bytes available to read * DISPATCH_SOURCE_TYPE_SIGNAL: number of signals delivered since @@ -569,30 +637,45 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * Sets a start time, interval, and leeway value for a timer source. * * @discussion - * Calling this function has no effect if the timer source has already been - * canceled. 
Once this function returns, any pending timer data accumulated - * for the previous timer values has been cleared + * Once this function returns, any pending source data accumulated for the + * previous timer values has been cleared; the next fire of the timer will + * occur at 'start', and every 'interval' nanoseconds thereafter until the + * timer source is canceled. + * + * Any fire of the timer may be delayed by the system in order to improve power + * consumption and system performance. The upper limit to the allowable delay + * may be configured with the 'leeway' argument, the lower limit is under the + * control of the system. + * + * For the initial timer fire at 'start', the upper limit to the allowable + * delay is set to 'leeway' nanoseconds. For the subsequent timer fires at + * 'start' + N * 'interval', the upper limit is MIN('leeway','interval'/2). + * + * The lower limit to the allowable delay may vary with process state such as + * visibility of application UI. If the specified timer source was created with + * a mask of DISPATCH_TIMER_STRICT, the system will make a best effort to + * strictly observe the provided 'leeway' value even if it is smaller than the + * current lower limit. Note that a minimal amount of delay is to be expected + * even if this flag is specified. + * + * The 'start' argument also determines which clock will be used for the timer: + * If 'start' is DISPATCH_TIME_NOW or was created with dispatch_time(3), the + * timer is based on mach_absolute_time(). If 'start' was created with + * dispatch_walltime(3), the timer is based on gettimeofday(3). * - * The start time argument also determines which clock will be used for the - * timer. If the start time is DISPATCH_TIME_NOW or created with - * dispatch_time() then the timer is based on mach_absolute_time(). Otherwise, - * if the start time of the timer is created with dispatch_walltime() then the - * timer is based on gettimeofday(3). 
+ * Calling this function has no effect if the timer source has already been + * canceled. * * @param start * The start time of the timer. See dispatch_time() and dispatch_walltime() * for more information. * * @param interval - * The nanosecond interval for the timer. + * The nanosecond interval for the timer. Use DISPATCH_TIME_FOREVER for a + * one-shot timer. * * @param leeway - * A hint given to the system by the application for the amount of leeway, in - * nanoseconds, that the system may defer the timer in order to align with other - * system activity for improved system performance or power consumption. (For - * example, an application might perform a periodic task every 5 minutes, with - * a leeway of up to 30 seconds.) Note that some latency is to be expected for - * all timers even when a leeway value of zero is specified. + * The nanosecond leeway for the timer. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 59d706e53..b465ba7e7 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -32,6 +32,13 @@ /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* 
allocator.c */; }; + 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; 5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; @@ -65,19 +72,63 @@ E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38412A472C4004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E420867016027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867116027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867216027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E420867316027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E421E5F91716ADA10090DC9B /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = E421E5F81716ADA10090DC9B /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; }; E422A0D512A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E422A0D612A557B5005E5BDB /* trace.h in Headers */ = {isa = 
PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; - E4407FAE143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; - E4407FAF143CC984003A9E80 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = E4407FAD143CC984003A9E80 /* dispatch.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5612517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5712517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAC1654400D001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers 
*/ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + E44F9DAF16544026001DCD38 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; }; + E44F9DB01654402B001DCD38 /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E44F9DB11654402E001DCD38 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E44F9DB216544032001DCD38 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E44F9DB316544037001DCD38 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E44F9DB51654403F001DCD38 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E44F9DB616544043001DCD38 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E44F9DB71654404F001DCD38 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E44F9DB816544053001DCD38 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E44F9DB916544056001DCD38 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */ = {isa = 
PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E44F9DBE1654405B001DCD38 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E44F9DBF165440EF001DCD38 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E44F9DC016544115001DCD38 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E44F9DC116544115001DCD38 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; }; E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; + E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E46DBC4214EE10C80001F9F6 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E46DBC4314EE10C80001F9F6 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */ = 
{isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E46DBC4514EE10C80001F9F6 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E46DBC4614EE10C80001F9F6 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E46DBC4714EE10C80001F9F6 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E46DBC4914EE10C80001F9F6 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E46DBC4A14EE10C80001F9F6 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E46DBC4B14EE10C80001F9F6 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E46DBC4C14EE10C80001F9F6 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ 
-121,10 +172,31 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E49F24D3125D57FA0057C971 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E49F24D4125D57FA0057C971 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E4B515BE164B2DA300E003AF /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E4B515BF164B2DA300E003AF /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E4B515C0164B2DA300E003AF /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E4B515C1164B2DA300E003AF /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E4B515C2164B2DA300E003AF /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E4B515C3164B2DA300E003AF /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E4B515C4164B2DA300E003AF /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E4B515C5164B2DA300E003AF /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 
9661E56A0F3E7DDF00749F3E /* object.c */; }; + E4B515C6164B2DA300E003AF /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E4B515C7164B2DA300E003AF /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E4B515C8164B2DA300E003AF /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E4B515C9164B2DA300E003AF /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E4B515CA164B2DA300E003AF /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4B515CB164B2DA300E003AF /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E4B515CC164B2DA300E003AF /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E4B515CD164B2DA300E003AF /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + E4B515CE164B2DA300E003AF /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B515D7164B2DFB00E003AF /* introspection_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E4B515DD164B32E000E003AF /* introspection.c in Sources */ = {isa = PBXBuildFile; fileRef = E4B515DC164B32E000E003AF /* introspection.c */; }; E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; - E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; - E4BA744013A8911B0095BDF1 
/* malloc_zone.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743A13A8911B0095BDF1 /* malloc_zone.h */; }; E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; @@ -149,6 +221,8 @@ E4EC122112514715000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E4EC122312514715000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E4EC122412514715000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E4ECBAA515253C25002C313C /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E4ECBAA615253D17002C313C /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E4FC3264145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3265145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3266145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; @@ -191,6 +265,13 @@ remoteGlobalIDString = D2AAC045055464E500DB518D; remoteInfo = libdispatch; }; + E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E46DBC1A14EE10C80001F9F6; + remoteInfo 
= libdispatch_static; + }; E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -205,9 +286,18 @@ remoteGlobalIDString = E4EC121612514715000DDBD1; remoteInfo = "libdispatch mp resolved"; }; + E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4B51595164B2DA300E003AF; + remoteInfo = "libdispatch introspection"; + }; /* End PBXContainerItemProxy section */ /* Begin PBXFileReference section */ + 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; + 2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; @@ -222,11 +312,11 @@ 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = ""; }; 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = time.c; sourceTree = ""; }; 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = time.h; sourceTree = ""; }; - 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_apply.3; sourceTree = ""; }; - 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_once.3; sourceTree = ""; }; + 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_apply.3; sourceTree = ""; }; + 960F0E7E0F3FB232000D88BF /* dispatch_once.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_once.3; sourceTree = ""; }; 961B99350F3E83980006BC96 /* benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = benchmark.h; sourceTree = ""; }; 961B994F0F3E85C30006BC96 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; - 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_semaphore_create.3; sourceTree = ""; }; + 963FDDE50F3FB6BD00BF2D00 /* dispatch_semaphore_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_semaphore_create.3; sourceTree = ""; }; 965CD6340F3E806200D4E28D /* benchmark.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = benchmark.c; sourceTree = ""; }; 965ECC200F3EAB71004DDD89 /* object_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_internal.h; sourceTree = ""; }; 9661E56A0F3E7DDF00749F3E /* object.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = object.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; @@ -239,40 
+329,54 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; }; + C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; tabWidth = 8; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = ""; }; E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = ""; }; E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path 
= hw_config.h; sourceTree = ""; }; + E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_macosx.aliases; sourceTree = ""; }; + E420866F16027AE500EEE210 /* data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = data.m; sourceTree = ""; }; + E421E5F81716ADA10090DC9B /* introspection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection.h; sourceTree = ""; }; + E421E5FB1716B8730090DC9B /* install-dtrace.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-dtrace.sh"; sourceTree = ""; }; + E421E5FD1716BEA70090DC9B /* libdispatch.interposable */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch.interposable; sourceTree = ""; }; E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = ""; }; - E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; path = provider.d; sourceTree = ""; }; + E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; - E4407FAD143CC984003A9E80 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; 
path = dispatch.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = ""; }; E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; + E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; + E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; + E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-static.xcconfig"; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; + E48AF55916E70FD9004105FF /* io_private.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; - E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_data_create.3; sourceTree = ""; }; - E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_create.3; sourceTree = ""; }; - E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_io_read.3; sourceTree = ""; }; - E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; lastKnownFileType = text; path = dispatch_read.3; sourceTree = ""; }; + E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = ""; }; + E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = ""; }; + E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = text.xcconfig; path = "libdispatch-introspection.xcconfig"; sourceTree = ""; }; + E4B515DC164B32E000E003AF /* introspection.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = introspection.c; sourceTree = ""; }; + E4BA743513A88FE10095BDF1 /* dispatch_data_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_data_create.3; sourceTree = ""; }; + E4BA743613A88FF30095BDF1 /* dispatch_io_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_io_create.3; sourceTree = ""; }; + E4BA743713A88FF30095BDF1 /* dispatch_io_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_io_read.3; sourceTree = ""; }; + E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = ""; }; E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; - E4BA743A13A8911B0095BDF1 /* malloc_zone.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = malloc_zone.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = 
PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E4ECBAA415253C25002C313C /* mach_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_private.h; sourceTree = ""; }; E4FC3263145F46C9002FBDDB /* object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = object.m; sourceTree = ""; }; FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; FC1832A2109923C7003403D5 /* perfmon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perfmon.h; sourceTree = ""; }; @@ -280,11 +384,11 @@ FC1832A4109923C7003403D5 /* tsd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tsd.h; sourceTree = ""; }; FC36279C0E933ED80054F1A3 /* dispatch_queue_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_queue_create.3; sourceTree = ""; }; FC5C9C1D0EADABE3006E462D /* group.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = group.h; sourceTree = ""; }; - FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_after.3; sourceTree = ""; }; - FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_api.3; sourceTree = ""; }; - FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_async.3; sourceTree = ""; }; - FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_group_create.3; sourceTree = ""; }; - 
FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = dispatch_time.3; sourceTree = ""; }; + FC678DE80F97E0C300AB5993 /* dispatch_after.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_after.3; sourceTree = ""; }; + FC678DE90F97E0C300AB5993 /* dispatch_api.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_api.3; sourceTree = ""; }; + FC678DEA0F97E0C300AB5993 /* dispatch_async.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_async.3; sourceTree = ""; }; + FC678DEB0F97E0C300AB5993 /* dispatch_group_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_group_create.3; sourceTree = ""; }; + FC678DEC0F97E0C300AB5993 /* dispatch_time.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_time.3; sourceTree = ""; }; FC7BED8A0E8361E600161930 /* queue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = queue.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; FC7BED8B0E8361E600161930 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue.h; sourceTree = ""; }; FC7BED8D0E8361E600161930 /* source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source.h; sourceTree = ""; }; @@ -335,10 +439,13 @@ 08FB7795FE84155DC02AAC07 /* Source */ = { isa = PBXGroup; children = ( + 2BBF5A62154B64F5002B20F9 /* allocator.c */, 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, 5AAB45BF10D30B79004407EA /* data.c */, + E420866F16027AE500EEE210 /* data.m */, E44EBE3B1251659900645D88 /* init.c */, + E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, 
9661E56A0F3E7DDF00749F3E /* object.c */, E4FC3263145F46C9002FBDDB /* object.m */, @@ -362,6 +469,8 @@ E4EC11C312514302000DDBD1 /* libdispatch_up.a */, E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, E49F24DF125D57FA0057C971 /* libdispatch.dylib */, + E46DBC5714EE10C80001F9F6 /* libdispatch.a */, + E4B515D6164B2DA300E003AF /* libdispatch.dylib */, ); name = Products; sourceTree = ""; @@ -405,9 +514,13 @@ E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, + E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */, + E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, + E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */, + E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; sourceTree = ""; @@ -439,6 +552,7 @@ children = ( E49F251D125D630A0057C971 /* install-manpages.sh */, E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */, + E421E5FB1716B8730090DC9B /* install-dtrace.sh */, E49F251E125D631D0057C971 /* mig-headers.sh */, E482F1CD12DBAB590030614D /* postprocess-headers.sh */, ); @@ -467,9 +581,10 @@ isa = PBXGroup; children = ( 96929D820F3EA1020041FF5D /* atomic.h */, + E4A2C9C4176019760000F809 /* atomic_llsc.h */, + E463024F1761603C00E11F4C /* atomic_sfb.h */, E4BA743913A8911B0095BDF1 /* getprogname.h */, E4128ED513BA9A1700ABB2CB /* hw_config.h */, - E4BA743A13A8911B0095BDF1 /* malloc_zone.h */, FC1832A2109923C7003403D5 /* perfmon.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, @@ -491,6 +606,7 @@ 721F5C5C0F15520500FF03A6 /* semaphore.h */, FC7BED8D0E8361E600161930 /* source.h */, 96032E4C0F5CC8D100241C5F /* time.h */, + E421E5F81716ADA10090DC9B /* introspection.h */, ); name = "Public Headers"; path = 
dispatch; @@ -499,12 +615,14 @@ FC7BEDAF0E83626100161930 /* Private Headers */ = { isa = PBXGroup; children = ( - E4407FAD143CC984003A9E80 /* dispatch.h */, FC7BED930E8361E600161930 /* private.h */, + C913AC0E143BD34800B78976 /* data_private.h */, + E48AF55916E70FD9004105FF /* io_private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, FCEF047F0F5661960067401F /* source_private.h */, + E4ECBAA415253C25002C313C /* mach_private.h */, 961B99350F3E83980006BC96 /* benchmark.h */, - C913AC0E143BD34800B78976 /* data_private.h */, + E4B515D7164B2DFB00E003AF /* introspection_private.h */, ); name = "Private Headers"; path = private; @@ -513,6 +631,7 @@ FC7BEDB60E8363DC00161930 /* Project Headers */ = { isa = PBXGroup; children = ( + 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, FC7BED8F0E8361E600161930 /* internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h */, @@ -521,6 +640,7 @@ 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, E422A0D412A557B5005E5BDB /* trace.h */, + E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, FC1832A0109923B3003403D5 /* shims */, ); @@ -544,9 +664,11 @@ FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, + E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, @@ -566,11 +688,13 @@ FC9C70E8105EC9620074F9CA /* config.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 
/* getprogname.h in Headers */, - E4BA743F13A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, - E4407FAE143CC984003A9E80 /* dispatch.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, + E48AF55A16E70FD9004105FF /* io_private.h in Headers */, + E4ECBAA515253C25002C313C /* mach_private.h in Headers */, + 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */, + E44F9DAC1654400D001DCD38 /* introspection_internal.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -587,9 +711,11 @@ E49F24B1125D57FA0057C971 /* group.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, + E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in Headers */, E49F24B5125D57FA0057C971 /* time.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, + E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, @@ -608,11 +734,44 @@ E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, - E4BA744013A8911B0095BDF1 /* malloc_zone.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, - E4407FAF143CC984003A9E80 /* dispatch.h in Headers */, E454569414746F1B00106147 /* object_private.h in Headers */, E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, + E4ECBAA615253D17002C313C /* mach_private.h in Headers */, + E48AF55B16E72D44004105FF /* io_private.h in Headers */, + 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */, + E43A710615783F7E0012D38D /* data_private.h in Headers */, + E44F9DAD1654400E001DCD38 /* introspection_internal.h in 
Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4B51596164B2DA300E003AF /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, + E44F9DAF16544026001DCD38 /* internal.h in Headers */, + E421E5F91716ADA10090DC9B /* introspection.h in Headers */, + E44F9DB216544032001DCD38 /* object_internal.h in Headers */, + E44F9DB316544037001DCD38 /* queue_internal.h in Headers */, + E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, + E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, + E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, + E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, + E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, + E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, + E44F9DB816544053001DCD38 /* atomic.h in Headers */, + E44F9DB71654404F001DCD38 /* shims.h in Headers */, + E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, + E44F9DBF165440EF001DCD38 /* config.h in Headers */, + E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */, + E44F9DB616544043001DCD38 /* trace.h in Headers */, + E44F9DB916544056001DCD38 /* getprogname.h in Headers */, + E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */, + E44F9DC116544115001DCD38 /* object_private.h in Headers */, + E44F9DC016544115001DCD38 /* object.h in Headers */, + E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */, + E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -635,12 +794,29 @@ dependencies = ( E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, + E4B515DB164B317700E003AF /* PBXTargetDependency */, + E437F0D614F7441F00F0B997 /* PBXTargetDependency */, ); name = libdispatch; productName = libdispatch; productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = 
"com.apple.product-type.library.dynamic"; }; + E46DBC1A14EE10C80001F9F6 /* libdispatch static */ = { + isa = PBXNativeTarget; + buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */; + buildPhases = ( + E46DBC3E14EE10C80001F9F6 /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch static"; + productName = libdispatch; + productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */; + productType = "com.apple.product-type.library.static"; + }; E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = { isa = PBXNativeTarget; buildConfigurationList = E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */; @@ -661,6 +837,23 @@ productReference = E49F24DF125D57FA0057C971 /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; + E4B51595164B2DA300E003AF /* libdispatch introspection */ = { + isa = PBXNativeTarget; + buildConfigurationList = E4B515D3164B2DA300E003AF /* Build configuration list for PBXNativeTarget "libdispatch introspection" */; + buildPhases = ( + E4B51596164B2DA300E003AF /* Headers */, + E4B515BC164B2DA300E003AF /* Sources */, + E421E5FC1716B8E10090DC9B /* Install DTrace Header */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch introspection"; + productName = libdispatch; + productReference = E4B515D6164B2DA300E003AF /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; E4EC118F12514302000DDBD1 /* libdispatch up resolved */ = { isa = PBXNativeTarget; buildConfigurationList = E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */; @@ -702,7 +895,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0420; + LastUpgradeCheck = 0500; }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject 
"libdispatch" */; compatibilityVersion = "Xcode 3.2"; @@ -726,8 +919,10 @@ targets = ( D2AAC045055464E500DB518D /* libdispatch */, E49F24A9125D57FA0057C971 /* libdispatch no resolver */, - E4EC118F12514302000DDBD1 /* libdispatch up resolved */, E4EC121612514715000DDBD1 /* libdispatch mp resolved */, + E4EC118F12514302000DDBD1 /* libdispatch up resolved */, + E4B51595164B2DA300E003AF /* libdispatch introspection */, + E46DBC1A14EE10C80001F9F6 /* libdispatch static */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, ); @@ -777,6 +972,24 @@ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/install-dtrace.sh", + "$(SRCROOT)/src/provider.d", + ); + name = "Install DTrace Header"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(PUBLIC_HEADERS_FOLDER_PATH)/introspection.d", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; E482F1C512DBAA110030614D /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -942,6 +1155,30 @@ 5A27262610F26F1900751FBC /* io.c in Sources */, C9C5F80E143C1771006DC718 /* transform.c in Sources */, E4FC3264145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, + E420867016027AE500EEE210 /* data.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E46DBC3E14EE10C80001F9F6 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, + E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, + E46DBC4214EE10C80001F9F6 /* init.c in Sources */, + E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, + E46DBC4514EE10C80001F9F6 /* once.c in Sources */, + E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, + E46DBC4714EE10C80001F9F6 /* object.c in Sources */, + E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, + E46DBC4914EE10C80001F9F6 /* source.c in Sources */, + E46DBC4A14EE10C80001F9F6 /* time.c in Sources */, + E46DBC4B14EE10C80001F9F6 /* data.c in Sources */, + E46DBC4C14EE10C80001F9F6 /* io.c in Sources */, + E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */, + 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -965,6 +1202,34 @@ E49F24D4125D57FA0057C971 /* io.c in Sources */, C93D6165143E190E00EB9023 /* transform.c in Sources */, E4FC3265145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, + E420867116027AE500EEE210 /* data.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E4B515BC164B2DA300E003AF /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E4B515BD164B2DA300E003AF /* provider.d in Sources */, + 
E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, + E4B515BF164B2DA300E003AF /* resolver.c in Sources */, + E4B515C0164B2DA300E003AF /* init.c in Sources */, + E4B515C1164B2DA300E003AF /* queue.c in Sources */, + E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, + E4B515C3164B2DA300E003AF /* once.c in Sources */, + E4B515C4164B2DA300E003AF /* apply.c in Sources */, + E4B515C5164B2DA300E003AF /* object.c in Sources */, + E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, + E4B515C7164B2DA300E003AF /* source.c in Sources */, + E4B515C8164B2DA300E003AF /* time.c in Sources */, + E4B515C9164B2DA300E003AF /* data.c in Sources */, + E4B515CA164B2DA300E003AF /* io.c in Sources */, + E4B515CB164B2DA300E003AF /* transform.c in Sources */, + E4B515CC164B2DA300E003AF /* object.m in Sources */, + E4B515CD164B2DA300E003AF /* allocator.c in Sources */, + E4B515CE164B2DA300E003AF /* data.m in Sources */, + E4B515DD164B32E000E003AF /* introspection.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -988,6 +1253,8 @@ E4EC11B812514302000DDBD1 /* io.c in Sources */, C93D6166143E190F00EB9023 /* transform.c in Sources */, E4FC3266145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, + E420867316027AE500EEE210 /* data.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1011,6 +1278,8 @@ E4EC122412514715000DDBD1 /* io.c in Sources */, C93D6167143E190F00EB9023 /* transform.c in Sources */, E4FC3267145F46C9002FBDDB /* object.m in Sources */, + 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, + E420867216027AE500EEE210 /* data.m in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1027,6 +1296,11 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; + E437F0D614F7441F00F0B997 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E46DBC1A14EE10C80001F9F6 /* libdispatch static */; + targetProxy = 
E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */; + }; E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC118F12514302000DDBD1 /* libdispatch up resolved */; @@ -1037,6 +1311,11 @@ target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; targetProxy = E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */; }; + E4B515DB164B317700E003AF /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4B51595164B2DA300E003AF /* libdispatch introspection */; + targetProxy = E4B515DA164B317700E003AF /* PBXContainerItemProxy */; + }; /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ @@ -1078,6 +1357,20 @@ }; name = Debug; }; + E46DBC5514EE10C80001F9F6 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + buildSettings = { + }; + name = Release; + }; + E46DBC5614EE10C80001F9F6 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + buildSettings = { + }; + name = Debug; + }; E49F24D9125D57FA0057C971 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { @@ -1090,6 +1383,20 @@ }; name = Debug; }; + E4B515D4164B2DA300E003AF /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; + buildSettings = { + }; + name = Release; + }; + E4B515D5164B2DA300E003AF /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; + buildSettings = { + }; + name = Debug; + }; E4EB382D1089033000C33AD4 /* Debug */ = { isa = XCBuildConfiguration; baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; @@ -1177,6 +1484,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E46DBC5414EE10C80001F9F6 /* Build 
configuration list for PBXNativeTarget "libdispatch static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E46DBC5514EE10C80001F9F6 /* Release */, + E46DBC5614EE10C80001F9F6 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -1186,6 +1502,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E4B515D3164B2DA300E003AF /* Build configuration list for PBXNativeTarget "libdispatch introspection" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E4B515D4164B2DA300E003AF /* Release */, + E4B515D5164B2DA300E003AF /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata deleted file mode 100644 index 23ad996c6..000000000 --- a/libdispatch.xcodeproj/project.xcworkspace/contents.xcworkspacedata +++ /dev/null @@ -1,6 +0,0 @@ - - - - - diff --git a/man/Makefile.am b/man/Makefile.am index 7ad94e21b..0d58d141f 100644 --- a/man/Makefile.am +++ b/man/Makefile.am @@ -77,10 +77,8 @@ install-data-hook: $(LN) -f dispatch_data_create.3 dispatch_data_create_subrange.3 && \ $(LN) -f dispatch_data_create.3 dispatch_data_create_map.3 && \ $(LN) -f dispatch_data_create.3 dispatch_data_apply.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_copy_subrange.3 && \ $(LN) -f dispatch_data_create.3 dispatch_data_copy_region.3 && \ $(LN) -f dispatch_data_create.3 dispatch_data_get_size.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_copy_subrange.3 && \ $(LN) -f dispatch_data_create.3 
dispatch_data_empty.3 && \ $(LN) -f dispatch_io_create.3 dispatch_io_create_with_path.3 && \ $(LN) -f dispatch_io_create.3 dispatch_io_set_high_water.3 && \ @@ -139,10 +137,8 @@ uninstall-hook: dispatch_data_create_subrange.3 \ dispatch_data_create_map.3 \ dispatch_data_apply.3 \ - dispatch_data_copy_subrange.3 \ dispatch_data_copy_region.3 \ dispatch_data_get_size.3 \ - dispatch_data_copy_subrange.3 \ dispatch_data_empty.3 \ dispatch_io_create_with_path.3 \ dispatch_io_set_high_water.3 \ diff --git a/man/dispatch.3 b/man/dispatch.3 index d25e08392..6e5cfed48 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch 3 .Os Darwin diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 9c09bb2a6..99c532d40 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_async 3 .Os Darwin @@ -85,7 +85,7 @@ dispatch_async(my_queue, ^{ .Sh BACKGROUND CONCURRENCY .The .Fn dispatch_async -function may be used to execute trivial backgound tasks on a global concurrent +function may be used to execute trivial background tasks on a global concurrent queue. For example: .Bd -literal dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index b941b34b3..b3a216e4f 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2010-2012 Apple Inc. All rights reserved. 
.Dd December 1, 2010 .Dt dispatch_data_create 3 .Os Darwin diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index 4b8063c9d..d82391e82 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_group_create 3 .Os Darwin diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 7af5b6dc1..83e551401 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2010-2013 Apple Inc. All rights reserved. .Dd December 1, 2010 .Dt dispatch_io_create 3 .Os Darwin @@ -102,32 +102,40 @@ functions create a dispatch I/O channel of provided .Fa type from a file descriptor .Fa fd -or a pathname, respectively. They can be thought of as -analogous to the +or an absolute pathname, respectively. They can be thought of as analogous to +the .Xr fdopen 3 POSIX function and the .Xr fopen 3 -function in the standard C library. For a channel created from a -pathname, the provided +function in the standard C library. For a channel created from a pathname, the +provided .Fa path , .Fa oflag and .Fa mode parameters will be passed to .Xr open 2 -when the first I/O operation on the channel is ready to execute. The provided +when the first I/O operation on the channel is ready to execute. +.Pp +The provided .Fa cleanup_handler block will be submitted to the specified .Fa queue -when all I/O operations on the channel have completed and is is closed or +when all I/O operations on the channel have completed and it is closed or reaches the end of its lifecycle. If an error occurs during channel creation, the .Fa cleanup_handler block will be submitted immediately and passed an .Fa error -parameter with the POSIX error encountered. 
After creating a dispatch I/O -channel from a file descriptor, the application must take care not to modify -that file descriptor until the associated +parameter with the POSIX error encountered. If an invalid +.Fa type +or a non-absolute +.Fa path +argument is specified, these functions will return NULL and the +.Fa cleanup_handler +will not be invoked. After successfully creating a dispatch I/O channel from a +file descriptor, the application must take care not to modify that file +descriptor until the associated .Fa cleanup_handler is invoked, see .Sx "FILEDESCRIPTOR OWNERSHIP" diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index b657abfcf..0ca0648e9 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2008 .Dt dispatch_queue_create 3 .Os Darwin @@ -22,10 +22,6 @@ .Fa "dispatch_queue_t queue" .Fc .Ft dispatch_queue_t -.Fo dispatch_get_current_queue -.Fa void -.Fc -.Ft dispatch_queue_t .Fo dispatch_get_global_queue .Fa "long priority" .Fa "unsigned long flags" @@ -50,35 +46,53 @@ the framework. .Pp All blocks submitted to dispatch queues are dequeued in FIFO order. -By default, queues created with +Queues created with the +.Dv DISPATCH_QUEUE_SERIAL +attribute wait for the previously dequeued block to complete before dequeuing +the next block. A queue with this FIFO completion behavior is usually simply +described as a "serial queue." All memory writes performed by a block dispatched +to a serial queue are guaranteed to be visible to subsequent blocks dispatched +to the same queue. Queues are not bound to any specific thread of execution and +blocks submitted to independent queues may execute concurrently. 
+.Pp +Queues created with the +.Dv DISPATCH_QUEUE_CONCURRENT +attribute may execute dequeued blocks concurrently and support barrier blocks +submitted with the dispatch barrier API. +.Sh CREATION +Queues are created with the .Fn dispatch_queue_create -wait for the previously dequeued block to complete before dequeuing the next -block. This FIFO completion behavior is sometimes simply described as a "serial -queue." All memory writes performed by a block dispatched to a serial queue are -guaranteed to be visible to subsequent blocks dispatched to the same queue. -Queues are not bound to any specific thread of execution and blocks submitted -to independent queues may execute concurrently. Queues, like all dispatch -objects, are reference counted and newly created queues have a reference count -of one. +function. Queues, like all dispatch objects, are reference counted and newly +created queues have a reference count of one. .Pp The optional .Fa label argument is used to describe the purpose of the queue and is useful during -debugging and performance analysis. By convention, clients should pass a -reverse DNS style label. -If a label is provided, it is copied. If a label is not provided, then -.Fn dispatch_queue_get_label -returns an empty C string. -For example: +debugging and performance analysis. If a label is provided, it is copied. +By convention, clients should pass a reverse DNS style label. For example: .Pp -.Bd -literal +.Bd -literal -offset indent my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL); .Ed .Pp The .Fa attr -argument is reserved for future use and must be NULL. +argument specifies the type of queue to create and must be either +.Dv DISPATCH_QUEUE_SERIAL +or +.Dv DISPATCH_QUEUE_CONCURRENT . .Pp +The +.Fn dispatch_queue_get_label +function returns the label provided when the given +.Fa queue +was created (or an empty C string if no label was provided at creation). 
+Passing the constant +.Dv DISPATCH_CURRENT_QUEUE_LABEL +to +.Fn dispatch_queue_get_label +returns the label of the current queue. +.Sh SUSPENSION Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and @@ -88,14 +102,19 @@ respectively. Suspension is checked prior to block execution and is preemptive. .Sh MAIN QUEUE The dispatch framework provides a default serial queue for the application to -use. This queue is accessed via -.Fn dispatch_get_main_queue . +use. This queue is accessed via the +.Fn dispatch_get_main_queue +function. +.Pp Programs must call .Fn dispatch_main at the end of .Fn main -in order to process blocks submitted to the main queue. (See the compatibility -section for exceptions.) +in order to process blocks submitted to the main queue. (See the +.Sx COMPATIBILITY +section for exceptions.) The +.Fn dispatch_main +function never returns. .Sh GLOBAL CONCURRENT QUEUES Unlike the main queue or queues allocated with .Fn dispatch_queue_create , @@ -129,38 +148,6 @@ function to obtain the global queue of given priority. The .Fa flags argument is reserved for future use and must be zero. Passing any value other than zero may result in a NULL return value. -.Pp -.Sh RETURN VALUES -The -.Fn dispatch_queue_create -function returns NULL on failure. -.Pp -The -.Fn dispatch_queue_get_label -function always returns a valid C string. An empty C string is returned if the -.Fa label -was NULL creation time. -.Pp -The -.Fn dispatch_get_main_queue -function returns the default main queue. -.Pp -The -.Fn dispatch_get_current_queue -function always returns a valid queue. When called from within a block -submitted to a dispatch queue, that queue will be returned. If this function is -called from the main thread before -.Fn dispatch_main -is called, then the result of -.Fn dispatch_get_main_queue -is returned. In all other cases, the default target queue will be returned. See -the -.Sx CAVEATS -section below. 
-.Pp -The -.Fn dispatch_main -function never returns. .Sh TARGET QUEUE The .Fn dispatch_set_target_queue @@ -210,16 +197,33 @@ is undefined. .Pp Directly or indirectly setting the target queue of a dispatch queue to itself is undefined. -.Sh CAVEATS -The +.Sh DEPRECATED FUNCTIONS +The following functions are deprecated and will be removed in a future release: +.Bl -item +.It +.Ft dispatch_queue_t +.Fn dispatch_get_current_queue void ; +.El +.Pp +.Fn dispatch_get_current_queue +always returns a valid queue. When called from within a block +submitted to a dispatch queue, that queue will be returned. If this function is +called from the main thread before +.Fn dispatch_main +is called, then the result of +.Fn dispatch_get_main_queue +is returned. In all other cases, the default target queue will be returned. +.Pp +The use of .Fn dispatch_get_current_queue -function is only recommended for debugging and logging purposes. Code must not +is strongly discouraged except for debugging and logging purposes. Code must not make any assumptions about the queue returned, unless it is one of the global queues or a queue the code has itself created. The returned queue may have arbitrary policies that may surprise code that tries to schedule work with the queue. The list of policies includes, but is not limited to, queue width (i.e. serial vs. concurrent), scheduling priority, security credential or filesystem -configuration. +configuration. This function is deprecated and will be removed in a future +release. .Pp It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by @@ -233,6 +237,17 @@ when called on the main thread. Comparing the two is not a valid way to test whether code is executing on the main thread. Foundation/AppKit programs should use [NSThread isMainThread]. POSIX programs may use .Xr pthread_main_np 3 . 
+.Pp +.Fn dispatch_get_current_queue +may return a queue owned by a different subsystem which has already had all +external references to it released. While such a queue will continue to exist +until all blocks submitted to it have completed, attempting to retain it is +forbidden and will trigger an assertion. If Objective-C Automatic Reference +Counting is enabled, any use of the object returned by +.Fn dispatch_get_current_queue +will cause retain calls to be automatically generated, so the use of +.Fn dispatch_get_current_queue +for any reason in code built with ARC is particularly strongly discouraged. .Sh COMPATIBILITY Cocoa applications need not call .Fn dispatch_main . diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 896412b08..81c291546 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2012 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_semaphore_create 3 .Os Darwin diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 89e7d514a..a17e8681f 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2013 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_source_create 3 .Os Darwin @@ -117,6 +117,8 @@ DISPATCH_SOURCE_TYPE_MACH_SEND .It DISPATCH_SOURCE_TYPE_MACH_RECV .It +DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +.It DISPATCH_SOURCE_TYPE_PROC .It DISPATCH_SOURCE_TYPE_READ @@ -170,7 +172,7 @@ function is intended for use with the and .Vt DISPATCH_SOURCE_TYPE_DATA_OR source types. The result of using this function with any other source type is -undefined. Calling this function will atomically add or logical OR the data +undefined. 
Calling this function will atomically add or bitwise OR the data into the source's data, and trigger the delivery of the source's event handler. .Pp .Sh SOURCE EVENT HANDLERS @@ -308,6 +310,40 @@ is unused and should be zero. The event handler block will be submitted to the target queue when a message on the mach port is waiting to be received. .Pp +.Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +.Pp +Sources of this type monitor the system memory pressure condition for state changes. +The +.Fa handle +is unused and should be zero. The +.Fa mask +may be one or more of the following: +.Bl -tag -width "XXDISPATCH_MEMORYPRESSURE_CRITICAL" -compact -offset indent +.It \(bu DISPATCH_MEMORYPRESSURE_NORMAL +The system memory pressure condition has returned to normal. +.It \(bu DISPATCH_MEMORYPRESSURE_WARN +The system memory pressure condition has changed to warning. +.It \(bu DISPATCH_MEMORYPRESSURE_CRITICAL +The system memory pressure condition has changed to critical. +.El +.Pp +The data returned by +.Fn dispatch_source_get_data +indicates which of the events in the +.Fa mask +were observed. +.Pp +Elevated memory pressure is a system-wide condition that applications +registered for this source should react to by changing their future memory use +behavior, e.g. by reducing cache sizes of newly initiated operations until +memory pressure returns back to normal. +.Pp +However, applications should +.Em NOT +traverse and discard existing caches for past operations when the system memory +pressure enters an elevated state, as that is likely to trigger VM operations +that will further aggravate system memory pressure. +.Pp .Vt DISPATCH_SOURCE_TYPE_PROC .Pp Sources of this type monitor processes for state changes. @@ -327,9 +363,6 @@ The process has become another executable image via a call to .Xr execve 2 or .Xr posix_spawn 2 . -.It \(bu DISPATCH_PROC_REAP -The process status has been collected by its parent process via -.Xr wait 2 . 
.It \(bu DISPATCH_PROC_SIGNAL A signal was delivered to the process. .El @@ -397,44 +430,71 @@ signal(SIGTERM, SIG_IGN); .Vt DISPATCH_SOURCE_TYPE_TIMER .Pp Sources of this type periodically submit the event handler block to the target -queue on an interval specified by -.Fn dispatch_source_set_timer . -The +queue. The .Fa handle -and -.Fa mask -arguments are unused and should be zero. -.Pp -A best effort attempt is made to submit the event handler block to the target -queue at the specified time; however, actual invocation may occur at a later -time. +argument is unused and should be zero. .Pp The data returned by .Fn dispatch_source_get_data is the number of times the timer has fired since the last invocation of the event handler block. .Pp -The function +The timer parameters are configured with the .Fn dispatch_source_set_timer -takes as an argument the +function. Once this function returns, any pending source data accumulated for +the previous timer parameters has been cleared; the next fire of the timer will +occur at +.Fa start , +and every +.Fa interval +nanoseconds thereafter until the timer source is canceled. +.Pp +Any fire of the timer may be delayed by the system in order to improve power +consumption and system performance. The upper limit to the allowable delay may +be configured with the +.Fa leeway +argument, the lower limit is under the control of the system. +.Pp +For the initial timer fire at +.Fa start , +the upper limit to the allowable delay is set to +.Fa leeway +nanoseconds. For the subsequent timer fires at .Fa start -time of the timer (initial fire time) represented as a -.Vt dispatch_time_t . -The timer dispatch source will use the same clock as the function used to -create this value. (See -.Xr dispatch_time 3 -for more information.) The +.Li "+ N *" .Fa interval , -in nanoseconds, specifies the period at which the timer should repeat. All -timers will repeat indefinitely until -.Fn dispatch_source_cancel -is called. 
The +the upper limit is +.Li MIN( .Fa leeway , -in nanoseconds, is a hint to the system that it may defer the timer in order to -align with other system activity for improved system performance or reduced -power consumption. (For example, an application might perform a periodic task -every 5 minutes with a leeway of up to 30 seconds.) Note that some latency is -to be expected for all timers even when a value of zero is used. +.Fa interval +.Li "/ 2 )" . +.Pp +The lower limit to the allowable delay may vary with process state such as +visibility of application UI. If the specified timer source was created with a +.Fa mask +of +.Vt DISPATCH_TIMER_STRICT , +the system will make a best effort to strictly observe the provided +.Fa leeway +value even if it is smaller than the current lower limit. Note that a minimal +amount of delay is to be expected even if this flag is specified. +.Pp +The +.Fa start +argument also determines which clock will be used for the timer: If +.Fa start +is +.Vt DISPATCH_TIME_NOW +or was created with +.Xr dispatch_time 3 , +the timer is based on +.Fn mach_absolute_time . +If +.Fa start +was created with +.Xr dispatch_walltime 3 , +the timer is based on +.Xr gettimeofday 3 . .Pp .Em Note : Under the C language, untyped numbers default to the diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index cb65dc5fa..e318e90e9 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2009 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2013 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_time 3 .Os Darwin @@ -80,6 +80,24 @@ parameter is ignored. .Pp Underflow causes the smallest representable value to be returned for a given clock. +.Sh CAVEATS +Under the C language, untyped numbers default to the +.Vt int +type. 
This can lead to truncation bugs when arithmetic operations with other +numbers are expected to generate a +.Vt int64_t +sized result, such as the +.Fa offset +argument to +.Fn dispatch_time +and +.Fn dispatch_walltime . +When in doubt, use +.Vt ull +as a suffix. For example: +.Bd -literal -offset indent +3ull * NSEC_PER_SEC +.Ed .Sh EXAMPLES Create a milestone two seconds in the future: .Bd -literal -offset indent @@ -102,8 +120,9 @@ milestone = dispatch_walltime(&ts, 0); These functions return an abstract value for use with .Fn dispatch_after , .Fn dispatch_group_wait , +.Fn dispatch_semaphore_wait , or -.Fn dispatch_semaphore_wait . +.Fn dispatch_source_set_timer . .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_after 3 , diff --git a/os/Makefile.am b/os/Makefile.am new file mode 100644 index 000000000..2189f16b1 --- /dev/null +++ b/os/Makefile.am @@ -0,0 +1,11 @@ +# +# +# + +osdir=$(includedir)/os + +os_HEADERS= \ + object.h + +noinst_HEADERS= \ + object_private.h diff --git a/os/object.h b/os/object.h index 7d1e5ce85..f8d23a3a1 100644 --- a/os/object.h +++ b/os/object.h @@ -45,11 +45,11 @@ * * This mode requires a platform with the modern Objective-C runtime, the * Objective-C GC compiler option to be disabled, and at least a Mac OS X 10.8 - * deployment target. + * or iOS 6.0 deployment target. */ #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT -#if defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \ +#if defined(__OBJC__) && defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \ __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \ __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0) #define OS_OBJECT_HAVE_OBJC_SUPPORT 1 @@ -71,7 +71,7 @@ #endif #if OS_OBJECT_USE_OBJC -#import +#import #define OS_OBJECT_CLASS(name) OS_##name #define OS_OBJECT_DECL(name, ...) 
\ @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ @@ -79,23 +79,36 @@ typedef NSObject *name##_t #define OS_OBJECT_DECL_SUBCLASS(name, super) \ OS_OBJECT_DECL(name, ) -#if defined(__has_attribute) && __has_attribute(ns_returns_retained) +#if defined(__has_attribute) +#if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else #define OS_OBJECT_RETURNS_RETAINED #endif -#if defined(__has_feature) && __has_feature(objc_arc) +#else +#define OS_OBJECT_RETURNS_RETAINED +#endif +#if defined(__has_feature) +#if __has_feature(objc_arc) #define OS_OBJECT_BRIDGE __bridge #else #define OS_OBJECT_BRIDGE #endif +#else +#define OS_OBJECT_BRIDGE +#endif #ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE -#if defined(__has_feature) && __has_feature(objc_arc) || \ - defined(__clang_analyzer__) +#if defined(__clang_analyzer__) +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 +#elif defined(__has_feature) +#if __has_feature(objc_arc) #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 #else #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif +#else +#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 +#endif #endif #else /*! 
@parseOnly */ diff --git a/os/object_private.h b/os/object_private.h index 235e0d305..f5d326823 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -53,11 +53,15 @@ #define OS_OBJECT_EXPORT extern #endif -#if OS_OBJECT_USE_OBJC && defined(__has_feature) && __has_feature(objc_arc) +#if OS_OBJECT_USE_OBJC && defined(__has_feature) +#if __has_feature(objc_arc) #define _OS_OBJECT_OBJC_ARC 1 #else #define _OS_OBJECT_OBJC_ARC 0 #endif +#else +#define _OS_OBJECT_OBJC_ARC 0 +#endif #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX @@ -103,6 +107,11 @@ OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW _os_object_t _os_object_alloc(const void *cls, size_t size); +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW +_os_object_t +_os_object_alloc_realized(const void *cls, size_t size); + __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void _os_object_dealloc(_os_object_t object); diff --git a/private/Makefile.am b/private/Makefile.am index 488ef520e..de1239168 100644 --- a/private/Makefile.am +++ b/private/Makefile.am @@ -4,6 +4,10 @@ noinst_HEADERS= \ benchmark.h \ + data_private.h \ + introspection_private.h \ + io_private.h \ + mach_private.h \ private.h \ queue_private.h \ source_private.h diff --git a/private/data_private.h b/private/data_private.h index 6562b37d3..df60d2869 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,17 +34,15 @@ __BEGIN_DECLS -#ifdef __BLOCKS__ - /*! * @const DISPATCH_DATA_DESTRUCTOR_NONE - * @discussion The destructor for dispatch data objects that require no - * management. This can be used to allow a data object to efficiently - * encapsulate data that should not be copied or freed by the system. 
+ * @discussion The destructor for dispatch data objects that require no buffer + * memory management. This can be used to allow a data object to efficiently + * encapsulate buffers that should not be copied or freed by the system. */ #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_none; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); /*! * @const DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE @@ -54,7 +52,124 @@ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_none; #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ (_dispatch_data_destructor_vm_deallocate) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_vm_deallocate; +DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); + +/*! + * @function dispatch_data_create_f + * Creates a dispatch data object from the given contiguous buffer of memory. If + * a non-default destructor is provided, ownership of the buffer remains with + * the caller (i.e. the bytes will not be copied). The last release of the data + * object will result in the invocation of the specified destructor function on + * specified queue to free the buffer (passed as the context parameter). + * + * If the DISPATCH_DATA_DESTRUCTOR_FREE destructor is provided the buffer will + * be freed via free(3) and the queue argument ignored. + * + * If the DISPATCH_DATA_DESTRUCTOR_DEFAULT destructor is provided, data object + * creation will copy the buffer into internal memory managed by the system. + * + * @param buffer A contiguous buffer of data. + * @param size The size of the contiguous buffer of data. + * @param queue The queue to which the destructor should be submitted. + * @param destructor The destructor function responsible for freeing the + * data buffer when it is no longer needed. + * @result A newly created dispatch data object. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_f(const void *buffer, + size_t size, + dispatch_queue_t queue, + dispatch_function_t destructor); + +/*! + * @function dispatch_data_create_alloc + * Creates a dispatch data object representing a newly allocated memory region + * of the given size. If a non-NULL reference to a pointer is provided, it is + * filled with the location of the memory region. + * + * It is the responsibility of the application to ensure that the data object + * becomes immutable (i.e. the returned memory region is not further modified) + * once the dispatch data object is passed to other API. + * + * @param size The size of the required allocation. + * @param buffer_ptr A pointer to a pointer variable to be filled with the + * location of the newly allocated memory region, or NULL. + * @result A newly created dispatch data object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_data_t +dispatch_data_create_alloc(size_t size, void** buffer_ptr); + +/*! + * @typedef dispatch_data_applier_function_t + * A function to be invoked for every contiguous memory region in a data object. + * + * @param context Application-defined context parameter. + * @param region A data object representing the current region. + * @param offset The logical offset of the current region to the start + * of the data object. + * @param buffer The location of the memory for the current region. + * @param size The size of the memory for the current region. + * @result A Boolean indicating whether traversal should continue. + */ +typedef bool (*dispatch_data_applier_function_t)(void *context, + dispatch_data_t region, size_t offset, const void *buffer, size_t size); + +/*! 
+ * @function dispatch_data_apply_f + * Traverse the memory regions represented by the specified dispatch data object + * in logical order and invoke the specified function once for every contiguous + * memory region encountered. + * + * Each invocation of the function is passed a data object representing the + * current region and its logical offset, along with the memory location and + * extent of the region. These allow direct read access to the memory region, + * but are only valid until the passed-in region object is released. Note that + * the region object is released by the system when the function returns, it is + * the responsibility of the application to retain it if the region object or + * the associated memory location are needed after the function returns. + * + * @param data The data object to traverse. + * @param context The application-defined context to pass to the function. + * @param applier The function to be invoked for every contiguous memory + * region in the data object. + * @result A Boolean indicating whether traversal completed + * successfully. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +bool +dispatch_data_apply_f(dispatch_data_t data, void *context, + dispatch_data_applier_function_t applier); + +#if TARGET_OS_MAC +/*! + * @function dispatch_data_make_memory_entry + * Return a mach memory entry for the memory regions represented by the + * specified dispatch data object. + * + * For data objects created with the DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE + * destructor, directly makes a memory entry from the represented region; + * otherwise, makes a memory entry from newly allocated pages containing a copy + * of the represented memory regions. + * + * @param data The data object to make a memory entry for. + * @result A mach port for the newly made memory entry, or + * MACH_PORT_NULL if an error occurred.
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +mach_port_t +dispatch_data_make_memory_entry(dispatch_data_t dd); +#endif + +/*! + * @functiongroup Dispatch data transform SPI + */ /*! * @typedef dispatch_data_format_type_t @@ -65,6 +180,16 @@ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_vm_deallocate; */ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; +#if !TARGET_OS_WIN32 +#define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_data_format_type_s \ + _dispatch_data_format_type_##name +#else +#define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_data_format_type_s \ + _dispatch_data_format_type_##name +#endif + /*! * @const DISPATCH_DATA_FORMAT_TYPE_NONE * @discussion A data format denoting that the given input or output format is, @@ -72,8 +197,7 @@ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; */ #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_none; +DISPATCH_DATA_FORMAT_TYPE_DECL(none); /*! * @const DISPATCH_DATA_FORMAT_TYPE_BASE32 @@ -84,8 +208,19 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_none; */ #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_base32; +DISPATCH_DATA_FORMAT_TYPE_DECL(base32); + +/*! + * @const DISPATCH_DATA_FORMAT_TYPE_BASE32HEX + * @discussion A data format denoting that the given input or output format is, + * or should be, encoded in Base32Hex (RFC 4648) format. On input, this format + * will skip whitespace characters. Cannot be used in conjunction with UTF + * format types. 
+ */ +#define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \ + (&_dispatch_data_format_type_base32hex) +__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); /*! * @const DISPATCH_DATA_FORMAT_TYPE_BASE64 @@ -96,8 +231,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_base32; */ #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_base64; +DISPATCH_DATA_FORMAT_TYPE_DECL(base64); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTF8 @@ -107,8 +241,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_base64; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTF16LE @@ -118,8 +251,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); /*! * @const DISPATCH_DATA_FORMAT_TYPE_UTF16BE @@ -129,8 +261,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16le; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); /*! 
* @const DISPATCH_DATA_FORMAT_TYPE_UTFANY @@ -142,8 +273,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf16be; */ #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) -DISPATCH_EXPORT -const struct dispatch_data_format_type_s _dispatch_data_format_type_utf_any; +DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); /*! * @function dispatch_data_create_transform @@ -171,8 +301,6 @@ dispatch_data_create_with_transform(dispatch_data_t data, dispatch_data_format_type_t input_type, dispatch_data_format_type_t output_type); -#endif /* __BLOCKS__ */ - __END_DECLS #endif // __DISPATCH_DATA_PRIVATE__ diff --git a/private/dispatch.h b/private/dispatch.h deleted file mode 100644 index 3f1f37457..000000000 --- a/private/dispatch.h +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright (c) 2011 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -/* - * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch - * which are subject to change in future releases of Mac OS X. Any applications - * relying on these interfaces WILL break. - */ - -#ifndef __DISPATCH_PRIVATE_LEGACY__ -#define __DISPATCH_PRIVATE_LEGACY__ - -#define DISPATCH_NO_LEGACY 1 -#ifdef DISPATCH_LEGACY // -#error "Dispatch legacy API unavailable." 
-#endif - -#ifndef __DISPATCH_BUILDING_DISPATCH__ -#include_next -#endif - -#endif // __DISPATCH_PRIVATE_LEGACY__ diff --git a/private/introspection_private.h b/private/introspection_private.h new file mode 100644 index 000000000..727d9715a --- /dev/null +++ b/private/introspection_private.h @@ -0,0 +1,727 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_INTROSPECTION_PRIVATE__ +#define __DISPATCH_INTROSPECTION_PRIVATE__ + +/*! + * @header + * + * @abstract + * Introspection SPI for libdispatch. + * + * @discussion + * This SPI is only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + * + * NOTE: these functions are _not_ exported from the shared library, they are + * only intended to be called from a debugger context while the rest of the + * process is suspended. 
+ */ + +#ifndef __BEGIN_DECLS +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif +#endif + +__BEGIN_DECLS + +#ifndef __DISPATCH_INDIRECT__ +/* + * Typedefs of opaque types, for direct inclusion of header in lldb expressions + */ +typedef __typeof__(sizeof(int)) size_t; +typedef struct _opaque_pthread_t *pthread_t; +typedef void (*dispatch_function_t)(void *); +typedef struct Block_layout *dispatch_block_t; +typedef struct dispatch_continuation_s *dispatch_continuation_t; +typedef struct dispatch_queue_s *dispatch_queue_t; +typedef struct dispatch_source_s *dispatch_source_t; +typedef struct dispatch_group_s *dispatch_group_t; +typedef struct dispatch_object_s *dispatch_object_t; +#endif + +/*! + * @typedef dispatch_introspection_versions_s + * + * @abstract + * A structure of version and size information of introspection structures. + * + * @field introspection_version + * Version of overall dispatch_introspection SPI. + * + * @field queue_version + * Version of dispatch_introspection_queue_s structure. + * + * @field queue_size + * Size of dispatch_introspection_queue_s structure. + * + * @field source_version + * Version of dispatch_introspection_source_s structure. + * + * @field source_size + * Size of dispatch_introspection_source_s structure. 
+ */ + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT const struct dispatch_introspection_versions_s { + unsigned long introspection_version; + unsigned long hooks_version; + size_t hooks_size; + unsigned long queue_item_version; + size_t queue_item_size; + unsigned long queue_block_version; + size_t queue_block_size; + unsigned long queue_function_version; + size_t queue_function_size; + unsigned long queue_thread_version; + size_t queue_thread_size; + unsigned long object_version; + size_t object_size; + unsigned long queue_version; + size_t queue_size; + unsigned long source_version; + size_t source_size; +} dispatch_introspection_versions; + +/*! + * @typedef dispatch_introspection_queue_block_s + * + * @abstract + * A structure of introspection information for a block item enqueued on a + * dispatch queue. + * + * @field continuation + * Pointer to enqueued item. + * + * @field target_queue + * Target queue of item (may be different to the queue the item is currently + * enqueued on). + * + * @field block + * Block for enqueued item. + * + * @field block_invoke + * Function pointer of block for enqueued item. + * + * @field group + * Group containing enqueued item (may be NULL). + * + * @field waiter + * Thread waiting for completion of enqueued item (NULL if sync == 0). + * + * @field barrier + * Item is a barrier on the queue (all items on serial queues are barriers). + * + * @field sync + * Item was enqueued by a dispatch_sync/dispatch_barrier_sync. + * + * @field apply + * Item is part of a dispatch_apply. + */ +typedef struct dispatch_introspection_queue_block_s { + dispatch_continuation_t continuation; + dispatch_queue_t target_queue; + dispatch_block_t block; + dispatch_function_t block_invoke; + dispatch_group_t group; + pthread_t waiter; + unsigned long barrier:1, + sync:1, + apply:1; +} dispatch_introspection_queue_block_s; +typedef dispatch_introspection_queue_block_s + *dispatch_introspection_queue_block_t; + +/*! 
+ * @typedef dispatch_introspection_queue_function_s + * + * @abstract + * A structure of introspection information for a function & context pointer + * item enqueued on a dispatch queue. + * + * @field continuation + * Pointer to enqueued item. + * + * @field target_queue + * Target queue of item (may be different to the queue the item is currently + * enqueued on). + * + * @field context + * Context in enqueued item. + * + * @field block_invoke + * Function pointer in enqueued item. + * + * @field group + * Group containing enqueued item (may be NULL). + * + * @field waiter + * Thread waiting for completion of enqueued item (NULL if sync == 0). + * + * @field barrier + * Item is a barrier on the queue (all items on serial queues are barriers). + * + * @field sync + * Item was enqueued by a dispatch_sync_f/dispatch_barrier_sync_f. + * + * @field apply + * Item is part of a dispatch_apply_f. + */ +typedef struct dispatch_introspection_queue_function_s { + dispatch_continuation_t continuation; + dispatch_queue_t target_queue; + void *context; + dispatch_function_t function; + dispatch_group_t group; + pthread_t waiter; + unsigned long barrier:1, + sync:1, + apply:1; +} dispatch_introspection_queue_function_s; +typedef dispatch_introspection_queue_function_s + *dispatch_introspection_queue_function_t; + +/*! + * @typedef dispatch_introspection_object_s + * + * @abstract + * A structure of introspection information for a generic dispatch object. + * + * @field object + * Pointer to object. + * + * @field target_queue + * Target queue of object (may be different to the queue the object is + * currently enqueued on). + * + * @field type + * Object class pointer. + * + * @field kind + * String describing the object type. 
+ */ +typedef struct dispatch_introspection_object_s { + dispatch_continuation_t object; + dispatch_queue_t target_queue; + void *type; + const char *kind; +} dispatch_introspection_object_s; +typedef dispatch_introspection_object_s *dispatch_introspection_object_t; + +/*! + * @typedef dispatch_introspection_queue_s + * + * @abstract + * A structure of introspection information for a dispatch queue. + * + * @field queue + * Pointer to queue object. + * + * @field target_queue + * Target queue of queue (may be different to the queue the queue is currently + * enqueued on). NULL indicates queue is a root queue. + * + * @field label + * Pointer to queue label. + * + * @field serialnum + * Queue serial number (unique per process). + * + * @field width + * Queue width (1: serial queue, UINT_MAX: concurrent queue). + * + * @field suspend_count + * Number of times the queue has been suspended. + * + * @field enqueued + * Queue is enqueued on another queue. + * + * @field barrier + * Queue is executing a barrier item. + * + * @field draining + * Queue is being drained (cannot get queue items). + * + * @field global + * Queue is a global queue. + * + * @field main + * Queue is the main queue. + */ +typedef struct dispatch_introspection_queue_s { + dispatch_queue_t queue; + dispatch_queue_t target_queue; + const char *label; + unsigned long serialnum; + unsigned int width; + unsigned int suspend_count; + unsigned long enqueued:1, + barrier:1, + draining:1, + global:1, + main:1; +} dispatch_introspection_queue_s; +typedef dispatch_introspection_queue_s *dispatch_introspection_queue_t; + +/*! + * @typedef dispatch_introspection_source_s + * + * @abstract + * A structure of introspection information for a dispatch source. + * + * @field source + * Pointer to source object. + * + * @field target_queue + * Target queue of source (may be different to the queue the source is currently + * enqueued on). 
+ * + * @field type + * Source type (kevent filter) + * + * @field handle + * Source handle (monitored entity). + * + * @field context + * Context pointer passed to source handler. Pointer to handler block if + * handler_is_block == 1. + * + * @field handler + * Source handler function. Function pointer of handler block if + * handler_is_block == 1. + * + * @field suspend_count + * Number of times the source has been suspended. + * + * @field enqueued + * Source is enqueued on a queue. + * + * @field handler_is_block + * Source handler is a block. + * + * @field timer + * Source is a timer. + * + * @field after + * Source is a dispatch_after timer. + */ +typedef struct dispatch_introspection_source_s { + dispatch_source_t source; + dispatch_queue_t target_queue; + unsigned long type; + unsigned long handle; + void *context; + dispatch_function_t handler; + unsigned int suspend_count; + unsigned long enqueued:1, + handler_is_block:1, + timer:1, + after:1; +} dispatch_introspection_source_s; +typedef dispatch_introspection_source_s *dispatch_introspection_source_t; + +/*! + * @typedef dispatch_introspection_queue_thread_s + * + * @abstract + * A structure of introspection information about a thread executing items for + * a dispatch queue. + * + * @field object + * Pointer to thread object. + * + * @field thread + * Thread executing items for a queue. + * + * @field queue + * Queue introspection information. The queue.queue field is NULL if this thread + * is not currently executing items for a queue. + */ +typedef struct dispatch_introspection_queue_thread_s { + dispatch_continuation_t object; + pthread_t thread; + dispatch_introspection_queue_s queue; +} dispatch_introspection_queue_thread_s; +typedef dispatch_introspection_queue_thread_s + *dispatch_introspection_queue_thread_t; + +/*! + * @enum dispatch_introspection_queue_item_type + * + * @abstract + * Types of items enqueued on a dispatch queue. 
+ */ +enum dispatch_introspection_queue_item_type { + dispatch_introspection_queue_item_type_none = 0x0, + dispatch_introspection_queue_item_type_block = 0x11, + dispatch_introspection_queue_item_type_function = 0x12, + dispatch_introspection_queue_item_type_object = 0x100, + dispatch_introspection_queue_item_type_queue = 0x101, + dispatch_introspection_queue_item_type_source = 0102, +}; + +/*! + * @typedef dispatch_introspection_queue_item_s + * + * @abstract + * A structure of introspection information about an item enqueued on a + * dispatch queue. + * + * @field type + * Indicates which of the union members applies to this item. + */ +typedef struct dispatch_introspection_queue_item_s { + unsigned long type; // dispatch_introspection_queue_item_type + union { + dispatch_introspection_queue_block_s block; + dispatch_introspection_queue_function_s function; + dispatch_introspection_object_s object; + dispatch_introspection_queue_s queue; + dispatch_introspection_source_s source; + }; +} dispatch_introspection_queue_item_s; +typedef dispatch_introspection_queue_item_s + *dispatch_introspection_queue_item_t; + +/*! + * @typedef dispatch_introspection_hook_queue_create_t + * + * @abstract + * A function pointer called when a dispatch queue is created. + * + * @param queue_info + * Pointer to queue introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_create_t)( + dispatch_introspection_queue_t queue_info); + +/*! + * @typedef dispatch_introspection_hook_queue_dispose_t + * + * @abstract + * A function pointer called when a dispatch queue is destroyed. + * + * @param queue_info + * Pointer to queue introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_dispose_t)( + dispatch_introspection_queue_t queue_info); + +/*! + * @typedef dispatch_introspection_hook_queue_item_enqueue_t + * + * @abstract + * A function pointer called when an item is enqueued onto a dispatch queue. + * + * @param queue + * Pointer to queue. 
+ * + * @param item + * Pointer to item introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_item_enqueue_t)( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @typedef dispatch_introspection_hook_queue_item_dequeue_t + * + * @abstract + * A function pointer called when an item is dequeued from a dispatch queue. + * + * @param queue + * Pointer to queue. + * + * @param item + * Pointer to item introspection structure. + */ +typedef void (*dispatch_introspection_hook_queue_item_dequeue_t)( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @typedef dispatch_introspection_hooks_s + * + * @abstract + * A structure of function pointer hooks into libdispatch. + */ + +typedef struct dispatch_introspection_hooks_s { + dispatch_introspection_hook_queue_create_t queue_create; + dispatch_introspection_hook_queue_dispose_t queue_dispose; + dispatch_introspection_hook_queue_item_enqueue_t queue_item_enqueue; + dispatch_introspection_hook_queue_item_dequeue_t queue_item_dequeue; + void *_reserved[6]; +} dispatch_introspection_hooks_s; +typedef dispatch_introspection_hooks_s *dispatch_introspection_hooks_t; + +/*! + * @function dispatch_introspection_get_queues + * + * @abstract + * Retrieve introspection information about all dispatch queues in the process, + * in batches of specified size. + * + * @discussion + * Retrieving queue information and iterating through the list of all queues + * must take place from a debugger context (while the rest of the process is + * suspended). + * + * @param start + * Starting point for this batch of queue information, as returned by a previous + * call to _dispatch_introspection_get_queues(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of queues to introspect. + * + * @param queues + * Array to fill with queue information.
If less than 'count' queues are left + * in this batch, the end of valid entries in the array will be indicated + * by an entry with NULL queue member. + * + * @result + * Queue to pass to another call to _dispatch_introspection_get_queues() to + * retrieve information about the next batch of queues. May be NULL if there + * are no more queues to iterate over. + */ +extern dispatch_queue_t +dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, + dispatch_introspection_queue_t queues); + +/*! + * @function dispatch_introspection_get_queue_threads + * + * @abstract + * Retrieve introspection information about all threads in the process executing + * items for dispatch queues, in batches of specified size. + * + * @discussion + * Retrieving thread information and iterating through the list of all queue + * threads must take place from a debugger context (while the rest of the + * process is suspended). + * + * @param start + * Starting point for this batch of thread information, as returned by a + * previous call to _dispatch_introspection_get_queue_threads(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of queue threads to introspect. + * + * @param threads + * Array to fill with queue thread information. If less than 'count' threads are + * left in this batch, the end of valid entries in the array will be indicated + * by an entry with NULL object member. + * + * @result + * Object to pass to another call to _dispatch_introspection_get_queue_threads() + * to retrieve information about the next batch of queue threads. May be NULL if + * there are no more threads to iterate over. + */ +extern dispatch_continuation_t +dispatch_introspection_get_queue_threads(dispatch_continuation_t start, + size_t count, dispatch_introspection_queue_thread_t threads); + +/*! 
+ * @function dispatch_introspection_queue_get_items + * + * @abstract + * Retrieve introspection information about all items enqueued on a queue, in + * batches of specified size. + * + * @discussion + * Retrieving queue item information and iterating through a queue must take + * place from a debugger context (while the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @param start + * Starting point for this batch of queue item information, as returned by a + * previous call to _dispatch_introspection_queue_get_items(). + * Pass NULL to retrieve the initial batch. + * + * @param count + * Number of items to introspect. + * + * @param items + * Array to fill with queue item information. If less than 'count' queues are + * left in this batch, the end of valid entries in the array will be indicated + * by an entry with type dispatch_introspection_queue_item_type_none. + * + * @result + * Item to pass to another call to _dispatch_introspection_queue_get_items() to + * retrieve information about the next batch of queue items. May be NULL if + * there are no more items to iterate over. + */ +extern dispatch_continuation_t +dispatch_introspection_queue_get_items(dispatch_queue_t queue, + dispatch_continuation_t start, size_t count, + dispatch_introspection_queue_item_t items); + +/*! + * @function dispatch_introspection_queue_get_info + * + * @abstract + * Retrieve introspection information about a specified dispatch queue. + * + * @discussion + * Retrieving queue information must take place from a debugger context (while + * the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @result + * Queue information struct. + */ +extern dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t queue); + +/*! + * @function dispatch_introspection_queue_item_get_info + * + * @abstract + * Retrieve introspection information about a specified dispatch queue item. 
+ * + * @discussion + * Retrieving queue item information must take place from a debugger context + * (while the rest of the process is suspended). + * + * @param queue + * Queue to introspect. + * + * @param item + * Item to introspect. + * + * @result + * Queue item information struct. + */ +extern dispatch_introspection_queue_item_s +dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, + dispatch_continuation_t item); + +/*! + * @function dispatch_introspection_hooks_install + * + * @abstract + * Install hook functions into libdispatch. + * + * @discussion + * Installing hook functions must take place from a debugger context (while the + * rest of the process is suspended). + * + * The caller is responsible for implementing chaining to the hooks that were + * previously installed (if any). + * + * @param hooks + * Pointer to structure of hook function pointers. Any of the structure members + * may be NULL to indicate that the hook in question should not be installed. + * The structure is copied on input and filled with the previously installed + * hooks on output. + */ + +extern void +dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); + +/*! + * @function dispatch_introspection_hook_callouts_enable + * + * @abstract + * Enable hook callout functions in libdispatch that a debugger can break on + * and get introspection arguments even if there are no hook functions + * installed via dispatch_introspection_hooks_install(). + * + * @discussion + * Enabling hook callout functions must take place from a debugger context + * (while the rest of the process is suspended). + * + * @param enable + * Pointer to dispatch_introspection_hooks_s structure. For every structure + * member with (any) non-NULL value, the corresponding hook callout will be + * enabled; for every NULL member the hook callout will be disabled (if there + * is no hook function installed). 
+ * As a convenience, the 'enable' pointer may itself be NULL to indicate that + * all hook callouts should be enabled. + */ + +extern void +dispatch_introspection_hook_callouts_enable( + dispatch_introspection_hooks_t enable); + +/*! + * @function dispatch_introspection_hook_callout_queue_create + * + * @abstract + * Callout to queue creation hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_create( + dispatch_introspection_queue_t queue_info); + +/*! + * @function dispatch_introspection_hook_callout_queue_dispose + * + * @abstract + * Callout to queue destruction hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_dispose( + dispatch_introspection_queue_t queue_info); + +/*! + * @function dispatch_introspection_hook_callout_queue_item_enqueue + * + * @abstract + * Callout to queue enqueue hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_item_enqueue( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +/*! + * @function dispatch_introspection_hook_callout_queue_item_dequeue + * + * @abstract + * Callout to queue dequeue hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_item_dequeue( + dispatch_queue_t queue, dispatch_introspection_queue_item_t item); + +__END_DECLS + +#endif diff --git a/private/io_private.h b/private/io_private.h new file mode 100644 index 000000000..c35b41f2c --- /dev/null +++ b/private/io_private.h @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_IO_PRIVATE__ +#define __DISPATCH_IO_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +/*! + * @function dispatch_read_f + * Schedule a read operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued with the data read from the + * file descriptor when the operation has completed or an error occurs. + * + * The data object passed to the handler will be automatically released by the + * system when the handler returns. It is the responsibility of the application + * to retain, concatenate or copy the data object if it is needed after the + * handler returns. + * + * The data object passed to the handler will only contain as much data as is + * currently available from the file descriptor (up to the specified length). + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with a data object of any data + * that could be read successfully. + * + * An invocation of the handler with an error code of zero and an empty data + * object indicates that EOF was reached. 
+ * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor from which to read the data. + * @param length The length of data to read from the file descriptor, + * or SIZE_MAX to indicate that all of the data currently + * available from the file descriptor should be read. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param handler The handler to enqueue when data is ready to be + * delivered. + * @param context Application-defined context parameter. + * @param data The data read from the file descriptor. + * @param error An errno condition for the read operation or + * zero if the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW +void +dispatch_read_f(dispatch_fd_t fd, + size_t length, + dispatch_queue_t queue, + void *context, + void (*handler)(void *context, dispatch_data_t data, int error)); + +/*! + * @function dispatch_write_f + * Schedule a write operation for asynchronous execution on the specified file + * descriptor. The specified handler is enqueued when the operation has + * completed or an error occurs. + * + * If an unrecoverable error occurs on the file descriptor, the handler will be + * enqueued with the appropriate error code along with the data that could not + * be successfully written. 
+ * + * An invocation of the handler with an error code of zero indicates that the + * data was fully written to the channel. + * + * The system takes control of the file descriptor until the handler is + * enqueued, and during this time file descriptor flags such as O_NONBLOCK will + * be modified by the system on behalf of the application. It is an error for + * the application to modify a file descriptor directly while it is under the + * control of the system, but it may create additional dispatch I/O convenience + * operations or dispatch I/O channels associated with that file descriptor. + * + * @param fd The file descriptor to which to write the data. + * @param data The data object to write to the file descriptor. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param handler The handler to enqueue when the data has been written. + * @param context Application-defined context parameter. + * @param data The data that could not be written to the I/O + * channel, or NULL. + * @param error An errno condition for the write operation or + * zero if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5 +DISPATCH_NOTHROW +void +dispatch_write_f(dispatch_fd_t fd, + dispatch_data_t data, + dispatch_queue_t queue, + void *context, + void (*handler)(void *context, dispatch_data_t data, int error)); + +/*! + * @function dispatch_io_create_f + * Create a dispatch I/O channel associated with a file descriptor. The system + * takes control of the file descriptor until the channel is closed, an error + * occurs on the file descriptor or all references to the channel are released. + * At that time the specified cleanup handler will be enqueued and control over + * the file descriptor relinquished. 
+ * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param fd The file descriptor to associate with the I/O channel. + * @param queue The dispatch queue to which the handler should be submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_f(dispatch_io_type_t type, + dispatch_fd_t fd, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! + * @function dispatch_io_create_with_path_f + * Create a dispatch I/O channel associated with a path name. The specified + * path, oflag and mode parameters will be passed to open(2) when the first I/O + * operation on the channel is ready to execute and the resulting file + * descriptor will remain open and under the control of the system until the + * channel is closed, an error occurs on the file descriptor or all references + * to the channel are released. 
At that time the file descriptor will be closed + * and the specified cleanup handler will be enqueued. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param path The absolute path to associate with the I/O channel. + * @param oflag The flags to pass to open(2) when opening the file at + * path. + * @param mode The mode to pass to open(2) when creating the file at + * path (i.e. with flag O_CREAT), zero otherwise. + * @param queue The dispatch queue to which the handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * has closed the file at path. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation or opening of the + * specified file failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type or non-absolute path specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_path_f(dispatch_io_type_t type, + const char *path, int oflag, mode_t mode, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! + * @function dispatch_io_create_with_io_f + * Create a new dispatch I/O channel from an existing dispatch I/O channel. + * The new channel inherits the file descriptor or path name associated with + * the existing channel, but not its channel type or policies. 
+ * + * If the existing channel is associated with a file descriptor, control by the + * system over that file descriptor is extended until the new channel is also + * closed, an error occurs on the file descriptor, or all references to both + * channels are released. At that time the specified cleanup handler will be + * enqueued and control over the file descriptor relinquished. + * + * While a file descriptor is under the control of a dispatch I/O channel, file + * descriptor flags such as O_NONBLOCK will be modified by the system on behalf + * of the application. It is an error for the application to modify a file + * descriptor directly while it is under the control of a dispatch I/O channel, + * but it may create additional channels associated with that file descriptor. + * + * @param type The desired type of I/O channel (DISPATCH_IO_STREAM + * or DISPATCH_IO_RANDOM). + * @param io The existing channel to create the new I/O channel from. + * @param queue The dispatch queue to which the handler should be submitted. + * @param context The application-defined context parameter to pass to + * the cleanup handler function. + * @param cleanup_handler The handler to enqueue when the system + * relinquishes control over the file descriptor + * (resp. closes the file at path) associated with + * the existing channel. + * @param context Application-defined context parameter. + * @param error An errno condition if control is relinquished + * because channel creation failed, zero otherwise. + * @result The newly created dispatch I/O channel or NULL if an error + * occurred (invalid type specified). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_io_t +dispatch_io_create_with_io_f(dispatch_io_type_t type, + dispatch_io_t io, + dispatch_queue_t queue, + void *context, + void (*cleanup_handler)(void *context, int error)); + +/*! 
+ * @typedef dispatch_io_handler_function_t + * The prototype of I/O handler functions for dispatch I/O operations. + * + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data The data object to be handled. + * @param error An errno condition for the operation. + */ +typedef void (*dispatch_io_handler_function_t)(void *context, bool done, + dispatch_data_t data, int error); + +/*! + * @function dispatch_io_read_f + * Schedule a read operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data read from the channel is described by the dispatch data object + * passed to the I/O handler. This object will be automatically released by the + * system when the I/O handler returns. It is the responsibility of the + * application to retain, concatenate or copy the data object if it is needed + * after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler + * function has returned. + * + * An invocation of the I/O handler with the done flag set indicates that the + * read operation is complete and that the handler will not be enqueued again. + * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and a NULL data object. + * + * An invocation of the I/O handler with the done flag set, an error code of + * zero and an empty data object indicates that EOF was reached. + * + * @param channel The dispatch I/O channel from which to read the data. + * @param offset The offset relative to the channel position from which + * to start reading (only for DISPATCH_IO_RANDOM). 
+ * @param length The length of data to read from the I/O channel, or + * SIZE_MAX to indicate that data should be read until EOF + * is reached. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param io_handler The I/O handler to enqueue when data is ready to be + * delivered. + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data An object with the data most recently read from the + * I/O channel as part of this read operation, or NULL. + * @param error An errno condition for the read operation or zero if + * the read was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6 +DISPATCH_NOTHROW +void +dispatch_io_read_f(dispatch_io_t channel, + off_t offset, + size_t length, + dispatch_queue_t queue, + void *context, + dispatch_io_handler_function_t io_handler); + +/*! + * @function dispatch_io_write_f + * Schedule a write operation for asynchronous execution on the specified I/O + * channel. The I/O handler is enqueued one or more times depending on the + * general load of the system and the policy specified on the I/O channel. + * + * Any data remaining to be written to the I/O channel is described by the + * dispatch data object passed to the I/O handler. This object will be + * automatically released by the system when the I/O handler returns. It is the + * responsibility of the application to retain, concatenate or copy the data + * object if it is needed after the I/O handler returns. + * + * Dispatch I/O handlers are not reentrant. The system will ensure that no new + * I/O handler instance is invoked until the previously enqueued handler + * function has returned. 
+ * + * An invocation of the I/O handler with the done flag set indicates that the + * write operation is complete and that the handler will not be enqueued again. + * + * If an unrecoverable error occurs on the I/O channel's underlying file + * descriptor, the I/O handler will be enqueued with the done flag set, the + * appropriate error code and an object containing the data that could not be + * written. + * + * An invocation of the I/O handler with the done flag set and an error code of + * zero indicates that the data was fully written to the channel. + * + * @param channel The dispatch I/O channel on which to write the data. + * @param offset The offset relative to the channel position from which + * to start writing (only for DISPATCH_IO_RANDOM). + * @param data The data to write to the I/O channel. The data object + * will be retained by the system until the write operation + * is complete. + * @param queue The dispatch queue to which the I/O handler should be + * submitted. + * @param context The application-defined context parameter to pass to + * the handler function. + * @param io_handler The I/O handler to enqueue when data has been delivered. + * @param context Application-defined context parameter. + * @param done A flag indicating whether the operation is complete. + * @param data An object of the data remaining to be + * written to the I/O channel as part of this write + * operation, or NULL. + * @param error An errno condition for the write operation or zero + * if the write was successful. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 +DISPATCH_NONNULL6 DISPATCH_NOTHROW +void +dispatch_io_write_f(dispatch_io_t channel, + off_t offset, + dispatch_data_t data, + dispatch_queue_t queue, + void *context, + dispatch_io_handler_function_t io_handler); + +/*! 
+ * @function dispatch_io_barrier_f + * Schedule a barrier operation on the specified I/O channel; all previously + * scheduled operations on the channel will complete before the provided + * barrier function is enqueued onto the global queue determined by the + * channel's target queue, and no subsequently scheduled operations will start + * until the barrier function has returned. + * + * If multiple channels are associated with the same file descriptor, a barrier + * operation scheduled on any of these channels will act as a barrier across all + * channels in question, i.e. all previously scheduled operations on any of the + * channels will complete before the barrier function is enqueued, and no + * operations subsequently scheduled on any of the channels will start until the + * barrier function has returned. + * + * While the barrier function is running, it may safely operate on the channel's + * underlying file descriptor with fsync(2), lseek(2) etc. (but not close(2)). + * + * @param channel The dispatch I/O channel to schedule the barrier on. + * @param context The application-defined context parameter to pass to + * the barrier function. + * @param barrier The barrier function. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_io_barrier_f(dispatch_io_t channel, + void *context, + dispatch_function_t barrier); + +__END_DECLS + +#endif /* __DISPATCH_IO_PRIVATE__ */ diff --git a/private/mach_private.h b/private/mach_private.h new file mode 100644 index 000000000..603330db4 --- /dev/null +++ b/private/mach_private.h @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_MACH_PRIVATE__ +#define __DISPATCH_MACH_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +#if DISPATCH_MACH_SPI + +#include + +/*! + * @functiongroup Dispatch Mach Channel SPI + * + * IMPORTANT: This is Libsystem-internal SPI not intended for general use and + * is subject to change at any time without warning. + */ + +/*! + * @typedef dispatch_mach_t + * A dispatch mach channel asynchronously receives and sends mach messages. + */ +DISPATCH_DECL(dispatch_mach); + +/*! + * @typedef dispatch_mach_reason_t + * Reasons for a mach channel handler to be invoked. + * + * @const DISPATCH_MACH_CONNECTED + * The channel has been connected. The first handler invocation on a channel + * after calling dispatch_mach_connect() will have this reason. + * + * @const DISPATCH_MACH_MESSAGE_RECEIVED + * A message was received, it is passed in the message parameter. + * + * @const DISPATCH_MACH_MESSAGE_SENT + * A message was sent, it is passed in the message parameter (so that associated + * resources can be disposed of). 
+ * + * @const DISPATCH_MACH_MESSAGE_SEND_FAILED + * A message failed to be sent, it is passed in the message parameter (so that + * associated resources can be disposed of), along with the error code from + * mach_msg(). + * + * @const DISPATCH_MACH_MESSAGE_NOT_SENT + * A message was not sent due to the channel being canceled or reconnected, it + * is passed in the message parameter (so that associated resources can be + * disposed of). + * + * @const DISPATCH_MACH_BARRIER_COMPLETED + * A barrier block has finished executing. + * + * @const DISPATCH_MACH_DISCONNECTED + * The channel has been disconnected by a call to dispatch_mach_reconnect() or + * dispatch_mach_cancel(), an empty message is passed in the message parameter + * (so that associated port rights can be disposed of). + * The message header will contain either a remote port with a previously + * connected send right, or a local port with a previously connected receive + * right (if the channel was canceled), or a local port with a receive right + * that was being monitored for a direct reply to a message previously sent to + * the channel (if no reply was received). + * + * @const DISPATCH_MACH_CANCELED + * The channel has been canceled. + */ +DISPATCH_ENUM(dispatch_mach_reason, unsigned long, + DISPATCH_MACH_CONNECTED = 1, + DISPATCH_MACH_MESSAGE_RECEIVED, + DISPATCH_MACH_MESSAGE_SENT, + DISPATCH_MACH_MESSAGE_SEND_FAILED, + DISPATCH_MACH_MESSAGE_NOT_SENT, + DISPATCH_MACH_BARRIER_COMPLETED, + DISPATCH_MACH_DISCONNECTED, + DISPATCH_MACH_CANCELED, + DISPATCH_MACH_REASON_LAST, /* unused */ +); + +/*! + * @typedef dispatch_mach_trailer_t + * Trailer type of mach message received by dispatch mach channels + */ + +typedef mach_msg_context_trailer_t dispatch_mach_trailer_t; + +/*! + * @constant DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + * Maximum size of a message that can be received inline by a dispatch mach + * channel, reception of larger messages requires an extra roundtrip through + * the kernel. 
+ */ + +#define DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE \ + ((PAGE_SIZE > 0x1000 ? 1 : 3) * PAGE_SIZE - \ + sizeof(dispatch_mach_trailer_t)) + +/*! + * @typedef dispatch_mach_msg_t + * A dispatch mach message encapsulates messages received or sent with dispatch + * mach channels. + */ +DISPATCH_DECL(dispatch_mach_msg); + +/*! + * @typedef dispatch_mach_msg_destructor_t + * Dispatch mach message object destructors. + * + * @const DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT + * Message buffer storage is internal to the object, if a buffer is supplied + * during object creation, its contents are copied. + * + * @const DISPATCH_MACH_MSG_DESTRUCTOR_FREE + * Message buffer will be deallocated with free(3). + * + * @const DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE + * Message buffer will be deallocated with vm_deallocate. + */ +DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int, + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT = 0, + DISPATCH_MACH_MSG_DESTRUCTOR_FREE, + DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE, +); + +/*! + * @function dispatch_mach_msg_create + * Creates a dispatch mach message object, either with a newly allocated message + * buffer of given size, or from an existing message buffer that will be + * deallocated with the specified destructor when the object is released. + * + * If a non-NULL reference to a pointer is provided in 'msg_ptr', it is filled + * with the location of the (possibly newly allocated) message buffer. + * + * It is the responsibility of the application to ensure that it does not modify + * the underlying message buffer once the dispatch mach message object is passed + * to other dispatch mach API. + * + * @param msg The message buffer to create the message object from. + * If 'destructor' is DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, + * this argument may be NULL to leave the newly allocated + * message buffer zero-initialized. + * @param size The size of the message buffer. 
+ * Must be >= sizeof(mach_msg_header_t) + * @param destructor The destructor to use to deallocate the message buffer + * when the object is released. + * @param msg_ptr A pointer to a pointer variable to be filled with the + * location of the (possibly newly allocated) message + * buffer, or NULL. + * @result A newly created dispatch mach message object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr); + +/*! + * @function dispatch_mach_msg_get_msg + * Returns the message buffer underlying a dispatch mach message object. + * + * @param message The dispatch mach message object to query. + * @param size_ptr A pointer to a size_t variable to be filled with the + * size of the message buffer, or NULL. + * @result Pointer to message buffer underlying the object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +mach_msg_header_t* +dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); + +#ifdef __BLOCKS__ +/*! + * @typedef dispatch_mach_handler_t + * Prototype of dispatch mach channel handler blocks. + * + * @param reason Reason the handler was invoked. + * @param message Message object that was sent or received. + * @param error Mach error code for the send operation. + */ +typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, + dispatch_mach_msg_t message, mach_error_t error); + +/*! + * @function dispatch_mach_create + * Create a dispatch mach channel to asynchronously receive and send mach + * messages. 
+ * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param handler + * The handler block to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL3 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t queue, + dispatch_mach_handler_t handler); +#endif + +/*! + * @typedef dispatch_mach_handler_function_t + * Prototype of dispatch mach channel handler functions. + * + * @param context Application-defined context parameter. + * @param reason Reason the handler was invoked. + * @param message Message object that was sent or received. + * @param error Mach error code for the send operation. + */ +typedef void (*dispatch_mach_handler_function_t)(void *context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message, + mach_error_t error); + +/*! + * @function dispatch_mach_create_f + * Create a dispatch mach channel to asynchronously receive and send mach + * messages. 
+ * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param context + * The application-defined context to pass to the handler. + * + * @param handler + * The handler function to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t queue, void *context, + dispatch_mach_handler_function_t handler); + +/*! + * @function dispatch_mach_connect + * Connect a mach channel to the specified receive and send rights. + * + * This function must only be called once during the lifetime of a channel, it + * will initiate message reception and perform any already submitted message + * sends or barrier operations. + * + * @param channel + * The mach channel to connect. + * + * @param receive + * The receive right to associate with the channel. May be MACH_PORT_NULL. + * + * @param send + * The send right to associate with the channel. May be MACH_PORT_NULL. 
+ *
+ * @param checkin
+ * An optional message object encapsulating the initial check-in message to send
+ * upon channel connection. The check-in message is sent immediately before the
+ * first message submitted via dispatch_mach_send(). The message object will be
+ * retained until the initial send operation is complete (or not performed due
+ * to channel cancellation or reconnection) and the channel handler has
+ * returned. May be NULL.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive,
+		mach_port_t send, dispatch_mach_msg_t checkin);
+
+/*!
+ * @function dispatch_mach_reconnect
+ * Reconnect a mach channel to the specified send right.
+ *
+ * Disconnects the channel from the current send right, interrupts any pending
+ * message sends (and returns the messages as unsent), and reconnects the
+ * channel to a new send right.
+ *
+ * The application must wait for the channel handler to be invoked with
+ * DISPATCH_MACH_DISCONNECTED before releasing the previous send right.
+ *
+ * @param channel
+ * The mach channel to reconnect.
+ *
+ * @param send
+ * The new send right to associate with the channel. May be MACH_PORT_NULL.
+ *
+ * @param checkin
+ * An optional message object encapsulating the initial check-in message to send
+ * upon channel reconnection. The check-in message is sent immediately before
+ * the first message submitted via dispatch_mach_send() after this function
+ * returns. The message object will be retained until the initial send operation
+ * is complete (or not performed due to channel cancellation or reconnection)
+ * and the channel handler has returned. May be NULL.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW
+void
+dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send,
+		dispatch_mach_msg_t checkin);
+
+/*!
+ * @function dispatch_mach_cancel
+ * Cancel a mach channel, preventing any further messages from being sent or
+ * received.
+ *
+ * The application must wait for the channel handler to be invoked with
+ * DISPATCH_MACH_DISCONNECTED before releasing the underlying send and receive
+ * rights.
+ *
+ * Note: explicit cancellation of mach channels is required, no implicit
+ * cancellation takes place on release of the last application reference
+ * to the channel object. Failure to cancel will cause the channel and
+ * its associated resources to be leaked.
+ *
+ * @param channel
+ * The mach channel to cancel.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_cancel(dispatch_mach_t channel);
+
+/*!
+ * @function dispatch_mach_send
+ * Asynchronously send a message encapsulated in a dispatch mach message object
+ * to the specified mach channel.
+ *
+ * Unless the message is being sent to a send-once right (as determined by the
+ * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits),
+ * the message header remote port is set to the channel send right before the
+ * send operation is performed.
+ *
+ * If the message expects a direct reply (as determined by the presence of
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) the receive
+ * right specified in the message header local port will be monitored until a
+ * reply message (or a send-once notification) is received, or the channel is
+ * canceled. Hence the application must wait for the channel handler to be
+ * invoked with a DISPATCH_MACH_DISCONNECTED message before releasing that
+ * receive right.
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. 
If the message expected a direct reply, the
+ * receive right originally specified in the message header local port will be
+ * returned in a DISPATCH_MACH_DISCONNECTED message.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. The object will be
+ * retained until the send operation is complete and the channel handler has
+ * returned. The storage underlying the message object may be modified by the
+ * send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
+void
+dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message,
+		mach_msg_option_t options);
+
+#ifdef __BLOCKS__
+/*!
+ * @function dispatch_mach_send_barrier
+ * Submit a send barrier to the specified mach channel. Messages submitted to
+ * the channel before the barrier will be sent before the barrier block is
+ * executed, and messages submitted to the channel after the barrier will only
+ * be sent once the barrier block has completed and the channel handler
+ * invocation for the barrier has returned.
+ *
+ * @param channel
+ * The mach channel to which to submit the barrier.
+ *
+ * @param barrier
+ * The barrier block to submit to the channel target queue.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0)
+DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW
+void
+dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier);
+#endif
+
+/*!
+ * @function dispatch_mach_send_barrier_f
+ * Submit a send barrier to the specified mach channel. 
Messages submitted to + * the channel before the barrier will be sent before the barrier block is + * executed, and messages submitted to the channel after the barrier will only + * be sent once the barrier block has completed and the channel handler + * invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the barrier. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param barrier + * The barrier function to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *context, + dispatch_function_t barrier); + +#ifdef __BLOCKS__ +/*! + * @function dispatch_mach_receive_barrier + * Submit a receive barrier to the specified mach channel. Channel handlers for + * messages received by the channel after the receive barrier has been + * submitted will only be invoked once the barrier block has completed and the + * channel handler invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the receive barrier. + * + * @param barrier + * The barrier block to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_mach_receive_barrier(dispatch_mach_t channel, + dispatch_block_t barrier); +#endif + +/*! + * @function dispatch_mach_receive_barrier_f + * Submit a receive barrier to the specified mach channel. Channel handlers for + * messages received by the channel after the receive barrier has been + * submitted will only be invoked once the barrier block has completed and the + * channel handler invocation for the barrier has returned. + * + * @param channel + * The mach channel to which to submit the receive barrier. 
+ * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param barrier + * The barrier function to submit to the channel target queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *context, + dispatch_function_t barrier); + +/*! + * @function dispatch_mach_get_checkin_port + * Returns the port specified in the message header remote port of the check-in + * message passed to the most recent invocation of dispatch_mach_connect() or + * dispatch_mach_reconnect() for the provided mach channel (irrespective of the + * completion of the (re)connect or check-in operations in question). + * + * Returns MACH_PORT_NULL if dispatch_mach_connect() has not yet been called or + * if the most recently specified check-in message was NULL, and MACH_PORT_DEAD + * if the channel has been canceled. + * + * It is the responsibility of the application to ensure that the port + * specified in a check-in message remains valid at the time this function is + * called. + * + * @param channel + * The mach channel to query. + * + * @result + * The most recently specified check-in port for the channel. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t channel); + +#endif // DISPATCH_MACH_SPI + +__END_DECLS + +#endif diff --git a/private/private.h b/private/private.h index 08a14ce16..4e32e7345 100644 --- a/private/private.h +++ b/private/private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -54,27 +54,90 @@ #include #include #include +#include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20111201 // Keep in sync with +#if DISPATCH_API_VERSION != 20130520 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif __BEGIN_DECLS +/*! + * @function _dispatch_is_multithreaded + * + * @abstract + * Returns true if the current process has become multithreaded by the use + * of libdispatch functionality. + * + * @discussion + * This SPI is intended for use by low-level system components that need to + * ensure that they do not make a single-threaded process multithreaded, to + * avoid negatively affecting child processes of a fork (without exec). + * + * Such components must not use any libdispatch functionality if this function + * returns false. + * + * @result + * Boolean indicating whether the process has used libdispatch and become + * multithreaded. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NOTHROW -void -libdispatch_init(void); +bool _dispatch_is_multithreaded(void); + +/*! + * @function _dispatch_is_fork_of_multithreaded_parent + * + * @abstract + * Returns true if the current process is a child of a parent process that had + * become multithreaded by the use of libdispatch functionality at the time of + * fork (without exec). + * + * @discussion + * This SPI is intended for use by (rare) low-level system components that need + * to continue working on the child side of a fork (without exec) of a + * multithreaded process. + * + * Such components must not use any libdispatch functionality if this function + * returns true. + * + * @result + * Boolean indicating whether the parent process had used libdispatch and + * become multithreaded at the time of fork. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +bool _dispatch_is_fork_of_multithreaded_parent(void); + +/* + * dispatch_time convenience macros + */ + +#define _dispatch_time_after_nsec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t)) +#define _dispatch_time_after_usec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_time_after_msec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_time_after_sec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) + +/* + * SPI for CoreFoundation/Foundation/libauto ONLY + */ + +#define DISPATCH_COCOA_COMPAT (TARGET_OS_MAC || TARGET_OS_WIN32) -#if TARGET_OS_MAC -#define DISPATCH_COCOA_COMPAT 1 #if DISPATCH_COCOA_COMPAT +#if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t @@ -84,6 +147,46 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); +#elif TARGET_OS_WIN32 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +HANDLE +_dispatch_get_main_queue_handle_4CF(void); + +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_main_queue_callback_4CF(void); +#endif // TARGET_OS_WIN32 + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags); + +#if TARGET_OS_MAC +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +mach_port_t +_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); +#endif + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void 
+_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue); + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue); + +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +_dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, + dispatch_time_t start, uint64_t interval, uint64_t leeway); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT @@ -92,11 +195,6 @@ void (*dispatch_begin_thread_4GC)(void); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void (*dispatch_end_thread_4GC)(void); - -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) -DISPATCH_EXPORT -void (*dispatch_no_worker_threads_4GC)(void); - __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void *(*_dispatch_begin_NSAutoReleasePool)(void); @@ -105,39 +203,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT void (*_dispatch_end_NSAutoReleasePool)(void *); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -bool _dispatch_is_multithreaded(void); - -#define _dispatch_time_after_nsec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t)) -#define _dispatch_time_after_usec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) -#define _dispatch_time_after_msec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) -#define _dispatch_time_after_sec(t) \ - dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) - -#endif -#endif /* TARGET_OS_MAC */ - -/* pthreads magic */ - -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); - -#if TARGET_OS_MAC -/* - * Extract the context pointer from a mach message trailer. 
- */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL -DISPATCH_NOTHROW -void * -dispatch_mach_msg_get_context(mach_msg_header_t *msg); -#endif /* TARGET_OS_MAC */ +#endif /* DISPATCH_COCOA_COMPAT */ __END_DECLS -#endif +#endif // __DISPATCH_PRIVATE__ diff --git a/private/queue_private.h b/private/queue_private.h index bdfb5b8b9..dfef7859f 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,7 +34,6 @@ __BEGIN_DECLS - /*! * @enum dispatch_queue_flags_t * @@ -48,6 +47,17 @@ enum { #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +/*! + * @typedef dispatch_queue_priority_t + * + * @constant DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE + * Items dispatched to the queue will run at non-interactive priority. + * This priority level is intended for user-initiated application activity that + * is long-running and CPU or IO intensive and that the user is actively waiting + * on, but that should not interfere with interactive use of the application. + */ +#define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN + /*! * @function dispatch_queue_set_width * @@ -84,31 +94,122 @@ void dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED /*! - * @function dispatch_set_current_target_queue + * @function dispatch_queue_create_with_target * * @abstract - * Synchronously sets the target queue of the current serial queue. + * Creates a new dispatch queue with a specified target queue. * * @discussion - * This SPI is provided for a limited purpose case when calling - * dispatch_set_target_queue() is not sufficient. 
It works similarly to - * dispatch_set_target_queue() except the target queue of the current queue - * is immediately changed so that pending blocks on the queue will run on the - * new target queue. Calling this from outside of a block executing on a serial - * queue is undefined. + * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute + * invoke blocks serially in FIFO order. * - * @param queue - * The new target queue for the object. The queue is retained, and the - * previous target queue, if any, is released. - * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue - * to the default target queue for the given object type. + * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may + * invoke blocks concurrently (similarly to the global concurrent queues, but + * potentially with more overhead), and support barrier blocks submitted with + * the dispatch barrier API, which e.g. enables the implementation of efficient + * reader-writer schemes. + * + * When a dispatch queue is no longer needed, it should be released with + * dispatch_release(). Note that any pending blocks submitted to a queue will + * hold a reference to that queue. Therefore a queue will not be deallocated + * until all pending blocks have finished. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param attr + * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT. + * + * @param target + * The target queue for the newly created queue. The target queue is retained. + * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target + * queue to the default target queue for the given queue type. + * + * @result + * The newly created dispatch queue. 
*/ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create_with_target(const char *label, + dispatch_queue_attr_t attr, dispatch_queue_t target); -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -dispatch_set_current_target_queue(dispatch_queue_t queue); +#ifdef __BLOCKS__ +/*! + * @function dispatch_pthread_root_queue_create + * + * @abstract + * Creates a new concurrent dispatch root queue with a pthread-based pool of + * worker threads owned by the application. + * + * @discussion + * Dispatch pthread root queues are similar to the global concurrent dispatch + * queues in that they invoke blocks concurrently, however the blocks are not + * executed on ordinary worker threads but use a dedicated pool of pthreads not + * shared with the global queues or any other pthread root queues. + * + * NOTE: this is a special-purpose facility that should only be used in very + * limited circumstances, in almost all cases the global concurrent queues + * should be preferred. While this facility allows for more flexibility in + * configuring worker threads for special needs it comes at the cost of + * increased overall memory usage due to reduced thread sharing and higher + * latency in worker thread bringup. + * + * Dispatch pthread root queues do not support suspension, application context + * and change of width or of target queue. They can however be used as the + * target queue for serial or concurrent queues obtained via + * dispatch_queue_create() or dispatch_queue_create_with_target(), which + * enables the blocks submitted to those queues to be processed on the root + * queue's pthread pool. + * + * When a dispatch pthread root queue is no longer needed, it should be + * released with dispatch_release(). 
Existing worker pthreads and pending blocks + * submitted to the root queue will hold a reference to the queue so it will not + * be deallocated until all blocks have finished and worker threads exited. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param flags + * Reserved for future use. Passing any value other than zero may result in + * a NULL return value. + * + * @param attr + * Attributes passed to pthread_create(3) when creating worker pthreads. This + * parameter is copied and can be destroyed after this call returns. + * This parameter is optional and may be NULL. + * + * @param configure + * Configuration block called on newly created worker pthreads before any blocks + * for the root queue are executed. The block may configure the current thread + * as needed. + * This parameter is optional and may be NULL. + * + * @result + * The newly created dispatch pthread root queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure); +#endif /* __BLOCKS__ */ + +/*! + * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE + * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() + * functions to indicate that the root queue for the current thread should be + * used (i.e. one of the global concurrent queues or a queue created with + * dispatch_pthread_root_queue_create()). If there is no such queue, the + * default priority global concurrent queue will be used. 
+ */ +#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL +#if !TARGET_OS_WIN32 __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end @@ -124,22 +225,73 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { const uint16_t dqo_running; const uint16_t dqo_running_size; } dispatch_queue_offsets; +#endif /*! - * @function dispatch_flush_continuation_cache + * @function dispatch_assert_queue * * @abstract - * Flushes the current thread's cache of continuation objects, if any. + * Verifies that the current block is executing on a certain dispatch queue. * * @discussion - * Warning: this function is subject to change in a future release. - * Please contact the GCD team before using it in your code. + * Some code expects to be run on a specific dispatch queue. This function + * verifies that expectation for debugging. + * + * This function will only return if the currently executing block was submitted + * to the specified queue or to any queue targeting it (see + * dispatch_set_target_queue()). Otherwise, it logs an explanation to the system + * log, then terminates the application. + * + * When dispatch_assert_queue() is called outside of the context of a + * submitted block, its behavior is undefined. + * + * Passing the result of dispatch_get_main_queue() to this function verifies + * that the current block was submitted to the main queue or to a queue + * targeting it. + * IMPORTANT: this is NOT the same as verifying that the current block is + * executing on the main thread. + * + * The variant dispatch_assert_queue_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected to run on. + * The result of passing NULL in this parameter is undefined. 
*/
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1
+void
+dispatch_assert_queue(dispatch_queue_t queue);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_NA)
-DISPATCH_EXPORT DISPATCH_NOTHROW
+/*!
+ * @function dispatch_assert_queue_not
+ *
+ * @abstract
+ * Verifies that the current block is not executing on a certain dispatch queue.
+ *
+ * @discussion
+ * This function is the equivalent of dispatch_assert_queue() with the test for
+ * equality inverted. See discussion there.
+ *
+ * The variant dispatch_assert_queue_not_debug() is compiled out when the
+ * preprocessor macro NDEBUG is defined. (See also assert(3)).
+ *
+ * @param queue
+ * The dispatch queue that the current block is expected not to run on.
+ * The result of passing NULL in this parameter is undefined.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
+DISPATCH_EXPORT DISPATCH_NONNULL1
 void
-dispatch_flush_continuation_cache(void);
+dispatch_assert_queue_not(dispatch_queue_t queue);
+
+#ifdef NDEBUG
+#define dispatch_assert_queue_debug(q) ((void)0)
+#define dispatch_assert_queue_not_debug(q) ((void)0)
+#else
+#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q)
+#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q)
+#endif
 
 __END_DECLS
 
diff --git a/private/source_private.h b/private/source_private.h
index 8de730821..0f44e27e6 100644
--- a/private/source_private.h
+++ b/private/source_private.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
@@ -32,6 +32,42 @@
 #include // for HeaderDoc
 #endif
+
+/*!
+ * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE
+ * @discussion A dispatch timer source that is part of a timer aggregate.
+ * The handle is the dispatch timer aggregate object.
+ * The mask specifies which flags from dispatch_source_timer_flags_t to apply. 
+ */ +#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \ + (&_dispatch_source_type_timer_with_aggregate) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); + +/*! + * @const DISPATCH_SOURCE_TYPE_INTERVAL + * @discussion A dispatch source that submits the event handler block at a + * specified time interval, phase-aligned with all other interval sources on + * the system that have the same interval value. + * + * The initial submission of the event handler will occur at some point during + * the first time interval after the source is created (assuming the source is + * resumed at that time). + * + * By default, the unit for the interval value is milliseconds and the leeway + * (maximum amount of time any individual handler submission may be deferred to + * align with other system activity) for the source is fixed at interval/2. + * + * If the DISPATCH_INTERVAL_UI_ANIMATION flag is specified, the unit for the + * interval value is animation frames (1/60th of a second) and the leeway is + * fixed at one frame. + * + * The handle is the interval value in milliseconds or frames. + * The mask specifies which flags from dispatch_source_timer_flags_t to apply. + */ +#define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_SOURCE_TYPE_DECL(interval); + /*! * @const DISPATCH_SOURCE_TYPE_VFS * @discussion Apple-internal dispatch source that monitors for vfs events @@ -51,6 +87,17 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; +/*! + * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS + * @discussion A dispatch source that monitors memory status + * The mask is a mask of desired events from + * dispatch_source_memorystatus_flags_t. 
+ */ +#define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +DISPATCH_EXPORT const struct dispatch_source_type_s + _dispatch_source_type_memorystatus; + /*! * @const DISPATCH_SOURCE_TYPE_SOCK * @discussion A dispatch source that monitors events on socket state changes. @@ -89,6 +136,14 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; * @constant DISPATCH_SOCK_KEEPALIVE * TCP Keepalive received * + * @constant DISPATCH_SOCK_CONNECTED + * Socket is connected + * + * @constant DISPATCH_SOCK_DISCONNECTED + * Socket is disconnected + * + * @constant DISPATCH_SOCK_CONNINFO_UPDATED + * Connection info was updated */ enum { DISPATCH_SOCK_CONNRESET = 0x00000001, @@ -100,6 +155,11 @@ enum { DISPATCH_SOCK_SUSPEND = 0x00000040, DISPATCH_SOCK_RESUME = 0x00000080, DISPATCH_SOCK_KEEPALIVE = 0x00000100, + DISPATCH_SOCK_ADAPTIVE_WTIMO = 0x00000200, + DISPATCH_SOCK_ADAPTIVE_RTIMO = 0x00000400, + DISPATCH_SOCK_CONNECTED = 0x00000800, + DISPATCH_SOCK_DISCONNECTED = 0x00001000, + DISPATCH_SOCK_CONNINFO_UPDATED = 0x00002000, }; /*! @@ -148,6 +208,24 @@ enum { DISPATCH_VFS_VERYLOWDISK = 0x0200, }; +/*! + * @enum dispatch_source_timer_flags_t + * + * @constant DISPATCH_TIMER_BACKGROUND + * Specifies that the timer is used to trigger low priority maintenance-level + * activity and that the system may apply larger minimum leeway values to the + * timer in order to align it with other system activity. + * + * @constant DISPATCH_INTERVAL_UI_ANIMATION + * Specifies that the interval source is used for UI animation. The unit for + * the interval value of such sources is frames (1/60th of a second) and the + * leeway is fixed at one frame. + */ +enum { + DISPATCH_TIMER_BACKGROUND = 0x2, + DISPATCH_INTERVAL_UI_ANIMATION = 0x20, +}; + /*! 
* @enum dispatch_source_mach_send_flags_t * @@ -168,11 +246,12 @@ enum { * @enum dispatch_source_proc_flags_t * * @constant DISPATCH_PROC_REAP - * The process has been reaped by the parent process via - * wait*(). + * The process has been reaped by the parent process via wait*(). + * This flag is deprecated and will be removed in a future release. */ enum { - DISPATCH_PROC_REAP = 0x10000000, + DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED( + __MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000, }; /*! @@ -186,12 +265,83 @@ enum { DISPATCH_VM_PRESSURE = 0x80000000, }; +/*! + * @enum dispatch_source_memorystatus_flags_t + * + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL + * The system's memory pressure state has returned to normal. + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_WARN + * The system's memory pressure state has changed to warning. + * @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL + * The system's memory pressure state has changed to critical. + */ + +enum { + DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL = 0x01, + DISPATCH_MEMORYSTATUS_PRESSURE_WARN = 0x02, +#if !TARGET_OS_EMBEDDED + DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL = 0x04, +#endif +}; + #if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 #define DISPATCH_VM_PRESSURE DISPATCH_VNODE_ATTRIB #endif __BEGIN_DECLS +/*! + * @typedef dispatch_timer_aggregate_t + * + * @abstract + * Dispatch timer aggregates are sets of related timers. + */ +DISPATCH_DECL(dispatch_timer_aggregate); + +/*! + * @function dispatch_timer_aggregate_create + * + * @abstract + * Creates a new dispatch timer aggregate. + * + * @discussion + * A dispatch timer aggregate is a set of related timers whose overall timing + * parameters can be queried. + * + * Timers are added to an aggregate when a timer source is created with type + * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE. + * + * @result + * The newly created dispatch timer aggregate. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_timer_aggregate_t +dispatch_timer_aggregate_create(void); + +/*! + * @function dispatch_timer_aggregate_get_delay + * + * @abstract + * Retrieves the delay until a timer in the given aggregate will next fire. + * + * @param aggregate + * The dispatch timer aggregate to query. + * + * @param leeway_ptr + * Optional pointer to a variable filled with the leeway (in ns) that will be + * applied to the return value. May be NULL. + * + * @result + * Delay in ns from now. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +uint64_t +dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, + uint64_t *leeway_ptr); + #if TARGET_OS_MAC /*! * @typedef dispatch_mig_callback_t @@ -207,6 +357,18 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback); + +/*! + * @function dispatch_mach_msg_get_context + * + * @abstract + * Extract the context pointer from a mach message trailer. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL +DISPATCH_NOTHROW +void * +dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif __END_DECLS diff --git a/resolver/resolved.h b/resolver/resolved.h index bb9a82d59..a481a2033 100644 --- a/resolver/resolved.h +++ b/resolver/resolved.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * diff --git a/resolver/resolver.c b/resolver/resolver.c index 8b390b4a4..9afc893d5 100644 --- a/resolver/resolver.c +++ b/resolver/resolver.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Apple Inc. All rights reserved. 
+ * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * diff --git a/src/Makefile.am b/src/Makefile.am index 1af748c52..630a4806d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -16,6 +16,7 @@ libdispatch_la_SOURCES= \ semaphore.c \ source.c \ time.c \ + transform.c \ protocol.defs \ provider.d \ data_internal.h \ @@ -30,24 +31,34 @@ libdispatch_la_SOURCES= \ shims/atomic.h \ shims/getprogname.h \ shims/hw_config.h \ - shims/malloc_zone.h \ shims/perfmon.h \ shims/time.h \ shims/tsd.h -INCLUDES=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private +AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) \ + -I$(top_srcdir)/private -I$(top_srcdir)/os -libdispatch_la_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) -libdispatch_la_CFLAGS+=$(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) +AM_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ + $(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) libdispatch_la_LDFLAGS=-avoid-version if HAVE_DARWIN_LD -libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 -Wl,-current_version,$(VERSION) +libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ + -Wl,-current_version,$(VERSION) -Wl,-dead_strip +endif + +if USE_OBJC +libdispatch_la_SOURCES+=object.m +libdispatch_la_OBJCFLAGS=$(AM_CFLAGS) -fobjc-gc +libdispatch_la_LDFLAGS+=-Wl,-upward-lobjc -Wl,-upward-lauto \ + -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order \ + -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases \ + -Wl,-unexported_symbols_list,$(top_srcdir)/xcodeconfig/libdispatch.unexport endif CLEANFILES= -DISTCLEANFILES=System +DISTCLEANFILES=System objc if USE_MIG BUILT_SOURCES= \ diff --git a/src/allocator.c b/src/allocator.c new file mode 100644 index 000000000..7b4c16529 --- /dev/null +++ b/src/allocator.c @@ -0,0 +1,764 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. 
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#include "internal.h"
+#include "allocator_internal.h"
+
+#if DISPATCH_ALLOCATOR
+
+#ifndef VM_MEMORY_LIBDISPATCH
+#define VM_MEMORY_LIBDISPATCH 74
+#endif
+
+// _dispatch_main_heap is the first heap in the linked list, where searches
+// always begin.
+//
+// _dispatch_main_heap, and dh_next, are read normally but only written (in
+// try_create_heap) by cmpxchg. They start life at 0, and are only written
+// once to non-zero. They are not marked volatile. There is a small risk that
+// some thread may see a stale 0 value and enter try_create_heap. It will
+// waste some time in an allocate syscall, but eventually it will try to
+// cmpxchg, expecting to overwrite 0 with an address. This will fail
+// (because another thread already did this), the thread will deallocate the
+// unused allocated memory, and continue with the new value.
+//
+// If something goes wrong here, the symptom would be a NULL dereference
+// in alloc_continuation_from_heap or _magazine when dereferencing the magazine ptr.
+static dispatch_heap_t _dispatch_main_heap; + +DISPATCH_ALWAYS_INLINE +static void +set_last_found_page(bitmap_t *val) +{ + dispatch_assert(_dispatch_main_heap); + unsigned int cpu = _dispatch_cpu_number(); + _dispatch_main_heap[cpu].header.last_found_page = val; +} + +DISPATCH_ALWAYS_INLINE +static bitmap_t * +last_found_page(void) +{ + dispatch_assert(_dispatch_main_heap); + unsigned int cpu = _dispatch_cpu_number(); + return _dispatch_main_heap[cpu].header.last_found_page; +} + +#pragma mark - +#pragma mark dispatch_alloc_bitmaps + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +supermap_address(struct dispatch_magazine_s *magazine, unsigned int supermap) +{ + return &magazine->supermaps[supermap]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +bitmap_address(struct dispatch_magazine_s *magazine, unsigned int supermap, + unsigned int map) +{ + return &magazine->maps[supermap][map]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static dispatch_continuation_t +continuation_address(struct dispatch_magazine_s *magazine, + unsigned int supermap, unsigned int map, unsigned int index) +{ +#if DISPATCH_DEBUG + dispatch_assert(supermap < SUPERMAPS_PER_MAGAZINE); + dispatch_assert(map < BITMAPS_PER_SUPERMAP); + dispatch_assert(index < CONTINUATIONS_PER_BITMAP); +#endif + return (dispatch_continuation_t)&magazine->conts[supermap][map][index]; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static struct dispatch_magazine_s * +magazine_for_continuation(dispatch_continuation_t c) +{ + return (struct dispatch_magazine_s *)((uintptr_t)c & MAGAZINE_MASK); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap, + unsigned int index, dispatch_continuation_t *continuation_out, + bitmap_t **supermap_out, unsigned int *bitmap_index_out) +{ + // m_for_c wants a continuation not a bitmap, but it works because it + // just masks off the bottom bits of the address. 
+ struct dispatch_magazine_s *m = magazine_for_continuation((void *)bitmap); + unsigned int mindex = (unsigned int)(bitmap - m->maps[0]); + unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP; + unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP; + dispatch_assert(&m->maps[sindex][bindex] == bitmap); + if (fastpath(continuation_out)) { + *continuation_out = continuation_address(m, sindex, bindex, index); + } + if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex); + if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bool +continuation_is_in_first_page(dispatch_continuation_t c) +{ +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // (the base of c's magazine == the base of c's page) + // => c is in first page of magazine + return (((uintptr_t)c & MAGAZINE_MASK) == + ((uintptr_t)c & ~(uintptr_t)PAGE_MASK)); +#else + (void)c; + return false; +#endif +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +get_maps_and_indices_for_continuation(dispatch_continuation_t c, + bitmap_t **supermap_out, unsigned int *bitmap_index_out, + bitmap_t **bitmap_out, unsigned int *index_out) +{ + unsigned int cindex, sindex, index, mindex; + padded_continuation *p = (padded_continuation *)c; + struct dispatch_magazine_s *m = magazine_for_continuation(c); +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + if (fastpath(continuation_is_in_first_page(c))) { + cindex = (unsigned int)(p - m->fp_conts); + index = cindex % CONTINUATIONS_PER_BITMAP; + mindex = cindex / CONTINUATIONS_PER_BITMAP; + if (fastpath(supermap_out)) *supermap_out = NULL; + if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; + if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex]; + if (fastpath(index_out)) *index_out = index; + return; + } +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + cindex = (unsigned int)(p - (padded_continuation *)m->conts); + sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP); + mindex = (cindex / 
CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP; + index = cindex % CONTINUATIONS_PER_BITMAP; + if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex]; + if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; + if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex]; + if (fastpath(index_out)) *index_out = index; +} + +// Base address of page, or NULL if this page shouldn't be madvise()d +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static void * +madvisable_page_base_for_continuation(dispatch_continuation_t c) +{ + if (fastpath(continuation_is_in_first_page(c))) { + return NULL; + } + void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK); +#if DISPATCH_DEBUG + struct dispatch_magazine_s *m = magazine_for_continuation(c); + if (slowpath(page_base < (void *)&m->conts)) { + DISPATCH_CRASH("madvisable continuation too low"); + } + if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] + [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) { + DISPATCH_CRASH("madvisable continuation too high"); + } +#endif + return page_base; +} + +// Bitmap that controls the first few continuations in the same page as +// the continuations controlled by the passed bitmap. Undefined results if the +// passed bitmap controls continuations in the first page. 
+DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bitmap_t * +first_bitmap_in_same_page(bitmap_t *b) +{ +#if DISPATCH_DEBUG + struct dispatch_magazine_s *m; + m = magazine_for_continuation((void*)b); + dispatch_assert(b >= &m->maps[0][0]); + dispatch_assert(b < &m->maps[SUPERMAPS_PER_MAGAZINE] + [BITMAPS_PER_SUPERMAP]); +#endif + const uintptr_t PAGE_BITMAP_MASK = (BITMAPS_PER_PAGE * + BYTES_PER_BITMAP) - 1; + return (bitmap_t *)((uintptr_t)b & ~PAGE_BITMAP_MASK); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST +static bool +bitmap_is_full(bitmap_t bits) +{ + return (bits == BITMAP_ALL_ONES); +} + +#define NO_BITS_WERE_UNSET (UINT_MAX) + +// max_index is the 0-based position of the most significant bit that is +// allowed to be set. +DISPATCH_ALWAYS_INLINE_NDEBUG +static unsigned int +bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap, + unsigned int max_index) +{ + // No barriers needed in acquire path: the just-allocated + // continuation is "uninitialized", so the caller shouldn't + // load from it before storing, so we don't need to guard + // against reordering those loads. 
+#if defined(__x86_64__) // TODO rdar://problem/11477843 + dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t)); + return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index); +#else + dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t)); + return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index); +#endif +} + +DISPATCH_ALWAYS_INLINE +static unsigned int +bitmap_set_first_unset_bit(volatile bitmap_t *bitmap) +{ + return bitmap_set_first_unset_bit_upto_index(bitmap, UINT_MAX); +} + +#define CLEAR_EXCLUSIVELY true +#define CLEAR_NONEXCLUSIVELY false + +// Return true if this bit was the last in the bitmap, and it is now all zeroes +DISPATCH_ALWAYS_INLINE_NDEBUG +static bool +bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, + bool exclusively) +{ +#if DISPATCH_DEBUG + dispatch_assert(index < CONTINUATIONS_PER_BITMAP); +#endif + const bitmap_t mask = BITMAP_C(1) << index; + bitmap_t b; + + b = *bitmap; + if (exclusively == CLEAR_EXCLUSIVELY) { + if (slowpath((b & mask) == 0)) { + DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + } + } + + // and-and-fetch + b = dispatch_atomic_and(bitmap, ~mask, release); + return b == 0; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, + unsigned int bitmap_index, volatile bitmap_t *bitmap) +{ +#if DISPATCH_DEBUG + dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP); +#endif + const bitmap_t mask = BITMAP_C(1) << bitmap_index; + bitmap_t s, s_new, s_masked; + + if (!bitmap_is_full(*bitmap)) { + return; + } + s_new = *supermap; + for (;;) { + // No barriers because supermaps are only advisory, they + // don't protect access to other memory. 
+ s = s_new; + s_masked = s | mask; + if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || + !bitmap_is_full(*bitmap)) { + return; + } + } +} + +#pragma mark - +#pragma mark dispatch_alloc_continuation_alloc + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS +DISPATCH_ALWAYS_INLINE_NDEBUG +static dispatch_continuation_t +alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine) +{ + unsigned int i, index, continuation_index; + + // TODO: unroll if this is hot? + for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) { + index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]); + if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + } + if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) { + index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i], + REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1); + if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + } + return NULL; + +found: + continuation_index = (i * CONTINUATIONS_PER_BITMAP) + index; + return (dispatch_continuation_t)&magazine->fp_conts[continuation_index]; +} +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +DISPATCH_ALWAYS_INLINE_NDEBUG +static dispatch_continuation_t +alloc_continuation_from_magazine(struct dispatch_magazine_s *magazine) +{ + unsigned int s, b, index; + + for (s = 0; s < SUPERMAPS_PER_MAGAZINE; s++) { + volatile bitmap_t *supermap = supermap_address(magazine, s); + if (bitmap_is_full(*supermap)) { + continue; + } + for (b = 0; b < BITMAPS_PER_SUPERMAP; b++) { + volatile bitmap_t *bitmap = bitmap_address(magazine, s, b); + index = bitmap_set_first_unset_bit(bitmap); + if (index != NO_BITS_WERE_UNSET) { + set_last_found_page( + first_bitmap_in_same_page((bitmap_t *)bitmap)); + mark_bitmap_as_full_if_still_full(supermap, b, bitmap); + return continuation_address(magazine, s, b, index); + } + } + } + return NULL; +} + +DISPATCH_NOINLINE +static void +_dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) +{ +#if HAVE_MACH + kern_return_t kr; + mach_vm_size_t 
vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE; + mach_vm_offset_t vm_mask = ~MAGAZINE_MASK; + mach_vm_address_t vm_addr = vm_page_size; + while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), + MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT))) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + DISPATCH_CLIENT_CRASH("Could not allocate heap"); + } + _dispatch_temporary_resource_shortage(); + vm_addr = vm_page_size; + } + uintptr_t aligned_region = (uintptr_t)vm_addr; +#else // HAVE_MACH + const size_t region_sz = (1 + MAGAZINES_PER_HEAP) * BYTES_PER_MAGAZINE; + void *region_p; + while (!dispatch_assume((region_p = mmap(NULL, region_sz, + PROT_READ|PROT_WRITE, MAP_ANON | MAP_PRIVATE, + VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), 0)) != MAP_FAILED)) { + _dispatch_temporary_resource_shortage(); + } + uintptr_t region = (uintptr_t)region_p; + uintptr_t region_end = region + region_sz; + uintptr_t aligned_region, aligned_region_end; + uintptr_t bottom_slop_len, top_slop_len; + // Realign if needed; find the slop at top/bottom to unmap + if ((region & ~(MAGAZINE_MASK)) == 0) { + bottom_slop_len = 0; + aligned_region = region; + aligned_region_end = region_end - BYTES_PER_MAGAZINE; + top_slop_len = BYTES_PER_MAGAZINE; + } else { + aligned_region = (region & MAGAZINE_MASK) + BYTES_PER_MAGAZINE; + aligned_region_end = aligned_region + + (MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE); + bottom_slop_len = aligned_region - region; + top_slop_len = BYTES_PER_MAGAZINE - bottom_slop_len; + } +#if DISPATCH_DEBUG + // Double-check our math. 
+ dispatch_assert(aligned_region % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end > aligned_region); + dispatch_assert(top_slop_len % PAGE_SIZE == 0); + dispatch_assert(bottom_slop_len % PAGE_SIZE == 0); + dispatch_assert(aligned_region_end + top_slop_len == region_end); + dispatch_assert(region + bottom_slop_len == aligned_region); + dispatch_assert(region_sz == bottom_slop_len + top_slop_len + + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE); + if (bottom_slop_len) { + (void)dispatch_assume_zero(mprotect((void *)region, bottom_slop_len, + PROT_NONE)); + } + if (top_slop_len) { + (void)dispatch_assume_zero(mprotect((void *)aligned_region_end, + top_slop_len, PROT_NONE)); + } +#else + if (bottom_slop_len) { + (void)dispatch_assume_zero(munmap((void *)region, bottom_slop_len)); + } + if (top_slop_len) { + (void)dispatch_assume_zero(munmap((void *)aligned_region_end, + top_slop_len)); + } +#endif // DISPATCH_DEBUG +#endif // HAVE_MACH + + if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, + relaxed)) { + // If we lost the race to link in the new region, unmap the whole thing. 
+#if DISPATCH_DEBUG + (void)dispatch_assume_zero(mprotect((void *)aligned_region, + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE, PROT_NONE)); +#else + (void)dispatch_assume_zero(munmap((void *)aligned_region, + MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE)); +#endif + } +} + +DISPATCH_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_from_heap(dispatch_heap_t heap) +{ + dispatch_continuation_t cont; + + unsigned int cpu_number = _dispatch_cpu_number(); +#ifdef DISPATCH_DEBUG + dispatch_assert(cpu_number < NUM_CPU); +#endif + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // First try the continuations in the first page for this CPU + cont = alloc_continuation_from_first_page(&(heap[cpu_number])); + if (fastpath(cont)) { + return cont; + } +#endif + // Next, try the rest of the magazine for this CPU + cont = alloc_continuation_from_magazine(&(heap[cpu_number])); + return cont; +} + +DISPATCH_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_from_heap_slow(void) +{ + dispatch_heap_t *heap = &_dispatch_main_heap; + dispatch_continuation_t cont; + + for (;;) { + if (!fastpath(*heap)) { + _dispatch_alloc_try_create_heap(heap); + } + cont = _dispatch_alloc_continuation_from_heap(*heap); + if (fastpath(cont)) { + return cont; + } + // If we have tuned our parameters right, 99.999% of apps should + // never reach this point! The ones that do have gone off the rails... + // + // Magazine is full? Onto the next heap! + // We tried 'stealing' from other CPUs' magazines. The net effect + // was worse performance from more wasted search time and more + // cache contention. + + // rdar://11378331 + // Future optimization: start at the page we last used, start + // in the *zone* we last used. 
But this would only improve deeply + // pathological cases like dispatch_starfish + heap = &(*heap)->header.dh_next; + } +} + +DISPATCH_ALLOC_NOINLINE +static dispatch_continuation_t +_dispatch_alloc_continuation_alloc(void) +{ + dispatch_continuation_t cont; + + if (fastpath(_dispatch_main_heap)) { + // Start looking in the same page where we found a continuation + // last time. + bitmap_t *last = last_found_page(); + if (fastpath(last)) { + unsigned int i; + for (i = 0; i < BITMAPS_PER_PAGE; i++) { + bitmap_t *cur = last + i; + unsigned int index = bitmap_set_first_unset_bit(cur); + if (fastpath(index != NO_BITS_WERE_UNSET)) { + bitmap_t *supermap; + unsigned int bindex; + get_cont_and_indices_for_bitmap_and_index(cur, + index, &cont, &supermap, &bindex); + mark_bitmap_as_full_if_still_full(supermap, bindex, + cur); + return cont; + } + } + } + + cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap); + if (fastpath(cont)) { + return cont; + } + } + return _dispatch_alloc_continuation_from_heap_slow(); +} + +#pragma mark - +#pragma mark dispatch_alloc_continuation_free + +DISPATCH_NOINLINE +static void +_dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) +{ + void *page = madvisable_page_base_for_continuation(c); + if (!page) { + // page can't be madvised; maybe it contains non-continuations + return; + } + // Are all the continuations in this page unallocated? + volatile bitmap_t *page_bitmaps; + get_maps_and_indices_for_continuation((dispatch_continuation_t)page, NULL, + NULL, (bitmap_t **)&page_bitmaps, NULL); + unsigned int i; + for (i = 0; i < BITMAPS_PER_PAGE; i++) { + if (page_bitmaps[i] != 0) { + return; + } + } + // They are all unallocated, so we could madvise the page. Try to + // take ownership of them all. + int last_locked = 0; + do { + if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), + BITMAP_ALL_ONES, relaxed)) { + // We didn't get one; since there is a cont allocated in + // the page, we can't madvise. 
Give up and unlock all. + goto unlock; + } + } while (++last_locked < (signed)BITMAPS_PER_PAGE); +#if DISPATCH_DEBUG + //fprintf(stderr, "%s: madvised page %p for cont %p (next = %p), " + // "[%u+1]=%u bitmaps at %p\n", __func__, page, c, c->do_next, + // last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]); + // Scribble to expose use-after-free bugs + // madvise (syscall) flushes these stores + memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, PAGE_SIZE); +#endif + (void)dispatch_assume_zero(madvise(page, PAGE_SIZE, MADV_FREE)); + +unlock: + while (last_locked > 1) { + page_bitmaps[--last_locked] = BITMAP_C(0); + } + if (last_locked) { + dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); + } + return; +} + +DISPATCH_ALLOC_NOINLINE +static void +_dispatch_alloc_continuation_free(dispatch_continuation_t c) +{ + bitmap_t *b, *s; + unsigned int b_idx, idx; + + get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx); + bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY); + if (slowpath(s)) { + (void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY); + } + // We only try to madvise(2) pages outside of the first page. + // (Allocations in the first page do not have a supermap entry.) + if (slowpath(bitmap_now_empty) && slowpath(s)) { + return _dispatch_alloc_maybe_madvise_page(c); + } +} + +#pragma mark - +#pragma mark dispatch_alloc_init + +#if DISPATCH_DEBUG +static void +_dispatch_alloc_init(void) +{ + // Double-check our math. These are all compile time checks and don't + // generate code. + + dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP); + dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP); + dispatch_assert(sizeof(struct dispatch_magazine_header_s) == + SIZEOF_HEADER); + + dispatch_assert(sizeof(struct dispatch_continuation_s) <= + DISPATCH_CONTINUATION_SIZE); + + // Magazines should be the right size, so they pack neatly into an array of + // heaps. 
+ dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE); + + // The header and maps sizes should match what we computed. + dispatch_assert(SIZEOF_HEADER == + sizeof(((struct dispatch_magazine_s *)0x0)->header)); + dispatch_assert(SIZEOF_MAPS == + sizeof(((struct dispatch_magazine_s *)0x0)->maps)); + + // The main array of continuations should start at the second page, + // self-aligned. + dispatch_assert(offsetof(struct dispatch_magazine_s, conts) % + (CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0); + dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == PAGE_SIZE); + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // The continuations in the first page should actually fit within the first + // page. + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < PAGE_SIZE); + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) % + DISPATCH_CONTINUATION_SIZE == 0); + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) + + sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == PAGE_SIZE); +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS +} +#else +static inline void _dispatch_alloc_init(void) {} +#endif + +#endif // DISPATCH_ALLOCATOR + +#pragma mark - +#pragma mark dispatch_malloc + +#if DISPATCH_CONTINUATION_MALLOC + +#if DISPATCH_USE_MALLOCZONE +static malloc_zone_t *_dispatch_ccache_zone; + +#define calloc(n, s) malloc_zone_calloc(_dispatch_ccache_zone, (n), (s)) +#define free(c) malloc_zone_free(_dispatch_ccache_zone, (c)) + +static void +_dispatch_malloc_init(void) +{ + _dispatch_ccache_zone = malloc_create_zone(0, 0); + dispatch_assert(_dispatch_ccache_zone); + malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); +} +#else +static inline void _dispatch_malloc_init(void) {} +#endif // DISPATCH_USE_MALLOCZONE + +static dispatch_continuation_t +_dispatch_malloc_continuation_alloc(void) +{ + dispatch_continuation_t dc; + while (!(dc = fastpath(calloc(1, + 
ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { + _dispatch_temporary_resource_shortage(); + } + return dc; +} + +static inline void +_dispatch_malloc_continuation_free(dispatch_continuation_t c) +{ + free(c); +} +#endif // DISPATCH_CONTINUATION_MALLOC + +#pragma mark - +#pragma mark dispatch_continuation_alloc + +#if DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC +#if DISPATCH_USE_NANOZONE +extern boolean_t malloc_engaged_nano(void); +#else +#define malloc_engaged_nano() false +#endif // DISPATCH_USE_NANOZONE +static int _dispatch_use_dispatch_alloc; +#else +#define _dispatch_use_dispatch_alloc 1 +#endif // DISPATCH_CONTINUATION_MALLOC +#endif // DISPATCH_ALLOCATOR + +#if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \ + || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) +static void +_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED) +{ +#if DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC + bool use_dispatch_alloc = !malloc_engaged_nano(); + char *e = getenv("LIBDISPATCH_CONTINUATION_ALLOCATOR"); + if (e) { + use_dispatch_alloc = atoi(e); + } + _dispatch_use_dispatch_alloc = use_dispatch_alloc; +#endif // DISPATCH_CONTINUATION_MALLOC + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_init(); +#endif // DISPATCH_ALLOCATOR +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_init(); +#endif // DISPATCH_ALLOCATOR +} + +static void +_dispatch_continuation_alloc_once() +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init); +} +#else +static inline void _dispatch_continuation_alloc_once(void) {} +#endif // DISPATCH_ALLOCATOR ... || DISPATCH_CONTINUATION_MALLOC ... 
+ +dispatch_continuation_t +_dispatch_continuation_alloc_from_heap(void) +{ + _dispatch_continuation_alloc_once(); +#if DISPATCH_ALLOCATOR + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_continuation_alloc(); +#endif +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_continuation_alloc(); +#endif +} + +void +_dispatch_continuation_free_to_heap(dispatch_continuation_t c) +{ +#if DISPATCH_ALLOCATOR + if (_dispatch_use_dispatch_alloc) + return _dispatch_alloc_continuation_free(c); +#endif +#if DISPATCH_CONTINUATION_MALLOC + return _dispatch_malloc_continuation_free(c); +#endif +} + diff --git a/src/allocator_internal.h b/src/allocator_internal.h new file mode 100644 index 000000000..5f223f65f --- /dev/null +++ b/src/allocator_internal.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_ALLOCATOR_INTERNAL__ +#define __DISPATCH_ALLOCATOR_INTERNAL__ + +#ifndef DISPATCH_ALLOCATOR +#if TARGET_OS_MAC && (defined(__LP64__) || TARGET_OS_EMBEDDED) +#define DISPATCH_ALLOCATOR 1 +#endif +#endif + +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_NANOZONE +#define DISPATCH_USE_NANOZONE 0 +#endif +#ifndef DISPATCH_USE_NANOZONE +#if TARGET_OS_MAC && defined(__LP64__) && \ + (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 || \ + __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000) +#define DISPATCH_USE_NANOZONE 1 +#endif +#endif + +#ifndef DISPATCH_USE_MALLOCZONE +#if (TARGET_OS_MAC && !DISPATCH_USE_NANOZONE) || \ + (!TARGET_OS_MAC && HAVE_MALLOC_CREATE_ZONE) +#define DISPATCH_USE_MALLOCZONE 1 +#endif +#endif + +#ifndef DISPATCH_CONTINUATION_MALLOC +#if DISPATCH_USE_NANOZONE || !DISPATCH_ALLOCATOR +#define DISPATCH_CONTINUATION_MALLOC 1 +#endif +#endif + +#if !DISPATCH_ALLOCATOR && !DISPATCH_CONTINUATION_MALLOC +#error Invalid allocator configuration +#endif + +#if DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC +#define DISPATCH_ALLOC_NOINLINE DISPATCH_NOINLINE +#else +#define DISPATCH_ALLOC_NOINLINE +#endif + +#pragma mark - +#pragma mark DISPATCH_ALLOCATOR + +#if DISPATCH_ALLOCATOR + +// Configuration here! +#define NUM_CPU _dispatch_hw_config.cc_max_logical +#define MAGAZINES_PER_HEAP (NUM_CPU) + +// Do you care about compaction or performance? +#if TARGET_OS_EMBEDDED +#define PACK_FIRST_PAGE_WITH_CONTINUATIONS 1 +#else +#define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0 +#endif + +#if TARGET_OS_EMBEDDED +#define PAGES_PER_MAGAZINE 64 +#else +#define PAGES_PER_MAGAZINE 512 +#endif + +// Use the largest type your platform is comfortable doing atomic ops with. 
+#if defined(__x86_64__) // TODO: rdar://11477843 +typedef unsigned long bitmap_t; +#define BYTES_PER_BITMAP 8 +#else +typedef uint32_t bitmap_t; +#define BYTES_PER_BITMAP 4 +#endif + +#define BITMAP_C(v) ((bitmap_t)(v)) +#define BITMAP_ALL_ONES (~BITMAP_C(0)) + +// Stop configuring. + +#define CONTINUATIONS_PER_BITMAP (BYTES_PER_BITMAP * 8) +#define BITMAPS_PER_SUPERMAP (BYTES_PER_SUPERMAP * 8) + +#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * PAGE_SIZE) +#define CONSUMED_BYTES_PER_BITMAP (BYTES_PER_BITMAP + \ + (DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_BITMAP)) + +#define BYTES_PER_SUPERMAP BYTES_PER_BITMAP +#define CONSUMED_BYTES_PER_SUPERMAP (BYTES_PER_SUPERMAP + \ + (BITMAPS_PER_SUPERMAP * CONSUMED_BYTES_PER_BITMAP)) + +#define BYTES_PER_HEAP (BYTES_PER_MAGAZINE * MAGAZINES_PER_HEAP) + +#define BYTES_PER_PAGE PAGE_SIZE +#define CONTINUATIONS_PER_PAGE (BYTES_PER_PAGE / DISPATCH_CONTINUATION_SIZE) +#define BITMAPS_PER_PAGE (CONTINUATIONS_PER_PAGE / CONTINUATIONS_PER_BITMAP) + +// Assumption: metadata will be only in the first page. 
+#define SUPERMAPS_PER_MAGAZINE ((BYTES_PER_MAGAZINE - BYTES_PER_PAGE) / \ + CONSUMED_BYTES_PER_SUPERMAP) +#define BITMAPS_PER_MAGAZINE (SUPERMAPS_PER_MAGAZINE * BITMAPS_PER_SUPERMAP) +#define CONTINUATIONS_PER_MAGAZINE \ + (BITMAPS_PER_MAGAZINE * CONTINUATIONS_PER_BITMAP) + +#define HEAP_MASK (~(uintptr_t)(BYTES_PER_HEAP - 1)) +#define MAGAZINE_MASK (~(uintptr_t)(BYTES_PER_MAGAZINE - 1)) + +#define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x)) + +#if defined(__LP64__) +#define SIZEOF_HEADER 16 +#else +#define SIZEOF_HEADER 8 +#endif + +#define SIZEOF_SUPERMAPS (BYTES_PER_SUPERMAP * SUPERMAPS_PER_MAGAZINE) +#define SIZEOF_MAPS (BYTES_PER_BITMAP * BITMAPS_PER_SUPERMAP * \ + SUPERMAPS_PER_MAGAZINE) + +// header is expected to end on supermap's required alignment +#define HEADER_TO_SUPERMAPS_PADDING 0 +#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_CONTINUATION_SIZE( \ + SIZEOF_SUPERMAPS + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_HEADER)) +#define MAPS_TO_FPMAPS_PADDING (PADDING_TO_CONTINUATION_SIZE(SIZEOF_MAPS)) + +#define BYTES_LEFT_IN_FIRST_PAGE (BYTES_PER_PAGE - \ + (SIZEOF_HEADER + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_SUPERMAPS + \ + SUPERMAPS_TO_MAPS_PADDING + SIZEOF_MAPS + MAPS_TO_FPMAPS_PADDING)) + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define FULL_BITMAPS_IN_FIRST_PAGE \ + (BYTES_LEFT_IN_FIRST_PAGE / CONSUMED_BYTES_PER_BITMAP) +#define REMAINDER_IN_FIRST_PAGE (BYTES_LEFT_IN_FIRST_PAGE - \ + (FULL_BITMAPS_IN_FIRST_PAGE * CONSUMED_BYTES_PER_BITMAP) - \ + (FULL_BITMAPS_IN_FIRST_PAGE ? 0 : ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP))) + +#define REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE \ + (REMAINDER_IN_FIRST_PAGE / DISPATCH_CONTINUATION_SIZE) +#define CONTINUATIONS_IN_FIRST_PAGE (FULL_BITMAPS_IN_FIRST_PAGE * \ + CONTINUATIONS_PER_BITMAP) + REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE +#define BITMAPS_IN_FIRST_PAGE (FULL_BITMAPS_IN_FIRST_PAGE + \ + (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE == 0 ? 
0 : 1)) + +#define FPMAPS_TO_FPCONTS_PADDING (PADDING_TO_CONTINUATION_SIZE(\ + BYTES_PER_BITMAP * BITMAPS_IN_FIRST_PAGE)) + +#else // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define MAPS_TO_CONTS_PADDING BYTES_LEFT_IN_FIRST_PAGE + +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#define AFTER_CONTS_PADDING (BYTES_PER_MAGAZINE - (BYTES_PER_PAGE + \ + (DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_MAGAZINE))) + +// This is the object our allocator allocates: a chunk of memory rounded up +// from sizeof(struct dispatch_continuation_s) to the cacheline size, so +// unrelated continuations don't share cachelines. It'd be nice if +// dispatch_continuation_s included this rounding/padding, but it doesn't. +typedef char padded_continuation[DISPATCH_CONTINUATION_SIZE]; + +// A dispatch_heap_t is the base address of an array of dispatch_magazine_s, +// one magazine per CPU. +typedef struct dispatch_magazine_s * dispatch_heap_t; + +struct dispatch_magazine_header_s { + // Link to the next heap in the chain. Only used in magazine 0's header + dispatch_heap_t dh_next; + + // Points to the first bitmap in the page where this CPU succesfully + // allocated a continuation last time. Only used in the first heap. + bitmap_t *last_found_page; +}; + +// A magazine is a complex data structure. It must be exactly +// PAGES_PER_MAGAZINE * PAGE_SIZE bytes long, and that value must be a +// power of 2. (See magazine_for_continuation()). +struct dispatch_magazine_s { + // See above. + struct dispatch_magazine_header_s header; + + // Align supermaps as needed. +#if HEADER_TO_SUPERMAPS_PADDING > 0 + char _pad0[HEADER_TO_SUPERMAPS_PADDING]; +#endif + + // Second-level bitmap; each set bit means a bitmap_t in maps[][] + // is completely full (and can be skipped while searching). + bitmap_t supermaps[SUPERMAPS_PER_MAGAZINE]; + + // Align maps to a cacheline. 
+#if SUPERMAPS_TO_MAPS_PADDING > 0 + char _pad1[SUPERMAPS_TO_MAPS_PADDING]; +#endif + + // Each bit in maps[][] is the free/used state of a member of conts[][][]. + bitmap_t maps[SUPERMAPS_PER_MAGAZINE][BITMAPS_PER_SUPERMAP]; + + // Align fp_maps to a cacheline. +#if MAPS_TO_FPMAPS_PADDING > 0 + char _pad2[MAPS_TO_FPMAPS_PADDING]; +#endif + +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + // Bitmaps for the continuations that live in the first page, which + // are treated specially (they have faster search code). + bitmap_t fp_maps[BITMAPS_IN_FIRST_PAGE]; + + // Align fp_conts to cacheline. +#if FPMAPS_TO_FPCONTS_PADDING > 0 + char _pad3[FPMAPS_TO_FPCONTS_PADDING]; +#endif + + // Continuations that live in the first page. + padded_continuation fp_conts[CONTINUATIONS_IN_FIRST_PAGE]; + +#else // PACK_FIRST_PAGE_WITH_CONTINUATIONS + +#if MAPS_TO_CONTS_PADDING > 0 + char _pad4[MAPS_TO_CONTS_PADDING]; +#endif +#endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + + // This is the big array of continuations. + // This must start on a page boundary. + padded_continuation conts[SUPERMAPS_PER_MAGAZINE][BITMAPS_PER_SUPERMAP] + [CONTINUATIONS_PER_BITMAP]; + + // Fill the unused space to exactly BYTES_PER_MAGAZINE +#if AFTER_CONTS_PADDING > 0 + char _pad5[AFTER_CONTS_PADDING]; +#endif +}; + +#if DISPATCH_DEBUG +#define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) +#endif + +#endif // DISPATCH_ALLOCATOR + +#endif // __DISPATCH_ALLOCATOR_INTERNAL__ diff --git a/src/apply.c b/src/apply.c index 1a771145c..aa187a086 100644 --- a/src/apply.c +++ b/src/apply.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -20,77 +20,86 @@ #include "internal.h" +typedef void (*dispatch_apply_function_t)(void *, size_t); + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke(void *ctxt) +_dispatch_apply_invoke2(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; - typeof(da->da_func) const func = da->da_func; - void *const da_ctxt = da->da_ctxt; size_t idx, done = 0; - _dispatch_workitem_dec(); // this unit executes many items + idx = dispatch_atomic_inc_orig2o(da, da_index, acquire); + if (!fastpath(idx < iter)) goto out; + + // da_dc is only safe to access once the 'index lock' has been acquired + dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; + void *const da_ctxt = da->da_dc->dc_ctxt; + + _dispatch_perfmon_workitem_dec(); // this unit executes many items + + // Handle nested dispatch_apply rdar://problem/9294578 + size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + _dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested); - // Make nested dispatch_apply fall into serial case rdar://problem/9294578 - _dispatch_thread_setspecific(dispatch_apply_key, (void*)~0ul); // Striding is the responsibility of the caller. - while (fastpath((idx = dispatch_atomic_inc2o(da, da_index) - 1) < iter)) { + do { _dispatch_client_callout2(da_ctxt, idx, func); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); done++; - } - _dispatch_thread_setspecific(dispatch_apply_key, NULL); + idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed); + } while (fastpath(idx < iter)); + _dispatch_thread_setspecific(dispatch_apply_key, (void*)nested); - dispatch_atomic_release_barrier(); - - // The thread that finished the last workitem wakes up the (possibly waiting) + // The thread that finished the last workitem wakes up the possibly waiting // thread that called dispatch_apply. They could be one and the same. 
- if (done && (dispatch_atomic_add2o(da, da_done, done) == iter)) { + if (!dispatch_atomic_sub2o(da, da_todo, done, release)) { _dispatch_thread_semaphore_signal(da->da_sema); } - - if (dispatch_atomic_dec2o(da, da_thr_cnt) == 0) { +out: + if (dispatch_atomic_dec2o(da, da_thr_cnt, release) == 0) { _dispatch_continuation_free((dispatch_continuation_t)da); } } DISPATCH_NOINLINE -static void -_dispatch_apply2(void *ctxt) +void +_dispatch_apply_invoke(void *ctxt) { - _dispatch_apply_invoke(ctxt); + _dispatch_apply_invoke2(ctxt); } -static void -_dispatch_apply3(void *ctxt) +DISPATCH_NOINLINE +void +_dispatch_apply_redirect_invoke(void *ctxt) { - dispatch_apply_t da = ctxt; - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + dispatch_apply_t da = (dispatch_apply_t)ctxt; + dispatch_queue_t old_dq; + old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, da->da_queue); - _dispatch_apply_invoke(ctxt); + _dispatch_thread_setspecific(dispatch_queue_key, da->da_dc->dc_data); + _dispatch_apply_invoke2(ctxt); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } static void _dispatch_apply_serial(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; + dispatch_continuation_t dc = da->da_dc; + size_t const iter = da->da_iterations; size_t idx = 0; - _dispatch_workitem_dec(); // this unit executes many items + _dispatch_perfmon_workitem_dec(); // this unit executes many items do { - _dispatch_client_callout2(da->da_ctxt, idx, da->da_func); - _dispatch_workitem_inc(); - } while (++idx < da->da_iterations); + _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + _dispatch_perfmon_workitem_inc(); + } while (++idx < iter); _dispatch_continuation_free((dispatch_continuation_t)da); } -// 64 threads should be good enough for the short to mid term -#define DISPATCH_APPLY_MAX_CPUS 64 - DISPATCH_ALWAYS_INLINE static inline void 
_dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, @@ -123,8 +132,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, _dispatch_queue_push_list(dq, head, tail, continuation_cnt); // Call the first element directly - _dispatch_apply2(da); - _dispatch_workitem_inc(); + _dispatch_apply_invoke(da); + _dispatch_perfmon_workitem_inc(); _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); @@ -134,17 +143,17 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, static void _dispatch_apply_redirect(void *ctxt) { - dispatch_apply_t da = ctxt; + dispatch_apply_t da = (dispatch_apply_t)ctxt; uint32_t da_width = 2 * (da->da_thr_cnt - 1); - dispatch_queue_t dq = da->da_queue, rq = dq, tq; + dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; do { - uint32_t running = dispatch_atomic_add2o(rq, dq_running, da_width); - uint32_t width = rq->dq_width; + uint32_t running, width = rq->dq_width; + running = dispatch_atomic_add2o(rq, dq_running, da_width, relaxed); if (slowpath(running > width)) { uint32_t excess = width > 1 ? 
running - width : da_width; for (tq = dq; 1; tq = tq->do_targetq) { - (void)dispatch_atomic_sub2o(tq, dq_running, excess); + (void)dispatch_atomic_sub2o(tq, dq_running, excess, relaxed); if (tq == rq) { break; } @@ -157,13 +166,15 @@ _dispatch_apply_redirect(void *ctxt) } rq = rq->do_targetq; } while (slowpath(rq->do_targetq)); - _dispatch_apply_f2(rq, da, _dispatch_apply3); + _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); do { - (void)dispatch_atomic_sub2o(dq, dq_running, da_width); + (void)dispatch_atomic_sub2o(dq, dq_running, da_width, relaxed); dq = dq->do_targetq; } while (slowpath(dq->do_targetq)); } +#define DISPATCH_APPLY_MAX UINT16_MAX // must be < sqrt(SIZE_MAX) + DISPATCH_NOINLINE void dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, @@ -172,39 +183,51 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (slowpath(iterations == 0)) { return; } - + uint32_t thr_cnt = _dispatch_hw_config.cc_max_active; + size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + if (!slowpath(nested)) { + nested = iterations; + } else { + thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX + ? 
nested * iterations : DISPATCH_APPLY_MAX; + } + if (iterations < thr_cnt) { + thr_cnt = (uint32_t)iterations; + } + struct dispatch_continuation_s dc = { + .dc_func = (void*)func, + .dc_ctxt = ctxt, + }; dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); - - da->da_func = func; - da->da_ctxt = ctxt; - da->da_iterations = iterations; da->da_index = 0; - da->da_thr_cnt = _dispatch_hw_config.cc_max_active; - da->da_done = 0; - da->da_queue = NULL; - - if (da->da_thr_cnt > DISPATCH_APPLY_MAX_CPUS) { - da->da_thr_cnt = DISPATCH_APPLY_MAX_CPUS; - } - if (iterations < da->da_thr_cnt) { - da->da_thr_cnt = (uint32_t)iterations; + da->da_todo = iterations; + da->da_iterations = iterations; + da->da_nested = nested; + da->da_thr_cnt = thr_cnt; + da->da_dc = &dc; + + dispatch_queue_t old_dq; + old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); + if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { + dq = old_dq ? old_dq : _dispatch_get_root_queue(0, 0); + while (slowpath(dq->do_targetq)) { + dq = dq->do_targetq; + } } - if (slowpath(dq->dq_width <= 2) || slowpath(da->da_thr_cnt <= 1) || - slowpath(_dispatch_thread_getspecific(dispatch_apply_key))) { + if (slowpath(dq->dq_width <= 2) || slowpath(thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); if (slowpath(dq->do_targetq)) { if (slowpath(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { - da->da_queue = dq; + dc.dc_data = dq; return dispatch_sync_f(dq, da, _dispatch_apply_redirect); } } - dispatch_atomic_acquire_barrier(); _dispatch_thread_setspecific(dispatch_queue_key, dq); - _dispatch_apply_f2(dq, da, _dispatch_apply2); + _dispatch_apply_f2(dq, da, _dispatch_apply_invoke); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } @@ -215,8 +238,9 @@ static void _dispatch_apply_slow(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { 
- struct Block_basic *bb = (void *)_dispatch_Block_copy((void *)work); - dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + dispatch_block_t bb = _dispatch_Block_copy((void *)work); + dispatch_apply_f(iterations, dq, bb, + (dispatch_apply_function_t)_dispatch_Block_invoke(bb)); Block_release(bb); } #endif @@ -231,8 +255,8 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) return _dispatch_apply_slow(iterations, dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_apply_f(iterations, dq, bb, (void *)bb->Block_invoke); + dispatch_apply_f(iterations, dq, work, + (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } #endif @@ -242,9 +266,8 @@ void dispatch_stride(size_t offset, size_t stride, size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { - struct Block_basic *bb = (void *)work; - dispatch_stride_f(offset, stride, iterations, dq, bb, - (void *)bb->Block_invoke); + dispatch_stride_f(offset, stride, iterations, dq, work, + (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } #endif diff --git a/src/benchmark.c b/src/benchmark.c index f340b4431..49a4faa44 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -67,24 +67,28 @@ _dispatch_benchmark_init(void *context) #endif lcost /= cnt; - bdata->loop_cost = lcost; + bdata->loop_cost = lcost > UINT64_MAX ? 
UINT64_MAX : (uint64_t)lcost; } #ifdef __BLOCKS__ uint64_t dispatch_benchmark(size_t count, void (^block)(void)) { - struct Block_basic *bb = (void *)block; - return dispatch_benchmark_f(count, block, (void *)bb->Block_invoke); + return dispatch_benchmark_f(count, block, _dispatch_Block_invoke(block)); } #endif +static void +_dispatch_benchmark_dummy_function(void *ctxt DISPATCH_UNUSED) +{ +} + uint64_t dispatch_benchmark_f(size_t count, register void *ctxt, register void (*func)(void *)) { static struct __dispatch_benchmark_data_s bdata = { - .func = (void *)dummy_function, + .func = _dispatch_benchmark_dummy_function, .count = 10000000ul, // ten million }; static dispatch_once_t pred; @@ -118,7 +122,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, #endif big_denom *= count; conversion /= big_denom; - ns = conversion; + ns = conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; return ns - bdata.loop_cost; } diff --git a/src/data.c b/src/data.c index 804896460..feb601281 100644 --- a/src/data.c +++ b/src/data.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,22 +26,15 @@ // a pointer to represented memory. A composite data object specifies the total // size of data it represents and list of constituent records. // -// A leaf data object has a single entry in records[], the object size is the -// same as records[0].length and records[0].from is always 0. In other words, a -// leaf data object always points to a full represented buffer, so a composite +// A leaf data object always points to a full represented buffer, a composite // dispatch data object is needed to represent a subrange of a memory region. 
+#if USE_OBJC +#define _dispatch_data_retain(x) _dispatch_objc_retain(x) +#define _dispatch_data_release(x) _dispatch_objc_release(x) +#else #define _dispatch_data_retain(x) dispatch_retain(x) #define _dispatch_data_release(x) dispatch_release(x) - -#if DISPATCH_DATA_MOVABLE -#if DISPATCH_USE_RESOLVERS && !defined(DISPATCH_RESOLVED_VARIANT) -#error Resolved variant required for movable -#endif -static const dispatch_block_t _dispatch_data_destructor_unlock = ^{ - DISPATCH_CRASH("unlock destructor called"); -}; -#define DISPATCH_DATA_DESTRUCTOR_UNLOCK (_dispatch_data_destructor_unlock) #endif const dispatch_block_t _dispatch_data_destructor_free = ^{ @@ -56,22 +49,32 @@ const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ DISPATCH_CRASH("vmdeallocate destructor called"); }; +const dispatch_block_t _dispatch_data_destructor_inline = ^{ + DISPATCH_CRASH("inline destructor called"); +}; + struct dispatch_data_s _dispatch_data_empty = { - .do_vtable = DISPATCH_VTABLE(data), + .do_vtable = DISPATCH_DATA_EMPTY_CLASS, +#if !USE_OBJC .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_next = DISPATCH_OBJECT_LISTLESS, +#endif }; -static dispatch_data_t -_dispatch_data_init(size_t n) +DISPATCH_ALWAYS_INLINE +static inline dispatch_data_t +_dispatch_data_alloc(size_t n, size_t extra) { - dispatch_data_t data = _dispatch_alloc(DISPATCH_VTABLE(data), - sizeof(struct dispatch_data_s) + n * sizeof(range_record)); + dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS, + sizeof(struct dispatch_data_s) + extra + + (n ? 
n * sizeof(range_record) - sizeof(data->buf) : 0)); data->num_records = n; +#if !USE_OBJC data->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); data->do_next = DISPATCH_OBJECT_LISTLESS; +#endif return data; } @@ -84,7 +87,9 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) { // do nothing } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) { - vm_deallocate(mach_task_self(), (vm_address_t)buffer, size); + mach_vm_size_t vm_size = size; + mach_vm_address_t vm_addr = (uintptr_t)buffer; + mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); } else { if (!queue) { queue = dispatch_get_global_queue( @@ -94,11 +99,46 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_queue_t queue, dispatch_block_t destructor) +{ + data->buf = buffer; + data->size = size; + data->destructor = destructor; +#if DISPATCH_DATA_USE_LEAF_MEMBER + data->leaf = true; + data->num_records = 1; +#endif + if (queue) { + _dispatch_retain(queue); + data->do_targetq = queue; + } +} + +void +dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_block_t destructor) +{ + if (!buffer || !size) { + if (destructor) { + _dispatch_data_destroy_buffer(buffer, size, NULL, + _dispatch_Block_copy(destructor)); + } + buffer = NULL; + size = 0; + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; + } + _dispatch_data_init(data, buffer, size, NULL, destructor); +} + dispatch_data_t dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, dispatch_block_t destructor) { dispatch_data_t data; + void *data_buf = NULL; if (!buffer || !size) { // Empty data requested so return the singleton empty object. 
Call // destructor immediately in this case to ensure any unused associated @@ -109,35 +149,60 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, } return dispatch_data_empty; } - data = _dispatch_data_init(1); - // Leaf objects always point to the entirety of the memory region - data->leaf = true; - data->size = size; - data->records[0].from = 0; - data->records[0].length = size; if (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT) { // The default destructor was provided, indicating the data should be // copied. - void *data_buf = malloc(size); + data_buf = malloc(size); if (slowpath(!data_buf)) { - free(data); return NULL; } buffer = memcpy(data_buf, buffer, size); - data->destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + data = _dispatch_data_alloc(0, 0); + destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + } else if (destructor == DISPATCH_DATA_DESTRUCTOR_INLINE) { + data = _dispatch_data_alloc(0, size); + buffer = memcpy((void*)data + sizeof(struct dispatch_data_s), buffer, + size); + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; } else { - data->destructor = _dispatch_Block_copy(destructor); -#if DISPATCH_DATA_MOVABLE - // A non-default destructor was provided, indicating the system does not - // own the buffer. Mark the object as locked since the application has - // direct access to the buffer and it cannot be reallocated/moved. 
- data->locked = 1; -#endif + data = _dispatch_data_alloc(0, 0); + destructor = _dispatch_Block_copy(destructor); } - data->records[0].data_object = (void*)buffer; - if (queue) { - _dispatch_retain(queue); - data->do_targetq = queue; + _dispatch_data_init(data, buffer, size, queue, destructor); + return data; +} + +dispatch_data_t +dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue, + dispatch_function_t destructor_function) +{ + dispatch_block_t destructor = (dispatch_block_t)destructor_function; + if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT && + destructor != DISPATCH_DATA_DESTRUCTOR_FREE && + destructor != DISPATCH_DATA_DESTRUCTOR_NONE && + destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE && + destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) { + destructor = ^{ destructor_function((void*)buffer); }; + } + return dispatch_data_create(buffer, size, queue, destructor); +} + +dispatch_data_t +dispatch_data_create_alloc(size_t size, void** buffer_ptr) +{ + dispatch_data_t data = dispatch_data_empty; + void *buffer = NULL; + + if (slowpath(!size)) { + goto out; + } + data = _dispatch_data_alloc(0, size); + buffer = (void*)data + sizeof(struct dispatch_data_s); + _dispatch_data_init(data, buffer, size, NULL, + DISPATCH_DATA_DESTRUCTOR_NONE); +out: + if (buffer_ptr) { + *buffer_ptr = buffer; } return data; } @@ -148,18 +213,12 @@ _dispatch_data_dispose(dispatch_data_t dd) dispatch_block_t destructor = dd->destructor; if (destructor == NULL) { size_t i; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { _dispatch_data_release(dd->records[i].data_object); } -#if DISPATCH_DATA_MOVABLE - } else if (destructor == DISPATCH_DATA_DESTRUCTOR_UNLOCK) { - dispatch_data_t data = (dispatch_data_t)dd->records[0].data_object; - (void)dispatch_atomic_dec2o(data, locked); - _dispatch_data_release(data); -#endif } else { - _dispatch_data_destroy_buffer(dd->records[0].data_object, - dd->records[0].length, 
dd->do_targetq, destructor); + _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq, + destructor); } } @@ -167,22 +226,23 @@ size_t _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) { size_t offset = 0; - if (dd->leaf) { - offset += snprintf(&buf[offset], bufsiz - offset, - "leaf: %d, size: %zd, data: %p", dd->leaf, dd->size, - dd->records[0].data_object); + offset += dsnprintf(&buf[offset], bufsiz - offset, "data[%p] = { ", dd); + if (_dispatch_data_leaf(dd)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, + "leaf, size = %zd, buf = %p ", dd->size, dd->buf); } else { - offset += snprintf(&buf[offset], bufsiz - offset, - "leaf: %d, size: %zd, num_records: %zd", dd->leaf, - dd->size, dd->num_records); + offset += dsnprintf(&buf[offset], bufsiz - offset, + "composite, size = %zd, num_records = %zd ", dd->size, + _dispatch_data_num_records(dd)); size_t i; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { range_record r = dd->records[i]; - offset += snprintf(&buf[offset], bufsiz - offset, - "records[%zd] from: %zd, length %zd, data_object: %p", i, + offset += dsnprintf(&buf[offset], bufsiz - offset, "record[%zd] = " + "{ from = %zd, length = %zd, data_object = %p }, ", i, r.from, r.length, r.data_object); } } + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } @@ -204,22 +264,29 @@ dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) _dispatch_data_retain(dd1); return dd1; } - data = _dispatch_data_init(dd1->num_records + dd2->num_records); + data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) + + _dispatch_data_num_records(dd2), 0); data->size = dd1->size + dd2->size; // Copy the constituent records into the newly created data object - memcpy(data->records, dd1->records, dd1->num_records * - sizeof(range_record)); - memcpy(data->records + dd1->num_records, dd2->records, dd2->num_records * - sizeof(range_record)); // Reference leaf 
objects as sub-objects - if (dd1->leaf) { + if (_dispatch_data_leaf(dd1)) { + data->records[0].from = 0; + data->records[0].length = dd1->size; data->records[0].data_object = dd1; + } else { + memcpy(data->records, dd1->records, _dispatch_data_num_records(dd1) * + sizeof(range_record)); } - if (dd2->leaf) { - data->records[dd1->num_records].data_object = dd2; + if (_dispatch_data_leaf(dd2)) { + data->records[_dispatch_data_num_records(dd1)].from = 0; + data->records[_dispatch_data_num_records(dd1)].length = dd2->size; + data->records[_dispatch_data_num_records(dd1)].data_object = dd2; + } else { + memcpy(data->records + _dispatch_data_num_records(dd1), dd2->records, + _dispatch_data_num_records(dd2) * sizeof(range_record)); } size_t i; - for (i = 0; i < data->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(data); ++i) { _dispatch_data_retain(data->records[i].data_object); } return data; @@ -238,8 +305,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, _dispatch_data_retain(dd); return dd; } - if (dd->leaf) { - data = _dispatch_data_init(1); + if (_dispatch_data_leaf(dd)) { + data = _dispatch_data_alloc(1, 0); data->size = length; data->records[0].from = offset; data->records[0].length = length; @@ -251,10 +318,11 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // the specified offset data = dispatch_data_empty; size_t i = 0, bytes_left = length; - while (i < dd->num_records && offset >= dd->records[i].length) { + while (i < _dispatch_data_num_records(dd) && + offset >= dd->records[i].length) { offset -= dd->records[i++].length; } - while (i < dd->num_records) { + while (i < _dispatch_data_num_records(dd)) { size_t record_len = dd->records[i].length - offset; if (record_len > bytes_left) { record_len = bytes_left; @@ -287,32 +355,20 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, size_t *size_ptr) { dispatch_data_t data = dd; - void *buffer = NULL; + const void *buffer = NULL; size_t 
size = dd->size, offset = 0; if (!size) { data = dispatch_data_empty; goto out; } - if (!dd->leaf && dd->num_records == 1 && - ((dispatch_data_t)dd->records[0].data_object)->leaf) { + if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { offset = dd->records[0].from; - dd = (dispatch_data_t)(dd->records[0].data_object); - } - if (dd->leaf) { -#if DISPATCH_DATA_MOVABLE - data = _dispatch_data_init(1); - // Make sure the underlying leaf object does not move the backing buffer - (void)dispatch_atomic_inc2o(dd, locked); - data->size = size; - data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; - data->records[0].data_object = dd; - data->records[0].from = offset; - data->records[0].length = size; - _dispatch_data_retain(dd); -#else + dd = dd->records[0].data_object; + } + if (_dispatch_data_leaf(dd)) { _dispatch_data_retain(data); -#endif - buffer = dd->records[0].data_object + offset; + buffer = dd->buf + offset; goto out; } // Composite data object, copy the represented buffers @@ -324,7 +380,7 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, } dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, size_t off, const void* buf, size_t len) { - memcpy(buffer + off, buf, len); + memcpy((void*)buffer + off, buf, len); return (bool)true; }); data = dispatch_data_create(buffer, size, NULL, @@ -341,56 +397,50 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, static bool _dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from, - size_t size, dispatch_data_applier_t applier) + size_t size, void *ctxt, dispatch_data_applier_function_t applier) { bool result = true; dispatch_data_t data = dd; const void *buffer; dispatch_assert(dd->size); -#if DISPATCH_DATA_MOVABLE - if (dd->leaf) { - data = _dispatch_data_init(1); - // Make sure the underlying leaf object does not move the backing buffer - (void)dispatch_atomic_inc2o(dd, locked); - 
data->size = size; - data->destructor = DISPATCH_DATA_DESTRUCTOR_UNLOCK; - data->records[0].data_object = dd; - data->records[0].from = from; - data->records[0].length = size; - _dispatch_data_retain(dd); - buffer = dd->records[0].data_object + from; - result = applier(data, offset, buffer, size); - _dispatch_data_release(data); - return result; - } -#else - if (!dd->leaf && dd->num_records == 1 && - ((dispatch_data_t)dd->records[0].data_object)->leaf) { + if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { from = dd->records[0].from; - dd = (dispatch_data_t)(dd->records[0].data_object); + dd = dd->records[0].data_object; } - if (dd->leaf) { - buffer = dd->records[0].data_object + from; - return applier(data, offset, buffer, size); + if (_dispatch_data_leaf(dd)) { + buffer = dd->buf + from; + return _dispatch_client_callout3(ctxt, data, offset, buffer, size, + applier); } -#endif size_t i; - for (i = 0; i < dd->num_records && result; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) { result = _dispatch_data_apply(dd->records[i].data_object, - offset, dd->records[i].from, dd->records[i].length, + offset, dd->records[i].from, dd->records[i].length, ctxt, applier); offset += dd->records[i].length; } return result; } +bool +dispatch_data_apply_f(dispatch_data_t dd, void *ctxt, + dispatch_data_applier_function_t applier) +{ + if (!dd->size) { + return true; + } + return _dispatch_data_apply(dd, 0, 0, dd->size, ctxt, applier); +} + bool dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier) { if (!dd->size) { return true; } - return _dispatch_data_apply(dd, 0, 0, dd->size, applier); + return _dispatch_data_apply(dd, 0, 0, dd->size, applier, + (dispatch_data_applier_function_t)_dispatch_Block_invoke(applier)); } // Returs either a leaf object or an object composed of a single leaf object @@ -405,14 +455,14 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t 
location, dispatch_data_t data; size_t size = dd->size, offset = 0, from = 0; while (true) { - if (dd->leaf) { + if (_dispatch_data_leaf(dd)) { _dispatch_data_retain(dd); *offset_ptr = offset; if (size == dd->size) { return dd; } else { // Create a new object for the requested subrange of the leaf - data = _dispatch_data_init(1); + data = _dispatch_data_alloc(1, 0); data->size = size; data->records[0].from = from; data->records[0].length = size; @@ -422,13 +472,14 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t location, } else { // Find record at the specified location size_t i, pos; - for (i = 0; i < dd->num_records; ++i) { + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { pos = offset + dd->records[i].length; if (location < pos) { size = dd->records[i].length; from = dd->records[i].from; - data = (dispatch_data_t)(dd->records[i].data_object); - if (dd->num_records == 1 && data->leaf) { + data = dd->records[i].data_object; + if (_dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(data)) { // Return objects composed of a single leaf node *offset_ptr = offset; _dispatch_data_retain(dd); @@ -445,3 +496,69 @@ dispatch_data_copy_region(dispatch_data_t dd, size_t location, } } } + +#if HAVE_MACH + +#ifndef MAP_MEM_VM_COPY +#define MAP_MEM_VM_COPY 0x200000 // +#endif + +mach_port_t +dispatch_data_make_memory_entry(dispatch_data_t dd) +{ + mach_port_t mep = MACH_PORT_NULL; + memory_object_size_t mos; + mach_vm_size_t vm_size = dd->size; + mach_vm_address_t vm_addr; + vm_prot_t flags; + kern_return_t kr; + bool copy = (dd->destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE); + +retry: + if (copy) { + vm_addr = vm_page_size; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE); + if (kr) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + } + return mep; + } + dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, + size_t off, const void* buf, size_t len) { + memcpy((void*)(vm_addr + off), 
buf, len); + return (bool)true; + }); + } else { + vm_addr = (uintptr_t)dd->buf; + } + flags = VM_PROT_DEFAULT|VM_PROT_IS_MASK|MAP_MEM_VM_COPY; + mos = vm_size; + kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags, + &mep, MACH_PORT_NULL); + if (kr == KERN_INVALID_VALUE) { + // Fallback in case MAP_MEM_VM_COPY is not supported + flags &= ~MAP_MEM_VM_COPY; + kr = mach_make_memory_entry_64(mach_task_self(), &mos, vm_addr, flags, + &mep, MACH_PORT_NULL); + } + if (dispatch_assume_zero(kr)) { + mep = MACH_PORT_NULL; + } else if (mos < vm_size) { + // Memory object was truncated, e.g. due to lack of MAP_MEM_VM_COPY + kr = mach_port_deallocate(mach_task_self(), mep); + (void)dispatch_assume_zero(kr); + if (!copy) { + copy = true; + goto retry; + } + mep = MACH_PORT_NULL; + } + if (copy) { + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + (void)dispatch_assume_zero(kr); + } + return mep; +} +#endif // HAVE_MACH diff --git a/src/data.m b/src/data.m new file mode 100644 index 000000000..3e3eee142 --- /dev/null +++ b/src/data.m @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if USE_OBJC + +#if !__OBJC2__ +#error "Cannot build with legacy ObjC runtime" +#endif +#if _OS_OBJECT_OBJC_ARC +#error "Cannot build with ARC" +#endif + +#include + +@interface DISPATCH_CLASS(data) () +- (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy + freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm; +- (BOOL)_bytesAreVM; +@end + +@interface DISPATCH_CLASS(data_empty) : DISPATCH_CLASS(data) +@end + +@implementation DISPATCH_CLASS(data) + ++ (id)allocWithZone:(NSZone *) DISPATCH_UNUSED zone { + return _dispatch_objc_alloc(self, sizeof(struct dispatch_data_s)); +} + +- (id)init { + return [self initWithBytes:NULL length:0 copy:NO freeWhenDone:NO + bytesAreVM:NO]; +} + +- (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy + freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm { + dispatch_block_t destructor; + if (copy) { + destructor = DISPATCH_DATA_DESTRUCTOR_DEFAULT; + } else if (freeBytes) { + if (vm) { + destructor = DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; + } else { + destructor = DISPATCH_DATA_DESTRUCTOR_FREE; + } + } else { + destructor = DISPATCH_DATA_DESTRUCTOR_NONE; + } + dispatch_data_init(self, bytes, length, destructor); + return self; +} + +#define _dispatch_data_objc_dispose(selector) \ + struct dispatch_data_s *dd = (void*)self; \ + _dispatch_data_dispose(self); \ + dispatch_queue_t tq = dd->do_targetq; \ + dispatch_function_t func = dd->finalizer; \ + void *ctxt = dd->ctxt; \ + [super selector]; \ + if (func && ctxt) { \ + if (!tq) { \ + tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0);\ + } \ + dispatch_async_f(tq, ctxt, func); \ + } \ + if (tq) { \ + _dispatch_release(tq); \ + } + +- (void)dealloc { + _dispatch_data_objc_dispose(dealloc); +} + +- (void)finalize { + _dispatch_data_objc_dispose(finalize); +} + +- (BOOL)_bytesAreVM { + struct dispatch_data_s *dd = (void*)self; + return dd->destructor == 
DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; +} + +- (void)_setContext:(void*)context { + struct dispatch_data_s *dd = (void*)self; + dd->ctxt = context; +} + +- (void*)_getContext { + struct dispatch_data_s *dd = (void*)self; + return dd->ctxt; +} + +- (void)_setFinalizer:(dispatch_function_t)finalizer { + struct dispatch_data_s *dd = (void*)self; + dd->finalizer = finalizer; +} + +- (void)_setTargetQueue:(dispatch_queue_t)queue { + struct dispatch_data_s *dd = (void*)self; + _dispatch_retain(queue); + dispatch_queue_t prev; + prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release); + if (prev) _dispatch_release(prev); +} + +- (NSString *)debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + _dispatch_data_debug(self, buf, sizeof(buf)); + return [nsstring stringWithFormat: + [nsstring stringWithUTF8String:"<%s: %s>"], + class_getName([self class]), buf]; +} + +@end + +@implementation DISPATCH_CLASS(data_empty) + +// Force non-lazy class realization rdar://10640168 ++ (void)load { +} + +- (id)retain { + return (id)self; +} + +- (oneway void)release { +} + +- (id)autorelease { + return (id)self; +} + +- (NSUInteger)retainCount { + return ULONG_MAX; +} + ++ (id)allocWithZone:(NSZone *) DISPATCH_UNUSED zone { + return (id)&_dispatch_data_empty; +} + +- (void)_setContext:(void*) DISPATCH_UNUSED context { +} + +- (void*)_getContext { + return NULL; +} + +- (void)_setFinalizer:(dispatch_function_t) DISPATCH_UNUSED finalizer { +} + +- (void)_setTargetQueue:(dispatch_queue_t) DISPATCH_UNUSED queue { +} + +@end + +#endif // USE_OBJC diff --git a/src/data_internal.h b/src/data_internal.h index 2dec5f001..d0de8bb9c 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2012 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,24 +32,61 @@ #include // for HeaderDoc #endif +#if defined(__LP64__) && !defined(DISPATCH_DATA_USE_LEAF_MEMBER) && !USE_OBJC +// explicit leaf member is free on 64bit due to padding +#define DISPATCH_DATA_USE_LEAF_MEMBER 1 +#endif + typedef struct range_record_s { - void* data_object; + dispatch_data_t data_object; size_t from; size_t length; } range_record; +#if USE_OBJC +#if OS_OBJECT_USE_OBJC +@interface DISPATCH_CLASS(data) : NSObject +@end +#endif +DISPATCH_OBJC_CLASS_DECL(data); +DISPATCH_OBJC_CLASS_DECL(data_empty); +#define DISPATCH_DATA_CLASS DISPATCH_OBJC_CLASS(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_OBJC_CLASS(data_empty) +#else // USE_OBJC DISPATCH_CLASS_DECL(data); +#define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data) +#endif // USE_OBJC + struct dispatch_data_s { +#if USE_OBJC + const void *do_vtable; + dispatch_queue_t do_targetq; + void *ctxt; + void *finalizer; +#else // USE_OBJC DISPATCH_STRUCT_HEADER(data); -#if DISPATCH_DATA_MOVABLE - unsigned int locked; -#endif +#endif // USE_OBJC +#if DISPATCH_DATA_USE_LEAF_MEMBER bool leaf; +#endif dispatch_block_t destructor; size_t size, num_records; - range_record records[]; + union { + const void* buf; + range_record records[0]; + }; }; +#if DISPATCH_DATA_USE_LEAF_MEMBER +#define _dispatch_data_leaf(d) ((d)->leaf) +#define _dispatch_data_num_records(d) ((d)->num_records) +#else +#define _dispatch_data_leaf(d) ((d)->num_records ? 0 : ((d)->size ? 1 : 0)) +#define _dispatch_data_num_records(d) \ + (_dispatch_data_leaf(d) ? 
1 : (d)->num_records) +#endif // DISPATCH_DATA_USE_LEAF_MEMBER + typedef dispatch_data_t (*dispatch_transform_t)(dispatch_data_t data); struct dispatch_data_format_type_s { @@ -60,7 +97,31 @@ struct dispatch_data_format_type_s { dispatch_transform_t encode; }; +void dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, + dispatch_block_t destructor); void _dispatch_data_dispose(dispatch_data_t data); size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); +const dispatch_block_t _dispatch_data_destructor_inline; +#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) + +#if !__OBJC2__ + +static inline const void* +_dispatch_data_map_direct(dispatch_data_t dd) +{ + size_t offset = 0; + if (slowpath(!dd->size)) { + return NULL; + } + if (slowpath(!_dispatch_data_leaf(dd)) && + _dispatch_data_num_records(dd) == 1 && + _dispatch_data_leaf(dd->records[0].data_object)) { + offset = dd->records[0].from; + dd = dd->records[0].data_object; + } + return fastpath(_dispatch_data_leaf(dd)) ? (dd->buf + offset) : NULL; +} + +#endif // !__OBJC2__ #endif // __DISPATCH_DATA_INTERNAL__ diff --git a/src/init.c b/src/init.c index 8f1456edd..5a8b4bb43 100644 --- a/src/init.c +++ b/src/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -55,24 +55,12 @@ dispatch_atfork_parent(void) { } -void -dummy_function(void) -{ -} - -long -dummy_function_r0(void) -{ - return 0; -} - #pragma mark - #pragma mark dispatch_globals #if DISPATCH_COCOA_COMPAT void (*dispatch_begin_thread_4GC)(void); void (*dispatch_end_thread_4GC)(void); -void (*dispatch_no_worker_threads_4GC)(void); void *(*_dispatch_begin_NSAutoReleasePool)(void); void (*_dispatch_end_NSAutoReleasePool)(void *); #endif @@ -83,13 +71,15 @@ pthread_key_t dispatch_sema4_key; pthread_key_t dispatch_cache_key; pthread_key_t dispatch_io_key; pthread_key_t dispatch_apply_key; -#if DISPATCH_PERF_MON +#if DISPATCH_INTROSPECTION +pthread_key_t dispatch_introspection_key; +#elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif #endif // !DISPATCH_USE_DIRECT_TSD struct _dispatch_hw_config_s _dispatch_hw_config; -bool _dispatch_safe_fork = true; +bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork; DISPATCH_NOINLINE bool @@ -98,8 +88,16 @@ _dispatch_is_multithreaded(void) return !_dispatch_safe_fork; } + +DISPATCH_NOINLINE +bool +_dispatch_is_fork_of_multithreaded_parent(void) +{ + return _dispatch_child_of_unsafe_fork; +} + const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 3, + .dqo_version = 4, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), .dqo_flags = 0, @@ -127,6 +125,7 @@ struct dispatch_queue_s _dispatch_main_q = { .dq_label = "com.apple.main-thread", .dq_running = 1, .dq_width = 1, + .dq_is_thread_bound = 1, .dq_serialnum = 1, }; @@ -158,32 +157,42 @@ DISPATCH_VTABLE_INSTANCE(queue, .do_type = DISPATCH_QUEUE_TYPE, .do_kind = "queue", .do_dispose = _dispatch_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, + .do_invoke = _dispatch_queue_invoke, + .do_probe = _dispatch_queue_probe, .do_debug = dispatch_queue_debug, ); 
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, - .do_type = DISPATCH_QUEUE_GLOBAL_TYPE, + .do_type = DISPATCH_QUEUE_ROOT_TYPE, .do_kind = "global-queue", + .do_dispose = _dispatch_pthread_root_queue_dispose, + .do_probe = _dispatch_root_queue_probe, + .do_debug = dispatch_queue_debug, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, + .do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_kind = "runloop-queue", + .do_dispose = _dispatch_runloop_queue_dispose, + .do_invoke = _dispatch_queue_invoke, + .do_probe = _dispatch_runloop_queue_probe, .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_queue_probe_root, ); DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", .do_invoke = _dispatch_mgr_thread, + .do_probe = _dispatch_mgr_queue_probe, .do_debug = dispatch_queue_debug, - .do_probe = _dispatch_mgr_wakeup, ); DISPATCH_VTABLE_INSTANCE(queue_specific_queue, .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, .do_kind = "queue-context", .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, + .do_invoke = (void*)_dispatch_queue_invoke, + .do_probe = (void *)_dispatch_queue_probe, .do_debug = (void *)dispatch_queue_debug, ); @@ -195,46 +204,56 @@ DISPATCH_VTABLE_INSTANCE(queue_attr, DISPATCH_VTABLE_INSTANCE(source, .do_type = DISPATCH_SOURCE_KEVENT_TYPE, .do_kind = "kevent-source", - .do_invoke = _dispatch_source_invoke, .do_dispose = _dispatch_source_dispose, + .do_invoke = _dispatch_source_invoke, .do_probe = _dispatch_source_probe, .do_debug = _dispatch_source_debug, ); +DISPATCH_VTABLE_INSTANCE(mach, + .do_type = DISPATCH_MACH_CHANNEL_TYPE, + .do_kind = "mach-channel", + .do_dispose = _dispatch_mach_dispose, + .do_invoke = _dispatch_mach_invoke, + .do_probe = _dispatch_mach_probe, + .do_debug = _dispatch_mach_debug, +); + +DISPATCH_VTABLE_INSTANCE(mach_msg, + .do_type = DISPATCH_MACH_MSG_TYPE, + .do_kind = "mach-msg", + .do_dispose = 
_dispatch_mach_msg_dispose, + .do_invoke = _dispatch_mach_msg_invoke, + .do_debug = _dispatch_mach_msg_debug, +); + +#if !USE_OBJC DISPATCH_VTABLE_INSTANCE(data, .do_type = DISPATCH_DATA_TYPE, .do_kind = "data", .do_dispose = _dispatch_data_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, .do_debug = _dispatch_data_debug, ); +#endif DISPATCH_VTABLE_INSTANCE(io, .do_type = DISPATCH_IO_TYPE, .do_kind = "channel", .do_dispose = _dispatch_io_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, + .do_debug = _dispatch_io_debug, ); DISPATCH_VTABLE_INSTANCE(operation, .do_type = DISPATCH_OPERATION_TYPE, .do_kind = "operation", .do_dispose = _dispatch_operation_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, + .do_debug = _dispatch_operation_debug, ); DISPATCH_VTABLE_INSTANCE(disk, .do_type = DISPATCH_DISK_TYPE, .do_kind = "disk", .do_dispose = _dispatch_disk_dispose, - .do_invoke = NULL, - .do_probe = (void *)dummy_function_r0, - .do_debug = (void *)dummy_function_r0, ); void @@ -243,15 +262,13 @@ _dispatch_vtable_init(void) #if USE_OBJC // ObjC classes and dispatch vtables are co-located via linker order and // alias files, verify correct layout during initialization rdar://10640168 - #define DISPATCH_OBJC_CLASS(name) \ - DISPATCH_CONCAT(OBJC_CLASS_$_,DISPATCH_CLASS(name)) - extern void *DISPATCH_OBJC_CLASS(semaphore); + DISPATCH_OBJC_CLASS_DECL(semaphore); dispatch_assert((char*)DISPATCH_VTABLE(semaphore) - - (char*)&DISPATCH_OBJC_CLASS(semaphore) == 0); + (char*)DISPATCH_OBJC_CLASS(semaphore) == 0); dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable)) - - (char*)&DISPATCH_OBJC_CLASS(semaphore) == + - (char*)DISPATCH_OBJC_CLASS(semaphore) == sizeof(_os_object_class_s)); -#endif +#endif // USE_OBJC } #pragma mark - @@ -275,21 +292,28 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) #endif } +static 
dispatch_once_t _dispatch_build_pred; + +char* +_dispatch_get_build(void) +{ + dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); + return _dispatch_build; +} + #define _dispatch_bug_log(msg, ...) do { \ static void *last_seen; \ void *ra = __builtin_return_address(0); \ if (last_seen != ra) { \ last_seen = ra; \ - _dispatch_log((msg), ##__VA_ARGS__); \ + _dispatch_log(msg, ##__VA_ARGS__); \ } \ } while(0) void _dispatch_bug(size_t line, long val) { - static dispatch_once_t pred; - - dispatch_once_f(&pred, NULL, _dispatch_build_init); + dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); _dispatch_bug_log("BUG in libdispatch: %s - %lu - 0x%lx", _dispatch_build, (unsigned long)line, val); } @@ -307,6 +331,14 @@ _dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) mach_error_string(kr), kr); } +void +_dispatch_bug_kevent_client(const char* msg, const char* filter, + const char *operation, int err) +{ + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", + msg, filter, operation, strerror(err), err); +} + void _dispatch_abort(size_t line, long val) { @@ -314,10 +346,12 @@ _dispatch_abort(size_t line, long val) abort(); } +#if !DISPATCH_USE_OS_TRACE + #pragma mark - #pragma mark dispatch_log -static FILE *dispatch_logfile; +static int dispatch_logfile = -1; static bool dispatch_log_disabled; static dispatch_once_t _dispatch_logv_pred; @@ -341,52 +375,72 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) log_to_file = true; } else if (strcmp(e, "stderr") == 0) { log_to_file = true; - dispatch_logfile = stderr; + dispatch_logfile = STDERR_FILENO; } } if (!dispatch_log_disabled) { - if (log_to_file && !dispatch_logfile) { + if (log_to_file && dispatch_logfile == -1) { char path[PATH_MAX]; snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid()); - dispatch_logfile = fopen(path, "a"); + dispatch_logfile = open(path, O_WRONLY | O_APPEND | O_CREAT | + O_NOFOLLOW | O_CLOEXEC, 0666); } - if 
(dispatch_logfile) { + if (dispatch_logfile != -1) { struct timeval tv; gettimeofday(&tv, NULL); - fprintf(dispatch_logfile, "=== log file opened for %s[%u] at " + dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec); - fflush(dispatch_logfile); } } } +static inline void +_dispatch_log_file(char *buf, size_t len) +{ + ssize_t r; + + buf[len++] = '\n'; +retry: + r = write(dispatch_logfile, buf, len); + if (slowpath(r == -1) && errno == EINTR) { + goto retry; + } +} + DISPATCH_NOINLINE static void _dispatch_logv_file(const char *msg, va_list ap) { - char *buf; - size_t len; - - len = vasprintf(&buf, msg, ap); - buf[len++] = '\n'; - fwrite(buf, 1, len, dispatch_logfile); - fflush(dispatch_logfile); - free(buf); + char buf[2048]; + int r = vsnprintf(buf, sizeof(buf), msg, ap); + if (r < 0) return; + size_t len = (size_t)r; + if (len > sizeof(buf) - 1) { + len = sizeof(buf) - 1; + } + _dispatch_log_file(buf, len); } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_logv(const char *msg, va_list ap) +_dispatch_logv(const char *msg, size_t len, va_list ap) { dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init); if (slowpath(dispatch_log_disabled)) { return; } - if (slowpath(dispatch_logfile)) { + if (slowpath(dispatch_logfile != -1)) { + if (!ap) { + return _dispatch_log_file((char*)msg, len); + } return _dispatch_logv_file(msg, ap); } - vsyslog(LOG_NOTICE, msg, ap); + if (!ap) { + return syslog(LOG_NOTICE, "%s", msg); + } + return vsyslog(LOG_NOTICE, msg, ap); } DISPATCH_NOINLINE @@ -396,28 +450,58 @@ _dispatch_log(const char *msg, ...) 
va_list ap; va_start(ap, msg); - _dispatch_logv(msg, ap); + _dispatch_logv(msg, 0, ap); va_end(ap); } +#endif // DISPATCH_USE_OS_TRACE + #pragma mark - #pragma mark dispatch_debug +static size_t +_dispatch_object_debug2(dispatch_object_t dou, char* buf, size_t bufsiz) +{ + DISPATCH_OBJECT_TFB(_dispatch_objc_debug, dou, buf, bufsiz); + if (dou._do->do_vtable->do_debug) { + return dx_debug(dou._do, buf, bufsiz); + } + return strlcpy(buf, "NULL vtable slot: ", bufsiz); +} + DISPATCH_NOINLINE -void -dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +static void +_dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) { - char buf[4096]; + char buf[2048]; + int r; size_t offs; - - if (dou._do && dou._do->do_vtable->do_debug) { - offs = dx_debug(dou._do, buf, sizeof(buf)); + if (dou._do) { + offs = _dispatch_object_debug2(dou, buf, sizeof(buf)); + dispatch_assert(offs + 2 < sizeof(buf)); + buf[offs++] = ':'; + buf[offs++] = ' '; + buf[offs] = '\0'; } else { - offs = snprintf(buf, sizeof(buf), "NULL vtable slot"); + offs = strlcpy(buf, "NULL: ", sizeof(buf)); } + r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap); +#if !DISPATCH_USE_OS_TRACE + size_t len = offs + (r < 0 ? 0 : (size_t)r); + if (len > sizeof(buf) - 1) { + len = sizeof(buf) - 1; + } + _dispatch_logv(buf, len, NULL); +#else + _dispatch_log("%s", buf); +#endif +} - snprintf(buf + offs, sizeof(buf) - offs, ": %s", msg); - _dispatch_logv(buf, ap); +DISPATCH_NOINLINE +void +dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) +{ + _dispatch_debugv(dou, msg, ap); } DISPATCH_NOINLINE @@ -427,10 +511,43 @@ dispatch_debug(dispatch_object_t dou, const char *msg, ...) va_list ap; va_start(ap, msg); - dispatch_debugv(dou._do, msg, ap); + _dispatch_debugv(dou, msg, ap); va_end(ap); } +#if DISPATCH_DEBUG +DISPATCH_NOINLINE +void +_dispatch_object_debug(dispatch_object_t dou, const char *msg, ...) 
+{ + va_list ap; + + va_start(ap, msg); + _dispatch_debugv(dou._do, msg, ap); + va_end(ap); +} +#endif + +#pragma mark - +#pragma mark dispatch_calloc + +DISPATCH_NOINLINE +void +_dispatch_temporary_resource_shortage(void) +{ + sleep(1); +} + +void * +_dispatch_calloc(size_t num_items, size_t size) +{ + void *buf; + while (!fastpath(buf = calloc(num_items, size))) { + _dispatch_temporary_resource_shortage(); + } + return buf; +} + #pragma mark - #pragma mark dispatch_block_t @@ -442,9 +559,9 @@ _dispatch_Block_copy(dispatch_block_t db) { dispatch_block_t rval; - if (fastpath(db)) { + if (fastpath(db)) { while (!fastpath(rval = Block_copy(db))) { - sleep(1); + _dispatch_temporary_resource_shortage(); } return rval; } @@ -481,6 +598,7 @@ _dispatch_client_callout(void *ctxt, dispatch_function_t f) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt); _dispatch_set_unwind_tsd(NULL); f(ctxt); _dispatch_free_unwind_tsd(); @@ -494,12 +612,43 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, i); _dispatch_set_unwind_tsd(NULL); f(ctxt, i); _dispatch_free_unwind_tsd(); _dispatch_set_unwind_tsd(u); } +#undef _dispatch_client_callout3 +bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, region, offset, buffer, size); + _dispatch_set_unwind_tsd(NULL); + bool res = f(ctxt, region, offset, buffer, size); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); + return res; +} + +#undef _dispatch_client_callout4 +void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f) +{ + 
_dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, reason, dmsg, error); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, reason, dmsg, error); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -515,19 +664,25 @@ _os_object_init(void) return; } -_os_object_t -_os_object_alloc(const void *cls, size_t size) +inline _os_object_t +_os_object_alloc_realized(const void *cls, size_t size) { _os_object_t obj; dispatch_assert(size >= sizeof(struct _os_object_s)); - if (!cls) cls = &_os_object_class; while (!fastpath(obj = calloc(1u, size))) { - sleep(1); // Temporary resource shortage + _dispatch_temporary_resource_shortage(); } obj->os_obj_isa = cls; return obj; } +_os_object_t +_os_object_alloc(const void *cls, size_t size) +{ + if (!cls) cls = &_os_object_class; + return _os_object_alloc_realized(cls, size); +} + void _os_object_dealloc(_os_object_t obj) { @@ -583,12 +738,19 @@ dispatch_source_type_timer_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, unsigned long mask, - dispatch_queue_t q DISPATCH_UNUSED) + dispatch_queue_t q) { - ds->ds_refs = calloc(1ul, sizeof(struct dispatch_timer_source_refs_s)); - if (slowpath(!ds->ds_refs)) return; + if (fastpath(!ds->ds_refs)) { + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_timer_source_refs_s)); + } ds->ds_needs_rearm = true; ds->ds_is_timer = true; + if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0) + || q == dispatch_get_global_queue( + DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){ + mask |= DISPATCH_TIMER_BACKGROUND; // + } ds_timer(ds->ds_refs).flags = mask; } @@ -596,10 +758,55 @@ const struct dispatch_source_type_s _dispatch_source_type_timer = { .ke = { .filter = DISPATCH_EVFILT_TIMER, }, - .mask = DISPATCH_TIMER_WALL_CLOCK, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| + 
DISPATCH_TIMER_WALL_CLOCK, .init = dispatch_source_type_timer_init, }; +static void +dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_timer_source_aggregate_refs_s)); + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE; + ds->dq_specific_q = (void*)handle; + _dispatch_retain(ds->dq_specific_q); +} + +const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={ + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + .ident = ~0ull, + }, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .init = dispatch_source_type_timer_with_aggregate_init, +}; + +static void +dispatch_source_type_interval_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL; + unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs); + ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident; + _dispatch_source_set_interval(ds, handle); +} + +const struct dispatch_source_type_s _dispatch_source_type_interval = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + .ident = ~0ull, + }, + .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| + DISPATCH_INTERVAL_UI_ANIMATION, + .init = dispatch_source_type_interval_init, +}; + const struct dispatch_source_type_s _dispatch_source_type_read = { .ke = { .filter = EVFILT_READ, @@ -636,7 +843,7 @@ dispatch_source_type_vm_init(dispatch_source_t ds, { static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_vm_source_init); - ds->ds_dkev->dk_kevent.ident = (mask & DISPATCH_VM_PRESSURE ? + ds->ds_dkev->dk_kevent.ident = (uint64_t)(mask & DISPATCH_VM_PRESSURE ? 
_dispatch_ios_simulator_memory_warnings_fd : -1); } @@ -670,6 +877,31 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { #endif #endif +#ifdef DISPATCH_USE_MEMORYSTATUS +static void +dispatch_source_type_memorystatus_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_is_level = false; +} + +const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { + .ke = { + .filter = EVFILT_MEMORYSTATUS, + .flags = EV_DISPATCH, + }, + .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN +#ifdef NOTE_MEMORYSTATUS_PRESSURE_CRITICAL + |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL +#endif + , + .init = dispatch_source_type_memorystatus_init, +}; +#endif + const struct dispatch_source_type_s _dispatch_source_type_proc = { .ke = { .filter = EVFILT_PROC, @@ -720,6 +952,25 @@ const struct dispatch_source_type_s _dispatch_source_type_vfs = { , }; +const struct dispatch_source_type_s _dispatch_source_type_sock = { +#ifdef EVFILT_SOCK + .ke = { + .filter = EVFILT_SOCK, + .flags = EV_CLEAR, + }, + .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | + NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | + NOTE_RESUME | NOTE_KEEPALIVE +#ifdef NOTE_ADAPTIVE_WTIMO + | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO +#endif +#ifdef NOTE_CONNECTED + | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED +#endif + , +#endif // EVFILT_SOCK +}; + const struct dispatch_source_type_s _dispatch_source_type_data_add = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_ADD, @@ -730,7 +981,7 @@ const struct dispatch_source_type_s _dispatch_source_type_data_or = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_OR, .flags = EV_CLEAR, - .fflags = ~0, + .fflags = ~0u, }, }; @@ -742,8 +993,6 @@ dispatch_source_type_mach_send_init(dispatch_source_t ds, uintptr_t handle DISPATCH_UNUSED, unsigned long mask, 
dispatch_queue_t q DISPATCH_UNUSED) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_source_init); if (!mask) { // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD; @@ -753,7 +1002,7 @@ dispatch_source_type_mach_send_init(dispatch_source_t ds, const struct dispatch_source_type_s _dispatch_source_type_mach_send = { .ke = { - .filter = EVFILT_MACHPORT, + .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, .flags = EV_CLEAR, }, .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, @@ -779,16 +1028,6 @@ const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { .init = dispatch_source_type_mach_recv_init, }; -const struct dispatch_source_type_s _dispatch_source_type_sock = { - .ke = { - .filter = EVFILT_SOCK, - .flags = EV_CLEAR, - }, - .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | - NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | - NOTE_RESUME | NOTE_KEEPALIVE, -}; - #pragma mark - #pragma mark dispatch_mig @@ -808,9 +1047,9 @@ dispatch_mach_msg_get_context(mach_msg_header_t *msg) } kern_return_t -_dispatch_wakeup_main_thread(mach_port_t mp DISPATCH_UNUSED) +_dispatch_wakeup_runloop_thread(mach_port_t mp DISPATCH_UNUSED) { - // dummy function just to pop out the main thread out of mach_msg() + // dummy function just to pop a runloop thread out of mach_msg() return 0; } diff --git a/src/internal.h b/src/internal.h index a90f93f8d..ed1a9c7e6 100644 --- a/src/internal.h +++ b/src/internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -38,6 +38,14 @@ #endif +#if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC +#define DISPATCH_MACH_SPI 1 +#endif + +#if !defined(USE_OBJC) && HAVE_OBJC +#define USE_OBJC 1 +#endif + #if USE_OBJC && ((!TARGET_IPHONE_SIMULATOR && defined(__i386__)) || \ (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080)) // Disable Objective-C support on platforms with legacy objc runtime @@ -69,16 +77,60 @@ #include #include #include +#if !TARGET_OS_WIN32 #include +#endif + +#define DISPATCH_STRUCT_DECL(type, name, ...) \ + struct type __VA_ARGS__ name + +// Visual Studio C++ does not support C99 designated initializers. +// This means that static declarations should be zero initialized and cannot +// be const since we must fill in the values during DLL initialization. +#if !TARGET_OS_WIN32 +#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ +struct type name = { \ +__VA_ARGS__ \ +} +#else +#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ +struct type name = { 0 } +#endif + +#if !TARGET_OS_WIN32 +#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ + const DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) + +#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ + const DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#else +#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ + DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) + +#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ + DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#endif /* private.h must be included last to avoid picking up installed headers. 
*/ #include "object_private.h" #include "queue_private.h" #include "source_private.h" +#include "mach_private.h" #include "data_private.h" +#if !TARGET_OS_WIN32 +#include "io_private.h" +#endif #include "benchmark.h" #include "private.h" +/* SPI for Libsystem-internal use */ +DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); +#if !TARGET_OS_WIN32 +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); +#endif + /* More #includes at EOF (dependent on the contents of internal.h) ... */ // Abort on uncaught exceptions thrown from client callouts rdar://8577499 @@ -95,10 +147,16 @@ #define DISPATCH_PROFILE 0 #endif -#if (DISPATCH_DEBUG || DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE) +#if (!TARGET_OS_EMBEDDED || DISPATCH_DEBUG || DISPATCH_PROFILE) && \ + !defined(DISPATCH_USE_DTRACE) #define DISPATCH_USE_DTRACE 1 #endif +#if ((!TARGET_OS_EMBEDDED && DISPATCH_INTROSPECTION) || DISPATCH_DEBUG || \ + DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION) +#define DISPATCH_USE_DTRACE_INTROSPECTION 1 +#endif + #if HAVE_LIBKERN_OSCROSSENDIAN_H #include #endif @@ -120,18 +178,27 @@ #include #include #include +#include +#include #endif /* HAVE_MACH */ #if HAVE_MALLOC_MALLOC_H #include #endif + +#include + +#if !TARGET_OS_WIN32 #include #include #include -#include #include #include #include +#include #include +#else +#include "sys_queue.h" +#endif #ifdef __BLOCKS__ #include @@ -140,7 +207,9 @@ #include #include +#if HAVE_FCNTL_H #include +#endif #include #include #if USE_POSIX_SEM @@ -153,7 +222,9 @@ #include #include #include +#if !TARGET_OS_WIN32 #include +#endif #if HAVE_UNISTD_H #include #endif @@ -171,15 +242,25 @@ #define __has_attribute(x) 0 #endif +#if __GNUC__ #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) #define DISPATCH_UNUSED 
__attribute__((__unused__)) #define DISPATCH_WEAK __attribute__((__weak__)) +#define DISPATCH_OVERLOADABLE __attribute__((__overloadable__)) #if DISPATCH_DEBUG #define DISPATCH_ALWAYS_INLINE_NDEBUG #else #define DISPATCH_ALWAYS_INLINE_NDEBUG __attribute__((__always_inline__)) #endif +#else /* __GNUC__ */ +#define DISPATCH_NOINLINE +#define DISPATCH_USED +#define DISPATCH_UNUSED +#define DISPATCH_WEAK +#define DISPATCH_ALWAYS_INLINE_NDEBUG +#endif /* __GNUC__ */ + #define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y) #define DISPATCH_CONCAT1(x,y) x ## y @@ -198,24 +279,52 @@ #define NSEC_PER_USEC 1000ull /* I wish we had __builtin_expect_range() */ +#if __GNUC__ #define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) #define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) +#else +#define fastpath(x) (x) +#define slowpath(x) (x) +#endif // __GNUC__ DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); + +#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); +DISPATCH_NOINLINE +void _dispatch_bug_kevent_client(const char* msg, const char* filter, + const char *operation, int err); +#endif + DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); + +#if !defined(DISPATCH_USE_OS_TRACE) && DISPATCH_DEBUG +#if __has_include() +#define DISPATCH_USE_OS_TRACE 1 +#include +#endif +#endif // DISPATCH_USE_OS_TRACE + +#if DISPATCH_USE_OS_TRACE +#define _dispatch_log(msg, ...) os_trace("libdispatch", msg, ## __VA_ARGS__) +#else DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); +#endif // DISPATCH_USE_OS_TRACE + +#define dsnprintf(...) \ + ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. 
*/ +#if __GNUC__ #define dispatch_assert(e) do { \ if (__builtin_constant_p(e)) { \ char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ @@ -226,6 +335,14 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +static inline void _dispatch_assert(long e, long line) { + if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e); +} +#define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) +#endif /* __GNUC__ */ + +#if __GNUC__ /* * A lot of API return zero upon success and not-zero on fail. Let's capture * and log the non-zero value @@ -240,6 +357,12 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +static inline void _dispatch_assert_zero(long e, long line) { + if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); +} +#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#endif /* __GNUC__ */ /* * For reporting bugs or impedance mismatches between libdispatch and external @@ -247,6 +370,7 @@ void _dispatch_log(const char *msg, ...); * * In particular, we wrap all system-calls with assume() macros. */ +#if __GNUC__ #define dispatch_assume(e) ({ \ typeof(e) _e = fastpath(e); /* always eval 'e' */ \ if (!_e) { \ @@ -258,10 +382,19 @@ void _dispatch_log(const char *msg, ...); } \ _e; \ }) +#else +static inline long _dispatch_assume(long e, long line) { + if (!e) _dispatch_bug(line, e); + return e; +} +#define dispatch_assume(e) _dispatch_assume((long)(e), __LINE__) +#endif /* __GNUC__ */ + /* * A lot of API return zero upon success and not-zero on fail. 
Let's capture * and log the non-zero value */ +#if __GNUC__ #define dispatch_assume_zero(e) ({ \ typeof(e) _e = slowpath(e); /* always eval 'e' */ \ if (_e) { \ @@ -273,10 +406,18 @@ void _dispatch_log(const char *msg, ...); } \ _e; \ }) +#else +static inline long _dispatch_assume_zero(long e, long line) { + if (e) _dispatch_bug(line, e); + return e; +} +#define dispatch_assume_zero(e) _dispatch_assume_zero((long)(e), __LINE__) +#endif /* __GNUC__ */ /* * For reporting bugs in clients when using the "_debug" version of the library. */ +#if __GNUC__ #define dispatch_debug_assert(e, msg, args...) do { \ if (__builtin_constant_p(e)) { \ char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ @@ -288,38 +429,56 @@ void _dispatch_log(const char *msg, ...); } \ } \ } while (0) +#else +#define dispatch_debug_assert(e, msg, args...) do { \ + long _e = (long)fastpath(e); /* always eval 'e' */ \ + if (DISPATCH_DEBUG && !_e) { \ + _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \ + abort(); \ + } \ +} while (0) +#endif /* __GNUC__ */ /* Make sure the debug statments don't get too stale */ -#define _dispatch_debug(x, args...) \ -({ \ +#define _dispatch_debug(x, args...) 
do { \ if (DISPATCH_DEBUG) { \ - _dispatch_log("libdispatch: %u\t%p\t" x, __LINE__, \ + _dispatch_log("%u\t%p\t" x, __LINE__, \ (void *)_dispatch_thread_self(), ##args); \ } \ -}) +} while (0) #if DISPATCH_DEBUG #if HAVE_MACH DISPATCH_NOINLINE DISPATCH_USED void dispatch_debug_machport(mach_port_t name, const char* str); #endif -DISPATCH_NOINLINE DISPATCH_USED -void dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str); -#else -static inline void -dispatch_debug_kevents(struct kevent* kev DISPATCH_UNUSED, - size_t count DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} #endif +#if DISPATCH_DEBUG +/* This is the private version of the deprecated dispatch_debug() */ +DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,3))) +void +_dispatch_object_debug(dispatch_object_t object, const char *message, ...); +#else +#define _dispatch_object_debug(object, message, ...) +#endif // DISPATCH_DEBUG + #if DISPATCH_USE_CLIENT_CALLOUT DISPATCH_NOTHROW void _dispatch_client_callout(void *ctxt, dispatch_function_t f); DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +DISPATCH_NOTHROW bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f); +DISPATCH_NOTHROW void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f); -#else +#else // !DISPATCH_USE_CLIENT_CALLOUT DISPATCH_ALWAYS_INLINE static inline void @@ -335,36 +494,53 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) return f(ctxt, i); } -#endif +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + return f(ctxt, region, offset, buffer, size); +} + +DISPATCH_ALWAYS_INLINE 
+static inline void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f); +{ + return f(ctxt, reason, dmsg, error); +} + +#endif // !DISPATCH_USE_CLIENT_CALLOUT #ifdef __BLOCKS__ +#define _dispatch_Block_invoke(bb) \ + ((dispatch_function_t)((struct Block_layout *)bb)->invoke) DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout_block(dispatch_block_t b) { - struct Block_basic *bb = (void*)b; - return _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke); + return _dispatch_client_callout(b, _dispatch_Block_invoke(b)); } +#if __GNUC__ dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) +#else +dispatch_block_t _dispatch_Block_copy(const void *block); +#endif + void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ -void dummy_function(void); -long dummy_function_r0(void); +void _dispatch_temporary_resource_shortage(void); +void *_dispatch_calloc(size_t num_items, size_t size); void _dispatch_vtable_init(void); - -void _dispatch_source_drain_kevent(struct kevent *); - -long _dispatch_update_kq(const struct kevent *); -void _dispatch_run_timers(void); -// Returns howsoon with updated time value, or NULL if no timers active. 
-struct timespec *_dispatch_get_next_timer_fire(struct timespec *howsoon); +char *_dispatch_get_build(void); uint64_t _dispatch_timeout(dispatch_time_t when); -extern bool _dispatch_safe_fork; +extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; extern struct _dispatch_hw_config_s { uint32_t cc_max_active; @@ -372,10 +548,18 @@ extern struct _dispatch_hw_config_s { uint32_t cc_max_physical; } _dispatch_hw_config; +#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if __has_include() +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 +#include +#endif +#endif + /* #includes dependent on internal.h */ #include "shims.h" -// SnowLeopard and iOS Simulator fallbacks +// Older Mac OS X and iOS Simulator fallbacks #if HAVE_PTHREAD_WORKQUEUES #ifndef WORKQ_BG_PRIOQUEUE @@ -384,12 +568,12 @@ extern struct _dispatch_hw_config_s { #ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 #endif -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 #ifndef DISPATCH_NO_BG_PRIORITY #define DISPATCH_NO_BG_PRIORITY 1 #endif #endif -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080 #ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif @@ -401,28 +585,60 @@ extern struct _dispatch_hw_config_s { #endif // HAVE_PTHREAD_WORKQUEUES #if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || \ - (TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070) +#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070) #undef MACH_NOTIFY_SEND_POSSIBLE #define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME #endif #endif // HAVE_MACH #ifdef EVFILT_VM 
-#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 -#undef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 0 -#endif #ifndef DISPATCH_USE_VM_PRESSURE #define DISPATCH_USE_VM_PRESSURE 1 #endif -#ifndef DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -#define DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE 1 -#endif #endif // EVFILT_VM +#ifdef EVFILT_MEMORYSTATUS +#ifndef DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYSTATUS 1 +#endif +#endif // EVFILT_MEMORYSTATUS + +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 +#undef DISPATCH_USE_VM_PRESSURE_SOURCE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 0 +#endif // TARGET_IPHONE_SIMULATOR +#if TARGET_OS_EMBEDDED +#if !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 +#endif +#else // !TARGET_OS_EMBEDDED +#if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYSTATUS_SOURCE 1 +#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 +#endif +#endif // TARGET_OS_EMBEDDED + +#if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#undef NOTE_LEEWAY +#define NOTE_LEEWAY 0 +#undef NOTE_CRITICAL +#define NOTE_CRITICAL 0 +#undef NOTE_BACKGROUND +#define NOTE_BACKGROUND 0 +#endif // NOTE_LEEWAY + +#if HAVE_DECL_NOTE_REAP +#if defined(NOTE_REAP) && defined(__APPLE__) +#undef NOTE_REAP +#define NOTE_REAP 0x10000000 // +#endif +#endif // HAVE_DECL_NOTE_REAP + #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) -#if TARGET_IPHONE_SIMULATOR && __MAC_OS_X_VERSION_MIN_REQUIRED < 1070 +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 #undef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 0 #endif @@ -432,6 +648,40 @@ extern struct _dispatch_hw_config_s { #endif // F_SETNOSIGPIPE +#if 
HAVE_LIBPROC_INTERNAL_H +#include +#include +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#endif +#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#endif +#ifndef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 1 +#endif +#endif // HAVE_LIBPROC_INTERNAL_H + +#if HAVE_SYS_GUARDED_H +#include +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 +#undef DISPATCH_USE_GUARDED_FD +#define DISPATCH_USE_GUARDED_FD 0 +#endif +#ifndef DISPATCH_USE_GUARDED_FD +#define DISPATCH_USE_GUARDED_FD 1 +#endif +// change_fdguard_np() requires GUARD_DUP +#if DISPATCH_USE_GUARDED_FD && RDAR_11814513 +#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1 +#endif +#endif // HAVE_SYS_GUARDED_H + + +#define _dispatch_hardware_crash() __builtin_trap() + #define _dispatch_set_crash_log_message(x) #if HAVE_MACH @@ -465,10 +715,13 @@ extern struct _dispatch_hw_config_s { /* #includes dependent on internal.h */ #include "object_internal.h" #include "semaphore_internal.h" +#include "introspection_internal.h" #include "queue_internal.h" #include "source_internal.h" #include "data_internal.h" +#if !TARGET_OS_WIN32 #include "io_internal.h" +#endif #include "trace.h" #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c new file mode 100644 index 000000000..5338f259a --- /dev/null +++ b/src/introspection.c @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +// Contains introspection routines that only exist in the version of the +// library with introspection support + +#if DISPATCH_INTROSPECTION + +#include "internal.h" +#include "introspection.h" +#include "introspection_private.h" + +typedef struct dispatch_introspection_thread_s { + void *dit_isa; + TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list; + pthread_t thread; + dispatch_queue_t *queue; +} dispatch_introspection_thread_s; +typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t; + +static TAILQ_HEAD(, dispatch_introspection_thread_s) + _dispatch_introspection_threads = + TAILQ_HEAD_INITIALIZER(_dispatch_introspection_threads); +static volatile OSSpinLock _dispatch_introspection_threads_lock; + +static void _dispatch_introspection_thread_remove(void *ctxt); + +static TAILQ_HEAD(, dispatch_queue_s) _dispatch_introspection_queues = + TAILQ_HEAD_INITIALIZER(_dispatch_introspection_queues); +static volatile OSSpinLock _dispatch_introspection_queues_lock; + +static ptrdiff_t _dispatch_introspection_thread_queue_offset; + +#pragma mark - +#pragma mark dispatch_introspection_init + +void +_dispatch_introspection_init(void) +{ + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_main_q, diq_list); + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_mgr_q, diq_list); +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + _dispatch_mgr_q.do_targetq, diq_list); +#endif + for (size_t i = 0; i < 
DISPATCH_ROOT_QUEUE_COUNT; i++) { + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + &_dispatch_root_queues[i], diq_list); + } + + // Hack to determine queue TSD offset from start of pthread structure + uintptr_t thread = _dispatch_thread_self(); + thread_identifier_info_data_t tiid; + mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT; + kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread), + THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt); + if (!dispatch_assume_zero(kr)) { + _dispatch_introspection_thread_queue_offset = + (void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread; + } + _dispatch_thread_key_create(&dispatch_introspection_key, + _dispatch_introspection_thread_remove); + _dispatch_introspection_thread_add(); // add main thread +} + +const struct dispatch_introspection_versions_s +dispatch_introspection_versions = { + .introspection_version = 1, + .hooks_version = 1, + .hooks_size = sizeof(dispatch_introspection_hooks_s), + .queue_item_version = 1, + .queue_item_size = sizeof(dispatch_introspection_queue_item_s), + .queue_block_version = 1, + .queue_block_size = sizeof(dispatch_introspection_queue_block_s), + .queue_function_version = 1, + .queue_function_size = sizeof(dispatch_introspection_queue_function_s), + .queue_thread_version = 1, + .queue_thread_size = sizeof(dispatch_introspection_queue_thread_s), + .object_version = 1, + .object_size = sizeof(dispatch_introspection_object_s), + .queue_version = 1, + .queue_size = sizeof(dispatch_introspection_queue_s), + .source_version = 1, + .source_size = sizeof(dispatch_introspection_source_s), +}; + +#pragma mark - +#pragma mark dispatch_introspection_threads + +void +_dispatch_introspection_thread_add(void) +{ + if (_dispatch_thread_getspecific(dispatch_introspection_key)) { + return; + } + uintptr_t thread = _dispatch_thread_self(); + dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc(); + dit->dit_isa = (void*)0x41; + dit->thread = (void*)thread; + 
dit->queue = !_dispatch_introspection_thread_queue_offset ? NULL : + (void*)thread + _dispatch_introspection_thread_queue_offset; + _dispatch_thread_setspecific(dispatch_introspection_key, dit); + OSSpinLockLock(&_dispatch_introspection_threads_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection_threads, dit, dit_list); + OSSpinLockUnlock(&_dispatch_introspection_threads_lock); +} + +static void +_dispatch_introspection_thread_remove(void *ctxt) +{ + dispatch_introspection_thread_t dit = ctxt; + OSSpinLockLock(&_dispatch_introspection_threads_lock); + TAILQ_REMOVE(&_dispatch_introspection_threads, dit, dit_list); + OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_continuation_free((void*)dit); + _dispatch_thread_setspecific(dispatch_introspection_key, NULL); +} + +#pragma mark - +#pragma mark dispatch_introspection_info + +static inline +dispatch_introspection_queue_function_s +_dispatch_introspection_continuation_get_info(dispatch_queue_t dq, + dispatch_continuation_t dc, unsigned long *type) +{ + void *ctxt = dc->dc_ctxt; + dispatch_function_t func = dc->dc_func; + pthread_t waiter = NULL; + bool apply = false; + long flags = (long)dc->do_vtable; + if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { + waiter = dc->dc_data; + if (flags & DISPATCH_OBJ_BARRIER_BIT) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + } + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } + if (func == _dispatch_sync_recurse_invoke) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } else if (func == _dispatch_async_redirect_invoke) { + dq = dc->dc_data; + dc = dc->dc_other; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + flags = (long)dc->do_vtable; + } else if (func == _dispatch_mach_barrier_invoke) { + dq = dq->do_targetq; + ctxt = dc->dc_data; + func = dc->dc_other; + } else if (func == _dispatch_apply_invoke || + func == _dispatch_apply_redirect_invoke) { + dispatch_apply_t da = ctxt; + if (da->da_todo) { + dc = da->da_dc; + if (func == 
_dispatch_apply_redirect_invoke) { + dq = dc->dc_data; + } + ctxt = dc->dc_ctxt; + func = dc->dc_func; + apply = true; + } + } + if (func == _dispatch_call_block_and_release) { + *type = dispatch_introspection_queue_item_type_block; + func = _dispatch_Block_invoke(ctxt); + } else { + *type = dispatch_introspection_queue_item_type_function; + } + dispatch_introspection_queue_function_s diqf= { + .continuation = dc, + .target_queue = dq, + .context = ctxt, + .function = func, + .group = flags & DISPATCH_OBJ_GROUP_BIT ? dc->dc_data : NULL, + .waiter = waiter, + .barrier = flags & DISPATCH_OBJ_BARRIER_BIT, + .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, + .apply = apply, + }; + return diqf; +} + +static inline +dispatch_introspection_object_s +_dispatch_introspection_object_get_info(dispatch_object_t dou) +{ + dispatch_introspection_object_s dio = { + .object = dou._dc, + .target_queue = dou._do->do_targetq, + .type = (void*)dou._do->do_vtable, + .kind = dx_kind(dou._do), + }; + return dio; +} + +DISPATCH_USED inline +dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t dq) +{ + bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); + uint32_t width = dq->dq_width; + if (width > 1 && width != UINT32_MAX) width /= 2; + dispatch_introspection_queue_s diq = { + .queue = dq, + .target_queue = dq->do_targetq, + .label = dq->dq_label, + .serialnum = dq->dq_serialnum, + .width = width, + .suspend_count = dq->do_suspend_cnt / 2, + .enqueued = (dq->do_suspend_cnt & 1) && !global, + .barrier = (dq->dq_running & 1) && !global, + .draining = (dq->dq_items_head == (void*)~0ul) || + (!dq->dq_items_head && dq->dq_items_tail), + .global = global, + .main = (dq == &_dispatch_main_q), + }; + return diq; +} + +static inline +dispatch_introspection_source_s +_dispatch_introspection_source_get_info(dispatch_source_t ds) +{ + dispatch_source_refs_t dr = ds->ds_refs; + void *ctxt = 
dr->ds_handler_ctxt; + dispatch_function_t handler = dr->ds_handler_func; + bool handler_is_block = ds->ds_handler_is_block; + bool after = (handler == _dispatch_after_timer_callback); + if (after) { + dispatch_continuation_t dc = ctxt; + ctxt = dc->dc_ctxt; + handler = dc->dc_func; + if (handler == _dispatch_call_block_and_release) { + handler = _dispatch_Block_invoke(ctxt); + handler_is_block = 1; + } + } + dispatch_introspection_source_s dis = { + .source = ds, + .target_queue = ds->do_targetq, + .type = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.filter : 0, + .handle = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.ident : 0, + .context = ctxt, + .handler = handler, + .suspend_count = ds->do_suspend_cnt / 2, + .enqueued = (ds->do_suspend_cnt & 1), + .handler_is_block = handler_is_block, + .timer = ds->ds_is_timer, + .after = after, + }; + return dis; +} + +static inline +dispatch_introspection_queue_thread_s +_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit) +{ + dispatch_introspection_queue_thread_s diqt = { + .object = (void*)dit, + .thread = dit->thread, + }; + if (dit->queue && *dit->queue) { + diqt.queue = dispatch_introspection_queue_get_info(*dit->queue); + } + return diqt; +} + +DISPATCH_USED inline +dispatch_introspection_queue_item_s +dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, + dispatch_continuation_t dc) +{ + dispatch_introspection_queue_item_s diqi; + if (DISPATCH_OBJ_IS_VTABLE(dc)) { + dispatch_object_t dou = (dispatch_object_t)dc; + unsigned long type = dx_type(dou._do); + unsigned long metatype = type & _DISPATCH_META_TYPE_MASK; + if (metatype == _DISPATCH_QUEUE_TYPE && + type != DISPATCH_QUEUE_SPECIFIC_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_queue; + diqi.queue = dispatch_introspection_queue_get_info(dou._dq); + } else if (metatype == _DISPATCH_SOURCE_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_source; + diqi.source = 
_dispatch_introspection_source_get_info(dou._ds); + } else { + diqi.type = dispatch_introspection_queue_item_type_object; + diqi.object = _dispatch_introspection_object_get_info(dou._do); + } + } else { + diqi.function = _dispatch_introspection_continuation_get_info(dq, dc, + &diqi.type); + } + return diqi; +} + +#pragma mark - +#pragma mark dispatch_introspection_iterators + +DISPATCH_USED +dispatch_queue_t +dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, + dispatch_introspection_queue_t queues) +{ + dispatch_queue_t next; + next = start ? start : TAILQ_FIRST(&_dispatch_introspection_queues); + while (count--) { + if (!next) { + queues->queue = NULL; + break; + } + *queues++ = dispatch_introspection_queue_get_info(next); + next = TAILQ_NEXT(next, diq_list); + } + return next; +} + +DISPATCH_USED +dispatch_continuation_t +dispatch_introspection_get_queue_threads(dispatch_continuation_t start, + size_t count, dispatch_introspection_queue_thread_t threads) +{ + dispatch_introspection_thread_t next = start ? (void*)start : + TAILQ_FIRST(&_dispatch_introspection_threads); + while (count--) { + if (!next) { + threads->object = NULL; + break; + } + *threads++ = _dispatch_introspection_thread_get_info(next); + next = TAILQ_NEXT(next, dit_list); + } + return (void*)next; +} + +DISPATCH_USED +dispatch_continuation_t +dispatch_introspection_queue_get_items(dispatch_queue_t dq, + dispatch_continuation_t start, size_t count, + dispatch_introspection_queue_item_t items) +{ + dispatch_continuation_t next = start ? start : + dq->dq_items_head == (void*)~0ul ? 
NULL : (void*)dq->dq_items_head; + while (count--) { + if (!next) { + items->type = dispatch_introspection_queue_item_type_none; + break; + } + *items++ = dispatch_introspection_queue_item_get_info(dq, next); + next = next->do_next; + } + return next; +} + +#pragma mark - +#pragma mark dispatch_introspection_hooks + +#define DISPATCH_INTROSPECTION_NO_HOOK ((void*)~0ul) + +dispatch_introspection_hooks_s _dispatch_introspection_hooks; +dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts; +static const +dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { + .queue_create = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK, +}; + +#define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \ + sizeof(_dispatch_introspection_hook_callouts_enabled) - \ + sizeof(_dispatch_introspection_hook_callouts_enabled._reserved)) / \ + sizeof(dispatch_function_t)) + +#define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \ + (slowpath(_dispatch_introspection_hooks.h)) + +#define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) 
({ \ + typeof(_dispatch_introspection_hooks.h) _h; \ + _h = _dispatch_introspection_hooks.h; \ + if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \ + _h(__VA_ARGS__); \ + } }) + +#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \ + DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \ + asm("_dispatch_introspection_hook_" #h); \ + void _dispatch_introspection_hook_##h(void) {} + +#define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\ + dispatch_introspection_hook_##h(__VA_ARGS__) + +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end); + +DISPATCH_USED +void +dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks) +{ + dispatch_introspection_hooks_s old_hooks = _dispatch_introspection_hooks; + _dispatch_introspection_hooks = *hooks; + dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts, + *h = (void*)&_dispatch_introspection_hooks, *oh = (void*)&old_hooks; + for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) { + if (!h[i] && e[i]) { + h[i] = DISPATCH_INTROSPECTION_NO_HOOK; + } + if (oh[i] == DISPATCH_INTROSPECTION_NO_HOOK) { + oh[i] = NULL; + } + } + *hooks = old_hooks; +} + +DISPATCH_USED +void +dispatch_introspection_hook_callouts_enable( + dispatch_introspection_hooks_t enable) +{ + _dispatch_introspection_hook_callouts = enable ? 
*enable : + _dispatch_introspection_hook_callouts_enabled; + dispatch_function_t *e = (void*)&_dispatch_introspection_hook_callouts, + *h = (void*)&_dispatch_introspection_hooks; + for (size_t i = 0; i < DISPATCH_INTROSPECTION_HOOKS_COUNT; i++) { + if (e[i] && !h[i]) { + h[i] = DISPATCH_INTROSPECTION_NO_HOOK; + } else if (!e[i] && h[i] == DISPATCH_INTROSPECTION_NO_HOOK) { + h[i] = NULL; + } + } +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_create( + dispatch_introspection_queue_t queue_info) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_create, queue_info); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_create_hook(dispatch_queue_t dq) +{ + dispatch_introspection_queue_s diq; + diq = dispatch_introspection_queue_get_info(dq); + dispatch_introspection_hook_callout_queue_create(&diq); +} + +dispatch_queue_t +_dispatch_introspection_queue_create(dispatch_queue_t dq) +{ + OSSpinLockLock(&_dispatch_introspection_queues_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, dq, diq_list); + OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) { + _dispatch_introspection_queue_create_hook(dq); + } + return dq; +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_dispose( + dispatch_introspection_queue_t queue_info) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_dispose, queue_info); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) +{ + dispatch_introspection_queue_s diq; + diq = dispatch_introspection_queue_get_info(dq); + dispatch_introspection_hook_callout_queue_dispose(&diq); +} + +void +_dispatch_introspection_queue_dispose(dispatch_queue_t dq) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { + 
_dispatch_introspection_queue_dispose_hook(dq); + } + + OSSpinLockLock(&_dispatch_introspection_queues_lock); + TAILQ_REMOVE(&_dispatch_introspection_queues, dq, diq_list); + OSSpinLockUnlock(&_dispatch_introspection_queues_lock); +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_item_enqueue(dispatch_queue_t queue, + dispatch_introspection_queue_item_t item) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_enqueue, queue, item); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_item_enqueue_hook(dispatch_queue_t dq, + dispatch_object_t dou) +{ + dispatch_introspection_queue_item_s diqi; + diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc); + dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi); +} + +void +_dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, + dispatch_object_t dou) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_item_enqueue, dq, dou); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_enqueue)) { + _dispatch_introspection_queue_item_enqueue_hook(dq, dou); + } +} + +DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue, + dispatch_introspection_queue_item_t item) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_dequeue, queue, item); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq, + dispatch_object_t dou) +{ + dispatch_introspection_queue_item_s diqi; + diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc); + dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi); +} + +void +_dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, + dispatch_object_t dou) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_item_dequeue, dq, dou); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_dequeue)) { + _dispatch_introspection_queue_item_dequeue_hook(dq, dou); + } +} + +void 
+_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) { + dispatch_queue_t dq = _dispatch_queue_get_current(); + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_callout_begin, dq, ctxt, f); +} + +void +_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) { + dispatch_queue_t dq = _dispatch_queue_get_current(); + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( + queue_callout_end, dq, ctxt, f); +} + +#endif // DISPATCH_INTROSPECTION diff --git a/src/introspection_internal.h b/src/introspection_internal.h new file mode 100644 index 000000000..89a9360c0 --- /dev/null +++ b/src/introspection_internal.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_INTROSPECTION_INTERNAL__ +#define __DISPATCH_INTROSPECTION_INTERNAL__ + +#if DISPATCH_INTROSPECTION + +#define DISPATCH_INTROSPECTION_QUEUE_LIST \ + TAILQ_ENTRY(dispatch_queue_s) diq_list +#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE \ + sizeof(TAILQ_ENTRY(dispatch_queue_s)) + +void _dispatch_introspection_init(void); +void _dispatch_introspection_thread_add(void); +dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq); +void _dispatch_introspection_queue_dispose(dispatch_queue_t dq); +void _dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, + dispatch_object_t dou); +void _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, + dispatch_object_t dou); +void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); +void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); + +#if !__OBJC2__ + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push_list(dispatch_queue_t dq, + dispatch_object_t head, dispatch_object_t tail) { + struct dispatch_object_s *dou = head._do; + do { + _dispatch_introspection_queue_item_enqueue(dq, dou); + } while (dou != tail._do && (dou = dou->do_next)); +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push(dispatch_queue_t dq, dispatch_object_t dou) { + _dispatch_introspection_queue_item_enqueue(dq, dou); +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { + _dispatch_introspection_queue_item_dequeue(dq, dou); +}; + +#endif + +#else + +#define DISPATCH_INTROSPECTION_QUEUE_LIST +#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0 + +#define _dispatch_introspection_init() +#define _dispatch_introspection_thread_add() +#define _dispatch_introspection_thread_remove() + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_introspection_queue_create(dispatch_queue_t dq) { return dq; } + 
+DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_dispose(dispatch_queue_t dq) { (void)dq; } + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push_list(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t head DISPATCH_UNUSED, + dispatch_object_t tail DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_push(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t dou DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_pop(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t dou DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_callout_entry(void *ctxt DISPATCH_UNUSED, + dispatch_function_t f DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_callout_return(void *ctxt DISPATCH_UNUSED, + dispatch_function_t f DISPATCH_UNUSED) {} + +#endif // DISPATCH_INTROSPECTION + +#endif // __DISPATCH_INTROSPECTION_INTERNAL__ diff --git a/src/io.c b/src/io.c index 4e3601518..48683a666 100644 --- a/src/io.c +++ b/src/io.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -20,6 +20,25 @@ #include "internal.h" +#ifndef DISPATCH_IO_DEBUG +#define DISPATCH_IO_DEBUG DISPATCH_DEBUG +#endif + +#if DISPATCH_IO_DEBUG +#define _dispatch_fd_debug(msg, fd, args...) \ + _dispatch_debug("fd[0x%x]: " msg, (fd), ##args) +#else +#define _dispatch_fd_debug(msg, fd, args...) 
+#endif + +#if USE_OBJC +#define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) +#define _dispatch_io_data_release(x) _dispatch_objc_release(x) +#else +#define _dispatch_io_data_retain(x) dispatch_retain(x) +#define _dispatch_io_data_release(x) dispatch_release(x) +#endif + typedef void (^dispatch_fd_entry_init_callback_t)(dispatch_fd_entry_t fd_entry); DISPATCH_EXPORT DISPATCH_NOTHROW @@ -59,6 +78,7 @@ static void _dispatch_stream_cleanup_operations(dispatch_stream_t stream, static void _dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel); static void _dispatch_stream_source_handler(void *ctx); +static void _dispatch_stream_queue_handler(void *ctx); static void _dispatch_stream_handler(void *ctx); static void _dispatch_disk_handler(void *ctx); static void _dispatch_disk_perform(void *ctxt); @@ -74,7 +94,8 @@ static void _dispatch_operation_deliver_data(dispatch_operation_t op, case EINTR: continue; \ __VA_ARGS__ \ } \ - } while (0) + break; \ + } while (1) #define _dispatch_io_syscall_switch(__err, __syscall, ...) do { \ _dispatch_io_syscall_switch_noerr(__err, __syscall, \ case 0: break; \ @@ -95,7 +116,8 @@ enum { DISPATCH_OP_FD_ERR, }; -#define _dispatch_io_Block_copy(x) ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) +#define _dispatch_io_Block_copy(x) \ + ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) #pragma mark - #pragma mark dispatch_io_hashtables @@ -105,7 +127,7 @@ enum { #else #define DIO_HASH_SIZE 256u // must be a power of two #endif -#define DIO_HASH(x) ((uintptr_t)((x) & (DIO_HASH_SIZE - 1))) +#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1)) // Global hashtable of dev_t -> disk_s mappings DISPATCH_CACHELINE_ALIGN @@ -209,7 +231,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? 
fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("cleanup handler invoke", -1); + _dispatch_fd_debug("cleanup handler invoke", -1); cleanup_handler(err); }); _dispatch_release(queue); @@ -233,7 +255,9 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, void _dispatch_io_dispose(dispatch_io_t channel) { - if (channel->fd_entry && !(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { + _dispatch_object_debug(channel, "%s", __func__); + if (channel->fd_entry && + !(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { if (channel->fd_entry->path_data) { // This modification is safe since path_data->channel is checked // only on close_queue (which is still suspended at this point) @@ -298,7 +322,7 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { return NULL; } - _dispatch_io_debug("io create", fd); + _dispatch_fd_debug("io create", fd); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; channel->fd_actual = fd; @@ -323,12 +347,23 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, _dispatch_fd_entry_retain(fd_entry); _dispatch_io_init(channel, fd_entry, queue, err, cleanup_handler); dispatch_resume(channel->queue); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); _dispatch_release(queue); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, + dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create(type, fd, queue, !cleanup_handler ? 
NULL : + ^(int error){ cleanup_handler(context, error); }); +} + dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, @@ -343,7 +378,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, if (!path_data) { return NULL; } - _dispatch_io_debug("io create with path %s", -1, path); + _dispatch_fd_debug("io create with path %s", -1, path); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; channel->fd_actual = -1; @@ -402,13 +437,25 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, path_data, st.st_dev, st.st_mode); _dispatch_io_init(channel, fd_entry, queue, 0, cleanup_handler); dispatch_resume(channel->queue); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); _dispatch_release(queue); }); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_with_path_f(dispatch_io_type_t type, const char *path, + int oflag, mode_t mode, dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create_with_path(type, path, oflag, mode, queue, + !cleanup_handler ? 
NULL : + ^(int error){ cleanup_handler(context, error); }); +} + dispatch_io_t dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, dispatch_queue_t queue, void (^cleanup_handler)(int error)) @@ -416,7 +463,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { return NULL; } - _dispatch_io_debug("io create with io %p", -1, in_channel); + _dispatch_fd_debug("io create with io %p", -1, in_channel); dispatch_io_t channel = _dispatch_io_create(type); dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -499,11 +546,23 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, _dispatch_release(queue); } _dispatch_release(in_channel); + _dispatch_object_debug(channel, "%s", __func__); }); }); + _dispatch_object_debug(channel, "%s", __func__); return channel; } +dispatch_io_t +dispatch_io_create_with_io_f(dispatch_io_type_t type, dispatch_io_t in_channel, + dispatch_queue_t queue, void *context, + void (*cleanup_handler)(void *context, int error)) +{ + return dispatch_io_create_with_io(type, in_channel, queue, + !cleanup_handler ? NULL : + ^(int error){ cleanup_handler(context, error); }); +} + #pragma mark - #pragma mark dispatch_io_accessors @@ -512,7 +571,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set high water", channel->fd); + _dispatch_fd_debug("io set high water", channel->fd); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -526,7 +585,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set low water", channel->fd); + _dispatch_fd_debug("io set low water", channel->fd); if (channel->params.high < low_water) { channel->params.high = low_water ? 
low_water : 1; } @@ -541,8 +600,8 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_io_debug("io set interval", channel->fd); - channel->params.interval = interval; + _dispatch_fd_debug("io set interval", channel->fd); + channel->params.interval = interval < INT64_MAX ? interval : INT64_MAX; channel->params.interval_flags = flags; _dispatch_release(channel); }); @@ -557,6 +616,7 @@ _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq) dispatch_queue_t prev_dq = channel->do_targetq; channel->do_targetq = dq; _dispatch_release(prev_dq); + _dispatch_object_debug(channel, "%s", __func__); _dispatch_release(channel); }); } @@ -568,8 +628,8 @@ dispatch_io_get_descriptor(dispatch_io_t channel) return -1; } dispatch_fd_t fd = channel->fd_actual; - if (fd == -1 && - _dispatch_thread_getspecific(dispatch_io_key) == channel) { + if (fd == -1 && _dispatch_thread_getspecific(dispatch_io_key) == channel && + !_dispatch_io_get_error(NULL, channel, false)) { dispatch_fd_entry_t fd_entry = channel->fd_entry; (void)_dispatch_fd_entry_open(fd_entry, channel); } @@ -582,14 +642,15 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_io_debug("io stop", channel->fd); - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED); + _dispatch_fd_debug("io stop", channel->fd); + (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ + _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_io_debug("io stop cleanup", channel->fd); + _dispatch_fd_debug("io stop cleanup", channel->fd); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { channel->fd_entry = NULL; @@ 
-599,7 +660,8 @@ _dispatch_io_stop(dispatch_io_t channel) // Stop after close, need to check if fd_entry still exists _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_io_debug("io stop after close cleanup", + _dispatch_object_debug(channel, "%s", __func__); + _dispatch_fd_debug("io stop after close cleanup", channel->fd); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); @@ -634,14 +696,18 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ - _dispatch_io_debug("io close", channel->fd); + _dispatch_object_debug(channel, "%s", __func__); + _dispatch_fd_debug("io close", channel->fd); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED); + (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED, + relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; - if (!fd_entry->path_data) { - channel->fd_entry = NULL; + if (fd_entry) { + if (!fd_entry->path_data) { + channel->fd_entry = NULL; + } + _dispatch_fd_entry_release(fd_entry); } - _dispatch_fd_entry_release(fd_entry); } _dispatch_release(channel); }); @@ -659,6 +725,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) dispatch_async(barrier_queue, ^{ dispatch_suspend(barrier_queue); dispatch_group_notify(barrier_group, io_q, ^{ + _dispatch_object_debug(channel, "%s", __func__); _dispatch_thread_setspecific(dispatch_io_key, channel); barrier(); _dispatch_thread_setspecific(dispatch_io_key, NULL); @@ -669,6 +736,13 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) }); } +void +dispatch_io_barrier_f(dispatch_io_t channel, void *context, + dispatch_function_t barrier) +{ + return dispatch_io_barrier(channel, ^{ barrier(context); }); +} + void dispatch_io_read(dispatch_io_t channel, off_t offset, size_t length, dispatch_queue_t queue, 
dispatch_io_handler_t handler) @@ -691,6 +765,17 @@ dispatch_io_read(dispatch_io_t channel, off_t offset, size_t length, }); } +void +dispatch_io_read_f(dispatch_io_t channel, off_t offset, size_t length, + dispatch_queue_t queue, void *context, + dispatch_io_handler_function_t handler) +{ + return dispatch_io_read(channel, offset, length, queue, + ^(bool done, dispatch_data_t d, int error){ + handler(context, done, d, error); + }); +} + void dispatch_io_write(dispatch_io_t channel, off_t offset, dispatch_data_t data, dispatch_queue_t queue, dispatch_io_handler_t handler) @@ -716,6 +801,17 @@ dispatch_io_write(dispatch_io_t channel, off_t offset, dispatch_data_t data, }); } +void +dispatch_io_write_f(dispatch_io_t channel, off_t offset, dispatch_data_t data, + dispatch_queue_t queue, void *context, + dispatch_io_handler_function_t handler) +{ + return dispatch_io_write(channel, offset, data, queue, + ^(bool done, dispatch_data_t d, int error){ + handler(context, done, d, error); + }); +} + void dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, void (^handler)(dispatch_data_t, int)) @@ -726,7 +822,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, if (fd_entry->err) { int err = fd_entry->err; dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(dispatch_data_empty, err); }); _dispatch_release(queue); @@ -749,7 +845,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, __block int err = 0; dispatch_async(fd_entry->close_queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(deliver_data, err); _dispatch_io_data_release(deliver_data); }); @@ -775,6 +871,15 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, }); } +void +dispatch_read_f(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, 
+ void *context, void (*handler)(void *, dispatch_data_t, int)) +{ + return dispatch_read(fd, length, queue, ^(dispatch_data_t d, int error){ + handler(context, d, error); + }); +} + void dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, void (^handler)(dispatch_data_t, int)) @@ -786,7 +891,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, if (fd_entry->err) { int err = fd_entry->err; dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(NULL, err); }); _dispatch_release(queue); @@ -809,7 +914,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, __block int err = 0; dispatch_async(fd_entry->close_queue, ^{ dispatch_async(queue, ^{ - _dispatch_io_debug("convenience handler invoke", fd); + _dispatch_fd_debug("convenience handler invoke", fd); handler(deliver_data, err); if (deliver_data) { _dispatch_io_data_release(deliver_data); @@ -837,6 +942,15 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, }); } +void +dispatch_write_f(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, + void *context, void (*handler)(void *, dispatch_data_t, int)) +{ + return dispatch_write(fd, data, queue, ^(dispatch_data_t d, int error){ + handler(context, d, error); + }); +} + #pragma mark - #pragma mark dispatch_operation_t @@ -848,7 +962,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, { // On channel queue dispatch_assert(direction < DOP_DIR_MAX); - _dispatch_io_debug("operation create", channel->fd); + _dispatch_fd_debug("operation create", channel->fd); #if DISPATCH_IO_DEBUG int fd = channel->fd; #endif @@ -866,7 +980,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); 
handler(true, d, err); _dispatch_io_data_release(data); }); @@ -896,12 +1010,14 @@ _dispatch_operation_create(dispatch_op_direction_t direction, targetq = targetq->do_targetq; } op->do_targetq = targetq; + _dispatch_object_debug(op, "%s", __func__); return op; } void _dispatch_operation_dispose(dispatch_operation_t op) { + _dispatch_object_debug(op, "%s", __func__); // Deliver the data if there's any if (op->fd_entry) { _dispatch_operation_deliver_data(op, DOP_DONE); @@ -977,7 +1093,7 @@ _dispatch_operation_should_enqueue(dispatch_operation_t op, dispatch_queue_t tq, dispatch_data_t data) { // On stream queue or disk queue - _dispatch_io_debug("enqueue operation", op->fd_entry->fd); + _dispatch_fd_debug("enqueue operation", op->fd_entry->fd); _dispatch_io_data_retain(data); op->data = data; int err = _dispatch_io_get_error(op, NULL, true); @@ -1003,7 +1119,7 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) dispatch_source_t timer = dispatch_source_create( DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq); dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, - op->params.interval), op->params.interval, 0); + (int64_t)op->params.interval), op->params.interval, 0); dispatch_source_set_event_handler(timer, ^{ // On stream queue or pick queue if (dispatch_source_testcancel(timer)) { @@ -1029,6 +1145,79 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) #pragma mark - #pragma mark dispatch_fd_entry_t +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void +_dispatch_fd_entry_guard(dispatch_fd_entry_t fd_entry) +{ + guardid_t guard = fd_entry; + const unsigned int guard_flags = GUARD_CLOSE; + int err, fd_flags = 0; + _dispatch_io_syscall_switch_noerr(err, + change_fdguard_np(fd_entry->fd, NULL, 0, &guard, guard_flags, + &fd_flags), + case 0: + fd_entry->guard_flags = guard_flags; + fd_entry->orig_fd_flags = fd_flags; + break; + case EPERM: break; + default: (void)dispatch_assume_zero(err); break; + ); +} + +static void 
+_dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) +{ + if (!fd_entry->guard_flags) { + return; + } + guardid_t guard = fd_entry; + int err, fd_flags = fd_entry->orig_fd_flags; + _dispatch_io_syscall_switch(err, + change_fdguard_np(fd_entry->fd, &guard, fd_entry->guard_flags, NULL, 0, + &fd_flags), + default: (void)dispatch_assume_zero(err); break; + ); +} +#else +static inline void +_dispatch_fd_entry_guard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } +static inline void +_dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } +#endif // DISPATCH_USE_GUARDED_FD + +static inline int +_dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, + int oflag, mode_t mode) { +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)fd_entry; + const unsigned int guard_flags = GUARD_CLOSE | GUARD_DUP | + GUARD_SOCKET_IPC | GUARD_FILEPORT; + int fd = guarded_open_np(path, &guard, guard_flags, oflag | O_CLOEXEC, + mode); + if (fd != -1) { + fd_entry->guard_flags = guard_flags; + return fd; + } + errno = 0; +#endif + return open(path, oflag, mode); + (void)fd_entry; +} + +static inline int +_dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { +#if DISPATCH_USE_GUARDED_FD + if (fd_entry->guard_flags) { + guardid_t guard = (uintptr_t)fd_entry; + return guarded_close_np(fd, &guard); + } else +#endif + { + return close(fd); + } + (void)fd_entry; +} + static inline void _dispatch_fd_entry_retain(dispatch_fd_entry_t fd_entry) { dispatch_suspend(fd_entry->close_queue); @@ -1047,7 +1236,7 @@ _dispatch_fd_entry_init_async(dispatch_fd_t fd, dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, _dispatch_io_fds_lockq_init); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_io_debug("fd entry init", fd); + _dispatch_fd_debug("fd entry init", fd); dispatch_fd_entry_t fd_entry = NULL; // Check to see if there is an existing entry for the given fd uintptr_t hash = DIO_HASH(fd); @@ -1064,7 +1253,7 @@ 
_dispatch_fd_entry_init_async(dispatch_fd_t fd, fd_entry = _dispatch_fd_entry_create_with_fd(fd, hash); } dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_io_debug("fd entry init completion", fd); + _dispatch_fd_debug("fd entry init completion", fd); completion_callback(fd_entry); // stat() is complete, release reference to fd_entry _dispatch_fd_entry_release(fd_entry); @@ -1076,7 +1265,7 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create(dispatch_queue_t q) { dispatch_fd_entry_t fd_entry; - fd_entry = calloc(1ul, sizeof(struct dispatch_fd_entry_s)); + fd_entry = _dispatch_calloc(1ul, sizeof(struct dispatch_fd_entry_s)); fd_entry->close_queue = dispatch_queue_create( "com.apple.libdispatch-io.closeq", NULL); // Use target queue to ensure that no concurrent lookups are going on when @@ -1092,7 +1281,7 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) { // On fds lock queue - _dispatch_io_debug("fd entry create", fd); + _dispatch_fd_debug("fd entry create", fd); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); fd_entry->fd = fd; @@ -1101,7 +1290,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_io_debug("fd entry stat", fd); + _dispatch_fd_debug("fd entry stat", fd); int err, orig_flags, orig_nosigpipe = -1; struct stat st; _dispatch_io_syscall_switch(err, @@ -1110,6 +1299,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) ); fd_entry->stat.dev = st.st_dev; fd_entry->stat.mode = st.st_mode; + _dispatch_fd_entry_guard(fd_entry); _dispatch_io_syscall_switch(err, orig_flags = fcntl(fd, F_GETFL), default: (void)dispatch_assume_zero(err); break; @@ -1172,7 +1362,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // all operations associated with this entry have been 
freed dispatch_async(fd_entry->close_queue, ^{ if (!fd_entry->disk) { - _dispatch_io_debug("close queue fd_entry cleanup", fd); + _dispatch_fd_debug("close queue fd_entry cleanup", fd); dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { _dispatch_stream_dispose(fd_entry, dir); @@ -1190,11 +1380,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // source cancels it and suspends the close queue. Freeing the fd_entry // structure must happen after the source cancel handler has finished dispatch_async(fd_entry->close_queue, ^{ - _dispatch_io_debug("close queue release", fd); + _dispatch_fd_debug("close queue release", fd); dispatch_release(fd_entry->close_queue); - _dispatch_io_debug("barrier queue release", fd); + _dispatch_fd_debug("barrier queue release", fd); dispatch_release(fd_entry->barrier_queue); - _dispatch_io_debug("barrier group release", fd); + _dispatch_fd_debug("barrier group release", fd); dispatch_release(fd_entry->barrier_group); if (fd_entry->orig_flags != -1) { _dispatch_io_syscall( @@ -1208,6 +1398,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) ); } #endif + _dispatch_fd_entry_unguard(fd_entry); if (fd_entry->convenience_channel) { fd_entry->convenience_channel->fd_entry = NULL; dispatch_release(fd_entry->convenience_channel); @@ -1222,7 +1413,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, dev_t dev, mode_t mode) { // On devs lock queue - _dispatch_io_debug("fd entry create with path %s", -1, path_data->path); + _dispatch_fd_debug("fd entry create with path %s", -1, path_data->path); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( path_data->channel->queue); if (S_ISREG(mode)) { @@ -1243,7 +1434,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, // that the channel associated with this entry has been closed and that // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ 
- _dispatch_io_debug("close queue fd_entry cleanup", -1); + _dispatch_fd_debug("close queue fd_entry cleanup", -1); if (!fd_entry->disk) { dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { @@ -1251,7 +1442,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } } if (fd_entry->fd != -1) { - close(fd_entry->fd); + _dispatch_fd_entry_guarded_close(fd_entry, fd_entry->fd); } if (fd_entry->path_data->channel) { // If associated channel has not been released yet, mark it as @@ -1262,7 +1453,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } }); dispatch_async(fd_entry->close_queue, ^{ - _dispatch_io_debug("close queue release", -1); + _dispatch_fd_debug("close queue release", -1); dispatch_release(fd_entry->close_queue); dispatch_release(fd_entry->barrier_queue); dispatch_release(fd_entry->barrier_group); @@ -1285,21 +1476,23 @@ _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) int oflag = fd_entry->disk ? 
fd_entry->path_data->oflag & ~O_NONBLOCK : fd_entry->path_data->oflag | O_NONBLOCK; open: - fd = open(fd_entry->path_data->path, oflag, fd_entry->path_data->mode); + fd = _dispatch_fd_entry_guarded_open(fd_entry, fd_entry->path_data->path, + oflag, fd_entry->path_data->mode); if (fd == -1) { int err = errno; if (err == EINTR) { goto open; } - (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err); + (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); return err; } - if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd)) { + if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { // Lost the race with another open - close(fd); + _dispatch_fd_entry_guarded_close(fd_entry, fd); } else { channel->fd_actual = fd; } + _dispatch_object_debug(channel, "%s", __func__); return 0; } @@ -1350,9 +1543,10 @@ _dispatch_stream_init(dispatch_fd_entry_t fd_entry, dispatch_queue_t tq) dispatch_op_direction_t direction; for (direction = 0; direction < DOP_DIR_MAX; direction++) { dispatch_stream_t stream; - stream = calloc(1ul, sizeof(struct dispatch_stream_s)); + stream = _dispatch_calloc(1ul, sizeof(struct dispatch_stream_s)); stream->dq = dispatch_queue_create("com.apple.libdispatch-io.streamq", NULL); + dispatch_set_context(stream->dq, stream); _dispatch_retain(tq); stream->dq->do_targetq = tq; TAILQ_INIT(&stream->operations[DISPATCH_IO_RANDOM]); @@ -1379,6 +1573,7 @@ _dispatch_stream_dispose(dispatch_fd_entry_t fd_entry, dispatch_resume(stream->source); dispatch_release(stream->source); } + dispatch_set_context(stream->dq, NULL); dispatch_release(stream->dq); free(stream); } @@ -1388,7 +1583,6 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) { // On devs lock queue dispatch_disk_t disk; - char label_name[256]; // Check to see if there is an existing entry for the given device uintptr_t hash = DIO_HASH(dev); TAILQ_FOREACH(disk, &_dispatch_io_devs[hash], disk_list) { @@ -1410,8 +1604,9 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t 
dev) disk->dev = dev; TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); - sprintf(label_name, "com.apple.libdispatch-io.deviceq.%d", dev); - disk->pick_queue = dispatch_queue_create(label_name, NULL); + char label[45]; + snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", dev); + disk->pick_queue = dispatch_queue_create(label, NULL); TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); out: fd_entry->disk = disk; @@ -1448,10 +1643,12 @@ _dispatch_stream_enqueue_operation(dispatch_stream_t stream, if (!_dispatch_operation_should_enqueue(op, stream->dq, data)) { return; } + _dispatch_object_debug(op, "%s", __func__); bool no_ops = !_dispatch_stream_operation_avail(stream); TAILQ_INSERT_TAIL(&stream->operations[op->params.type], op, operation_list); if (no_ops) { - dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + dispatch_async_f(stream->dq, stream->dq, + _dispatch_stream_queue_handler); } } @@ -1462,6 +1659,7 @@ _dispatch_disk_enqueue_operation(dispatch_disk_t disk, dispatch_operation_t op, if (!_dispatch_operation_should_enqueue(op, disk->pick_queue, data)) { return; } + _dispatch_object_debug(op, "%s", __func__); if (op->params.type == DISPATCH_IO_STREAM) { if (TAILQ_EMPTY(&op->fd_entry->stream_ops)) { TAILQ_INSERT_TAIL(&disk->operations, op, operation_list); @@ -1478,7 +1676,8 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, dispatch_operation_t op) { // On stream queue - _dispatch_io_debug("complete operation", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", __func__); + _dispatch_fd_debug("complete operation", op->fd_entry->fd); TAILQ_REMOVE(&stream->operations[op->params.type], op, operation_list); if (op == stream->op) { stream->op = NULL; @@ -1494,7 +1693,8 @@ static void _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) { // On pick queue - _dispatch_io_debug("complete operation", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", 
__func__); + _dispatch_fd_debug("complete operation", op->fd_entry->fd); // Current request is always the last op returned if (disk->cur_rq == op) { disk->cur_rq = TAILQ_PREV(op, dispatch_disk_operations_s, @@ -1623,14 +1823,14 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) return stream->source; } dispatch_fd_t fd = op->fd_entry->fd; - _dispatch_io_debug("stream source create", fd); + _dispatch_fd_debug("stream source create", fd); dispatch_source_t source = NULL; if (op->direction == DOP_DIR_READ) { - source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, - stream->dq); + source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, + (uintptr_t)fd, 0, stream->dq); } else if (op->direction == DOP_DIR_WRITE) { - source = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, fd, 0, - stream->dq); + source = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, + (uintptr_t)fd, 0, stream->dq); } else { dispatch_assert(op->direction < DOP_DIR_MAX); return NULL; @@ -1642,7 +1842,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // unregistered dispatch_queue_t close_queue = op->fd_entry->close_queue; dispatch_source_set_cancel_handler(source, ^{ - _dispatch_io_debug("stream source cancel", fd); + _dispatch_fd_debug("stream source cancel", fd); dispatch_resume(close_queue); }); stream->source = source; @@ -1659,6 +1859,18 @@ _dispatch_stream_source_handler(void *ctx) return _dispatch_stream_handler(stream); } +static void +_dispatch_stream_queue_handler(void *ctx) +{ + // On stream queue + dispatch_stream_t stream = (dispatch_stream_t)dispatch_get_context(ctx); + if (!stream) { + // _dispatch_stream_dispose has been called + return; + } + return _dispatch_stream_handler(stream); +} + static void _dispatch_stream_handler(void *ctx) { @@ -1678,17 +1890,18 @@ _dispatch_stream_handler(void *ctx) goto pick; } stream->op = op; - _dispatch_io_debug("stream handler", op->fd_entry->fd); + _dispatch_fd_debug("stream 
handler", op->fd_entry->fd); dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_fd_debug("initial delivery", op->fd_entry->fd); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // TODO: perform on the operation target queue to get correct priority - int result = _dispatch_operation_perform(op), flags = -1; + int result = _dispatch_operation_perform(op); + dispatch_op_flags_t flags = ~0u; switch (result) { case DISPATCH_OP_DELIVER: flags = DOP_DEFAULT; @@ -1703,7 +1916,8 @@ _dispatch_stream_handler(void *ctx) _dispatch_stream_complete_operation(stream, op); } if (_dispatch_stream_operation_avail(stream)) { - dispatch_async_f(stream->dq, stream, _dispatch_stream_handler); + dispatch_async_f(stream->dq, stream->dq, + _dispatch_stream_queue_handler); } break; case DISPATCH_OP_COMPLETE_RESUME: @@ -1740,7 +1954,7 @@ _dispatch_disk_handler(void *ctx) if (disk->io_active) { return; } - _dispatch_io_debug("disk handler", -1); + _dispatch_fd_debug("disk handler", -1); dispatch_operation_t op; size_t i = disk->free_idx, j = disk->req_idx; if (j <= i) { @@ -1758,6 +1972,7 @@ _dispatch_disk_handler(void *ctx) _dispatch_retain(op); disk->advise_list[i%disk->advise_list_depth] = op; op->active = true; + _dispatch_object_debug(op, "%s", __func__); } else { // No more operations to get break; @@ -1777,7 +1992,7 @@ _dispatch_disk_perform(void *ctxt) { dispatch_disk_t disk = ctxt; size_t chunk_size = dispatch_io_defaults.chunk_pages * PAGE_SIZE; - _dispatch_io_debug("disk perform", -1); + _dispatch_fd_debug("disk perform", -1); dispatch_operation_t op; size_t i = disk->advise_idx, j = disk->free_idx; if (j <= i) { @@ -1801,7 +2016,7 @@ _dispatch_disk_perform(void *ctxt) // For performance analysis if (!op->total && 
dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_io_debug("initial delivery", op->fd_entry->fd); + _dispatch_fd_debug("initial delivery", op->fd_entry->fd); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // Advise two chunks if the list only has one element and this is the @@ -1859,22 +2074,25 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) struct radvisory advise; // No point in issuing a read advise for the next chunk if we are already // a chunk ahead from reading the bytes - if (op->advise_offset > (off_t)((op->offset+op->total) + chunk_size + - PAGE_SIZE)) { + if (op->advise_offset > (off_t)(((size_t)op->offset + op->total) + + chunk_size + PAGE_SIZE)) { return; } + _dispatch_object_debug(op, "%s", __func__); advise.ra_count = (int)chunk_size; if (!op->advise_offset) { op->advise_offset = op->offset; // If this is the first time through, align the advised range to a // page boundary - size_t pg_fraction = (size_t)((op->offset + chunk_size) % PAGE_SIZE); + size_t pg_fraction = ((size_t)op->offset + chunk_size) % PAGE_SIZE; advise.ra_count += (int)(pg_fraction ? 
PAGE_SIZE - pg_fraction : 0); } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; _dispatch_io_syscall_switch(err, fcntl(op->fd_entry->fd, F_RDADVISE, &advise), + case EFBIG: break; // advised past the end of the file rdar://10415691 + case ENOTSUP: break; // not all FS support radvise rdar://13484629 // TODO: set disk status on error default: (void)dispatch_assume_zero(err); break; ); @@ -1887,6 +2105,7 @@ _dispatch_operation_perform(dispatch_operation_t op) if (err) { goto error; } + _dispatch_object_debug(op, "%s", __func__); if (!op->buf) { size_t max_buf_siz = op->params.high; size_t chunk_siz = dispatch_io_defaults.chunk_pages * PAGE_SIZE; @@ -1910,7 +2129,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_siz = max_buf_siz; } op->buf = valloc(op->buf_siz); - _dispatch_io_debug("buffer allocated", op->fd_entry->fd); + _dispatch_fd_debug("buffer allocated", op->fd_entry->fd); } else if (op->direction == DOP_DIR_WRITE) { // Always write the first data piece, if that is smaller than a // chunk, accumulate further data pieces until chunk size is reached @@ -1936,7 +2155,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_data = dispatch_data_create_map(d, (const void**)&op->buf, NULL); _dispatch_io_data_release(d); - _dispatch_io_debug("buffer mapped", op->fd_entry->fd); + _dispatch_fd_debug("buffer mapped", op->fd_entry->fd); } } if (op->fd_entry->fd == -1) { @@ -1947,7 +2166,7 @@ _dispatch_operation_perform(dispatch_operation_t op) } void *buf = op->buf + op->buf_len; size_t len = op->buf_siz - op->buf_len; - off_t off = op->offset + op->total; + off_t off = (off_t)((size_t)op->offset + op->total); ssize_t processed = -1; syscall: if (op->direction == DOP_DIR_READ) { @@ -1973,11 +2192,11 @@ _dispatch_operation_perform(dispatch_operation_t op) } // EOF is indicated by two handler invocations if (processed == 0) { - _dispatch_io_debug("EOF", op->fd_entry->fd); + _dispatch_fd_debug("EOF", op->fd_entry->fd); 
return DISPATCH_OP_DELIVER_AND_COMPLETE; } - op->buf_len += processed; - op->total += processed; + op->buf_len += (size_t)processed; + op->total += (size_t)processed; if (op->total == op->length) { // Finished processing all the bytes requested by the operation return DISPATCH_OP_COMPLETE; @@ -1989,7 +2208,7 @@ _dispatch_operation_perform(dispatch_operation_t op) if (err == EAGAIN) { // For disk based files with blocking I/O we should never get EAGAIN dispatch_assert(!op->fd_entry->disk); - _dispatch_io_debug("EAGAIN %d", op->fd_entry->fd, err); + _dispatch_fd_debug("EAGAIN %d", op->fd_entry->fd, err); if (op->direction == DOP_DIR_READ && op->total && op->channel == op->fd_entry->convenience_channel) { // Convenience read with available data completes on EAGAIN @@ -2002,7 +2221,7 @@ _dispatch_operation_perform(dispatch_operation_t op) case ECANCELED: return DISPATCH_OP_ERR; case EBADF: - (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err); + (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed); return DISPATCH_OP_FD_ERR; default: return DISPATCH_OP_COMPLETE; @@ -2026,7 +2245,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, deliver = true; } else if (op->buf_len < op->buf_siz) { // Request buffer is not yet used up - _dispatch_io_debug("buffer data", op->fd_entry->fd); + _dispatch_fd_debug("buffer data", op->fd_entry->fd); return; } } else { @@ -2080,11 +2299,12 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, } if (!deliver || ((flags & DOP_NO_EMPTY) && !dispatch_data_get_size(data))) { op->undelivered = undelivered; - _dispatch_io_debug("buffer data", op->fd_entry->fd); + _dispatch_fd_debug("buffer data", op->fd_entry->fd); return; } op->undelivered = 0; - _dispatch_io_debug("deliver data", op->fd_entry->fd); + _dispatch_object_debug(op, "%s", __func__); + _dispatch_fd_debug("deliver data", op->fd_entry->fd); dispatch_op_direction_t direction = op->direction; dispatch_io_handler_t handler = op->handler; #if 
DISPATCH_IO_DEBUG @@ -2101,7 +2321,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (done) { if (direction == DOP_DIR_READ && err) { if (dispatch_data_get_size(d)) { - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); handler(false, d, 0); } d = NULL; @@ -2109,10 +2329,77 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, d = NULL; } } - _dispatch_io_debug("IO handler invoke", fd); + _dispatch_fd_debug("IO handler invoke", fd); handler(done, d, err); _dispatch_release(channel); _dispatch_fd_entry_release(fd_entry); _dispatch_io_data_release(data); }); } + +#pragma mark - +#pragma mark dispatch_io_debug + +static size_t +_dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = channel->do_targetq; + return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%x, %sfd_entry = %p, " + "queue = %p, target = %s[%p], barrier_queue = %p, barrier_group = " + "%p, err = 0x%x, low = 0x%zx, high = 0x%zx, interval%s = %llu ", + channel->params.type == DISPATCH_IO_STREAM ? "stream" : "random", + channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? + "stopped, " : channel->atomic_flags & DIO_CLOSED ? "closed, " : "", + channel->fd_entry, channel->queue, target && target->dq_label ? + target->dq_label : "", target, channel->barrier_queue, + channel->barrier_group, channel->err, channel->params.low, + channel->params.high, channel->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? 
"(strict)" : "", + channel->params.interval); +} + +size_t +_dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(channel), channel); + offset += _dispatch_object_debug_attr(channel, &buf[offset], + bufsiz - offset); + offset += _dispatch_io_debug_attr(channel, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +static size_t +_dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, + size_t bufsiz) +{ + dispatch_queue_t target = op->do_targetq; + dispatch_queue_t oqtarget = op->op_q ? op->op_q->do_targetq : NULL; + return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%x, fd_entry = %p, " + "channel = %p, queue = %p -> %s[%p], target = %s[%p], " + "offset = %lld, length = %zu, done = %zu, undelivered = %zu, " + "flags = %u, err = 0x%x, low = 0x%zx, high = 0x%zx, " + "interval%s = %llu ", op->params.type == DISPATCH_IO_STREAM ? + "stream" : "random", op->direction == DOP_DIR_READ ? "read" : + "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, + op->channel, op->op_q, oqtarget && oqtarget->dq_label ? + oqtarget->dq_label : "", oqtarget, target && target->dq_label ? + target->dq_label : "", target, op->offset, op->length, op->total, + op->undelivered + op->buf_len, op->flags, op->err, op->params.low, + op->params.high, op->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? 
"(strict)" : "", op->params.interval); +} + +size_t +_dispatch_operation_debug(dispatch_operation_t op, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(op), op); + offset += _dispatch_object_debug_attr(op, &buf[offset], bufsiz - offset); + offset += _dispatch_operation_debug_attr(op, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} diff --git a/src/io_internal.h b/src/io_internal.h index dbbb6bf6c..fbb27c570 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2011 Apple Inc. All rights reserved. + * Copyright (c) 2009-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -34,10 +34,6 @@ #define _DISPATCH_IO_LABEL_SIZE 16 -#ifndef DISPATCH_IO_DEBUG -#define DISPATCH_IO_DEBUG 0 -#endif - #if TARGET_OS_EMBEDDED // rdar://problem/9032036 #define DIO_MAX_CHUNK_PAGES 128u // 512kB chunk size #else @@ -66,16 +62,6 @@ typedef unsigned int dispatch_op_flags_t; #define DIO_CLOSED 1u // channel has been closed #define DIO_STOPPED 2u // channel has been stopped (implies closed) -#define _dispatch_io_data_retain(x) dispatch_retain(x) -#define _dispatch_io_data_release(x) dispatch_release(x) - -#if DISPATCH_IO_DEBUG -#define _dispatch_io_debug(msg, fd, args...) \ - _dispatch_debug("fd %d: " msg, (fd), ##args) -#else -#define _dispatch_io_debug(msg, fd, args...) 
-#endif - DISPATCH_DECL_INTERNAL(dispatch_operation); DISPATCH_DECL_INTERNAL(dispatch_disk); @@ -126,6 +112,12 @@ struct dispatch_fd_entry_s { dispatch_fd_t fd; dispatch_io_path_data_t path_data; int orig_flags, orig_nosigpipe, err; +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD + int orig_fd_flags; +#endif +#if DISPATCH_USE_GUARDED_FD + unsigned int guard_flags; +#endif struct dispatch_stat_s stat; dispatch_stream_t streams[2]; dispatch_disk_t disk; @@ -160,7 +152,6 @@ struct dispatch_operation_s { dispatch_fd_entry_t fd_entry; dispatch_source_t timer; bool active; - int count; off_t advise_offset; void* buf; dispatch_op_flags_t flags; @@ -185,7 +176,10 @@ struct dispatch_io_s { }; void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); +size_t _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz); void _dispatch_io_dispose(dispatch_io_t channel); +size_t _dispatch_operation_debug(dispatch_operation_t op, char* buf, + size_t bufsiz); void _dispatch_operation_dispose(dispatch_operation_t operation); void _dispatch_disk_dispose(dispatch_disk_t disk); diff --git a/src/object.c b/src/object.c index 7b94c757c..a30503989 100644 --- a/src/object.c +++ b/src/object.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -30,9 +30,10 @@ _os_object_retain_count(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return ULONG_MAX; // global object } - return xref_cnt + 1; + return (unsigned long)(xref_cnt + 1); } +DISPATCH_NOINLINE _os_object_t _os_object_retain_internal(_os_object_t obj) { @@ -40,13 +41,14 @@ _os_object_retain_internal(_os_object_t obj) if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return obj; // global object } - ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt); + ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed); if (slowpath(ref_cnt <= 0)) { DISPATCH_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_NOINLINE void _os_object_release_internal(_os_object_t obj) { @@ -54,7 +56,7 @@ _os_object_release_internal(_os_object_t obj) if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return; // global object } - ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt); + ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed); if (fastpath(ref_cnt >= 0)) { return; } @@ -69,6 +71,7 @@ _os_object_release_internal(_os_object_t obj) return _os_object_dispose(obj); } +DISPATCH_NOINLINE _os_object_t _os_object_retain(_os_object_t obj) { @@ -76,13 +79,14 @@ _os_object_retain(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return obj; // global object } - xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt); + xref_cnt = dispatch_atomic_inc2o(obj, os_obj_xref_cnt, relaxed); if (slowpath(xref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_NOINLINE void _os_object_release(_os_object_t obj) { @@ -90,7 +94,7 @@ _os_object_release(_os_object_t obj) if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return; // global object } - xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt); + xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed); if (fastpath(xref_cnt >= 0)) { return; } @@ -114,9 +118,8 @@ 
_os_object_retain_weak(_os_object_t obj) if (slowpath(xref_cnt < -1)) { goto overrelease; } - if (slowpath(!dispatch_atomic_cmpxchg2o(obj, os_obj_xref_cnt, xref_cnt, - xref_cnt + 1))) { - xref_cnt = obj->os_obj_xref_cnt; + if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, + xref_cnt + 1, &xref_cnt, relaxed))) { goto retry; } return true; @@ -143,12 +146,13 @@ _os_object_allows_weak_reference(_os_object_t obj) void * _dispatch_alloc(const void *vtable, size_t size) { - return _os_object_alloc(vtable, size); + return _os_object_alloc_realized(vtable, size); } void dispatch_retain(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_retain, dou); (void)_os_object_retain(dou._os_obj); } @@ -161,6 +165,7 @@ _dispatch_retain(dispatch_object_t dou) void dispatch_release(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou); _os_object_release(dou._os_obj); } @@ -195,6 +200,8 @@ _dispatch_xref_dispose(dispatch_object_t dou) #if !USE_OBJC if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); + } else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) { + _dispatch_runloop_queue_xref_dispose(dou._dq); } return _dispatch_release(dou._os_obj); #endif @@ -213,33 +220,48 @@ _dispatch_dispose(dispatch_object_t dou) void * dispatch_get_context(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return NULL; + } return dou._do->do_ctxt; } void dispatch_set_context(dispatch_object_t dou, void *context) { - if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT) { - dou._do->do_ctxt = context; + DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return; } + dou._do->do_ctxt = 
context; } void dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { + DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + return; + } dou._do->do_finalizer = finalizer; } void dispatch_suspend(dispatch_object_t dou) { - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } // rdar://8181908 explains why we need to do an internal retain at every // suspension. (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); _dispatch_retain(dou._do); } @@ -255,19 +277,20 @@ _dispatch_resume_slow(dispatch_object_t dou) void dispatch_resume(dispatch_object_t dou) { + DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); // Global objects cannot be suspended or resumed. This also has the // side effect of saturating the suspend count of an object and // guarding against resuming due to overflow. - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } // Check the previous value of the suspend count. If the previous // value was a single suspend interval, the object should be resumed. // If the previous value was less than the suspend interval, the object // has been over-resumed. 
- unsigned int suspend_cnt = dispatch_atomic_sub2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL) + - DISPATCH_OBJECT_SUSPEND_INTERVAL; + unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do, + do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { // Balancing the retain() done in suspend() for rdar://8181908 return _dispatch_release(dou._do); @@ -281,7 +304,7 @@ dispatch_resume(dispatch_object_t dou) size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return snprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " + return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1, dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, diff --git a/src/object.m b/src/object.m index ea696228c..e64a4fda7 100644 --- a/src/object.m +++ b/src/object.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,7 +29,6 @@ #error "Cannot build with ARC" #endif -#include #include #include @@ -40,12 +39,11 @@ #include #include -static dispatch_once_t _os_object_gc_pred; static bool _os_object_have_gc; static malloc_zone_t *_os_object_gc_zone; static void -_os_object_gc_init(void *ctxt DISPATCH_UNUSED) +_os_object_gc_init(void) { _os_object_have_gc = objc_collectingEnabled(); if (slowpath(_os_object_have_gc)) { @@ -56,7 +54,6 @@ static _os_object_t _os_object_make_uncollectable(_os_object_t obj) { - dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); if (slowpath(_os_object_have_gc)) { auto_zone_retain(_os_object_gc_zone, obj); } @@ -66,38 +63,64 @@ static _os_object_t _os_object_make_collectable(_os_object_t obj) { - dispatch_once_f(&_os_object_gc_pred, NULL, _os_object_gc_init); if (slowpath(_os_object_have_gc)) { auto_zone_release(_os_object_gc_zone, obj); } return obj; } -#else + +#define _os_objc_gc_retain(obj) \ + if (slowpath(_os_object_have_gc)) { \ + return auto_zone_retain(_os_object_gc_zone, obj); \ + } + +#define _os_objc_gc_release(obj) \ + if (slowpath(_os_object_have_gc)) { \ + return (void)auto_zone_release(_os_object_gc_zone, obj); \ + } + +#else // __OBJC_GC__ +#define _os_object_gc_init() #define _os_object_make_uncollectable(obj) (obj) #define _os_object_make_collectable(obj) (obj) +#define _os_objc_gc_retain(obj) +#define _os_objc_gc_release(obj) #endif // __OBJC_GC__ #pragma mark - #pragma mark _os_object_t +static inline id +_os_objc_alloc(Class cls, size_t size) +{ + id obj; + size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); + while (!fastpath(obj = class_createInstance(cls, size))) { + _dispatch_temporary_resource_shortage(); + } + return obj; +} + void _os_object_init(void) { - return _objc_init(); + _objc_init(); + _os_object_gc_init(); +} + +_os_object_t +_os_object_alloc_realized(const void *cls, size_t size) +{ + dispatch_assert(size >= sizeof(struct _os_object_s)); + return 
_os_object_make_uncollectable(_os_objc_alloc(cls, size)); } _os_object_t _os_object_alloc(const void *_cls, size_t size) { - Class cls = _cls; - _os_object_t obj; dispatch_assert(size >= sizeof(struct _os_object_s)); - size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); - if (!cls) cls = [OS_OBJECT_CLASS(object) class]; - while (!fastpath(obj = class_createInstance(cls, size))) { - sleep(1); // Temporary resource shortage - } - return _os_object_make_uncollectable(obj); + Class cls = _cls ? [(id)_cls class] : [OS_OBJECT_CLASS(object) class]; + return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); } void @@ -154,10 +177,82 @@ - (void)_dispose { @end #pragma mark - -#pragma mark _dispatch_object +#pragma mark _dispatch_objc #include +id +_dispatch_objc_alloc(Class cls, size_t size) +{ + return _os_objc_alloc(cls, size); +} + +void +_dispatch_objc_retain(dispatch_object_t dou) +{ + _os_objc_gc_retain(dou); + return (void)[dou retain]; +} + +void +_dispatch_objc_release(dispatch_object_t dou) +{ + _os_objc_gc_release(dou); + return [dou release]; +} + +void +_dispatch_objc_set_context(dispatch_object_t dou, void *context) +{ + return [dou _setContext:context]; +} + +void * +_dispatch_objc_get_context(dispatch_object_t dou) +{ + return [dou _getContext]; +} + +void +_dispatch_objc_set_finalizer_f(dispatch_object_t dou, + dispatch_function_t finalizer) +{ + return [dou _setFinalizer:finalizer]; +} + +void +_dispatch_objc_set_target_queue(dispatch_object_t dou, dispatch_queue_t queue) +{ + return [dou _setTargetQueue:queue]; +} + +void +_dispatch_objc_suspend(dispatch_object_t dou) +{ + return [dou _suspend]; +} + +void +_dispatch_objc_resume(dispatch_object_t dou) +{ + return [dou _resume]; +} + +size_t +_dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) +{ + NSUInteger offset = 0; + NSString *desc = [dou debugDescription]; + [desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset + encoding:NSUTF8StringEncoding options:0 + 
range:NSMakeRange(0, [desc length]) remainingRange:NULL]; + if (offset) buf[offset] = 0; + return offset; +} + +#pragma mark - +#pragma mark _dispatch_object + // Force non-lazy class realization rdar://10640168 #define DISPATCH_OBJC_LOAD() + (void)load {} @@ -182,8 +277,13 @@ - (void)_dispose { - (NSString *)debugDescription { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; - char buf[4096]; - dx_debug((struct dispatch_object_s *)self, buf, sizeof(buf)); + char buf[2048]; + struct dispatch_object_s *obj = (struct dispatch_object_s *)self; + if (obj->do_vtable->do_debug) { + dx_debug(obj, buf, sizeof(buf)); + } else { + strlcpy(buf, dx_kind(obj), sizeof(buf)); + } return [nsstring stringWithFormat: [nsstring stringWithUTF8String:"<%s: %s>"], class_getName([self class]), buf]; @@ -214,6 +314,16 @@ - (void)_xref_dispose { @end +@implementation DISPATCH_CLASS(queue_runloop) +DISPATCH_OBJC_LOAD() + +- (void)_xref_dispose { + _dispatch_runloop_queue_xref_dispose(self); + [super _xref_dispose]; +} + +@end + #define DISPATCH_CLASS_IMPL(name) \ @implementation DISPATCH_CLASS(name) \ DISPATCH_OBJC_LOAD() \ @@ -225,10 +335,11 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_specific_queue) DISPATCH_CLASS_IMPL(queue_attr) +DISPATCH_CLASS_IMPL(mach) +DISPATCH_CLASS_IMPL(mach_msg) DISPATCH_CLASS_IMPL(io) DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) -DISPATCH_CLASS_IMPL(data) #pragma mark - #pragma mark dispatch_autorelease_pool @@ -281,6 +392,33 @@ - (void)_xref_dispose { } } +#undef _dispatch_client_callout3 +bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + @try { + return f(ctxt, region, offset, buffer, size); + } + @catch (...) 
{ + objc_terminate(); + } +} + +#undef _dispatch_client_callout4 +void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f) +{ + @try { + return f(ctxt, reason, dmsg, error); + } + @catch (...) { + objc_terminate(); + } +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index 8bb673366..b3696632b 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -53,38 +53,53 @@ } #else #define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ - const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable = { \ + DISPATCH_CONST_STRUCT_INSTANCE(dispatch_##super##_vtable_s, \ + _dispatch_##name##_vtable, \ ._os_obj_xref_dispose = _dispatch_xref_dispose, \ ._os_obj_dispose = _dispatch_dispose, \ - __VA_ARGS__ \ - } + __VA_ARGS__) #endif // USE_OBJC #define DISPATCH_SUBCLASS_DECL(name, super) \ DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, super) \ struct dispatch_##name##_s; \ - extern const struct dispatch_##name##_vtable_s { \ + extern DISPATCH_CONST_STRUCT_DECL(dispatch_##name##_vtable_s, \ + _dispatch_##name##_vtable, \ + { \ _OS_OBJECT_CLASS_HEADER(); \ DISPATCH_VTABLE_HEADER(name); \ - } _dispatch_##name##_vtable + }) #define DISPATCH_CLASS_DECL(name) DISPATCH_SUBCLASS_DECL(name, dispatch_object) #define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_##name, dispatch_##super); \ DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, dispatch_##super) \ - extern const struct dispatch_##super##_vtable_s _dispatch_##name##_vtable + extern DISPATCH_CONST_STRUCT_DECL(dispatch_##super##_vtable_s, \ + _dispatch_##name##_vtable) #define DISPATCH_VTABLE_INSTANCE(name, ...) 
\ DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) #define DISPATCH_VTABLE(name) &_dispatch_##name##_vtable +#if !TARGET_OS_WIN32 #define DISPATCH_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - struct dispatch_queue_s *(*const do_invoke)(struct dispatch_##x##_s *); \ - bool (*const do_probe)(struct dispatch_##x##_s *); \ - void (*const do_dispose)(struct dispatch_##x##_s *) + void (*const do_invoke)(struct dispatch_##x##_s *); \ + unsigned long (*const do_probe)(struct dispatch_##x##_s *); \ + void (*const do_dispose)(struct dispatch_##x##_s *); +#else +// Cannot be const on Win32 because we initialize at runtime. +#define DISPATCH_VTABLE_HEADER(x) \ + unsigned long do_type; \ + const char *do_kind; \ + size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \ + void (*do_invoke)(struct dispatch_##x##_s *); \ + unsigned long (*do_probe)(struct dispatch_##x##_s *); \ + void (*do_dispose)(struct dispatch_##x##_s *); +#endif #define dx_type(x) (x)->do_vtable->do_type +#define dx_metatype(x) ((x)->do_vtable->do_type & _DISPATCH_META_TYPE_MASK) #define dx_kind(x) (x)->do_vtable->do_kind #define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) #define dx_dispose(x) (x)->do_vtable->do_dispose(x) @@ -131,7 +146,8 @@ enum { DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, - DISPATCH_DATA_TYPE = _DISPATCH_NODE_TYPE, + DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, + DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, @@ -140,7 +156,7 @@ enum { DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_GLOBAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_ROOT_TYPE = 2 | _DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, DISPATCH_QUEUE_SPECIFIC_TYPE = 4 
| _DISPATCH_QUEUE_TYPE, @@ -148,6 +164,7 @@ enum { DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, + DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, }; DISPATCH_SUBCLASS_DECL(object, object); @@ -167,6 +184,67 @@ void *_dispatch_autorelease_pool_push(void); void _dispatch_autorelease_pool_pop(void *context); #endif +#if USE_OBJC +#include + +#define OS_OBJC_CLASS_SYMBOL(name) \ + DISPATCH_CONCAT(OBJC_CLASS_$_,name) +#define OS_OBJC_CLASS_DECL(name) \ + extern void *OS_OBJC_CLASS_SYMBOL(name) +#define OS_OBJC_CLASS(name) \ + ((Class)&OS_OBJC_CLASS_SYMBOL(name)) +#define OS_OBJECT_OBJC_CLASS_DECL(name) \ + OS_OBJC_CLASS_DECL(OS_OBJECT_CLASS(name)) +#define OS_OBJECT_OBJC_CLASS(name) \ + OS_OBJC_CLASS(OS_OBJECT_CLASS(name)) +#define DISPATCH_OBJC_CLASS_DECL(name) \ + OS_OBJC_CLASS_DECL(DISPATCH_CLASS(name)) +#define DISPATCH_OBJC_CLASS(name) \ + OS_OBJC_CLASS(DISPATCH_CLASS(name)) + +OS_OBJECT_OBJC_CLASS_DECL(object); +DISPATCH_OBJC_CLASS_DECL(object); + +// ObjC toll-free bridging, keep in sync with libdispatch.order file +#define DISPATCH_OBJECT_TFB(f, o, ...) 
\ + if (slowpath((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + slowpath((Class)((o)._os_obj->os_obj_isa) < \ + DISPATCH_OBJC_CLASS(object)) || \ + slowpath((Class)((o)._os_obj->os_obj_isa) >= \ + OS_OBJECT_OBJC_CLASS(object))) { \ + return f((o), ##__VA_ARGS__); \ + } + +id _dispatch_objc_alloc(Class cls, size_t size); +void _dispatch_objc_retain(dispatch_object_t dou); +void _dispatch_objc_release(dispatch_object_t dou); +void _dispatch_objc_set_context(dispatch_object_t dou, void *context); +void *_dispatch_objc_get_context(dispatch_object_t dou); +void _dispatch_objc_set_finalizer_f(dispatch_object_t dou, + dispatch_function_t finalizer); +void _dispatch_objc_set_target_queue(dispatch_object_t dou, + dispatch_queue_t queue); +void _dispatch_objc_suspend(dispatch_object_t dou); +void _dispatch_objc_resume(dispatch_object_t dou); +size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); + +#if __OBJC2__ +@interface NSObject (DISPATCH_CONCAT(_,DISPATCH_CLASS(object))) +- (void)_setContext:(void*)context; +- (void*)_getContext; +- (void)_setFinalizer:(dispatch_function_t)finalizer; +- (void)_setTargetQueue:(dispatch_queue_t)queue; +- (void)_suspend; +- (void)_resume; +@end +#endif // __OBJC2__ +#else // USE_OBJC +#define DISPATCH_OBJECT_TFB(f, o, ...) +#endif // USE_OBJC + +#pragma mark - +#pragma mark _os_object_s + typedef struct _os_object_class_s { _OS_OBJECT_CLASS_HEADER(); } _os_object_class_s; @@ -185,4 +263,4 @@ bool _os_object_allows_weak_reference(_os_object_t obj); void _os_object_dispose(_os_object_t obj); void _os_object_xref_dispose(_os_object_t obj); -#endif +#endif // __DISPATCH_OBJECT_INTERNAL__ diff --git a/src/once.c b/src/once.c index ab4a4e887..ef57fc383 100644 --- a/src/once.c +++ b/src/once.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -35,9 +35,7 @@ struct _dispatch_once_waiter_s { void dispatch_once(dispatch_once_t *val, dispatch_block_t block) { - struct Block_basic *bb = (void *)block; - - dispatch_once_f(val, block, (void *)bb->Block_invoke); + dispatch_once_f(val, block, _dispatch_Block_invoke(block)); } #endif @@ -51,8 +49,7 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) struct _dispatch_once_waiter_s *tail, *tmp; _dispatch_thread_semaphore_t sema; - if (dispatch_atomic_cmpxchg(vval, NULL, &dow)) { - dispatch_atomic_acquire_barrier(); + if (dispatch_atomic_cmpxchg(vval, NULL, &dow, acquire)) { _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. @@ -106,12 +103,12 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) // need to be issued. dispatch_atomic_maximally_synchronizing_barrier(); - //dispatch_atomic_release_barrier(); // assumed contained in above - tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE); + // above assumed to contain release barrier + tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); tail = &dow; while (tail != tmp) { while (!tmp->dow_next) { - _dispatch_hardware_pause(); + dispatch_hardware_pause(); } sema = tmp->dow_sema; tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next; @@ -119,15 +116,15 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) } } else { dow.dow_sema = _dispatch_get_thread_semaphore(); + tmp = *vval; for (;;) { - tmp = *vval; if (tmp == DISPATCH_ONCE_DONE) { break; } - dispatch_atomic_store_barrier(); - if (dispatch_atomic_cmpxchg(vval, tmp, &dow)) { + if (dispatch_atomic_cmpxchgvw(vval, tmp, &dow, &tmp, release)) { dow.dow_next = tmp; _dispatch_thread_semaphore_wait(dow.dow_sema); + break; } } _dispatch_put_thread_semaphore(dow.dow_sema); diff --git a/src/protocol.defs b/src/protocol.defs index bf5fe5bce..7a9cf1898 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -1,5 
+1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -83,7 +83,7 @@ skip; skip; simpleroutine -wakeup_main_thread( +wakeup_runloop_thread( _port : mach_port_t; WaitTime _waitTimeout : natural_t ); diff --git a/src/provider.d b/src/provider.d index 59fe790d7..ede3c56b3 100644 --- a/src/provider.d +++ b/src/provider.d @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -18,23 +18,84 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +/* + * DTrace Probes for libdispatch + * + * Only available in the introspection version of the library, + * loaded by running a process with the environment variable + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection + */ + typedef struct dispatch_object_s *dispatch_object_t; typedef struct dispatch_queue_s *dispatch_queue_t; +typedef struct dispatch_source_s *dispatch_source_t; typedef void (*dispatch_function_t)(void *); +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + provider dispatch { + +/* + * Probes for dispatch queue push and pop operations + * + * dispatch$target:libdispatch*.dylib::queue-push + * dispatch$target:libdispatch*.dylib::queue-pop + */ probe queue__push(dispatch_queue_t queue, const char *label, dispatch_object_t item, const char *kind, dispatch_function_t function, void *context); probe queue__pop(dispatch_queue_t queue, const char *label, dispatch_object_t item, const char *kind, dispatch_function_t function, void *context); + +/* + * Probes for dispatch callouts to client functions + * + * dispatch$target:libdispatch*.dylib::callout-entry + * dispatch$target:libdispatch*.dylib::callout-return + */ probe callout__entry(dispatch_queue_t queue, const char *label, dispatch_function_t function, void 
*context); probe callout__return(dispatch_queue_t queue, const char *label, dispatch_function_t function, void *context); + +/* + * Probes for dispatch timer configuration and programming + * + * Timer configuration indicates that dispatch_source_set_timer() was called. + * Timer programming indicates that the dispatch manager is about to sleep + * for 'deadline' ns (but may wake up earlier if non-timer events occur). + * Time parameters are in nanoseconds, a value of -1 means "forever". + * + * dispatch$target:libdispatch*.dylib::timer-configure + * dispatch$target:libdispatch*.dylib::timer-program + */ + probe timer__configure(dispatch_source_t source, + dispatch_function_t handler, dispatch_trace_timer_params_t params); + probe timer__program(dispatch_source_t source, dispatch_function_t handler, + dispatch_trace_timer_params_t params); + +/* + * Probes for dispatch timer wakes and fires + * + * Timer wakes indicate that the dispatch manager woke up due to expiry of the + * deadline for the specified timer. + * Timer fires indicate that that the dispatch manager scheduled the event + * handler of the specified timer for asynchronous execution (may occur without + * a corresponding timer wake if the manager was awake processing other events + * when the timer deadline expired). + * + * dispatch$target:libdispatch*.dylib::timer-wake + * dispatch$target:libdispatch*.dylib::timer-fire + */ + probe timer__wake(dispatch_source_t source, dispatch_function_t handler); + probe timer__fire(dispatch_source_t source, dispatch_function_t handler); + }; + #pragma D attributes Evolving/Evolving/Common provider dispatch provider #pragma D attributes Private/Private/Common provider dispatch module #pragma D attributes Private/Private/Common provider dispatch function diff --git a/src/queue.c b/src/queue.c index f01d7f855..056876223 100644 --- a/src/queue.c +++ b/src/queue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. 
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,7 +27,15 @@ !defined(DISPATCH_ENABLE_THREAD_POOL) #define DISPATCH_ENABLE_THREAD_POOL 1 #endif -#if DISPATCH_ENABLE_THREAD_POOL && !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL +#define DISPATCH_USE_PTHREAD_POOL 1 +#endif +#if HAVE_PTHREAD_WORKQUEUES && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ + !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) +#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 +#endif +#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ + !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* #endif @@ -38,28 +46,25 @@ static void _dispatch_queue_cleanup(void *ctxt); static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n); static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_queue_t dq); static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread3(void *context); -#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static void _dispatch_worker_thread2(int priority, int options, void *context); #endif -#if DISPATCH_ENABLE_THREAD_POOL +#endif +#if DISPATCH_USE_PTHREAD_POOL static void *_dispatch_worker_thread(void *context); static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif #if DISPATCH_COCOA_COMPAT -static unsigned int _dispatch_worker_threads; static dispatch_once_t _dispatch_main_q_port_pred; -static mach_port_t main_q_port; - -static void _dispatch_main_q_port_init(void *ctxt); -static dispatch_queue_t _dispatch_queue_wakeup_main(void); -static void _dispatch_main_queue_drain(void); +static dispatch_queue_t _dispatch_main_queue_wakeup(void); 
+unsigned long _dispatch_runloop_queue_wakeup(dispatch_queue_t dq); +static void _dispatch_runloop_queue_port_init(void *ctxt); +static void _dispatch_runloop_queue_port_dispose(dispatch_queue_t dq); #endif #pragma mark - @@ -110,7 +115,7 @@ static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { }; #endif -#define MAX_THREAD_COUNT 255 +#define MAX_PTHREAD_COUNT 255 struct dispatch_root_queue_context_s { union { @@ -118,18 +123,20 @@ struct dispatch_root_queue_context_s { unsigned int volatile dgq_pending; #if HAVE_PTHREAD_WORKQUEUES int dgq_wq_priority, dgq_wq_options; -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif #endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL + void *dgq_ctxt; dispatch_semaphore_t dgq_thread_mediator; - uint32_t dgq_thread_pool_size; + uint32_t volatile dgq_thread_pool_size; #endif }; char _dgq_pad[DISPATCH_CACHELINE_SIZE]; }; }; +typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { @@ -141,7 +148,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {{{ @@ -152,7 +158,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {{{ @@ -163,7 +168,6 @@ static struct dispatch_root_queue_context_s 
_dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {{{ @@ -174,7 +178,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {{{ @@ -185,7 +188,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {{{ @@ -196,7 +198,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {{{ @@ -207,7 +208,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {{{ @@ -218,7 +218,6 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { #if DISPATCH_ENABLE_THREAD_POOL .dgq_thread_mediator = &_dispatch_thread_mediator[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], - .dgq_thread_pool_size = MAX_THREAD_COUNT, #endif }}}, }; @@ -351,6 +350,13 @@ static const dispatch_queue_t 
_dispatch_wq2root_queues[][2] = { }; #endif // HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +static struct dispatch_queue_s _dispatch_mgr_root_queue; +#else +#define _dispatch_mgr_root_queue \ + _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] +#endif + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN @@ -359,29 +365,101 @@ struct dispatch_queue_s _dispatch_mgr_q = { .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], + .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, + .dq_is_thread_bound = 1, .dq_serialnum = 2, }; dispatch_queue_t dispatch_get_global_queue(long priority, unsigned long flags) { - if (flags & ~DISPATCH_QUEUE_OVERCOMMIT) { + if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return NULL; } return _dispatch_get_root_queue(priority, flags & DISPATCH_QUEUE_OVERCOMMIT); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_get_current_queue(void) +{ + return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); +} + dispatch_queue_t dispatch_get_current_queue(void) { - return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); + return _dispatch_get_current_queue(); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_targets_queue(dispatch_queue_t dq1, dispatch_queue_t dq2) +{ + while (dq1) { + if (dq1 == dq2) { + return true; + } + dq1 = dq1->do_targetq; + } + return false; +} + +#define DISPATCH_ASSERT_QUEUE_MESSAGE "BUG in client of libdispatch: " \ + "Assertion failed: Block was run on an unexpected queue" + +DISPATCH_NOINLINE +static void +_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) +{ + char *msg; + asprintf(&msg, "%s\n%s 
queue: 0x%p[%s]", DISPATCH_ASSERT_QUEUE_MESSAGE, + expected ? "Expected" : "Unexpected", dq, dq->dq_label ? + dq->dq_label : ""); + _dispatch_log("%s", msg); + _dispatch_set_crash_log_message(msg); + _dispatch_hardware_crash(); + free(msg); +} + +void +dispatch_assert_queue(dispatch_queue_t dq) +{ + if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + DISPATCH_CLIENT_CRASH("invalid queue passed to " + "dispatch_assert_queue()"); + } + dispatch_queue_t cq = _dispatch_queue_get_current(); + if (fastpath(cq) && fastpath(_dispatch_queue_targets_queue(cq, dq))) { + return; + } + _dispatch_assert_queue_fail(dq, true); } +void +dispatch_assert_queue_not(dispatch_queue_t dq) +{ + if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + DISPATCH_CLIENT_CRASH("invalid queue passed to " + "dispatch_assert_queue_not()"); + } + dispatch_queue_t cq = _dispatch_queue_get_current(); + if (slowpath(cq) && slowpath(_dispatch_queue_targets_queue(cq, dq))) { + _dispatch_assert_queue_fail(dq, false); + } +} + +#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG +#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) +#define _dispatch_debug_root_queue(...) dispatch_debug_queue(__VA_ARGS__) +#else +#define _dispatch_root_queue_debug(...) +#define _dispatch_debug_root_queue(...) 
+#endif + #pragma mark - #pragma mark dispatch_init @@ -399,12 +477,16 @@ _dispatch_root_queues_init_workq(void) bool result = false; #if HAVE_PTHREAD_WORKQUEUES bool disable_wq = false; -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif int r; #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!disable_wq) { +#if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218 + pthread_workqueue_setdispatchoffset_np( + offsetof(struct dispatch_queue_s, dq_serialnum)); +#endif r = pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); #if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK (void)dispatch_assume_zero(r); @@ -412,7 +494,7 @@ _dispatch_root_queues_init_workq(void) result = !r; } #endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL if (!result) { #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK pthread_workqueue_attr_t pwq_attr; @@ -424,8 +506,8 @@ _dispatch_root_queues_init_workq(void) int i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { pthread_workqueue_t pwq = NULL; - struct dispatch_root_queue_context_s *qc = - &_dispatch_root_queue_contexts[i]; + dispatch_root_queue_context_t qc; + qc = &_dispatch_root_queue_contexts[i]; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK if (!disable_wq #if DISPATCH_NO_BG_PRIORITY @@ -436,13 +518,14 @@ _dispatch_root_queues_init_workq(void) qc->dgq_wq_priority); (void)dispatch_assume_zero(r); r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, - qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); + qc->dgq_wq_options & + WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); (void)dispatch_assume_zero(r); r = pthread_workqueue_create_np(&pwq, &pwq_attr); (void)dispatch_assume_zero(r); result = result || dispatch_assume(pwq); } -#endif +#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK qc->dgq_kworkqueue = pwq ? 
pwq : (void*)(~0ul); } #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK @@ -457,45 +540,51 @@ _dispatch_root_queues_init_workq(void) return result; } +#if DISPATCH_USE_PTHREAD_POOL static inline void -_dispatch_root_queues_init_thread_pool(void) +_dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, + bool overcommit) { -#if DISPATCH_ENABLE_THREAD_POOL - int i; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { -#if TARGET_OS_EMBEDDED - // some software hangs if the non-overcommitting queues do not - // overcommit when threads block. Someday, this behavior should apply - // to all platforms - if (!(i & 1)) { - _dispatch_root_queue_contexts[i].dgq_thread_pool_size = - _dispatch_hw_config.cc_max_active; - } -#endif + qc->dgq_thread_pool_size = overcommit ? MAX_PTHREAD_COUNT : + _dispatch_hw_config.cc_max_active; #if USE_MACH_SEM - // override the default FIFO behavior for the pool semaphores - kern_return_t kr = semaphore_create(mach_task_self(), - &_dispatch_thread_mediator[i].dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - (void)dispatch_assume(_dispatch_thread_mediator[i].dsema_port); + // override the default FIFO behavior for the pool semaphores + kern_return_t kr = semaphore_create(mach_task_self(), + &qc->dgq_thread_mediator->dsema_port, SYNC_POLICY_LIFO, 0); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + (void)dispatch_assume(qc->dgq_thread_mediator->dsema_port); #elif USE_POSIX_SEM - /* XXXRW: POSIX semaphores don't support LIFO? */ - int ret = sem_init(&_dispatch_thread_mediator[i].dsema_sem, 0, 0); - (void)dispatch_assume_zero(ret); + /* XXXRW: POSIX semaphores don't support LIFO? 
*/ + int ret = sem_init(&qc->dgq_thread_mediator->dsema_sem, 0, 0); + (void)dispatch_assume_zero(ret); #endif - } -#else - DISPATCH_CRASH("Thread pool creation failed"); -#endif // DISPATCH_ENABLE_THREAD_POOL } +#endif // DISPATCH_USE_PTHREAD_POOL static void _dispatch_root_queues_init(void *context DISPATCH_UNUSED) { _dispatch_safe_fork = false; if (!_dispatch_root_queues_init_workq()) { - _dispatch_root_queues_init_thread_pool(); +#if DISPATCH_ENABLE_THREAD_POOL + int i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + bool overcommit = true; +#if TARGET_OS_EMBEDDED + // some software hangs if the non-overcommitting queues do not + // overcommit when threads block. Someday, this behavior should + // apply to all platforms + if (!(i & 1)) { + overcommit = false; + } +#endif + _dispatch_root_queue_init_pthread_pool( + &_dispatch_root_queue_contexts[i], overcommit); + } +#else + DISPATCH_CRASH("Root queue initialization failed"); +#endif // DISPATCH_ENABLE_THREAD_POOL } } @@ -526,18 +615,17 @@ libdispatch_init(void) #endif dispatch_assert(sizeof(struct dispatch_apply_s) <= - ROUND_UP_TO_CACHELINE_SIZE(sizeof( - struct dispatch_continuation_s))); - dispatch_assert(sizeof(struct dispatch_source_s) == - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); + DISPATCH_CONTINUATION_SIZE); dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE == 0); dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % DISPATCH_CACHELINE_SIZE == 0); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); +#if !DISPATCH_USE_OS_SEMAPHORE_CACHE _dispatch_thread_key_create(&dispatch_sema4_key, (void (*)(void *))_dispatch_thread_semaphore_dispose); +#endif _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); _dispatch_thread_key_create(&dispatch_io_key, NULL); _dispatch_thread_key_create(&dispatch_apply_key, NULL); @@ -551,6 +639,7 @@ libdispatch_init(void) #endif 
_dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + _dispatch_queue_set_bound_thread(&_dispatch_main_q); #if DISPATCH_USE_PTHREAD_ATFORK (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, @@ -560,6 +649,7 @@ libdispatch_init(void) _dispatch_hw_config_init(); _dispatch_vtable_init(); _os_object_init(); + _dispatch_introspection_init(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -572,6 +662,7 @@ dispatch_atfork_child(void) if (_dispatch_safe_fork) { return; } + _dispatch_child_of_unsafe_fork = true; _dispatch_main_q.dq_items_head = crash; _dispatch_main_q.dq_items_tail = crash; @@ -591,51 +682,55 @@ dispatch_atfork_child(void) // skip zero // 1 - main_q // 2 - mgr_q -// 3 - _unused_ +// 3 - mgr_root_q // 4,5,6,7,8,9,10,11 - global queues // we use 'xadd' on Intel, so the initial value == next assigned -unsigned long _dispatch_queue_serial_numbers = 12; +unsigned long volatile _dispatch_queue_serial_numbers = 12; dispatch_queue_t -dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +dispatch_queue_create_with_target(const char *label, + dispatch_queue_attr_t attr, dispatch_queue_t tq) { dispatch_queue_t dq; - size_t label_len; - - if (!label) { - label = ""; - } - - label_len = strlen(label); - if (label_len < (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1)) { - label_len = (DISPATCH_QUEUE_MIN_LABEL_SIZE - 1); - } - // XXX switch to malloc() dq = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_MIN_LABEL_SIZE - - DISPATCH_QUEUE_CACHELINE_PAD + label_len + 1); + sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); _dispatch_queue_init(dq); - strcpy(dq->dq_label, label); - - if (fastpath(!attr)) { - return dq; + if (label) { + dq->dq_label = strdup(label); } - if (fastpath(attr == DISPATCH_QUEUE_CONCURRENT)) { + + if (attr == DISPATCH_QUEUE_CONCURRENT) { dq->dq_width = UINT32_MAX; - dq->do_targetq = _dispatch_get_root_queue(0, false); + if (!tq) { + tq = _dispatch_get_root_queue(0, 
false); + } } else { - dispatch_debug_assert(!attr, "Invalid attribute"); + if (!tq) { + // Default target queue is overcommit! + tq = _dispatch_get_root_queue(0, true); + } + if (slowpath(attr)) { + dispatch_debug_assert(!attr, "Invalid attribute"); + } } - return dq; + dq->do_targetq = tq; + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} + +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +{ + return dispatch_queue_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT); } -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol void -_dispatch_queue_dispose(dispatch_queue_t dq) +_dispatch_queue_destroy(dispatch_object_t dou) { + dispatch_queue_t dq = dou._dq; if (slowpath(dq == _dispatch_queue_get_current())) { DISPATCH_CRASH("Release of a queue by itself"); } @@ -647,16 +742,32 @@ _dispatch_queue_dispose(dispatch_queue_t dq) dq->dq_items_tail = (void *)0x200; dispatch_queue_t dqsq = dispatch_atomic_xchg2o(dq, dq_specific_q, - (void *)0x200); + (void *)0x200, relaxed); if (dqsq) { _dispatch_release(dqsq); } } +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +void +_dispatch_queue_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); + if (dq->dq_label) { + free((void*)dq->dq_label); + } + _dispatch_queue_destroy(dq); +} + const char * dispatch_queue_get_label(dispatch_queue_t dq) { - return dq->dq_label; + if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_get_current_queue(); + } + return dq->dq_label ? 
dq->dq_label : ""; } static void @@ -668,10 +779,11 @@ _dispatch_queue_set_width2(void *ctxt) if (w == 1 || w == 0) { dq->dq_width = 1; + _dispatch_object_debug(dq, "%s", __func__); return; } if (w > 0) { - tmp = w; + tmp = (unsigned int)w; } else switch (w) { case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: tmp = _dispatch_hw_config.cc_max_physical; @@ -688,15 +800,17 @@ _dispatch_queue_set_width2(void *ctxt) // multiply by two since the running count is inc/dec by two // (the low bit == barrier) dq->dq_width = tmp * 2; + _dispatch_object_debug(dq, "%s", __func__); } void dispatch_queue_set_width(dispatch_queue_t dq, long width) { - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } - dispatch_barrier_async_f(dq, (void*)(intptr_t)width, + _dispatch_barrier_trysync_f(dq, (void*)(intptr_t)width, _dispatch_queue_set_width2); } @@ -710,18 +824,18 @@ _dispatch_set_target_queue2(void *ctxt) prev_dq = dq->do_targetq; dq->do_targetq = ctxt; _dispatch_release(prev_dq); + _dispatch_object_debug(dq, "%s", __func__); } void dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) { - dispatch_queue_t prev_dq; - unsigned long type; - - if (slowpath(dou._do->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, dq); + if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { return; } - type = dx_type(dou._do) & _DISPATCH_META_TYPE_MASK; + unsigned long type = dx_metatype(dou._do); if (slowpath(!dq)) { bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && slowpath(dou._dq->dq_width > 1)); @@ -732,38 +846,210 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) case _DISPATCH_QUEUE_TYPE: case _DISPATCH_SOURCE_TYPE: _dispatch_retain(dq); - return dispatch_barrier_async_f(dou._dq, 
dq, + return _dispatch_barrier_trysync_f(dou._dq, dq, _dispatch_set_target_queue2); case _DISPATCH_IO_TYPE: return _dispatch_io_set_target_queue(dou._dchannel, dq); - default: + default: { + dispatch_queue_t prev_dq; _dispatch_retain(dq); - dispatch_atomic_store_barrier(); - prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq); + prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq, release); if (prev_dq) _dispatch_release(prev_dq); + _dispatch_object_debug(dou._do, "%s", __func__); return; + } } } +#pragma mark - +#pragma mark dispatch_pthread_root_queue + +struct dispatch_pthread_root_queue_context_s { + pthread_attr_t dpq_thread_attr; + dispatch_block_t dpq_thread_configure; + struct dispatch_semaphore_s dpq_thread_mediator; +}; +typedef struct dispatch_pthread_root_queue_context_s * + dispatch_pthread_root_queue_context_t; + +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +static struct dispatch_pthread_root_queue_context_s + _dispatch_mgr_root_queue_pthread_context; +static struct dispatch_root_queue_context_s + _dispatch_mgr_root_queue_context = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_kworkqueue = (void*)(~0ul), +#endif + .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context, + .dgq_thread_pool_size = 1, +}}}; +static struct dispatch_queue_s _dispatch_mgr_root_queue = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_mgr_root_queue_context, + .dq_label = "com.apple.root.libdispatch-manager", + .dq_running = 2, + .dq_width = UINT32_MAX, + .dq_serialnum = 3, +}; +static struct { + volatile int prio; + int policy; + pthread_t tid; +} _dispatch_mgr_sched; +static dispatch_once_t _dispatch_mgr_sched_pred; + +static void +_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) +{ + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + 
(void)dispatch_assume_zero(pthread_attr_init(attr)); + (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, + &_dispatch_mgr_sched.policy)); + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + // high-priority workq threads are at priority 2 above default + _dispatch_mgr_sched.prio = param.sched_priority + 2; +} + +DISPATCH_NOINLINE +static pthread_t * +_dispatch_mgr_root_queue_init(void) +{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + (void)dispatch_assume_zero(pthread_attr_setdetachstate(attr, + PTHREAD_CREATE_DETACHED)); +#if !DISPATCH_DEBUG + (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); +#endif + param.sched_priority = _dispatch_mgr_sched.prio; + (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); + return &_dispatch_mgr_sched.tid; +} + +static inline void +_dispatch_mgr_priority_apply(void) +{ + struct sched_param param; + do { + param.sched_priority = _dispatch_mgr_sched.prio; + (void)dispatch_assume_zero(pthread_setschedparam( + _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, ¶m)); + } while (_dispatch_mgr_sched.prio > param.sched_priority); +} + +DISPATCH_NOINLINE void -dispatch_set_current_target_queue(dispatch_queue_t dq) +_dispatch_mgr_priority_init(void) +{ + struct sched_param param; + pthread_attr_t *attr; + attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + if (slowpath(_dispatch_mgr_sched.prio > param.sched_priority)) { + return _dispatch_mgr_priority_apply(); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mgr_priority_raise(const pthread_attr_t *attr) +{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + struct sched_param param; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); + int p = 
_dispatch_mgr_sched.prio; + do if (p >= param.sched_priority) { + return; + } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, prio, + p, param.sched_priority, &p, relaxed))); + if (_dispatch_mgr_sched.tid) { + return _dispatch_mgr_priority_apply(); + } +} + +dispatch_queue_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure) { - dispatch_queue_t queue = _dispatch_queue_get_current(); + dispatch_queue_t dq; + dispatch_root_queue_context_t qc; + dispatch_pthread_root_queue_context_t pqc; + size_t dqs; - if (slowpath(!queue)) { - DISPATCH_CLIENT_CRASH("SPI not called from a queue"); + if (slowpath(flags)) { + return NULL; } - if (slowpath(queue->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - DISPATCH_CLIENT_CRASH("SPI not supported on this queue"); + dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; + dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs + + sizeof(struct dispatch_root_queue_context_s) + + sizeof(struct dispatch_pthread_root_queue_context_s)); + qc = (void*)dq + dqs; + pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s); + + _dispatch_queue_init(dq); + if (label) { + dq->dq_label = strdup(label); } - if (slowpath(queue->dq_width != 1)) { - DISPATCH_CLIENT_CRASH("SPI not called from a serial queue"); + + dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; + dq->do_ctxt = qc; + dq->do_targetq = NULL; + dq->dq_running = 2; + dq->dq_width = UINT32_MAX; + + pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); + qc->dgq_thread_mediator = &pqc->dpq_thread_mediator; + qc->dgq_ctxt = pqc; +#if HAVE_PTHREAD_WORKQUEUES + qc->dgq_kworkqueue = (void*)(~0ul); +#endif + _dispatch_root_queue_init_pthread_pool(qc, true); // rdar://11352331 + + if (attr) { + memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); + _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); + } else { + 
(void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); } - if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(0, true); + (void)dispatch_assume_zero(pthread_attr_setdetachstate( + &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); + if (configure) { + pqc->dpq_thread_configure = _dispatch_Block_copy(configure); } - _dispatch_retain(dq); - _dispatch_set_target_queue2(dq); + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} +#endif + +void +_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) +{ + if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { + DISPATCH_CRASH("Global root queue disposed"); + } + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); +#if DISPATCH_USE_PTHREAD_POOL + dispatch_root_queue_context_t qc = dq->do_ctxt; + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + + _dispatch_semaphore_dispose(qc->dgq_thread_mediator); + if (pqc->dpq_thread_configure) { + Block_release(pqc->dpq_thread_configure); + } + dq->do_targetq = _dispatch_get_root_queue(0, false); +#endif + if (dq->dq_label) { + free((void*)dq->dq_label); + } + _dispatch_queue_destroy(dq); } #pragma mark - @@ -772,14 +1058,8 @@ dispatch_set_current_target_queue(dispatch_queue_t dq) struct dispatch_queue_specific_queue_s { DISPATCH_STRUCT_HEADER(queue_specific_queue); DISPATCH_QUEUE_HEADER; - union { - char _dqsq_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; - struct { - char dq_label[16]; - TAILQ_HEAD(dispatch_queue_specific_head_s, - dispatch_queue_specific_s) dqsq_contexts; - }; - }; + TAILQ_HEAD(dispatch_queue_specific_head_s, + dispatch_queue_specific_s) dqsq_contexts; }; struct dispatch_queue_specific_s { @@ -803,7 +1083,7 @@ _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) } free(dqs); } - _dispatch_queue_dispose((dispatch_queue_t)dqsq); + _dispatch_queue_destroy((dispatch_queue_t)dqsq); } static void @@ -818,11 +1098,10 @@ 
_dispatch_queue_init_specific(dispatch_queue_t dq) dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, true); dqsq->dq_width = UINT32_MAX; - strlcpy(dqsq->dq_label, "queue-specific", sizeof(dqsq->dq_label)); + dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); - dispatch_atomic_store_barrier(); if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, - (dispatch_queue_t)dqsq))) { + (dispatch_queue_t)dqsq, release))) { _dispatch_release((dispatch_queue_t)dqsq); } } @@ -868,14 +1147,14 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, } dispatch_queue_specific_t dqs; - dqs = calloc(1, sizeof(struct dispatch_queue_specific_s)); + dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); dqs->dqs_key = key; dqs->dqs_ctxt = ctxt; dqs->dqs_destructor = destructor; if (slowpath(!dq->dq_specific_q)) { _dispatch_queue_init_specific(dq); } - dispatch_barrier_async_f(dq->dq_specific_q, dqs, + _dispatch_barrier_trysync_f(dq->dq_specific_q, dqs, _dispatch_queue_set_specific); } @@ -941,21 +1220,28 @@ dispatch_get_specific(const void *key) size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { + size_t offset = 0; dispatch_queue_t target = dq->do_targetq; - return snprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " - "running = 0x%x, barrier = %d ", target ? target->dq_label : "", - target, dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); + offset += dsnprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " + "running = 0x%x, barrier = %d ", target && target->dq_label ? 
+ target->dq_label : "", target, dq->dq_width / 2, + dq->dq_running / 2, dq->dq_running & 1); + if (dq->dq_is_thread_bound) { + offset += dsnprintf(buf, bufsiz, ", thread = %p ", + _dispatch_queue_get_bound_thread(dq)); + } + return offset; } size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dq->dq_label, dq); + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dq->dq_label ? dq->dq_label : dx_kind(dq), dq); offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += snprintf(&buf[offset], bufsiz - offset, "}"); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } @@ -963,7 +1249,7 @@ dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) void dispatch_debug_queue(dispatch_queue_t dq, const char* str) { if (fastpath(dq)) { - dispatch_debug(dq, "%s", str); + _dispatch_object_debug(dq, "%s", str); } else { _dispatch_log("queue[NULL]: %s", str); } @@ -972,7 +1258,6 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) { #if DISPATCH_PERF_MON static OSSpinLock _dispatch_stats_lock; -static size_t _dispatch_bad_ratio; static struct { uint64_t time_total; uint64_t count_total; @@ -1009,34 +1294,6 @@ _dispatch_queue_merge_stats(uint64_t start) #pragma mark - #pragma mark dispatch_continuation_t -static malloc_zone_t *_dispatch_ccache_zone; - -static void -_dispatch_ccache_init(void *context DISPATCH_UNUSED) -{ - _dispatch_ccache_zone = malloc_create_zone(0, 0); - dispatch_assert(_dispatch_ccache_zone); - malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); -} - -dispatch_continuation_t -_dispatch_continuation_alloc_from_heap(void) -{ - static dispatch_once_t pred; - dispatch_continuation_t dc; - - dispatch_once_f(&pred, NULL, _dispatch_ccache_init); - - // This is also used for 
allocating struct dispatch_apply_s. If the - // ROUND_UP behavior is changed, adjust the assert in libdispatch_init - while (!(dc = fastpath(malloc_zone_calloc(_dispatch_ccache_zone, 1, - ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { - sleep(1); - } - - return dc; -} - static void _dispatch_force_cache_cleanup(void) { @@ -1048,13 +1305,6 @@ _dispatch_force_cache_cleanup(void) } } -// rdar://problem/11500155 -void -dispatch_flush_continuation_cache(void) -{ - _dispatch_force_cache_cleanup(); -} - DISPATCH_NOINLINE static void _dispatch_cache_cleanup(void *value) @@ -1063,9 +1313,31 @@ _dispatch_cache_cleanup(void *value) while ((dc = next_dc)) { next_dc = dc->do_next; - malloc_zone_free(_dispatch_ccache_zone, dc); + _dispatch_continuation_free_to_heap(dc); + } +} + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; + +DISPATCH_NOINLINE +void +_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) +{ + _dispatch_continuation_free_to_heap(dc); + dispatch_continuation_t next_dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + int cnt; + if (!dc || (cnt = dc->do_ref_cnt-_dispatch_continuation_cache_limit) <= 0) { + return; } + do { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); + } while (--cnt && (dc = next_dc)); + _dispatch_thread_setspecific(dispatch_cache_key, next_dc); } +#endif DISPATCH_ALWAYS_INLINE_NDEBUG static inline void @@ -1074,12 +1346,11 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) dispatch_continuation_t dc = dou._dc; _dispatch_trace_continuation_pop(dq, dou); - (void)dispatch_atomic_add2o(dq, dq_running, 2); + (void)dispatch_atomic_add2o(dq, dq_running, 2, acquire); if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { - dispatch_atomic_barrier(); _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->dc_ctxt); + (_dispatch_thread_semaphore_t)dc->dc_other); } 
else { _dispatch_async_f_redirect(dq, dc); } @@ -1089,12 +1360,12 @@ DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_continuation_pop(dispatch_object_t dou) { - dispatch_continuation_t dc = dou._dc; + dispatch_continuation_t dc = dou._dc, dc1; dispatch_group_t dg; _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return _dispatch_queue_invoke(dou._dq); + return dx_invoke(dou._do); } // Add the item back to the cache before calling the function. This @@ -1104,7 +1375,9 @@ _dispatch_continuation_pop(dispatch_object_t dou) // Therefore, the object has not been reused yet. // This generates better assembly. if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - _dispatch_continuation_free(dc); + dc1 = _dispatch_continuation_free_cacheonly(dc); + } else { + dc1 = NULL; } if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { dg = dc->dc_data; @@ -1116,6 +1389,9 @@ _dispatch_continuation_pop(dispatch_object_t dou) dispatch_group_leave(dg); _dispatch_release(dg); } + if (slowpath(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } } #pragma mark - @@ -1166,10 +1442,10 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) #pragma mark - #pragma mark dispatch_async -static void -_dispatch_async_f_redirect_invoke(void *_ctxt) +void +_dispatch_async_redirect_invoke(void *ctxt) { - struct dispatch_continuation_s *dc = _ctxt; + struct dispatch_continuation_s *dc = ctxt; struct dispatch_continuation_s *other_dc = dc->dc_other; dispatch_queue_t old_dq, dq = dc->dc_data, rq; @@ -1180,24 +1456,39 @@ _dispatch_async_f_redirect_invoke(void *_ctxt) rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { - if (dispatch_atomic_sub2o(rq, dq_running, 2) == 0) { + if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) { _dispatch_wakeup(rq); } rq = rq->do_targetq; } - if (dispatch_atomic_sub2o(dq, dq_running, 2) == 0) { + if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 
0) { _dispatch_wakeup(dq); } _dispatch_release(dq); } -DISPATCH_NOINLINE -static void -_dispatch_async_f2_slow(dispatch_queue_t dq, dispatch_continuation_t dc) +static inline void +_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc) { - _dispatch_wakeup(dq); - _dispatch_queue_push(dq, dc); + uint32_t running = 2; + + // Find the queue to redirect to + do { + if (slowpath(dq->dq_items_tail) || + slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) || + slowpath(dq->dq_width == 1)) { + break; + } + running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + if (slowpath(running & 1) || slowpath(running > dq->dq_width)) { + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + break; + } + dq = dq->do_targetq; + } while (slowpath(dq->do_targetq)); + + _dispatch_queue_push_wakeup(dq, dc, running == 0); } DISPATCH_NOINLINE @@ -1205,69 +1496,47 @@ static void _dispatch_async_f_redirect(dispatch_queue_t dq, dispatch_continuation_t other_dc) { - dispatch_continuation_t dc; - dispatch_queue_t rq; - - _dispatch_retain(dq); - - dc = _dispatch_continuation_alloc(); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = _dispatch_async_f_redirect_invoke; + dc->dc_func = _dispatch_async_redirect_invoke; dc->dc_ctxt = dc; dc->dc_data = dq; dc->dc_other = other_dc; - // Find the queue to redirect to - rq = dq->do_targetq; - while (slowpath(rq->do_targetq)) { - uint32_t running; - - if (slowpath(rq->dq_items_tail) || - slowpath(DISPATCH_OBJECT_SUSPENDED(rq)) || - slowpath(rq->dq_width == 1)) { - break; - } - running = dispatch_atomic_add2o(rq, dq_running, 2) - 2; - if (slowpath(running & 1) || slowpath(running + 2 > rq->dq_width)) { - if (slowpath(dispatch_atomic_sub2o(rq, dq_running, 2) == 0)) { - return _dispatch_async_f2_slow(rq, dc); - } - break; - } - rq = rq->do_targetq; + _dispatch_retain(dq); + dq = dq->do_targetq; + if (slowpath(dq->do_targetq)) { + return 
_dispatch_async_f_redirect2(dq, dc); } - _dispatch_queue_push(rq, dc); + + _dispatch_queue_push(dq, dc); } DISPATCH_NOINLINE static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) { - uint32_t running; - bool locked; + uint32_t running = 2; do { if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { break; } - running = dispatch_atomic_add2o(dq, dq_running, 2); + running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); if (slowpath(running > dq->dq_width)) { - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { - return _dispatch_async_f2_slow(dq, dc); - } + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); break; } - locked = running & 1; - if (fastpath(!locked)) { + if (!slowpath(running & 1)) { return _dispatch_async_f_redirect(dq, dc); } - locked = dispatch_atomic_sub2o(dq, dq_running, 2) & 1; + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); // We might get lucky and find that the barrier has ended by now - } while (!locked); + } while (!(running & 1)); - _dispatch_queue_push(dq, dc); + _dispatch_queue_push_wakeup(dq, dc, running == 0); } DISPATCH_NOINLINE @@ -1375,21 +1644,15 @@ _dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); _dispatch_client_callout(ctxt, func); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); } -struct dispatch_function_recurse_s { - dispatch_queue_t dfr_dq; - void* dfr_ctxt; - dispatch_function_t dfr_func; -}; - -static void -_dispatch_function_recurse_invoke(void *ctxt) +void +_dispatch_sync_recurse_invoke(void *ctxt) { - struct dispatch_function_recurse_s *dfr = ctxt; - _dispatch_function_invoke(dfr->dfr_dq, dfr->dfr_ctxt, dfr->dfr_func); + dispatch_continuation_t dc = ctxt; + _dispatch_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); 
} DISPATCH_ALWAYS_INLINE @@ -1397,35 +1660,26 @@ static inline void _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - struct dispatch_function_recurse_s dfr = { - .dfr_dq = dq, - .dfr_func = func, - .dfr_ctxt = ctxt, + struct dispatch_continuation_s dc = { + .dc_data = dq, + .dc_func = func, + .dc_ctxt = ctxt, }; - dispatch_sync_f(dq->do_targetq, &dfr, _dispatch_function_recurse_invoke); + dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke); } #pragma mark - #pragma mark dispatch_barrier_sync -struct dispatch_barrier_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(barrier_sync_slow); -}; - -struct dispatch_barrier_sync_slow2_s { - dispatch_queue_t dbss2_dq; -#if DISPATCH_COCOA_COMPAT - dispatch_function_t dbss2_func; - void *dbss2_ctxt; -#endif - _dispatch_thread_semaphore_t dbss2_sema; -}; +static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); DISPATCH_ALWAYS_INLINE_NDEBUG static inline _dispatch_thread_semaphore_t _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, bool lock) { + _dispatch_thread_semaphore_t sema; dispatch_continuation_t dc = dou._dc; if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & @@ -1434,42 +1688,44 @@ _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, return 0; } _dispatch_trace_continuation_pop(dq, dc); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); - struct dispatch_barrier_sync_slow_s *dbssp = (void *)dc; - struct dispatch_barrier_sync_slow2_s *dbss2 = dbssp->dc_ctxt; + dc = dc->dc_ctxt; + dq = dc->dc_data; + sema = (_dispatch_thread_semaphore_t)dc->dc_other; if (lock) { - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + (void)dispatch_atomic_add2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://problem/9032024 running lock must be held until sync_f_slow // returns - 
(void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); } - return dbss2->dbss2_sema ? dbss2->dbss2_sema : MACH_PORT_DEAD; + return sema ? sema : MACH_PORT_DEAD; } static void _dispatch_barrier_sync_f_slow_invoke(void *ctxt) { - struct dispatch_barrier_sync_slow2_s *dbss2 = ctxt; + dispatch_continuation_t dc = ctxt; + dispatch_queue_t dq = dc->dc_data; + _dispatch_thread_semaphore_t sema; + sema = (_dispatch_thread_semaphore_t)dc->dc_other; - dispatch_assert(dbss2->dbss2_dq == _dispatch_queue_get_current()); + dispatch_assert(dq == _dispatch_queue_get_current()); #if DISPATCH_COCOA_COMPAT - // When the main queue is bound to the main thread - if (dbss2->dbss2_dq == &_dispatch_main_q && pthread_main_np()) { - dbss2->dbss2_func(dbss2->dbss2_ctxt); - dbss2->dbss2_func = NULL; - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); + if (slowpath(dq->dq_is_thread_bound)) { + // The queue is bound to a non-dispatch thread (e.g. 
main thread) + dc->dc_func(dc->dc_ctxt); + dispatch_atomic_store2o(dc, dc_func, NULL, release); + _dispatch_thread_semaphore_signal(sema); // release return; } #endif - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + (void)dispatch_atomic_add2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://9032024 running lock must be held until sync_f_slow returns - (void)dispatch_atomic_add2o(dbss2->dbss2_dq, dq_running, 2); - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(dbss2->dbss2_sema); + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + _dispatch_thread_semaphore_signal(sema); // release } DISPATCH_NOINLINE @@ -1477,55 +1733,61 @@ static void _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { + if (slowpath(!dq->do_targetq)) { + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_invoke(dq, ctxt, func); + } // It's preferred to execute synchronous blocks on the current thread // due to thread-local side effects, garbage collection, etc. 
However, // blocks submitted to the main thread MUST be run on the main thread - struct dispatch_barrier_sync_slow2_s dbss2 = { - .dbss2_dq = dq, + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + struct dispatch_continuation_s dc = { + .dc_data = dq, #if DISPATCH_COCOA_COMPAT - .dbss2_func = func, - .dbss2_ctxt = ctxt, + .dc_func = func, + .dc_ctxt = ctxt, #endif - .dbss2_sema = _dispatch_get_thread_semaphore(), + .dc_other = (void*)sema, }; - struct dispatch_barrier_sync_slow_s dbss = { + struct dispatch_continuation_s dbss = { .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT), .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dbss2, + .dc_ctxt = &dc, +#if DISPATCH_INTROSPECTION + .dc_data = (void*)_dispatch_thread_self(), +#endif }; - _dispatch_queue_push(dq, (void *)&dbss); + _dispatch_queue_push(dq, &dbss); - _dispatch_thread_semaphore_wait(dbss2.dbss2_sema); - _dispatch_put_thread_semaphore(dbss2.dbss2_sema); + _dispatch_thread_semaphore_wait(sema); // acquire + _dispatch_put_thread_semaphore(sema); #if DISPATCH_COCOA_COMPAT - // Main queue bound to main thread - if (dbss2.dbss2_func == NULL) { + // Queue bound to a non-dispatch thread + if (dc.dc_func == NULL) { return; } #endif - dispatch_atomic_acquire_barrier(); - if (slowpath(dq->do_targetq) && slowpath(dq->do_targetq->do_targetq)) { + if (slowpath(dq->do_targetq->do_targetq)) { _dispatch_function_recurse(dq, ctxt, func); } else { _dispatch_function_invoke(dq, ctxt, func); } - dispatch_atomic_release_barrier(); if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && dq->dq_running == 2) { // rdar://problem/8290662 "lock transfer" - _dispatch_thread_semaphore_t sema; sema = _dispatch_queue_drain_one_barrier_sync(dq); if (sema) { - _dispatch_thread_semaphore_signal(sema); + _dispatch_thread_semaphore_signal(sema); // release return; } } (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); - if 
(slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1540,15 +1802,15 @@ _dispatch_barrier_sync_f2(dispatch_queue_t dq) sema = _dispatch_queue_drain_one_barrier_sync(dq); if (sema) { (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL); + DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); // rdar://9032024 running lock must be held until sync_f_slow // returns: increment by 2 and decrement by 1 - (void)dispatch_atomic_inc2o(dq, dq_running); + (void)dispatch_atomic_inc2o(dq, dq_running, relaxed); _dispatch_thread_semaphore_signal(sema); return; } } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1558,13 +1820,11 @@ static void _dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_atomic_acquire_barrier(); _dispatch_function_invoke(dq, ctxt, func); - dispatch_atomic_release_barrier(); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1574,13 +1834,11 @@ static void _dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_atomic_acquire_barrier(); _dispatch_function_recurse(dq, ctxt, func); - dispatch_atomic_release_barrier(); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running) == 0)) { + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { _dispatch_wakeup(dq); } } @@ -1595,9 +1853,9 @@ dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, if (slowpath(dq->dq_items_tail) || 
slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ return _dispatch_barrier_sync_f_slow(dq, ctxt, func); } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { - // global queues and main queue bound to main thread always falls into - // the slow case + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { + // global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case return _dispatch_barrier_sync_f_slow(dq, ctxt, func); } if (slowpath(dq->do_targetq->do_targetq)) { @@ -1621,8 +1879,7 @@ _dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) return dispatch_barrier_sync_f(dq, block, _dispatch_call_block_and_release); } - struct Block_basic *bb = (void *)work; - dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -1630,30 +1887,59 @@ void dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) { #if DISPATCH_COCOA_COMPAT - if (slowpath(dq == &_dispatch_main_q)) { + if (slowpath(dq->dq_is_thread_bound)) { return _dispatch_barrier_sync_slow(dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_barrier_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif +DISPATCH_NOINLINE +static void +_dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_function_invoke(dq, ctxt, func); + if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_NOINLINE +void +_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + // Use for mutation of queue-/source-internal state only, ignores target + // queue hierarchy! 
+ if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) + || slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, + acquire))) { + return dispatch_barrier_async_f(dq, ctxt, func); + } + _dispatch_barrier_trysync_f_invoke(dq, ctxt, func); +} + #pragma mark - #pragma mark dispatch_sync DISPATCH_NOINLINE static void -_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + bool wakeup) { _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_sync_slow_s { - DISPATCH_CONTINUATION_HEADER(sync_slow); - } dss = { + struct dispatch_continuation_s dss = { .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_ctxt = (void*)sema, +#if DISPATCH_INTROSPECTION + .dc_func = func, + .dc_ctxt = ctxt, + .dc_data = (void*)_dispatch_thread_self(), +#endif + .dc_other = (void*)sema, }; - _dispatch_queue_push(dq, (void *)&dss); + _dispatch_queue_push_wakeup(dq, &dss, wakeup); _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); @@ -1663,29 +1949,18 @@ _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) } else { _dispatch_function_invoke(dq, ctxt, func); } - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { _dispatch_wakeup(dq); } } -DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow2(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { - _dispatch_wakeup(dq); - } - _dispatch_sync_f_slow(dq, ctxt, func); -} - DISPATCH_NOINLINE static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_function_invoke(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { 
_dispatch_wakeup(dq); } } @@ -1696,22 +1971,23 @@ _dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { _dispatch_function_recurse(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2) == 0)) { + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { _dispatch_wakeup(dq); } } -DISPATCH_NOINLINE -static void +static inline void _dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { // 1) ensure that this thread hasn't enqueued anything ahead of this call // 2) the queue is not suspended if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_sync_f_slow(dq, ctxt, func); + return _dispatch_sync_f_slow(dq, ctxt, func, false); } - if (slowpath(dispatch_atomic_add2o(dq, dq_running, 2) & 1)) { - return _dispatch_sync_f_slow2(dq, ctxt, func); + uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + if (slowpath(running & 1)) { + running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_slow(dq, ctxt, func, running == 0); } if (slowpath(dq->do_targetq->do_targetq)) { return _dispatch_sync_f_recurse(dq, ctxt, func); @@ -1727,8 +2003,8 @@ dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) return dispatch_barrier_sync_f(dq, ctxt, func); } if (slowpath(!dq->do_targetq)) { - // the global root queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2); + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); return _dispatch_sync_f_invoke(dq, ctxt, func); } _dispatch_sync_f2(dq, ctxt, func); @@ -1748,8 +2024,7 @@ _dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) dispatch_block_t block = _dispatch_Block_copy(work); return dispatch_sync_f(dq, block, _dispatch_call_block_and_release); } - struct Block_basic *bb = (void *)work; - dispatch_sync_f(dq, work, 
(dispatch_function_t)bb->Block_invoke); + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -1757,37 +2032,29 @@ void dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { #if DISPATCH_COCOA_COMPAT - if (slowpath(dq == &_dispatch_main_q)) { + if (slowpath(dq->dq_is_thread_bound)) { return _dispatch_sync_slow(dq, work); } #endif - struct Block_basic *bb = (void *)work; - dispatch_sync_f(dq, work, (dispatch_function_t)bb->Block_invoke); + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif #pragma mark - #pragma mark dispatch_after -struct _dispatch_after_time_s { - void *datc_ctxt; - void (*datc_func)(void *); - dispatch_source_t ds; -}; - -static void +void _dispatch_after_timer_callback(void *ctxt) { - struct _dispatch_after_time_s *datc = ctxt; - - dispatch_assert(datc->datc_func); - _dispatch_client_callout(datc->datc_ctxt, datc->datc_func); - - dispatch_source_t ds = datc->ds; - free(datc); - - dispatch_source_cancel(ds); // Needed until 7287561 gets integrated + dispatch_continuation_t dc = ctxt, dc1; + dispatch_source_t ds = dc->dc_data; + dc1 = _dispatch_continuation_free_cacheonly(dc); + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + dispatch_source_cancel(ds); dispatch_release(ds); + if (slowpath(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } } DISPATCH_NOINLINE @@ -1795,8 +2062,7 @@ void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, dispatch_function_t func) { - uint64_t delta; - struct _dispatch_after_time_s *datc = NULL; + uint64_t delta, leeway; dispatch_source_t ds; if (when == DISPATCH_TIME_FOREVER) { @@ -1807,25 +2073,27 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, return; } - // this function can and should be optimized to not use a dispatch source delta = _dispatch_timeout(when); if (delta == 0) { return dispatch_async_f(queue, ctxt, func); } - // on successful creation, source owns malloc-ed context (which it frees in - // the 
event handler) + leeway = delta / 10; // + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; + if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; + + // this function can and should be optimized to not use a dispatch source ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); dispatch_assert(ds); - datc = malloc(sizeof(*datc)); - dispatch_assert(datc); - datc->datc_ctxt = ctxt; - datc->datc_func = func; - datc->ds = ds; + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_data = ds; - dispatch_set_context(ds, datc); + dispatch_set_context(ds, dc); dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback); - dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, 0); + dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); dispatch_resume(ds); } @@ -1848,7 +2116,7 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, #endif #pragma mark - -#pragma mark dispatch_wakeup +#pragma mark dispatch_queue_push DISPATCH_NOINLINE static void @@ -1872,8 +2140,8 @@ void _dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj, unsigned int n) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { - dq->dq_items_head = obj; + if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global2(dq, n); } _dispatch_queue_push_list_slow2(dq, obj); @@ -1884,61 +2152,89 @@ void _dispatch_queue_push_slow(dispatch_queue_t dq, struct dispatch_object_s *obj) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_TYPE) { - dq->dq_items_head = obj; + if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global(dq); } _dispatch_queue_push_list_slow2(dq, obj); } 
+#pragma mark - +#pragma mark dispatch_queue_probe + +unsigned long +_dispatch_queue_probe(dispatch_queue_t dq) +{ + return (unsigned long)slowpath(dq->dq_items_tail != NULL); +} + +#if DISPATCH_COCOA_COMPAT +unsigned long +_dispatch_runloop_queue_probe(dispatch_queue_t dq) +{ + if (_dispatch_queue_probe(dq)) { + if (dq->do_xref_cnt == -1) return true; // + return _dispatch_runloop_queue_wakeup(dq); + } + return false; +} +#endif + +unsigned long +_dispatch_mgr_queue_probe(dispatch_queue_t dq) +{ + if (_dispatch_queue_probe(dq)) { + return _dispatch_mgr_wakeup(dq); + } + return false; +} + +unsigned long +_dispatch_root_queue_probe(dispatch_queue_t dq) +{ + _dispatch_queue_wakeup_global(dq); + return false; +} + +#pragma mark - +#pragma mark dispatch_wakeup + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou) { - dispatch_queue_t tq; - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { return NULL; } - if (!dx_probe(dou._do) && !dou._dq->dq_items_tail) { + if (!dx_probe(dou._do)) { return NULL; } - - // _dispatch_source_invoke() relies on this testing the whole suspend count - // word, not just the lock bit. In other words, no point taking the lock - // if the source is suspended or canceled. 
if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK)) { + DISPATCH_OBJECT_SUSPEND_LOCK, release)) { #if DISPATCH_COCOA_COMPAT if (dou._dq == &_dispatch_main_q) { - return _dispatch_queue_wakeup_main(); + return _dispatch_main_queue_wakeup(); } #endif return NULL; } - dispatch_atomic_acquire_barrier(); _dispatch_retain(dou._do); - tq = dou._do->do_targetq; + dispatch_queue_t tq = dou._do->do_targetq; _dispatch_queue_push(tq, dou._do); return tq; // libdispatch does not need this, but the Instrument DTrace // probe does } #if DISPATCH_COCOA_COMPAT -DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_wakeup_main(void) +static inline void +_dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) { - kern_return_t kr; - - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - if (!main_q_port) { - return NULL; + mach_port_t mp = (mach_port_t)dq->do_ctxt; + if (!mp) { + return; } - kr = _dispatch_send_wakeup_main_thread(main_q_port, 0); - + kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { case MACH_SEND_TIMEOUT: case MACH_SEND_TIMED_OUT: @@ -1948,6 +2244,27 @@ _dispatch_queue_wakeup_main(void) (void)dispatch_assume_zero(kr); break; } +} + +DISPATCH_NOINLINE DISPATCH_WEAK +unsigned long +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq) +{ + _dispatch_runloop_queue_wakeup_thread(dq); + return false; +} + +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_main_queue_wakeup(void) +{ + dispatch_queue_t dq = &_dispatch_main_q; + if (!dq->dq_is_thread_bound) { + return NULL; + } + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + _dispatch_runloop_queue_wakeup_thread(dq); return NULL; } #endif @@ -1957,22 +2274,24 @@ static void _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) { static dispatch_once_t pred; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; 
+ uint32_t i = n; int r; - dispatch_debug_queue(dq, __func__); + _dispatch_debug_root_queue(dq, __func__); dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); #if HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) #endif { - _dispatch_debug("requesting new worker thread"); + _dispatch_root_queue_debug("requesting new worker thread for global " + "queue: %p", dq); #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK if (qc->dgq_kworkqueue) { pthread_workitem_handle_t wh; - unsigned int gen_cnt, i = n; + unsigned int gen_cnt; do { r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread3, dq, &wh, &gen_cnt); @@ -1983,55 +2302,70 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - qc->dgq_wq_options, n); + qc->dgq_wq_options, (int)i); (void)dispatch_assume_zero(r); #endif return; } #endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL - if (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { - return; +#if DISPATCH_USE_PTHREAD_POOL + if (fastpath(qc->dgq_thread_mediator)) { + while (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { + if (!--i) { + return; + } + } } - - pthread_t pthr; - int t_count; + uint32_t j, t_count = qc->dgq_thread_pool_size; do { - t_count = qc->dgq_thread_pool_size; if (!t_count) { - _dispatch_debug("The thread pool is full: %p", dq); + _dispatch_root_queue_debug("pthread pool is full for root queue: " + "%p", dq); return; } - } while (!dispatch_atomic_cmpxchg2o(qc, dgq_thread_pool_size, t_count, - t_count - 1)); + j = i > t_count ? 
t_count : i; + } while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, + t_count - j, &t_count, relaxed)); - while ((r = pthread_create(&pthr, NULL, _dispatch_worker_thread, dq))) { - if (r != EAGAIN) { + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + pthread_attr_t *attr = pqc ? &pqc->dpq_thread_attr : NULL; + pthread_t tid, *pthr = &tid; +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + if (slowpath(dq == &_dispatch_mgr_root_queue)) { + pthr = _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); + while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { + if (r != EAGAIN) { + (void)dispatch_assume_zero(r); + } + _dispatch_temporary_resource_shortage(); + } + if (!attr) { + r = pthread_detach(*pthr); (void)dispatch_assume_zero(r); } - sleep(1); - } - r = pthread_detach(pthr); - (void)dispatch_assume_zero(r); -#endif // DISPATCH_ENABLE_THREAD_POOL + } while (--j); +#endif // DISPATCH_USE_PTHREAD_POOL } static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) { - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - if (!dq->dq_items_tail) { return; } #if HAVE_PTHREAD_WORKQUEUES + dispatch_root_queue_context_t qc = dq->do_ctxt; if ( -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL (qc->dgq_kworkqueue != (void*)(~0ul)) && #endif - !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n)) { - _dispatch_debug("work thread request still pending on global queue: " - "%p", dq); + !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { + _dispatch_root_queue_debug("worker thread request still pending for " + "global queue: %p", dq); return; } #endif // HAVE_PTHREAD_WORKQUEUES @@ -2044,15 +2378,24 @@ _dispatch_queue_wakeup_global(dispatch_queue_t dq) return _dispatch_queue_wakeup_global2(dq, 1); } -bool -_dispatch_queue_probe_root(dispatch_queue_t dq) +#pragma mark - +#pragma mark dispatch_queue_invoke + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t 
+dispatch_queue_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr) { - _dispatch_queue_wakeup_global2(dq, 1); - return false; -} + dispatch_queue_t dq = dou._dq; + dispatch_queue_t otq = dq->do_targetq; + *sema_ptr = _dispatch_queue_drain(dq); -#pragma mark - -#pragma mark dispatch_queue_drain + if (slowpath(otq != dq->do_targetq)) { + // An item on the queue changed the target queue + return dq->do_targetq; + } + return NULL; +} // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol @@ -2060,46 +2403,47 @@ DISPATCH_NOINLINE void _dispatch_queue_invoke(dispatch_queue_t dq) { - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1))) { - dispatch_atomic_acquire_barrier(); - dispatch_queue_t otq = dq->do_targetq, tq = NULL; - _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); - if (dq->do_vtable->do_invoke) { - // Assume that object invoke checks it is executing on correct queue - tq = dx_invoke(dq); - } else if (slowpath(otq != dq->do_targetq)) { - // An item on the queue changed the target queue - tq = dq->do_targetq; - } - // We do not need to check the result. - // When the suspend-count lock is dropped, then the check will happen. 
- dispatch_atomic_release_barrier(); - (void)dispatch_atomic_dec2o(dq, dq_running); - if (sema) { - _dispatch_thread_semaphore_signal(sema); - } else if (tq) { - return _dispatch_queue_push(tq, dq); - } + _dispatch_queue_class_invoke(dq, dispatch_queue_invoke2); +} + +#pragma mark - +#pragma mark dispatch_queue_drain + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_head(dispatch_queue_t dq) +{ + struct dispatch_object_s *dc; + while (!(dc = fastpath(dq->dq_items_head))) { + dispatch_hardware_pause(); } + return dc; +} - dq->do_next = DISPATCH_OBJECT_LISTLESS; - dispatch_atomic_release_barrier(); - if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK)) { - if (dq->dq_running == 0) { - _dispatch_wakeup(dq); // verify that the queue is idle +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +{ + struct dispatch_object_s *next_dc; + next_dc = fastpath(dc->do_next); + dq->dq_items_head = next_dc; + if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL, + relaxed)) { + // Enqueue is TIGHTLY controlled, we won't wait long. 
+ while (!(next_dc = fastpath(dc->do_next))) { + dispatch_hardware_pause(); } + dq->dq_items_head = next_dc; } - _dispatch_release(dq); // added when the queue is put on the list + return next_dc; } -static _dispatch_thread_semaphore_t -_dispatch_queue_drain(dispatch_queue_t dq) +_dispatch_thread_semaphore_t +_dispatch_queue_drain(dispatch_object_t dou) { - dispatch_queue_t orig_tq, old_dq; + dispatch_queue_t dq = dou._dq, orig_tq, old_dq; old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - struct dispatch_object_s *dc = NULL, *next_dc = NULL; + struct dispatch_object_s *dc, *next_dc; _dispatch_thread_semaphore_t sema = 0; // Continue draining sources after target queue change rdar://8928171 @@ -2111,19 +2455,8 @@ _dispatch_queue_drain(dispatch_queue_t dq) //dispatch_debug_queue(dq, __func__); while (dq->dq_items_tail) { - while (!(dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); - } - dq->dq_items_head = NULL; + dc = _dispatch_queue_head(dq); do { - next_dc = fastpath(dc->do_next); - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. 
- while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - } if (DISPATCH_OBJECT_SUSPENDED(dq)) { goto out; } @@ -2133,6 +2466,7 @@ _dispatch_queue_drain(dispatch_queue_t dq) if (slowpath(orig_tq != dq->do_targetq) && check_tq) { goto out; } + bool redirect = false; if (!fastpath(dq->dq_width == 1)) { if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { @@ -2140,113 +2474,87 @@ _dispatch_queue_drain(dispatch_queue_t dq) goto out; } } else { - _dispatch_continuation_redirect(dq, dc); - continue; + redirect = true; } } + next_dc = _dispatch_queue_next(dq, dc); + if (redirect) { + _dispatch_continuation_redirect(dq, dc); + continue; + } if ((sema = _dispatch_barrier_sync_f_pop(dq, dc, true))) { - dc = next_dc; goto out; } _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); + _dispatch_perfmon_workitem_inc(); } while ((dc = next_dc)); } out: - // if this is not a complete drain, we must undo some things - if (slowpath(dc)) { - // 'dc' must NOT be "popped" - // 'dc' might be the last item - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, NULL, dc)) { - // wait for enqueue slow path to finish - while (!(next_dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); - } - dc->do_next = next_dc; - } - dq->dq_items_head = dc; - } - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); return sema; } -static void -_dispatch_queue_serial_drain_till_empty(dispatch_queue_t dq) -{ -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif - _dispatch_thread_semaphore_t sema = _dispatch_queue_drain(dq); - if (sema) { - dispatch_atomic_barrier(); - _dispatch_thread_semaphore_signal(sema); - } -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif - _dispatch_force_cache_cleanup(); -} - #if DISPATCH_COCOA_COMPAT -void +static void _dispatch_main_queue_drain(void) { dispatch_queue_t dq = &_dispatch_main_q; if (!dq->dq_items_tail) { return; } - struct 
dispatch_main_queue_drain_marker_s { - DISPATCH_CONTINUATION_HEADER(main_queue_drain_marker); - } marker = { + struct dispatch_continuation_s marker = { .do_vtable = NULL, }; struct dispatch_object_s *dmarker = (void*)▮ _dispatch_queue_push_notrace(dq, dmarker); -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif + _dispatch_perfmon_start(); dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - while (dq->dq_items_tail) { - while (!(dc = fastpath(dq->dq_items_head))) { - _dispatch_hardware_pause(); + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_head(dq); + do { + next_dc = _dispatch_queue_next(dq, dc); + if (dc == dmarker) { + goto out; } - dq->dq_items_head = NULL; - do { - next_dc = fastpath(dc->do_next); - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. 
- while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - } - if (dc == dmarker) { - if (next_dc) { - dq->dq_items_head = next_dc; - _dispatch_queue_wakeup_main(); - } - goto out; - } - _dispatch_continuation_pop(dc); - _dispatch_workitem_inc(); - } while ((dc = next_dc)); - } - dispatch_assert(dc); // did not encounter marker + _dispatch_continuation_pop(dc); + _dispatch_perfmon_workitem_inc(); + } while ((dc = next_dc)); + DISPATCH_CRASH("Main queue corruption"); out: + if (next_dc) { + _dispatch_main_queue_wakeup(); + } _dispatch_thread_setspecific(dispatch_queue_key, old_dq); -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif + _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); } + +static bool +_dispatch_runloop_queue_drain_one(dispatch_queue_t dq) +{ + if (!dq->dq_items_tail) { + return false; + } + _dispatch_perfmon_start(); + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_head(dq); + next_dc = _dispatch_queue_next(dq, dc); + _dispatch_continuation_pop(dc); + _dispatch_perfmon_workitem_inc(); + + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_perfmon_end(); + _dispatch_force_cache_cleanup(); + return next_dc; +} #endif DISPATCH_ALWAYS_INLINE_NDEBUG @@ -2254,7 +2562,7 @@ static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) { // rdar://problem/8290662 "lock transfer" - struct dispatch_object_s *dc, *next_dc; + struct dispatch_object_s *dc; _dispatch_thread_semaphore_t sema; // queue is locked, or suspended and not being drained @@ -2263,58 +2571,122 @@ _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) return 0; } // dequeue dc, it is a barrier sync - next_dc = fastpath(dc->do_next); - dq->dq_items_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, 
NULL)) { - // Enqueue is TIGHTLY controlled, we won't wait long. - while (!(next_dc = fastpath(dc->do_next))) { - _dispatch_hardware_pause(); - } - dq->dq_items_head = next_dc; - } + (void)_dispatch_queue_next(dq, dc); return sema; } -#ifndef DISPATCH_HEAD_CONTENTION_SPINS -#define DISPATCH_HEAD_CONTENTION_SPINS 10000 +void +_dispatch_mgr_queue_drain(void) +{ + dispatch_queue_t dq = &_dispatch_mgr_q; + if (!dq->dq_items_tail) { + return _dispatch_force_cache_cleanup(); + } + _dispatch_perfmon_start(); + if (slowpath(_dispatch_queue_drain(dq))) { + DISPATCH_CRASH("Sync onto manager queue"); + } + _dispatch_perfmon_end(); + _dispatch_force_cache_cleanup(); +} + +#pragma mark - +#pragma mark dispatch_root_queue_drain + +#ifndef DISPATCH_CONTENTION_USE_RAND +#define DISPATCH_CONTENTION_USE_RAND (!TARGET_OS_EMBEDDED) +#endif +#ifndef DISPATCH_CONTENTION_SPINS_MAX +#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1) +#endif +#ifndef DISPATCH_CONTENTION_SPINS_MIN +#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) +#endif +#ifndef DISPATCH_CONTENTION_USLEEP_START +#define DISPATCH_CONTENTION_USLEEP_START 500 #endif +#ifndef DISPATCH_CONTENTION_USLEEP_MAX +#define DISPATCH_CONTENTION_USLEEP_MAX 100000 +#endif + +DISPATCH_NOINLINE +static bool +_dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) +{ + dispatch_root_queue_context_t qc = dq->do_ctxt; + struct dispatch_object_s *const mediator = (void *)~0ul; + bool pending = false, available = true; + unsigned int spins, sleep_time = DISPATCH_CONTENTION_USLEEP_START; -static struct dispatch_object_s * + do { + // Spin for a short while in case the contention is temporary -- e.g. + // when starting up after dispatch_apply, or when executing a few + // short continuations in a row. +#if DISPATCH_CONTENTION_USE_RAND + // Use randomness to prevent threads from resonating at the same + // frequency and permanently contending. All threads sharing the same + // seed value is safe with the FreeBSD rand_r implementation. 
+ static unsigned int seed; + spins = (rand_r(&seed) & DISPATCH_CONTENTION_SPINS_MAX) | + DISPATCH_CONTENTION_SPINS_MIN; +#else + spins = DISPATCH_CONTENTION_SPINS_MIN + + (DISPATCH_CONTENTION_SPINS_MAX-DISPATCH_CONTENTION_SPINS_MIN)/2; +#endif + while (spins--) { + dispatch_hardware_pause(); + if (fastpath(dq->dq_items_head != mediator)) goto out; + }; + // Since we have serious contention, we need to back off. + if (!pending) { + // Mark this queue as pending to avoid requests for further threads + (void)dispatch_atomic_inc2o(qc, dgq_pending, relaxed); + pending = true; + } + _dispatch_contention_usleep(sleep_time); + if (fastpath(dq->dq_items_head != mediator)) goto out; + sleep_time *= 2; + } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); + + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. + _dispatch_debug("contention on global queue: %p", dq); + _dispatch_queue_wakeup_global(dq); + available = false; +out: + if (pending) { + (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + } + return available; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline struct dispatch_object_s * _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) { struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; start: // The mediator value acts both as a "lock" and a signal - head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator); + head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); if (slowpath(head == NULL)) { // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. 
- (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL); - _dispatch_debug("no work on global work queue"); + (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL, + relaxed); + _dispatch_root_queue_debug("no work on global queue: %p", dq); return NULL; } if (slowpath(head == mediator)) { // This thread lost the race for ownership of the queue. - // Spin for a short while in case many threads have started draining at - // once as part of a dispatch_apply - unsigned int i = DISPATCH_HEAD_CONTENTION_SPINS; - do { - _dispatch_hardware_pause(); - if (dq->dq_items_head != mediator) goto start; - } while (--i); - // The ratio of work to libdispatch overhead must be bad. This - // scenario implies that there are too many threads in the pool. - // Create a new pending thread and then exit this thread. - // The kernel will grant a new thread when the load subsides. - _dispatch_debug("Contention on queue: %p", dq); - _dispatch_queue_wakeup_global(dq); -#if DISPATCH_PERF_MON - dispatch_atomic_inc(&_dispatch_bad_ratio); -#endif + if (fastpath(_dispatch_queue_concurrent_drain_one_slow(dq))) { + goto start; + } return NULL; } @@ -2323,34 +2695,28 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) next = fastpath(head->do_next); if (slowpath(!next)) { - dq->dq_items_head = NULL; + dispatch_atomic_store2o(dq, dq_items_head, NULL, relaxed); - if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL)) { + if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, relaxed)) { // both head and tail are NULL now goto out; } // There must be a next item now. This thread won't wait long. 
while (!(next = head->do_next)) { - _dispatch_hardware_pause(); + dispatch_hardware_pause(); } } - dq->dq_items_head = next; + dispatch_atomic_store2o(dq, dq_items_head, next, relaxed); _dispatch_queue_wakeup_global(dq); out: return head; } -#pragma mark - -#pragma mark dispatch_worker_thread - static void -_dispatch_worker_thread4(dispatch_queue_t dq) +_dispatch_root_queue_drain(dispatch_queue_t dq) { - struct dispatch_object_s *item; - - #if DISPATCH_DEBUG if (_dispatch_thread_getspecific(dispatch_queue_key)) { DISPATCH_CRASH("Premature thread recycling"); @@ -2359,7 +2725,6 @@ _dispatch_worker_thread4(dispatch_queue_t dq) _dispatch_thread_setspecific(dispatch_queue_key, dq); #if DISPATCH_COCOA_COMPAT - (void)dispatch_atomic_inc(&_dispatch_worker_threads); // ensure that high-level memory management techniques do not leak/crash if (dispatch_begin_thread_4GC) { dispatch_begin_thread_4GC(); @@ -2367,44 +2732,40 @@ _dispatch_worker_thread4(dispatch_queue_t dq) void *pool = _dispatch_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT -#if DISPATCH_PERF_MON - uint64_t start = _dispatch_absolute_time(); -#endif + _dispatch_perfmon_start(); + struct dispatch_object_s *item; while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { _dispatch_continuation_pop(item); } -#if DISPATCH_PERF_MON - _dispatch_queue_merge_stats(start); -#endif + _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT _dispatch_autorelease_pool_pop(pool); if (dispatch_end_thread_4GC) { dispatch_end_thread_4GC(); } - if (!dispatch_atomic_dec(&_dispatch_worker_threads) && - dispatch_no_worker_threads_4GC) { - dispatch_no_worker_threads_4GC(); - } #endif // DISPATCH_COCOA_COMPAT _dispatch_thread_setspecific(dispatch_queue_key, NULL); - - _dispatch_force_cache_cleanup(); - } -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK +#pragma mark - +#pragma mark dispatch_worker_thread + +#if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread3(void *context) { dispatch_queue_t dq = context; - 
struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; + + _dispatch_introspection_thread_add(); + + (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + _dispatch_root_queue_drain(dq); + __asm__(""); // prevent tailcall (for Instrument DTrace probe) - (void)dispatch_atomic_dec2o(qc, dgq_pending); - _dispatch_worker_thread4(dq); } -#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP // 6618342 Contact the team that owns the Instrument DTrace probe before @@ -2416,40 +2777,47 @@ _dispatch_worker_thread2(int priority, int options, dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIOQUEUE); dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT)); dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options]; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; - (void)dispatch_atomic_dec2o(qc, dgq_pending); - _dispatch_worker_thread4(dq); + return _dispatch_worker_thread3(dq); } -#endif +#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_ENABLE_THREAD_POOL +#if DISPATCH_USE_PTHREAD_POOL // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol static void * _dispatch_worker_thread(void *context) { dispatch_queue_t dq = context; - struct dispatch_root_queue_context_s *qc = dq->do_ctxt; + dispatch_root_queue_context_t qc = dq->do_ctxt; + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + + if (pqc && pqc->dpq_thread_configure) { + pqc->dpq_thread_configure(); + } + sigset_t mask; int r; - // workaround tweaks the kernel workqueue does for us r = sigfillset(&mask); (void)dispatch_assume_zero(r); r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); (void)dispatch_assume_zero(r); + _dispatch_introspection_thread_add(); + + // Non-pthread-root-queue pthreads use a 65 second timeout in case there + // are any timers that run once a minute + const int64_t timeout = (pqc ? 
5ull : 65ull) * NSEC_PER_SEC; do { - _dispatch_worker_thread4(dq); - // we use 65 seconds in case there are any timers that run once a minute + _dispatch_root_queue_drain(dq); } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, - dispatch_time(0, 65ull * NSEC_PER_SEC)) == 0); + dispatch_time(0, timeout)) == 0); - (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size); - if (dq->dq_items_tail) { - _dispatch_queue_wakeup_global(dq); - } + (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, relaxed); + _dispatch_queue_wakeup_global(dq); + _dispatch_release(dq); return NULL; } @@ -2482,38 +2850,146 @@ _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) return pthread_sigmask(how, set, oset); } -#endif +#endif // DISPATCH_USE_PTHREAD_POOL #pragma mark - -#pragma mark dispatch_main_queue +#pragma mark dispatch_runloop_queue static bool _dispatch_program_is_probably_callback_driven; #if DISPATCH_COCOA_COMPAT + +dispatch_queue_t +_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) +{ + dispatch_queue_t dq; + size_t dqs; + + if (slowpath(flags)) { + return NULL; + } + dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; + dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); + _dispatch_queue_init(dq); + dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract + dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; + dq->dq_running = 1; + dq->dq_is_thread_bound = 1; + _dispatch_runloop_queue_port_init(dq); + _dispatch_queue_set_bound_thread(dq); + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_introspection_queue_create(dq); +} + +void +_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); + unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release); + _dispatch_queue_clear_bound_thread(dq); + if (suspend_cnt == 0) { + _dispatch_wakeup(dq); + } +} + +void +_dispatch_runloop_queue_dispose(dispatch_queue_t dq) +{ + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_queue_dispose(dq); + _dispatch_runloop_queue_port_dispose(dq); + _dispatch_queue_destroy(dq); +} + +bool +_dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + dispatch_retain(dq); + bool r = _dispatch_runloop_queue_drain_one(dq); + dispatch_release(dq); + return r; +} + +void +_dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + _dispatch_runloop_queue_probe(dq); +} + +mach_port_t +_dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) +{ + if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { + DISPATCH_CLIENT_CRASH("Not a runloop queue"); + } + return (mach_port_t)dq->do_ctxt; +} + static void -_dispatch_main_q_port_init(void *ctxt DISPATCH_UNUSED) +_dispatch_runloop_queue_port_init(void *ctxt) { + dispatch_queue_t dq = (dispatch_queue_t)ctxt; + mach_port_t mp; kern_return_t kr; _dispatch_safe_fork = false; - kr = 
mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &main_q_port); + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); - kr = mach_port_insert_right(mach_task_self(), main_q_port, main_q_port, + kr = mach_port_insert_right(mach_task_self(), mp, mp, MACH_MSG_TYPE_MAKE_SEND); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); + if (dq != &_dispatch_main_q) { + struct mach_port_limits limits = { + .mpl_qlimit = 1, + }; + kr = mach_port_set_attributes(mach_task_self(), mp, + MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, + sizeof(limits)); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + } + dq->do_ctxt = (void*)(uintptr_t)mp; _dispatch_program_is_probably_callback_driven = true; } +static void +_dispatch_runloop_queue_port_dispose(dispatch_queue_t dq) +{ + mach_port_t mp = (mach_port_t)dq->do_ctxt; + if (!mp) { + return; + } + dq->do_ctxt = NULL; + kern_return_t kr = mach_port_deallocate(mach_task_self(), mp); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +#pragma mark - +#pragma mark dispatch_main_queue + mach_port_t _dispatch_get_main_queue_port_4CF(void) { - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - return main_q_port; + dispatch_queue_t dq = &_dispatch_main_q; + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + return (mach_port_t)dq->do_ctxt; } static bool main_q_is_draining; @@ -2546,6 +3022,7 @@ dispatch_main(void) #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif + _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); _dispatch_program_is_probably_callback_driven = true; pthread_exit(NULL); DISPATCH_CRASH("pthread_exit() returned"); @@ -2561,10 +3038,6 @@ _dispatch_sigsuspend(void) { static const 
sigset_t mask; -#if DISPATCH_COCOA_COMPAT - // Do not count the signal handling thread as a worker thread - (void)dispatch_atomic_dec(&_dispatch_worker_threads); -#endif for (;;) { sigsuspend(&mask); } @@ -2583,12 +3056,13 @@ DISPATCH_NOINLINE static void _dispatch_queue_cleanup2(void) { - (void)dispatch_atomic_dec(&_dispatch_main_q.dq_running); - - dispatch_atomic_release_barrier(); - if (dispatch_atomic_sub2o(&_dispatch_main_q, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK) == 0) { - _dispatch_wakeup(&_dispatch_main_q); + dispatch_queue_t dq = &_dispatch_main_q; + (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); + unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release); + dq->dq_is_thread_bound = 0; + if (suspend_cnt == 0) { + _dispatch_wakeup(dq); } // overload the "probably" variable to mean that dispatch_main() or @@ -2601,23 +3075,9 @@ _dispatch_queue_cleanup2(void) } #if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_port_pred, NULL, - _dispatch_main_q_port_init); - - mach_port_t mp = main_q_port; - kern_return_t kr; - - main_q_port = 0; - - if (mp) { - kr = mach_port_deallocate(mach_task_self(), mp); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, - -1); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - } + dispatch_once_f(&_dispatch_main_q_port_pred, dq, + _dispatch_runloop_queue_port_init); + _dispatch_runloop_queue_port_dispose(dq); #endif } @@ -2630,350 +3090,3 @@ _dispatch_queue_cleanup(void *ctxt) // POSIX defines that destructors are only called if 'ctxt' is non-null DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); } - -#pragma mark - -#pragma mark dispatch_manager_queue - -static unsigned int _dispatch_select_workaround; -static fd_set _dispatch_rfds; -static fd_set _dispatch_wfds; -static void **_dispatch_rfd_ptrs; -static void **_dispatch_wfd_ptrs; - 
-static int _dispatch_kq; - -static void -_dispatch_get_kq_init(void *context DISPATCH_UNUSED) -{ - static const struct kevent kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - - _dispatch_safe_fork = false; - _dispatch_kq = kqueue(); - if (_dispatch_kq == -1) { - DISPATCH_CLIENT_CRASH("kqueue() create failed: " - "probably out of file descriptors"); - } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { - // in case we fall back to select() - FD_SET(_dispatch_kq, &_dispatch_rfds); - } - - (void)dispatch_assume_zero(kevent(_dispatch_kq, &kev, 1, NULL, 0, NULL)); - - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); -} - -static int -_dispatch_get_kq(void) -{ - static dispatch_once_t pred; - - dispatch_once_f(&pred, NULL, _dispatch_get_kq_init); - - return _dispatch_kq; -} - -long -_dispatch_update_kq(const struct kevent *kev) -{ - int rval; - struct kevent kev_copy = *kev; - // This ensures we don't get a pending kevent back while registering - // a new kevent - kev_copy.flags |= EV_RECEIPT; - - if (_dispatch_select_workaround && (kev_copy.flags & EV_DELETE)) { - // Only executed on manager queue - switch (kev_copy.filter) { - case EVFILT_READ: - if (kev_copy.ident < FD_SETSIZE && - FD_ISSET((int)kev_copy.ident, &_dispatch_rfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev_copy.ident] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - return 0; - } - break; - case EVFILT_WRITE: - if (kev_copy.ident < FD_SETSIZE && - FD_ISSET((int)kev_copy.ident, &_dispatch_wfds)) { - FD_CLR((int)kev_copy.ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev_copy.ident] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - return 0; - } - break; - default: - break; - } - } - -retry: - rval = kevent(_dispatch_get_kq(), &kev_copy, 1, &kev_copy, 1, NULL); - if (rval == -1) { - // If we fail to register with kevents, for other reasons aside from - // changelist elements. 
- int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - _dispatch_bug_client("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - //kev_copy.flags |= EV_ERROR; - //kev_copy.data = err; - return err; - } - - // The following select workaround only applies to adding kevents - if ((kev->flags & (EV_DISABLE|EV_DELETE)) || - !(kev->flags & (EV_ADD|EV_ENABLE))) { - return 0; - } - - // Only executed on manager queue - switch (kev_copy.data) { - case 0: - return 0; - case EBADF: - break; - default: - // If an error occurred while registering with kevent, and it was - // because of a kevent changelist processing && the kevent involved - // either doing a read or write, it would indicate we were trying - // to register a /dev/* port; fall back to select - switch (kev_copy.filter) { - case EVFILT_READ: - if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { - if (!_dispatch_rfd_ptrs) { - _dispatch_rfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); - } - _dispatch_rfd_ptrs[kev_copy.ident] = kev_copy.udata; - FD_SET((int)kev_copy.ident, &_dispatch_rfds); - (void)dispatch_atomic_inc(&_dispatch_select_workaround); - _dispatch_debug("select workaround used to read fd %d: 0x%lx", - (int)kev_copy.ident, (long)kev_copy.data); - return 0; - } - break; - case EVFILT_WRITE: - if (dispatch_assume(kev_copy.ident < FD_SETSIZE)) { - if (!_dispatch_wfd_ptrs) { - _dispatch_wfd_ptrs = calloc(FD_SETSIZE, sizeof(void*)); - } - _dispatch_wfd_ptrs[kev_copy.ident] = kev_copy.udata; - FD_SET((int)kev_copy.ident, &_dispatch_wfds); - (void)dispatch_atomic_inc(&_dispatch_select_workaround); - _dispatch_debug("select workaround used to write fd %d: 0x%lx", - (int)kev_copy.ident, (long)kev_copy.data); - return 0; - } - break; - default: - // kevent error, _dispatch_source_merge_kevent() will handle it - _dispatch_source_drain_kevent(&kev_copy); - break; - } - break; - } - return kev_copy.data; -} - -bool 
-_dispatch_mgr_wakeup(dispatch_queue_t dq) -{ - static const struct kevent kev = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }; - - _dispatch_debug("waking up the _dispatch_mgr_q: %p", dq); - - _dispatch_update_kq(&kev); - - return false; -} - -static void -_dispatch_mgr_thread2(struct kevent *kev, size_t cnt) -{ - size_t i; - - for (i = 0; i < cnt; i++) { - // EVFILT_USER isn't used by sources - if (kev[i].filter == EVFILT_USER) { - // If _dispatch_mgr_thread2() ever is changed to return to the - // caller, then this should become _dispatch_queue_drain() - _dispatch_queue_serial_drain_till_empty(&_dispatch_mgr_q); - } else { - _dispatch_source_drain_kevent(&kev[i]); - } - } -} - -#if DISPATCH_USE_VM_PRESSURE && DISPATCH_USE_MALLOC_VM_PRESSURE_SOURCE -// VM Pressure source for malloc -static dispatch_source_t _dispatch_malloc_vm_pressure_source; - -static void -_dispatch_malloc_vm_pressure_handler(void *context DISPATCH_UNUSED) -{ - malloc_zone_pressure_relief(0,0); -} - -static void -_dispatch_malloc_vm_pressure_setup(void) -{ - _dispatch_malloc_vm_pressure_source = dispatch_source_create( - DISPATCH_SOURCE_TYPE_VM, 0, DISPATCH_VM_PRESSURE, - _dispatch_get_root_queue(0, true)); - dispatch_source_set_event_handler_f(_dispatch_malloc_vm_pressure_source, - _dispatch_malloc_vm_pressure_handler); - dispatch_resume(_dispatch_malloc_vm_pressure_source); -} -#else -#define _dispatch_malloc_vm_pressure_setup() -#endif - -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_mgr_invoke(void) -{ - static const struct timespec timeout_immediately = { 0, 0 }; - struct timespec timeout; - const struct timespec *timeoutp; - struct timeval sel_timeout, *sel_timeoutp; - fd_set tmp_rfds, tmp_wfds; - struct kevent kev[1]; - int k_cnt, err, i, r; - - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); -#if DISPATCH_COCOA_COMPAT - // Do not count the manager thread as a worker thread - (void)dispatch_atomic_dec(&_dispatch_worker_threads); 
-#endif - _dispatch_malloc_vm_pressure_setup(); - - for (;;) { - _dispatch_run_timers(); - - timeoutp = _dispatch_get_next_timer_fire(&timeout); - - if (_dispatch_select_workaround) { - FD_COPY(&_dispatch_rfds, &tmp_rfds); - FD_COPY(&_dispatch_wfds, &tmp_wfds); - if (timeoutp) { - sel_timeout.tv_sec = timeoutp->tv_sec; - sel_timeout.tv_usec = (typeof(sel_timeout.tv_usec)) - (timeoutp->tv_nsec / 1000u); - sel_timeoutp = &sel_timeout; - } else { - sel_timeoutp = NULL; - } - - r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, sel_timeoutp); - if (r == -1) { - err = errno; - if (err != EBADF) { - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - continue; - } - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, - &_dispatch_wfds)) { - continue; - } - r = dup(i); - if (r != -1) { - close(r); - } else { - if (FD_ISSET(i, &_dispatch_rfds)) { - FD_CLR(i, &_dispatch_rfds); - _dispatch_rfd_ptrs[i] = 0; - (void)dispatch_atomic_dec( - &_dispatch_select_workaround); - } - if (FD_ISSET(i, &_dispatch_wfds)) { - FD_CLR(i, &_dispatch_wfds); - _dispatch_wfd_ptrs[i] = 0; - (void)dispatch_atomic_dec( - &_dispatch_select_workaround); - } - } - } - continue; - } - - if (r > 0) { - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (FD_ISSET(i, &tmp_rfds)) { - FD_CLR(i, &_dispatch_rfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_READ, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_rfd_ptrs[i]); - _dispatch_rfd_ptrs[i] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - _dispatch_mgr_thread2(kev, 1); - } - if (FD_ISSET(i, &tmp_wfds)) { - FD_CLR(i, &_dispatch_wfds); // emulate EV_DISABLE - EV_SET(&kev[0], i, EVFILT_WRITE, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_wfd_ptrs[i]); - _dispatch_wfd_ptrs[i] = 0; - (void)dispatch_atomic_dec(&_dispatch_select_workaround); - _dispatch_mgr_thread2(kev, 1); - } - } - } - - timeoutp = 
&timeout_immediately; - } - - k_cnt = kevent(_dispatch_kq, NULL, 0, kev, sizeof(kev) / sizeof(kev[0]), - timeoutp); - err = errno; - - switch (k_cnt) { - case -1: - if (err == EBADF) { - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - } - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - continue; - default: - _dispatch_mgr_thread2(kev, (size_t)k_cnt); - // fall through - case 0: - _dispatch_force_cache_cleanup(); - continue; - } - } -} - -DISPATCH_NORETURN -dispatch_queue_t -_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) -{ - // never returns, so burn bridges behind us & clear stack 2k ahead - _dispatch_clear_stack(2048); - _dispatch_mgr_invoke(); -} diff --git a/src/queue_internal.h b/src/queue_internal.h index b223ccec2..4f42d24fa 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,6 +32,42 @@ #include // for HeaderDoc #endif +#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES) +#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // +#endif + +/* x86 & cortex-a8 have a 64 byte cacheline */ +#define DISPATCH_CACHELINE_SIZE 64u +#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE +#define ROUND_UP_TO_CACHELINE_SIZE(x) \ + (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \ + ~(DISPATCH_CACHELINE_SIZE - 1u)) +#define ROUND_UP_TO_CONTINUATION_SIZE(x) \ + (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ + ~(DISPATCH_CONTINUATION_SIZE - 1u)) +#define ROUND_UP_TO_VECTOR_SIZE(x) \ + (((x) + 15u) & ~15u) +#define DISPATCH_CACHELINE_ALIGN \ + __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) + + +#define DISPATCH_QUEUE_CACHELINE_PADDING \ + char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] +#ifdef __LP64__ +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + 
DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#else +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#if !DISPATCH_INTROSPECTION +// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0 +#undef DISPATCH_QUEUE_CACHELINE_PADDING +#define DISPATCH_QUEUE_CACHELINE_PADDING +#endif +#endif + // If dc_vtable is less than 127, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. @@ -60,13 +96,10 @@ struct dispatch_continuation_s { typedef struct dispatch_continuation_s *dispatch_continuation_t; struct dispatch_apply_s { - size_t da_index; - size_t da_iterations; - void (*da_func)(void *, size_t); - void *da_ctxt; + size_t volatile da_index, da_todo; + size_t da_iterations, da_nested; + dispatch_continuation_t da_dc; _dispatch_thread_semaphore_t da_sema; - dispatch_queue_t da_queue; - size_t da_done; uint32_t da_thr_cnt; }; @@ -77,31 +110,27 @@ struct dispatch_queue_attr_s { DISPATCH_STRUCT_HEADER(queue_attr); }; -#define DISPATCH_QUEUE_MIN_LABEL_SIZE 64 - -#ifdef __LP64__ -#define DISPATCH_QUEUE_CACHELINE_PAD (4*sizeof(void*)) -#else -#define DISPATCH_QUEUE_CACHELINE_PAD (2*sizeof(void*)) -#endif - #define DISPATCH_QUEUE_HEADER \ uint32_t volatile dq_running; \ - uint32_t dq_width; \ - struct dispatch_object_s *volatile dq_items_tail; \ struct dispatch_object_s *volatile dq_items_head; \ + /* LP64 global queue cacheline boundary */ \ + struct dispatch_object_s *volatile dq_items_tail; \ + dispatch_queue_t dq_specific_q; \ + uint32_t dq_width; \ + unsigned int dq_is_thread_bound:1; \ unsigned long dq_serialnum; \ - dispatch_queue_t dq_specific_q; + const char *dq_label; \ + DISPATCH_INTROSPECTION_QUEUE_LIST; DISPATCH_CLASS_DECL(queue); struct dispatch_queue_s { DISPATCH_STRUCT_HEADER(queue); DISPATCH_QUEUE_HEADER; - char 
dq_label[DISPATCH_QUEUE_MIN_LABEL_SIZE]; // must be last - char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD]; // for static queues only + DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only }; DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue); @@ -109,19 +138,37 @@ DISPATCH_CLASS_DECL(queue_specific_queue); extern struct dispatch_queue_s _dispatch_mgr_q; +void _dispatch_queue_destroy(dispatch_object_t dou); void _dispatch_queue_dispose(dispatch_queue_t dq); void _dispatch_queue_invoke(dispatch_queue_t dq); void _dispatch_queue_push_list_slow(dispatch_queue_t dq, struct dispatch_object_s *obj, unsigned int n); void _dispatch_queue_push_slow(dispatch_queue_t dq, struct dispatch_object_s *obj); +unsigned long _dispatch_queue_probe(dispatch_queue_t dq); dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); +_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); -bool _dispatch_queue_probe_root(dispatch_queue_t dq); -bool _dispatch_mgr_wakeup(dispatch_queue_t dq); -DISPATCH_NORETURN -dispatch_queue_t _dispatch_mgr_thread(dispatch_queue_t dq); +unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq); +void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); +unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq); +void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); +void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); +void _dispatch_mgr_queue_drain(void); +unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq); +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +void _dispatch_mgr_priority_init(void); +#else +static inline void _dispatch_mgr_priority_init(void) {} +#endif +void _dispatch_after_timer_callback(void *ctxt); +void 
_dispatch_async_redirect_invoke(void *ctxt); +void _dispatch_sync_recurse_invoke(void *ctxt); +void _dispatch_apply_invoke(void *ctxt); +void _dispatch_apply_redirect_invoke(void *ctxt); +void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); @@ -149,10 +196,10 @@ enum { DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY, }; -extern unsigned long _dispatch_queue_serial_numbers; +extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; -#if !__OBJC2__ +#if !(USE_OBJC && __OBJC2__) DISPATCH_ALWAYS_INLINE static inline bool @@ -161,15 +208,14 @@ _dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, { struct dispatch_object_s *prev; tail->do_next = NULL; - dispatch_atomic_store_barrier(); - prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail); + prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release); if (fastpath(prev)) { // if we crash here with a value less than 0x1000, then we are at a // known bug in client code for example, see _dispatch_queue_dispose // or _dispatch_atfork_child prev->do_next = head; } - return prev; + return (prev != NULL); } DISPATCH_ALWAYS_INLINE @@ -193,11 +239,56 @@ _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, + bool wakeup) +{ + struct dispatch_object_s *tail = _tail._do; + if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { + _dispatch_queue_push_slow(dq, tail); + } else if (slowpath(wakeup)) { + _dispatch_wakeup(dq); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_class_invoke(dispatch_object_t dou, + dispatch_queue_t (*invoke)(dispatch_object_t, + _dispatch_thread_semaphore_t*)) +{ + dispatch_queue_t dq = dou._dq; + if 
(!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && + fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ + dispatch_queue_t tq = NULL; + _dispatch_thread_semaphore_t sema = 0; + tq = invoke(dq, &sema); + // We do not need to check the result. + // When the suspend-count lock is dropped, then the check will happen. + (void)dispatch_atomic_dec2o(dq, dq_running, release); + if (sema) { + _dispatch_thread_semaphore_signal(sema); + } else if (tq) { + return _dispatch_queue_push(tq, dq); + } + } + dq->do_next = DISPATCH_OBJECT_LISTLESS; + if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, release)) { + dispatch_atomic_barrier(seq_cst); // + if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { + _dispatch_wakeup(dq); // verify that the queue is idle + } + } + _dispatch_release(dq); // added when the queue is put on the list +} + DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t _dispatch_queue_get_current(void) { - return _dispatch_thread_getspecific(dispatch_queue_key); + return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -211,6 +302,7 @@ _dispatch_get_root_queue(long priority, bool overcommit) DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; #endif case DISPATCH_QUEUE_PRIORITY_LOW: + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_DEFAULT: @@ -227,6 +319,7 @@ _dispatch_get_root_queue(long priority, bool overcommit) DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; #endif case DISPATCH_QUEUE_PRIORITY_LOW: + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY]; case DISPATCH_QUEUE_PRIORITY_DEFAULT: return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]; @@ -242,23 +335,67 @@ _dispatch_get_root_queue(long priority, bool overcommit) static inline void 
_dispatch_queue_init(dispatch_queue_t dq) { - dq->do_next = DISPATCH_OBJECT_LISTLESS; - // Default target queue is overcommit! - dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + dq->dq_running = 0; dq->dq_width = 1; - dq->dq_serialnum = dispatch_atomic_inc(&_dispatch_queue_serial_numbers) - 1; + dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, + relaxed); } -dispatch_continuation_t -_dispatch_continuation_alloc_from_heap(void); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_set_bound_thread(dispatch_queue_t dq) +{ + //Tag thread-bound queues with the owning thread + dispatch_assert(dq->dq_is_thread_bound); + dq->do_finalizer = (void*)_dispatch_thread_self(); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + dq->do_finalizer = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_t +_dispatch_queue_get_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + return (pthread_t)dq->do_finalizer; +} + +#ifndef DISPATCH_CONTINUATION_CACHE_LIMIT +#if TARGET_OS_EMBEDDED +#define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16 +#else +#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128 +#endif +#endif + +dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); +void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +extern int _dispatch_continuation_cache_limit; +void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); +#else +#define _dispatch_continuation_cache_limit DISPATCH_CONTINUATION_CACHE_LIMIT +#define _dispatch_continuation_free_to_cache_limit(c) \ + 
_dispatch_continuation_free_to_heap(c) +#endif DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t _dispatch_continuation_alloc_cacheonly(void) { - dispatch_continuation_t dc; - dc = fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + dispatch_continuation_t dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); if (dc) { _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); } @@ -269,26 +406,40 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t _dispatch_continuation_alloc(void) { - dispatch_continuation_t dc; - - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); + dispatch_continuation_t dc = + fastpath(_dispatch_continuation_alloc_cacheonly()); if(!dc) { return _dispatch_continuation_alloc_from_heap(); } return dc; } - DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_free(dispatch_continuation_t dc) +static inline dispatch_continuation_t +_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) { - dispatch_continuation_t prev_dc; - prev_dc = _dispatch_thread_getspecific(dispatch_cache_key); + dispatch_continuation_t prev_dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + int cnt = prev_dc ? 
prev_dc->do_ref_cnt + 1 : 1; + // Cap continuation cache + if (slowpath(cnt > _dispatch_continuation_cache_limit)) { + return dc; + } dc->do_next = prev_dc; + dc->do_ref_cnt = cnt; _dispatch_thread_setspecific(dispatch_cache_key, dc); + return NULL; } -#endif // !__OBJC2__ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dc = _dispatch_continuation_free_cacheonly(dc); + if (slowpath(dc)) { + _dispatch_continuation_free_to_cache_limit(dc); + } +} +#endif // !(USE_OBJC && __OBJC2__) #endif diff --git a/src/semaphore.c b/src/semaphore.c index d3fd43117..20d9ae54a 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -35,6 +35,52 @@ } while (0) #endif +#if USE_WIN32_SEM +// rdar://problem/8428132 +static DWORD best_resolution = 1; // 1ms + +DWORD +_push_timer_resolution(DWORD ms) +{ + MMRESULT res; + static dispatch_once_t once; + + if (ms > 16) { + // only update timer resolution if smaller than default 15.6ms + // zero means not updated + return 0; + } + + // aim for the best resolution we can accomplish + dispatch_once(&once, ^{ + TIMECAPS tc; + MMRESULT res; + res = timeGetDevCaps(&tc, sizeof(tc)); + if (res == MMSYSERR_NOERROR) { + best_resolution = min(max(tc.wPeriodMin, best_resolution), + tc.wPeriodMax); + } + }); + + res = timeBeginPeriod(best_resolution); + if (res == TIMERR_NOERROR) { + return best_resolution; + } + // zero means not updated + return 0; +} + +// match ms parameter to result from _push_timer_resolution +void +_pop_timer_resolution(DWORD ms) +{ + if (ms) { + timeEndPeriod(ms); + } +} +#endif /* USE_WIN32_SEM */ + + DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); @@ -48,7 +94,7 @@ _dispatch_semaphore_init(long value, dispatch_object_t dou) { 
dispatch_semaphore_t dsema = dou._dsema; - dsema->do_next = DISPATCH_OBJECT_LISTLESS; + dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS; dsema->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); dsema->dsema_value = value; @@ -71,8 +117,10 @@ dispatch_semaphore_create(long value) return NULL; } - dsema = _dispatch_alloc(DISPATCH_VTABLE(semaphore), - sizeof(struct dispatch_semaphore_s)); + dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore), + sizeof(struct dispatch_semaphore_s) - + sizeof(dsema->dsema_notify_head) - + sizeof(dsema->dsema_notify_tail)); _dispatch_semaphore_init(value, dsema); return dsema; } @@ -99,14 +147,34 @@ _dispatch_semaphore_create_port(semaphore_t *s4) while ((kr = semaphore_create(mach_task_self(), &tmp, SYNC_POLICY_FIFO, 0))) { DISPATCH_VERIFY_MIG(kr); - sleep(1); + _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) { kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } } +#elif USE_WIN32_SEM +static void +_dispatch_semaphore_create_handle(HANDLE *s4) +{ + HANDLE tmp; + + if (*s4) { + return; + } + + // lazily allocate the semaphore port + + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + + if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + CloseHandle(tmp); + } +} #endif void @@ -125,13 +193,13 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } - if (dsema->dsema_waiter_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } #elif USE_POSIX_SEM int ret = sem_destroy(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + if (dsema->dsema_handle) { + CloseHandle(dsema->dsema_handle); + } #endif } @@ -141,14 
+209,14 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) dispatch_semaphore_t dsema = dou._dsema; size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM - offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", dsema->dsema_port); #endif - offset += snprintf(&buf[offset], bufsiz - offset, + offset += dsnprintf(&buf[offset], bufsiz - offset, "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); return offset; } @@ -164,7 +232,9 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) // dsema after the atomic increment. _dispatch_retain(dsema); - (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals); +#if USE_MACH_SEM || USE_POSIX_SEM + (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals, relaxed); +#endif #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); @@ -173,6 +243,10 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) #elif USE_POSIX_SEM int ret = sem_post(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); + int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); + dispatch_assume(ret); #endif _dispatch_release(dsema); @@ -182,14 +256,12 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) long dispatch_semaphore_signal(dispatch_semaphore_t dsema) { - dispatch_atomic_release_barrier(); - long value = dispatch_atomic_inc2o(dsema, dsema_value); + long value = dispatch_atomic_inc2o(dsema, dsema_value, release); if (fastpath(value > 0)) { return 0; } if (slowpath(value == LONG_MIN)) { - DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave() or " - "dispatch_semaphore_signal()"); + 
DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()"); } return _dispatch_semaphore_signal_slow(dsema); } @@ -201,22 +273,38 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, { long orig; +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; +#elif USE_POSIX_SEM + struct timespec _timeout; + int ret; +#elif USE_WIN32_SEM + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; +#endif + +#if USE_MACH_SEM || USE_POSIX_SEM again: // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is // signaled (6880961). - while ((orig = dsema->dsema_sent_ksignals)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig, - orig - 1)) { + orig = dsema->dsema_sent_ksignals; + while (orig) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_sent_ksignals, orig, + orig - 1, &orig, relaxed)) { return 0; } } +#endif #if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; - _dispatch_semaphore_create_port(&dsema->dsema_port); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); +#endif // From xnu/osfmk/kern/sync_sema.c: // wait_semaphore->count = -1; /* we don't keep an actual count */ @@ -228,6 +316,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, switch (timeout) { default: +#if USE_MACH_SEM do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -239,29 +328,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo what the fast path did to - // dsema->dsema_value - case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_value) < 0) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { - return KERN_OPERATION_TIMED_OUT; - } - } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. 
- case DISPATCH_TIME_FOREVER: - do { - kr = semaphore_wait(dsema->dsema_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } #elif USE_POSIX_SEM - struct timespec _timeout; - int ret; - - switch (timeout) { - default: do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -273,34 +340,60 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } +#elif USE_WIN32_SEM + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + _pop_timer_resolution(resolution); + if (wait_result != WAIT_TIMEOUT) { + break; + } +#endif // Fall through and try to undo what the fast path did to // dsema->dsema_value case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_value) < 0) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) { + orig = dsema->dsema_value; + while (orig < 0) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + &orig, relaxed)) { +#if USE_MACH_SEM + return KERN_OPERATION_TIMED_OUT; +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; +#endif } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. 
case DISPATCH_TIME_FOREVER: +#if USE_MACH_SEM + do { + kr = semaphore_wait(dsema->dsema_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM do { ret = sem_wait(&dsema->dsema_sem); } while (ret != 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + WaitForSingleObject(dsema->dsema_handle, INFINITE); +#endif break; } -#endif - +#if USE_MACH_SEM || USE_POSIX_SEM goto again; +#else + return 0; +#endif } long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long value = dispatch_atomic_dec2o(dsema, dsema_value); - dispatch_atomic_acquire_barrier(); + long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); if (fastpath(value >= 0)) { return 0; } @@ -313,8 +406,8 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) dispatch_group_t dispatch_group_create(void) { - dispatch_group_t dg = _dispatch_alloc(DISPATCH_VTABLE(group), - sizeof(struct dispatch_semaphore_s)); + dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( + DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); _dispatch_semaphore_init(LONG_MAX, dg); return dg; } @@ -323,29 +416,32 @@ void dispatch_group_enter(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - - (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER); + long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); + if (slowpath(value < 0)) { + DISPATCH_CLIENT_CRASH( + "Too many nested calls to dispatch_group_enter()"); + } } DISPATCH_NOINLINE static long _dispatch_group_wake(dispatch_semaphore_t dsema) { - struct dispatch_sema_notify_s *next, *head, *tail = NULL; + dispatch_continuation_t next, head, tail = NULL, dc; long rval; - head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL); + head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL, relaxed); if (head) { // snapshot before anything is notified/woken - tail = dispatch_atomic_xchg2o(dsema, 
dsema_notify_tail, NULL); + tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL, relaxed); } - rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0); + rval = (long)dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0, relaxed); if (rval) { // wake group waiters #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + _dispatch_semaphore_create_port(&dsema->dsema_port); do { - kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port); + kern_return_t kr = semaphore_signal(dsema->dsema_port); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } while (--rval); #elif USE_POSIX_SEM @@ -353,20 +449,31 @@ _dispatch_group_wake(dispatch_semaphore_t dsema) int ret = sem_post(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } while (--rval); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); + int ret; + ret = ReleaseSemaphore(dsema->dsema_handle, rval, NULL); + dispatch_assume(ret); +#else +#error "No supported semaphore type" #endif } if (head) { // async group notify blocks do { - dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func); - _dispatch_release(head->dsn_queue); - next = fastpath(head->dsn_next); + next = fastpath(head->do_next); if (!next && head != tail) { - while (!(next = fastpath(head->dsn_next))) { - _dispatch_hardware_pause(); + while (!(next = fastpath(head->do_next))) { + dispatch_hardware_pause(); } } - free(head); + dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; + dc = _dispatch_continuation_free_cacheonly(head); + dispatch_async_f(dsn_queue, head->dc_ctxt, head->dc_func); + _dispatch_release(dsn_queue); + if (slowpath(dc)) { + _dispatch_continuation_free_to_cache_limit(dc); + } } while ((head = next)); _dispatch_release(dsema); } @@ -377,9 +484,11 @@ void dispatch_group_leave(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - - dispatch_semaphore_signal(dsema); - if (dsema->dsema_value == dsema->dsema_orig) { + long value = 
dispatch_atomic_inc2o(dsema, dsema_value, release); + if (slowpath(value < 0)) { + DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()"); + } + if (slowpath(value == LONG_MAX)) { (void)_dispatch_group_wake(dsema); } } @@ -390,26 +499,39 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { long orig; +#if USE_MACH_SEM + mach_timespec_t _timeout; + kern_return_t kr; +#elif USE_POSIX_SEM // KVV + struct timespec _timeout; + int ret; +#elif USE_WIN32_SEM // KVV + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; +#endif + again: // check before we cause another signal to be sent by incrementing // dsema->dsema_group_waiters - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return _dispatch_group_wake(dsema); } // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is // signaled (6880961). - (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters); + (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed); // check the values again in case we need to wake any threads - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return _dispatch_group_wake(dsema); } #if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; - - _dispatch_semaphore_create_port(&dsema->dsema_waiter_port); + _dispatch_semaphore_create_port(&dsema->dsema_port); +#elif USE_WIN32_SEM + _dispatch_semaphore_create_handle(&dsema->dsema_handle); +#endif // From xnu/osfmk/kern/sync_sema.c: // wait_semaphore->count = -1; /* we don't keep an actual count */ @@ -421,42 +543,19 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) switch (timeout) { default: +#if USE_MACH_SEM do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % 
NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port, - _timeout)); + kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); } while (kr == KERN_ABORTED); if (kr != KERN_OPERATION_TIMED_OUT) { DISPATCH_SEMAPHORE_VERIFY_KR(kr); break; } - // Fall through and try to undo the earlier change to - // dsema->dsema_group_waiters - case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_group_waiters)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, - orig - 1)) { - return KERN_OPERATION_TIMED_OUT; - } - } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. - case DISPATCH_TIME_FOREVER: - do { - kr = semaphore_wait(dsema->dsema_waiter_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } #elif USE_POSIX_SEM - struct timespec _timeout; - int ret; - - switch (timeout) { - default: do { uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); @@ -468,42 +567,64 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } +#elif USE_WIN32_SEM + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + _pop_timer_resolution(resolution); + if (wait_result != WAIT_TIMEOUT) { + break; + } +#endif // Fall through and try to undo the earlier change to // dsema->dsema_group_waiters case DISPATCH_TIME_NOW: - while ((orig = dsema->dsema_group_waiters)) { - if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig, - orig - 1)) { + orig = dsema->dsema_group_waiters; + while (orig) { + if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_group_waiters, orig, + orig - 1, &orig, relaxed)) { +#if USE_MACH_SEM + return KERN_OPERATION_TIMED_OUT; +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return -1; +#endif } } // 
Another thread called semaphore_signal(). // Fall through and drain the wakeup. case DISPATCH_TIME_FOREVER: +#if USE_MACH_SEM + do { + kr = semaphore_wait(dsema->dsema_port); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +#elif USE_POSIX_SEM do { ret = sem_wait(&dsema->dsema_sem); } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + WaitForSingleObject(dsema->dsema_handle, INFINITE); +#endif break; } -#endif - goto again; -} + } long dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - if (dsema->dsema_value == dsema->dsema_orig) { + if (dsema->dsema_value == LONG_MAX) { return 0; } if (timeout == 0) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM +#elif USE_POSIX_SEM || USE_WIN32_SEM errno = ETIMEDOUT; return (-1); #endif @@ -517,25 +638,21 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, void (*func)(void *)) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - struct dispatch_sema_notify_s *dsn, *prev; - - // FIXME -- this should be updated to use the continuation cache - while (!(dsn = calloc(1, sizeof(*dsn)))) { - sleep(1); - } - - dsn->dsn_queue = dq; - dsn->dsn_ctxt = ctxt; - dsn->dsn_func = func; + dispatch_continuation_t prev, dsn = _dispatch_continuation_alloc(); + dsn->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; + dsn->dc_data = dq; + dsn->dc_ctxt = ctxt; + dsn->dc_func = func; + dsn->do_next = NULL; _dispatch_retain(dq); - dispatch_atomic_store_barrier(); - prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn); + prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn, release); if (fastpath(prev)) { - prev->dsn_next = dsn; + prev->do_next = dsn; } else { _dispatch_retain(dg); - dsema->dsema_notify_head = dsn; - if (dsema->dsema_value == dsema->dsema_orig) { + dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst); + 
dispatch_atomic_barrier(seq_cst); // + if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { _dispatch_group_wake(dsema); } } @@ -554,18 +671,19 @@ dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, #pragma mark - #pragma mark _dispatch_thread_semaphore_t -DISPATCH_NOINLINE -static _dispatch_thread_semaphore_t +_dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void) { _dispatch_safe_fork = false; -#if USE_MACH_SEM +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_create(); +#elif USE_MACH_SEM semaphore_t s4; kern_return_t kr; while (slowpath(kr = semaphore_create(mach_task_self(), &s4, SYNC_POLICY_FIFO, 0))) { DISPATCH_VERIFY_MIG(kr); - sleep(1); + _dispatch_temporary_resource_shortage(); } return s4; #elif USE_POSIX_SEM @@ -573,14 +691,23 @@ _dispatch_thread_semaphore_create(void) int ret = sem_init(&s4, 0, 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); return s4; +#elif USE_WIN32_SEM + HANDLE tmp; + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + return (_dispatch_thread_semaphore_t)tmp; +#else +#error "No supported semaphore type" #endif } -DISPATCH_NOINLINE void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_dispose(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr = semaphore_destroy(mach_task_self(), s4); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -588,13 +715,23 @@ _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) sem_t s4 = (sem_t)sema; int ret = sem_destroy(&s4); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + // XXX: signal the semaphore? 
+ WINBOOL success; + success = CloseHandle((HANDLE)sema); + dispatch_assume(success); +#else +#error "No supported semaphore type" #endif } void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM + // assumed to contain a release barrier +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_signal(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr = semaphore_signal(s4); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -602,13 +739,22 @@ _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) sem_t s4 = (sem_t)sema; int ret = sem_post(&s4); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + int ret; + ret = ReleaseSemaphore((HANDLE)sema, 1, NULL); + dispatch_assume(ret); +#else +#error "No supported semaphore type" #endif } void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) { -#if USE_MACH_SEM + // assumed to contain an acquire barrier +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + return _os_semaphore_wait(sema); +#elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr; do { @@ -622,28 +768,12 @@ _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) ret = sem_wait(&s4); } while (slowpath(ret != 0)); DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + DWORD wait_result; + do { + wait_result = WaitForSingleObject((HANDLE)sema, INFINITE); + } while (wait_result != WAIT_OBJECT_0); +#else +#error "No supported semaphore type" #endif } - -_dispatch_thread_semaphore_t -_dispatch_get_thread_semaphore(void) -{ - _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (slowpath(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -void -_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) -{ - _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) - 
_dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); - if (slowpath(old_sema)) { - return _dispatch_thread_semaphore_dispose(old_sema); - } -} diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index e27f9342f..c8174b6b4 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,32 +29,26 @@ struct dispatch_queue_s; -struct dispatch_sema_notify_s { - struct dispatch_sema_notify_s *volatile dsn_next; - struct dispatch_queue_s *dsn_queue; - void *dsn_ctxt; - void (*dsn_func)(void *); -}; - DISPATCH_CLASS_DECL(semaphore); struct dispatch_semaphore_s { DISPATCH_STRUCT_HEADER(semaphore); - long dsema_value; - long dsema_orig; - size_t dsema_sent_ksignals; -#if USE_MACH_SEM && USE_POSIX_SEM -#error "Too many supported semaphore types" -#elif USE_MACH_SEM +#if USE_MACH_SEM semaphore_t dsema_port; - semaphore_t dsema_waiter_port; #elif USE_POSIX_SEM sem_t dsema_sem; +#elif USE_WIN32_SEM + HANDLE dsema_handle; #else #error "No supported semaphore type" #endif - size_t dsema_group_waiters; - struct dispatch_sema_notify_s *dsema_notify_head; - struct dispatch_sema_notify_s *dsema_notify_tail; + long dsema_orig; + long volatile dsema_value; + union { + long volatile dsema_sent_ksignals; + long volatile dsema_group_waiters; + }; + struct dispatch_continuation_s *volatile dsema_notify_head; + struct dispatch_continuation_s *volatile dsema_notify_tail; }; DISPATCH_CLASS_DECL(group); @@ -64,10 +58,35 @@ size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); typedef uintptr_t _dispatch_thread_semaphore_t; -_dispatch_thread_semaphore_t _dispatch_get_thread_semaphore(void); -void _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t); + +_dispatch_thread_semaphore_t 
_dispatch_thread_semaphore_create(void); +void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t); void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); + +DISPATCH_ALWAYS_INLINE +static inline _dispatch_thread_semaphore_t +_dispatch_get_thread_semaphore(void) +{ + _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (slowpath(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) +{ + _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); + if (slowpath(old_sema)) { + return _dispatch_thread_semaphore_dispose(old_sema); + } +} #endif diff --git a/src/shims.h b/src/shims.h index 73322bea6..32376eea4 100644 --- a/src/shims.h +++ b/src/shims.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -39,6 +39,21 @@ #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif +#if TARGET_OS_WIN32 +#define bzero(ptr,len) memset((ptr), 0, (len)) +#define snprintf _snprintf + +inline size_t strlcpy(char *dst, const char *src, size_t size) { + int res = strlen(dst) + strlen(src) + 1; + if (size > 0) { + size_t n = size - 1; + strncpy(dst, src, n); + dst[n] = 0; + } + return res; +} +#endif // TARGET_OS_WIN32 + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -49,13 +64,17 @@ DISPATCH_NORETURN void __builtin_trap(void); #endif +#if DISPATCH_HW_CONFIG_UP +#define DISPATCH_ATOMIC_UP 1 +#endif + #include "shims/atomic.h" +#include "shims/atomic_sfb.h" #include "shims/tsd.h" #include "shims/hw_config.h" #include "shims/perfmon.h" #include "shims/getprogname.h" -#include "shims/malloc_zone.h" #include "shims/time.h" #ifdef __APPLE__ @@ -65,7 +84,7 @@ void __builtin_trap(void); #define _dispatch_clear_stack(s) do { \ void *a[(s)/sizeof(void*) ? (s)/sizeof(void*) : 1]; \ a[0] = pthread_get_stackaddr_np(pthread_self()); \ - bzero((void*)&a[1], a[0] - (void*)&a[1]); \ + bzero((void*)&a[1], (size_t)(a[0] - (void*)&a[1])); \ } while (0) #else #define _dispatch_clear_stack(s) diff --git a/src/shims/atomic.h b/src/shims/atomic.h index a30c89fe6..2f44775e1 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,129 +27,346 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -/* x86 & cortex-a8 have a 64 byte cacheline */ -#define DISPATCH_CACHELINE_SIZE 64 -#define ROUND_UP_TO_CACHELINE_SIZE(x) \ - (((x) + (DISPATCH_CACHELINE_SIZE - 1)) & ~(DISPATCH_CACHELINE_SIZE - 1)) -#define ROUND_UP_TO_VECTOR_SIZE(x) \ - (((x) + 15) & ~15) -#define DISPATCH_CACHELINE_ALIGN \ - __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) +// generate error during codegen +#define _dispatch_atomic_unimplemented() \ + ({ __asm__(".err unimplemented"); }) -#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) +#pragma mark - +#pragma mark memory_order -#define _dispatch_atomic_barrier() __sync_synchronize() -// see comment in dispatch_once.c -#define dispatch_atomic_maximally_synchronizing_barrier() \ - _dispatch_atomic_barrier() -// assume atomic builtins provide barriers -#define dispatch_atomic_barrier() -#define dispatch_atomic_acquire_barrier() -#define dispatch_atomic_release_barrier() -#define dispatch_atomic_store_barrier() +typedef enum _dispatch_atomic_memory_order +{ + _dispatch_atomic_memory_order_relaxed, + _dispatch_atomic_memory_order_consume, + _dispatch_atomic_memory_order_acquire, + _dispatch_atomic_memory_order_release, + _dispatch_atomic_memory_order_acq_rel, + _dispatch_atomic_memory_order_seq_cst, +} _dispatch_atomic_memory_order; + +#if !DISPATCH_ATOMIC_UP + +#define dispatch_atomic_memory_order_relaxed \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acquire \ + _dispatch_atomic_memory_order_acquire +#define dispatch_atomic_memory_order_release \ + _dispatch_atomic_memory_order_release +#define dispatch_atomic_memory_order_acq_rel \ + _dispatch_atomic_memory_order_acq_rel +#define dispatch_atomic_memory_order_seq_cst \ + _dispatch_atomic_memory_order_seq_cst + +#else // DISPATCH_ATOMIC_UP + +#define dispatch_atomic_memory_order_relaxed \ + 
_dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acquire \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_release \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_acq_rel \ + _dispatch_atomic_memory_order_relaxed +#define dispatch_atomic_memory_order_seq_cst \ + _dispatch_atomic_memory_order_relaxed + +#endif // DISPATCH_ATOMIC_UP + +#if __has_extension(c_generic_selections) +#define _dispatch_atomic_basetypeof(p) \ + typeof(*_Generic((p), \ + int*: (int*)(p), \ + volatile int*: (int*)(p), \ + unsigned int*: (unsigned int*)(p), \ + volatile unsigned int*: (unsigned int*)(p), \ + long*: (long*)(p), \ + volatile long*: (long*)(p), \ + unsigned long*: (unsigned long*)(p), \ + volatile unsigned long*: (unsigned long*)(p), \ + long long*: (long long*)(p), \ + volatile long long*: (long long*)(p), \ + unsigned long long*: (unsigned long long*)(p), \ + volatile unsigned long long*: (unsigned long long*)(p), \ + default: (void**)(p))) +#endif + +#if __has_extension(c_atomic) && __has_extension(c_generic_selections) +#pragma mark - +#pragma mark c11 + +#define _dispatch_atomic_c11_atomic(p) \ + _Generic((p), \ + int*: (_Atomic(int)*)(p), \ + volatile int*: (volatile _Atomic(int)*)(p), \ + unsigned int*: (_Atomic(unsigned int)*)(p), \ + volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \ + long*: (_Atomic(long)*)(p), \ + volatile long*: (volatile _Atomic(long)*)(p), \ + unsigned long*: (_Atomic(unsigned long)*)(p), \ + volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \ + long long*: (_Atomic(long long)*)(p), \ + volatile long long*: (volatile _Atomic(long long)*)(p), \ + unsigned long long*: (_Atomic(unsigned long long)*)(p), \ + volatile unsigned long long*: \ + (volatile _Atomic(unsigned long long)*)(p), \ + default: (volatile _Atomic(void*)*)(p)) + +#define _dispatch_atomic_barrier(m) \ + ({ __c11_atomic_thread_fence(dispatch_atomic_memory_order_##m); }) 
+#define dispatch_atomic_load(p, m) \ + ({ _dispatch_atomic_basetypeof(p) _r = \ + __c11_atomic_load(_dispatch_atomic_c11_atomic(p), \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) +#define dispatch_atomic_store(p, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v); \ + __c11_atomic_store(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); }) +#define dispatch_atomic_xchg(p, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_exchange(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) +#define dispatch_atomic_cmpxchg(p, e, v, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); \ + __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); }) +#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + __c11_atomic_compare_exchange_weak(_dispatch_atomic_c11_atomic(p), \ + &_r, _v, dispatch_atomic_memory_order_##m, \ + dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define _dispatch_atomic_c11_op(p, v, m, o, op) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); }) +#define _dispatch_atomic_c11_op_orig(p, v, m, o, op) \ + ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ + __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ + dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) + +#define dispatch_atomic_add(p, v, m) \ + 
_dispatch_atomic_c11_op((p), (v), m, add, +) +#define dispatch_atomic_add_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, add, +) +#define dispatch_atomic_sub(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, sub, -) +#define dispatch_atomic_sub_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, sub, -) +#define dispatch_atomic_and(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, and, &) +#define dispatch_atomic_and_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, and, &) +#define dispatch_atomic_or(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, or, |) +#define dispatch_atomic_or_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, or, |) +#define dispatch_atomic_xor(p, v, m) \ + _dispatch_atomic_c11_op((p), (v), m, xor, ^) +#define dispatch_atomic_xor_orig(p, v, m) \ + _dispatch_atomic_c11_op_orig((p), (v), m, xor, ^) -#define _dispatch_hardware_pause() __asm__("") -#define _dispatch_debugger() __asm__("trap") +#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) +#pragma mark - +#pragma mark gnu99 -#define dispatch_atomic_cmpxchg(p, e, n) \ - __sync_bool_compare_and_swap((p), (e), (n)) +#define _dispatch_atomic_full_barrier() \ + __sync_synchronize() +#define _dispatch_atomic_barrier(m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + break; \ + default: \ + _dispatch_atomic_full_barrier(); break; \ + } }) +// Only emulate store seq_cst -> load seq_cst +#define dispatch_atomic_load(p, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + case _dispatch_atomic_memory_order_seq_cst: \ + break; \ + default: \ + _dispatch_atomic_unimplemented(); break; \ + }; *(p); }) +#define dispatch_atomic_store(p, v, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_release: \ + _dispatch_atomic_barrier(m); /* fallthrough */ \ + case _dispatch_atomic_memory_order_relaxed: \ + case 
_dispatch_atomic_memory_order_seq_cst: \ + *(p) = (v); break; \ + default:\ + _dispatch_atomic_unimplemented(); break; \ + } switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_seq_cst: \ + _dispatch_atomic_barrier(m); break; \ + default: \ + break; \ + } }) #if __has_builtin(__sync_swap) -#define dispatch_atomic_xchg(p, n) \ - ((typeof(*(p)))__sync_swap((p), (n))) +#define dispatch_atomic_xchg(p, v, m) \ + ((typeof(*(p)))__sync_swap((p), (v))) #else -#define dispatch_atomic_xchg(p, n) \ - ((typeof(*(p)))__sync_lock_test_and_set((p), (n))) +#define dispatch_atomic_xchg(p, v, m) \ + ((typeof(*(p)))__sync_lock_test_and_set((p), (v))) +#endif +#define dispatch_atomic_cmpxchg(p, e, v, m) \ + __sync_bool_compare_and_swap((p), (e), (v)) +#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ + ({ typeof(*(g)) _e = (e), _r = \ + __sync_val_compare_and_swap((p), _e, (v)); \ + bool _b = (_e == _r); *(g) = _r; _b; }) +#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ + dispatch_atomic_cmpxchgv((p), (e), (v), (g), m) + +#define dispatch_atomic_add(p, v, m) \ + __sync_add_and_fetch((p), (v)) +#define dispatch_atomic_add_orig(p, v, m) \ + __sync_fetch_and_add((p), (v)) +#define dispatch_atomic_sub(p, v, m) \ + __sync_sub_and_fetch((p), (v)) +#define dispatch_atomic_sub_orig(p, v, m) \ + __sync_fetch_and_sub((p), (v)) +#define dispatch_atomic_and(p, v, m) \ + __sync_and_and_fetch((p), (v)) +#define dispatch_atomic_and_orig(p, v, m) \ + __sync_fetch_and_and((p), (v)) +#define dispatch_atomic_or(p, v, m) \ + __sync_or_and_fetch((p), (v)) +#define dispatch_atomic_or_orig(p, v, m) \ + __sync_fetch_and_or((p), (v)) +#define dispatch_atomic_xor(p, v, m) \ + __sync_xor_and_fetch((p), (v)) +#define dispatch_atomic_xor_orig(p, v, m) \ + __sync_fetch_and_xor((p), (v)) + +#if defined(__x86_64__) || defined(__i386__) +// GCC emits nothing for __sync_synchronize() on x86_64 & i386 +#undef _dispatch_atomic_full_barrier +#define _dispatch_atomic_full_barrier() \ + ({ 
__asm__ __volatile__( \ + "mfence" \ + : : : "memory"); }) +// xchg is faster than store + mfence +#undef dispatch_atomic_store +#define dispatch_atomic_store(p, v, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_relaxed: \ + case _dispatch_atomic_memory_order_release: \ + *(p) = (v); break; \ + case _dispatch_atomic_memory_order_seq_cst: \ + (void)dispatch_atomic_xchg((p), (v), m); break; \ + default:\ + _dispatch_atomic_unimplemented(); break; \ + } }) #endif -#define dispatch_atomic_add(p, v) __sync_add_and_fetch((p), (v)) -#define dispatch_atomic_sub(p, v) __sync_sub_and_fetch((p), (v)) -#define dispatch_atomic_or(p, v) __sync_fetch_and_or((p), (v)) -#define dispatch_atomic_and(p, v) __sync_fetch_and_and((p), (v)) - -#define dispatch_atomic_inc(p) dispatch_atomic_add((p), 1) -#define dispatch_atomic_dec(p) dispatch_atomic_sub((p), 1) -// really just a low level abort() -#define _dispatch_hardware_crash() __builtin_trap() - -#define dispatch_atomic_cmpxchg2o(p, f, e, n) \ - dispatch_atomic_cmpxchg(&(p)->f, (e), (n)) -#define dispatch_atomic_xchg2o(p, f, n) \ - dispatch_atomic_xchg(&(p)->f, (n)) -#define dispatch_atomic_add2o(p, f, v) \ - dispatch_atomic_add(&(p)->f, (v)) -#define dispatch_atomic_sub2o(p, f, v) \ - dispatch_atomic_sub(&(p)->f, (v)) -#define dispatch_atomic_or2o(p, f, v) \ - dispatch_atomic_or(&(p)->f, (v)) -#define dispatch_atomic_and2o(p, f, v) \ - dispatch_atomic_and(&(p)->f, (v)) -#define dispatch_atomic_inc2o(p, f) \ - dispatch_atomic_add2o((p), f, 1) -#define dispatch_atomic_dec2o(p, f) \ - dispatch_atomic_sub2o((p), f, 1) #else #error "Please upgrade to GCC 4.2 or newer." 
#endif +#pragma mark - +#pragma mark generic + +#define dispatch_hardware_pause() ({ __asm__(""); }) +// assume atomic builtins provide barriers +#define dispatch_atomic_barrier(m) +// see comment in dispatch_once.c +#define dispatch_atomic_maximally_synchronizing_barrier() \ + _dispatch_atomic_barrier(seq_cst) + +#define dispatch_atomic_load2o(p, f, m) \ + dispatch_atomic_load(&(p)->f, m) +#define dispatch_atomic_store2o(p, f, v, m) \ + dispatch_atomic_store(&(p)->f, (v), m) +#define dispatch_atomic_xchg2o(p, f, v, m) \ + dispatch_atomic_xchg(&(p)->f, (v), m) +#define dispatch_atomic_cmpxchg2o(p, f, e, v, m) \ + dispatch_atomic_cmpxchg(&(p)->f, (e), (v), m) +#define dispatch_atomic_cmpxchgv2o(p, f, e, v, g, m) \ + dispatch_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) +#define dispatch_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ + dispatch_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) +#define dispatch_atomic_add2o(p, f, v, m) \ + dispatch_atomic_add(&(p)->f, (v), m) +#define dispatch_atomic_add_orig2o(p, f, v, m) \ + dispatch_atomic_add_orig(&(p)->f, (v), m) +#define dispatch_atomic_sub2o(p, f, v, m) \ + dispatch_atomic_sub(&(p)->f, (v), m) +#define dispatch_atomic_sub_orig2o(p, f, v, m) \ + dispatch_atomic_sub_orig(&(p)->f, (v), m) +#define dispatch_atomic_and2o(p, f, v, m) \ + dispatch_atomic_and(&(p)->f, (v), m) +#define dispatch_atomic_and_orig2o(p, f, v, m) \ + dispatch_atomic_and_orig(&(p)->f, (v), m) +#define dispatch_atomic_or2o(p, f, v, m) \ + dispatch_atomic_or(&(p)->f, (v), m) +#define dispatch_atomic_or_orig2o(p, f, v, m) \ + dispatch_atomic_or_orig(&(p)->f, (v), m) +#define dispatch_atomic_xor2o(p, f, v, m) \ + dispatch_atomic_xor(&(p)->f, (v), m) +#define dispatch_atomic_xor_orig2o(p, f, v, m) \ + dispatch_atomic_xor_orig(&(p)->f, (v), m) + +#define dispatch_atomic_inc(p, m) \ + dispatch_atomic_add((p), 1, m) +#define dispatch_atomic_inc_orig(p, m) \ + dispatch_atomic_add_orig((p), 1, m) +#define dispatch_atomic_inc2o(p, f, m) \ + dispatch_atomic_add2o(p, f, 
1, m) +#define dispatch_atomic_inc_orig2o(p, f, m) \ + dispatch_atomic_add_orig2o(p, f, 1, m) +#define dispatch_atomic_dec(p, m) \ + dispatch_atomic_sub((p), 1, m) +#define dispatch_atomic_dec_orig(p, m) \ + dispatch_atomic_sub_orig((p), 1, m) +#define dispatch_atomic_dec2o(p, f, m) \ + dispatch_atomic_sub2o(p, f, 1, m) +#define dispatch_atomic_dec_orig2o(p, f, m) \ + dispatch_atomic_sub_orig2o(p, f, 1, m) + +#define dispatch_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \ + dispatch_atomic_cmpxchgv((p), (e), (v), (g), acquire) +#define dispatch_atomic_tsx_xrel_store(p, v) \ + dispatch_atomic_store(p, v, release) +#define dispatch_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \ + dispatch_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g)) +#define dispatch_atomic_tsx_xrel_store2o(p, f, v) \ + dispatch_atomic_tsx_xrel_store(&(p)->f, (v)) + #if defined(__x86_64__) || defined(__i386__) +#pragma mark - +#pragma mark x86 + +#undef dispatch_hardware_pause +#define dispatch_hardware_pause() ({ __asm__("pause"); }) -// GCC emits nothing for __sync_synchronize() on x86_64 & i386 -#undef _dispatch_atomic_barrier -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "mfence" \ - : : : "memory") #undef dispatch_atomic_maximally_synchronizing_barrier #ifdef __LP64__ #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) #else #ifdef __llvm__ #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) #else // gcc does not allow 
inline i386 asm to clobber ebx #define dispatch_atomic_maximally_synchronizing_barrier() \ - do { unsigned long _clbr; __asm__ __volatile__( \ - "pushl %%ebx\n\t" \ - "cpuid\n\t" \ - "popl %%ebx" \ - : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory" \ - ); } while(0) + ({ unsigned long _clbr; __asm__ __volatile__( \ + "pushl %%ebx\n\t" \ + "cpuid\n\t" \ + "popl %%ebx" \ + : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) #endif #endif -#undef _dispatch_hardware_pause -#define _dispatch_hardware_pause() __asm__("pause") -#undef _dispatch_debugger -#define _dispatch_debugger() __asm__("int3") -#elif defined(__ppc__) || defined(__ppc64__) - -// GCC emits "sync" for __sync_synchronize() on ppc & ppc64 -#undef _dispatch_atomic_barrier -#ifdef __LP64__ -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "isync\n\t" \ - "lwsync" - : : : "memory") -#else -#define _dispatch_atomic_barrier() \ - __asm__ __volatile__( \ - "isync\n\t" \ - "eieio" \ - : : : "memory") -#endif -#undef dispatch_atomic_maximally_synchronizing_barrier -#define dispatch_atomic_maximally_synchronizing_barrier() \ - __asm__ __volatile__( \ - "sync" \ - : : : "memory") #endif diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h new file mode 100644 index 000000000..c5e7be3f2 --- /dev/null +++ b/src/shims/atomic_sfb.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_SHIMS_ATOMIC_SFB__ +#define __DISPATCH_SHIMS_ATOMIC_SFB__ + +#if __clang__ && __clang_major__ < 5 // +#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) +#endif + +// Returns UINT_MAX if all the bits in p were already set. +#define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m) + +// TODO: rdar://11477843 +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_atomic_set_first_bit(volatile uint32_t *p, unsigned int max_index) +{ + unsigned int index; + typeof(*p) b, mask, b_masked; + + for (;;) { + b = *p; + // ffs returns 1 + index, or 0 if none set. + index = (unsigned int)__builtin_ffs((int)~b); + if (slowpath(index == 0)) { + return UINT_MAX; + } + index--; + if (slowpath(index > max_index)) { + return UINT_MAX; + } + mask = ((typeof(b))1) << index; + b_masked = b | mask; + if (__sync_bool_compare_and_swap(p, b, b_masked)) { + return index; + } + } +} + +#if defined(__x86_64__) || defined(__i386__) + +#undef dispatch_atomic_set_first_bit +// TODO: rdar://11477843 uint64_t -> long +DISPATCH_ALWAYS_INLINE +static inline unsigned int +dispatch_atomic_set_first_bit(volatile uint64_t *p, unsigned int max) +{ + typeof(*p) val, bit; + if (max > (sizeof(val) * 8)) { + __asm__ ( + "1: \n\t" + "mov %[_p], %[_val] \n\t" + "not %[_val] \n\t" + "bsf %[_val], %[_bit] \n\t" /* val is 0 => set zf */ + "jz 2f \n\t" + "lock \n\t" + "bts %[_bit], %[_p] \n\t" /* cf = prev bit val */ + "jc 1b \n\t" /* lost race, retry */ + "jmp 3f \n\t" + "2: \n\t" + "mov %[_all_ones], %[_bit]" "\n\t" + "3: \n\t" + : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) + : 
[_all_ones] "i" ((typeof(bit))UINT_MAX) : "memory", "cc"); + } else { + __asm__ ( + "1: \n\t" + "mov %[_p], %[_val] \n\t" + "not %[_val] \n\t" + "bsf %[_val], %[_bit] \n\t" /* val is 0 => set zf */ + "jz 2f \n\t" + "cmp %[_max], %[_bit] \n\t" + "jg 2f \n\t" + "lock \n\t" + "bts %[_bit], %[_p] \n\t" /* cf = prev bit val */ + "jc 1b \n\t" /* lost race, retry */ + "jmp 3f \n\t" + "2: \n\t" + "mov %[_all_ones], %[_bit]" "\n\t" + "3: \n\t" + : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) + : [_all_ones] "i" ((typeof(bit))UINT_MAX), + [_max] "g" ((typeof(bit))max) : "memory", "cc"); + } + return (unsigned int)bit; +} + +#endif + + +#endif // __DISPATCH_SHIMS_ATOMIC_SFB__ diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 2d9975910..ede0d4800 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -37,8 +37,10 @@ #define DISPATCH_SYSCTL_ACTIVE_CPUS "kern.smp.cpus" #endif +#if !TARGET_OS_WIN32 + static inline uint32_t -_dispatch_get_logicalcpu_max() +_dispatch_get_logicalcpu_max(void) { uint32_t val = 1; #if defined(_COMM_PAGE_LOGICAL_CPUS) @@ -60,7 +62,7 @@ _dispatch_get_logicalcpu_max() } static inline uint32_t -_dispatch_get_physicalcpu_max() +_dispatch_get_physicalcpu_max(void) { uint32_t val = 1; #if defined(_COMM_PAGE_PHYSICAL_CPUS) @@ -82,7 +84,7 @@ _dispatch_get_physicalcpu_max() } static inline uint32_t -_dispatch_get_activecpu() +_dispatch_get_activecpu(void) { uint32_t val = 1; #if defined(_COMM_PAGE_ACTIVE_CPUS) @@ -103,4 +105,32 @@ _dispatch_get_activecpu() return val; } +#else // TARGET_OS_WIN32 + +static inline long +_dispatch_count_bits(unsigned long value) +{ + long bits = 0; + while (value) { + bits += (value & 1); + value = value >> 1; + } + return bits; +} + + +static inline uint32_t +_dispatch_get_ncpus(void) +{ + uint32_t val; + DWORD_PTR procmask, sysmask; + if (GetProcessAffinityMask(GetCurrentProcess(), &procmask, &sysmask)) { + val = _dispatch_count_bits(procmask); + } else { + val = 1; + } + return val; +} 
+#endif // TARGET_OS_WIN32 + #endif /* __DISPATCH_SHIMS_HW_CONFIG__ */ diff --git a/src/shims/malloc_zone.h b/src/shims/malloc_zone.h deleted file mode 100644 index 3975b4feb..000000000 --- a/src/shims/malloc_zone.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright (c) 2009 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __DISPATCH_SHIMS_MALLOC_ZONE__ -#define __DISPATCH_SHIMS_MALLOC_ZONE__ - -#include - -#include - -/* - * Implement malloc zones as a simple wrapper around malloc(3) on systems - * that don't support them. 
- */ -#if !HAVE_MALLOC_CREATE_ZONE -typedef void * malloc_zone_t; - -static inline malloc_zone_t * -malloc_create_zone(size_t start_size, unsigned flags) -{ - - return ((void *)(-1)); -} - -static inline void -malloc_destroy_zone(malloc_zone_t *zone) -{ - -} - -static inline malloc_zone_t * -malloc_default_zone(void) -{ - - return ((void *)(-1)); -} - -static inline malloc_zone_t * -malloc_zone_from_ptr(const void *ptr) -{ - - return ((void *)(-1)); -} - -static inline void * -malloc_zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (malloc(size)); -} - -static inline void * -malloc_zone_calloc(malloc_zone_t *zone, size_t num_items, size_t size) -{ - - return (calloc(num_items, size)); -} - -static inline void * -malloc_zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - return (realloc(ptr, size)); -} - -static inline void -malloc_zone_free(malloc_zone_t *zone, void *ptr) -{ - - free(ptr); -} - -static inline void -malloc_set_zone_name(malloc_zone_t *zone, const char *name) -{ - - /* No-op. */ -} -#endif - -#endif /* __DISPATCH_SHIMS_MALLOC_ZONE__ */ diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index bf5eb2808..f73900689 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,30 +32,30 @@ #if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ (defined(__i386__) || defined(__x86_64__)) #ifdef __LP64__ -#define _dispatch_workitem_inc() asm("incq %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decq %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") #else -#define _dispatch_workitem_inc() asm("incl %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_workitem_dec() asm("decl %%gs:%0" : "+m" \ +#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \ (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ _PTHREAD_TSD_OFFSET)) :: "cc") #endif #else /* !USE_APPLE_TSD_OPTIMIZATIONS */ static inline void -_dispatch_workitem_inc(void) +_dispatch_perfmon_workitem_inc(void) { unsigned long cnt; cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); } static inline void -_dispatch_workitem_dec(void) +_dispatch_perfmon_workitem_dec(void) { unsigned long cnt; cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); @@ -89,9 +89,17 @@ flsll(uint64_t val) } #endif +#define _dispatch_perfmon_start() \ + uint64_t start = _dispatch_absolute_time() +#define _dispatch_perfmon_end() \ + _dispatch_queue_merge_stats(start) #else -#define _dispatch_workitem_inc() -#define _dispatch_workitem_dec() + +#define _dispatch_perfmon_workitem_inc() +#define _dispatch_perfmon_workitem_dec() +#define _dispatch_perfmon_start() +#define _dispatch_perfmon_end() + #endif // 
DISPATCH_PERF_MON #endif diff --git a/src/shims/time.h b/src/shims/time.h index 9ae9160ca..b30b9893c 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -31,18 +31,54 @@ #error "Please #include instead of this file directly." #endif +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_contention_usleep(unsigned int us) +{ +#if HAVE_MACH +#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DISPATCH_CONTENTION, us); +#else + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, ((us-1)/1000)+1); +#endif +#else + usleep(us); +#endif +} + +#if TARGET_OS_WIN32 +static inline unsigned int +sleep(unsigned int seconds) +{ + Sleep(seconds * 1000); // milliseconds + return 0; +} +#endif + uint64_t _dispatch_get_nanoseconds(void); #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME // x86 currently implements mach time in nanoseconds // this is NOT likely to change -#define _dispatch_time_mach2nano(x) ({x;}) -#define _dispatch_time_nano2mach(x) ({x;}) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_mach2nano(uint64_t machtime) +{ + return machtime; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_nano2mach(uint64_t nsec) +{ + return nsec; +} #else typedef struct _dispatch_host_time_data_s { + dispatch_once_t pred; long double frac; bool ratio_1_to_1; - dispatch_once_t pred; } _dispatch_host_time_data_s; extern _dispatch_host_time_data_s _dispatch_host_time_data; void _dispatch_get_host_time_init(void *context); @@ -53,39 +89,48 @@ _dispatch_time_mach2nano(uint64_t machtime) _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - 
return machtime * data->frac; + if (!machtime || slowpath(data->ratio_1_to_1)) { + return machtime; + } + if (machtime >= INT64_MAX) { + return INT64_MAX; + } + long double big_tmp = ((long double)machtime * data->frac) + .5; + if (slowpath(big_tmp >= INT64_MAX)) { + return INT64_MAX; + } + return (uint64_t)big_tmp; } -static inline int64_t -_dispatch_time_nano2mach(int64_t nsec) +static inline uint64_t +_dispatch_time_nano2mach(uint64_t nsec) { _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - if (slowpath(_dispatch_host_time_data.ratio_1_to_1)) { + if (!nsec || slowpath(data->ratio_1_to_1)) { return nsec; } - - long double big_tmp = nsec; - - // Divide by tbi.numer/tbi.denom to convert nsec to Mach absolute time - big_tmp /= data->frac; - - // Clamp to a 64bit signed int - if (slowpath(big_tmp > INT64_MAX)) { + if (nsec >= INT64_MAX) { return INT64_MAX; } - if (slowpath(big_tmp < INT64_MIN)) { - return INT64_MIN; + long double big_tmp = ((long double)nsec / data->frac) + .5; + if (slowpath(big_tmp >= INT64_MAX)) { + return INT64_MAX; } - return big_tmp; + return (uint64_t)big_tmp; } #endif static inline uint64_t _dispatch_absolute_time(void) { -#if !HAVE_MACH_ABSOLUTE_TIME +#if HAVE_MACH_ABSOLUTE_TIME + return mach_absolute_time(); +#elif TARGET_OS_WIN32 + LARGE_INTEGER now; + return QueryPerformanceCounter(&now) ? now.QuadPart : 0; +#else struct timespec ts; int ret; @@ -100,9 +145,8 @@ _dispatch_absolute_time(void) /* XXXRW: Some kind of overflow detection needed? */ return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); -#else - return mach_absolute_time(); -#endif +#endif // HAVE_MACH_ABSOLUTE_TIME } -#endif + +#endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index f300d64b3..2a0ab2290 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. 
+ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -40,11 +40,19 @@ #if DISPATCH_USE_DIRECT_TSD static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE +static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; +#else static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; +#endif static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +#if DISPATCH_INTROSPECTION +static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY5; +#elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5; +#endif DISPATCH_TSD_INLINE static inline void @@ -54,11 +62,20 @@ _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) } #else extern pthread_key_t dispatch_queue_key; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE +#error "Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration" +#else extern pthread_key_t dispatch_sema4_key; +#endif extern pthread_key_t dispatch_cache_key; extern pthread_key_t dispatch_io_key; extern pthread_key_t dispatch_apply_key; +#if DISPATCH_INTROSPECTION +extern pthread_key_t dispatch_introspection_key; +#elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; +#endif + DISPATCH_TSD_INLINE static inline void @@ -96,8 +113,18 @@ _dispatch_thread_getspecific(pthread_key_t k) } #endif // DISPATCH_USE_TSD_BASE -#define _dispatch_thread_self (uintptr_t)pthread_self - -#undef DISPATCH_TSD_INLINE - +#if TARGET_OS_WIN32 +#define _dispatch_thread_self() ((uintptr_t)GetCurrentThreadId()) +#else +#if DISPATCH_USE_DIRECT_TSD +#define _dispatch_thread_self() ((uintptr_t)_dispatch_thread_getspecific( \ + _PTHREAD_TSD_SLOT_PTHREAD_SELF)) +#else +#define _dispatch_thread_self() ((uintptr_t)pthread_self()) #endif 
+#endif + +DISPATCH_TSD_INLINE DISPATCH_CONST +static inline unsigned int +_dispatch_cpu_number(void) +{ diff --git a/src/source.c b/src/source.c index 2b0a9a2a7..067c5baf9 100644 --- a/src/source.c +++ b/src/source.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,22 +26,50 @@ #include static void _dispatch_source_merge_kevent(dispatch_source_t ds, - const struct kevent *ke); -static void _dispatch_kevent_register(dispatch_source_t ds); -static void _dispatch_kevent_unregister(dispatch_source_t ds); + const struct kevent64_s *ke); +static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp); +static void _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg); static bool _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static inline void _dispatch_source_timer_init(void); -static void _dispatch_timer_list_update(dispatch_source_t ds); +static void _dispatch_kevent_drain(struct kevent64_s *ke); +static void _dispatch_kevent_merge(struct kevent64_s *ke); +static void _dispatch_timers_kevent(struct kevent64_s *ke); +static void _dispatch_timers_unregister(dispatch_source_t ds, + dispatch_kevent_t dk); +static void _dispatch_timers_update(dispatch_source_t ds); +static void _dispatch_timer_aggregates_check(void); +static void _dispatch_timer_aggregates_register(dispatch_source_t ds); +static void _dispatch_timer_aggregates_update(dispatch_source_t ds, + unsigned int tidx); +static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, + unsigned int tidx); static inline unsigned long _dispatch_source_timer_data( dispatch_source_refs_t dr, unsigned long prev); +static long _dispatch_kq_update(const struct kevent64_s *); +static void _dispatch_memorystatus_init(void); #if HAVE_MACH +static void _dispatch_mach_host_calendar_change_register(void); +static void 
_dispatch_mach_recv_msg_buf_init(void); static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static void _dispatch_drain_mach_messages(struct kevent *ke); +static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags); +static inline void _dispatch_kevent_mach_portset(struct kevent64_s *ke); +#else +static inline void _dispatch_mach_host_calendar_change_register(void) {} +static inline void _dispatch_mach_recv_msg_buf_init(void) {} #endif +static const char * _evfiltstr(short filt); #if DISPATCH_DEBUG +static void _dispatch_kevent_debug(struct kevent64_s* kev, const char* str); static void _dispatch_kevent_debugger(void *context); +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ + dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) +#else +static inline void +_dispatch_kevent_debug(struct kevent64_s* kev DISPATCH_UNUSED, + const char* str DISPATCH_UNUSED) {} +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif #pragma mark - @@ -53,9 +81,9 @@ dispatch_source_create(dispatch_source_type_t type, unsigned long mask, dispatch_queue_t q) { - const struct kevent *proto_kev = &type->ke; - dispatch_source_t ds = NULL; - dispatch_kevent_t dk = NULL; + const struct kevent64_s *proto_kev = &type->ke; + dispatch_source_t ds; + dispatch_kevent_t dk; // input validation if (type == NULL || (mask & ~type->mask)) { @@ -71,44 +99,49 @@ dispatch_source_create(dispatch_source_type_t type, case EVFILT_FS: #if DISPATCH_USE_VM_PRESSURE case EVFILT_VM: +#endif +#if DISPATCH_USE_MEMORYSTATUS + case EVFILT_MEMORYSTATUS: #endif case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_TIMER: if (handle) { return NULL; } break; + case DISPATCH_EVFILT_TIMER: + if (!!handle ^ !!type->ke.ident) { + return NULL; + } + break; default: break; } - dk = calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - 
dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = dk; - TAILQ_INIT(&dk->dk_sources); - ds = _dispatch_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. _dispatch_queue_init((dispatch_queue_t)ds); - strlcpy(ds->dq_label, "source", sizeof(ds->dq_label)); + ds->dq_label = "source"; - // Dispatch Object - ds->do_ref_cnt++; // the reference the manger queue holds + ds->do_ref_cnt++; // the reference the manager queue holds ds->do_ref_cnt++; // since source is created suspended ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; // The initial target queue is the manager queue, in order to get // the source installed. ds->do_targetq = &_dispatch_mgr_q; - // Dispatch Source - ds->ds_ident_hack = dk->dk_kevent.ident; + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + ds->ds_dkev = dk; ds->ds_pending_data_mask = dk->dk_kevent.fflags; + ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { ds->ds_is_level = true; ds->ds_needs_rearm = true; @@ -116,38 +149,30 @@ dispatch_source_create(dispatch_source_type_t type, // we cheat and use EV_CLEAR to mean a "flag thingy" ds->ds_is_adder = true; } - // Some sources require special processing if (type->init != NULL) { type->init(ds, type, handle, mask, q); } + dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); + if (fastpath(!ds->ds_refs)) { - ds->ds_refs = calloc(1ul, sizeof(struct dispatch_source_refs_s)); - if (slowpath(!ds->ds_refs)) { - goto out_bad; - } + ds->ds_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_source_refs_s)); } ds->ds_refs->dr_source_wref = 
_dispatch_ptr2wref(ds); - dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); // First item on the queue sets the user-specified target queue dispatch_set_target_queue(ds, q); -#if DISPATCH_DEBUG - dispatch_debug(ds, "%s", __func__); -#endif + _dispatch_object_debug(ds, "%s", __func__); return ds; - -out_bad: - free(ds); - free(dk); - return NULL; } void _dispatch_source_dispose(dispatch_source_t ds) { + _dispatch_object_debug(ds, "%s", __func__); free(ds->ds_refs); - _dispatch_queue_dispose((dispatch_queue_t)ds); + _dispatch_queue_destroy(ds); } void @@ -159,16 +184,14 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) void dispatch_source_cancel(dispatch_source_t ds) { -#if DISPATCH_DEBUG - dispatch_debug(ds, "%s", __func__); -#endif + _dispatch_object_debug(ds, "%s", __func__); // Right after we set the cancel flag, someone else // could potentially invoke the source, do the cancelation, // unregister the source, and deallocate it. We would // need to therefore retain/release before setting the bit _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED, relaxed); _dispatch_wakeup(ds); _dispatch_release(ds); } @@ -189,7 +212,7 @@ dispatch_source_get_mask(dispatch_source_t ds) uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { - return (int)ds->ds_ident_hack; + return (unsigned int)ds->ds_ident_hack; } unsigned long @@ -201,9 +224,9 @@ dispatch_source_get_data(dispatch_source_t ds) void dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - struct kevent kev = { + struct kevent64_s kev = { .fflags = (typeof(kev.fflags))val, - .data = val, + .data = (typeof(kev.data))val, }; dispatch_assert( @@ -222,8 +245,6 @@ dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) static void _dispatch_source_set_event_handler2(void *context) { - struct Block_layout *bl = context; - dispatch_source_t ds = 
(dispatch_source_t)_dispatch_queue_get_current(); dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_source_refs_t dr = ds->ds_refs; @@ -231,8 +252,8 @@ _dispatch_source_set_event_handler2(void *context) if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { Block_release(dr->ds_handler_ctxt); } - dr->ds_handler_func = bl ? (void *)bl->invoke : NULL; - dr->ds_handler_ctxt = bl; + dr->ds_handler_func = context ? _dispatch_Block_invoke(context) : NULL; + dr->ds_handler_ctxt = context; ds->ds_handler_is_block = true; } @@ -241,7 +262,7 @@ dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_event_handler2); } #endif /* __BLOCKS__ */ @@ -267,7 +288,7 @@ void dispatch_source_set_event_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_event_handler_f); } @@ -293,7 +314,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_cancel_handler2); } #endif /* __BLOCKS__ */ @@ -318,7 +339,7 @@ void dispatch_source_set_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_cancel_handler_f); } @@ -342,7 +363,7 @@ dispatch_source_set_registration_handler(dispatch_source_t ds, dispatch_block_t handler) { handler = _dispatch_Block_copy(handler); - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + 
_dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_registration_handler2); } #endif /* __BLOCKS__ */ @@ -367,7 +388,7 @@ void dispatch_source_set_registration_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_barrier_async_f((dispatch_queue_t)ds, handler, + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, _dispatch_source_set_registration_handler_f); } @@ -451,7 +472,7 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) return; } dispatch_source_refs_t dr = ds->ds_refs; - prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0); + prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); if (ds->ds_is_level) { ds->ds_data = ~prev; } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { @@ -464,13 +485,33 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) } } +static void +_dispatch_source_kevent_unregister(dispatch_source_t ds) +{ + _dispatch_object_debug(ds, "%s", __func__); + dispatch_kevent_t dk = ds->ds_dkev; + ds->ds_dkev = NULL; + switch (dk->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + _dispatch_timers_unregister(ds, dk); + break; + default: + TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); + _dispatch_kevent_unregister(dk, (uint32_t)ds->ds_pending_data_mask); + break; + } + + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_release(ds); // the retain is done at creation time +} + static void _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) { switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: - // called on manager queue only - return _dispatch_timer_list_update(ds); + return _dispatch_timers_update(ds); case EVFILT_MACHPORT: if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH @@ -478,13 +519,38 @@ _dispatch_source_kevent_resume(dispatch_source_t ds, 
uint32_t new_flags) break; } if (_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { - _dispatch_kevent_unregister(ds); + _dispatch_source_kevent_unregister(ds); } } -dispatch_queue_t -_dispatch_source_invoke(dispatch_source_t ds) +static void +_dispatch_source_kevent_register(dispatch_source_t ds) +{ + dispatch_assert_zero(ds->ds_is_installed); + switch (ds->ds_dkev->dk_kevent.filter) { + case DISPATCH_EVFILT_TIMER: + return _dispatch_timers_update(ds); + } + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags); + TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); + if (do_resume || ds->ds_needs_rearm) { + _dispatch_source_kevent_resume(ds, flags); + } + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_object_debug(ds, "%s", __func__); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_source_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) { + dispatch_source_t ds = dou._ds; + if (slowpath(_dispatch_queue_drain(ds))) { + DISPATCH_CLIENT_CRASH("Sync onto source"); + } + // This function performs all source actions. Each action is responsible // for verifying that it takes place on the appropriate queue. 
If the // current queue is not the correct queue for this action, the correct queue @@ -500,7 +566,8 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_register(ds); + _dispatch_source_kevent_register(ds); + ds->ds_is_installed = true; if (dr->ds_registration_handler) { return ds->do_targetq; } @@ -529,7 +596,7 @@ _dispatch_source_invoke(dispatch_source_t ds) if (dq != &_dispatch_mgr_q) { return &_dispatch_mgr_q; } - _dispatch_kevent_unregister(ds); + _dispatch_source_kevent_unregister(ds); } if (dr->ds_cancel_handler || ds->ds_handler_is_block || ds->ds_registration_is_block) { @@ -555,13 +622,20 @@ _dispatch_source_invoke(dispatch_source_t ds) return &_dispatch_mgr_q; } _dispatch_source_kevent_resume(ds, 0); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); } return NULL; } -bool +DISPATCH_NOINLINE +void +_dispatch_source_invoke(dispatch_source_t ds) +{ + _dispatch_queue_class_invoke(ds, _dispatch_source_invoke2); +} + +unsigned long _dispatch_source_probe(dispatch_source_t ds) { // This function determines whether the source needs to be invoked. @@ -592,128 +666,69 @@ _dispatch_source_probe(dispatch_source_t ds) // The source needs to be rearmed on the manager queue. return true; } - // Nothing to do. - return false; + return (ds->dq_items_tail != NULL); } -#pragma mark - -#pragma mark dispatch_source_kevent - static void -_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent *ke) +_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent64_s *ke) { - struct kevent fake; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { return; } - - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie - // . As a workaround, we simulate an exit event for - // any EVFILT_PROC with an invalid pid . 
- if (ke->flags & EV_ERROR) { - if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - fake = *ke; - fake.flags &= ~EV_ERROR; - fake.fflags = NOTE_EXIT; - fake.data = 0; - ke = &fake; -#if DISPATCH_USE_VM_PRESSURE - } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { - // Memory pressure kevent is not supported on all platforms - // - return; -#endif - } else { - // log the unexpected error - (void)dispatch_assume_zero(ke->data); - return; - } - } - if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set // 10A268 does not fail this assert with EVFILT_READ and a 10 GB file dispatch_assert(ke->data >= 0l); - ds->ds_pending_data = ~ke->data; + dispatch_atomic_store2o(ds, ds_pending_data, ~(unsigned long)ke->data, + relaxed); } else if (ds->ds_is_adder) { - (void)dispatch_atomic_add2o(ds, ds_pending_data, ke->data); + (void)dispatch_atomic_add2o(ds, ds_pending_data, + (unsigned long)ke->data, relaxed); } else if (ke->fflags & ds->ds_pending_data_mask) { (void)dispatch_atomic_or2o(ds, ds_pending_data, - ke->fflags & ds->ds_pending_data_mask); + ke->fflags & ds->ds_pending_data_mask, relaxed); } - // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery if (ds->ds_needs_rearm) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); } _dispatch_wakeup(ds); } -void -_dispatch_source_drain_kevent(struct kevent *ke) -{ - dispatch_kevent_t dk = ke->udata; - dispatch_source_refs_t dri; - -#if DISPATCH_DEBUG - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); -#endif - - dispatch_debug_kevents(ke, 1, __func__); - -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT) { - return _dispatch_drain_mach_messages(ke); - } -#endif - dispatch_assert(dk); - - if (ke->flags & EV_ONESHOT) { - dk->dk_kevent.flags |= EV_ONESHOT; - } - - TAILQ_FOREACH(dri, &dk->dk_sources, 
dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); - } -} - #pragma mark - #pragma mark dispatch_kevent_t +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void _dispatch_kevent_guard(dispatch_kevent_t dk); +static void _dispatch_kevent_unguard(dispatch_kevent_t dk); +#else +static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } +static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } +#endif + static struct dispatch_kevent_s _dispatch_kevent_data_or = { .dk_kevent = { .filter = DISPATCH_EVFILT_CUSTOM_OR, .flags = EV_CLEAR, - .udata = &_dispatch_kevent_data_or, }, .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), }; static struct dispatch_kevent_s _dispatch_kevent_data_add = { .dk_kevent = { .filter = DISPATCH_EVFILT_CUSTOM_ADD, - .udata = &_dispatch_kevent_data_add, }, .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), }; -#if TARGET_OS_EMBEDDED -#define DSL_HASH_SIZE 64u // must be a power of two -#else -#define DSL_HASH_SIZE 256u // must be a power of two -#endif #define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; -static dispatch_once_t __dispatch_kevent_init_pred; - static void -_dispatch_kevent_init(void *context DISPATCH_UNUSED) +_dispatch_kevent_init() { unsigned int i; for (i = 0; i < DSL_HASH_SIZE; i++) { @@ -724,24 +739,28 @@ _dispatch_kevent_init(void *context DISPATCH_UNUSED) &_dispatch_kevent_data_or, dk_list); TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); - - _dispatch_source_timer_init(); + _dispatch_kevent_data_or.dk_kevent.udata = + (uintptr_t)&_dispatch_kevent_data_or; + _dispatch_kevent_data_add.dk_kevent.udata = + (uintptr_t)&_dispatch_kevent_data_add; } static inline uintptr_t -_dispatch_kevent_hash(uintptr_t ident, short filter) +_dispatch_kevent_hash(uint64_t ident, short filter) { - 
uintptr_t value; + uint64_t value; #if HAVE_MACH - value = (filter == EVFILT_MACHPORT ? MACH_PORT_INDEX(ident) : ident); + value = (filter == EVFILT_MACHPORT || + filter == DISPATCH_EVFILT_MACH_NOTIFICATION ? + MACH_PORT_INDEX(ident) : ident); #else value = ident; #endif - return DSL_HASH(value); + return DSL_HASH((uintptr_t)value); } static dispatch_kevent_t -_dispatch_kevent_find(uintptr_t ident, short filter) +_dispatch_kevent_find(uint64_t ident, short filter) { uintptr_t hash = _dispatch_kevent_hash(ident, filter); dispatch_kevent_t dki; @@ -757,57 +776,43 @@ _dispatch_kevent_find(uintptr_t ident, short filter) static void _dispatch_kevent_insert(dispatch_kevent_t dk) { + _dispatch_kevent_guard(dk); uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); - TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); } // Find existing kevents, and merge any new flags if necessary -static void -_dispatch_kevent_register(dispatch_source_t ds) +static bool +_dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) { - dispatch_kevent_t dk; - typeof(dk->dk_kevent.fflags) new_flags; + dispatch_kevent_t dk, ds_dkev = *dkp; + uint32_t new_flags; bool do_resume = false; - if (ds->ds_is_installed) { - return; - } - ds->ds_is_installed = true; - - dispatch_once_f(&__dispatch_kevent_init_pred, - NULL, _dispatch_kevent_init); - - dk = _dispatch_kevent_find(ds->ds_dkev->dk_kevent.ident, - ds->ds_dkev->dk_kevent.filter); - + dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, + ds_dkev->dk_kevent.filter); if (dk) { // If an existing dispatch kevent is found, check to see if new flags // need to be added to the existing kevent - new_flags = ~dk->dk_kevent.fflags & ds->ds_dkev->dk_kevent.fflags; - dk->dk_kevent.fflags |= ds->ds_dkev->dk_kevent.fflags; - free(ds->ds_dkev); - ds->ds_dkev = dk; + new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags; + dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags; + free(ds_dkev); + *dkp = dk; 
do_resume = new_flags; } else { - dk = ds->ds_dkev; + dk = ds_dkev; _dispatch_kevent_insert(dk); new_flags = dk->dk_kevent.fflags; do_resume = true; } - - TAILQ_INSERT_TAIL(&dk->dk_sources, ds->ds_refs, dr_list); - // Re-register the kevent with the kernel if new flags were added // by the dispatch kevent if (do_resume) { dk->dk_kevent.flags |= EV_ADD; } - if (do_resume || ds->ds_needs_rearm) { - _dispatch_source_kevent_resume(ds, new_flags); - } - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED); + *flgp = new_flags; + return do_resume; } static bool @@ -824,6 +829,8 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, #if HAVE_MACH case EVFILT_MACHPORT: return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + case DISPATCH_EVFILT_MACH_NOTIFICATION: + return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); #endif case EVFILT_PROC: if (dk->dk_kevent.flags & EV_ONESHOT) { @@ -831,7 +838,7 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, } // fall through default: - r = _dispatch_update_kq(&dk->dk_kevent); + r = _dispatch_kq_update(&dk->dk_kevent); if (dk->dk_kevent.flags & EV_DISPATCH) { dk->dk_kevent.flags &= ~EV_ADD; } @@ -854,6 +861,9 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) case EVFILT_MACHPORT: _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); break; + case DISPATCH_EVFILT_MACH_NOTIFICATION: + _dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags); + break; #endif case EVFILT_PROC: if (dk->dk_kevent.flags & EV_ONESHOT) { @@ -863,7 +873,8 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) default: if (~dk->dk_kevent.flags & EV_DELETE) { dk->dk_kevent.flags |= EV_DELETE; - _dispatch_update_kq(&dk->dk_kevent); + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + _dispatch_kq_update(&dk->dk_kevent); } break; } @@ -871,220 +882,247 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); 
TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + _dispatch_kevent_unguard(dk); free(dk); } static void -_dispatch_kevent_unregister(dispatch_source_t ds) +_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg) { - dispatch_kevent_t dk = ds->ds_dkev; dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; - ds->ds_dkev = NULL; - - TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); - if (TAILQ_EMPTY(&dk->dk_sources)) { _dispatch_kevent_dispose(dk); } else { TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); - fflags |= (uint32_t)dsi->ds_pending_data_mask; + uint32_t mask = (uint32_t)dsi->ds_pending_data_mask; + fflags |= mask; } - del_flags = (uint32_t)ds->ds_pending_data_mask & ~fflags; + del_flags = flg & ~fflags; if (del_flags) { dk->dk_kevent.flags |= EV_ADD; dk->dk_kevent.fflags = fflags; _dispatch_kevent_resume(dk, 0, del_flags); } } - - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); - ds->ds_needs_rearm = false; // re-arm is pointless and bad now - _dispatch_release(ds); // the retain is done at creation time -} - -#pragma mark - -#pragma mark dispatch_timer - -DISPATCH_CACHELINE_ALIGN -static struct dispatch_kevent_s _dispatch_kevent_timer[] = { - [DISPATCH_TIMER_INDEX_WALL] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_WALL, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL].dk_sources), - }, - [DISPATCH_TIMER_INDEX_MACH] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_MACH, - .filter = DISPATCH_EVFILT_TIMER, - .udata = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH].dk_sources), - }, - [DISPATCH_TIMER_INDEX_DISARM] = { - .dk_kevent = { - .ident = DISPATCH_TIMER_INDEX_DISARM, - .filter = DISPATCH_EVFILT_TIMER, - .udata 
= &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], - }, - .dk_sources = TAILQ_HEAD_INITIALIZER( - _dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM].dk_sources), - }, -}; -// Don't count disarmed timer list -#define DISPATCH_TIMER_COUNT ((sizeof(_dispatch_kevent_timer) \ - / sizeof(_dispatch_kevent_timer[0])) - 1) - -static inline void -_dispatch_source_timer_init(void) -{ - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_WALL)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_WALL], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_MACH)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_MACH], dk_list); - TAILQ_INSERT_TAIL(&_dispatch_sources[DSL_HASH(DISPATCH_TIMER_INDEX_DISARM)], - &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM], dk_list); } -DISPATCH_ALWAYS_INLINE -static inline unsigned int -_dispatch_source_timer_idx(dispatch_source_refs_t dr) +DISPATCH_NOINLINE +static void +_dispatch_kevent_proc_exit(struct kevent64_s *ke) { - return ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK ? - DISPATCH_TIMER_INDEX_WALL : DISPATCH_TIMER_INDEX_MACH; + // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie + // . As a workaround, we simulate an exit event for + // any EVFILT_PROC with an invalid pid . 
+ struct kevent64_s fake; + fake = *ke; + fake.flags &= ~EV_ERROR; + fake.fflags = NOTE_EXIT; + fake.data = 0; + _dispatch_kevent_drain(&fake); } -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now2(unsigned int timer) +DISPATCH_NOINLINE +static void +_dispatch_kevent_error(struct kevent64_s *ke) { - switch (timer) { - case DISPATCH_TIMER_INDEX_MACH: - return _dispatch_absolute_time(); - case DISPATCH_TIMER_INDEX_WALL: - return _dispatch_get_nanoseconds(); - default: - DISPATCH_CRASH("Invalid timer"); + _dispatch_kevent_debug(ke, __func__); + if (ke->data) { + // log the unexpected error + _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + ke->flags & EV_DELETE ? "delete" : + ke->flags & EV_ADD ? "add" : + ke->flags & EV_ENABLE ? "enable" : "monitor", + (int)ke->data); } } -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now(dispatch_source_refs_t dr) +static void +_dispatch_kevent_drain(struct kevent64_s *ke) { - return _dispatch_source_timer_now2(_dispatch_source_timer_idx(dr)); +#if DISPATCH_DEBUG + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); +#endif + if (ke->filter == EVFILT_USER) { + return; + } + if (slowpath(ke->flags & EV_ERROR)) { + if (ke->filter == EVFILT_PROC) { + if (ke->flags & EV_DELETE) { + // Process exited while monitored + return; + } else if (ke->data == ESRCH) { + return _dispatch_kevent_proc_exit(ke); + } +#if DISPATCH_USE_VM_PRESSURE + } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { + // Memory pressure kevent is not supported on all platforms + // + return; +#endif +#if DISPATCH_USE_MEMORYSTATUS + } else if (ke->filter == EVFILT_MEMORYSTATUS && + (ke->data == EINVAL || ke->data == ENOTSUP)) { + // Memory status kevent is not supported on all platforms + return; +#endif + } + return _dispatch_kevent_error(ke); + } + _dispatch_kevent_debug(ke, __func__); + if (ke->filter == EVFILT_TIMER) { + return _dispatch_timers_kevent(ke); 
+ } +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT) { + return _dispatch_kevent_mach_portset(ke); + } +#endif + return _dispatch_kevent_merge(ke); } -// Updates the ordered list of timers based on next fire date for changes to ds. -// Should only be called from the context of _dispatch_mgr_q. +DISPATCH_NOINLINE static void -_dispatch_timer_list_update(dispatch_source_t ds) +_dispatch_kevent_merge(struct kevent64_s *ke) { - dispatch_source_refs_t dr = ds->ds_refs, dri = NULL; + dispatch_kevent_t dk; + dispatch_source_refs_t dri; - dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q); + dk = (void*)ke->udata; + dispatch_assert(dk); - // do not reschedule timers unregistered with _dispatch_kevent_unregister() - if (!ds->ds_dkev) { - return; + if (ke->flags & EV_ONESHOT) { + dk->dk_kevent.flags |= EV_ONESHOT; } - - // Ensure the source is on the global kevent lists before it is removed and - // readded below. - _dispatch_kevent_register(ds); - - TAILQ_REMOVE(&ds->ds_dkev->dk_sources, dr, dr_list); - - // Move timers that are disabled, suspended or have missed intervals to the - // disarmed list, rearm after resume resp. 
source invoke will reenable them - if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || - ds->ds_pending_data) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED); - ds->ds_dkev = &_dispatch_kevent_timer[DISPATCH_TIMER_INDEX_DISARM]; - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, (dispatch_source_refs_t)dr, - dr_list); - return; + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } +} - // change the list if the clock type has changed - ds->ds_dkev = &_dispatch_kevent_timer[_dispatch_source_timer_idx(dr)]; +#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD +static void +_dispatch_kevent_guard(dispatch_kevent_t dk) +{ + guardid_t guard; + const unsigned int guard_flags = GUARD_CLOSE; + int r, fd_flags = 0; + switch (dk->dk_kevent.filter) { + case EVFILT_READ: + case EVFILT_WRITE: + case EVFILT_VNODE: + guard = &dk->dk_kevent; + r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0, + &guard, guard_flags, &fd_flags); + if (slowpath(r == -1)) { + int err = errno; + if (err != EPERM) { + (void)dispatch_assume_zero(err); + } + return; + } + dk->dk_kevent.ext[0] = guard_flags; + dk->dk_kevent.ext[1] = fd_flags; + break; + } +} - TAILQ_FOREACH(dri, &ds->ds_dkev->dk_sources, dr_list) { - if (ds_timer(dri).target == 0 || - ds_timer(dr).target < ds_timer(dri).target) { - break; +static void +_dispatch_kevent_unguard(dispatch_kevent_t dk) +{ + guardid_t guard; + unsigned int guard_flags; + int r, fd_flags; + switch (dk->dk_kevent.filter) { + case EVFILT_READ: + case EVFILT_WRITE: + case EVFILT_VNODE: + guard_flags = (unsigned int)dk->dk_kevent.ext[0]; + if (!guard_flags) { + return; + } + guard = &dk->dk_kevent; + fd_flags = (int)dk->dk_kevent.ext[1]; + r = change_fdguard_np((int)dk->dk_kevent.ident, &guard, + guard_flags, NULL, 0, &fd_flags); + if (slowpath(r == -1)) { + (void)dispatch_assume_zero(errno); + return; } + dk->dk_kevent.ext[0] = 0; + break; } +} +#endif // 
DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD - if (dri) { - TAILQ_INSERT_BEFORE(dri, dr, dr_list); - } else { - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, dr, dr_list); +#pragma mark - +#pragma mark dispatch_source_timer + +#if DISPATCH_USE_DTRACE && DISPATCH_USE_DTRACE_INTROSPECTION +static dispatch_source_refs_t + _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; +#define _dispatch_trace_next_timer_set(x, q) \ + _dispatch_trace_next_timer[(q)] = (x) +#define _dispatch_trace_next_timer_program(d, q) \ + _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) +#define _dispatch_trace_next_timer_wake(q) \ + _dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)]) +#else +#define _dispatch_trace_next_timer_set(x, q) +#define _dispatch_trace_next_timer_program(d, q) +#define _dispatch_trace_next_timer_wake(q) +#endif + +#define _dispatch_source_timer_telemetry_enabled() false + +DISPATCH_NOINLINE +static void +_dispatch_source_timer_telemetry_slow(dispatch_source_t ds, + uintptr_t ident, struct dispatch_timer_source_s *values) +{ + if (_dispatch_trace_timer_configure_enabled()) { + _dispatch_trace_timer_configure(ds, ident, values); } } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_run_timers2(unsigned int timer) +_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident, + struct dispatch_timer_source_s *values) { - dispatch_source_refs_t dr; - dispatch_source_t ds; - uint64_t now, missed; + if (_dispatch_trace_timer_configure_enabled() || + _dispatch_source_timer_telemetry_enabled()) { + _dispatch_source_timer_telemetry_slow(ds, ident, values); + asm(""); // prevent tailcall + } +} - now = _dispatch_source_timer_now2(timer); - while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources))) { - ds = _dispatch_source_from_refs(dr); - // We may find timers on the wrong list due to a pending update from - // dispatch_source_set_timer. Force an update of the list in that case. 
- if (timer != ds->ds_ident_hack) { - _dispatch_timer_list_update(ds); - continue; - } - if (!ds_timer(dr).target) { - // no configured timers on the list - break; - } - if (ds_timer(dr).target > now) { - // Done running timers for now. - break; - } - // Remove timers that are suspended or have missed intervals from the - // list, rearm after resume resp. source invoke will reenable them - if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { - _dispatch_timer_list_update(ds); - continue; - } - // Calculate number of missed intervals. - missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; - if (++missed > INT_MAX) { - missed = INT_MAX; - } - ds_timer(dr).target += missed * ds_timer(dr).interval; - _dispatch_timer_list_update(ds); - ds_timer(dr).last_fire = now; - (void)dispatch_atomic_add2o(ds, ds_pending_data, (int)missed); - _dispatch_wakeup(ds); - } -} +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull -void -_dispatch_run_timers(void) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) { - dispatch_once_f(&__dispatch_kevent_init_pred, - NULL, _dispatch_kevent_init); - - unsigned int i; - for (i = 0; i < DISPATCH_TIMER_COUNT; i++) { - if (!TAILQ_EMPTY(&_dispatch_kevent_timer[i].dk_sources)) { - _dispatch_run_timers2(i); - } + unsigned int tk = DISPATCH_TIMER_KIND(tidx); + if (nows && fastpath(nows[tk])) { + return nows[tk]; + } + uint64_t now; + switch (tk) { + case DISPATCH_TIMER_KIND_MACH: + now = _dispatch_absolute_time(); + break; + case DISPATCH_TIMER_KIND_WALL: + now = _dispatch_get_nanoseconds(); + break; } + if (nows) { + nows[tk] = now; + } + return now; } static inline unsigned long @@ -1092,7 +1130,8 @@ _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) { // calculate the number of intervals since last fire unsigned long data, missed; - uint64_t now = _dispatch_source_timer_now(dr); + uint64_t now; + now = 
_dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr)); missed = (unsigned long)((now - ds_timer(dr).last_fire) / ds_timer(dr).interval); // correct for missed intervals already delivered last time @@ -1101,51 +1140,6 @@ _dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) return data; } -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - -struct timespec * -_dispatch_get_next_timer_fire(struct timespec *howsoon) -{ - // - // kevent(2) does not allow large timeouts, so we use a long timeout - // instead (approximately 1 year). - dispatch_source_refs_t dr = NULL; - unsigned int timer; - uint64_t now, delta_tmp, delta = UINT64_MAX; - - for (timer = 0; timer < DISPATCH_TIMER_COUNT; timer++) { - // Timers are kept in order, first one will fire next - dr = TAILQ_FIRST(&_dispatch_kevent_timer[timer].dk_sources); - if (!dr || !ds_timer(dr).target) { - // Empty list or disabled timer - continue; - } - now = _dispatch_source_timer_now(dr); - if (ds_timer(dr).target <= now) { - howsoon->tv_sec = 0; - howsoon->tv_nsec = 0; - return howsoon; - } - // the subtraction cannot go negative because the previous "if" - // verified that the target is greater than now. 
- delta_tmp = ds_timer(dr).target - now; - if (!(ds_timer(dr).flags & DISPATCH_TIMER_WALL_CLOCK)) { - delta_tmp = _dispatch_time_mach2nano(delta_tmp); - } - if (delta_tmp < delta) { - delta = delta_tmp; - } - } - if (slowpath(delta > FOREVER_NSEC)) { - return NULL; - } else { - howsoon->tv_sec = (time_t)(delta / NSEC_PER_SEC); - howsoon->tv_nsec = (long)(delta % NSEC_PER_SEC); - } - return howsoon; -} - struct dispatch_set_timer_params { dispatch_source_t ds; uintptr_t ident; @@ -1163,9 +1157,15 @@ _dispatch_source_set_timer3(void *context) // Clear any pending data that might have accumulated on // older timer params ds->ds_pending_data = 0; - _dispatch_timer_list_update(ds); + // Re-arm in case we got disarmed because of pending set_timer suspension + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release); dispatch_resume(ds); + // Must happen after resume to avoid getting disarmed due to suspension + _dispatch_timers_update(ds); dispatch_release(ds); + if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) { + _dispatch_mach_host_calendar_change_register(); + } free(params); } @@ -1179,20 +1179,18 @@ _dispatch_source_set_timer2(void *context) _dispatch_source_set_timer3); } -void -dispatch_source_set_timer(dispatch_source_t ds, - dispatch_time_t start, - uint64_t interval, - uint64_t leeway) +DISPATCH_NOINLINE +static struct dispatch_set_timer_params * +_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway) { - if (slowpath(!ds->ds_is_timer)) { - DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); - } - struct dispatch_set_timer_params *params; + params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params)); + params->ds = ds; + params->values.flags = ds_timer(ds->ds_refs).flags; - // we use zero internally to mean disabled if (interval == 0) { + // we use zero internally to mean disabled interval = 1; } else if ((int64_t)interval < 0) { // 6866347 - make sure 
nanoseconds won't overflow @@ -1201,202 +1199,1384 @@ dispatch_source_set_timer(dispatch_source_t ds, if ((int64_t)leeway < 0) { leeway = INT64_MAX; } - if (start == DISPATCH_TIME_NOW) { start = _dispatch_absolute_time(); } else if (start == DISPATCH_TIME_FOREVER) { start = INT64_MAX; } - while (!(params = calloc(1ul, sizeof(struct dispatch_set_timer_params)))) { - sleep(1); - } - - params->ds = ds; - params->values.flags = ds_timer(ds->ds_refs).flags; - if ((int64_t)start < 0) { // wall clock - params->ident = DISPATCH_TIMER_INDEX_WALL; - params->values.target = -((int64_t)start); - params->values.interval = interval; - params->values.leeway = leeway; + start = (dispatch_time_t)-((int64_t)start); params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; } else { // absolute clock - params->ident = DISPATCH_TIMER_INDEX_MACH; - params->values.target = start; - params->values.interval = _dispatch_time_nano2mach(interval); - - // rdar://problem/7287561 interval must be at least one in - // in order to avoid later division by zero when calculating - // the missed interval count. (NOTE: the wall clock's - // interval is already "fixed" to be 1 or more) - if (params->values.interval < 1) { - params->values.interval = 1; + interval = _dispatch_time_nano2mach(interval); + if (interval < 1) { + // rdar://problem/7287561 interval must be at least one in + // in order to avoid later division by zero when calculating + // the missed interval count. (NOTE: the wall clock's + // interval is already "fixed" to be 1 or more) + interval = 1; } + leeway = _dispatch_time_nano2mach(leeway); + params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; + } + params->ident = DISPATCH_TIMER_IDENT(params->values.flags); + params->values.target = start; + params->values.deadline = (start < UINT64_MAX - leeway) ? + start + leeway : UINT64_MAX; + params->values.interval = interval; + params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ? 
+ leeway : interval / 2; + return params; +} - params->values.leeway = _dispatch_time_nano2mach(leeway); - params->values.flags &= ~DISPATCH_TIMER_WALL_CLOCK; +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway, bool source_sync) +{ + if (slowpath(!ds->ds_is_timer) || + slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { + DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); } + + struct dispatch_set_timer_params *params; + params = _dispatch_source_timer_params(ds, start, interval, leeway); + + _dispatch_source_timer_telemetry(ds, params->ident, ¶ms->values); // Suspend the source so that it doesn't fire with pending changes // The use of suspend/resume requires the external retain/release dispatch_retain(ds); - dispatch_barrier_async_f((dispatch_queue_t)ds, params, - _dispatch_source_set_timer2); + if (source_sync) { + return _dispatch_barrier_trysync_f((dispatch_queue_t)ds, params, + _dispatch_source_set_timer2); + } else { + return _dispatch_source_set_timer2(params); + } } -#pragma mark - -#pragma mark dispatch_mach +void +dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, + uint64_t interval, uint64_t leeway) +{ + _dispatch_source_set_timer(ds, start, interval, leeway, true); +} -#if HAVE_MACH +void +_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds, + dispatch_time_t start, uint64_t interval, uint64_t leeway) +{ + // Don't serialize through the source queue for CF timers + _dispatch_source_set_timer(ds, start, interval, leeway, false); +} -#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG -#define _dispatch_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_debug_machport(name) -#endif +void +_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) +{ + dispatch_source_refs_t dr = ds->ds_refs; + #define NSEC_PER_FRAME (NSEC_PER_SEC/60) + const 
bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION; + if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : + FOREVER_NSEC/NSEC_PER_MSEC))) { + interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; + } else { + interval = FOREVER_NSEC; + } + interval = _dispatch_time_nano2mach(interval); + uint64_t target = _dispatch_absolute_time() + interval; + target = (target / interval) * interval; + const uint64_t leeway = animation ? + _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; + ds_timer(dr).target = target; + ds_timer(dr).deadline = target + leeway; + ds_timer(dr).interval = interval; + ds_timer(dr).leeway = leeway; + _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr)); +} -// Flags for all notifications that are registered/unregistered when a -// send-possible notification is requested/delivered -#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ - DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) +#pragma mark - +#pragma mark dispatch_timers + +#define DISPATCH_TIMER_STRUCT(refs) \ + uint64_t target, deadline; \ + TAILQ_HEAD(, refs) dt_sources + +typedef struct dispatch_timer_s { + DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s); +} *dispatch_timer_t; + +#define DISPATCH_TIMER_INITIALIZER(tidx) \ + [tidx] = { \ + .target = UINT64_MAX, \ + .deadline = UINT64_MAX, \ + .dt_sources = TAILQ_HEAD_INITIALIZER( \ + _dispatch_timer[tidx].dt_sources), \ + } +#define DISPATCH_TIMER_INIT(kind, qos) \ + DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_timer_s _dispatch_timer[] = { + DISPATCH_TIMER_INIT(WALL, NORMAL), + DISPATCH_TIMER_INIT(WALL, CRITICAL), + DISPATCH_TIMER_INIT(WALL, BACKGROUND), + DISPATCH_TIMER_INIT(MACH, NORMAL), + DISPATCH_TIMER_INIT(MACH, CRITICAL), + DISPATCH_TIMER_INIT(MACH, BACKGROUND), +}; +#define DISPATCH_TIMER_COUNT \ + ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) + +#define 
DISPATCH_KEVENT_TIMER_UDATA(tidx) \ + (uintptr_t)&_dispatch_kevent_timer[tidx] +#ifdef __LP64__ +#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ + .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) +#else // __LP64__ +// dynamic initialization in _dispatch_timers_init() +#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ + .udata = 0 +#endif // __LP64__ +#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \ + [tidx] = { \ + .dk_kevent = { \ + .ident = tidx, \ + .filter = DISPATCH_EVFILT_TIMER, \ + DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \ + }, \ + .dk_sources = TAILQ_HEAD_INITIALIZER( \ + _dispatch_kevent_timer[tidx].dk_sources), \ + } +#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \ + DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_kevent_s _dispatch_kevent_timer[] = { + DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL), + DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL), + DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND), + DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL), + DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL), + DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND), + DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM), +}; +#define DISPATCH_KEVENT_TIMER_COUNT \ + ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) + +#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) +#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(qos, note) \ + [qos] = { \ + .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(qos), \ + .filter = EVFILT_TIMER, \ + .flags = EV_ONESHOT, \ + .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ + } +#define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \ + DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note) -#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) -#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? 
\ - (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) +struct kevent64_s _dispatch_kevent_timeout[] = { + DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0), + DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND), +}; -#define _DISPATCH_MACHPORT_HASH_SIZE 32 -#define _DISPATCH_MACHPORT_HASH(x) \ - _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) +#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ + [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC -static dispatch_source_t _dispatch_mach_notify_source; -static mach_port_t _dispatch_port_set; -static mach_port_t _dispatch_event_port; +static const uint64_t _dispatch_kevent_coalescing_window[] = { + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), +}; -static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags, uint32_t mask, - mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); +#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \ + typeof(dr) dri = NULL; typeof(dt) dti; \ + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ + TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \ + if (ds_timer(dr).target < ds_timer(dri).target) { \ + break; \ + } \ + } \ + TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \ + if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \ + break; \ + } \ + } \ + if (dti) { \ + TAILQ_INSERT_BEFORE(dti, dt, dt_list); \ + } else { \ + TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \ + } \ + } \ + if (dri) { \ + TAILQ_INSERT_BEFORE(dri, dr, dr_list); \ + } else { \ + TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \ + } \ + }) + +#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \ + ({ \ + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ + TAILQ_REMOVE(&dta[tidx].dt_sources, 
dt, dt_list); \ + } \ + TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \ + dr_list); }) + +#define _dispatch_timers_check(dra, dta) ({ \ + unsigned int qosm = _dispatch_timers_qos_mask; \ + bool update = false; \ + unsigned int tidx; \ + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ + if (!(qosm & 1 << DISPATCH_TIMER_QOS(tidx))){ \ + continue; \ + } \ + dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ + TAILQ_FIRST(&dra[tidx].dk_sources); \ + dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \ + TAILQ_FIRST(&dta[tidx].dt_sources); \ + uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \ + uint64_t deadline = dr ? ds_timer(dt).deadline : UINT64_MAX; \ + if (target != dta[tidx].target) { \ + dta[tidx].target = target; \ + update = true; \ + } \ + if (deadline != dta[tidx].deadline) { \ + dta[tidx].deadline = deadline; \ + update = true; \ + } \ + } \ + update; }) + +static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; +static unsigned int _dispatch_timers_qos_mask; +static bool _dispatch_timers_force_max_leeway; static void -_dispatch_port_set_init(void *context DISPATCH_UNUSED) +_dispatch_timers_init(void) { - struct kevent kev = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD, - }; - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_port_set); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_allocate() failed", kr); - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create port set"); - } - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_event_port); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_allocate() failed", kr); - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create receive right"); - } - kr = mach_port_move_member(mach_task_self(), 
_dispatch_event_port, - _dispatch_port_set); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - _dispatch_bug_mach_client( - "_dispatch_port_set_init: mach_port_move_member() failed", kr); - DISPATCH_CLIENT_CRASH("mach_port_move_member() failed"); +#ifndef __LP64__ + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + _dispatch_kevent_timer[tidx].dk_kevent.udata = \ + DISPATCH_KEVENT_TIMER_UDATA(tidx); } +#endif // __LP64__ + _dispatch_timers_force_max_leeway = + getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"); +} - kev.ident = _dispatch_port_set; +static inline void +_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) +{ + dispatch_source_refs_t dr = ds->ds_refs; + unsigned int tidx = (unsigned int)dk->dk_kevent.ident; - _dispatch_update_kq(&kev); + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_unregister(ds, tidx); + } + _dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list, + _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + _dispatch_timers_reconfigure = true; + _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + } } -static mach_port_t -_dispatch_get_port_set(void) +// Updates the ordered list of timers based on next fire date for changes to ds. +// Should only be called from the context of _dispatch_mgr_q. +static void +_dispatch_timers_update(dispatch_source_t ds) { - static dispatch_once_t pred; + dispatch_kevent_t dk = ds->ds_dkev; + dispatch_source_refs_t dr = ds->ds_refs; + unsigned int tidx; - dispatch_once_f(&pred, NULL, _dispatch_port_set_init); + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - return _dispatch_port_set; + // Do not reschedule timers unregistered with _dispatch_kevent_unregister() + if (slowpath(!dk)) { + return; + } + // Move timers that are disabled, suspended or have missed intervals to the + // disarmed list, rearm after resume resp. 
source invoke will reenable them + if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || + ds->ds_pending_data) { + tidx = DISPATCH_TIMER_INDEX_DISARM; + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + } else { + tidx = _dispatch_source_timer_idx(dr); + } + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_register(ds); + } + if (slowpath(!ds->ds_is_installed)) { + ds->ds_is_installed = true; + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + } + free(dk); + _dispatch_object_debug(ds, "%s", __func__); + } else { + _dispatch_timers_unregister(ds, dk); + } + if (tidx != DISPATCH_TIMER_INDEX_DISARM) { + _dispatch_timers_reconfigure = true; + _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + } + if (dk != &_dispatch_kevent_timer[tidx]){ + ds->ds_dkev = &_dispatch_kevent_timer[tidx]; + } + _dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list, + _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); + if (slowpath(ds_timer_aggregate(ds))) { + _dispatch_timer_aggregates_update(ds, tidx); + } } -static kern_return_t -_dispatch_kevent_machport_enable(dispatch_kevent_t dk) +static inline void +_dispatch_timers_run2(uint64_t nows[], unsigned int tidx) { - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; + dispatch_source_refs_t dr; + dispatch_source_t ds; + uint64_t now, missed; - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, _dispatch_get_port_set()); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - case KERN_INVALID_RIGHT: - _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " - "mach_port_move_member() failed ", kr); + now = _dispatch_source_timer_now(nows, tidx); + while ((dr = 
TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) { + ds = _dispatch_source_from_refs(dr); + // We may find timers on the wrong list due to a pending update from + // dispatch_source_set_timer. Force an update of the list in that case. + if (tidx != ds->ds_ident_hack) { + _dispatch_timers_update(ds); + continue; + } + if (!ds_timer(dr).target) { + // No configured timers on the list break; - default: - (void)dispatch_assume_zero(kr); + } + if (ds_timer(dr).target > now) { + // Done running timers for now. break; } + // Remove timers that are suspended or have missed intervals from the + // list, rearm after resume resp. source invoke will reenable them + if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { + _dispatch_timers_update(ds); + continue; + } + // Calculate number of missed intervals. + missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; + if (++missed > INT_MAX) { + missed = INT_MAX; + } + if (ds_timer(dr).interval < INT64_MAX) { + ds_timer(dr).target += missed * ds_timer(dr).interval; + ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway; + } else { + ds_timer(dr).target = UINT64_MAX; + ds_timer(dr).deadline = UINT64_MAX; + } + _dispatch_timers_update(ds); + ds_timer(dr).last_fire = now; + + unsigned long data; + data = dispatch_atomic_add2o(ds, ds_pending_data, + (unsigned long)missed, relaxed); + _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); + _dispatch_wakeup(ds); } - return kr; } +DISPATCH_NOINLINE static void -_dispatch_kevent_machport_disable(dispatch_kevent_t dk) +_dispatch_timers_run(uint64_t nows[]) { - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) { + _dispatch_timers_run2(nows, tidx); + } + } +} - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, 0); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - 
switch (kr) { - case KERN_INVALID_RIGHT: - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - default: - (void)dispatch_assume_zero(kr); +static inline unsigned int +_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], + uint64_t *delay, uint64_t *leeway, int qos) +{ + unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; + uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; + + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ + continue; + } + uint64_t target = timer[tidx].target; + if (target == UINT64_MAX) { + continue; + } + uint64_t deadline = timer[tidx].deadline; + if (qos >= 0) { + // Timer pre-coalescing + uint64_t window = _dispatch_kevent_coalescing_window[qos]; + uint64_t latest = deadline > window ? deadline - window : 0; + dispatch_source_refs_t dri; + TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources, + dr_list) { + tmp = ds_timer(dri).target; + if (tmp > latest) break; + target = tmp; + } + } + uint64_t now = _dispatch_source_timer_now(nows, tidx); + if (target <= now) { + delta = 0; break; } + tmp = target - now; + if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < INT64_MAX && tmp < delta) { + ridx = tidx; + delta = tmp; + } + dispatch_assert(target <= deadline); + tmp = deadline - now; + if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < INT64_MAX && tmp < dldelta) { + dldelta = tmp; + } } + *delay = delta; + *leeway = delta && delta < UINT64_MAX ? 
dldelta - delta : UINT64_MAX; + return ridx; } -kern_return_t -_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) +static bool +_dispatch_timers_program2(uint64_t nows[], struct kevent64_s *ke, + unsigned int qos) { - kern_return_t kr_recv = 0, kr_sp = 0; + unsigned int tidx; + bool poll; + uint64_t delay, leeway; + + tidx = _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, + (int)qos); + poll = (delay == 0); + if (poll || delay == UINT64_MAX) { + _dispatch_trace_next_timer_set(NULL, qos); + if (!ke->data) { + return poll; + } + ke->data = 0; + ke->flags |= EV_DELETE; + ke->flags &= ~(EV_ADD|EV_ENABLE); + } else { + _dispatch_trace_next_timer_set( + TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), qos); + _dispatch_trace_next_timer_program(delay, qos); + delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + if (slowpath(_dispatch_timers_force_max_leeway)) { + ke->data = (int64_t)(delay + leeway); + ke->ext[1] = 0; + } else { + ke->data = (int64_t)delay; + ke->ext[1] = leeway; + } + ke->flags |= EV_ADD|EV_ENABLE; + ke->flags &= ~EV_DELETE; + } + _dispatch_kq_update(ke); + return poll; +} - dispatch_assert_zero(new_flags & del_flags); - if (new_flags & DISPATCH_MACH_RECV_MESSAGE) { - kr_recv = _dispatch_kevent_machport_enable(dk); - } else if (del_flags & DISPATCH_MACH_RECV_MESSAGE) { - _dispatch_kevent_machport_disable(dk); +DISPATCH_NOINLINE +static bool +_dispatch_timers_program(uint64_t nows[]) +{ + bool poll = false; + unsigned int qos, qosm = _dispatch_timers_qos_mask; + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + if (!(qosm & 1 << qos)){ + continue; + } + poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[qos], + qos); } - if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || + return poll; +} + +DISPATCH_NOINLINE +static bool +_dispatch_timers_configure(void) +{ + _dispatch_timer_aggregates_check(); + // Find out if there is a new target/deadline on the 
timer lists + return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); +} + +static void +_dispatch_timers_calendar_change(void) +{ + // calendar change may have gone past the wallclock deadline + _dispatch_timer_expired = true; + _dispatch_timers_qos_mask = ~0u; +} + +static void +_dispatch_timers_kevent(struct kevent64_s *ke) +{ + dispatch_assert(ke->data > 0); + dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == + DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); + unsigned int qos = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); + dispatch_assert(_dispatch_kevent_timeout[qos].data); + _dispatch_kevent_timeout[qos].data = 0; // kevent deleted via EV_ONESHOT + _dispatch_timer_expired = true; + _dispatch_timers_qos_mask |= 1 << qos; + _dispatch_trace_next_timer_wake(qos); +} + +static inline bool +_dispatch_mgr_timers(void) +{ + uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {}; + bool expired = slowpath(_dispatch_timer_expired); + if (expired) { + _dispatch_timers_run(nows); + } + bool reconfigure = slowpath(_dispatch_timers_reconfigure); + if (reconfigure || expired) { + if (reconfigure) { + reconfigure = _dispatch_timers_configure(); + _dispatch_timers_reconfigure = false; + } + if (reconfigure || expired) { + expired = _dispatch_timer_expired = _dispatch_timers_program(nows); + expired = expired || _dispatch_mgr_q.dq_items_tail; + } + _dispatch_timers_qos_mask = 0; + } + return expired; +} + +#pragma mark - +#pragma mark dispatch_timer_aggregate + +typedef struct { + TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources; +} dispatch_timer_aggregate_refs_s; + +typedef struct dispatch_timer_aggregate_s { + DISPATCH_STRUCT_HEADER(queue); + DISPATCH_QUEUE_HEADER; + TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; + dispatch_timer_aggregate_refs_s + dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; + struct { + DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s); + } 
dta_timer[DISPATCH_TIMER_COUNT]; + struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; + unsigned int dta_refcount; +} dispatch_timer_aggregate_s; + +typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; +static dispatch_timer_aggregates_s _dispatch_timer_aggregates = + TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates); + +dispatch_timer_aggregate_t +dispatch_timer_aggregate_create(void) +{ + unsigned int tidx; + dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), + sizeof(struct dispatch_timer_aggregate_s)); + _dispatch_queue_init((dispatch_queue_t)dta); + dta->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, + true); + dta->dq_width = UINT32_MAX; + //FIXME: aggregates need custom vtable + //dta->dq_label = "timer-aggregate"; + for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { + TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources); + } + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + TAILQ_INIT(&dta->dta_timer[tidx].dt_sources); + dta->dta_timer[tidx].target = UINT64_MAX; + dta->dta_timer[tidx].deadline = UINT64_MAX; + dta->dta_timer_data[tidx].target = UINT64_MAX; + dta->dta_timer_data[tidx].deadline = UINT64_MAX; + } + return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( + (dispatch_queue_t)dta); +} + +typedef struct dispatch_timer_delay_s { + dispatch_timer_t timer; + uint64_t delay, leeway; +} *dispatch_timer_delay_t; + +static void +_dispatch_timer_aggregate_get_delay(void *ctxt) +{ + dispatch_timer_delay_t dtd = ctxt; + struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; + _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, + -1); +} + +uint64_t +dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, + uint64_t *leeway_ptr) +{ + struct dispatch_timer_delay_s dtd = { + .timer = dta->dta_timer_data, + }; + dispatch_sync_f((dispatch_queue_t)dta, &dtd, + _dispatch_timer_aggregate_get_delay); + if 
(leeway_ptr) { + *leeway_ptr = dtd.leeway; + } + return dtd.delay; +} + +static void +_dispatch_timer_aggregate_update(void *ctxt) +{ + dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current(); + dispatch_timer_t dtau = ctxt; + unsigned int tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + dta->dta_timer_data[tidx].target = dtau[tidx].target; + dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline; + } + free(dtau); +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_configure(void) +{ + dispatch_timer_aggregate_t dta; + dispatch_timer_t dtau; + TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) { + if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) { + continue; + } + dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); + memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); + dispatch_barrier_async_f((dispatch_queue_t)dta, dtau, + _dispatch_timer_aggregate_update); + } +} + +static inline void +_dispatch_timer_aggregates_check(void) +{ + if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) { + return; + } + _dispatch_timer_aggregates_configure(); +} + +static void +_dispatch_timer_aggregates_register(dispatch_source_t ds) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + if (!dta->dta_refcount++) { + TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + dispatch_timer_source_aggregate_refs_t dr; + dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; + _dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list, + dta->dta_timer, dr, dta_list); +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) +{ + dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); + dispatch_timer_source_aggregate_refs_t dr; + dr = 
(dispatch_timer_source_aggregate_refs_t)ds->ds_refs; + _dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL, + dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list); + if (!--dta->dta_refcount) { + TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list); + } +} + +#pragma mark - +#pragma mark dispatch_select + +static int _dispatch_kq; + +static unsigned int _dispatch_select_workaround; +static fd_set _dispatch_rfds; +static fd_set _dispatch_wfds; +static uint64_t*_dispatch_rfd_ptrs; +static uint64_t*_dispatch_wfd_ptrs; + +DISPATCH_NOINLINE +static bool +_dispatch_select_register(struct kevent64_s *kev) +{ + + // Must execute on manager queue + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + + // If an EINVAL or ENOENT error occurred while adding/enabling a read or + // write kevent, assume it was due to a type of filedescriptor not + // supported by kqueue and fall back to select + switch (kev->filter) { + case EVFILT_READ: + if ((kev->data == EINVAL || kev->data == ENOENT) && + dispatch_assume(kev->ident < FD_SETSIZE)) { + FD_SET((int)kev->ident, &_dispatch_rfds); + if (slowpath(!_dispatch_rfd_ptrs)) { + _dispatch_rfd_ptrs = _dispatch_calloc(FD_SETSIZE, + sizeof(*_dispatch_rfd_ptrs)); + } + if (!_dispatch_rfd_ptrs[kev->ident]) { + _dispatch_rfd_ptrs[kev->ident] = kev->udata; + _dispatch_select_workaround++; + _dispatch_debug("select workaround used to read fd %d: 0x%lx", + (int)kev->ident, (long)kev->data); + } + } + return true; + case EVFILT_WRITE: + if ((kev->data == EINVAL || kev->data == ENOENT) && + dispatch_assume(kev->ident < FD_SETSIZE)) { + FD_SET((int)kev->ident, &_dispatch_wfds); + if (slowpath(!_dispatch_wfd_ptrs)) { + _dispatch_wfd_ptrs = _dispatch_calloc(FD_SETSIZE, + sizeof(*_dispatch_wfd_ptrs)); + } + if (!_dispatch_wfd_ptrs[kev->ident]) { + _dispatch_wfd_ptrs[kev->ident] = kev->udata; + _dispatch_select_workaround++; + _dispatch_debug("select workaround used to write fd %d: 0x%lx", + (int)kev->ident, (long)kev->data); + } + } 
+ return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_select_unregister(const struct kevent64_s *kev) +{ + // Must execute on manager queue + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + + switch (kev->filter) { + case EVFILT_READ: + if (_dispatch_rfd_ptrs && kev->ident < FD_SETSIZE && + _dispatch_rfd_ptrs[kev->ident]) { + FD_CLR((int)kev->ident, &_dispatch_rfds); + _dispatch_rfd_ptrs[kev->ident] = 0; + _dispatch_select_workaround--; + return true; + } + break; + case EVFILT_WRITE: + if (_dispatch_wfd_ptrs && kev->ident < FD_SETSIZE && + _dispatch_wfd_ptrs[kev->ident]) { + FD_CLR((int)kev->ident, &_dispatch_wfds); + _dispatch_wfd_ptrs[kev->ident] = 0; + _dispatch_select_workaround--; + return true; + } + break; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mgr_select(bool poll) +{ + static const struct timeval timeout_immediately = { 0, 0 }; + fd_set tmp_rfds, tmp_wfds; + struct kevent64_s kev; + int err, i, r; + bool kevent_avail = false; + + FD_COPY(&_dispatch_rfds, &tmp_rfds); + FD_COPY(&_dispatch_wfds, &tmp_wfds); + + r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, + poll ? 
(struct timeval*)&timeout_immediately : NULL); + if (slowpath(r == -1)) { + err = errno; + if (err != EBADF) { + if (err != EINTR) { + (void)dispatch_assume_zero(err); + } + return false; + } + for (i = 0; i < FD_SETSIZE; i++) { + if (i == _dispatch_kq) { + continue; + } + if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)){ + continue; + } + r = dup(i); + if (dispatch_assume(r != -1)) { + close(r); + } else { + if (_dispatch_rfd_ptrs && _dispatch_rfd_ptrs[i]) { + FD_CLR(i, &_dispatch_rfds); + _dispatch_rfd_ptrs[i] = 0; + _dispatch_select_workaround--; + } + if (_dispatch_wfd_ptrs && _dispatch_wfd_ptrs[i]) { + FD_CLR(i, &_dispatch_wfds); + _dispatch_wfd_ptrs[i] = 0; + _dispatch_select_workaround--; + } + } + } + return false; + } + if (r > 0) { + for (i = 0; i < FD_SETSIZE; i++) { + if (FD_ISSET(i, &tmp_rfds)) { + if (i == _dispatch_kq) { + kevent_avail = true; + continue; + } + FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH + EV_SET64(&kev, i, EVFILT_READ, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_rfd_ptrs[i], 0, 0); + _dispatch_kevent_drain(&kev); + } + if (FD_ISSET(i, &tmp_wfds)) { + FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH + EV_SET64(&kev, i, EVFILT_WRITE, + EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, + _dispatch_wfd_ptrs[i], 0, 0); + _dispatch_kevent_drain(&kev); + } + } + } + return kevent_avail; +} + +#pragma mark - +#pragma mark dispatch_kqueue + +static void +_dispatch_kq_init(void *context DISPATCH_UNUSED) +{ + static const struct kevent64_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + }; + + _dispatch_safe_fork = false; +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)&kev; + _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); +#else + _dispatch_kq = kqueue(); +#endif + if (_dispatch_kq == -1) { + DISPATCH_CLIENT_CRASH("kqueue() create failed: " + "probably out of file descriptors"); + } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { + // in case we fall back 
to select() + FD_SET(_dispatch_kq, &_dispatch_rfds); + } + + (void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0, + NULL)); + _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); +} + +static int +_dispatch_get_kq(void) +{ + static dispatch_once_t pred; + + dispatch_once_f(&pred, NULL, _dispatch_kq_init); + + return _dispatch_kq; +} + +DISPATCH_NOINLINE +static long +_dispatch_kq_update(const struct kevent64_s *kev) +{ + int r; + struct kevent64_s kev_copy; + + if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) { + if (_dispatch_select_unregister(kev)) { + return 0; + } + } + kev_copy = *kev; + // This ensures we don't get a pending kevent back while registering + // a new kevent + kev_copy.flags |= EV_RECEIPT; +retry: + r = dispatch_assume(kevent64(_dispatch_get_kq(), &kev_copy, 1, + &kev_copy, 1, 0, NULL)); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + return err; + } + switch (kev_copy.data) { + case 0: + return 0; + case EBADF: + case EPERM: + case EINVAL: + case ENOENT: + if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) { + if (_dispatch_select_register(&kev_copy)) { + return 0; + } + } + // fall through + default: + kev_copy.flags |= kev->flags; + _dispatch_kevent_drain(&kev_copy); + break; + } + return (long)kev_copy.data; +} + +#pragma mark - +#pragma mark dispatch_mgr + +static struct kevent64_s *_dispatch_kevent_enable; + +static void inline +_dispatch_mgr_kevent_reenable(struct kevent64_s *ke) +{ + dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke); + _dispatch_kevent_enable = ke; +} + +unsigned long +_dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) +{ + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + return false; + } + + static const struct 
kevent64_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + }; + +#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG + _dispatch_debug("waking up the dispatch manager queue: %p", dq); +#endif + + _dispatch_kq_update(&kev); + + return false; +} + +DISPATCH_NOINLINE +static void +_dispatch_mgr_init(void) +{ + (void)dispatch_atomic_inc2o(&_dispatch_mgr_q, dq_running, relaxed); + _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); + _dispatch_queue_set_bound_thread(&_dispatch_mgr_q); + _dispatch_mgr_priority_init(); + _dispatch_kevent_init(); + _dispatch_timers_init(); + _dispatch_mach_recv_msg_buf_init(); + _dispatch_memorystatus_init(); +} + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_mgr_invoke(void) +{ + static const struct timespec timeout_immediately = { 0, 0 }; + struct kevent64_s kev; + bool poll; + int r; + + for (;;) { + _dispatch_mgr_queue_drain(); + poll = _dispatch_mgr_timers(); + if (slowpath(_dispatch_select_workaround)) { + poll = _dispatch_mgr_select(poll); + if (!poll) continue; + } + r = kevent64(_dispatch_kq, _dispatch_kevent_enable, + _dispatch_kevent_enable ? 1 : 0, &kev, 1, 0, + poll ? 
&timeout_immediately : NULL); + _dispatch_kevent_enable = NULL; + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + break; + case EBADF: + DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + } else if (r) { + _dispatch_kevent_drain(&kev); + } + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) +{ + _dispatch_mgr_init(); + // never returns, so burn bridges behind us & clear stack 2k ahead + _dispatch_clear_stack(2048); + _dispatch_mgr_invoke(); +} + +#pragma mark - +#pragma mark dispatch_memorystatus + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE +#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYSTATUS +#define DISPATCH_MEMORYSTATUS_SOURCE_MASK ( \ + DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL | \ + DISPATCH_MEMORYSTATUS_PRESSURE_WARN) +#elif DISPATCH_USE_VM_PRESSURE_SOURCE +#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM +#define DISPATCH_MEMORYSTATUS_SOURCE_MASK DISPATCH_VM_PRESSURE +#endif + +#if DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static dispatch_source_t _dispatch_memorystatus_source; + +static void +_dispatch_memorystatus_handler(void *context DISPATCH_UNUSED) +{ +#if DISPATCH_USE_MEMORYSTATUS_SOURCE + unsigned long memorystatus; + memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source); + if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) { + _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; + return; + } + _dispatch_continuation_cache_limit = + DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN; +#endif + malloc_zone_pressure_relief(0,0); +} + +static void +_dispatch_memorystatus_init(void) +{ + _dispatch_memorystatus_source = dispatch_source_create( + DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0, + DISPATCH_MEMORYSTATUS_SOURCE_MASK, + _dispatch_get_root_queue(0, true)); + 
dispatch_source_set_event_handler_f(_dispatch_memorystatus_source, + _dispatch_memorystatus_handler); + dispatch_resume(_dispatch_memorystatus_source); +} +#else +static inline void _dispatch_memorystatus_init(void) {} +#endif // DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE + +#pragma mark - +#pragma mark dispatch_mach + +#if HAVE_MACH + +#if DISPATCH_DEBUG && DISPATCH_MACHPORT_DEBUG +#define _dispatch_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_debug_machport(name) ((void)(name)) +#endif + +// Flags for all notifications that are registered/unregistered when a +// send-possible notification is requested/delivered +#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ + DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) +#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) +#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) + +#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) +#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? 
\ + (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) + +#define _DISPATCH_MACHPORT_HASH_SIZE 32 +#define _DISPATCH_MACHPORT_HASH(x) \ + _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) + +#ifndef MACH_RCV_LARGE_IDENTITY +#define MACH_RCV_LARGE_IDENTITY 0x00000008 +#endif +#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +#define DISPATCH_MACH_RCV_OPTIONS ( \ + MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) + +#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) + +static void _dispatch_kevent_machport_drain(struct kevent64_s *ke); +static void _dispatch_kevent_mach_msg_drain(struct kevent64_s *ke); +static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr); +static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, + dispatch_source_refs_t dr, dispatch_kevent_t dk, + mach_msg_header_t *hdr, mach_msg_size_t siz); +static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, + uint32_t new_flags, uint32_t del_flags, uint32_t mask, + mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); +static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); +static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, bool disconnected); +static void _dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, + mach_msg_size_t siz); +static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, + const struct kevent64_s *ke); +static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); + +static const size_t _dispatch_mach_recv_msg_size = + DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; +static const size_t dispatch_mach_trailer_size = + sizeof(dispatch_mach_trailer_t); +static const size_t _dispatch_mach_recv_msg_buf_size = mach_vm_round_page( + 
_dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); +static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; +static mach_port_t _dispatch_mach_notify_port; +static struct kevent64_s _dispatch_mach_recv_kevent = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RCV_OPTIONS, +}; +static dispatch_source_t _dispatch_mach_notify_source; +static const +struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { + .ke = { + .filter = EVFILT_MACHPORT, + .flags = EV_CLEAR, + .fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT, + }, +}; + +static void +_dispatch_mach_recv_msg_buf_init(void) +{ + mach_vm_size_t vm_size = _dispatch_mach_recv_msg_buf_size; + mach_vm_address_t vm_addr = vm_page_size; + kern_return_t kr; + + while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE))) { + if (kr != KERN_NO_SPACE) { + (void)dispatch_assume_zero(kr); + DISPATCH_CLIENT_CRASH("Could not allocate mach msg receive buffer"); + } + _dispatch_temporary_resource_shortage(); + vm_addr = vm_page_size; + } + _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; + _dispatch_mach_recv_kevent.ext[1] = _dispatch_mach_recv_msg_buf_size; +} + +static inline void* +_dispatch_get_mach_recv_msg_buf(void) +{ + return (void*)_dispatch_mach_recv_kevent.ext[0]; +} + +static void +_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, + &_dispatch_mach_recv_portset); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create port set"); + } + dispatch_assert(_dispatch_get_mach_recv_msg_buf()); + dispatch_assert(dispatch_mach_trailer_size == + REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( + DISPATCH_MACH_RCV_TRAILER))); + _dispatch_mach_recv_kevent.ident = _dispatch_mach_recv_portset; + 
_dispatch_kq_update(&_dispatch_mach_recv_kevent); + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create receive right"); + } + _dispatch_mach_notify_source = dispatch_source_create( + &_dispatch_source_type_mach_recv_direct, + _dispatch_mach_notify_port, 0, &_dispatch_mgr_q); + _dispatch_mach_notify_source->ds_refs->ds_handler_func = + (void*)_dispatch_mach_notify_source_invoke; + dispatch_assert(_dispatch_mach_notify_source); + dispatch_resume(_dispatch_mach_notify_source); +} + +static mach_port_t +_dispatch_get_mach_recv_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init); + return _dispatch_mach_recv_portset; +} + +static void +_dispatch_mach_portset_init(void *context DISPATCH_UNUSED) +{ + struct kevent64_s kev = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD, + }; + kern_return_t kr; + + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, + &_dispatch_mach_portset); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH( + "mach_port_allocate() failed: cannot create port set"); + } + kev.ident = _dispatch_mach_portset; + _dispatch_kq_update(&kev); +} + +static mach_port_t +_dispatch_get_mach_portset(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init); + return _dispatch_mach_portset; +} + +static kern_return_t +_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) +{ + mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; + kern_return_t kr; + + _dispatch_debug_machport(mp); + kr = mach_port_move_member(mach_task_self(), mp, mps); + if (slowpath(kr)) { + DISPATCH_VERIFY_MIG(kr); + switch (kr) { + case KERN_INVALID_RIGHT: + if (mps) { + _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " + 
"mach_port_move_member() failed ", kr); + break; + } + //fall through + case KERN_INVALID_NAME: +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach receive right 0x%x destroyed " + "prematurely", mp); +#endif + break; + default: + (void)dispatch_assume_zero(kr); + break; + } + } + return mps ? kr : 0; +} + +static void +_dispatch_kevent_mach_recv_reenable(struct kevent64_s *ke DISPATCH_UNUSED) +{ +#if (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \ + (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090) + // delete and re-add kevent to workaround + if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) { + struct kevent64_s kev = _dispatch_mach_recv_kevent; + kev.flags = EV_DELETE; + _dispatch_kq_update(&kev); + } +#endif + _dispatch_mgr_kevent_reenable(&_dispatch_mach_recv_kevent); +} + +static kern_return_t +_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = 0; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) || + (del_flags & _DISPATCH_MACH_RECV_FLAGS)) { + mach_port_t mps; + if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { + mps = _dispatch_get_mach_recv_portset(); + } else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) || + ((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) && + (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) { + mps = _dispatch_get_mach_portset(); + } else { + mps = MACH_PORT_NULL; + } + kr = _dispatch_mach_portset_update(dk, mps); + } + return kr; +} + +static kern_return_t +_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = 0; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || (del_flags & _DISPATCH_MACH_SP_FLAGS)) { // Requesting a (delayed) non-sync send-possible notification // registers for both immediate dead-name notification and delayed-arm @@ -1405,234 
+2585,1430 @@ _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, // the MACH_SEND_NOTIFY to the port times out. // If send-possible is unavailable, fall back to immediate dead-name // registration rdar://problem/2527840&9008724 - kr_sp = _dispatch_mach_notify_update(dk, new_flags, del_flags, + kr = _dispatch_mach_notify_update(dk, new_flags, del_flags, _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 1 : 0); } + return kr; +} + +static inline void +_dispatch_kevent_mach_portset(struct kevent64_s *ke) +{ + if (ke->ident == _dispatch_mach_recv_portset) { + return _dispatch_kevent_mach_msg_drain(ke); + } else if (ke->ident == _dispatch_mach_portset) { + return _dispatch_kevent_machport_drain(ke); + } else { + return _dispatch_kevent_error(ke); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_machport_drain(struct kevent64_s *ke) +{ + mach_port_t name = (mach_port_name_t)ke->data; + dispatch_kevent_t dk; + struct kevent64_s kev; + + _dispatch_debug_machport(name); + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + return; + } + _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH + + EV_SET64(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, + DISPATCH_MACH_RECV_MESSAGE, 0, (uintptr_t)dk, 0, 0); + _dispatch_kevent_debug(&kev, __func__); + _dispatch_kevent_merge(&kev); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_mach_msg_drain(struct kevent64_s *ke) +{ + mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0]; + mach_msg_size_t siz, msgsiz; + mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; + + _dispatch_kevent_mach_recv_reenable(ke); + if (!dispatch_assume(hdr)) { + DISPATCH_CRASH("EVFILT_MACHPORT with no message"); + } + if (fastpath(!kr)) { + return _dispatch_kevent_mach_msg_recv(hdr); + } else if (kr != MACH_RCV_TOO_LARGE) { + goto out; + } + if (!dispatch_assume(ke->ext[1] <= UINT_MAX - + 
dispatch_mach_trailer_size)) { + DISPATCH_CRASH("EVFILT_MACHPORT with overlarge message"); + } + siz = (mach_msg_size_t)ke->ext[1] + dispatch_mach_trailer_size; + hdr = malloc(siz); + if (ke->data) { + if (!dispatch_assume(hdr)) { + // Kernel will discard message too large to fit + hdr = _dispatch_get_mach_recv_msg_buf(); + siz = _dispatch_mach_recv_msg_buf_size; + } + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (fastpath(!kr)) { + return _dispatch_kevent_mach_msg_recv(hdr); + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %lld", + hdr->msgh_id, ke->ext[1]); + kr = MACH_MSG_SUCCESS; + } + } else { + // We don't know which port in the portset contains the large message, + // so need to receive all messages pending on the portset to ensure the + // large message is drained. 
+ bool received = false; + for (;;) { + if (!dispatch_assume(hdr)) { + DISPATCH_CLIENT_CRASH("Message too large to fit in memory"); + } + const mach_msg_option_t options = (DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT); + kr = mach_msg(hdr, options, 0, siz, _dispatch_mach_recv_portset, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if ((!kr || kr == MACH_RCV_TOO_LARGE) && !dispatch_assume( + hdr->msgh_size <= UINT_MAX - dispatch_mach_trailer_size)) { + DISPATCH_CRASH("Overlarge message"); + } + if (fastpath(!kr)) { + msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; + if (msgsiz < siz) { + void *shrink = realloc(hdr, msgsiz); + if (shrink) hdr = shrink; + } + _dispatch_kevent_mach_msg_recv(hdr); + hdr = NULL; + received = true; + } else if (kr == MACH_RCV_TOO_LARGE) { + siz = hdr->msgh_size + dispatch_mach_trailer_size; + } else { + if (kr == MACH_RCV_TIMED_OUT && received) { + kr = MACH_MSG_SUCCESS; + } + break; + } + hdr = reallocf(hdr, siz); + } + } + if (hdr != _dispatch_get_mach_recv_msg_buf()) { + free(hdr); + } +out: + if (slowpath(kr)) { + _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " + "message reception failed", kr); + } +} + +static void +_dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) +{ + dispatch_source_refs_t dri; + dispatch_kevent_t dk; + mach_port_t name = hdr->msgh_local_port; + mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size; + + if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - + dispatch_mach_trailer_size)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received overlarge message"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + if (!dispatch_assume(name)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with MACH_PORT_NULL port"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + _dispatch_debug_machport(name); + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (!dispatch_assume(dk)) { + 
_dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with unknown kevent"); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + _dispatch_kevent_debug(&dk->dk_kevent, __func__); + TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + dispatch_source_t dsi = _dispatch_source_from_refs(dri); + if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { + return _dispatch_source_merge_mach_msg(dsi, dri, dk, hdr, siz); + } + } + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with no listeners"); + return _dispatch_kevent_mach_msg_destroy(hdr); +} + +static void +_dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) +{ + if (hdr) { + mach_msg_destroy(hdr); + if (hdr != _dispatch_get_mach_recv_msg_buf()) { + free(hdr); + } + } +} + +static void +_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, + dispatch_kevent_t dk, mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + if (ds == _dispatch_mach_notify_source) { + _dispatch_mach_notify_source_invoke(hdr); + return _dispatch_kevent_mach_msg_destroy(hdr); + } + if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) { + _dispatch_mach_reply_kevent_unregister((dispatch_mach_t)ds, + (dispatch_mach_reply_refs_t)dr, false); + } + return _dispatch_mach_msg_recv((dispatch_mach_t)ds, hdr, siz); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) +{ + dispatch_source_refs_t dri, dr_next; + dispatch_kevent_t dk; + struct kevent64_s kev; + bool unreg; + + dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); + if (!dk) { + return; + } + + // Update notification registration state. 
+ dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS; + EV_SET64(&kev, name, DISPATCH_EVFILT_MACH_NOTIFICATION, EV_ADD|EV_ENABLE, + flag, 0, (uintptr_t)dk, 0, 0); + if (final) { + // This can never happen again + unreg = true; + } else { + // Re-register for notification before delivery + unreg = _dispatch_kevent_resume(dk, flag, 0); + } + DISPATCH_MACH_KEVENT_ARMED(dk) = 0; + TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { + dispatch_source_t dsi = _dispatch_source_from_refs(dri); + if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { + dispatch_mach_t dm = (dispatch_mach_t)dsi; + _dispatch_mach_merge_kevent(dm, &kev); + if (unreg && dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + } else { + _dispatch_source_merge_kevent(dsi, &kev); + if (unreg) { + _dispatch_source_kevent_unregister(dsi); + } + } + if (!dr_next || DISPATCH_MACH_KEVENT_ARMED(dk)) { + // current merge is last in list (dk might have been freed) + // or it re-armed the notification + return; + } + } +} + +static kern_return_t +_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, + uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, + mach_port_mscount_t notify_sync) +{ + mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; + typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; + kern_return_t kr, krr = 0; + + // Update notification registration state. 
+ dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; + dk->dk_kevent.data &= ~(del_flags & mask); + + _dispatch_debug_machport(port); + if ((dk->dk_kevent.data & mask) && !(prev & mask)) { + // initialize _dispatch_mach_notify_port: + (void)_dispatch_get_mach_recv_portset(); + _dispatch_debug("machport[0x%08x]: registering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + krr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, _dispatch_mach_notify_port, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(krr); + + switch(krr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Supress errors & clear registration state + dk->dk_kevent.data &= ~mask; + break; + default: + // Else, we dont expect any errors from mach. Log any errors + if (dispatch_assume_zero(krr)) { + // log the error & clear registration state + dk->dk_kevent.data &= ~mask; + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the + // specified Mach notification on this port. We should + // technically cache the previous port and message it when the + // kernel messages our port. Or we can just say screw those + // subsystems and deallocate the previous port. 
+ // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + previous = MACH_PORT_NULL; + } + } + } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { + _dispatch_debug("machport[0x%08x]: unregistering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, MACH_PORT_NULL, + MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } + } + } else { + return 0; + } + if (slowpath(previous)) { + // the kernel has not consumed the send-once right yet + (void)dispatch_assume_zero( + _dispatch_send_consume_send_once_right(previous)); + } + return krr; +} + +static void +_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) +{ + (void)_dispatch_get_mach_recv_portset(); + _dispatch_debug("registering for calendar-change notification"); + kern_return_t kr = host_request_notification(mach_host_self(), + HOST_NOTIFY_CALENDAR_CHANGE, _dispatch_mach_notify_port); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +static void +_dispatch_mach_host_calendar_change_register(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); +} + +static void +_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) +{ + mig_reply_error_t reply; + dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union + __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); + dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); + boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); + if (!success && reply.RetCode == MIG_BAD_ID && hdr->msgh_id == 950) { + // 
host_notify_reply.defs: host_calendar_changed + _dispatch_debug("calendar-change notification"); + _dispatch_timers_calendar_change(); + _dispatch_mach_host_notify_update(NULL); + success = TRUE; + reply.RetCode = KERN_SUCCESS; + } + if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { + (void)dispatch_assume_zero(reply.RetCode); + } +} + +kern_return_t +_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " + "deleted prematurely", name); +#endif + + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); + + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + kern_return_t kr; + + _dispatch_debug("machport[0x%08x]: dead-name notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true); + + // the act of receiving a dead name notification allocates a dead-name + // right that must be deallocated + kr = mach_port_deallocate(mach_task_self(), name); + DISPATCH_VERIFY_MIG(kr); + //(void)dispatch_assume_zero(kr); + + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + _dispatch_debug("machport[0x%08x]: send-possible notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); + + return KERN_SUCCESS; +} + +#pragma mark - +#pragma mark dispatch_mach_t + +#define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2) +#define DISPATCH_MACH_PSEUDO_RECEIVED 0x1 +#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_OPTIONS_MASK 0xffff + +static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static void 
_dispatch_mach_msg_disconnected(dispatch_mach_t dm, + mach_port_t local_port, mach_port_t remote_port); +static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, + dispatch_object_t dou); +static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( + dispatch_mach_msg_t dmsg); + +static dispatch_mach_t +_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler, bool handler_is_block) +{ + dispatch_mach_t dm; + dispatch_mach_refs_t dr; + + dm = _dispatch_alloc(DISPATCH_VTABLE(mach), + sizeof(struct dispatch_mach_s)); + _dispatch_queue_init((dispatch_queue_t)dm); + dm->dq_label = label; + + dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds + dm->do_ref_cnt++; // since channel is created suspended + dm->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; + dm->do_targetq = &_dispatch_mgr_q; + + dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); + dr->dr_source_wref = _dispatch_ptr2wref(dm); + dr->dm_handler_func = handler; + dr->dm_handler_ctxt = context; + dm->ds_refs = dr; + dm->ds_handler_is_block = handler_is_block; + + dm->dm_refs = _dispatch_calloc(1ul, + sizeof(struct dispatch_mach_send_refs_s)); + dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm); + dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; + TAILQ_INIT(&dm->dm_refs->dm_replies); + + // First item on the channel sets the user-specified target queue + dispatch_set_target_queue(dm, q); + _dispatch_object_debug(dm, "%s", __func__); + return dm; +} + +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t q, + dispatch_mach_handler_t handler) +{ + dispatch_block_t bb = _dispatch_Block_copy((void*)handler); + return _dispatch_mach_create(label, q, bb, + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true); +} + +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler) +{ + 
return _dispatch_mach_create(label, q, context, handler, false); +} + +void +_dispatch_mach_dispose(dispatch_mach_t dm) +{ + _dispatch_object_debug(dm, "%s", __func__); + dispatch_mach_refs_t dr = dm->ds_refs; + if (dm->ds_handler_is_block && dr->dm_handler_ctxt) { + Block_release(dr->dm_handler_ctxt); + } + free(dr); + free(dm->dm_refs); + _dispatch_queue_destroy(dm); +} + +void +dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, + mach_port_t send, dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_kevent_t dk; + + if (MACH_PORT_VALID(receive)) { + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent.ident = receive; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + dm->ds_dkev = dk; + dm->ds_pending_data_mask = dk->dk_kevent.fflags; + _dispatch_retain(dm); // the reference the manager queue holds + } + dr->dm_send = send; + if (MACH_PORT_VALID(send)) { + if (checkin) { + dispatch_retain(checkin); + dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); + } + dr->dm_checkin = checkin; + } + // monitor message reply ports + dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + if (slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_disconnect_cnt, + DISPATCH_MACH_NEVER_CONNECTED, 0, release))) { + DISPATCH_CLIENT_CRASH("Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_resume(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, bool disconnected) +{ + dispatch_kevent_t dk = dmr->dm_dkev; + mach_port_t local_port = (mach_port_t)dk->dk_kevent.ident; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE); + 
TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dm_list); + free(dmr); + if (disconnected) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, + void *ctxt) +{ + dispatch_kevent_t dk; + dispatch_mach_reply_refs_t dmr; + + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent.ident = reply; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent.udata = (uintptr_t)dk; + // make reply context visible to leaks rdar://11777199 + dk->dk_kevent.ext[1] = (uintptr_t)ctxt; + TAILQ_INIT(&dk->dk_sources); + + dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); + dmr->dr_source_wref = _dispatch_ptr2wref(dm); + dmr->dm_dkev = dk; + + _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply, + ctxt); + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&dmr->dm_dkev, &flags); + TAILQ_INSERT_TAIL(&dmr->dm_dkev->dk_sources, (dispatch_source_refs_t)dmr, + dr_list); + TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dm_list); + if (do_resume && _dispatch_kevent_resume(dmr->dm_dkev, flags, 0)) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_unregister(dispatch_mach_t dm) +{ + dispatch_kevent_t dk = dm->dm_dkev; + dm->dm_dkev = NULL; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, + dr_list); + dm->ds_pending_data_mask &= ~(unsigned long) + (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); + _dispatch_kevent_unregister(dk, + DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) +{ + dispatch_kevent_t dk; + + dk = _dispatch_calloc(1ul, sizeof(struct 
dispatch_kevent_s)); + dk->dk_kevent = _dispatch_source_type_mach_send.ke; + dk->dk_kevent.ident = send; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD; + dk->dk_kevent.udata = (uintptr_t)dk; + TAILQ_INIT(&dk->dk_sources); + + dm->ds_pending_data_mask |= dk->dk_kevent.fflags; + + uint32_t flags; + bool do_resume = _dispatch_kevent_register(&dk, &flags); + TAILQ_INSERT_TAIL(&dk->dk_sources, + (dispatch_source_refs_t)dm->dm_refs, dr_list); + dm->dm_dkev = dk; + if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { + _dispatch_mach_kevent_unregister(dm); + } +} + +static inline void +_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou) +{ + return _dispatch_queue_push(dm._dq, dou); +} + +static inline void +_dispatch_mach_msg_set_options(dispatch_object_t dou, mach_msg_option_t options) +{ + dou._do->do_suspend_cnt = (unsigned int)options; +} + +static inline mach_msg_option_t +_dispatch_mach_msg_get_options(dispatch_object_t dou) +{ + mach_msg_option_t options = (mach_msg_option_t)dou._do->do_suspend_cnt; + return options; +} + +static inline void +_dispatch_mach_msg_set_reason(dispatch_object_t dou, mach_error_t err, + unsigned long reason) +{ + dispatch_assert_zero(reason & ~(unsigned long)code_emask); + dou._do->do_suspend_cnt = (unsigned int)((err || !reason) ? err : + err_local|err_sub(0x3e0)|(mach_error_t)reason); +} + +static inline unsigned long +_dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) +{ + mach_error_t err = (mach_error_t)dou._do->do_suspend_cnt; + dou._do->do_suspend_cnt = 0; + if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { + *err_ptr = 0; + return err_get_code(err); + } + *err_ptr = err; + return err ? 
DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; +} + +static void +_dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, + mach_msg_size_t siz) +{ + _dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + return _dispatch_kevent_mach_msg_destroy(hdr); + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor; + destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ? + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); + return _dispatch_mach_push(dm, dmsg); +} + +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_mach_t dm, dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t reply = MACH_PORT_NULL; + mach_msg_option_t msg_opts = _dispatch_mach_msg_get_options(dou); + if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { + reply = hdr->msgh_reserved; + hdr->msgh_reserved = 0; + } else if (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE && + MACH_PORT_VALID(hdr->msgh_local_port) && (!dm->ds_dkev || + dm->ds_dkev->dk_kevent.ident != hdr->msgh_local_port)) { + reply = hdr->msgh_local_port; + } + return reply; +} + +static inline void +_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, + mach_port_t remote_port) +{ + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + 
DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + if (local_port) hdr->msgh_local_port = local_port; + if (remote_port) hdr->msgh_remote_port = remote_port; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); + return _dispatch_mach_push(dm, dmsg); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) +{ + mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dou); + _dispatch_mach_msg_set_reason(dou, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); + _dispatch_mach_push(dm, dou); + if (reply) { + _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); + } +} + +DISPATCH_NOINLINE +static dispatch_object_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_mach_msg_t dmsg = dou._dmsg; + dr->dm_needs_mgr = 0; + if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) { + // send initial checkin message + if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != + &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dr->dm_needs_mgr = 1; + goto out; + } + dr->dm_checkin = _dispatch_mach_msg_send(dm, dr->dm_checkin)._dmsg; + if (slowpath(dr->dm_checkin)) { + goto out; + } + } + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + mach_msg_return_t kr = 0; + mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dmsg); + mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg); + if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + opts = MACH_SEND_MSG | (msg_opts & DISPATCH_MACH_OPTIONS_MASK); + if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if (dmsg != dr->dm_checkin) { + msg->msgh_remote_port = dr->dm_send; + } + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + if (slowpath(!dm->dm_dkev)) { + _dispatch_mach_kevent_register(dm, msg->msgh_remote_port); + } + if (fastpath(dm->dm_dkev)) { + if 
(DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) { + goto out; + } + opts |= MACH_SEND_NOTIFY; + } + } + opts |= MACH_SEND_TIMEOUT; + } + _dispatch_debug_machport(msg->msgh_remote_port); + if (reply) _dispatch_debug_machport(reply); + kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, + MACH_PORT_NULL); + } + _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, opts 0x%x, " + "msg_opts 0x%x, reply on 0x%08x: %s - 0x%x", msg->msgh_remote_port, + msg->msgh_id, dmsg->do_ctxt, opts, msg_opts, reply, + mach_error_string(kr), kr); + if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (opts & MACH_SEND_NOTIFY) { + _dispatch_debug("machport[0x%08x]: send-possible notification " + "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); + DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) = 1; + } else { + // send kevent must be installed on the manager queue + dr->dm_needs_mgr = 1; + } + if (reply) { + _dispatch_mach_msg_set_options(dmsg, msg_opts | + DISPATCH_MACH_PSEUDO_RECEIVED); + msg->msgh_reserved = reply; // Remember the original reply port + } + goto out; + } + if (fastpath(!kr) && reply) { + if (_dispatch_queue_get_current() != &_dispatch_mgr_q) { + // reply receive kevent must be installed on the manager queue + dr->dm_needs_mgr = 1; + _dispatch_mach_msg_set_options(dmsg, msg_opts | + DISPATCH_MACH_REGISTER_FOR_REPLY); + if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { + msg->msgh_reserved = reply; // Remember the original reply port + } + goto out; + } + _dispatch_mach_reply_kevent_register(dm, reply, dmsg->do_ctxt); + } + if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + _dispatch_mach_msg_set_reason(dmsg, kr, 0); + _dispatch_mach_push(dm, dmsg); + dmsg = NULL; + if (slowpath(kr) && reply) { + // Send failed, so reply was never connected + _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); + } +out: + return (dispatch_object_t)dmsg; +} + +static void 
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + struct dispatch_object_s *prev, *dc = dou._do; + dc->do_next = NULL; + + prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release); + if (fastpath(prev)) { + prev->do_next = dc; + return; + } + dr->dm_head = dc; + _dispatch_wakeup(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_drain(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + while (dr->dm_tail) { + while (!(dc = fastpath(dr->dm_head))) { + dispatch_hardware_pause(); + } + do { + next_dc = fastpath(dc->do_next); + dr->dm_head = next_dc; + if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL, + relaxed)) { + // Enqueue is TIGHTLY controlled, we won't wait long. + while (!(next_dc = fastpath(dc->do_next))) { + dispatch_hardware_pause(); + } + dr->dm_head = next_dc; + } + if (!DISPATCH_OBJ_IS_VTABLE(dc)) { + if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { + // send barrier + // leave send queue locked until barrier has completed + return _dispatch_mach_push(dm, dc); + } +#if DISPATCH_MACH_SEND_SYNC + if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){ + _dispatch_thread_semaphore_signal( + (_dispatch_thread_semaphore_t)dc->do_ctxt); + continue; + } +#endif // DISPATCH_MACH_SEND_SYNC + if (slowpath(!_dispatch_mach_reconnect_invoke(dm, dc))) { + goto out; + } + continue; + } + if (slowpath(dr->dm_disconnect_cnt) || + slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + _dispatch_mach_msg_not_sent(dm, dc); + continue; + } + if (slowpath(dc = _dispatch_mach_msg_send(dm, dc)._do)) { + goto out; + } + } while ((dc = next_dc)); + } +out: + // if this is not a complete drain, we must undo some things + if (slowpath(dc)) { + if (!next_dc && + !dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) { + // wait for enqueue slow path to finish + while (!(next_dc = 
fastpath(dr->dm_head))) { + dispatch_hardware_pause(); + } + dc->do_next = next_dc; + } + dr->dm_head = dc; + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + _dispatch_wakeup(dm); +} + +static inline void +_dispatch_mach_send(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (!fastpath(dr->dm_tail) || !fastpath(dispatch_atomic_cmpxchg2o(dr, + dm_sending, 0, 1, acquire))) { + return; + } + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_mach_send_drain(dm); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke) +{ + if (!(ke->fflags & dm->ds_pending_data_mask)) { + return; + } + _dispatch_mach_send(dm); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { + DISPATCH_CLIENT_CRASH("Message already enqueued"); + } + dispatch_retain(dmsg); + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + _dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK); + if (slowpath(dr->dm_tail) || slowpath(dr->dm_disconnect_cnt) || + slowpath(dm->ds_atomic_flags & DSF_CANCELED) || + slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, + acquire))) { + return _dispatch_mach_send_push(dm, dmsg); + } + if (slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) { + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + return _dispatch_mach_send_push(dm, dmsg); + } + if (slowpath(dr->dm_tail)) { + return _dispatch_mach_send_drain(dm); + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + _dispatch_wakeup(dm); +} + +static void +_dispatch_mach_disconnect(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (dm->dm_dkev) { + _dispatch_mach_kevent_unregister(dm); + } + if (MACH_PORT_VALID(dr->dm_send)) { + _dispatch_mach_msg_disconnected(dm, 
MACH_PORT_NULL, dr->dm_send); + } + dr->dm_send = MACH_PORT_NULL; + if (dr->dm_checkin) { + _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); + dr->dm_checkin = NULL; + } + if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dm_list, tmp){ + _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + } + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (!fastpath(dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, acquire))) { + return false; + } + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_mach_disconnect(dm); + if (dm->ds_dkev) { + mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; + _dispatch_source_kevent_unregister((dispatch_source_t)dm); + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } + (void)dispatch_atomic_dec2o(dr, dm_sending, release); + return true; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) +{ + if (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if (slowpath(_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send/reply kevents must be uninstalled on the manager queue + return false; + } + } + _dispatch_mach_disconnect(dm); + dispatch_mach_send_refs_t dr = dm->dm_refs; + dr->dm_checkin = dou._dc->dc_data; + dr->dm_send = (mach_port_t)dou._dc->dc_other; + _dispatch_continuation_free(dou._dc); + (void)dispatch_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); + _dispatch_object_debug(dm, "%s", __func__); + return true; +} + +DISPATCH_NOINLINE +void +dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, + dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + (void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); + if (MACH_PORT_VALID(send) && checkin) { + dispatch_retain(checkin); + dr->dm_checkin_port = 
_dispatch_mach_msg_get_remote_port(checkin); + } else { + checkin = NULL; + dr->dm_checkin_port = MACH_PORT_NULL; + } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; + dc->dc_ctxt = dc; + dc->dc_data = checkin; + dc->dc_other = (void*)(uintptr_t)send; + return _dispatch_mach_send_push(dm, dc); +} + +#if DISPATCH_MACH_SEND_SYNC +DISPATCH_NOINLINE +static void +_dispatch_mach_send_sync_slow(dispatch_mach_t dm) +{ + _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); + struct dispatch_object_s dc = { + .do_vtable = (void *)(DISPATCH_OBJ_SYNC_SLOW_BIT), + .do_ctxt = (void*)sema, + }; + _dispatch_mach_send_push(dm, &dc); + _dispatch_thread_semaphore_wait(sema); + _dispatch_put_thread_semaphore(sema); +} +#endif // DISPATCH_MACH_SEND_SYNC + +DISPATCH_NOINLINE +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + return MACH_PORT_DEAD; + } + return dr->dm_checkin_port; +} - return (kr_recv ? 
kr_recv : kr_sp); +DISPATCH_NOINLINE +static void +_dispatch_mach_connect_invoke(dispatch_mach_t dm) +{ + dispatch_mach_refs_t dr = dm->ds_refs; + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func); + dm->dm_connect_handler_called = 1; } +DISPATCH_NOINLINE void -_dispatch_drain_mach_messages(struct kevent *ke) +_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg) { - mach_port_t name = (mach_port_name_t)ke->data; - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - struct kevent kev; + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + dispatch_mach_refs_t dr = dm->ds_refs; + mach_error_t err; + unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); + + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, + dr->dm_handler_func); + _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + dispatch_release(dmsg); +} - if (!dispatch_assume(name)) { - return; +DISPATCH_NOINLINE +void +_dispatch_mach_barrier_invoke(void *ctxt) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + dispatch_mach_refs_t dr = dm->ds_refs; + struct dispatch_continuation_s *dc = ctxt; + void *context = dc->dc_data; + dispatch_function_t barrier = dc->dc_other; + bool send_barrier = ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT); + + _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); } - _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dispatch_assume(dk)) { - return; + _dispatch_client_callout(context, barrier); + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, 
dr->dm_handler_func); + _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + if (send_barrier) { + (void)dispatch_atomic_dec2o(dm->dm_refs, dm_sending, release); } - _dispatch_kevent_machport_disable(dk); // emulate EV_DISPATCH - - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, - DISPATCH_MACH_RECV_MESSAGE, 0, dk); +} - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = _dispatch_mach_barrier_invoke; + dc->dc_ctxt = dc; + dc->dc_data = context; + dc->dc_other = barrier; + + dispatch_mach_send_refs_t dr = dm->dm_refs; + if (slowpath(dr->dm_tail) || slowpath(!dispatch_atomic_cmpxchg2o(dr, + dm_sending, 0, 1, acquire))) { + return _dispatch_mach_send_push(dm, dc); } + // leave send queue locked until barrier has completed + return _dispatch_mach_push(dm, dc); } -static inline void -_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, uint32_t unreg, - bool final) +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t barrier) { - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - struct kevent kev; - - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dk) { - return; - } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_func = _dispatch_mach_barrier_invoke; + dc->dc_ctxt = dc; + dc->dc_data = context; + dc->dc_other = barrier; + return _dispatch_mach_push(dm, dc); +} - // Update notification registration state. 
- dk->dk_kevent.data &= ~unreg; - if (!final) { - // Re-register for notification before delivery - _dispatch_kevent_resume(dk, flag, 0); - } +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_mach_send_barrier_f(dm, _dispatch_Block_copy(barrier), + _dispatch_call_block_and_release); +} - EV_SET(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE, flag, 0, dk); +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_mach_receive_barrier_f(dm, _dispatch_Block_copy(barrier), + _dispatch_call_block_and_release); +} - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), &kev); - if (final) { - // this can never happen again - // this must happen after the merge - // this may be racy in the future, but we don't provide a 'setter' - // API for the mask yet - _dispatch_source_from_refs(dri)->ds_pending_data_mask &= ~unreg; - } +DISPATCH_NOINLINE +static void +_dispatch_mach_cancel_invoke(dispatch_mach_t dm) +{ + dispatch_mach_refs_t dr = dm->ds_refs; + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); } + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + dm->dm_cancel_handler_called = 1; + _dispatch_release(dm); // the retain is done at creation time +} - if (final) { - // no more sources have these flags - dk->dk_kevent.fflags &= ~unreg; - } +DISPATCH_NOINLINE +void +dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_source_cancel((dispatch_source_t)dm); } -static kern_return_t -_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, - mach_port_mscount_t notify_sync) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_mach_invoke2(dispatch_object_t dou, + _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) { 
- mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; - typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; - kern_return_t kr, krr = 0; + dispatch_mach_t dm = dou._dm; - // Update notification registration state. - dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; - dk->dk_kevent.data &= ~(del_flags & mask); + // This function performs all mach channel actions. Each action is + // responsible for verifying that it takes place on the appropriate queue. + // If the current queue is not the correct queue for this action, the + // correct queue will be returned and the invoke will be re-driven on that + // queue. - _dispatch_debug_machport(port); - if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - previous = MACH_PORT_NULL; - krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_event_port, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(krr); + // The order of tests here in invoke and in probe should be consistent. - switch(krr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - // Supress errors & clear registration state - dk->dk_kevent.data &= ~mask; - break; - default: - // Else, we dont expect any errors from mach. Log any errors - if (dispatch_assume_zero(krr)) { - // log the error & clear registration state - dk->dk_kevent.data &= ~mask; - } else if (dispatch_assume_zero(previous)) { - // Another subsystem has beat libdispatch to requesting the - // specified Mach notification on this port. We should - // technically cache the previous port and message it when the - // kernel messages our port. Or we can just say screw those - // subsystems and deallocate the previous port. 
- // They should adopt libdispatch :-P - kr = mach_port_deallocate(mach_task_self(), previous); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - previous = MACH_PORT_NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_mach_send_refs_t dr = dm->dm_refs; + + if (slowpath(!dm->ds_is_installed)) { + // The channel needs to be installed on the manager queue. + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + if (dm->ds_dkev) { + _dispatch_source_kevent_register((dispatch_source_t)dm); + } + dm->ds_is_installed = true; + _dispatch_mach_send(dm); + // Apply initial target queue change + _dispatch_queue_drain(dou); + if (dm->dq_items_tail) { + return dm->do_targetq; + } + } else if (dm->dq_items_tail) { + // The channel has pending messages to deliver to the target queue. + if (dq != dm->do_targetq) { + return dm->do_targetq; + } + dispatch_queue_t tq = dm->do_targetq; + if (slowpath(_dispatch_queue_drain(dou))) { + DISPATCH_CLIENT_CRASH("Sync onto mach channel"); + } + if (slowpath(tq != dm->do_targetq)) { + // An item on the channel changed the target queue + return dm->do_targetq; + } + } else if (dr->dm_tail) { + if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) && + (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) { + // Send/reply kevents need to be installed or uninstalled + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; } } - } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { - previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, MACH_PORT_NULL, - MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - switch (kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - case KERN_INVALID_ARGUMENT: - break; - default: - if (dispatch_assume_zero(kr)) { - // log the error + if (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || + (dm->ds_atomic_flags & DSF_CANCELED) || 
dr->dm_disconnect_cnt) { + // The channel has pending messages to send. + _dispatch_mach_send(dm); + } + } else if (dm->ds_atomic_flags & DSF_CANCELED){ + // The channel has been cancelled and needs to be uninstalled from the + // manager queue. After uninstallation, the cancellation handler needs + // to be delivered to the target queue. + if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || + !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if (dq != &_dispatch_mgr_q) { + return &_dispatch_mgr_q; + } + if (!_dispatch_mach_cancel(dm)) { + return NULL; } } - } else { - return 0; - } - if (slowpath(previous)) { - // the kernel has not consumed the send-once right yet - (void)dispatch_assume_zero( - _dispatch_send_consume_send_once_right(previous)); + if (!dm->dm_cancel_handler_called) { + if (dq != dm->do_targetq) { + return dm->do_targetq; + } + _dispatch_mach_cancel_invoke(dm); + } } - return krr; -} - -static void -_dispatch_mach_notify_source2(void *context) -{ - dispatch_source_t ds = context; - size_t maxsz = MAX(sizeof(union - __RequestUnion___dispatch_send_libdispatch_internal_protocol_subsystem), - sizeof(union - __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); - - dispatch_mig_server(ds, maxsz, libdispatch_internal_protocol_server); + return NULL; } +DISPATCH_NOINLINE void -_dispatch_mach_notify_source_init(void *context DISPATCH_UNUSED) +_dispatch_mach_invoke(dispatch_mach_t dm) { - _dispatch_get_port_set(); - - _dispatch_mach_notify_source = dispatch_source_create( - DISPATCH_SOURCE_TYPE_MACH_RECV, _dispatch_event_port, 0, - &_dispatch_mgr_q); - dispatch_assert(_dispatch_mach_notify_source); - dispatch_set_context(_dispatch_mach_notify_source, - _dispatch_mach_notify_source); - dispatch_source_set_event_handler_f(_dispatch_mach_notify_source, - _dispatch_mach_notify_source2); - dispatch_resume(_dispatch_mach_notify_source); + _dispatch_queue_class_invoke(dm, _dispatch_mach_invoke2); } -kern_return_t 
-_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) +unsigned long +_dispatch_mach_probe(dispatch_mach_t dm) { -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " - "deleted prematurely", name); -#endif + // This function determines whether the mach channel needs to be invoked. + // The order of tests here in probe and in invoke should be consistent. - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, - _DISPATCH_MACH_SP_FLAGS, true); + dispatch_mach_send_refs_t dr = dm->dm_refs; - return KERN_SUCCESS; + if (slowpath(!dm->ds_is_installed)) { + // The channel needs to be installed on the manager queue. + return true; + } else if (dm->dq_items_tail) { + // The source has pending messages to deliver to the target queue. + return true; + } else if (dr->dm_tail && + (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || + (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) { + // The channel has pending messages to send. + return true; + } else if (dm->ds_atomic_flags & DSF_CANCELED) { + if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || + !TAILQ_EMPTY(&dm->dm_refs->dm_replies) || + !dm->dm_cancel_handler_called) { + // The channel needs to be uninstalled from the manager queue, or + // the cancellation handler needs to be delivered to the target + // queue. + return true; + } + } + // Nothing to do. 
+ return false; } -kern_return_t -_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - kern_return_t kr; +#pragma mark - +#pragma mark dispatch_mach_msg_t -#if DISPATCH_DEBUG - _dispatch_log("machport[0x%08x]: dead-name notification: %s", - name, __func__); -#endif - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, - _DISPATCH_MACH_SP_FLAGS, true); +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) +{ + if (slowpath(size < sizeof(mach_msg_header_t)) || + slowpath(destructor && !msg)) { + DISPATCH_CLIENT_CRASH("Empty message"); + } + dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), + sizeof(struct dispatch_mach_msg_s) + + (destructor ? 0 : size - sizeof(dmsg->msg))); + if (destructor) { + dmsg->msg = msg; + } else if (msg) { + memcpy(dmsg->buf, msg, size); + } + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dmsg->do_targetq = _dispatch_get_root_queue(0, false); + dmsg->destructor = destructor; + dmsg->size = size; + if (msg_ptr) { + *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); + } + return dmsg; +} - // the act of receiving a dead name notification allocates a dead-name - // right that must be deallocated - kr = mach_port_deallocate(mach_task_self(), name); - DISPATCH_VERIFY_MIG(kr); - //(void)dispatch_assume_zero(kr); +void +_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) +{ + switch (dmsg->destructor) { + case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: + free(dmsg->msg); + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { + mach_vm_size_t vm_size = dmsg->size; + mach_vm_address_t vm_addr = (uintptr_t)dmsg->msg; + (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), + vm_addr, vm_size)); + break; + }} +} - return KERN_SUCCESS; +static inline mach_msg_header_t* 
+_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) +{ + return dmsg->destructor ? dmsg->msg : (mach_msg_header_t*)dmsg->buf; } -kern_return_t -_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) +mach_msg_header_t* +dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) { -#if DISPATCH_DEBUG - _dispatch_log("machport[0x%08x]: send-possible notification: %s", - name, __func__); -#endif - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, - _DISPATCH_MACH_SP_FLAGS, false); + if (size_ptr) { + *size_ptr = dmsg->size; + } + return _dispatch_mach_msg_get_msg(dmsg); +} - return KERN_SUCCESS; +size_t +_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dmsg), dmsg); + offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " + "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); + offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " + "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->buf); + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + if (hdr->msgh_id) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", + hdr->msgh_id); + } + if (hdr->msgh_size) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ", + hdr->msgh_size); + } + if (hdr->msgh_bits) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "bits msgh_bits), + MACH_MSGH_BITS_REMOTE(hdr->msgh_bits)); + if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x", + MACH_MSGH_BITS_OTHER(hdr->msgh_bits)); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, ">, "); + } + if (hdr->msgh_local_port && hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, " + "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port); + } 
else if (hdr->msgh_local_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x", + hdr->msgh_local_port); + } else if (hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x", + hdr->msgh_remote_port); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports"); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, " } }"); + return offset; } +#pragma mark - +#pragma mark dispatch_mig_server + mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) @@ -1643,15 +4019,16 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; mach_msg_return_t kr = 0; + uint64_t assertion_token = 0; unsigned int cnt = 1000; // do not stall out serial queues - int demux_success; + boolean_t demux_success; bool received = false; size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; // XXX FIXME -- allocate these elsewhere bufRequest = alloca(rcv_size); bufReply = alloca(rcv_size); - bufReply->Head.msgh_size = 0; // make CLANG happy + bufReply->Head.msgh_size = 0; bufRequest->RetCode = 0; #if DISPATCH_DEBUG @@ -1665,7 +4042,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options &= ~MACH_RCV_MSG; if (!(tmp_options & MACH_SEND_MSG)) { - break; + goto out; } } kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, @@ -1721,11 +4098,19 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "dispatch_mig_server: mach_msg() failed", kr); break; } - break; + goto out; } if (!(tmp_options & MACH_RCV_MSG)) { - break; + goto out; + } + + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + assertion_token = 0; } received = true; @@ -1733,6 +4118,14 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bufRequest = bufReply; 
bufReply = bufTemp; +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, + NULL, &assertion_token); + if (r && slowpath(r != EIO)) { + (void)dispatch_assume_zero(r); + } +#endif + demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { @@ -1762,6 +4155,14 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, } } +out: + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + } + return kr; } @@ -1786,8 +4187,12 @@ _evfiltstr(short filt) #ifdef EVFILT_VM _evfilt2(EVFILT_VM); #endif +#ifdef EVFILT_MEMORYSTATUS + _evfilt2(EVFILT_MEMORYSTATUS); +#endif #if HAVE_MACH _evfilt2(EVFILT_MACHPORT); + _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); #endif _evfilt2(EVFILT_FS); _evfilt2(EVFILT_USER); @@ -1804,49 +4209,76 @@ static size_t _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_queue_t target = ds->do_targetq; - return snprintf(buf, bufsiz, "target = %s[%p], pending_data = 0x%lx, " - "pending_data_mask = 0x%lx, ", - target ? target->dq_label : "", target, - ds->ds_pending_data, ds->ds_pending_data_mask); + return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " + "pending_data = 0x%lx, pending_data_mask = 0x%lx, ", + target && target->dq_label ? 
target->dq_label : "", target, + ds->ds_ident_hack, ds->ds_pending_data, ds->ds_pending_data_mask); } static size_t _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_source_refs_t dr = ds->ds_refs; - return snprintf(buf, bufsiz, "timer = { target = 0x%llx, " - "last_fire = 0x%llx, interval = 0x%llx, flags = 0x%llx }, ", - ds_timer(dr).target, ds_timer(dr).last_fire, ds_timer(dr).interval, - ds_timer(dr).flags); + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx," + " last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", + ds_timer(dr).target, ds_timer(dr).deadline, ds_timer(dr).last_fire, + ds_timer(dr).interval, ds_timer(dr).flags); } size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) { size_t offset = 0; - offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dx_kind(ds), ds); offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } - offset += snprintf(&buf[offset], bufsiz - offset, "filter = %s }", + offset += dsnprintf(&buf[offset], bufsiz - offset, "filter = %s }", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); return offset; } +static size_t +_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + dispatch_queue_t target = dm->do_targetq; + return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " + "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " + "sending = %d, disconnected = %d, canceled = %d ", + target && target->dq_label ? 
target->dq_label : "", target, + dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, + dm->dm_refs->dm_send, + dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, + dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) ? + " (armed)" : "", dm->dm_refs->dm_checkin_port, + dm->dm_refs->dm_checkin ? " (pending)" : "", + dm->dm_refs->dm_sending, dm->dm_refs->dm_disconnect_cnt, + (bool)(dm->ds_atomic_flags & DSF_CANCELED)); +} +size_t +_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dm->dq_label ? dm->dq_label : dx_kind(dm), dm); + offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + #if DISPATCH_DEBUG -void -dispatch_debug_kevents(struct kevent* kev, size_t count, const char* str) +static void +_dispatch_kevent_debug(struct kevent64_s* kev, const char* str) { - size_t i; - for (i = 0; i < count; ++i) { - _dispatch_log("kevent[%lu] = { ident = %p, filter = %s, flags = 0x%x, " - "fflags = 0x%x, data = %p, udata = %p }: %s", - i, (void*)kev[i].ident, _evfiltstr(kev[i].filter), kev[i].flags, - kev[i].fflags, (void*)kev[i].data, (void*)kev[i].udata, str); - } + _dispatch_log("kevent[%p] = { ident = 0x%llx, filter = %s, flags = 0x%x, " + "fflags = 0x%x, data = 0x%llx, udata = 0x%llx, ext[0] = 0x%llx, " + "ext[1] = 0x%llx }: %s", kev, kev->ident, _evfiltstr(kev->filter), + kev->flags, kev->fflags, kev->data, kev->udata, kev->ext[0], + kev->ext[1], str); } static void @@ -1901,7 +4333,7 @@ _dispatch_kevent_debugger2(void *context) dk, (unsigned long)dk->dk_kevent.ident, _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, - dk->dk_kevent.udata); + (void*)dk->dk_kevent.udata); fprintf(debug_stream, "\t\t
      \n"); TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { ds = _dispatch_source_from_refs(dr); @@ -1914,7 +4346,7 @@ _dispatch_kevent_debugger2(void *context) dispatch_queue_t dq = ds->do_targetq; fprintf(debug_stream, "\t\t
      DQ: %p refcnt 0x%x suspend " "0x%x label: %s\n", dq, dq->do_ref_cnt + 1, - dq->do_suspend_cnt, dq->dq_label); + dq->do_suspend_cnt, dq->dq_label ? dq->dq_label:""); } } fprintf(debug_stream, "\t\t
    \n"); @@ -1999,7 +4431,7 @@ _dispatch_kevent_debugger(void *context DISPATCH_UNUSED) goto out_bad; } - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, fd, 0, + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, &_dispatch_mgr_q); if (dispatch_assume(ds)) { _dispatch_log("LIBDISPATCH: debug port: %hu", @@ -2024,6 +4456,7 @@ _dispatch_kevent_debugger(void *context DISPATCH_UNUSED) #define MACH_PORT_TYPE_SPREQUEST 0x40000000 #endif +DISPATCH_NOINLINE void dispatch_debug_machport(mach_port_t name, const char* str) { @@ -2048,11 +4481,10 @@ dispatch_debug_machport(mach_port_t name, const char* str) (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, MACH_PORT_RIGHT_DEAD_NAME, &nd)); } - if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND| - MACH_PORT_TYPE_SEND_ONCE)) { + if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { (void)dispatch_assume_zero(mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs)); - } + } if (type & MACH_PORT_TYPE_RECEIVE) { mach_port_status_t status = { .mps_pset = 0, }; mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; diff --git a/src/source_internal.h b/src/source_internal.h index c2c706f84..1a023cf2d 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2011 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -32,6 +32,12 @@ #include // for HeaderDoc #endif +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) +#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) + // NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t // bit values must not overlap as they share the same kevent fflags ! 
@@ -50,37 +56,58 @@ enum { * @constant DISPATCH_MACH_RECV_MESSAGE * Receive right has pending messages * + * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT + * Receive messages from receive right directly via kevent64() + * * @constant DISPATCH_MACH_RECV_NO_SENDERS * Receive right has no more senders. TODO */ enum { DISPATCH_MACH_RECV_MESSAGE = 0x2, - DISPATCH_MACH_RECV_NO_SENDERS = 0x10, + DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10, + DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, + DISPATCH_MACH_RECV_NO_SENDERS = 0x40, }; enum { DISPATCH_TIMER_WALL_CLOCK = 0x4, + DISPATCH_TIMER_INTERVAL = 0x8, + DISPATCH_TIMER_WITH_AGGREGATE = 0x10, }; -#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 3) +// low bits are timer QoS class +#define DISPATCH_TIMER_QOS_NORMAL 0u +#define DISPATCH_TIMER_QOS_CRITICAL 1u +#define DISPATCH_TIMER_QOS_BACKGROUND 2u +#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) +#define DISPATCH_TIMER_QOS(tidx) ((uintptr_t)(tidx) & 0x3ul) -#define DISPATCH_TIMER_INDEX_WALL 0 -#define DISPATCH_TIMER_INDEX_MACH 1 -#define DISPATCH_TIMER_INDEX_DISARM 2 +#define DISPATCH_TIMER_KIND_WALL 0u +#define DISPATCH_TIMER_KIND_MACH 1u +#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) +#define DISPATCH_TIMER_KIND(tidx) (((uintptr_t)(tidx) >> 2) & 0x1ul) + +#define DISPATCH_TIMER_INDEX(kind, qos) (((kind) << 2) | (qos)) +#define DISPATCH_TIMER_INDEX_DISARM \ + DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_COUNT, 0) +#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) +#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ + DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ + DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \ + f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \ + f & DISPATCH_TIMER_BACKGROUND ? 
DISPATCH_TIMER_QOS_BACKGROUND : \ + DISPATCH_TIMER_QOS_NORMAL); }) struct dispatch_kevent_s { TAILQ_ENTRY(dispatch_kevent_s) dk_list; TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; - struct kevent dk_kevent; + struct kevent64_s dk_kevent; }; typedef struct dispatch_kevent_s *dispatch_kevent_t; struct dispatch_source_type_s { - struct kevent ke; + struct kevent64_s ke; uint64_t mask; void (*init)(dispatch_source_t ds, dispatch_source_type_t type, uintptr_t handle, unsigned long mask, dispatch_queue_t q); @@ -88,77 +115,161 @@ struct dispatch_source_type_s { struct dispatch_timer_source_s { uint64_t target; + uint64_t deadline; uint64_t last_fire; uint64_t interval; uint64_t leeway; - uint64_t flags; // dispatch_timer_flags_t + unsigned long flags; // dispatch_timer_flags_t unsigned long missed; }; // Source state which may contain references to the source object // Separately allocated so that 'leaks' can see sources -struct dispatch_source_refs_s { +typedef struct dispatch_source_refs_s { TAILQ_ENTRY(dispatch_source_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t dispatch_function_t ds_handler_func; void *ds_handler_ctxt; void *ds_cancel_handler; void *ds_registration_handler; -}; +} *dispatch_source_refs_t; -typedef struct dispatch_source_refs_s *dispatch_source_refs_t; - -struct dispatch_timer_source_refs_s { +typedef struct dispatch_timer_source_refs_s { struct dispatch_source_refs_s _ds_refs; struct dispatch_timer_source_s _ds_timer; -}; + TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list; +} *dispatch_timer_source_refs_t; + +typedef struct dispatch_timer_source_aggregate_refs_s { + struct dispatch_timer_source_refs_s _dsa_refs; + TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list; + TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list; +} *dispatch_timer_source_aggregate_refs_t; #define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) #define _dispatch_wref2ptr(ref) ((void*)~(ref)) #define 
_dispatch_source_from_refs(dr) \ ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref)) #define ds_timer(dr) \ - (((struct dispatch_timer_source_refs_s *)(dr))->_ds_timer) + (((dispatch_timer_source_refs_t)(dr))->_ds_timer) +#define ds_timer_aggregate(ds) \ + ((dispatch_timer_aggregate_t)((ds)->dq_specific_q)) + +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_source_timer_idx(dispatch_source_refs_t dr) +{ + return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); +} // ds_atomic_flags bits #define DSF_CANCELED 1u // cancellation has been requested #define DSF_ARMED 2u // source is armed +#define DISPATCH_SOURCE_HEADER(refs) \ + dispatch_kevent_t ds_dkev; \ + dispatch_##refs##_refs_t ds_refs; \ + unsigned int ds_atomic_flags; \ + unsigned int \ + ds_is_level:1, \ + ds_is_adder:1, \ + ds_is_installed:1, \ + ds_needs_rearm:1, \ + ds_is_timer:1, \ + ds_cancel_is_block:1, \ + ds_handler_is_block:1, \ + ds_registration_is_block:1, \ + dm_connect_handler_called:1, \ + dm_cancel_handler_called:1; \ + unsigned long ds_pending_data_mask; + DISPATCH_CLASS_DECL(source); struct dispatch_source_s { DISPATCH_STRUCT_HEADER(source); DISPATCH_QUEUE_HEADER; - // Instruments always copies DISPATCH_QUEUE_MIN_LABEL_SIZE, which is 64, - // so the remainder of the structure must be big enough + DISPATCH_SOURCE_HEADER(source); + unsigned long ds_ident_hack; + unsigned long ds_data; + unsigned long ds_pending_data; +}; + +// Mach channel state which may contain references to the channel object +// layout must match dispatch_source_refs_s +struct dispatch_mach_refs_s { + TAILQ_ENTRY(dispatch_mach_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + dispatch_mach_handler_function_t dm_handler_func; + void *dm_handler_ctxt; +}; +typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t; + +struct dispatch_mach_reply_refs_s { + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + 
dispatch_kevent_t dm_dkev; + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dm_list; +}; +typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; + +struct dispatch_mach_send_refs_s { + TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; + uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t + dispatch_mach_msg_t dm_checkin; + TAILQ_HEAD(, dispatch_mach_reply_refs_s) dm_replies; + uint32_t volatile dm_disconnect_cnt; + uint32_t volatile dm_sending; + unsigned int dm_needs_mgr:1; + struct dispatch_object_s *volatile dm_tail; + struct dispatch_object_s *volatile dm_head; + mach_port_t dm_send, dm_checkin_port; +}; +typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; + +DISPATCH_CLASS_DECL(mach); +struct dispatch_mach_s { + DISPATCH_STRUCT_HEADER(mach); + DISPATCH_QUEUE_HEADER; + DISPATCH_SOURCE_HEADER(mach); + dispatch_kevent_t dm_dkev; + dispatch_mach_send_refs_t dm_refs; +}; + +DISPATCH_CLASS_DECL(mach_msg); +struct dispatch_mach_msg_s { + DISPATCH_STRUCT_HEADER(mach_msg); + dispatch_mach_msg_destructor_t destructor; + size_t size; union { - char _ds_pad[DISPATCH_QUEUE_MIN_LABEL_SIZE]; - struct { - char dq_label[8]; - dispatch_kevent_t ds_dkev; - dispatch_source_refs_t ds_refs; - unsigned int ds_atomic_flags; - unsigned int - ds_is_level:1, - ds_is_adder:1, - ds_is_installed:1, - ds_needs_rearm:1, - ds_is_timer:1, - ds_cancel_is_block:1, - ds_handler_is_block:1, - ds_registration_is_block:1; - unsigned long ds_data; - unsigned long ds_pending_data; - unsigned long ds_pending_data_mask; - unsigned long ds_ident_hack; - }; + mach_msg_header_t *msg; + char buf[0]; }; }; +#if TARGET_OS_EMBEDDED +#define DSL_HASH_SIZE 64u // must be a power of two +#else +#define DSL_HASH_SIZE 256u // must be a power of two +#endif + void _dispatch_source_xref_dispose(dispatch_source_t ds); -void _dispatch_mach_notify_source_init(void *context); -dispatch_queue_t _dispatch_source_invoke(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t 
ds); -bool _dispatch_source_probe(dispatch_source_t ds); +void _dispatch_source_invoke(dispatch_source_t ds); +unsigned long _dispatch_source_probe(dispatch_source_t ds); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); + +void _dispatch_mach_dispose(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm); +unsigned long _dispatch_mach_probe(dispatch_mach_t dm); +size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); + +void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz); + +void _dispatch_mach_barrier_invoke(void *ctxt); + +unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq); +void _dispatch_mgr_thread(dispatch_queue_t dq); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/time.c b/src/time.c index 4c0285ad9..a1a89242a 100644 --- a/src/time.c +++ b/src/time.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -23,90 +23,112 @@ uint64_t _dispatch_get_nanoseconds(void) { +#if !TARGET_OS_WIN32 struct timeval now; int r = gettimeofday(&now, NULL); dispatch_assert_zero(r); dispatch_assert(sizeof(NSEC_PER_SEC) == 8); dispatch_assert(sizeof(NSEC_PER_USEC) == 8); - return now.tv_sec * NSEC_PER_SEC + now.tv_usec * NSEC_PER_USEC; + return (uint64_t)now.tv_sec * NSEC_PER_SEC + + (uint64_t)now.tv_usec * NSEC_PER_USEC; +#else /* TARGET_OS_WIN32 */ + // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
+ FILETIME ft; + ULARGE_INTEGER li; + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart * 100ull; +#endif /* TARGET_OS_WIN32 */ } -#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) +#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \ + || TARGET_OS_WIN32 DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data; void _dispatch_get_host_time_init(void *context DISPATCH_UNUSED) { +#if !TARGET_OS_WIN32 mach_timebase_info_data_t tbi; (void)dispatch_assume_zero(mach_timebase_info(&tbi)); _dispatch_host_time_data.frac = tbi.numer; _dispatch_host_time_data.frac /= tbi.denom; _dispatch_host_time_data.ratio_1_to_1 = (tbi.numer == tbi.denom); +#else + LARGE_INTEGER freq; + dispatch_assume(QueryPerformanceFrequency(&freq)); + _dispatch_host_time_data.frac = (long double)NSEC_PER_SEC / + (long double)freq.QuadPart; + _dispatch_host_time_data.ratio_1_to_1 = (freq.QuadPart == 1); +#endif /* TARGET_OS_WIN32 */ } #endif dispatch_time_t dispatch_time(dispatch_time_t inval, int64_t delta) { + uint64_t offset; if (inval == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } if ((int64_t)inval < 0) { // wall clock if (delta >= 0) { - if ((int64_t)(inval -= delta) >= 0) { + offset = (uint64_t)delta; + if ((int64_t)(inval -= offset) >= 0) { return DISPATCH_TIME_FOREVER; // overflow } return inval; + } else { + offset = (uint64_t)-delta; + if ((int64_t)(inval += offset) >= -1) { + // -1 is special == DISPATCH_TIME_FOREVER == forever + return (dispatch_time_t)-2ll; // underflow + } + return inval; } - if ((int64_t)(inval -= delta) >= -1) { - // -1 is special == DISPATCH_TIME_FOREVER == forever - return -2; // underflow - } - return inval; } // mach clock - delta = _dispatch_time_nano2mach(delta); if (inval == 0) { inval = _dispatch_absolute_time(); } if (delta >= 0) { - if ((int64_t)(inval += delta) <= 0) { + offset = 
_dispatch_time_nano2mach((uint64_t)delta); + if ((int64_t)(inval += offset) <= 0) { return DISPATCH_TIME_FOREVER; // overflow } return inval; + } else { + offset = _dispatch_time_nano2mach((uint64_t)-delta); + if ((int64_t)(inval -= offset) < 1) { + return 1; // underflow + } + return inval; } - if ((int64_t)(inval += delta) < 1) { - return 1; // underflow - } - return inval; } dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { int64_t nsec; - if (inval) { - nsec = inval->tv_sec * 1000000000ull + inval->tv_nsec; + nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec; } else { - nsec = _dispatch_get_nanoseconds(); + nsec = (int64_t)_dispatch_get_nanoseconds(); } - nsec += delta; if (nsec <= 1) { // -1 is special == DISPATCH_TIME_FOREVER == forever - return delta >= 0 ? DISPATCH_TIME_FOREVER : (uint64_t)-2ll; + return delta >= 0 ? DISPATCH_TIME_FOREVER : (dispatch_time_t)-2ll; } - - return -nsec; + return (dispatch_time_t)-nsec; } uint64_t _dispatch_timeout(dispatch_time_t when) { - uint64_t now; - + dispatch_time_t now; if (when == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } @@ -114,7 +136,7 @@ _dispatch_timeout(dispatch_time_t when) return 0; } if ((int64_t)when < 0) { - when = -(int64_t)when; + when = (dispatch_time_t)-(int64_t)when; now = _dispatch_get_nanoseconds(); return now >= when ? 0 : when - now; } diff --git a/src/trace.h b/src/trace.h index 4969cbe1b..9a0f15289 100644 --- a/src/trace.h +++ b/src/trace.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -29,37 +29,48 @@ #if DISPATCH_USE_DTRACE && !__OBJC2__ +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + #include "provider.h" +#if DISPATCH_USE_DTRACE_INTROSPECTION + #define _dispatch_trace_callout(_c, _f, _dcc) do { \ if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \ dispatch_queue_t _dq = _dispatch_queue_get_current(); \ - char *_label = _dq ? _dq->dq_label : ""; \ + const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \ dispatch_function_t _func = (dispatch_function_t)(_f); \ void *_ctxt = (_c); \ DISPATCH_CALLOUT_ENTRY(_dq, _label, _func, _ctxt); \ _dcc; \ DISPATCH_CALLOUT_RETURN(_dq, _label, _func, _ctxt); \ - return; \ + } else { \ + _dcc; \ } \ - return _dcc; \ } while (0) DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout(void *ctxt, dispatch_function_t f) { - _dispatch_trace_callout(ctxt, f == _dispatch_call_block_and_release && - ctxt ? ((struct Block_basic *)ctxt)->Block_invoke : f, - _dispatch_client_callout(ctxt, f)); + dispatch_function_t func = (f == _dispatch_call_block_and_release && + ctxt ? 
_dispatch_Block_invoke(ctxt) : f); + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout(ctxt, f)); + _dispatch_introspection_callout_return(ctxt, func); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { - _dispatch_trace_callout(ctxt, f, _dispatch_client_callout2(ctxt, i, f)); + dispatch_function_t func = (dispatch_function_t)f; + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout2(ctxt, i, f)); + _dispatch_introspection_callout_return(ctxt, func); } #ifdef __BLOCKS__ @@ -67,9 +78,10 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout_block(dispatch_block_t b) { - struct Block_basic *bb = (void*)b; - _dispatch_trace_callout(b, bb->Block_invoke, - _dispatch_client_callout(b, (dispatch_function_t)bb->Block_invoke)); + dispatch_function_t func = _dispatch_Block_invoke(b); + _dispatch_introspection_callout_entry(b, func); + _dispatch_trace_callout(b, func, _dispatch_client_callout(b, func)); + _dispatch_introspection_callout_return(b, func); } #endif @@ -79,7 +91,7 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) #define _dispatch_trace_continuation(_q, _o, _t) do { \ dispatch_queue_t _dq = (_q); \ - char *_label = _dq ? _dq->dq_label : ""; \ + const char *_label = _dq && _dq->dq_label ? 
_dq->dq_label : ""; \ struct dispatch_object_s *_do = (_o); \ char *_kind; \ dispatch_function_t _func; \ @@ -87,8 +99,8 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ _ctxt = _do->do_ctxt; \ _kind = (char*)dx_kind(_do); \ - if (dx_type(_do) == DISPATCH_SOURCE_KEVENT_TYPE && \ - (_dq) != &_dispatch_mgr_q) { \ + if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ + _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ _func = ((dispatch_source_t)_do)->ds_refs->ds_handler_func; \ } else { \ _func = (dispatch_function_t)_dispatch_queue_invoke; \ @@ -101,7 +113,7 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) _func = (dispatch_function_t)dispatch_semaphore_signal; \ } else if (_dc->dc_func == _dispatch_call_block_and_release) { \ _kind = "block"; \ - _func = ((struct Block_basic *)_dc->dc_ctxt)->Block_invoke;\ + _func = _dispatch_Block_invoke(_dc->dc_ctxt); \ } else { \ _kind = "function"; \ _func = _dc->dc_func; \ @@ -121,6 +133,7 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } while (dou != _tail._do && (dou = dou->do_next)); } + _dispatch_introspection_queue_push_list(dq, _head, _tail); _dispatch_queue_push_list(dq, _head, _tail, n); } @@ -132,9 +145,23 @@ _dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } + _dispatch_introspection_queue_push(dq, _tail); _dispatch_queue_push(dq, _tail); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, + bool wakeup) +{ + if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + struct dispatch_object_s *dou = _tail._do; + _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + } + _dispatch_introspection_queue_push(dq, _tail); + _dispatch_queue_push_wakeup(dq, _tail, wakeup); +} 
+ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) @@ -144,6 +171,7 @@ _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) #define _dispatch_queue_push_list _dispatch_trace_queue_push_list #define _dispatch_queue_push _dispatch_trace_queue_push +#define _dispatch_queue_push_wakeup _dispatch_trace_queue_push_wakeup DISPATCH_ALWAYS_INLINE static inline void @@ -153,12 +181,128 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) { _dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP); } + _dispatch_introspection_queue_pop(dq, dou); +} + +#endif // DISPATCH_USE_DTRACE_INTROSPECTION + +static inline dispatch_function_t +_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr) +{ + dispatch_function_t func = dr->ds_handler_func; + if (func == _dispatch_after_timer_callback) { + dispatch_continuation_t dc = ds->do_ctxt; + func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func : + dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL; + } + return func; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_trace_timer_params_t +_dispatch_trace_timer_params(uintptr_t ident, + struct dispatch_timer_source_s *values, uint64_t deadline, + dispatch_trace_timer_params_t params) +{ + #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \ + == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t)) + #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \ + (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);}) + #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \ + _t = _dispatch_trace_time2nano3(v); _t >= INT64_MAX ? -1ll : \ + (int64_t)_t; }) + if (deadline) { + params->deadline = (int64_t)deadline; + } else { + uint64_t now = (DISPATCH_TIMER_KIND(ident) == + DISPATCH_TIMER_KIND_MACH ? 
_dispatch_absolute_time() : + _dispatch_get_nanoseconds()); + params->deadline = _dispatch_trace_time2nano2(values->target, + values->target < now ? 0 : values->target - now); + } + params->interval = _dispatch_trace_time2nano(values->interval); + params->leeway = _dispatch_trace_time2nano(values->leeway); + return params; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_trace_timer_configure_enabled(void) +{ + return slowpath(DISPATCH_TIMER_CONFIGURE_ENABLED()); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, + struct dispatch_timer_source_s *values) +{ + struct dispatch_trace_timer_params_s params; + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds, + ds->ds_refs), _dispatch_trace_timer_params(ident, values, 0, + ¶ms)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) +{ + if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) { + if (deadline && dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + struct dispatch_trace_timer_params_s params; + DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(ds, dr), + _dispatch_trace_timer_params(ds->ds_ident_hack, + &ds_timer(dr), deadline, ¶ms)); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_wake(dispatch_source_refs_t dr) +{ + if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { + if (dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(ds, dr)); + } + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, + unsigned long missed) +{ + if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { + if (!(data - missed) && dr) { + dispatch_source_t ds = _dispatch_source_from_refs(dr); + DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(ds, dr)); + } + } } + #else -#define 
_dispatch_queue_push_notrace _dispatch_queue_push -#define _dispatch_trace_continuation_pop(dq, dou) (void)(dq) +#define _dispatch_trace_timer_configure_enabled() false +#define _dispatch_trace_timer_configure(ds, ident, values) \ + do { (void)(ds); (void)(ident); (void)(values); } while(0) +#define _dispatch_trace_timer_program(dr, deadline) \ + do { (void)(dr); (void)(deadline); } while(0) +#define _dispatch_trace_timer_wake(dr) \ + do { (void)(dr); } while(0) +#define _dispatch_trace_timer_fire(dr, data, missed) \ + do { (void)(dr); (void)(data); (void)(missed); } while(0) #endif // DISPATCH_USE_DTRACE && !__OBJC2__ +#if !DISPATCH_USE_DTRACE_INTROSPECTION + +#define _dispatch_queue_push_notrace _dispatch_queue_push +#define _dispatch_trace_continuation_pop(dq, dou) \ + do { (void)(dq); (void)(dou); } while(0) + +#endif // !DISPATCH_USE_DTRACE_INTROSPECTION + #endif // __DISPATCH_TRACE__ diff --git a/src/transform.c b/src/transform.c index 775ce4127..e6fa4017e 100644 --- a/src/transform.c +++ b/src/transform.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -37,14 +37,15 @@ enum { _DISPATCH_DATA_FORMAT_UTF16BE = 0x8, _DISPATCH_DATA_FORMAT_UTF_ANY = 0x10, _DISPATCH_DATA_FORMAT_BASE32 = 0x20, - _DISPATCH_DATA_FORMAT_BASE64 = 0x40, + _DISPATCH_DATA_FORMAT_BASE32HEX = 0x40, + _DISPATCH_DATA_FORMAT_BASE64 = 0x80, }; #pragma mark - #pragma mark baseXX tables -static const char base32_encode_table[] = - "ABCDEFGHIJKLMNOPQRSTUVWXYZ23456789"; +static const unsigned char base32_encode_table[] = + "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; static const char base32_decode_table[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -57,7 +58,21 @@ static const char base32_decode_table[] = { static const ssize_t base32_decode_table_size = sizeof(base32_decode_table) / sizeof(*base32_decode_table); -static const char base64_encode_table[] = +static const unsigned char base32hex_encode_table[] = + "0123456789ABCDEFGHIJKLMNOPQRSTUV"; + +static const char base32hex_decode_table[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, + 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -2, -1, -1, -1, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31 +}; +static const ssize_t base32hex_decode_table_size = + sizeof(base32hex_encode_table) / sizeof(*base32hex_encode_table); + +static const unsigned char base64_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static const char base64_decode_table[] = { @@ -104,12 +119,12 @@ static bool _dispatch_transform_buffer_new(dispatch_transform_buffer_s *buffer, size_t required, size_t size) { - size_t remaining = buffer->size - (buffer->ptr.u8 - buffer->start); + size_t remaining = buffer->size - (size_t)(buffer->ptr.u8 - buffer->start); if (required == 0 || remaining < required) { if (buffer->start) { if (buffer->ptr.u8 > 
buffer->start) { dispatch_data_t _new = dispatch_data_create(buffer->start, - buffer->ptr.u8 - buffer->start, NULL, + (size_t)(buffer->ptr.u8 - buffer->start), NULL, DISPATCH_DATA_DESTRUCTOR_FREE); dispatch_data_t _concat = dispatch_data_create_concat( buffer->data, _new); @@ -352,6 +367,7 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) }); if (!success) { + (void)_dispatch_transform_buffer_new(&buffer, 0, 0); dispatch_release(buffer.data); return NULL; } @@ -411,7 +427,8 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) if (range == NULL) { return (bool)false; } - ch = _dispatch_transform_swap_to_host(*(uint64_t*)p, byteOrder); + ch = _dispatch_transform_swap_to_host((uint16_t)*(uint64_t*)p, + byteOrder); dispatch_release(range); skip += 1; } else { @@ -428,7 +445,7 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) if ((ch >= 0xd800) && (ch <= 0xdbff)) { // Surrogate pair - wch = ((ch - 0xd800) << 10); + wch = ((ch - 0xd800u) << 10); if (++i >= max) { // Surrogate byte isn't in this block const void *p; @@ -491,6 +508,7 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) }); if (!success) { + (void)_dispatch_transform_buffer_new(&buffer, 0, 0); dispatch_release(buffer.data); return NULL; } @@ -526,7 +544,8 @@ _dispatch_transform_to_utf16be(dispatch_data_t data) #pragma mark base32 static dispatch_data_t -_dispatch_transform_from_base32(dispatch_data_t data) +_dispatch_transform_from_base32_with_table(dispatch_data_t data, + const char* table, ssize_t table_size) { __block uint64_t x = 0, count = 0, pad = 0; @@ -537,7 +556,7 @@ _dispatch_transform_from_base32(dispatch_data_t data) DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { size_t i, dest_size = (size * 5) / 8; - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(char)); + uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; if (dest == NULL) { return (bool)false; 
@@ -551,21 +570,20 @@ _dispatch_transform_from_base32(dispatch_data_t data) } ssize_t index = bytes[i]; - if (index >= base32_decode_table_size || - base32_decode_table[index] == -1) { + if (index >= table_size || table[index] == -1) { free(dest); return (bool)false; } count++; - char value = base32_decode_table[index]; + char value = table[index]; if (value == -2) { value = 0; pad++; } x <<= 5; - x += value; + x += (uint64_t)value; if ((count & 0x7) == 0) { *ptr++ = (x >> 32) & 0xff; @@ -576,7 +594,7 @@ _dispatch_transform_from_base32(dispatch_data_t data) } } - size_t final = (ptr - dest); + size_t final = (size_t)(ptr - dest); switch (pad) { case 1: final -= 1; @@ -612,15 +630,21 @@ _dispatch_transform_from_base32(dispatch_data_t data) } static dispatch_data_t -_dispatch_transform_to_base32(dispatch_data_t data) +_dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned char* table) { size_t total = dispatch_data_get_size(data); __block size_t count = 0; - size_t dest_size = ((total + 4) * 8) / 5; - dest_size -= dest_size % 8; + if (total > SIZE_T_MAX-4 || ((total+4)/5 > SIZE_T_MAX/8)) { + /* We can't hold larger than size_t in a dispatch_data_t + * and we want to avoid an integer overflow in the next + * calculation. 
+ */ + return NULL; + } - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + size_t dest_size = (total + 4) / 5 * 8; + uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; } @@ -660,26 +684,26 @@ _dispatch_transform_to_base32(dispatch_data_t data) switch (count % 5) { case 0: // a - *ptr++ = base32_encode_table[(curr >> 3) & 0x1f]; + *ptr++ = table[(curr >> 3) & 0x1fu]; break; case 1: // b + c - *ptr++ = base32_encode_table[((last << 2)|(curr >> 6)) & 0x1f]; - *ptr++ = base32_encode_table[(curr >> 1) & 0x1f]; + *ptr++ = table[((last << 2)|(curr >> 6)) & 0x1f]; + *ptr++ = table[(curr >> 1) & 0x1f]; break; case 2: // d - *ptr++ = base32_encode_table[((last << 4)|(curr >> 4)) & 0x1f]; + *ptr++ = table[((last << 4)|(curr >> 4)) & 0x1f]; break; case 3: // e + f - *ptr++ = base32_encode_table[((last << 1)|(curr >> 7)) & 0x1f]; - *ptr++ = base32_encode_table[(curr >> 2) & 0x1f]; + *ptr++ = table[((last << 1)|(curr >> 7)) & 0x1f]; + *ptr++ = table[(curr >> 2) & 0x1f]; break; case 4: // g + h - *ptr++ = base32_encode_table[((last << 3)|(curr >> 5)) & 0x1f]; - *ptr++ = base32_encode_table[curr & 0x1f]; + *ptr++ = table[((last << 3)|(curr >> 5)) & 0x1f]; + *ptr++ = table[curr & 0x1f]; break; } } @@ -691,19 +715,19 @@ _dispatch_transform_to_base32(dispatch_data_t data) break; case 1: // b[4:2] - *ptr++ = base32_encode_table[(bytes[size-1] << 2) & 0x1c]; + *ptr++ = table[(bytes[size-1] << 2) & 0x1c]; break; case 2: // d[4] - *ptr++ = base32_encode_table[(bytes[size-1] << 4) & 0x10]; + *ptr++ = table[(bytes[size-1] << 4) & 0x10]; break; case 3: // e[4:1] - *ptr++ = base32_encode_table[(bytes[size-1] << 1) & 0x1e]; + *ptr++ = table[(bytes[size-1] << 1) & 0x1e]; break; case 4: - // g[4:3] - *ptr++ = base32_encode_table[bytes[size-1] & 0x18]; + // g[2:3] + *ptr++ = table[(bytes[size-1] << 3) & 0x18]; break; } switch (count % 5) { @@ -734,6 +758,33 @@ _dispatch_transform_to_base32(dispatch_data_t data) DISPATCH_DATA_DESTRUCTOR_FREE); } +static 
dispatch_data_t +_dispatch_transform_from_base32(dispatch_data_t data) +{ + return _dispatch_transform_from_base32_with_table(data, base32_decode_table, + base32_decode_table_size); +} + +static dispatch_data_t +_dispatch_transform_to_base32(dispatch_data_t data) +{ + return _dispatch_transform_to_base32_with_table(data, base32_encode_table); +} + +static dispatch_data_t +_dispatch_transform_from_base32hex(dispatch_data_t data) +{ + return _dispatch_transform_from_base32_with_table(data, + base32hex_decode_table, base32hex_decode_table_size); +} + +static dispatch_data_t +_dispatch_transform_to_base32hex(dispatch_data_t data) +{ + return _dispatch_transform_to_base32_with_table(data, + base32hex_encode_table); +} + #pragma mark - #pragma mark base64 @@ -778,7 +829,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) } x <<= 6; - x += value; + x += (uint64_t)value; if ((count & 0x3) == 0) { *ptr++ = (x >> 16) & 0xff; @@ -787,7 +838,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) } } - size_t final = (ptr - dest); + size_t final = (size_t)(ptr - dest); if (pad > 0) { // 2 bytes of pad means only had one char in final group final -= pad; @@ -820,10 +871,16 @@ _dispatch_transform_to_base64(dispatch_data_t data) size_t total = dispatch_data_get_size(data); __block size_t count = 0; - size_t dest_size = ((total + 2) * 4) / 3; - dest_size -= dest_size % 4; + if (total > SIZE_T_MAX-2 || ((total+2)/3> SIZE_T_MAX/4)) { + /* We can't hold larger than size_t in a dispatch_data_t + * and we want to avoid an integer overflow in the next + * calculation. 
+ */ + return NULL; + } - uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); + size_t dest_size = (total + 2) / 3 * 4; + uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; } @@ -910,6 +967,9 @@ dispatch_data_create_with_transform(dispatch_data_t data, { if (input->type == _DISPATCH_DATA_FORMAT_UTF_ANY) { input = _dispatch_transform_detect_utf(data); + if (input == NULL) { + return NULL; + } } if ((input->type & ~output->input_mask) != 0) { @@ -950,8 +1010,8 @@ dispatch_data_create_with_transform(dispatch_data_t data, const struct dispatch_data_format_type_s _dispatch_data_format_type_none = { .type = _DISPATCH_DATA_FORMAT_NONE, - .input_mask = ~0, - .output_mask = ~0, + .input_mask = ~0u, + .output_mask = ~0u, .decode = NULL, .encode = NULL, }; @@ -959,19 +1019,30 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_none = { const struct dispatch_data_format_type_s _dispatch_data_format_type_base32 = { .type = _DISPATCH_DATA_FORMAT_BASE32, .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .decode = _dispatch_transform_from_base32, .encode = _dispatch_transform_to_base32, }; +const struct dispatch_data_format_type_s _dispatch_data_format_type_base32hex = +{ + .type = _DISPATCH_DATA_FORMAT_BASE32HEX, + .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), + .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), + .decode = _dispatch_transform_from_base32hex, + .encode = _dispatch_transform_to_base32hex, +}; + const struct 
dispatch_data_format_type_s _dispatch_data_format_type_base64 = { .type = _DISPATCH_DATA_FORMAT_BASE64, .input_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .output_mask = (_DISPATCH_DATA_FORMAT_NONE | _DISPATCH_DATA_FORMAT_BASE32 | - _DISPATCH_DATA_FORMAT_BASE64), + _DISPATCH_DATA_FORMAT_BASE32HEX | _DISPATCH_DATA_FORMAT_BASE64), .decode = _dispatch_transform_from_base64, .encode = _dispatch_transform_to_base64, }; diff --git a/tools/dispatch_timers.d b/tools/dispatch_timers.d new file mode 100755 index 000000000..282150501 --- /dev/null +++ b/tools/dispatch_timers.d @@ -0,0 +1,89 @@ +#!/usr/sbin/dtrace -s + +/* + * Copyright (c) 2012-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * Usage: dispatch_timers.d -p [pid] + * traced process must have been executed with + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with + * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug + */ + +#pragma D option quiet +#pragma D option zdefs + +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + +dispatch$target:libdispatch*.dylib::timer-configure, +dispatch$target:libdispatch*.dylib::timer-program, +dispatch$target:libdispatch*.dylib::timer-wake, +dispatch$target:libdispatch*.dylib::timer-fire /!start/ { + start = walltimestamp; +} + +/* + * Trace dispatch timer configuration and programming: + * Timer configuration indicates that dispatch_source_set_timer() was called. + * Timer programming indicates that the dispatch manager is about to sleep + * for 'deadline' ns (but may wake up earlier if non-timer events occur). + * Time parameters are in nanoseconds, a value of -1 means "forever". + * + * probe timer-configure/-program(dispatch_source_t source, + * dispatch_function_t function, dispatch_trace_timer_params_t params) + */ +dispatch$target:libdispatch*.dylib::timer-configure, +dispatch$target:libdispatch*.dylib::timer-program { + this->p = (dispatch_trace_timer_params_t)copyin(arg2, + sizeof(struct dispatch_trace_timer_params_s)); + printf("%8dus %-15s: 0x%0?p deadline: %11dns interval: %11dns leeway: %11dns", + (walltimestamp-start)/1000, probename, arg0, + this->p ? this->p->deadline : 0, this->p ? this->p->interval : 0, + this->p ? this->p->leeway : 0); + usym(arg1); + printf("\n"); +} +dispatch$target:libdispatch*.dylib::timer-configure { + printf(" / --- Begin ustack"); + ustack(); + printf(" \ --- End ustack\n"); +} + +/* + * Trace dispatch timer wakes and fires: + * Timer wakes indicate that the dispatch manager woke up due to expiry of the + * deadline for the specified timer. 
+ * Timer fires indicate that that the dispatch manager scheduled the event + * handler of the specified timer for asynchronous execution (may occur without + * a corresponding timer wake if the manager was awake processing other events + * when the timer deadline expired). + * + * probe timer-wake/-fire(dispatch_source_t source, + * dispatch_function_t function) + */ +dispatch$target:libdispatch*.dylib::timer-wake, +dispatch$target:libdispatch*.dylib::timer-fire { + printf("%8dus %-15s: 0x%0?p%-70s", (walltimestamp-start)/1000, probename, + arg0, ""); + usym(arg1); + printf("\n"); +} diff --git a/tools/dispatch_trace.d b/tools/dispatch_trace.d index 9059e4ed2..7f5386710 100755 --- a/tools/dispatch_trace.d +++ b/tools/dispatch_trace.d @@ -1,7 +1,7 @@ -#!/usr/sbin/dtrace -Z -s +#!/usr/sbin/dtrace -s /* - * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -21,12 +21,14 @@ */ /* - * Usage: dispatch_dtrace.d -p [pid] + * Usage: dispatch_trace.d -p [pid] * traced process must have been executed with + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug */ #pragma D option quiet +#pragma D option zdefs #pragma D option bufsize=16m BEGIN { @@ -35,25 +37,22 @@ BEGIN { "Item", "Kind", "Context", "Symbol"); } -dispatch$target:libdispatch_profile.dylib::queue-push, -dispatch$target:libdispatch_debug.dylib::queue-push, -dispatch$target:libdispatch_profile.dylib::queue-pop, -dispatch$target:libdispatch_debug.dylib::queue-pop, -dispatch$target:libdispatch_profile.dylib::callout-entry, -dispatch$target:libdispatch_debug.dylib::callout-entry, -dispatch$target:libdispatch_profile.dylib::callout-return, -dispatch$target:libdispatch_debug.dylib::callout-return /!start/ { +dispatch$target:libdispatch*.dylib::queue-push, +dispatch$target:libdispatch*.dylib::queue-pop, 
+dispatch$target:libdispatch*.dylib::callout-entry, +dispatch$target:libdispatch*.dylib::callout-return /!start/ { start = walltimestamp; } -/* probe queue-push/-pop(dispatch_queue_t queue, const char *label, +/* + * Trace queue push and pop operations: + * + * probe queue-push/-pop(dispatch_queue_t queue, const char *label, * dispatch_object_t item, const char *kind, * dispatch_function_t function, void *context) */ -dispatch$target:libdispatch_profile.dylib::queue-push, -dispatch$target:libdispatch_debug.dylib::queue-push, -dispatch$target:libdispatch_profile.dylib::queue-pop, -dispatch$target:libdispatch_debug.dylib::queue-pop { +dispatch$target:libdispatch*.dylib::queue-push, +dispatch$target:libdispatch*.dylib::queue-pop { printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s0x%0?p %-14s0x%0?p", (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, copyinstr(arg1, 42), arg2, copyinstr(arg3, 13), arg5); @@ -61,13 +60,14 @@ dispatch$target:libdispatch_debug.dylib::queue-pop { printf("\n"); } -/* probe callout-entry/-return(dispatch_queue_t queue, const char *label, +/* + * Trace callouts to client functions: + * + * probe callout-entry/-return(dispatch_queue_t queue, const char *label, * dispatch_function_t function, void *context) */ -dispatch$target:libdispatch_profile.dylib::callout-entry, -dispatch$target:libdispatch_debug.dylib::callout-entry, -dispatch$target:libdispatch_profile.dylib::callout-return, -dispatch$target:libdispatch_debug.dylib::callout-return { +dispatch$target:libdispatch*.dylib::callout-entry, +dispatch$target:libdispatch*.dylib::callout-return { printf("%-8d %-3d 0x%08p %-35s%-15s0x%0?p %-43s%-?s %-14s0x%0?p", (walltimestamp-start)/1000, cpu, tid, probefunc, probename, arg0, copyinstr(arg1, 42), "", "", arg3); diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig new file mode 100644 index 000000000..d0f431deb --- /dev/null +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ 
-0,0 +1,26 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +BUILD_VARIANTS = normal +INSTALL_PATH = /usr/lib/system/introspection +INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system/introspection +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 +CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection +OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-static.xcconfig b/xcodeconfig/libdispatch-static.xcconfig new file mode 100644 index 000000000..632e01cef --- /dev/null +++ b/xcodeconfig/libdispatch-static.xcconfig @@ -0,0 +1,25 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal +SKIP_INSTALL = YES +EXCLUDED_SOURCE_FILE_NAMES = * +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 DISPATCH_USE_DTRACE=0 diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index aae0bccde..5877e5019 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -1,12 +1,36 @@ +# +# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + _OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable _OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable _OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable _OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable _OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable _OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable -_OBJC_CLASS_$_OS_dispatch_data __dispatch_data_vtable +_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable _OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable _OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable _OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable + +__dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap diff --git a/xcodeconfig/libdispatch.interposable b/xcodeconfig/libdispatch.interposable new file mode 100644 index 000000000..f3377617b --- /dev/null +++ b/xcodeconfig/libdispatch.interposable @@ -0,0 +1,28 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +# Interposable API hooks in the introspection library + +_dispatch_introspection_hook_queue_create +_dispatch_introspection_hook_queue_destroy +_dispatch_introspection_hook_queue_item_enqueue +_dispatch_introspection_hook_queue_item_dequeue +_dispatch_introspection_hook_queue_callout_begin +_dispatch_introspection_hook_queue_callout_end diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 64787b7a4..8870ea9e4 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -1,40 +1,75 @@ -_OBJC_CLASS_$_OS_object -_OBJC_METACLASS_$_OS_object +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +# Must be kept in sync with ObjC TFB checks in object_internal.h + +# dispatch_object_t classes _OBJC_CLASS_$_OS_dispatch_object -_OBJC_METACLASS_$_OS_dispatch_object _OBJC_CLASS_$_OS_dispatch_semaphore __OS_dispatch_semaphore_vtable -_OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_CLASS_$_OS_dispatch_group __OS_dispatch_group_vtable -_OBJC_METACLASS_$_OS_dispatch_group _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable -_OBJC_METACLASS_$_OS_dispatch_queue _OBJC_CLASS_$_OS_dispatch_queue_root __OS_dispatch_queue_root_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_CLASS_$_OS_dispatch_queue_runloop +__OS_dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr __OS_dispatch_queue_mgr_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_CLASS_$_OS_dispatch_queue_specific_queue __OS_dispatch_queue_specific_queue_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable -_OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable -_OBJC_METACLASS_$_OS_dispatch_source -_OBJC_CLASS_$_OS_dispatch_data -__OS_dispatch_data_vtable -_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_mach +__OS_dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg +__OS_dispatch_mach_msg_vtable _OBJC_CLASS_$_OS_dispatch_io __OS_dispatch_io_vtable -_OBJC_METACLASS_$_OS_dispatch_io _OBJC_CLASS_$_OS_dispatch_operation __OS_dispatch_operation_vtable -_OBJC_METACLASS_$_OS_dispatch_operation _OBJC_CLASS_$_OS_dispatch_disk __OS_dispatch_disk_vtable +# non-dispatch_object_t classes +_OBJC_CLASS_$_OS_object +_OBJC_CLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_data_empty +# metaclasses +_OBJC_METACLASS_$_OS_dispatch_object +_OBJC_METACLASS_$_OS_dispatch_semaphore +_OBJC_METACLASS_$_OS_dispatch_group +_OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_queue_root 
+_OBJC_METACLASS_$_OS_dispatch_queue_runloop +_OBJC_METACLASS_$_OS_dispatch_queue_mgr +_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue +_OBJC_METACLASS_$_OS_dispatch_queue_attr +_OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_mach +_OBJC_METACLASS_$_OS_dispatch_mach_msg +_OBJC_METACLASS_$_OS_dispatch_io +_OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk +_OBJC_METACLASS_$_OS_object +_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.unexport b/xcodeconfig/libdispatch.unexport index 035bd9c5e..dba78b92e 100644 --- a/xcodeconfig/libdispatch.unexport +++ b/xcodeconfig/libdispatch.unexport @@ -1,12 +1,34 @@ +# +# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + __dispatch_semaphore_vtable __dispatch_group_vtable __dispatch_queue_vtable __dispatch_queue_root_vtable +__dispatch_queue_runloop_vtable __dispatch_queue_mgr_vtable __dispatch_queue_specific_queue_vtable __dispatch_queue_attr_vtable __dispatch_source_vtable -__dispatch_data_vtable +__dispatch_mach_vtable +__dispatch_mach_msg_vtable __dispatch_io_vtable __dispatch_operation_vtable __dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index e651bfcb0..4904b9d64 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -1,5 +1,5 @@ // -// Copyright (c) 2010-2011 Apple Inc. All rights reserved. +// Copyright (c) 2010-2013 Apple Inc. All rights reserved. // // @APPLE_APACHE_LICENSE_HEADER_START@ // @@ -34,12 +34,14 @@ OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os OS_PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/os OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os OS_PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/os -HEADER_SEARCH_PATHS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PROJECT_DIR) +HEADER_SEARCH_PATHS = $(PROJECT_DIR) +LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO BUILD_VARIANTS = normal debug profile ONLY_ACTIVE_ARCH = NO -GCC_VERSION = com.apple.compilers.llvm.clang.1_0 +CLANG_LINK_OBJC_RUNTIME = NO +GCC_C_LANGUAGE_STANDARD = gnu11 GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported @@ -50,23 +52,30 @@ GCC_WARN_ABOUT_RETURN_TYPE = YES GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES GCC_WARN_ABOUT_MISSING_NEWLINE = YES GCC_WARN_UNUSED_VARIABLE = YES +GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES +GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES +GCC_WARN_SIGN_COMPARE = YES +GCC_WARN_UNINITIALIZED_AUTOS = YES +CLANG_WARN_EMPTY_BODY = 
YES +CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES +CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES +CLANG_WARN_DOCUMENTATION_COMMENTS = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s -GCC_THUMB_SUPPORT[arch=armv6] = NO GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -OTHER_CFLAGS = -fdiagnostics-show-option -fverbose-asm +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option +OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer -OTHER_CFLAGS_normal[arch=armv6][sdk=macosx*] = OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) -UMBRELLA_LDFLAGS = -umbrella System -UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = +UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -Wl,-upward-lsystem_asl +UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto OBJC_LDFLAGS[arch=i386][sdk=macosx*] = 
-OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) +OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m +PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(PLATFORM_LDFLAGS) diff --git a/xcodeconfig/libdispatch_macosx.aliases b/xcodeconfig/libdispatch_macosx.aliases new file mode 100644 index 000000000..a7f61c5f0 --- /dev/null +++ b/xcodeconfig/libdispatch_macosx.aliases @@ -0,0 +1,21 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure diff --git a/xcodescripts/install-dtrace.sh b/xcodescripts/install-dtrace.sh new file mode 100644 index 000000000..c0eb3647e --- /dev/null +++ b/xcodescripts/install-dtrace.sh @@ -0,0 +1,30 @@ +#!/bin/bash -e +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +if [ "${PLATFORM_NAME}" = iphoneos ]; then exit 0; fi + +if [ "${DEPLOYMENT_LOCATION}" != YES ]; then + DSTROOT="${CONFIGURATION_BUILD_DIR}" +fi + +mkdir -p "${DSTROOT}${PUBLIC_HEADERS_FOLDER_PATH}" || true +cp -X "${SCRIPT_INPUT_FILE_1}" \ + "${DSTROOT}${PUBLIC_HEADERS_FOLDER_PATH}/${SCRIPT_OUTPUT_FILE_0##/*/}" diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh index 2ea1f6803..d9e28af6c 100755 --- a/xcodescripts/install-manpages.sh +++ b/xcodescripts/install-manpages.sh @@ -1,6 +1,6 @@ #!/bin/bash -e # -# Copyright (c) 2010-2011 Apple Inc. All rights reserved. +# Copyright (c) 2010-2012 Apple Inc. All rights reserved. 
# # @APPLE_APACHE_LICENSE_HEADER_START@ # From 396510d18ab6f0496e1ba7be643ee436f5951d84 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Fri, 24 Oct 2014 18:06:19 +0000 Subject: [PATCH 05/18] libdispatch-442.1.4 Imported from libdispatch-442.1.4.tar.gz --- config/config.h | 6 + dispatch/base.h | 26 +- dispatch/block.h | 422 +++ dispatch/dispatch.h | 5 +- dispatch/introspection.h | 21 + dispatch/io.h | 26 +- dispatch/object.h | 238 +- dispatch/queue.h | 228 +- dispatch/source.h | 10 +- libdispatch.xcodeproj/project.pbxproj | 78 +- os/object.h | 69 +- private/data_private.h | 2 +- private/introspection_private.h | 90 +- private/io_private.h | 40 +- private/layout_private.h | 86 + private/mach_private.h | 3 +- private/private.h | 3 +- private/queue_private.h | 81 +- private/source_private.h | 28 +- private/voucher_activity_private.h | 544 ++++ private/voucher_private.h | 375 +++ src/allocator.c | 44 +- src/allocator_internal.h | 23 +- src/apply.c | 36 +- src/data.m | 6 +- src/init.c | 325 +- src/inline_internal.h | 996 ++++++ src/internal.h | 206 +- src/introspection.c | 60 +- src/introspection_internal.h | 6 + src/io.c | 17 +- src/object.c | 44 +- src/object.m | 173 +- src/object_internal.h | 4 +- src/once.c | 46 +- src/queue.c | 2218 ++++++++++--- src/queue_internal.h | 509 ++- src/semaphore.c | 35 +- src/semaphore_internal.h | 9 + src/shims.h | 93 + src/shims/atomic.h | 27 +- src/shims/atomic_sfb.h | 13 +- src/shims/hw_config.h | 144 +- src/shims/perfmon.h | 28 +- src/shims/time.h | 16 - src/shims/tsd.h | 55 +- src/shims/yield.h | 156 + src/source.c | 813 +++-- src/source_internal.h | 37 +- src/time.c | 4 +- src/trace.h | 102 +- src/voucher.c | 2774 +++++++++++++++++ src/voucher_internal.h | 929 ++++++ .../libdispatch-introspection.xcconfig | 4 +- .../libdispatch-resolver_iphoneos.order | 20 + xcodeconfig/libdispatch.aliases | 19 +- xcodeconfig/libdispatch.interposable | 1 + 
xcodeconfig/libdispatch.order | 7 +- xcodeconfig/libdispatch.xcconfig | 34 +- xcodeconfig/libdispatch_iphoneos.order | 20 + xcodeconfig/libdispatch_macosx.aliases | 2 - xcodeconfig/libdispatch_objc.aliases | 34 + xcodescripts/install-headers.sh | 2 + xcodescripts/mig-headers.sh | 2 +- 64 files changed, 10688 insertions(+), 1786 deletions(-) create mode 100644 dispatch/block.h create mode 100644 private/layout_private.h create mode 100644 private/voucher_activity_private.h create mode 100644 private/voucher_private.h create mode 100644 src/inline_internal.h create mode 100644 src/shims/yield.h create mode 100644 src/voucher.c create mode 100644 src/voucher_internal.h create mode 100644 xcodeconfig/libdispatch-resolver_iphoneos.order create mode 100644 xcodeconfig/libdispatch_iphoneos.order create mode 100644 xcodeconfig/libdispatch_objc.aliases diff --git a/config/config.h b/config/config.h index d2ad0ffa6..894428199 100644 --- a/config/config.h +++ b/config/config.h @@ -108,6 +108,12 @@ /* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ #define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 +/* Define to 1 if you have the `_pthread_workqueue_init' function. */ +#define HAVE__PTHREAD_WORKQUEUE_INIT 1 + +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_QOS_H 1 + /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 diff --git a/dispatch/base.h b/dispatch/base.h index af17ccf53..01d5ec5db 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -47,6 +47,7 @@ #define DISPATCH_WARN_RESULT __attribute__((__warn_unused_result__)) #define DISPATCH_MALLOC __attribute__((__malloc__)) #define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__)) +#define DISPATCH_UNAVAILABLE __attribute__((__unavailable__)) #else /*! @parseOnly */ #define DISPATCH_NORETURN @@ -80,9 +81,10 @@ #define DISPATCH_MALLOC /*! @parseOnly */ #define DISPATCH_ALWAYS_INLINE +/*! 
@parseOnly */ +#define DISPATCH_UNAVAILABLE #endif - #if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ defined(__cplusplus) #define DISPATCH_EXPORT extern "C" extern __declspec(dllexport) @@ -110,17 +112,35 @@ #define DISPATCH_EXPECT(x, v) (x) #endif -#if defined(__has_feature) -#if __has_feature(objc_fixed_enum) +#ifndef DISPATCH_RETURNS_RETAINED_BLOCK +#if defined(__has_attribute) +#if __has_attribute(ns_returns_retained) +#define DISPATCH_RETURNS_RETAINED_BLOCK __attribute__((__ns_returns_retained__)) +#else +#define DISPATCH_RETURNS_RETAINED_BLOCK +#endif +#else +#define DISPATCH_RETURNS_RETAINED_BLOCK +#endif +#endif + +#if defined(__has_feature) && defined(__has_extension) +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) #define DISPATCH_ENUM(name, type, ...) \ typedef enum : type { __VA_ARGS__ } name##_t #else #define DISPATCH_ENUM(name, type, ...) \ enum { __VA_ARGS__ }; typedef type name##_t #endif +#if __has_feature(enumerator_attributes) +#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING +#else +#define DISPATCH_ENUM_AVAILABLE_STARTING(...) +#endif #else #define DISPATCH_ENUM(name, type, ...) \ enum { __VA_ARGS__ }; typedef type name##_t +#define DISPATCH_ENUM_AVAILABLE_STARTING(...) #endif typedef void (*dispatch_function_t)(void *); diff --git a/dispatch/block.h b/dispatch/block.h new file mode 100644 index 000000000..e82f665b3 --- /dev/null +++ b/dispatch/block.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2014 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_BLOCK__ +#define __DISPATCH_BLOCK__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +#ifdef __BLOCKS__ + +/*! + * @group Dispatch block objects + */ + +__BEGIN_DECLS + +/*! + * @typedef dispatch_block_flags_t + * Flags to pass to the dispatch_block_create* functions. + * + * @const DISPATCH_BLOCK_BARRIER + * Flag indicating that a dispatch block object should act as a barrier block + * when submitted to a DISPATCH_QUEUE_CONCURRENT queue. + * See dispatch_barrier_async() for details. + * This flag has no effect when the dispatch block object is invoked directly. + * + * @const DISPATCH_BLOCK_DETACHED + * Flag indicating that a dispatch block object should execute disassociated + * from current execution context attributes such as QOS class, os_activity_t + * and properties of the current IPC request (if any). If invoked directly, the + * block object will remove these attributes from the calling thread for the + * duration of the block body (before applying attributes assigned to the block + * object, if any). If submitted to a queue, the block object will be executed + * with the attributes of the queue (or any attributes specifically assigned to + * the block object). + * + * @const DISPATCH_BLOCK_ASSIGN_CURRENT + * Flag indicating that a dispatch block object should be assigned the execution + * context attributes that are current at the time the block object is created. 
+ * This applies to attributes such as QOS class, os_activity_t and properties of + * the current IPC request (if any). If invoked directly, the block object will + * apply these attributes to the calling thread for the duration of the block + * body. If the block object is submitted to a queue, this flag replaces the + * default behavior of associating the submitted block instance with the + * execution context attributes that are current at the time of submission. + * If a specific QOS class is assigned with DISPATCH_BLOCK_NO_QOS_CLASS or + * dispatch_block_create_with_qos_class(), that QOS class takes precedence over + * the QOS class assignment indicated by this flag. + * + * @const DISPATCH_BLOCK_NO_QOS_CLASS + * Flag indicating that a dispatch block object should be not be assigned a QOS + * class. If invoked directly, the block object will be executed with the QOS + * class of the calling thread. If the block object is submitted to a queue, + * this replaces the default behavior of associating the submitted block + * instance with the QOS class current at the time of submission. + * This flag is ignored if a specific QOS class is assigned with + * dispatch_block_create_with_qos_class(). + * + * @const DISPATCH_BLOCK_INHERIT_QOS_CLASS + * Flag indicating that execution of a dispatch block object submitted to a + * queue should prefer the QOS class assigned to the queue over the QOS class + * assigned to the block (resp. associated with the block at the time of + * submission). The latter will only be used if the queue in question does not + * have an assigned QOS class, as long as doing so does not result in a QOS + * class lower than the QOS class inherited from the queue's target queue. + * This flag is the default when a dispatch block object is submitted to a queue + * for asynchronous execution and has no effect when the dispatch block object + * is invoked directly. It is ignored if DISPATCH_BLOCK_ENFORCE_QOS_CLASS is + * also passed. 
+ * + * @const DISPATCH_BLOCK_ENFORCE_QOS_CLASS + * Flag indicating that execution of a dispatch block object submitted to a + * queue should prefer the QOS class assigned to the block (resp. associated + * with the block at the time of submission) over the QOS class assigned to the + * queue, as long as doing so will not result in a lower QOS class. + * This flag is the default when a dispatch block object is submitted to a queue + * for synchronous execution or when the dispatch block object is invoked + * directly. + */ +DISPATCH_ENUM(dispatch_block_flags, unsigned long, + DISPATCH_BLOCK_BARRIER + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1, + DISPATCH_BLOCK_DETACHED + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2, + DISPATCH_BLOCK_ASSIGN_CURRENT + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4, + DISPATCH_BLOCK_NO_QOS_CLASS + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8, + DISPATCH_BLOCK_INHERIT_QOS_CLASS + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10, + DISPATCH_BLOCK_ENFORCE_QOS_CLASS + DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20, +); + +/*! + * @function dispatch_block_create + * + * @abstract + * Create a new dispatch block object on the heap from an existing block and + * the given flags. + * + * @discussion + * The provided block is Block_copy'ed to the heap and retained by the newly + * created dispatch block object. + * + * The returned dispatch block object is intended to be submitted to a dispatch + * queue with dispatch_async() and related functions, but may also be invoked + * directly. Both operations can be performed an arbitrary number of times but + * only the first completed execution of a dispatch block object can be waited + * on with dispatch_block_wait() or observed with dispatch_block_notify(). 
+ * + * If the returned dispatch block object is submitted to a dispatch queue, the + * submitted block instance will be associated with the QOS class current at the + * time of submission, unless one of the following flags assigned a specific QOS + * class (or no QOS class) at the time of block creation: + * - DISPATCH_BLOCK_ASSIGN_CURRENT + * - DISPATCH_BLOCK_NO_QOS_CLASS + * - DISPATCH_BLOCK_DETACHED + * The QOS class the block object will be executed with also depends on the QOS + * class assigned to the queue and which of the following flags was specified or + * defaulted to: + * - DISPATCH_BLOCK_INHERIT_QOS_CLASS (default for asynchronous execution) + * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS (default for synchronous execution) + * See description of dispatch_block_flags_t for details. + * + * If the returned dispatch block object is submitted directly to a serial queue + * and is configured to execute with a specific QOS class, the system will make + * a best effort to apply the necessary QOS overrides to ensure that blocks + * submitted earlier to the serial queue are executed at that same QOS class or + * higher. + * + * @param flags + * Configuration flags for the block object. + * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t + * results in NULL being returned. + * + * @param block + * The block to create the dispatch block object from. + * + * @result + * The newly created dispatch block object, or NULL. + * When not building with Objective-C ARC, must be released with a -[release] + * message or the Block_release() function. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_block_t +dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block); + +/*! 
+ * @function dispatch_block_create_with_qos_class + * + * @abstract + * Create a new dispatch block object on the heap from an existing block and + * the given flags, and assign it the specified QOS class and relative priority. + * + * @discussion + * The provided block is Block_copy'ed to the heap and retained by the newly + * created dispatch block object. + * + * The returned dispatch block object is intended to be submitted to a dispatch + * queue with dispatch_async() and related functions, but may also be invoked + * directly. Both operations can be performed an arbitrary number of times but + * only the first completed execution of a dispatch block object can be waited + * on with dispatch_block_wait() or observed with dispatch_block_notify(). + * + * If invoked directly, the returned dispatch block object will be executed with + * the assigned QOS class as long as that does not result in a lower QOS class + * than what is current on the calling thread. + * + * If the returned dispatch block object is submitted to a dispatch queue, the + * QOS class it will be executed with depends on the QOS class assigned to the + * block, the QOS class assigned to the queue and which of the following flags + * was specified or defaulted to: + * - DISPATCH_BLOCK_INHERIT_QOS_CLASS: default for asynchronous execution + * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS: default for synchronous execution + * See description of dispatch_block_flags_t for details. + * + * If the returned dispatch block object is submitted directly to a serial queue + * and is configured to execute with a specific QOS class, the system will make + * a best effort to apply the necessary QOS overrides to ensure that blocks + * submitted earlier to the serial queue are executed at that same QOS class or + * higher. + * + * @param flags + * Configuration flags for the new block object. + * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t + * results in NULL being returned. 
+ * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * - QOS_CLASS_UNSPECIFIED + * Passing QOS_CLASS_UNSPECIFIED is equivalent to specifying the + * DISPATCH_BLOCK_NO_QOS_CLASS flag. Passing any other value results in NULL + * being returned. + * + * @param relative_priority + * A relative priority within the QOS class. This value is a negative + * offset from the maximum supported scheduler priority for the given class. + * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY + * results in NULL being returned. + * + * @param block + * The block to create the dispatch block object from. + * + * @result + * The newly created dispatch block object, or NULL. + * When not building with Objective-C ARC, must be released with a -[release] + * message or the Block_release() function. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_block_t +dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, + dispatch_qos_class_t qos_class, int relative_priority, + dispatch_block_t block); + +/*! + * @function dispatch_block_perform + * + * @abstract + * Create, synchronously execute and release a dispatch block object from the + * specified block and flags. + * + * @discussion + * Behaves identically to the sequence + * + * dispatch_block_t b = dispatch_block_create(flags, block); + * b(); + * Block_release(b); + * + * but may be implemented more efficiently internally by not requiring a copy + * to the heap of the specified block or the allocation of a new block object. + * + * @param flags + * Configuration flags for the temporary block object. + * The result of passing a value that is not a bitwise OR of flags from + * dispatch_block_flags_t is undefined. 
+ * + * @param block + * The block to create the temporary block object from. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +void +dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block); + +/*! + * @function dispatch_block_wait + * + * @abstract + * Wait synchronously until execution of the specified dispatch block object has + * completed or until the specified timeout has elapsed. + * + * @discussion + * This function will return immediately if execution of the block object has + * already completed. + * + * It is not possible to wait for multiple executions of the same block object + * with this interface; use dispatch_group_wait() for that purpose. A single + * dispatch block object may either be waited on once and executed once, + * or it may be executed any number of times. The behavior of any other + * combination is undefined. Submission to a dispatch queue counts as an + * execution, even if cancelation (dispatch_block_cancel) means the block's + * code never runs. + * + * The result of calling this function from multiple threads simultaneously + * with the same dispatch block object is undefined, but note that doing so + * would violate the rules described in the previous paragraph. + * + * If this function returns indicating that the specified timeout has elapsed, + * then that invocation does not count as the one allowed wait. + * + * If at the time this function is called, the specified dispatch block object + * has been submitted directly to a serial queue, the system will make a best + * effort to apply the necessary QOS overrides to ensure that the block and any + * blocks submitted earlier to that serial queue are executed at the QOS class + * (or higher) of the thread calling dispatch_block_wait(). + * + * @param block + * The dispatch block object to wait on. 
+ * The result of passing NULL or a block object not returned by one of the + * dispatch_block_create* functions is undefined. + * + * @param timeout + * When to timeout (see dispatch_time). As a convenience, there are the + * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants. + * + * @result + * Returns zero on success (the dispatch block object completed within the + * specified timeout) or non-zero on error (i.e. timed out). + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +long +dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); + +/*! + * @function dispatch_block_notify + * + * @abstract + * Schedule a notification block to be submitted to a queue when the execution + * of a specified dispatch block object has completed. + * + * @discussion + * This function will submit the notification block immediately if execution of + * the observed block object has already completed. + * + * It is not possible to be notified of multiple executions of the same block + * object with this interface, use dispatch_group_notify() for that purpose. + * + * A single dispatch block object may either be observed one or more times + * and executed once, or it may be executed any number of times. The behavior + * of any other combination is undefined. Submission to a dispatch queue + * counts as an execution, even if cancellation (dispatch_block_cancel) means + * the block's code never runs. + * + * If multiple notification blocks are scheduled for a single block object, + * there is no defined order in which the notification blocks will be submitted + * to their associated queues. + * + * @param block + * The dispatch block object to observe. + * The result of passing NULL or a block object not returned by one of the + * dispatch_block_create* functions is undefined. + * + * @param queue + * The queue to which the supplied notification block will be submitted when + * the observed block completes. 
+ * + * @param notification_block + * The notification block to submit when the observed block object completes. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, + dispatch_block_t notification_block); + +/*! + * @function dispatch_block_cancel + * + * @abstract + * Asynchronously cancel the specified dispatch block object. + * + * @discussion + * Cancellation causes any future execution of the dispatch block object to + * return immediately, but does not affect any execution of the block object + * that is already in progress. + * + * Release of any resources associated with the block object will be delayed + * until execution of the block object is next attempted (or any execution + * already in progress completes). + * + * NOTE: care needs to be taken to ensure that a block object that may be + * canceled does not capture any resources that require execution of the + * block body in order to be released (e.g. memory allocated with + * malloc(3) that the block body calls free(3) on). Such resources will + * be leaked if the block body is never executed due to cancellation. + * + * @param block + * The dispatch block object to cancel. + * The result of passing NULL or a block object not returned by one of the + * dispatch_block_create* functions is undefined. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_block_cancel(dispatch_block_t block); + +/*! + * @function dispatch_block_testcancel + * + * @abstract + * Tests whether the given dispatch block object has been canceled. + * + * @param block + * The dispatch block object to test. + * The result of passing NULL or a block object not returned by one of the + * dispatch_block_create* functions is undefined. + * + * @result + * Non-zero if canceled and zero if not canceled. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW +long +dispatch_block_testcancel(dispatch_block_t block); + +__END_DECLS + +#endif // __BLOCKS__ + +#endif // __DISPATCH_BLOCK__ diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index cb5af230d..722b0c90d 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -37,7 +37,7 @@ #define __OSX_AVAILABLE_STARTING(x, y) #endif -#define DISPATCH_API_VERSION 20130520 +#define DISPATCH_API_VERSION 20140804 #ifndef __DISPATCH_BUILDING_DISPATCH__ @@ -47,9 +47,10 @@ #include #include -#include #include +#include #include +#include #include #include #include diff --git a/dispatch/introspection.h b/dispatch/introspection.h index 9e9634118..d20d90ad8 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -108,6 +108,27 @@ void dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, dispatch_object_t item); +/*! + * @function dispatch_introspection_hook_queue_item_complete + * + * @abstract + * Interposable hook function called when an item previously dequeued from a + * dispatch queue has completed processing. + * + * @discussion + * The object pointer value passed to this function must be treated as a value + * only. It is intended solely for matching up with an earlier call to a + * dequeue hook function and must NOT be dereferenced. + * + * @param item + * Opaque dentifier for completed item. Must NOT be dereferenced. + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1) +DISPATCH_EXPORT +void +dispatch_introspection_hook_queue_item_complete(dispatch_object_t item); + /*! * @function dispatch_introspection_hook_queue_callout_begin * diff --git a/dispatch/io.h b/dispatch/io.h index 569dbdb19..d53d488f7 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -96,8 +96,8 @@ typedef int dispatch_fd_t; * submitted. * @param handler The handler to enqueue when data is ready to be * delivered. 
- * @param data The data read from the file descriptor. - * @param error An errno condition for the read operation or + * param data The data read from the file descriptor. + * param error An errno condition for the read operation or * zero if the read was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) @@ -133,9 +133,9 @@ dispatch_read(dispatch_fd_t fd, * @param queue The dispatch queue to which the handler should be * submitted. * @param handler The handler to enqueue when the data has been written. - * @param data The data that could not be written to the I/O + * param data The data that could not be written to the I/O * channel, or NULL. - * @param error An errno condition for the write operation or + * param error An errno condition for the write operation or * zero if the write was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) @@ -204,7 +204,7 @@ typedef unsigned long dispatch_io_type_t; * @param queue The dispatch queue to which the handler should be submitted. * @param cleanup_handler The handler to enqueue when the system * relinquishes control over the file descriptor. - * @param error An errno condition if control is relinquished + * param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). @@ -239,7 +239,7 @@ dispatch_io_create(dispatch_io_type_t type, * submitted. * @param cleanup_handler The handler to enqueue when the system * has closed the file at path. - * @param error An errno condition if control is relinquished + * param error An errno condition if control is relinquished * because channel creation or opening of the * specified file failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error @@ -280,7 +280,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * relinquishes control over the file descriptor * (resp. 
closes the file at path) associated with * the existing channel. - * @param error An errno condition if control is relinquished + * param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). @@ -341,10 +341,10 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, * submitted. * @param io_handler The I/O handler to enqueue when data is ready to be * delivered. - * @param done A flag indicating whether the operation is complete. - * @param data An object with the data most recently read from the + * param done A flag indicating whether the operation is complete. + * param data An object with the data most recently read from the * I/O channel as part of this read operation, or NULL. - * @param error An errno condition for the read operation or zero if + * param error An errno condition for the read operation or zero if * the read was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) @@ -393,11 +393,11 @@ dispatch_io_read(dispatch_io_t channel, * @param queue The dispatch queue to which the I/O handler should be * submitted. * @param io_handler The I/O handler to enqueue when data has been delivered. - * @param done A flag indicating whether the operation is complete. - * @param data An object of the data remaining to be + * param done A flag indicating whether the operation is complete. + * param data An object of the data remaining to be * written to the I/O channel as part of this write * operation, or NULL. - * @param error An errno condition for the write operation or zero + * param error An errno condition for the write operation or zero * if the write was successful. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) diff --git a/dispatch/object.h b/dispatch/object.h index 4ae0ab626..c6371899a 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -99,41 +99,50 @@ typedef union { #define DISPATCH_RETURNS_RETAINED #endif -__BEGIN_DECLS - /*! - * @function dispatch_debug + * @typedef dispatch_block_t * * @abstract - * Programmatically log debug information about a dispatch object. + * The type of blocks submitted to dispatch queues, which take no arguments + * and have no return value. * * @discussion - * Programmatically log debug information about a dispatch object. By default, - * the log output is sent to syslog at notice level. In the debug version of - * the library, the log output is sent to a file in /var/tmp. - * The log output destination can be configured via the LIBDISPATCH_LOG - * environment variable, valid values are: YES, NO, syslog, stderr, file. - * - * This function is deprecated and will be removed in a future release. - * Objective-C callers may use -debugDescription instead. - * - * @param object - * The object to introspect. - * - * @param message - * The message to log above and beyond the introspection. + * When not building with Objective-C ARC, a block object allocated on or + * copied to the heap must be released with a -[release] message or the + * Block_release() function. + * + * The declaration of a block literal allocates storage on the stack. + * Therefore, this is an invalid construct: + * + * dispatch_block_t block; + * if (x) { + * block = ^{ printf("true\n"); }; + * } else { + * block = ^{ printf("false\n"); }; + * } + * block(); // unsafe!!! + * + * + * What is happening behind the scenes: + * + * if (x) { + * struct Block __tmp_1 = ...; // setup details + * block = &__tmp_1; + * } else { + * struct Block __tmp_2 = ...; // setup details + * block = &__tmp_2; + * } + * + * + * As the example demonstrates, the address of a stack variable is escaping the + * scope in which it is allocated. 
That is a classic C bug. + * + * Instead, the block literal must be copied to the heap with the Block_copy() + * function or by sending it a -[copy] message. */ -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW -__attribute__((__format__(printf,2,3))) -void -dispatch_debug(dispatch_object_t object, const char *message, ...); +typedef void (^dispatch_block_t)(void); -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW -__attribute__((__format__(printf,2,0))) -void -dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); +__BEGIN_DECLS /*! * @function dispatch_retain @@ -227,11 +236,11 @@ dispatch_set_context(dispatch_object_t object, void *context); * @abstract * Set the finalizer function for a dispatch object. * - * @param + * @param object * The dispatch object to modify. * The result of passing NULL in this parameter is undefined. * - * @param + * @param finalizer * The finalizer function pointer. * * @discussion @@ -246,7 +255,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 void dispatch_set_finalizer_f(dispatch_object_t object, - dispatch_function_t finalizer); + dispatch_function_t finalizer); /*! * @function dispatch_suspend @@ -286,6 +295,173 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); +/*! + * @function dispatch_wait + * + * @abstract + * Wait synchronously for an object or until the specified timeout has elapsed. + * + * @discussion + * Type-generic macro that maps to dispatch_block_wait, dispatch_group_wait or + * dispatch_semaphore_wait, depending on the type of the first argument. + * See documentation for these functions for more details. + * This function is unavailable for any other object type. + * + * @param object + * The object to wait on. 
+ * The result of passing NULL in this parameter is undefined. + * + * @param timeout + * When to timeout (see dispatch_time). As a convenience, there are the + * DISPATCH_TIME_NOW and DISPATCH_TIME_FOREVER constants. + * + * @result + * Returns zero on success or non-zero on error (i.e. timed out). + */ +DISPATCH_UNAVAILABLE +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +long +dispatch_wait(void *object, dispatch_time_t timeout); +#if __has_extension(c_generic_selections) +#define dispatch_wait(object, timeout) \ + _Generic((object), \ + dispatch_block_t:dispatch_block_wait, \ + dispatch_group_t:dispatch_group_wait, \ + dispatch_semaphore_t:dispatch_semaphore_wait \ + )((object),(timeout)) +#endif + +/*! + * @function dispatch_notify + * + * @abstract + * Schedule a notification block to be submitted to a queue when the execution + * of a specified object has completed. + * + * @discussion + * Type-generic macro that maps to dispatch_block_notify or + * dispatch_group_notify, depending on the type of the first argument. + * See documentation for these functions for more details. + * This function is unavailable for any other object type. + * + * @param object + * The object to observe. + * The result of passing NULL in this parameter is undefined. + * + * @param queue + * The queue to which the supplied notification block will be submitted when + * the observed object completes. + * + * @param notification_block + * The block to submit when the observed object completes. + */ +DISPATCH_UNAVAILABLE +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_notify(void *object, dispatch_object_t queue, + dispatch_block_t notification_block); +#if __has_extension(c_generic_selections) +#define dispatch_notify(object, queue, notification_block) \ + _Generic((object), \ + dispatch_block_t:dispatch_block_notify, \ + dispatch_group_t:dispatch_group_notify \ + )((object),(queue), (notification_block)) +#endif + +/*! 
+ * @function dispatch_cancel + * + * @abstract + * Cancel the specified object. + * + * @discussion + * Type-generic macro that maps to dispatch_block_cancel or + * dispatch_source_cancel, depending on the type of the first argument. + * See documentation for these functions for more details. + * This function is unavailable for any other object type. + * + * @param object + * The object to cancel. + * The result of passing NULL in this parameter is undefined. + */ +DISPATCH_UNAVAILABLE +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_cancel(void *object); +#if __has_extension(c_generic_selections) +#define dispatch_cancel(object) \ + _Generic((object), \ + dispatch_block_t:dispatch_block_cancel, \ + dispatch_source_t:dispatch_source_cancel \ + )((object)) +#endif + +/*! + * @function dispatch_testcancel + * + * @abstract + * Test whether the specified object has been canceled + * + * @discussion + * Type-generic macro that maps to dispatch_block_testcancel or + * dispatch_source_testcancel, depending on the type of the first argument. + * See documentation for these functions for more details. + * This function is unavailable for any other object type. + * + * @param object + * The object to test. + * The result of passing NULL in this parameter is undefined. + * + * @result + * Non-zero if canceled and zero if not canceled. + */ +DISPATCH_UNAVAILABLE +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW +long +dispatch_testcancel(void *object); +#if __has_extension(c_generic_selections) +#define dispatch_testcancel(object) \ + _Generic((object), \ + dispatch_block_t:dispatch_block_testcancel, \ + dispatch_source_t:dispatch_source_testcancel \ + )((object)) +#endif + +/*! + * @function dispatch_debug + * + * @abstract + * Programmatically log debug information about a dispatch object. + * + * @discussion + * Programmatically log debug information about a dispatch object. 
By default, + * the log output is sent to syslog at notice level. In the debug version of + * the library, the log output is sent to a file in /var/tmp. + * The log output destination can be configured via the LIBDISPATCH_LOG + * environment variable, valid values are: YES, NO, syslog, stderr, file. + * + * This function is deprecated and will be removed in a future release. + * Objective-C callers may use -debugDescription instead. + * + * @param object + * The object to introspect. + * + * @param message + * The message to log above and beyond the introspection. + */ +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,3))) +void +dispatch_debug(dispatch_object_t object, const char *message, ...); + +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +__attribute__((__format__(printf,2,0))) +void +dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); + __END_DECLS #endif diff --git a/dispatch/queue.h b/dispatch/queue.h index 9090676d8..cc3ca941e 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2012 Apple Inc. All rights reserved. + * Copyright (c) 2008-2014 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -66,51 +66,6 @@ */ DISPATCH_DECL(dispatch_queue); -/*! - * @typedef dispatch_queue_attr_t - * - * @abstract - * Attribute for dispatch queues. - */ -DISPATCH_DECL(dispatch_queue_attr); - -/*! - * @typedef dispatch_block_t - * - * @abstract - * The prototype of blocks submitted to dispatch queues, which take no - * arguments and have no return value. - * - * @discussion - * The declaration of a block allocates storage on the stack. 
Therefore, this - * is an invalid construct: - * - * dispatch_block_t block; - * - * if (x) { - * block = ^{ printf("true\n"); }; - * } else { - * block = ^{ printf("false\n"); }; - * } - * block(); // unsafe!!! - * - * What is happening behind the scenes: - * - * if (x) { - * struct Block __tmp_1 = ...; // setup details - * block = &__tmp_1; - * } else { - * struct Block __tmp_2 = ...; // setup details - * block = &__tmp_2; - * } - * - * As the example demonstrates, the address of a stack variable is escaping the - * scope in which it is allocated. That is a classic C bug. - */ -#ifdef __BLOCKS__ -typedef void (^dispatch_block_t)(void); -#endif - __BEGIN_DECLS /*! @@ -345,6 +300,9 @@ DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; + /*! * @function dispatch_get_main_queue * @@ -360,10 +318,12 @@ dispatch_get_current_queue(void); * Returns the main queue. This queue is created automatically on behalf of * the main thread before main() is called. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; -#define dispatch_get_main_queue() \ - DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q) +DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW +dispatch_queue_t +dispatch_get_main_queue(void) +{ + return DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q); +} /*! * @typedef dispatch_queue_priority_t @@ -400,32 +360,68 @@ DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; typedef long dispatch_queue_priority_t; +/*! + * @typedef dispatch_qos_class_t + * Alias for qos_class_t type. + */ +#if __has_include() +#include +typedef qos_class_t dispatch_qos_class_t; +#else +typedef unsigned int dispatch_qos_class_t; +#endif + /*! 
* @function dispatch_get_global_queue * * @abstract - * Returns a well-known global concurrent queue of a given priority level. + * Returns a well-known global concurrent queue of a given quality of service + * class. * * @discussion * The well-known global concurrent queues may not be modified. Calls to * dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will * have no effect when used with queues returned by this function. * - * @param priority - * A priority defined in dispatch_queue_priority_t + * @param identifier + * A quality of service class defined in qos_class_t or a priority defined in + * dispatch_queue_priority_t. + * + * It is recommended to use quality of service class values to identify the + * well-known global concurrent queues: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * + * The global concurrent queues may still be identified by their priority, + * which map to the following QOS classes: + * - DISPATCH_QUEUE_PRIORITY_HIGH: QOS_CLASS_USER_INITIATED + * - DISPATCH_QUEUE_PRIORITY_DEFAULT: QOS_CLASS_DEFAULT + * - DISPATCH_QUEUE_PRIORITY_LOW: QOS_CLASS_UTILITY + * - DISPATCH_QUEUE_PRIORITY_BACKGROUND: QOS_CLASS_BACKGROUND * * @param flags * Reserved for future use. Passing any value other than zero may result in * a NULL return value. * * @result - * Returns the requested global queue. + * Returns the requested global queue or NULL if the requested global queue + * does not exist. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_get_global_queue(dispatch_queue_priority_t priority, - unsigned long flags); +dispatch_get_global_queue(long identifier, unsigned long flags); + +/*! + * @typedef dispatch_queue_attr_t + * + * @abstract + * Attribute for dispatch queues. + */ +DISPATCH_DECL(dispatch_queue_attr); /*! 
* @const DISPATCH_QUEUE_SERIAL @@ -445,6 +441,63 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; +/*! + * @function dispatch_queue_attr_make_with_qos_class + * + * @abstract + * Returns an attribute value which may be provided to dispatch_queue_create() + * in order to assign a QOS class and relative priority to the queue. + * + * @discussion + * When specified in this manner, the QOS class and relative priority take + * precedence over those inherited from the dispatch queue's target queue (if + * any) as long that does not result in a lower QOS class and relative priority. + * + * The global queue priorities map to the following QOS classes: + * - DISPATCH_QUEUE_PRIORITY_HIGH: QOS_CLASS_USER_INITIATED + * - DISPATCH_QUEUE_PRIORITY_DEFAULT: QOS_CLASS_DEFAULT + * - DISPATCH_QUEUE_PRIORITY_LOW: QOS_CLASS_UTILITY + * - DISPATCH_QUEUE_PRIORITY_BACKGROUND: QOS_CLASS_BACKGROUND + * + * Example: + * + * dispatch_queue_t queue; + * dispatch_queue_attr_t attr; + * attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, + * QOS_CLASS_UTILITY, 0); + * queue = dispatch_queue_create("com.example.myqueue", attr); + * + * + * @param attr + * A queue attribute value to be combined with the QOS class, or NULL. + * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * Passing any other value results in NULL being returned. + * + * @param relative_priority + * A relative priority within the QOS class. This value is a negative + * offset from the maximum supported scheduler priority for the given class. + * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY + * results in NULL being returned. 
+ * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create(), + * or NULL if an invalid QOS class was requested. + * The new value combines the attributes specified by the 'attr' parameter and + * the new QOS class and relative priority. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr, + dispatch_qos_class_t qos_class, int relative_priority); + /*! * @function dispatch_queue_create * @@ -466,15 +519,23 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * hold a reference to that queue. Therefore a queue will not be deallocated * until all pending blocks have finished. * - * The target queue of a newly created dispatch queue is the default priority - * global concurrent queue. + * Passing the result of the dispatch_queue_attr_make_with_qos_class() function + * to the attr parameter of this function allows a quality of service class and + * relative priority to be specified for the newly created queue. + * The quality of service class so specified takes precedence over the quality + * of service class of the newly created dispatch queue's target queue (if any) + * as long that does not result in a lower QOS class and relative priority. + * + * When no quality of service class is specified, the target queue of a newly + * created dispatch queue is the default priority global concurrent queue. * * @param label * A string label to attach to the queue. * This parameter is optional and may be NULL. * * @param attr - * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT. + * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * the function dispatch_queue_attr_make_with_qos_class(). * * @result * The newly created dispatch queue. 
@@ -513,6 +574,46 @@ DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t queue); +/*! + * @function dispatch_queue_get_qos_class + * + * @abstract + * Returns the QOS class and relative priority of the given queue. + * + * @discussion + * If the given queue was created with an attribute value returned from + * dispatch_queue_attr_make_with_qos_class(), this function returns the QOS + * class and relative priority specified at that time; for any other attribute + * value it returns a QOS class of QOS_CLASS_UNSPECIFIED and a relative + * priority of 0. + * + * If the given queue is one of the global queues, this function returns its + * assigned QOS class value as documented under dispatch_get_global_queue() and + * a relative priority of 0; in the case of the main queue it returns the QOS + * value provided by qos_class_main() and a relative priority of 0. + * + * @param queue + * The queue to query. + * + * @param relative_priority_ptr + * A pointer to an int variable to be filled with the relative priority offset + * within the QOS class, or NULL. + * + * @return + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * - QOS_CLASS_UNSPECIFIED + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW +dispatch_qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t queue, + int *relative_priority_ptr); + /*! * @const DISPATCH_TARGET_QUEUE_DEFAULT * @discussion Constant to pass to the dispatch_set_target_queue() and @@ -530,9 +631,12 @@ dispatch_queue_get_label(dispatch_queue_t queue); * @discussion * An object's target queue is responsible for processing the object. * - * A dispatch queue's priority is inherited from its target queue. 
Use the - * dispatch_get_global_queue() function to obtain suitable target queue - * of the desired priority. + * When no quality of service class and relative priority is specified for a + * dispatch queue at the time of creation, a dispatch queue's quality of service + * class is inherited from its target queue. The dispatch_get_global_queue() + * function may be used to obtain a target queue of a specific quality of + * service class, however the use of dispatch_queue_attr_make_with_qos_class() + * is recommended instead. * * Blocks submitted to a serial queue whose target queue is another serial * queue will not be invoked concurrently with blocks submitted to the target diff --git a/dispatch/source.h b/dispatch/source.h index ebbf8b95a..411ed0611 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -130,7 +130,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); */ #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! @@ -436,7 +436,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, - dispatch_block_t cancel_handler); + dispatch_block_t handler); #endif /* __BLOCKS__ */ /*! @@ -461,7 +461,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, - dispatch_function_t cancel_handler); + dispatch_function_t handler); /*! * @function dispatch_source_cancel @@ -711,7 +711,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler(dispatch_source_t source, - dispatch_block_t registration_handler); + dispatch_block_t handler); #endif /* __BLOCKS__ */ /*! 
@@ -736,7 +736,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler_f(dispatch_source_t source, - dispatch_function_t registration_handler); + dispatch_function_t handler); __END_DECLS diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index b465ba7e7..ff12a47f2 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -39,6 +39,8 @@ 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; 5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; @@ -82,6 +84,20 @@ E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; 
E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; + E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; + E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; + E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* 
voucher_internal.h */; }; + E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; @@ -129,6 +145,9 @@ E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ -175,6 +194,8 @@ E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; 
fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; + E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; + E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E4B515BE164B2DA300E003AF /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E4B515BF164B2DA300E003AF /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ -199,6 +220,8 @@ E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; E4C1ED701263E714000D3C8B /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E4D76A9318E325D200B1F98B /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E4D76A9418E325D200B1F98B /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 
E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; E4EC11AE12514302000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; @@ -298,6 +321,7 @@ /* Begin PBXFileReference section */ 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; 2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; + 2BE17C6318EA305E002CA4E8 /* layout_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layout_private.h; sourceTree = ""; }; 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; @@ -346,7 +370,11 @@ E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; + 
E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; + E44A8E6A1805C3E0009FFDB6 /* voucher.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = voucher.c; sourceTree = ""; }; + E44A8E711805C473009FFDB6 /* voucher_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_private.h; sourceTree = ""; }; + E44A8E7418066276009FFDB6 /* voucher_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_internal.h; sourceTree = ""; }; E44EBE331251654000645D88 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = ""; }; E44EBE371251656400645D88 /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = ""; }; E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; @@ -358,10 +386,14 @@ E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; + E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = ""; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = ""; }; + E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = ""; }; + E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = ""; }; + E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = ""; }; E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = ""; }; E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-introspection.xcconfig"; sourceTree = ""; }; @@ -372,6 +404,8 @@ E4BA743813A8900B0095BDF1 /* 
dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = ""; }; E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; + E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = ""; }; + E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch_objc.aliases; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -455,6 +489,7 @@ 96A8AA860F41E7A400CD570B /* source.c */, 96032E4A0F5CC8C700241C5F /* time.c */, C9C5F80D143C1771006DC718 /* transform.c */, + E44A8E6A1805C3E0009FFDB6 /* voucher.c */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, ); @@ -517,8 +552,11 @@ E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, + E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */, E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, + E4B2D42E17A7F0F90034A18F 
/* libdispatch_iphoneos.order */, + E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */, E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */, E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); @@ -588,6 +626,7 @@ FC1832A2109923C7003403D5 /* perfmon.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, + E48EC97B1835BADD00EAC4F1 /* yield.h */, ); path = shims; sourceTree = ""; @@ -596,6 +635,7 @@ isa = PBXGroup; children = ( 72CC942F0ECCD8750031B751 /* base.h */, + E4D76A9218E325D200B1F98B /* block.h */, 5AAB45C510D30D0C004407EA /* data.h */, FC7BED960E8361E600161930 /* dispatch.h */, FC5C9C1D0EADABE3006E462D /* group.h */, @@ -621,8 +661,11 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */, FCEF047F0F5661960067401F /* source_private.h */, E4ECBAA415253C25002C313C /* mach_private.h */, + E44A8E711805C473009FFDB6 /* voucher_private.h */, + E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, 961B99350F3E83980006BC96 /* benchmark.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, + 2BE17C6318EA305E002CA4E8 /* layout_private.h */, ); name = "Private Headers"; path = private; @@ -633,12 +676,14 @@ children = ( 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, FC7BED8F0E8361E600161930 /* internal.h */, + E44757D917F4572600B82CA1 /* inline_internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, + E44A8E7418066276009FFDB6 /* voucher_internal.h */, E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, @@ -658,16 +703,20 @@ FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in 
Headers */, + E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, + E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, + E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, + E4D76A9318E325D200B1F98B /* block.h in Headers */, E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, @@ -679,11 +728,14 @@ FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */, + E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, FC1832A8109923C7003403D5 /* tsd.h in Headers */, 96929D840F3EA1020041FF5D /* atomic.h in Headers */, 96929D850F3EA1020041FF5D /* shims.h in Headers */, FC1832A7109923C7003403D5 /* time.h in Headers */, + E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */, + 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */, FC1832A6109923C7003403D5 /* perfmon.h in Headers */, FC9C70E8105EC9620074F9CA /* config.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, @@ -705,16 +757,20 @@ E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, + E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, 
E49F24AE125D57FA0057C971 /* queue.h in Headers */, E49F24AF125D57FA0057C971 /* source.h in Headers */, + E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */, E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, E49F24B1125D57FA0057C971 /* group.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, + E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */, E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in Headers */, E49F24B5125D57FA0057C971 /* time.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, + E4D76A9418E325D200B1F98B /* block.h in Headers */, E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, @@ -726,11 +782,14 @@ E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */, E4C1ED701263E714000D3C8B /* data_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, + E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, E49F24C1125D57FA0057C971 /* tsd.h in Headers */, E49F24C2125D57FA0057C971 /* atomic.h in Headers */, E49F24C3125D57FA0057C971 /* shims.h in Headers */, E49F24C4125D57FA0057C971 /* time.h in Headers */, E49F24C5125D57FA0057C971 /* perfmon.h in Headers */, + E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */, + 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */, E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, @@ -761,12 +820,15 @@ E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, E44F9DB816544053001DCD38 /* atomic.h in Headers */, + E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */, E44F9DB71654404F001DCD38 /* shims.h in Headers */, E44F9DBC1654405B001DCD38 /* 
perfmon.h in Headers */, E44F9DBF165440EF001DCD38 /* config.h in Headers */, + E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */, E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */, E44F9DB616544043001DCD38 /* trace.h in Headers */, E44F9DB916544056001DCD38 /* getprogname.h in Headers */, + E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */, E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */, E44F9DC116544115001DCD38 /* object_private.h in Headers */, E44F9DC016544115001DCD38 /* object.h in Headers */, @@ -895,7 +957,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0500; + LastUpgradeCheck = 0600; }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; compatibilityVersion = "Xcode 3.2"; @@ -1031,11 +1093,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", + "$(SRCROOT)/private/voucher_private.h", + "$(SRCROOT)/private/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; @@ -1051,11 +1117,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", + "$(SRCROOT)/private/voucher_private.h", + "$(SRCROOT)/private/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", + 
"$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; @@ -1157,6 +1227,7 @@ E4FC3264145F46C9002FBDDB /* object.m in Sources */, 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, E420867016027AE500EEE210 /* data.m in Sources */, + E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1170,6 +1241,7 @@ E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, E46DBC4514EE10C80001F9F6 /* once.c in Sources */, + E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, E46DBC4714EE10C80001F9F6 /* object.c in Sources */, E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, @@ -1204,6 +1276,7 @@ E4FC3265145F46C9002FBDDB /* object.m in Sources */, 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, E420867116027AE500EEE210 /* data.m in Sources */, + E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1225,6 +1298,7 @@ E4B515C8164B2DA300E003AF /* time.c in Sources */, E4B515C9164B2DA300E003AF /* data.c in Sources */, E4B515CA164B2DA300E003AF /* io.c in Sources */, + E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */, E4B515CB164B2DA300E003AF /* transform.c in Sources */, E4B515CC164B2DA300E003AF /* object.m in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, @@ -1255,6 +1329,7 @@ E4FC3266145F46C9002FBDDB /* object.m in Sources */, 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, E420867316027AE500EEE210 /* data.m in Sources */, + E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1280,6 +1355,7 @@ E4FC3267145F46C9002FBDDB /* object.m in Sources */, 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, E420867216027AE500EEE210 /* data.m in Sources */, + E44A8E6D1805C3E0009FFDB6 /* voucher.c 
in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/os/object.h b/os/object.h index f8d23a3a1..944d3313d 100644 --- a/os/object.h +++ b/os/object.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2012 Apple Inc. All rights reserved. + * Copyright (c) 2011-2014 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -24,6 +24,7 @@ #ifdef __APPLE__ #include #endif +#include /*! * @header @@ -85,17 +86,26 @@ #else #define OS_OBJECT_RETURNS_RETAINED #endif +#if __has_attribute(ns_consumed) +#define OS_OBJECT_CONSUMED __attribute__((__ns_consumed__)) +#else +#define OS_OBJECT_CONSUMED +#endif #else #define OS_OBJECT_RETURNS_RETAINED +#define OS_OBJECT_CONSUMED #endif #if defined(__has_feature) #if __has_feature(objc_arc) #define OS_OBJECT_BRIDGE __bridge +#define OS_WARN_RESULT_NEEDS_RELEASE #else #define OS_OBJECT_BRIDGE +#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif #else #define OS_OBJECT_BRIDGE +#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif #ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE #if defined(__clang_analyzer__) @@ -114,8 +124,65 @@ /*! @parseOnly */ #define OS_OBJECT_RETURNS_RETAINED /*! @parseOnly */ +#define OS_OBJECT_CONSUMED +/*! @parseOnly */ #define OS_OBJECT_BRIDGE +/*! @parseOnly */ +#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif +#define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) + +__BEGIN_DECLS + +/*! + * @function os_retain + * + * @abstract + * Increment the reference count of an os_object. + * + * @discussion + * On a platform with the modern Objective-C runtime this is exactly equivalent + * to sending the object the -[retain] message. + * + * @param object + * The object to retain. + * + * @result + * The retained object. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_EXPORT +void* +os_retain(void *object); +#if OS_OBJECT_USE_OBJC +#undef os_retain +#define os_retain(object) [object retain] +#endif + +/*! + * @function os_release + * + * @abstract + * Decrement the reference count of a os_object. + * + * @discussion + * On a platform with the modern Objective-C runtime this is exactly equivalent + * to sending the object the -[release] message. + * + * @param object + * The object to release. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_EXPORT +void +os_release(void *object); +#if OS_OBJECT_USE_OBJC +#undef os_release +#define os_release(object) [object release] +#endif + +__END_DECLS + #endif diff --git a/private/data_private.h b/private/data_private.h index df60d2869..751b7ce9c 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -164,7 +164,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *context, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t -dispatch_data_make_memory_entry(dispatch_data_t dd); +dispatch_data_make_memory_entry(dispatch_data_t data); #endif /*! diff --git a/private/introspection_private.h b/private/introspection_private.h index 727d9715a..7ac0e7e92 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -38,9 +38,9 @@ * loaded by running a process with the environment variable * DYLD_LIBRARY_PATH=/usr/lib/system/introspection * - * NOTE: these functions are _not_ exported from the shared library, they are - * only intended to be called from a debugger context while the rest of the - * process is suspended. + * NOTE: most of these functions are _not_ exported from the shared library, + * the unexported functions are intended to only be called from a debugger + * context while the rest of the process is suspended. 
*/ #ifndef __BEGIN_DECLS @@ -68,7 +68,13 @@ typedef struct dispatch_queue_s *dispatch_queue_t; typedef struct dispatch_source_s *dispatch_source_t; typedef struct dispatch_group_s *dispatch_group_t; typedef struct dispatch_object_s *dispatch_object_t; +#ifndef __OSX_AVAILABLE_STARTING +#define __OSX_AVAILABLE_STARTING(x,y) #endif +#ifndef DISPATCH_EXPORT +#define DISPATCH_EXPORT extern +#endif +#endif // __DISPATCH_INDIRECT__ /*! * @typedef dispatch_introspection_versions_s @@ -79,6 +85,43 @@ typedef struct dispatch_object_s *dispatch_object_t; * @field introspection_version * Version of overall dispatch_introspection SPI. * + * @field hooks_version + * Version of dispatch_introspection_hooks_s structure. + * Version 2 adds the queue_item_complete member. + * + * @field hooks_size + * Size of dispatch_introspection_hooks_s structure. + * + * @field queue_item_version + * Version of dispatch_introspection_queue_item_s structure. + * + * @field queue_item_size + * Size of dispatch_introspection_queue_item_s structure. + * + * @field queue_block_version + * Version of dispatch_introspection_queue_block_s structure. + * + * @field queue_block_size + * Size of dispatch_introspection_queue_block_s structure. + * + * @field queue_function_version + * Version of dispatch_introspection_queue_function_s structure. + * + * @field queue_function_size + * Size of dispatch_introspection_queue_function_s structure. + * + * @field queue_thread_version + * Version of dispatch_introspection_queue_thread_s structure. + * + * @field queue_thread_size + * Size of dispatch_introspection_queue_thread_s structure. + * + * @field object_version + * Version of dispatch_introspection_object_s structure. + * + * @field object_size + * Size of dispatch_introspection_object_s structure. + * * @field queue_version * Version of dispatch_introspection_queue_s structure. 
* @@ -467,6 +510,27 @@ typedef void (*dispatch_introspection_hook_queue_item_enqueue_t)( typedef void (*dispatch_introspection_hook_queue_item_dequeue_t)( dispatch_queue_t queue, dispatch_introspection_queue_item_t item); +/*! + * @typedef dispatch_introspection_hook_queue_item_complete_t + * + * @abstract + * A function pointer called when an item previously dequeued from a dispatch + * queue has completed processing. + * + * @discussion + * The object pointer value passed to this function pointer must be treated as a + * value only. It is intended solely for matching up with an earlier call to a + * dequeue hook function pointer by comparing to the first member of the + * dispatch_introspection_queue_item_t structure. It must NOT be dereferenced + * or e.g. passed to dispatch_introspection_queue_item_get_info(), the memory + * that was backing it may have been reused at the time this hook is called. + * + * @param object + * Opaque dentifier for completed item. Must NOT be dereferenced. + */ +typedef void (*dispatch_introspection_hook_queue_item_complete_t)( + dispatch_continuation_t object); + /*! * @typedef dispatch_introspection_hooks_s * @@ -479,7 +543,8 @@ typedef struct dispatch_introspection_hooks_s { dispatch_introspection_hook_queue_dispose_t queue_dispose; dispatch_introspection_hook_queue_item_enqueue_t queue_item_enqueue; dispatch_introspection_hook_queue_item_dequeue_t queue_item_dequeue; - void *_reserved[6]; + dispatch_introspection_hook_queue_item_complete_t queue_item_complete; + void *_reserved[5]; } dispatch_introspection_hooks_s; typedef dispatch_introspection_hooks_s *dispatch_introspection_hooks_t; @@ -638,7 +703,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, * * @discussion * Installing hook functions must take place from a debugger context (while the - * rest of the process is suspended). + * rest of the process is suspended) or early enough in the process lifecycle + * that the process is still single-threaded. 
* * The caller is responsible for implementing chaining to the hooks that were * previously installed (if any). @@ -650,7 +716,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, * hooks on output. */ -extern void +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +DISPATCH_EXPORT void dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); /*! @@ -722,6 +789,17 @@ extern void dispatch_introspection_hook_callout_queue_item_dequeue( dispatch_queue_t queue, dispatch_introspection_queue_item_t item); +/*! + * @function dispatch_introspection_hook_callout_queue_item_complete + * + * @abstract + * Callout to queue item complete hook that a debugger can break on. + */ + +extern void +dispatch_introspection_hook_callout_queue_item_complete( + dispatch_continuation_t object); + __END_DECLS #endif diff --git a/private/io_private.h b/private/io_private.h index c35b41f2c..4a00ee004 100644 --- a/private/io_private.h +++ b/private/io_private.h @@ -72,9 +72,9 @@ __BEGIN_DECLS * the handler function. * @param handler The handler to enqueue when data is ready to be * delivered. - * @param context Application-defined context parameter. - * @param data The data read from the file descriptor. - * @param error An errno condition for the read operation or + * param context Application-defined context parameter. + * param data The data read from the file descriptor. + * param error An errno condition for the read operation or * zero if the read was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) @@ -113,10 +113,10 @@ dispatch_read_f(dispatch_fd_t fd, * @param context The application-defined context parameter to pass to * the handler function. * @param handler The handler to enqueue when the data has been written. - * @param context Application-defined context parameter. - * @param data The data that could not be written to the I/O + * param context Application-defined context parameter. 
+ * param data The data that could not be written to the I/O * channel, or NULL. - * @param error An errno condition for the write operation or + * param error An errno condition for the write operation or * zero if the write was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) @@ -151,8 +151,8 @@ dispatch_write_f(dispatch_fd_t fd, * the cleanup handler function. * @param cleanup_handler The handler to enqueue when the system * relinquishes control over the file descriptor. - * @param context Application-defined context parameter. - * @param error An errno condition if control is relinquished + * param context Application-defined context parameter. + * param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). @@ -190,8 +190,8 @@ dispatch_io_create_f(dispatch_io_type_t type, * the cleanup handler function. * @param cleanup_handler The handler to enqueue when the system * has closed the file at path. - * @param context Application-defined context parameter. - * @param error An errno condition if control is relinquished + * param context Application-defined context parameter. + * param error An errno condition if control is relinquished * because channel creation or opening of the * specified file failed, zero otherwise. * @result The newly created dispatch I/O channel or NULL if an error @@ -235,8 +235,8 @@ dispatch_io_create_with_path_f(dispatch_io_type_t type, * relinquishes control over the file descriptor * (resp. closes the file at path) associated with * the existing channel. - * @param context Application-defined context parameter. - * @param error An errno condition if control is relinquished + * param context Application-defined context parameter. + * param error An errno condition if control is relinquished * because channel creation failed, zero otherwise. 
* @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). @@ -301,11 +301,11 @@ typedef void (*dispatch_io_handler_function_t)(void *context, bool done, * the handler function. * @param io_handler The I/O handler to enqueue when data is ready to be * delivered. - * @param context Application-defined context parameter. - * @param done A flag indicating whether the operation is complete. - * @param data An object with the data most recently read from the + * param context Application-defined context parameter. + * param done A flag indicating whether the operation is complete. + * param data An object with the data most recently read from the * I/O channel as part of this read operation, or NULL. - * @param error An errno condition for the read operation or zero if + * param error An errno condition for the read operation or zero if * the read was successful. */ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) @@ -357,12 +357,12 @@ dispatch_io_read_f(dispatch_io_t channel, * @param context The application-defined context parameter to pass to * the handler function. * @param io_handler The I/O handler to enqueue when data has been delivered. - * @param context Application-defined context parameter. - * @param done A flag indicating whether the operation is complete. - * @param data An object of the data remaining to be + * param context Application-defined context parameter. + * param done A flag indicating whether the operation is complete. + * param data An object of the data remaining to be * written to the I/O channel as part of this write * operation, or NULL. - * @param error An errno condition for the write operation or zero + * param error An errno condition for the write operation or zero * if the write was successful. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) diff --git a/private/layout_private.h b/private/layout_private.h new file mode 100644 index 000000000..17e8ed836 --- /dev/null +++ b/private/layout_private.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2014 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_LAYOUT_PRIVATE__ +#define __DISPATCH_LAYOUT_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +#if !TARGET_OS_WIN32 +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT const struct dispatch_queue_offsets_s { + // always add new fields at the end + const uint16_t dqo_version; + const uint16_t dqo_label; + const uint16_t dqo_label_size; + const uint16_t dqo_flags; + const uint16_t dqo_flags_size; + const uint16_t dqo_serialnum; + const uint16_t dqo_serialnum_size; + const uint16_t dqo_width; + const uint16_t dqo_width_size; + const uint16_t dqo_running; + const uint16_t dqo_running_size; + // fields added in dqo_version 5: + const uint16_t dqo_suspend_cnt; + const uint16_t dqo_suspend_cnt_size; + const uint16_t dqo_target_queue; + const uint16_t dqo_target_queue_size; + const uint16_t dqo_priority; + const uint16_t dqo_priority_size; +} dispatch_queue_offsets; +#endif + +#if DISPATCH_LAYOUT_SPI + +/*! 
+ * @group Data Structure Layout SPI + * SPI intended for CoreSymbolication only + */ + +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { + // always add new fields at the end + const uint16_t dti_version; + const uint16_t dti_queue_index; + const uint16_t dti_voucher_index; + const uint16_t dti_qos_class_index; +} dispatch_tsd_indexes; + +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +DISPATCH_EXPORT const struct voucher_offsets_s { + // always add new fields at the end + const uint16_t vo_version; + const uint16_t vo_activity_ids_count; + const uint16_t vo_activity_ids_count_size; + const uint16_t vo_activity_ids_array; + const uint16_t vo_activity_ids_array_entry_size; +} voucher_offsets; + +#endif // DISPATCH_LAYOUT_SPI + +__END_DECLS + +#endif // __DISPATCH_LAYOUT_PRIVATE__ diff --git a/private/mach_private.h b/private/mach_private.h index 603330db4..93c1e811c 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -119,8 +119,7 @@ typedef mach_msg_context_trailer_t dispatch_mach_trailer_t; */ #define DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE \ - ((PAGE_SIZE > 0x1000 ? 1 : 3) * PAGE_SIZE - \ - sizeof(dispatch_mach_trailer_t)) + (0x4000 - sizeof(dispatch_mach_trailer_t)) /*! 
* @typedef dispatch_mach_msg_t diff --git a/private/private.h b/private/private.h index 4e32e7345..8fd5abc06 100644 --- a/private/private.h +++ b/private/private.h @@ -57,13 +57,14 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20130520 // Keep in sync with +#if DISPATCH_API_VERSION != 20140804 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif diff --git a/private/queue_private.h b/private/queue_private.h index dfef7859f..e8130360d 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -47,6 +47,30 @@ enum { #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +/*! + * @function dispatch_queue_attr_make_with_overcommit + * + * @discussion + * Returns a dispatch queue attribute value with the overcommit flag set to the + * specified value. + * + * @param attr + * A queue attribute value to be combined with the overcommit flag, or NULL. + * + * @param overcommit + * Boolean overcommit flag. + * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create(). + * This new value combines the attributes specified by the 'attr' parameter and + * the overcommit flag. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t attr, + bool overcommit); + /*! 
* @typedef dispatch_queue_priority_t * @@ -88,10 +112,11 @@ enum { #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \ + "Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead") DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED +dispatch_queue_set_width(dispatch_queue_t dq, long width); /*! * @function dispatch_queue_create_with_target @@ -119,7 +144,8 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); // DEPRECATED * This parameter is optional and may be NULL. * * @param attr - * DISPATCH_QUEUE_SERIAL or DISPATCH_QUEUE_CONCURRENT. + * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * the function dispatch_queue_attr_make_with_qos_class(). * * @param target * The target queue for the newly created queue. The target queue is retained. @@ -174,8 +200,8 @@ dispatch_queue_create_with_target(const char *label, * This parameter is optional and may be NULL. * * @param flags - * Reserved for future use. Passing any value other than zero may result in - * a NULL return value. + * Pass flags value returned by dispatch_pthread_root_queue_flags_pool_size() + * or 0 if unused. * * @param attr * Attributes passed to pthread_create(3) when creating worker pthreads. This @@ -197,6 +223,33 @@ DISPATCH_NOTHROW dispatch_queue_t dispatch_pthread_root_queue_create(const char *label, unsigned long flags, const pthread_attr_t *attr, dispatch_block_t configure); + +/*! + * @function dispatch_pthread_root_queue_flags_pool_size + * + * @abstract + * Returns flags argument to pass to dispatch_pthread_root_queue_create() to + * specify the maximum size of the pthread pool to use for a pthread root queue. 
+ * + * @param pool_size + * Maximum size of the pthread pool to use for the root queue. The number of + * pthreads created for this root queue will never exceed this number but there + * is no guarantee that the specified number will be reached. + * Pass 0 to specify that a default pool size determined by the system should + * be used. + * + * @result + * The flags argument to pass to dispatch_pthread_root_queue_create(). + */ +DISPATCH_INLINE DISPATCH_ALWAYS_INLINE +unsigned long +dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) +{ + #define _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE (0x80000000ul) + return (_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE | + (unsigned long)pool_size); +} + #endif /* __BLOCKS__ */ /*! @@ -209,24 +262,6 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, */ #define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL -#if !TARGET_OS_WIN32 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT const struct dispatch_queue_offsets_s { - // always add new fields at the end - const uint16_t dqo_version; - const uint16_t dqo_label; - const uint16_t dqo_label_size; - const uint16_t dqo_flags; - const uint16_t dqo_flags_size; - const uint16_t dqo_serialnum; - const uint16_t dqo_serialnum_size; - const uint16_t dqo_width; - const uint16_t dqo_width_size; - const uint16_t dqo_running; - const uint16_t dqo_running_size; -} dispatch_queue_offsets; -#endif - /*! * @function dispatch_assert_queue * diff --git a/private/source_private.h b/private/source_private.h index 0f44e27e6..c4ce1d452 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -82,9 +82,11 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; * @const DISPATCH_SOURCE_TYPE_VM * @discussion A dispatch source that monitors virtual memory * The mask is a mask of desired events from dispatch_source_vm_flags_t. + * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead. 
*/ #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3, + __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead") DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; /*! @@ -262,7 +264,9 @@ enum { */ enum { - DISPATCH_VM_PRESSURE = 0x80000000, + DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG( + __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0, + "Use DISPATCH_MEMORYSTATUS_PRESSURE_WARN instead") = 0x80000000, }; /*! @@ -274,20 +278,22 @@ enum { * The system's memory pressure state has changed to warning. * @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL * The system's memory pressure state has changed to critical. + * @constant DISPATCH_MEMORYSTATUS_LOW_SWAP + * The system's memory pressure state has entered the "low swap" condition. + * Restricted to the root user. */ enum { - DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL = 0x01, - DISPATCH_MEMORYSTATUS_PRESSURE_WARN = 0x02, -#if !TARGET_OS_EMBEDDED - DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL = 0x04, -#endif + DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x01, + DISPATCH_MEMORYSTATUS_PRESSURE_WARN + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x02, + DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL + __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_8_0) = 0x04, + DISPATCH_MEMORYSTATUS_LOW_SWAP + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, }; -#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 -#define DISPATCH_VM_PRESSURE DISPATCH_VNODE_ATTRIB -#endif - __BEGIN_DECLS /*! diff --git a/private/voucher_activity_private.h b/private/voucher_activity_private.h new file mode 100644 index 000000000..c02b23653 --- /dev/null +++ b/private/voucher_activity_private.h @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2013-2014 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__ +#define __OS_VOUCHER_ACTIVITY_PRIVATE__ + +#include +#include +#if !defined(__DISPATCH_BUILDING_DISPATCH__) +#include +#endif + +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20140708 + +#if OS_VOUCHER_WEAK_IMPORT +#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT +#else +#define OS_VOUCHER_EXPORT OS_EXPORT +#endif + +__BEGIN_DECLS + +#if OS_VOUCHER_ACTIVITY_SPI + +/*! + * @group Voucher Activity SPI + * SPI intended for libtrace only + */ + +/*! + * @typedef voucher_activity_id_t + * + * @abstract + * Opaque activity identifier. + * + * @discussion + * Scalar value type, not reference counted. + */ +typedef uint64_t voucher_activity_id_t; + +/*! + * @enum voucher_activity_tracepoint_type_t + * + * @abstract + * Types of tracepoints. + */ +OS_ENUM(voucher_activity_tracepoint_type, uint8_t, + voucher_activity_tracepoint_type_release = (1u << 0), + voucher_activity_tracepoint_type_debug = (1u << 1), + voucher_activity_tracepoint_type_error = (1u << 6) | (1u << 0), + voucher_activity_tracepoint_type_fault = (1u << 7) | (1u << 6) | (1u << 0), +); + +/*! 
+ * @enum voucher_activity_flag_t + * + * @abstract + * Flags to pass to voucher_activity_start/voucher_activity_start_with_location + */ +OS_ENUM(voucher_activity_flag, unsigned long, + voucher_activity_flag_default = 0, + voucher_activity_flag_force = 0x1, +); + +/*! + * @typedef voucher_activity_trace_id_t + * + * @abstract + * Opaque tracepoint identifier. + */ +typedef uint64_t voucher_activity_trace_id_t; +static const uint8_t _voucher_activity_trace_id_type_shift = 40; +static const uint8_t _voucher_activity_trace_id_code_namespace_shift = 32; + +/*! + * @function voucher_activity_trace_id + * + * @abstract + * Return tracepoint identifier for specified arguments. + * + * @param type + * Tracepoint type from voucher_activity_tracepoint_type_t. + * + * @param code_namespace + * Namespace of 'code' argument. + * + * @param code + * Tracepoint code. + * + * @result + * Tracepoint identifier. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_INLINE OS_ALWAYS_INLINE +voucher_activity_trace_id_t +voucher_activity_trace_id(uint8_t type, uint8_t code_namespace, uint32_t code) +{ + return ((voucher_activity_trace_id_t)type << + _voucher_activity_trace_id_type_shift) | + ((voucher_activity_trace_id_t)code_namespace << + _voucher_activity_trace_id_code_namespace_shift) | + (voucher_activity_trace_id_t)code; +} + +/*! + * @function voucher_activity_start + * + * @abstract + * Creates a new activity identifier and marks the current thread as + * participating in the activity. + * + * @discussion + * As part of voucher transport, activities are automatically propagated by the + * system to other threads and processes (across IPC). + * + * Activities persist as long as any threads in any process are marked as + * participating. There may be many calls to voucher_activity_end() + * corresponding to one call to voucher_activity_start(). 
+ * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id(), intended for + * identification of the automatic tracepoint generated as part of creating the + * new activity. + * + * @param flags + * Pass voucher_activity_flag_force to indicate that existing activities + * on the current thread should not be inherited and that a new toplevel + * activity should be created. + * + * @result + * A new activity identifier. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +voucher_activity_id_t +voucher_activity_start(voucher_activity_trace_id_t trace_id, + voucher_activity_flag_t flags); + +/*! + * @function voucher_activity_start_with_location + * + * @abstract + * Creates a new activity identifier and marks the current thread as + * participating in the activity. + * + * @discussion + * As part of voucher transport, activities are automatically propagated by the + * system to other threads and processes (across IPC). + * + * Activities persist as long as any threads in any process are marked as + * participating. There may be many calls to voucher_activity_end() + * corresponding to one call to voucher_activity_start_with_location(). + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id(), intended for + * identification of the automatic tracepoint generated as part of creating the + * new activity. + * + * @param location + * Location identifier for the automatic tracepoint generated as part of + * creating the new activity. + * + * @param flags + * Pass voucher_activity_flag_force to indicate that existing activities + * on the current thread should not be inherited and that a new toplevel + * activity should be created. + * + * @result + * A new activity identifier. 
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW
+voucher_activity_id_t
+voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id,
+ uint64_t location, voucher_activity_flag_t flags);
+
+/*!
+ * @function voucher_activity_end
+ *
+ * @abstract
+ * Unmarks the current thread if it is marked as participating in the activity
+ * with the specified identifier.
+ *
+ * @discussion
+ * Activities persist as long as any threads in any process are marked as
+ * participating. There may be many calls to voucher_activity_end()
+ * corresponding to one call to voucher_activity_start() or
+ * voucher_activity_start_with_location().
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_end(voucher_activity_id_t activity_id);
+
+/*!
+ * @function voucher_get_activities
+ *
+ * @abstract
+ * Returns the list of activity identifiers that the current thread is marked
+ * with.
+ *
+ * @param entries
+ * Pointer to an array of activity identifiers to be filled in.
+ *
+ * @param count
+ * Pointer to the requested number of activity identifiers.
+ * On output will be filled with the number of activities that are available.
+ *
+ * @result
+ * Number of activity identifiers written to 'entries'
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+unsigned int
+voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count);
+
+/*!
+ * @group Voucher Activity Trace SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @function voucher_activity_get_namespace
+ *
+ * @abstract
+ * Returns the namespace of the current activity.
+ *
+ * @result
+ * The namespace of the current activity (if any).
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+uint8_t
+voucher_activity_get_namespace(void);
+
+/*!
+ * @function voucher_activity_trace + * + * @abstract + * Add a tracepoint to trace buffer of the current activity. + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id() + * + * @param location + * Tracepoint location. + * + * @param buffer + * Pointer to packed buffer of tracepoint data. + * + * @param length + * Length of data at 'buffer'. + * + * @result + * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_NOTHROW +uint64_t +voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, + void *buffer, size_t length); + +/*! + * @function voucher_activity_trace_args + * + * @abstract + * Add a tracepoint to trace buffer of the current activity, recording + * specified arguments passed in registers. + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id() + * + * @param location + * Tracepoint location. + * + * @param arg1 + * Argument to be recorded in tracepoint data. + * + * @param arg2 + * Argument to be recorded in tracepoint data. + * + * @param arg3 + * Argument to be recorded in tracepoint data. + * + * @param arg4 + * Argument to be recorded in tracepoint data. + * + * @result + * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_NOTHROW +uint64_t +voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, + uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, + uintptr_t arg4); + +/*! + * @group Voucher Activity Mode SPI + * SPI intended for libtrace only + */ + +/*! + * @enum voucher_activity_mode_t + * + * @abstract + * Voucher activity mode. + * + * @discussion + * Configure at process start by setting the OS_ACTIVITY_MODE environment + * variable. 
+ */ +OS_ENUM(voucher_activity_mode, unsigned long, + voucher_activity_mode_disable = 0, + voucher_activity_mode_release = (1u << 0), + voucher_activity_mode_debug = (1u << 1), + voucher_activity_mode_stream = (1u << 2), +); + +/*! + * @function voucher_activity_get_mode + * + * @abstract + * Return current mode of voucher activity subsystem. + * + * @result + * Value from voucher_activity_mode_t enum. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +voucher_activity_mode_t +voucher_activity_get_mode(void); + +/*! + * @function voucher_activity_set_mode_4libtrace(void) + * + * @abstract + * Set the current mode of voucher activity subsystem. + * + * @param mode + * The new mode. + * + * Note that the new mode will take effect soon, but not immediately. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_NOTHROW +void +voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode); + +/*! + * @group Voucher Activity Metadata SPI + * SPI intended for libtrace only + */ + +/*! + * @function voucher_activity_get_metadata_buffer + * + * @abstract + * Return address and length of buffer in the process trace memory area + * reserved for libtrace metadata. + * + * @param length + * Pointer to size_t variable, filled with length of metadata buffer. + * + * @result + * Address of metadata buffer. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL +void* +voucher_activity_get_metadata_buffer(size_t *length); + +#endif // OS_VOUCHER_ACTIVITY_SPI + +#if OS_VOUCHER_ACTIVITY_BUFFER_SPI + +/*! 
+ * @group Voucher Activity Tracepoint SPI + * SPI intended for diagnosticd only + */ + +OS_ENUM(_voucher_activity_tracepoint_flag, uint16_t, + _voucher_activity_trace_flag_buffer_empty = 0, + _voucher_activity_trace_flag_tracepoint = (1u << 0), + _voucher_activity_trace_flag_tracepoint_args = (1u << 1), + _voucher_activity_trace_flag_wide_first = (1u << 6), + _voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7), + _voucher_activity_trace_flag_start = (1u << 8), + _voucher_activity_trace_flag_end = (1u << 8) | (1u << 9), + _voucher_activity_trace_flag_libdispatch = (1u << 13), + _voucher_activity_trace_flag_activity = (1u << 14), + _voucher_activity_trace_flag_buffer_header = (1u << 15), +); + +// for tracepoints with _voucher_activity_trace_flag_libdispatch +OS_ENUM(_voucher_activity_tracepoint_namespace, uint8_t, + _voucher_activity_tracepoint_namespace_ipc = 0x1 +); +OS_ENUM(_voucher_activity_tracepoint_code, uint32_t, + _voucher_activity_tracepoint_namespace_ipc_send = 0x1, + _voucher_activity_tracepoint_namespace_ipc_receive = 0x2, +); + +typedef struct _voucher_activity_tracepoint_s { + uint16_t vat_flags; // voucher_activity_tracepoint_flag_t + uint8_t vat_type; // voucher_activity_tracepoint_type_t + uint8_t vat_namespace; // namespace for tracepoint code + uint32_t vat_code; // tracepoint code + uint64_t vat_thread; // pthread_t + uint64_t vat_timestamp; // absolute time + uint64_t vat_location; // tracepoint PC + uint64_t vat_data[4]; // trace data +} *_voucher_activity_tracepoint_t; + +/*! 
+ * @group Voucher Activity Buffer Internals + * SPI intended for diagnosticd only + * Layout of structs is subject to change without notice + */ + +#include +#include +#include + +static const atm_subaid32_t _voucher_default_activity_subid = + ATM_SUBAID32_MAX-1; + +static const size_t _voucher_activity_buffer_size = 4096; +static const size_t _voucher_activity_tracepoints_per_buffer = + _voucher_activity_buffer_size / + sizeof(struct _voucher_activity_tracepoint_s); +typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size]; + +struct _voucher_activity_self_metadata_s { + struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; +}; +typedef struct _voucher_activity_metadata_opaque_s { + _voucher_activity_buffer_t vam_kernel_metadata; + _voucher_activity_buffer_t vam_client_metadata; + union { + struct _voucher_activity_self_metadata_s vam_self_metadata; + _voucher_activity_buffer_t vam_self_metadata_opaque; + }; +} *_voucher_activity_metadata_opaque_t; + +typedef os_lock_handoff_s _voucher_activity_lock_s; + +typedef struct _voucher_atm_s { + int32_t volatile vatm_refcnt; + mach_voucher_t vatm_kvoucher; + atm_aid_t vatm_id; + atm_mailbox_offset_t vatm_mailbox_offset; + TAILQ_ENTRY(_voucher_atm_s) vatm_list; +#if __LP64__ + uintptr_t vatm_pad[3]; + // cacheline +#endif + _voucher_activity_lock_s vatm_activities_lock; + TAILQ_HEAD(_voucher_atm_activities_s, _voucher_activity_s) vatm_activities; + TAILQ_HEAD(, _voucher_activity_s) vatm_used_activities; +} *_voucher_atm_t; + +// must match layout of _voucher_activity_tracepoint_s +typedef struct _voucher_activity_buffer_header_s { + uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header + uint8_t vabh_unused[6]; + uint64_t vabh_thread; + uint64_t vabh_timestamp; + uint32_t volatile vabh_next_tracepoint_idx; + uint32_t vabh_sequence_no; + voucher_activity_id_t vabh_activity_id; + uint64_t vabh_reserved; + TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list; +} 
*_voucher_activity_buffer_header_t; + +// must match layout of _voucher_activity_buffer_header_s +typedef struct _voucher_activity_s { + // first tracepoint entry + // must match layout of _voucher_activity_tracepoint_s + uint16_t va_flags; // _voucher_activity_trace_flag_buffer_header | + // _voucher_activity_trace_flag_activity | + // _voucher_activity_trace_flag_start | + // _voucher_activity_trace_flag_wide_first + uint8_t va_type; + uint8_t va_namespace; + uint32_t va_code; + uint64_t va_thread; + uint64_t va_timestamp; + uint32_t volatile vabh_next_tracepoint_idx; + uint32_t volatile va_max_sequence_no; + voucher_activity_id_t va_id; + int32_t volatile va_use_count; + uint32_t va_buffer_limit; + TAILQ_HEAD(_voucher_activity_buffer_list_s, + _voucher_activity_buffer_header_s) va_buffers; +#if !__LP64__ + uint64_t va_pad; +#endif + + // second tracepoint entry + // must match layout of _voucher_activity_tracepoint_s + uint16_t va_flags2; + uint8_t va_unused2[2]; + int32_t volatile va_refcnt; + uint64_t va_location; + _voucher_activity_buffer_header_t volatile va_current_buffer; + _voucher_atm_t va_atm; + _voucher_activity_lock_s va_buffers_lock; + uintptr_t va_pad2[2]; + +#if __LP64__ + // third tracepoint entry + // must match layout of _voucher_activity_tracepoint_s + uint16_t va_flags3; + uint8_t va_unused3[6]; + uintptr_t va_pad3; +#endif + TAILQ_ENTRY(_voucher_activity_s) va_list; + TAILQ_ENTRY(_voucher_activity_s) va_atm_list; + TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list; +} *_voucher_activity_t; + +#endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI + +__END_DECLS + +#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__ diff --git a/private/voucher_private.h b/private/voucher_private.h new file mode 100644 index 000000000..fcc28f13c --- /dev/null +++ b/private/voucher_private.h @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2013-2014 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_VOUCHER_PRIVATE__ +#define __OS_VOUCHER_PRIVATE__ + +#include +#include + +#define OS_VOUCHER_SPI_VERSION 20140425 + +#if OS_VOUCHER_WEAK_IMPORT +#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT +#else +#define OS_VOUCHER_EXPORT OS_EXPORT +#endif + +__BEGIN_DECLS + +/*! + * @group Voucher Transport SPI + * SPI intended for clients that need to transport vouchers. + */ + +/*! + * @typedef voucher_t + * + * @abstract + * Vouchers are immutable sets of key/value attributes that can be adopted on a + * thread in the current process or sent to another process. + * + * @discussion + * Voucher objects are os_objects (c.f. ). They are memory-managed + * with the os_retain()/os_release() functions or -[retain]/-[release] methods. + */ +#if OS_OBJECT_USE_OBJC +OS_OBJECT_DECL(voucher); +#else +typedef struct voucher_s *voucher_t; +#endif + +/*! + * @function voucher_adopt + * + * @abstract + * Adopt the specified voucher on the current thread and return the voucher + * that had been adopted previously. + * + * @discussion + * Adopted vouchers are automatically carried forward by the system to other + * threads and processes (across IPC). + * + * Consumes a reference to the specified voucher. + * Returns a reference to the previous voucher. 
+ * + * @param voucher + * The voucher object to adopt on the current thread. + * + * @result + * The previously adopted voucher object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE +OS_NOTHROW +voucher_t +voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED); + +/*! + * @function voucher_copy + * + * @abstract + * Returns a reference to the voucher that had been adopted previously on the + * current thread (or carried forward by the system). + * + * @result + * The currently adopted voucher object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW +voucher_t +voucher_copy(void); + +/*! + * @function voucher_copy_without_importance + * + * @abstract + * Returns a reference to a voucher object with all the properties of the + * voucher that had been adopted previously on the current thread, but + * without the importance properties that are frequently attached to vouchers + * carried with IPC requests. Importance properties may elevate the scheduling + * of threads that adopt or retain the voucher while they service the request. + * See xpc_transaction_begin(3) for further details on importance. + * + * @result + * A copy of the currently adopted voucher object, with importance removed. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW +voucher_t +voucher_copy_without_importance(void); + +/*! + * @function voucher_replace_default_voucher + * + * @abstract + * Replace process attributes of default voucher (used for IPC by this process + * when no voucher is adopted on the sending thread) with the process attributes + * of the voucher adopted on the current thread. 
+ *
+ * @discussion
+ * This allows a daemon to indicate from the context of an incoming IPC request
+ * that all future outgoing IPC from the process should be marked as acting
+ * "on behalf of" the sending process of the current IPC request (as long as the
+ * thread sending that outgoing IPC is not itself in the direct context of an
+ * IPC request, i.e. no voucher is adopted).
+ *
+ * If no voucher is adopted on the current thread or the current voucher does
+ * not contain any process attributes, the default voucher is reset to the
+ * default process attributes for the current process.
+ *
+ * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_replace_default_voucher(void);
+
+/*!
+ * @function voucher_decrement_importance_count4CF
+ *
+ * @abstract
+ * Decrement external importance count of the mach voucher in the specified
+ * voucher object.
+ *
+ * @discussion
+ * This is only intended for use by CoreFoundation to explicitly manage the
+ * App Nap state of an application following reception of a de-nap IPC message.
+ *
+ * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_decrement_importance_count4CF(voucher_t voucher);
+
+/*!
+ * @group Dispatch block objects
+ */
+
+#ifndef __DISPATCH_BUILDING_DISPATCH__
+#include
+#endif /* !__DISPATCH_BUILDING_DISPATCH__ */
+
+/*!
+ * @typedef dispatch_block_flags_t
+ * SPI Flags to pass to the dispatch_block_create* functions.
+ *
+ * @const DISPATCH_BLOCK_NO_VOUCHER
+ * Flag indicating that a dispatch block object should not be assigned a voucher
+ * object. If invoked directly, the block object will be executed with the
+ * voucher adopted on the calling thread.
If the block object is submitted to a + * queue, this replaces the default behavior of associating the submitted block + * instance with the voucher adopted at the time of submission. + * This flag is ignored if a specific voucher object is assigned with the + * dispatch_block_create_with_voucher* functions, and is equivalent to passing + * the NULL voucher to these functions. + */ +#define DISPATCH_BLOCK_NO_VOUCHER (0x40) + +/*! + * @function dispatch_block_create_with_voucher + * + * @abstract + * Create a new dispatch block object on the heap from an existing block and + * the given flags, and assign it the specified voucher object. + * + * @discussion + * The provided block is Block_copy'ed to the heap, it and the specified voucher + * object are retained by the newly created dispatch block object. + * + * The returned dispatch block object is intended to be submitted to a dispatch + * queue with dispatch_async() and related functions, but may also be invoked + * directly. Both operations can be performed an arbitrary number of times but + * only the first completed execution of a dispatch block object can be waited + * on with dispatch_block_wait() or observed with dispatch_block_notify(). + * + * The returned dispatch block will be executed with the specified voucher + * adopted for the duration of the block body. If the NULL voucher is passed, + * the block will be executed with the voucher adopted on the calling thread, or + * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided. 
+ * + * If the returned dispatch block object is submitted to a dispatch queue, the + * submitted block instance will be associated with the QOS class current at the + * time of submission, unless one of the following flags assigned a specific QOS + * class (or no QOS class) at the time of block creation: + * - DISPATCH_BLOCK_ASSIGN_CURRENT + * - DISPATCH_BLOCK_NO_QOS_CLASS + * - DISPATCH_BLOCK_DETACHED + * The QOS class the block object will be executed with also depends on the QOS + * class assigned to the queue and which of the following flags was specified or + * defaulted to: + * - DISPATCH_BLOCK_INHERIT_QOS_CLASS (default for asynchronous execution) + * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS (default for synchronous execution) + * See description of dispatch_block_flags_t for details. + * + * If the returned dispatch block object is submitted directly to a serial queue + * and is configured to execute with a specific QOS class, the system will make + * a best effort to apply the necessary QOS overrides to ensure that blocks + * submitted earlier to the serial queue are executed at that same QOS class or + * higher. + * + * @param flags + * Configuration flags for the block object. + * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t + * results in NULL being returned. + * + * @param voucher + * A voucher object or NULL. Passing NULL is equivalent to specifying the + * DISPATCH_BLOCK_NO_VOUCHER flag. + * + * @param block + * The block to create the dispatch block object from. + * + * @result + * The newly created dispatch block object, or NULL. + * When not building with Objective-C ARC, must be released with a -[release] + * message or the Block_release() function. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK +DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_block_t +dispatch_block_create_with_voucher(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_block_t block); + +/*! + * @function dispatch_block_create_with_voucher_and_qos_class + * + * @abstract + * Create a new dispatch block object on the heap from an existing block and + * the given flags, and assign it the specified voucher object, QOS class and + * relative priority. + * + * @discussion + * The provided block is Block_copy'ed to the heap, it and the specified voucher + * object are retained by the newly created dispatch block object. + * + * The returned dispatch block object is intended to be submitted to a dispatch + * queue with dispatch_async() and related functions, but may also be invoked + * directly. Both operations can be performed an arbitrary number of times but + * only the first completed execution of a dispatch block object can be waited + * on with dispatch_block_wait() or observed with dispatch_block_notify(). + * + * The returned dispatch block will be executed with the specified voucher + * adopted for the duration of the block body. If the NULL voucher is passed, + * the block will be executed with the voucher adopted on the calling thread, or + * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided. + * + * If invoked directly, the returned dispatch block object will be executed with + * the assigned QOS class as long as that does not result in a lower QOS class + * than what is current on the calling thread. 
+ * + * If the returned dispatch block object is submitted to a dispatch queue, the + * QOS class it will be executed with depends on the QOS class assigned to the + * block, the QOS class assigned to the queue and which of the following flags + * was specified or defaulted to: + * - DISPATCH_BLOCK_INHERIT_QOS_CLASS: default for asynchronous execution + * - DISPATCH_BLOCK_ENFORCE_QOS_CLASS: default for synchronous execution + * See description of dispatch_block_flags_t for details. + * + * If the returned dispatch block object is submitted directly to a serial queue + * and is configured to execute with a specific QOS class, the system will make + * a best effort to apply the necessary QOS overrides to ensure that blocks + * submitted earlier to the serial queue are executed at that same QOS class or + * higher. + * + * @param flags + * Configuration flags for the block object. + * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t + * results in NULL being returned. + * + * @param voucher + * A voucher object or NULL. Passing NULL is equivalent to specifying the + * DISPATCH_BLOCK_NO_VOUCHER flag. + * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * - QOS_CLASS_UNSPECIFIED + * Passing QOS_CLASS_UNSPECIFIED is equivalent to specifying the + * DISPATCH_BLOCK_NO_QOS_CLASS flag. Passing any other value results in NULL + * being returned. + * + * @param relative_priority + * A relative priority within the QOS class. This value is a negative + * offset from the maximum supported scheduler priority for the given class. + * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY + * results in NULL being returned. + * + * @param block + * The block to create the dispatch block object from. + * + * @result + * The newly created dispatch block object, or NULL. 
+ * When not building with Objective-C ARC, must be released with a -[release]
+ * message or the Block_release() function.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK
+DISPATCH_WARN_RESULT DISPATCH_NOTHROW
+dispatch_block_t
+dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags,
+ voucher_t voucher, dispatch_qos_class_t qos_class,
+ int relative_priority, dispatch_block_t block);
+
+/*!
+ * @group Voucher Mach SPI
+ * SPI intended for clients that need to interact with mach messages or mach
+ * voucher ports directly.
+ */
+
+#include
+
+/*!
+ * @function voucher_create_with_mach_msg
+ *
+ * @abstract
+ * Creates a new voucher object from a mach message carrying a mach voucher port
+ *
+ * @discussion
+ * Ownership of the mach voucher port in the message is transferred to the new
+ * voucher object and the message header mach voucher field is cleared.
+ *
+ * @param msg
+ * The mach message to query.
+ *
+ * @result
+ * The newly created voucher object or NULL if the message was not carrying a
+ * mach voucher.
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW +voucher_t +voucher_create_with_mach_msg(mach_msg_header_t *msg); + +__END_DECLS + +#endif // __OS_VOUCHER_PRIVATE__ + +#if (OS_VOUCHER_ACTIVITY_SPI || OS_VOUCHER_ACTIVITY_BUFFER_SPI) && \ + !defined(__DISPATCH_BUILDING_DISPATCH__) && \ + !defined(__OS_VOUCHER_ACTIVITY_PRIVATE__) +#include +#endif diff --git a/src/allocator.c b/src/allocator.c index 7b4c16529..af1f3c115 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -127,7 +127,7 @@ continuation_is_in_first_page(dispatch_continuation_t c) // (the base of c's magazine == the base of c's page) // => c is in first page of magazine return (((uintptr_t)c & MAGAZINE_MASK) == - ((uintptr_t)c & ~(uintptr_t)PAGE_MASK)); + ((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK)); #else (void)c; return false; @@ -173,7 +173,8 @@ madvisable_page_base_for_continuation(dispatch_continuation_t c) if (fastpath(continuation_is_in_first_page(c))) { return NULL; } - void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)PAGE_MASK); + void *page_base = (void *)((uintptr_t)c & + ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK); #if DISPATCH_DEBUG struct dispatch_magazine_s *m = magazine_for_continuation(c); if (slowpath(page_base < (void *)&m->conts)) { @@ -226,13 +227,8 @@ bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap, // continuation is "uninitialized", so the caller shouldn't // load from it before storing, so we don't need to guard // against reordering those loads. 
-#if defined(__x86_64__) // TODO rdar://problem/11477843 - dispatch_assert(sizeof(*bitmap) == sizeof(uint64_t)); - return dispatch_atomic_set_first_bit((volatile uint64_t *)bitmap,max_index); -#else - dispatch_assert(sizeof(*bitmap) == sizeof(uint32_t)); - return dispatch_atomic_set_first_bit((volatile uint32_t *)bitmap,max_index); -#endif + dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long)); + return dispatch_atomic_set_first_bit(bitmap,max_index); } DISPATCH_ALWAYS_INLINE @@ -257,9 +253,8 @@ bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, const bitmap_t mask = BITMAP_C(1) << index; bitmap_t b; - b = *bitmap; if (exclusively == CLEAR_EXCLUSIVELY) { - if (slowpath((b & mask) == 0)) { + if (slowpath((*bitmap & mask) == 0)) { DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); } } @@ -397,11 +392,13 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) } #if DISPATCH_DEBUG // Double-check our math. - dispatch_assert(aligned_region % PAGE_SIZE == 0); - dispatch_assert(aligned_region_end % PAGE_SIZE == 0); + dispatch_assert(aligned_region % DISPATCH_ALLOCATOR_PAGE_SIZE == 0); + dispatch_assert(aligned_region % vm_kernel_page_size == 0); + dispatch_assert(aligned_region_end % DISPATCH_ALLOCATOR_PAGE_SIZE == 0); + dispatch_assert(aligned_region_end % vm_kernel_page_size == 0); dispatch_assert(aligned_region_end > aligned_region); - dispatch_assert(top_slop_len % PAGE_SIZE == 0); - dispatch_assert(bottom_slop_len % PAGE_SIZE == 0); + dispatch_assert(top_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0); + dispatch_assert(bottom_slop_len % DISPATCH_ALLOCATOR_PAGE_SIZE == 0); dispatch_assert(aligned_region_end + top_slop_len == region_end); dispatch_assert(region + bottom_slop_len == aligned_region); dispatch_assert(region_sz == bottom_slop_len + top_slop_len + @@ -566,9 +563,10 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) // last_locked-1, BITMAPS_PER_PAGE, &page_bitmaps[0]); // Scribble to expose use-after-free 
bugs // madvise (syscall) flushes these stores - memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, PAGE_SIZE); + memset(page, DISPATCH_ALLOCATOR_SCRIBBLE, DISPATCH_ALLOCATOR_PAGE_SIZE); #endif - (void)dispatch_assume_zero(madvise(page, PAGE_SIZE, MADV_FREE)); + (void)dispatch_assume_zero(madvise(page, DISPATCH_ALLOCATOR_PAGE_SIZE, + MADV_FREE)); unlock: while (last_locked > 1) { @@ -631,19 +629,23 @@ _dispatch_alloc_init(void) // self-aligned. dispatch_assert(offsetof(struct dispatch_magazine_s, conts) % (CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0); - dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == PAGE_SIZE); + dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == + DISPATCH_ALLOCATOR_PAGE_SIZE); #if PACK_FIRST_PAGE_WITH_CONTINUATIONS // The continuations in the first page should actually fit within the first // page. - dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < PAGE_SIZE); + dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < + DISPATCH_ALLOCATOR_PAGE_SIZE); dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) % DISPATCH_CONTINUATION_SIZE == 0); dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) + - sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == PAGE_SIZE); + sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == + DISPATCH_ALLOCATOR_PAGE_SIZE); #endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS } -#else +#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \ + || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) static inline void _dispatch_alloc_init(void) {} #endif diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 5f223f65f..f4c8ba0de 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -74,7 +74,7 @@ #if DISPATCH_ALLOCATOR // Configuration here! 
-#define NUM_CPU _dispatch_hw_config.cc_max_logical +#define NUM_CPU dispatch_hw_config(logical_cpus) #define MAGAZINES_PER_HEAP (NUM_CPU) // Do you care about compaction or performance? @@ -84,6 +84,16 @@ #define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0 #endif +#ifndef PAGE_MAX_SIZE +#define PAGE_MAX_SIZE PAGE_SIZE +#endif +#ifndef PAGE_MAX_MASK +#define PAGE_MAX_MASK PAGE_MASK +#endif +#define DISPATCH_ALLOCATOR_PAGE_SIZE PAGE_MAX_SIZE +#define DISPATCH_ALLOCATOR_PAGE_MASK PAGE_MAX_MASK + + #if TARGET_OS_EMBEDDED #define PAGES_PER_MAGAZINE 64 #else @@ -91,11 +101,11 @@ #endif // Use the largest type your platform is comfortable doing atomic ops with. -#if defined(__x86_64__) // TODO: rdar://11477843 +// TODO: rdar://11477843 typedef unsigned long bitmap_t; +#if defined(__LP64__) #define BYTES_PER_BITMAP 8 #else -typedef uint32_t bitmap_t; #define BYTES_PER_BITMAP 4 #endif @@ -107,7 +117,7 @@ typedef uint32_t bitmap_t; #define CONTINUATIONS_PER_BITMAP (BYTES_PER_BITMAP * 8) #define BITMAPS_PER_SUPERMAP (BYTES_PER_SUPERMAP * 8) -#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * PAGE_SIZE) +#define BYTES_PER_MAGAZINE (PAGES_PER_MAGAZINE * DISPATCH_ALLOCATOR_PAGE_SIZE) #define CONSUMED_BYTES_PER_BITMAP (BYTES_PER_BITMAP + \ (DISPATCH_CONTINUATION_SIZE * CONTINUATIONS_PER_BITMAP)) @@ -117,7 +127,7 @@ typedef uint32_t bitmap_t; #define BYTES_PER_HEAP (BYTES_PER_MAGAZINE * MAGAZINES_PER_HEAP) -#define BYTES_PER_PAGE PAGE_SIZE +#define BYTES_PER_PAGE DISPATCH_ALLOCATOR_PAGE_SIZE #define CONTINUATIONS_PER_PAGE (BYTES_PER_PAGE / DISPATCH_CONTINUATION_SIZE) #define BITMAPS_PER_PAGE (CONTINUATIONS_PER_PAGE / CONTINUATIONS_PER_BITMAP) @@ -159,7 +169,8 @@ typedef uint32_t bitmap_t; (BYTES_LEFT_IN_FIRST_PAGE / CONSUMED_BYTES_PER_BITMAP) #define REMAINDER_IN_FIRST_PAGE (BYTES_LEFT_IN_FIRST_PAGE - \ (FULL_BITMAPS_IN_FIRST_PAGE * CONSUMED_BYTES_PER_BITMAP) - \ - (FULL_BITMAPS_IN_FIRST_PAGE ? 0 : ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP))) + (FULL_BITMAPS_IN_FIRST_PAGE ? 
0 : \ + ROUND_UP_TO_CONTINUATION_SIZE(BYTES_PER_BITMAP))) #define REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE \ (REMAINDER_IN_FIRST_PAGE / DISPATCH_CONTINUATION_SIZE) diff --git a/src/apply.c b/src/apply.c index aa187a086..e0ab2c383 100644 --- a/src/apply.c +++ b/src/apply.c @@ -24,7 +24,7 @@ typedef void (*dispatch_apply_function_t)(void *, size_t); DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke2(void *ctxt) +_dispatch_apply_invoke2(void *ctxt, bool redirect) { dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; @@ -36,6 +36,7 @@ _dispatch_apply_invoke2(void *ctxt) // da_dc is only safe to access once the 'index lock' has been acquired dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; void *const da_ctxt = da->da_dc->dc_ctxt; + dispatch_queue_t dq = da->da_dc->dc_data; _dispatch_perfmon_workitem_dec(); // this unit executes many items @@ -43,6 +44,14 @@ _dispatch_apply_invoke2(void *ctxt) size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); _dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested); + dispatch_queue_t old_dq; + pthread_priority_t old_dp; + if (redirect) { + old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + old_dp = _dispatch_set_defaultpriority(dq->dq_priority); + } + // Striding is the responsibility of the caller. 
do { _dispatch_client_callout2(da_ctxt, idx, func); @@ -50,6 +59,11 @@ _dispatch_apply_invoke2(void *ctxt) done++; idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed); } while (fastpath(idx < iter)); + + if (redirect) { + _dispatch_reset_defaultpriority(old_dp); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + } _dispatch_thread_setspecific(dispatch_apply_key, (void*)nested); // The thread that finished the last workitem wakes up the possibly waiting @@ -67,20 +81,14 @@ DISPATCH_NOINLINE void _dispatch_apply_invoke(void *ctxt) { - _dispatch_apply_invoke2(ctxt); + _dispatch_apply_invoke2(ctxt, false); } DISPATCH_NOINLINE void _dispatch_apply_redirect_invoke(void *ctxt) { - dispatch_apply_t da = (dispatch_apply_t)ctxt; - dispatch_queue_t old_dq; - old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); - - _dispatch_thread_setspecific(dispatch_queue_key, da->da_dc->dc_data); - _dispatch_apply_invoke2(ctxt); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_apply_invoke2(ctxt, true); } static void @@ -118,6 +126,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; next->dc_func = func; next->dc_ctxt = da; + _dispatch_continuation_voucher_set(next, 0); + _dispatch_continuation_priority_set(next, 0, 0); next->do_next = head; head = next; @@ -130,7 +140,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); da->da_sema = sema; - _dispatch_queue_push_list(dq, head, tail, continuation_cnt); + _dispatch_queue_push_list(dq, head, tail, head->dc_priority, + continuation_cnt); // Call the first element directly _dispatch_apply_invoke(da); _dispatch_perfmon_workitem_inc(); @@ -183,7 +194,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (slowpath(iterations == 0)) { return; } - uint32_t thr_cnt = _dispatch_hw_config.cc_max_active; + uint32_t thr_cnt = 
dispatch_hw_config(active_cpus); size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); if (!slowpath(nested)) { nested = iterations; @@ -210,7 +221,8 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, dispatch_queue_t old_dq; old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { - dq = old_dq ? old_dq : _dispatch_get_root_queue(0, 0); + dq = old_dq ? old_dq : _dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_DEFAULT, false); while (slowpath(dq->do_targetq)) { dq = dq->do_targetq; } diff --git a/src/data.m b/src/data.m index 3e3eee142..c76f26a52 100644 --- a/src/data.m +++ b/src/data.m @@ -83,7 +83,7 @@ - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy dispatch_async_f(tq, ctxt, func); \ } \ if (tq) { \ - _dispatch_release(tq); \ + _os_object_release_internal((_os_object_t)tq); \ } - (void)dealloc { @@ -116,10 +116,10 @@ - (void)_setFinalizer:(dispatch_function_t)finalizer { - (void)_setTargetQueue:(dispatch_queue_t)queue { struct dispatch_data_s *dd = (void*)self; - _dispatch_retain(queue); + _os_object_retain_internal((_os_object_t)queue); dispatch_queue_t prev; prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release); - if (prev) _dispatch_release(prev); + if (prev) _os_object_release_internal((_os_object_t)prev); } - (NSString *)debugDescription { diff --git a/src/init.c b/src/init.c index 5a8b4bb43..5cbf8057f 100644 --- a/src/init.c +++ b/src/init.c @@ -71,6 +71,7 @@ pthread_key_t dispatch_sema4_key; pthread_key_t dispatch_cache_key; pthread_key_t dispatch_io_key; pthread_key_t dispatch_apply_key; +pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON @@ -78,7 +79,30 @@ pthread_key_t dispatch_bcounter_key; #endif #endif // !DISPATCH_USE_DIRECT_TSD -struct _dispatch_hw_config_s _dispatch_hw_config; +#if VOUCHER_USE_MACH_VOUCHER 
+dispatch_once_t _voucher_task_mach_voucher_pred; +mach_voucher_t _voucher_task_mach_voucher; +_voucher_activity_t _voucher_activity_default; +#endif +voucher_activity_mode_t _voucher_activity_mode; +int _dispatch_set_qos_class_enabled; + + +DISPATCH_NOINLINE +voucher_activity_mode_t +voucher_activity_get_mode(void) +{ + return _voucher_activity_mode; +} + +void +voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode) +{ + if (_voucher_activity_disabled()) return; + _voucher_activity_mode = mode; +} + +DISPATCH_HW_CONFIG(); bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork; DISPATCH_NOINLINE @@ -88,7 +112,6 @@ _dispatch_is_multithreaded(void) return !_dispatch_safe_fork; } - DISPATCH_NOINLINE bool _dispatch_is_fork_of_multithreaded_parent(void) @@ -97,19 +120,50 @@ _dispatch_is_fork_of_multithreaded_parent(void) } const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 4, + .dqo_version = 5, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), .dqo_flags = 0, .dqo_flags_size = 0, - .dqo_width = offsetof(struct dispatch_queue_s, dq_width), - .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width), .dqo_serialnum = offsetof(struct dispatch_queue_s, dq_serialnum), .dqo_serialnum_size = sizeof(((dispatch_queue_t)NULL)->dq_serialnum), + .dqo_width = offsetof(struct dispatch_queue_s, dq_width), + .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width), .dqo_running = offsetof(struct dispatch_queue_s, dq_running), .dqo_running_size = sizeof(((dispatch_queue_t)NULL)->dq_running), + .dqo_suspend_cnt = offsetof(struct dispatch_queue_s, do_suspend_cnt), + .dqo_suspend_cnt_size = sizeof(((dispatch_queue_t)NULL)->do_suspend_cnt), + .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq), + .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq), + .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority), + 
.dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority), }; +#if VOUCHER_USE_MACH_VOUCHER +const struct voucher_offsets_s voucher_offsets = { + .vo_version = 1, + .vo_activity_ids_count = offsetof(struct voucher_s, v_activities), + .vo_activity_ids_count_size = sizeof(((voucher_t)NULL)->v_activities), + .vo_activity_ids_array = (uint16_t)_voucher_activity_ids((voucher_t)(NULL)), + .vo_activity_ids_array_entry_size = sizeof(voucher_activity_id_t), +}; +#else // VOUCHER_USE_MACH_VOUCHER +const struct voucher_offsets_s voucher_offsets = { + .vo_version = 0, +}; +#endif // VOUCHER_USE_MACH_VOUCHER + +#if DISPATCH_USE_DIRECT_TSD +const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { + .dti_version = 2, + .dti_queue_index = dispatch_queue_key, + .dti_voucher_index = dispatch_voucher_key, + .dti_qos_class_index = dispatch_priority_key, +}; +#else // DISPATCH_USE_DIRECT_TSD +#error Not implemented on this platform +#endif // DISPATCH_USE_DIRECT_TSD + // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN @@ -117,7 +171,7 @@ struct dispatch_queue_s _dispatch_main_q = { .do_vtable = DISPATCH_VTABLE(queue), #if !DISPATCH_USE_RESOLVERS .do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, @@ -129,13 +183,72 @@ struct dispatch_queue_s _dispatch_main_q = { .dq_serialnum = 1, }; -struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = { - .do_vtable = DISPATCH_VTABLE(queue_attr), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_next = DISPATCH_OBJECT_LISTLESS, +#pragma mark - +#pragma mark dispatch_queue_attr_t + +#define DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, overcommit, concurrent) \ + { \ + .do_vtable = DISPATCH_VTABLE(queue_attr), \ + 
.do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_next = DISPATCH_OBJECT_LISTLESS, \ + .dqa_qos_class = (qos), \ + .dqa_relative_priority = (qos) ? (prio) : 0, \ + .dqa_overcommit = (overcommit), \ + .dqa_concurrent = (concurrent), \ + } + +#define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \ + { \ + [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 1), \ + [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 0), \ + [DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 1), \ + [DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 0), \ + } + +#define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ + [prio] = DISPATCH_QUEUE_ATTR_KIND_INIT(qos, -(prio)) + +#define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ + { \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ + } + +#define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \ + [DQA_INDEX_QOS_CLASS_##qos] = \ + DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) + +const struct dispatch_queue_attr_s _dispatch_queue_attrs[] + 
[DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2] = { + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UTILITY), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(DEFAULT), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INITIATED), + DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INTERACTIVE), }; + #pragma mark - #pragma mark dispatch_vtables @@ -346,7 +459,7 @@ _dispatch_abort(size_t line, long val) abort(); } -#if !DISPATCH_USE_OS_TRACE +#if !DISPATCH_USE_OS_DEBUG_LOG #pragma mark - #pragma mark dispatch_log @@ -423,24 +536,55 @@ _dispatch_logv_file(const char *msg, va_list ap) _dispatch_log_file(buf, len); } +#if DISPATCH_USE_SIMPLE_ASL +static inline void +_dispatch_syslog(const char *msg) +{ + _simple_asl_log(ASL_LEVEL_NOTICE, "com.apple.libsystem.libdispatch", msg); +} + +static inline void +_dispatch_vsyslog(const char *msg, va_list ap) +{ + char *str; + vasprintf(&str, msg, ap); + if (str) { + _dispatch_syslog(str); + free(str); + } +} +#else // DISPATCH_USE_SIMPLE_ASL +static inline void +_dispatch_syslog(const char *msg) +{ + syslog(LOG_NOTICE, "%s", msg); +} + +static inline void +_dispatch_vsyslog(const char *msg, va_list ap) +{ + vsyslog(LOG_NOTICE, msg, *ap_ptr); +} +#endif // DISPATCH_USE_SIMPLE_ASL + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_logv(const char *msg, size_t len, va_list ap) +_dispatch_logv(const char *msg, size_t len, va_list *ap_ptr) { dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init); if (slowpath(dispatch_log_disabled)) { return; } if (slowpath(dispatch_logfile != -1)) { - if (!ap) { + if (!ap_ptr) { return _dispatch_log_file((char*)msg, len); } - return _dispatch_logv_file(msg, ap); + return _dispatch_logv_file(msg, *ap_ptr); } - if (!ap) { - return syslog(LOG_NOTICE, "%s", msg); + if (!ap_ptr) { + return _dispatch_syslog(msg); } - return vsyslog(LOG_NOTICE, msg, ap); + return 
_dispatch_vsyslog(msg, *ap_ptr); } DISPATCH_NOINLINE @@ -450,11 +594,11 @@ _dispatch_log(const char *msg, ...) va_list ap; va_start(ap, msg); - _dispatch_logv(msg, 0, ap); + _dispatch_logv(msg, 0, &ap); va_end(ap); } -#endif // DISPATCH_USE_OS_TRACE +#endif // DISPATCH_USE_OS_DEBUG_LOG #pragma mark - #pragma mark dispatch_debug @@ -486,7 +630,7 @@ _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) offs = strlcpy(buf, "NULL: ", sizeof(buf)); } r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap); -#if !DISPATCH_USE_OS_TRACE +#if !DISPATCH_USE_OS_DEBUG_LOG size_t len = offs + (r < 0 ? 0 : (size_t)r); if (len > sizeof(buf) - 1) { len = sizeof(buf) - 1; @@ -576,13 +720,47 @@ _dispatch_call_block_and_release(void *block) Block_release(b); } +#pragma mark - +#pragma mark _dispatch_block_create no_objc + +#if !USE_OBJC + +// The compiler hides the name of the function it generates, and changes it if +// we try to reference it directly, but the linker still sees it. +extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) + asm("____dispatch_block_create_block_invoke"); +void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; + +dispatch_block_t +_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, + pthread_priority_t pri, dispatch_block_t block) +{ + dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902 + (void)voucher; // No voucher capture! (requires ObjC runtime) + struct dispatch_block_private_data_s dbpds = + DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, pri, copy_block); + dispatch_block_t new_block = _dispatch_Block_copy(^{ + // Capture object references, which retains copy_block. + // All retained objects must be captured by the *block*. We + // cannot borrow any references, because the block might be + // called zero or several times, so Block_release() is the + // only place that can release retained objects. 
+ (void)copy_block; + _dispatch_block_invoke(&dbpds); + }); + Block_release(copy_block); + return new_block; +} + +#endif // !USE_OBJC + #endif // __BLOCKS__ #pragma mark - #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && (__arm__ || !USE_OBJC) +#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC) // On platforms with SjLj exceptions, avoid the SjLj overhead on every callout // by clearing the unwinder's TSD pointer to the handler stack around callouts @@ -708,6 +886,24 @@ _os_object_dispose(_os_object_t obj) return _os_object_dealloc(obj); } +void* +os_retain(void *obj) +{ + if (fastpath(obj)) { + return _os_object_retain(obj); + } + return obj; +} + +#undef os_release +void +os_release(void *obj) +{ + if (fastpath(obj)) { + return _os_object_release(obj); + } +} + #pragma mark - #pragma mark dispatch_autorelease_pool no_objc @@ -821,65 +1017,87 @@ const struct dispatch_source_type_s _dispatch_source_type_write = { }, }; -#if DISPATCH_USE_VM_PRESSURE +#if DISPATCH_USE_MEMORYSTATUS + #if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 static int _dispatch_ios_simulator_memory_warnings_fd = -1; static void -_dispatch_ios_simulator_vm_source_init(void *context DISPATCH_UNUSED) +_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) { - char *e = getenv("IPHONE_SIMULATOR_MEMORY_WARNINGS"); + char *e = getenv("SIMULATOR_MEMORY_WARNINGS"); if (!e) return; _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); if (_dispatch_ios_simulator_memory_warnings_fd == -1) { (void)dispatch_assume_zero(errno); } } +#endif + static void -dispatch_source_type_vm_init(dispatch_source_t ds, +dispatch_source_type_memorystatus_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, - unsigned long mask, + unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { +#if 
TARGET_IPHONE_SIMULATOR static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_vm_source_init); - ds->ds_dkev->dk_kevent.ident = (uint64_t)(mask & DISPATCH_VM_PRESSURE ? - _dispatch_ios_simulator_memory_warnings_fd : -1); + dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); + handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; + mask = NOTE_ATTRIB; + ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE; + ds->ds_dkev->dk_kevent.ident = handle; + ds->ds_dkev->dk_kevent.flags |= EV_CLEAR; + ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; + ds->ds_ident_hack = handle; + ds->ds_pending_data_mask = mask; + ds->ds_memorystatus_override = 1; +#endif + ds->ds_is_level = false; } -const struct dispatch_source_type_s _dispatch_source_type_vm = { +#ifndef NOTE_MEMORYSTATUS_LOW_SWAP +#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 +#endif + +const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { .ke = { - .filter = EVFILT_VNODE, - .flags = EV_CLEAR, + .filter = EVFILT_MEMORYSTATUS, + .flags = EV_DISPATCH, }, - .mask = NOTE_ATTRIB, - .init = dispatch_source_type_vm_init, + .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN + |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP, + .init = dispatch_source_type_memorystatus_init, }; -#else + static void dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) + dispatch_source_type_t type, + uintptr_t handle, + unsigned long mask, + dispatch_queue_t q) { - ds->ds_is_level = false; + // Map legacy vm pressure to memorystatus warning rdar://problem/15907505 + mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; + ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; + ds->ds_pending_data_mask = mask; + ds->ds_vmpressure_override = 1; + dispatch_source_type_memorystatus_init(ds, type, handle, mask, q); 
} const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { - .filter = EVFILT_VM, + .filter = EVFILT_MEMORYSTATUS, .flags = EV_DISPATCH, }, .mask = NOTE_VM_PRESSURE, .init = dispatch_source_type_vm_init, }; -#endif -#endif -#ifdef DISPATCH_USE_MEMORYSTATUS +#elif DISPATCH_USE_VM_PRESSURE + static void -dispatch_source_type_memorystatus_init(dispatch_source_t ds, +dispatch_source_type_vm_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, unsigned long mask DISPATCH_UNUSED, @@ -888,19 +1106,16 @@ dispatch_source_type_memorystatus_init(dispatch_source_t ds, ds->ds_is_level = false; } -const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { +const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { - .filter = EVFILT_MEMORYSTATUS, + .filter = EVFILT_VM, .flags = EV_DISPATCH, }, - .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN -#ifdef NOTE_MEMORYSTATUS_PRESSURE_CRITICAL - |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL -#endif - , - .init = dispatch_source_type_memorystatus_init, + .mask = NOTE_VM_PRESSURE, + .init = dispatch_source_type_vm_init, }; -#endif + +#endif // DISPATCH_USE_VM_PRESSURE const struct dispatch_source_type_s _dispatch_source_type_proc = { .ke = { diff --git a/src/inline_internal.h b/src/inline_internal.h new file mode 100644 index 000000000..ea6953cd0 --- /dev/null +++ b/src/inline_internal.h @@ -0,0 +1,996 @@ +/* + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_INLINE_INTERNAL__ +#define __DISPATCH_INLINE_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +#if DISPATCH_USE_CLIENT_CALLOUT + +DISPATCH_NOTHROW void +_dispatch_client_callout(void *ctxt, dispatch_function_t f); +DISPATCH_NOTHROW void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +DISPATCH_NOTHROW bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f); +DISPATCH_NOTHROW void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f); + +#else // !DISPATCH_USE_CLIENT_CALLOUT + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout(void *ctxt, dispatch_function_t f) +{ + return f(ctxt); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) +{ + return f(ctxt, i); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, dispatch_data_applier_function_t f) +{ + return f(ctxt, 
region, offset, buffer, size); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error, + dispatch_mach_handler_function_t f) +{ + return f(ctxt, reason, dmsg, error); +} + +#endif // !DISPATCH_USE_CLIENT_CALLOUT + +#if !(USE_OBJC && __OBJC2__) + +#pragma mark - +#pragma mark _os_object_t & dispatch_object_t + +DISPATCH_ALWAYS_INLINE +static inline _os_object_t +_os_object_retain_internal_inline(_os_object_t obj) +{ + int ref_cnt = obj->os_obj_ref_cnt; + if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return obj; // global object + } + ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed); + if (slowpath(ref_cnt <= 0)) { + DISPATCH_CRASH("Resurrection of an object"); + } + return obj; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_os_object_release_internal_inline(_os_object_t obj) +{ + int ref_cnt = obj->os_obj_ref_cnt; + if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + return; // global object + } + ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed); + if (fastpath(ref_cnt >= 0)) { + return; + } + if (slowpath(ref_cnt < -1)) { + DISPATCH_CRASH("Over-release of an object"); + } +#if DISPATCH_DEBUG + if (slowpath(obj->os_obj_xref_cnt >= 0)) { + DISPATCH_CRASH("Release while external references exist"); + } +#endif + return _os_object_dispose(obj); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_retain(dispatch_object_t dou) +{ + (void)_os_object_retain_internal_inline(dou._os_obj); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release(dispatch_object_t dou) +{ + _os_object_release_internal_inline(dou._os_obj); +} + +#pragma mark - +#pragma mark dispatch_thread + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_start(mach_port_t thread, + pthread_priority_t priority) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + 
(void)_pthread_workqueue_override_start_direct(thread, priority); +#else + (void)thread; (void)priority; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_reset(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_workqueue_override_reset(); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_override_qos_class_start_direct(thread, priority); +#else + (void)thread; (void)priority; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_override_end(mach_port_t thread) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_override_qos_class_end_direct(thread); +#else + (void)thread; +#endif +} + +#pragma mark - +#pragma mark dispatch_queue_t + +static inline bool _dispatch_queue_need_override(dispatch_queue_t dq, + pthread_priority_t pp); +static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq, + pthread_priority_t pp); +static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq, + pthread_priority_t pp); +static inline pthread_priority_t _dispatch_queue_get_override_priority( + dispatch_queue_t dq); +static inline pthread_priority_t _dispatch_queue_reset_override_priority( + dispatch_queue_t dq); +static inline pthread_priority_t _dispatch_get_defaultpriority(void); +static inline void _dispatch_set_defaultpriority_override(void); +static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority); +static inline void _dispatch_set_priority(pthread_priority_t priority); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_set_thread(dispatch_queue_t dq) +{ + // The manager queue uses dispatch_queue_drain but is thread bound + if (!dq->dq_is_thread_bound) { + dq->dq_thread 
= _dispatch_thread_port(); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_clear_thread(dispatch_queue_t dq) +{ + if (!dq->dq_is_thread_bound) { + dq->dq_thread = MACH_PORT_NULL; + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, + struct dispatch_object_s *tail) +{ + struct dispatch_object_s *prev; + tail->do_next = NULL; + prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release); + if (fastpath(prev)) { + // if we crash here with a value less than 0x1000, then we are at a + // known bug in client code for example, see _dispatch_queue_dispose + // or _dispatch_atfork_child + prev->do_next = head; + } + return (prev != NULL); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +{ + struct dispatch_object_s *head = _head._do, *tail = _tail._do; + bool override = _dispatch_queue_need_override_retain(dq, pp); + if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) { + _dispatch_queue_push_list_slow(dq, pp, head, n, override); + } else if (override) { + _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp) +{ + struct dispatch_object_s *tail = _tail._do; + bool override = _dispatch_queue_need_override_retain(dq, pp); + if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { + _dispatch_queue_push_slow(dq, pp, tail, override); + } else if (override) { + _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp, bool wakeup) +{ + // caller assumed to have a reference on dq + struct dispatch_object_s *tail = _tail._do; + if 
(!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { + _dispatch_queue_push_slow(dq, pp, tail, false); + } else if (_dispatch_queue_need_override(dq, pp)) { + _dispatch_queue_wakeup_with_qos(dq, pp); + } else if (slowpath(wakeup)) { + _dispatch_queue_wakeup(dq); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_class_invoke(dispatch_object_t dou, + dispatch_queue_t (*invoke)(dispatch_object_t, + _dispatch_thread_semaphore_t*)) +{ + pthread_priority_t p = 0; + dispatch_queue_t dq = dou._dq; + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && + fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ + _dispatch_queue_set_thread(dq); + dispatch_queue_t tq = NULL; + _dispatch_thread_semaphore_t sema = 0; + tq = invoke(dq, &sema); + _dispatch_queue_clear_thread(dq); + p = _dispatch_queue_reset_override_priority(dq); + if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + // We do not need to check the result. + // When the suspend-count lock is dropped, then the check will happen. 
+ (void)dispatch_atomic_dec2o(dq, dq_running, release); + if (sema) { + _dispatch_thread_semaphore_signal(sema); + } else if (tq) { + _dispatch_introspection_queue_item_complete(dq); + return _dispatch_queue_push(tq, dq, p); + } + } + dq->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_introspection_queue_item_complete(dq); + if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) { + // seq_cst with atomic store to suspend_cnt + if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { + // verify that the queue is idle + return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + } + } + _dispatch_release(dq); // added when the queue is put on the list +} + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +_dispatch_queue_class_probe(dispatch_object_t dou) +{ + dispatch_queue_t dq = dou._dq; + struct dispatch_object_s *tail; + // seq_cst with atomic store to suspend_cnt + tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst); + return (unsigned long)slowpath(tail != NULL); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_suspended(dispatch_object_t dou) +{ + struct dispatch_object_s *obj = dou._do; + unsigned int suspend_cnt; + // seq_cst with atomic store to tail + suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst); + return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_get_current(void) +{ + return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue(qos_class_t priority, bool overcommit) +{ + if (overcommit) switch (priority) { + case _DISPATCH_QOS_CLASS_MAINTENANCE: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT]; + case _DISPATCH_QOS_CLASS_BACKGROUND: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT]; + 
case _DISPATCH_QOS_CLASS_UTILITY: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT]; + case _DISPATCH_QOS_CLASS_DEFAULT: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; + case _DISPATCH_QOS_CLASS_USER_INITIATED: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT]; + case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT]; + } else switch (priority) { + case _DISPATCH_QOS_CLASS_MAINTENANCE: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]; + case _DISPATCH_QOS_CLASS_BACKGROUND: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS]; + case _DISPATCH_QOS_CLASS_UTILITY: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS]; + case _DISPATCH_QOS_CLASS_DEFAULT: + return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS]; + case _DISPATCH_QOS_CLASS_USER_INITIATED: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS]; + case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: + return &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]; + } + return NULL; +} + +// Note to later developers: ensure that any initialization changes are +// made for statically allocated queues (i.e. _dispatch_main_q). 
+static inline void +_dispatch_queue_init(dispatch_queue_t dq) +{ + dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + + dq->dq_running = 0; + dq->dq_width = 1; + dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, + relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_set_bound_thread(dispatch_queue_t dq) +{ + //Tag thread-bound queues with the owning thread + dispatch_assert(dq->dq_is_thread_bound); + dq->dq_thread = _dispatch_thread_port(); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + dq->dq_thread = MACH_PORT_NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_port_t +_dispatch_queue_get_bound_thread(dispatch_queue_t dq) +{ + dispatch_assert(dq->dq_is_thread_bound); + return dq->dq_thread; +} + +#pragma mark - +#pragma mark dispatch_priority + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_get_defaultpriority(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( + dispatch_defaultpriority_key); + return priority; +#else + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_reset_defaultpriority(pthread_priority_t priority) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t old_priority = _dispatch_get_defaultpriority(); + // if an inner-loop or'd in the override flag to the per-thread priority, + // it needs to be propogated up the chain + priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG; + + if (slowpath(priority != old_priority)) { + _dispatch_thread_setspecific(dispatch_defaultpriority_key, + (void*)priority); + } +#else + (void)priority; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_defaultpriority_override(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t old_priority = _dispatch_get_defaultpriority(); + pthread_priority_t 
priority = old_priority | + _PTHREAD_PRIORITY_OVERRIDE_FLAG; + + if (slowpath(priority != old_priority)) { + _dispatch_thread_setspecific(dispatch_defaultpriority_key, + (void*)priority); + } +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_reset_defaultpriority_override(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t old_priority = _dispatch_get_defaultpriority(); + pthread_priority_t priority = old_priority & + ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG); + + if (slowpath(priority != old_priority)) { + _dispatch_thread_setspecific(dispatch_defaultpriority_key, + (void*)priority); + return true; + } +#endif + return false; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, + dispatch_queue_t tq) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; + const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; + pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; + if ((!dqp || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) { + dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + } +#else + (void)dq; (void)tq; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_set_defaultpriority(pthread_priority_t priority) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t old_priority = _dispatch_get_defaultpriority(); + if (old_priority) { + pthread_priority_t flags, defaultqueue, basepri; + flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); + priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!priority) { + flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue; + priority = basepri; + } else if (priority < basepri && !defaultqueue) { // rdar://16349734 + priority = basepri; + } + priority |= flags | 
(old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG); + } + if (slowpath(priority != old_priority)) { + _dispatch_thread_setspecific(dispatch_defaultpriority_key, + (void*)priority); + } + return old_priority; +#else + (void)priority; + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t defaultpri = _dispatch_get_defaultpriority(); + bool enforce, inherited, defaultqueue; + enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || + (priority & _PTHREAD_PRIORITY_ENFORCE_FLAG); + inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG); + defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!priority) { + enforce = false; + } else if (!enforce) { + if (priority < defaultpri) { + if (defaultqueue) enforce = true; // rdar://16349734 + } else if (inherited || defaultqueue) { + enforce = true; + } + } else if (priority < defaultpri && !defaultqueue) { // rdar://16349734 + enforce = false; + } + return enforce ? 
priority : defaultpri; +#else + (void)priority; (void)flags; + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_get_priority(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( + dispatch_priority_key); + return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); +#else + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority, + mach_voucher_t kv) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + _pthread_set_flags_t flags = 0; + if (priority && _dispatch_set_qos_class_enabled) { + pthread_priority_t old_priority = _dispatch_get_priority(); + if (priority != old_priority && old_priority) { + flags |= _PTHREAD_SET_SELF_QOS_FLAG; + } + } + if (kv != VOUCHER_NO_MACH_VOUCHER) { +#if VOUCHER_USE_MACH_VOUCHER + flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; +#endif + } + if (!flags) return; + int r = _pthread_set_properties_self(flags, priority, kv); + (void)dispatch_assume_zero(r); +#elif VOUCHER_USE_MACH_VOUCHER +#error Invalid build configuration +#else + (void)priority; (void)kv; +#endif +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority, + voucher_t voucher) +{ + pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? 
priority : 0; + voucher_t ov = DISPATCH_NO_VOUCHER; + mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; + if (voucher != DISPATCH_NO_VOUCHER) { + ov = _voucher_get(); + kv = _voucher_swap_and_get_mach_voucher(ov, voucher); + } + _dispatch_set_priority_and_mach_voucher(p, kv); + return ov; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_dispatch_adopt_priority_and_voucher(pthread_priority_t priority, + voucher_t voucher, unsigned long flags) +{ + pthread_priority_t p = 0; + if (priority != DISPATCH_NO_PRIORITY) { + p = _dispatch_priority_adopt(priority, flags); + } + return _dispatch_set_priority_and_adopt_voucher(p, voucher); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority, + voucher_t voucher, unsigned long flags) +{ + voucher_t ov; + ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags); + if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_priority_and_replace_voucher(pthread_priority_t priority, + voucher_t voucher) +{ + voucher_t ov; + ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher); + if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_priority(pthread_priority_t priority) +{ + _dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_normalize(pthread_priority_t pp) +{ + dispatch_assert_zero(pp & ~(pthread_priority_t) + _PTHREAD_PRIORITY_QOS_CLASS_MASK); + unsigned int qosbits = (unsigned int)pp, idx; + if (!qosbits) return 0; + idx = (unsigned int)(sizeof(qosbits)*8) - + (unsigned int)__builtin_clz(qosbits) - 1; + return (1 << idx); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp) +{ + if (!pp || dx_type(dq) 
== DISPATCH_QUEUE_ROOT_TYPE) return false; + uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + uint32_t o = dq->dq_override; + return (o < p); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp) +{ + bool override = _dispatch_queue_need_override(dq, pp); + if (override) _dispatch_retain(dq); + return override; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t pp) +{ + uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + uint32_t o = dq->dq_override; + if (o < p) o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed); + return (o < p); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_queue_get_override_priority(dispatch_queue_t dq) +{ + uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + uint32_t o = dq->dq_override; + if (o == p) return o; + return _dispatch_priority_normalize(o); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_set_override_priority(dispatch_queue_t dq) +{ + uint32_t p = 0; + if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { + p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + } + dispatch_atomic_store2o(dq, dq_override, p, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_queue_reset_override_priority(dispatch_queue_t dq) +{ + uint32_t p = 0; + if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { + p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + } + uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed); + if (o == p) return o; + return _dispatch_priority_normalize(o); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_propagate(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t priority = _dispatch_get_priority(); + if (priority > _dispatch_user_initiated_priority) { + // Cap QOS for propagation at 
user-initiated + priority = _dispatch_user_initiated_priority; + } + return priority; +#else + return 0; +#endif +} + +// including maintenance +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_is_background_thread(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t priority; + priority = _dispatch_get_priority(); + return priority && (priority <= _dispatch_background_priority); +#else + return false; +#endif +} + +#pragma mark - +#pragma mark dispatch_block_t + +#ifdef __BLOCKS__ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_has_private_data(const dispatch_block_t block) +{ + extern void (*_dispatch_block_special_invoke)(void*); + return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_block_private_data_t +_dispatch_block_get_data(const dispatch_block_t db) +{ + if (!_dispatch_block_has_private_data(db)) { + return NULL; + } + // Keep in sync with _dispatch_block_create implementation + uint8_t *x = (uint8_t *)db; + // x points to base of struct Block_layout + x += sizeof(struct Block_layout); + // x points to addresss of captured block + x += sizeof(dispatch_block_t); +#if USE_OBJC + // x points to addresss of captured voucher + x += sizeof(voucher_t); +#endif + // x points to base of captured dispatch_block_private_data_s structure + dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x; + if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) { + DISPATCH_CRASH("Corruption of dispatch block object"); + } + return dbpd; +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_block_get_priority(const dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + return dbpd ? 
dbpd->dbpd_priority : 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_block_flags_t +_dispatch_block_get_flags(const dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + return dbpd ? dbpd->dbpd_flags : 0; +} + +#define DISPATCH_BLOCK_HAS(flag, db) \ + ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0) +#define DISPATCH_BLOCK_IS(flag, db) \ + ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0) + +#endif + +#pragma mark - +#pragma mark dispatch_continuation_t + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_alloc_cacheonly(void) +{ + dispatch_continuation_t dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); + } + return dc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_alloc(void) +{ + dispatch_continuation_t dc = + fastpath(_dispatch_continuation_alloc_cacheonly()); + if(!dc) { + return _dispatch_continuation_alloc_from_heap(); + } + return dc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) +{ + dispatch_continuation_t prev_dc = (dispatch_continuation_t) + fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + int cnt = prev_dc ? 
prev_dc->dc_cache_cnt + 1 : 1; + // Cap continuation cache + if (slowpath(cnt > _dispatch_continuation_cache_limit)) { + return dc; + } + dc->do_next = prev_dc; + dc->dc_cache_cnt = cnt; + _dispatch_thread_setspecific(dispatch_cache_key, dc); + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_free(dispatch_continuation_t dc) +{ + dc = _dispatch_continuation_free_cacheonly(dc); + if (slowpath(dc)) { + _dispatch_continuation_free_to_cache_limit(dc); + } +} + +#include "trace.h" + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_continuation_pop(dispatch_object_t dou) +{ + dispatch_continuation_t dc = dou._dc, dc1; + dispatch_group_t dg; + + _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); + if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { + return dx_invoke(dou._do); + } + + // Add the item back to the cache before calling the function. This + // allows the 'hot' continuation to be used for a quick callback. + // + // The ccache version is per-thread. + // Therefore, the object has not been reused yet. + // This generates better assembly. 
+ if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { + _dispatch_continuation_voucher_adopt(dc); + dc1 = _dispatch_continuation_free_cacheonly(dc); + } else { + dc1 = NULL; + } + if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { + dg = dc->dc_data; + } else { + dg = NULL; + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + if (dg) { + dispatch_group_leave(dg); + _dispatch_release(dg); + } + _dispatch_introspection_queue_item_complete(dou); + if (slowpath(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_priority_set(dispatch_continuation_t dc, + pthread_priority_t pp, dispatch_block_flags_t flags) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t prio = 0; + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + prio = pp; + } else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) { + prio = _dispatch_priority_propagate(); + } + if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { + prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + dc->dc_priority = prio; +#else + (void)dc; (void)pp; (void)flags; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_continuation_get_override_priority(dispatch_queue_t dq, + dispatch_continuation_t dc) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; + pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + if (!p) { + enforce = false; + } else if (!enforce && (!dqp || defaultqueue)) { + enforce = true; + } + if (!enforce) { + p = dqp; + } + return p; +#else + (void)dq; (void)dc; + return 0; +#endif +} + +#endif // !(USE_OBJC && __OBJC2__) + +#endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index ed1a9c7e6..33fcedb41 100644 --- a/src/internal.h +++ b/src/internal.h 
@@ -41,6 +41,19 @@ #if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC #define DISPATCH_MACH_SPI 1 #endif +#if !defined(OS_VOUCHER_CREATION_SPI) && TARGET_OS_MAC +#define OS_VOUCHER_CREATION_SPI 1 +#endif +#if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC +#define OS_VOUCHER_ACTIVITY_SPI 1 +#endif +#if !defined(OS_VOUCHER_ACTIVITY_BUFFER_SPI) && TARGET_OS_MAC && \ + __has_include() +#define OS_VOUCHER_ACTIVITY_BUFFER_SPI 1 +#endif +#if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC +#define DISPATCH_LAYOUT_SPI 1 +#endif #if !defined(USE_OBJC) && HAVE_OBJC #define USE_OBJC 1 @@ -69,9 +82,10 @@ #include -#include #include +#include #include +#include #include #include #include @@ -120,6 +134,9 @@ struct type name = { 0 } #if !TARGET_OS_WIN32 #include "io_private.h" #endif +#include "voucher_private.h" +#include "voucher_activity_private.h" +#include "layout_private.h" #include "benchmark.h" #include "private.h" @@ -152,7 +169,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_USE_DTRACE 1 #endif -#if ((!TARGET_OS_EMBEDDED && DISPATCH_INTROSPECTION) || DISPATCH_DEBUG || \ +#if DISPATCH_USE_DTRACE && (DISPATCH_INTROSPECTION || DISPATCH_DEBUG || \ DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION) #define DISPATCH_USE_DTRACE_INTROSPECTION 1 #endif @@ -176,6 +193,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #include +#include #include #include #include @@ -222,9 +240,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #include -#if !TARGET_OS_WIN32 -#include -#endif #if HAVE_UNISTD_H #include #endif @@ -303,19 +318,30 @@ void _dispatch_bug_kevent_client(const char* msg, const char* filter, DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); -#if !defined(DISPATCH_USE_OS_TRACE) && DISPATCH_DEBUG -#if __has_include() -#define DISPATCH_USE_OS_TRACE 1 -#include +#if !defined(DISPATCH_USE_OS_DEBUG_LOG) && 
DISPATCH_DEBUG +#if __has_include() +#define DISPATCH_USE_OS_DEBUG_LOG 1 +#include +#endif +#endif // DISPATCH_USE_OS_DEBUG_LOG + +#if !defined(DISPATCH_USE_SIMPLE_ASL) && !DISPATCH_USE_OS_DEBUG_LOG +#if __has_include(<_simple.h>) +#define DISPATCH_USE_SIMPLE_ASL 1 +#include <_simple.h> #endif -#endif // DISPATCH_USE_OS_TRACE +#endif // DISPATCH_USE_SIMPLE_ASL -#if DISPATCH_USE_OS_TRACE -#define _dispatch_log(msg, ...) os_trace("libdispatch", msg, ## __VA_ARGS__) +#if !DISPATCH_USE_SIMPLE_ASL && !DISPATCH_USE_OS_DEBUG_LOG && !TARGET_OS_WIN32 +#include +#endif + +#if DISPATCH_USE_OS_DEBUG_LOG +#define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__) #else DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); -#endif // DISPATCH_USE_OS_TRACE +#endif // DISPATCH_USE_OS_DEBUG_LOG #define dsnprintf(...) \ ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) @@ -464,72 +490,15 @@ _dispatch_object_debug(dispatch_object_t object, const char *message, ...); #define _dispatch_object_debug(object, message, ...) 
#endif // DISPATCH_DEBUG -#if DISPATCH_USE_CLIENT_CALLOUT - -DISPATCH_NOTHROW void -_dispatch_client_callout(void *ctxt, dispatch_function_t f); -DISPATCH_NOTHROW void -_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); -DISPATCH_NOTHROW bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f); -DISPATCH_NOTHROW void -_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, - dispatch_mach_msg_t dmsg, mach_error_t error, - dispatch_mach_handler_function_t f); - -#else // !DISPATCH_USE_CLIENT_CALLOUT - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_client_callout(void *ctxt, dispatch_function_t f) -{ - return f(ctxt); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) -{ - return f(ctxt, i); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f) -{ - return f(ctxt, region, offset, buffer, size); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, - dispatch_mach_msg_t dmsg, mach_error_t error, - dispatch_mach_handler_function_t f); -{ - return f(ctxt, reason, dmsg, error); -} - -#endif // !DISPATCH_USE_CLIENT_CALLOUT - #ifdef __BLOCKS__ #define _dispatch_Block_invoke(bb) \ ((dispatch_function_t)((struct Block_layout *)bb)->invoke) -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_client_callout_block(dispatch_block_t b) -{ - return _dispatch_client_callout(b, _dispatch_Block_invoke(b)); -} - #if __GNUC__ dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) #else dispatch_block_t _dispatch_Block_copy(const void *block); #endif - void 
_dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ @@ -542,14 +511,8 @@ uint64_t _dispatch_timeout(dispatch_time_t when); extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; -extern struct _dispatch_hw_config_s { - uint32_t cc_max_active; - uint32_t cc_max_logical; - uint32_t cc_max_physical; -} _dispatch_hw_config; - -#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR) +// rdar://problem/15492045 #if __has_include() #define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 #include @@ -562,17 +525,9 @@ extern struct _dispatch_hw_config_s { // Older Mac OS X and iOS Simulator fallbacks #if HAVE_PTHREAD_WORKQUEUES -#ifndef WORKQ_BG_PRIOQUEUE -#define WORKQ_BG_PRIOQUEUE 3 -#endif #ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 #endif -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 -#ifndef DISPATCH_NO_BG_PRIORITY -#define DISPATCH_NO_BG_PRIORITY 1 -#endif -#endif #if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080 #ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 @@ -582,7 +537,21 @@ extern struct _dispatch_hw_config_s { #undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 #endif +#if TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 +#ifndef DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK +#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 +#endif +#endif +#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101000 +#undef HAVE__PTHREAD_WORKQUEUE_INIT +#define HAVE__PTHREAD_WORKQUEUE_INIT 0 +#endif #endif // HAVE_PTHREAD_WORKQUEUES +#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \ + && !defined(HAVE_PTHREAD_WORKQUEUE_QOS) +#define 
HAVE_PTHREAD_WORKQUEUE_QOS 1 +#endif #if HAVE_MACH #if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \ @@ -592,33 +561,29 @@ extern struct _dispatch_hw_config_s { #endif #endif // HAVE_MACH -#ifdef EVFILT_VM -#ifndef DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE 1 -#endif -#endif // EVFILT_VM - #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYSTATUS 1 #endif #endif // EVFILT_MEMORYSTATUS -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 +#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS +#ifndef DISPATCH_USE_VM_PRESSURE +#define DISPATCH_USE_VM_PRESSURE 1 +#endif +#endif // EVFILT_VM + +#if TARGET_IPHONE_SIMULATOR +#undef DISPATCH_USE_MEMORYSTATUS_SOURCE +#define DISPATCH_USE_MEMORYSTATUS_SOURCE 0 #undef DISPATCH_USE_VM_PRESSURE_SOURCE #define DISPATCH_USE_VM_PRESSURE_SOURCE 0 #endif // TARGET_IPHONE_SIMULATOR -#if TARGET_OS_EMBEDDED -#if !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 -#endif -#else // !TARGET_OS_EMBEDDED #if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYSTATUS_SOURCE 1 #elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE #define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif -#endif // TARGET_OS_EMBEDDED #if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) @@ -647,6 +612,12 @@ extern struct _dispatch_hw_config_s { #endif #endif // F_SETNOSIGPIPE +#if defined(MACH_SEND_NOIMPORTANCE) +#ifndef DISPATCH_USE_CHECKIN_NOIMPORTANCE +#define DISPATCH_USE_CHECKIN_NOIMPORTANCE 1 // rdar://problem/16996737 +#endif +#endif // MACH_SEND_NOIMPORTANCE + #if HAVE_LIBPROC_INTERNAL_H #include @@ -680,9 +651,26 @@ extern struct _dispatch_hw_config_s { #endif // HAVE_SYS_GUARDED_H -#define _dispatch_hardware_crash() __builtin_trap() +#ifndef MACH_MSGH_BITS_VOUCHER_MASK 
+#define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 +#define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ + (((remote) & MACH_MSGH_BITS_REMOTE_MASK) | \ + (((local) << 8) & MACH_MSGH_BITS_LOCAL_MASK) | \ + (((voucher) << 16) & MACH_MSGH_BITS_VOUCHER_MASK)) +#define MACH_MSGH_BITS_VOUCHER(bits) \ + (((bits) & MACH_MSGH_BITS_VOUCHER_MASK) >> 16) +#define MACH_MSGH_BITS_HAS_VOUCHER(bits) \ + (MACH_MSGH_BITS_VOUCHER(bits) != MACH_MSGH_BITS_ZERO) +#define msgh_voucher_port msgh_reserved +#define mach_voucher_t mach_port_t +#define MACH_VOUCHER_NULL MACH_PORT_NULL +#define MACH_SEND_INVALID_VOUCHER 0x10000005 +#endif + +#define _dispatch_hardware_crash() \ + __asm__(""); __builtin_trap() // -#define _dispatch_set_crash_log_message(x) +#define _dispatch_set_crash_log_message(msg) #if HAVE_MACH // MIG_REPLY_MISMATCH means either: @@ -712,16 +700,30 @@ extern struct _dispatch_hw_config_s { _dispatch_hardware_crash(); \ } while (0) +extern int _dispatch_set_qos_class_enabled; +#define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) +#define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) +#define DISPATCH_PRIORITY_ENFORCE 0x1 +static inline void _dispatch_adopt_priority_and_replace_voucher( + pthread_priority_t priority, voucher_t voucher, unsigned long flags); +#if HAVE_MACH +static inline void _dispatch_set_priority_and_mach_voucher( + pthread_priority_t priority, mach_voucher_t kv); +mach_port_t _dispatch_get_mach_host_port(void); +#endif + + /* #includes dependent on internal.h */ #include "object_internal.h" #include "semaphore_internal.h" #include "introspection_internal.h" #include "queue_internal.h" #include "source_internal.h" +#include "voucher_internal.h" #include "data_internal.h" #if !TARGET_OS_WIN32 #include "io_internal.h" #endif -#include "trace.h" +#include "inline_internal.h" #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c index 5338f259a..e907f857b 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -24,7 
+24,7 @@ #if DISPATCH_INTROSPECTION #include "internal.h" -#include "introspection.h" +#include "dispatch/introspection.h" #include "introspection_private.h" typedef struct dispatch_introspection_thread_s { @@ -85,7 +85,7 @@ _dispatch_introspection_init(void) const struct dispatch_introspection_versions_s dispatch_introspection_versions = { .introspection_version = 1, - .hooks_version = 1, + .hooks_version = 2, .hooks_size = sizeof(dispatch_introspection_hooks_s), .queue_item_version = 1, .queue_item_size = sizeof(dispatch_introspection_queue_item_s), @@ -149,7 +149,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, bool apply = false; long flags = (long)dc->do_vtable; if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { - waiter = dc->dc_data; + waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); if (flags & DISPATCH_OBJ_BARRIER_BIT) { dc = dc->dc_ctxt; dq = dc->dc_data; @@ -224,8 +224,8 @@ dispatch_introspection_queue_get_info(dispatch_queue_t dq) { bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); - uint32_t width = dq->dq_width; - if (width > 1 && width != UINT32_MAX) width /= 2; + uint16_t width = dq->dq_width; + if (width > 1 && width != DISPATCH_QUEUE_WIDTH_MAX) width /= 2; dispatch_introspection_queue_s diq = { .queue = dq, .target_queue = dq->do_targetq, @@ -248,17 +248,23 @@ dispatch_introspection_source_s _dispatch_introspection_source_get_info(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; - void *ctxt = dr->ds_handler_ctxt; - dispatch_function_t handler = dr->ds_handler_func; - bool handler_is_block = ds->ds_handler_is_block; + dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; + void *ctxt = NULL; + dispatch_function_t handler = NULL; + bool hdlr_is_block = false; + if (dc) { + ctxt = dc->dc_ctxt; + handler = dc->dc_func; + hdlr_is_block = ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT); + } bool after = (handler == 
_dispatch_after_timer_callback); - if (after) { - dispatch_continuation_t dc = ctxt; + if (after && !(ds->ds_atomic_flags & DSF_CANCELED)) { + dc = ctxt; ctxt = dc->dc_ctxt; handler = dc->dc_func; - if (handler == _dispatch_call_block_and_release) { + hdlr_is_block = (handler == _dispatch_call_block_and_release); + if (hdlr_is_block) { handler = _dispatch_Block_invoke(ctxt); - handler_is_block = 1; } } dispatch_introspection_source_s dis = { @@ -270,7 +276,7 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) .handler = handler, .suspend_count = ds->do_suspend_cnt / 2, .enqueued = (ds->do_suspend_cnt & 1), - .handler_is_block = handler_is_block, + .handler_is_block = hdlr_is_block, .timer = ds->ds_is_timer, .after = after, }; @@ -390,6 +396,7 @@ dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { .queue_dispose = DISPATCH_INTROSPECTION_NO_HOOK, .queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK, .queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK, + .queue_item_complete = DISPATCH_INTROSPECTION_NO_HOOK, }; #define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \ @@ -419,6 +426,7 @@ DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_create); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_destroy); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_enqueue); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_dequeue); +DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_item_complete); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_begin); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(queue_callout_end); @@ -564,7 +572,7 @@ _dispatch_introspection_queue_item_dequeue_hook(dispatch_queue_t dq, { dispatch_introspection_queue_item_s diqi; diqi = dispatch_introspection_queue_item_get_info(dq, dou._dc); - dispatch_introspection_hook_callout_queue_item_enqueue(dq, &diqi); + dispatch_introspection_hook_callout_queue_item_dequeue(dq, &diqi); } void @@ -578,6 +586,30 @@ _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, } } 
+DISPATCH_NOINLINE +void +dispatch_introspection_hook_callout_queue_item_complete( + dispatch_continuation_t object) +{ + DISPATCH_INTROSPECTION_HOOK_CALLOUT(queue_item_complete, object); +} + +DISPATCH_NOINLINE +static void +_dispatch_introspection_queue_item_complete_hook(dispatch_object_t dou) +{ + dispatch_introspection_hook_callout_queue_item_complete(dou._dc); +} + +void +_dispatch_introspection_queue_item_complete(dispatch_object_t dou) +{ + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_item_complete, dou); + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_item_complete)) { + _dispatch_introspection_queue_item_complete_hook(dou); + } +} + void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) { dispatch_queue_t dq = _dispatch_queue_get_current(); diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 89a9360c0..7b015aa0e 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -42,6 +42,7 @@ void _dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, dispatch_object_t dou); void _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, dispatch_object_t dou); +void _dispatch_introspection_queue_item_complete(dispatch_object_t dou); void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); @@ -104,6 +105,11 @@ static inline void _dispatch_introspection_queue_pop(dispatch_queue_t dq DISPATCH_UNUSED, dispatch_object_t dou DISPATCH_UNUSED) {} +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_queue_item_complete( + dispatch_object_t dou DISPATCH_UNUSED) {} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_introspection_callout_entry(void *ctxt DISPATCH_UNUSED, diff --git a/src/io.c b/src/io.c index 48683a666..d66873ba7 100644 --- a/src/io.c +++ b/src/io.c @@ -212,7 +212,8 @@ _dispatch_io_create(dispatch_io_type_t type) dispatch_io_t channel = 
_dispatch_alloc(DISPATCH_VTABLE(io), sizeof(struct dispatch_io_s)); channel->do_next = DISPATCH_OBJECT_LISTLESS; - channel->do_targetq = _dispatch_get_root_queue(0, true); + channel->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + true); channel->params.type = type; channel->params.high = SIZE_MAX; channel->params.low = dispatch_io_defaults.low_water_chunks * @@ -854,8 +855,8 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_READ, channel, 0, length, dispatch_data_empty, - _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, - false), ^(bool done, dispatch_data_t data, int error) { + _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + ^(bool done, dispatch_data_t data, int error) { if (data) { data = dispatch_data_create_concat(deliver_data, data); _dispatch_io_data_release(deliver_data); @@ -925,8 +926,8 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_WRITE, channel, 0, dispatch_data_get_size(data), data, - _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, - false), ^(bool done, dispatch_data_t d, int error) { + _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + ^(bool done, dispatch_data_t d, int error) { if (done) { if (d) { _dispatch_io_data_retain(d); @@ -1352,7 +1353,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) ); } _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, false)); + _DISPATCH_QOS_CLASS_DEFAULT, false)); } fd_entry->orig_flags = orig_flags; fd_entry->orig_nosigpipe = orig_nosigpipe; @@ -1420,7 +1421,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, _dispatch_disk_init(fd_entry, major(dev)); } else { _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, false)); + 
_DISPATCH_QOS_CLASS_DEFAULT, false)); } fd_entry->fd = -1; fd_entry->orig_flags = -1; @@ -1599,7 +1600,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) disk->do_next = DISPATCH_OBJECT_LISTLESS; disk->do_xref_cnt = -1; disk->advise_list_depth = pending_reqs_depth; - disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, + disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); disk->dev = dev; TAILQ_INIT(&disk->operations); diff --git a/src/object.c b/src/object.c index a30503989..5b09de716 100644 --- a/src/object.c +++ b/src/object.c @@ -37,38 +37,14 @@ DISPATCH_NOINLINE _os_object_t _os_object_retain_internal(_os_object_t obj) { - int ref_cnt = obj->os_obj_ref_cnt; - if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return obj; // global object - } - ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed); - if (slowpath(ref_cnt <= 0)) { - DISPATCH_CRASH("Resurrection of an object"); - } - return obj; + return _os_object_retain_internal_inline(obj); } DISPATCH_NOINLINE void _os_object_release_internal(_os_object_t obj) { - int ref_cnt = obj->os_obj_ref_cnt; - if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return; // global object - } - ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed); - if (fastpath(ref_cnt >= 0)) { - return; - } - if (slowpath(ref_cnt < -1)) { - DISPATCH_CRASH("Over-release of an object"); - } -#if DISPATCH_DEBUG - if (slowpath(obj->os_obj_xref_cnt >= 0)) { - DISPATCH_CRASH("Release while external references exist"); - } -#endif - return _os_object_dispose(obj); + return _os_object_release_internal_inline(obj); } DISPATCH_NOINLINE @@ -156,12 +132,6 @@ dispatch_retain(dispatch_object_t dou) (void)_os_object_retain(dou._os_obj); } -void -_dispatch_retain(dispatch_object_t dou) -{ - (void)_os_object_retain_internal(dou._os_obj); -} - void dispatch_release(dispatch_object_t dou) { @@ -169,12 +139,6 @@ dispatch_release(dispatch_object_t dou) 
_os_object_release(dou._os_obj); } -void -_dispatch_release(dispatch_object_t dou) -{ - _os_object_release_internal(dou._os_obj); -} - static void _dispatch_dealloc(dispatch_object_t dou) { @@ -261,7 +225,7 @@ dispatch_suspend(dispatch_object_t dou) // rdar://8181908 explains why we need to do an internal retain at every // suspension. (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); + DISPATCH_OBJECT_SUSPEND_INTERVAL, acquire); _dispatch_retain(dou._do); } @@ -290,7 +254,7 @@ dispatch_resume(dispatch_object_t dou) // If the previous value was less than the suspend interval, the object // has been over-resumed. unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do, - do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); + do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, release); if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { // Balancing the retain() done in suspend() for rdar://8181908 return _dispatch_release(dou._do); diff --git a/src/object.m b/src/object.m index e64a4fda7..953cb0bd6 100644 --- a/src/object.m +++ b/src/object.m @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011-2013 Apple Inc. All rights reserved. + * Copyright (c) 2011-2014 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -48,6 +48,7 @@ _os_object_have_gc = objc_collectingEnabled(); if (slowpath(_os_object_have_gc)) { _os_object_gc_zone = objc_collectableZone(); + (void)[OS_OBJECT_CLASS(object) class]; // OS_object class realization } } @@ -69,22 +70,55 @@ return obj; } -#define _os_objc_gc_retain(obj) \ - if (slowpath(_os_object_have_gc)) { \ - return auto_zone_retain(_os_object_gc_zone, obj); \ +DISPATCH_NOINLINE +static id +_os_objc_gc_retain(id obj) +{ + if (fastpath(obj)) { + auto_zone_retain(_os_object_gc_zone, obj); + } + return obj; +} + +DISPATCH_NOINLINE +static void +_os_objc_gc_release(id obj) +{ + if (fastpath(obj)) { + (void)auto_zone_release(_os_object_gc_zone, obj); + } + asm(""); // prevent tailcall +} + +DISPATCH_NOINLINE +static id +_os_object_gc_retain(id obj) +{ + if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { + return _os_object_retain(obj); + } else { + return _os_objc_gc_retain(obj); } +} -#define _os_objc_gc_release(obj) \ - if (slowpath(_os_object_have_gc)) { \ - return (void)auto_zone_release(_os_object_gc_zone, obj); \ +DISPATCH_NOINLINE +static void +_os_object_gc_release(id obj) +{ + if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { + return _os_object_release(obj); + } else { + return _os_objc_gc_release(obj); } +} #else // __OBJC_GC__ #define _os_object_gc_init() #define _os_object_make_uncollectable(obj) (obj) #define _os_object_make_collectable(obj) (obj) -#define _os_objc_gc_retain(obj) -#define _os_objc_gc_release(obj) +#define _os_object_have_gc 0 +#define _os_object_gc_retain(obj) (obj) +#define _os_object_gc_release(obj) #endif // __OBJC_GC__ #pragma mark - @@ -101,11 +135,26 @@ return obj; } +static void* +_os_objc_destructInstance(id obj) +{ + // noop if only Libystem is loaded + return obj; +} + void _os_object_init(void) { _objc_init(); _os_object_gc_init(); + if (slowpath(_os_object_have_gc)) return; + Block_callbacks_RR callbacks = { + sizeof(Block_callbacks_RR), + (void 
(*)(const void *))&objc_retain, + (void (*)(const void *))&objc_release, + (void (*)(const void *))&_os_objc_destructInstance + }; + _Block_use_RR2(&callbacks); } _os_object_t @@ -141,6 +190,22 @@ [obj _dispose]; } +#undef os_retain +void* +os_retain(void *obj) +{ + if (slowpath(_os_object_have_gc)) return _os_object_gc_retain(obj); + return objc_retain(obj); +} + +#undef os_release +void +os_release(void *obj) +{ + if (slowpath(_os_object_have_gc)) return _os_object_gc_release(obj); + return objc_release(obj); +} + #pragma mark - #pragma mark _os_object @@ -190,15 +255,13 @@ - (void)_dispose { void _dispatch_objc_retain(dispatch_object_t dou) { - _os_objc_gc_retain(dou); - return (void)[dou retain]; + return (void)os_retain(dou); } void _dispatch_objc_release(dispatch_object_t dou) { - _os_objc_gc_release(dou); - return [dou release]; + return os_release(dou); } void @@ -341,6 +404,58 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +@implementation OS_OBJECT_CLASS(voucher) +DISPATCH_OBJC_LOAD() + +- (id)init { + self = [super init]; + [self release]; + self = nil; + return self; +} + +- (void)_xref_dispose { + return _voucher_xref_dispose(self); // calls _os_object_release_internal() +} + +- (void)_dispose { + return _voucher_dispose(self); // calls _os_object_dealloc() +} + +- (NSString *)debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + _voucher_debug(self, buf, sizeof(buf)); + return [nsstring stringWithFormat: + [nsstring stringWithUTF8String:"<%s: %s>"], + class_getName([self class]), buf]; +} + +@end + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +@implementation OS_OBJECT_CLASS(voucher_recipe) +DISPATCH_OBJC_LOAD() + +- (id)init { + self = [super init]; + [self release]; + self = nil; + return self; +} + +- (void)_dispose { + +} + +- (NSString *)debugDescription { + return nil; // TODO: voucher_recipe debugDescription +} + +@end +#endif + #pragma mark - #pragma mark 
dispatch_autorelease_pool @@ -362,7 +477,7 @@ - (void)_xref_dispose { #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && !__arm__ +#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__ // On platforms with zero-cost exceptions, use a compiler-generated catch-all // exception handler. @@ -421,4 +536,34 @@ - (void)_xref_dispose { #endif // DISPATCH_USE_CLIENT_CALLOUT +#pragma mark - +#pragma mark _dispatch_block_create + +// The compiler hides the name of the function it generates, and changes it if +// we try to reference it directly, but the linker still sees it. +extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) + asm("____dispatch_block_create_block_invoke"); +void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; + +dispatch_block_t +_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, + pthread_priority_t pri, dispatch_block_t block) +{ + dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902 + struct dispatch_block_private_data_s dbpds = + DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, pri, copy_block); + dispatch_block_t new_block = _dispatch_Block_copy(^{ + // Capture object references, which retains copy_block and voucher. + // All retained objects must be captured by the *block*. We + // cannot borrow any references, because the block might be + // called zero or several times, so Block_release() is the + // only place that can release retained objects. 
+ (void)copy_block; + (void)voucher; + _dispatch_block_invoke(&dbpds); + }); + Block_release(copy_block); + return new_block; +} + #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index b3696632b..c0d17ae49 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -115,7 +115,7 @@ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ void *do_finalizer; \ - unsigned int do_suspend_cnt; + unsigned int volatile do_suspend_cnt; #define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT // "word and bit" must be a power of two to be safely subtracted @@ -175,8 +175,6 @@ struct dispatch_object_s { size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); void *_dispatch_alloc(const void *vtable, size_t size); -void _dispatch_retain(dispatch_object_t dou); -void _dispatch_release(dispatch_object_t dou); void _dispatch_xref_dispose(dispatch_object_t dou); void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT diff --git a/src/once.c b/src/once.c index ef57fc383..86b1a032c 100644 --- a/src/once.c +++ b/src/once.c @@ -24,12 +24,13 @@ #undef dispatch_once_f -struct _dispatch_once_waiter_s { +typedef struct _dispatch_once_waiter_s { volatile struct _dispatch_once_waiter_s *volatile dow_next; _dispatch_thread_semaphore_t dow_sema; -}; + mach_port_t dow_thread; +} *_dispatch_once_waiter_t; -#define DISPATCH_ONCE_DONE ((struct _dispatch_once_waiter_s *)~0l) +#define DISPATCH_ONCE_DONE ((_dispatch_once_waiter_t)~0l) #ifdef __BLOCKS__ void @@ -43,13 +44,13 @@ DISPATCH_NOINLINE void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { - struct _dispatch_once_waiter_s * volatile *vval = - (struct _dispatch_once_waiter_s**)val; - struct _dispatch_once_waiter_s dow = { NULL, 0 }; - struct _dispatch_once_waiter_s *tail, *tmp; + _dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val; + struct _dispatch_once_waiter_s dow = { NULL, 0, MACH_PORT_NULL }; + 
_dispatch_once_waiter_t tail = &dow, next, tmp; _dispatch_thread_semaphore_t sema; - if (dispatch_atomic_cmpxchg(vval, NULL, &dow, acquire)) { + if (dispatch_atomic_cmpxchg(vval, NULL, tail, acquire)) { + dow.dow_thread = _dispatch_thread_port(); _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. @@ -104,26 +105,31 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) dispatch_atomic_maximally_synchronizing_barrier(); // above assumed to contain release barrier - tmp = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); - tail = &dow; - while (tail != tmp) { - while (!tmp->dow_next) { - dispatch_hardware_pause(); - } - sema = tmp->dow_sema; - tmp = (struct _dispatch_once_waiter_s*)tmp->dow_next; + next = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); + while (next != tail) { + _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next); + sema = next->dow_sema; + next = tmp; _dispatch_thread_semaphore_signal(sema); } } else { dow.dow_sema = _dispatch_get_thread_semaphore(); - tmp = *vval; + next = *vval; for (;;) { - if (tmp == DISPATCH_ONCE_DONE) { + if (next == DISPATCH_ONCE_DONE) { break; } - if (dispatch_atomic_cmpxchgvw(vval, tmp, &dow, &tmp, release)) { - dow.dow_next = tmp; + if (dispatch_atomic_cmpxchgvw(vval, next, tail, &next, release)) { + dow.dow_thread = next->dow_thread; + dow.dow_next = next; + if (dow.dow_thread) { + pthread_priority_t pp = _dispatch_get_priority(); + _dispatch_thread_override_start(dow.dow_thread, pp); + } _dispatch_thread_semaphore_wait(dow.dow_sema); + if (dow.dow_thread) { + _dispatch_thread_override_end(dow.dow_thread); + } break; } } diff --git a/src/queue.c b/src/queue.c index 056876223..b8b4ad94f 100644 --- a/src/queue.c +++ b/src/queue.c @@ -30,10 +30,19 @@ #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL #define DISPATCH_USE_PTHREAD_POOL 1 #endif -#if HAVE_PTHREAD_WORKQUEUES && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ 
+#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \ + && !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK) +#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 +#endif +#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \ + !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif +#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK +#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 +#endif #if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* @@ -41,15 +50,22 @@ static void _dispatch_cache_cleanup(void *value); static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t dc); + dispatch_continuation_t dc, pthread_priority_t pp); static void _dispatch_queue_cleanup(void *ctxt); static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n); static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); static inline _dispatch_thread_semaphore_t _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); +static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq, + dispatch_queue_t tq, pthread_priority_t p); +static inline void _dispatch_queue_push_override(dispatch_queue_t dq, + dispatch_queue_t tq, pthread_priority_t p); #if HAVE_PTHREAD_WORKQUEUES -static void _dispatch_worker_thread3(void *context); +static void _dispatch_worker_thread4(void *context); +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void _dispatch_worker_thread3(pthread_priority_t priority); +#endif #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static void _dispatch_worker_thread2(int priority, int options, void *context); #endif @@ -67,51 +83,95 @@ static void _dispatch_runloop_queue_port_init(void *ctxt); static void 
_dispatch_runloop_queue_port_dispose(dispatch_queue_t dq); #endif +static void _dispatch_root_queues_init(void *context); +static dispatch_once_t _dispatch_root_queues_pred; + #pragma mark - #pragma mark dispatch_root_queue +struct dispatch_pthread_root_queue_context_s { + pthread_attr_t dpq_thread_attr; + dispatch_block_t dpq_thread_configure; + struct dispatch_semaphore_s dpq_thread_mediator; +}; +typedef struct dispatch_pthread_root_queue_context_s * + dispatch_pthread_root_queue_context_t; + #if DISPATCH_ENABLE_THREAD_POOL -static struct dispatch_semaphore_s _dispatch_thread_mediator[] = { - [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, - 
[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - }, +static struct dispatch_pthread_root_queue_context_s + _dispatch_pthread_root_queue_contexts[] = { + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { + .dpq_thread_mediator = { + .do_vtable = DISPATCH_VTABLE(semaphore), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + }}, }; #endif @@ -122,6 +182,7 @@ struct dispatch_root_queue_context_s { struct { unsigned int volatile dgq_pending; #if HAVE_PTHREAD_WORKQUEUES + qos_class_t dgq_qos; int dgq_wq_priority, dgq_wq_options; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; @@ -129,7 +190,6 @@ struct dispatch_root_queue_context_s { #endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL void *dgq_ctxt; - dispatch_semaphore_t dgq_thread_mediator; uint32_t volatile dgq_thread_pool_size; #endif }; @@ -140,84 +200,136 @@ typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { - [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = 0, +#endif +#if 
DISPATCH_ENABLE_THREAD_POOL + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], +#endif + }}}, + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], +#endif + }}}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = 0, +#endif +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], +#endif + }}}, + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ +#if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, +#endif +#if DISPATCH_ENABLE_THREAD_POOL + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], +#endif + }}}, + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if 
DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES + .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, .dgq_wq_priority = 
WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], #endif }}}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = {{{ + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL - .dgq_thread_mediator = &_dispatch_thread_mediator[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], + .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], #endif }}}, }; @@ -227,128 +339,206 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { // dq_running is set to 2 so that barrier operations go through the slow path DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { - [DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], - .dq_label = "com.apple.root.low-priority", + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], + .dq_label = "com.apple.root.maintenance-qos", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 4, }, - [DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.low-overcommit-priority", + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 5, }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], - .dq_label = "com.apple.root.default-priority", + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], + .dq_label = "com.apple.root.background-qos", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 6, }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = 
&_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.default-overcommit-priority", + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.background-qos.overcommit", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 7, }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], - .dq_label = "com.apple.root.high-priority", + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], + .dq_label = "com.apple.root.utility-qos", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 8, }, - [DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.high-overcommit-priority", + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.utility-qos.overcommit", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 9, }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], - .dq_label = 
"com.apple.root.background-priority", + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], + .dq_label = "com.apple.root.default-qos", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 10, }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY] = { + [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], - .dq_label = "com.apple.root.background-overcommit-priority", + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.default-qos.overcommit", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 11, }, + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], + .dq_label = "com.apple.root.user-initiated-qos", + .dq_running = 2, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_serialnum = 12, + }, + [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.user-initiated-qos.overcommit", + .dq_running = 2, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_serialnum = 13, + }, + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = 
DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], + .dq_label = "com.apple.root.user-interactive-qos", + .dq_running = 2, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_serialnum = 14, + }, + [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { + .do_vtable = DISPATCH_VTABLE(queue_root), + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .do_ctxt = &_dispatch_root_queue_contexts[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], + .dq_label = "com.apple.root.user-interactive-qos.overcommit", + .dq_running = 2, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_serialnum = 15, + }, }; -#if HAVE_PTHREAD_WORKQUEUES +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { + [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], + [WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = + &_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], [WORKQ_LOW_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], [WORKQ_LOW_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], [WORKQ_DEFAULT_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], [WORKQ_DEFAULT_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], [WORKQ_HIGH_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY], + 
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], [WORKQ_HIGH_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY], - [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY], - [WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = - &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY], + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], +}; +#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + +#define DISPATCH_PRIORITY_COUNT 5 + +enum { + // No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy + // maintenance priority + DISPATCH_PRIORITY_IDX_BACKGROUND = 0, + DISPATCH_PRIORITY_IDX_NON_INTERACTIVE, + DISPATCH_PRIORITY_IDX_LOW, + DISPATCH_PRIORITY_IDX_DEFAULT, + DISPATCH_PRIORITY_IDX_HIGH, +}; + +static qos_class_t _dispatch_priority2qos[] = { + [DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND, + [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY, + [DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY, + [DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT, + [DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED, }; -#endif // HAVE_PTHREAD_WORKQUEUES + +#if HAVE_PTHREAD_WORKQUEUE_QOS +static const int _dispatch_priority2wq[] = { + [DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE, + [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE, + [DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE, + [DISPATCH_PRIORITY_IDX_DEFAULT] = WORKQ_DEFAULT_PRIOQUEUE, + [DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE, +}; +#endif #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; @@ -378,15 +568,58 @@ dispatch_get_global_queue(long priority, unsigned long flags) if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return NULL; } - return _dispatch_get_root_queue(priority, - flags & 
DISPATCH_QUEUE_OVERCOMMIT); + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init); + qos_class_t qos; + switch (priority) { +#if !RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + case _DISPATCH_QOS_CLASS_MAINTENANCE: + if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] + .dq_priority) { + // map maintenance to background on old kernel + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; + } else { + qos = (qos_class_t)priority; + } + break; +#endif // RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; + break; + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE]; + break; + case DISPATCH_QUEUE_PRIORITY_LOW: + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW]; + break; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT]; + break; + case DISPATCH_QUEUE_PRIORITY_HIGH: + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; + break; + case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] + .dq_priority) { + qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; + break; + } +#endif + // fallthrough + default: + qos = (qos_class_t)priority; + break; + } + return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t _dispatch_get_current_queue(void) { - return _dispatch_queue_get_current() ?: _dispatch_get_root_queue(0, true); + return _dispatch_queue_get_current() ?: + _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); } dispatch_queue_t @@ -463,13 +696,52 @@ dispatch_assert_queue_not(dispatch_queue_t dq) #pragma mark - #pragma mark dispatch_init +#if HAVE_PTHREAD_WORKQUEUE_QOS +int 
_dispatch_set_qos_class_enabled; +pthread_priority_t _dispatch_background_priority; +pthread_priority_t _dispatch_user_initiated_priority; + static void -_dispatch_hw_config_init(void) +_dispatch_root_queues_init_qos(int supported) { - _dispatch_hw_config.cc_max_active = _dispatch_get_activecpu(); - _dispatch_hw_config.cc_max_logical = _dispatch_get_logicalcpu_max(); - _dispatch_hw_config.cc_max_physical = _dispatch_get_physicalcpu_max(); + pthread_priority_t p; + qos_class_t qos; + unsigned int i; + for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) { + p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0); + qos = _pthread_qos_class_decode(p, NULL, NULL); + dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED); + _dispatch_priority2qos[i] = qos; + } + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + qos = _dispatch_root_queue_contexts[i].dgq_qos; + if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && + !(supported & WORKQ_FEATURE_MAINTENANCE)) { + continue; + } + unsigned long flags = i & 1 ? 
_PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0; + flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; + if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS || + i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) { + flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + } + p = _pthread_qos_class_encode(qos, 0, flags); + _dispatch_root_queues[i].dq_priority = p; + } + p = _pthread_qos_class_encode(qos_class_main(), 0, 0); + _dispatch_main_q.dq_priority = p; + _dispatch_queue_set_override_priority(&_dispatch_main_q); + _dispatch_background_priority = _dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS].dq_priority & + ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_user_initiated_priority = _dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS].dq_priority & + ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { + _dispatch_set_qos_class_enabled = 1; + } } +#endif static inline bool _dispatch_root_queues_init_workq(void) @@ -481,8 +753,24 @@ _dispatch_root_queues_init_workq(void) disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif int r; +#if HAVE_PTHREAD_WORKQUEUE_QOS + bool disable_qos = false; +#if DISPATCH_DEBUG + disable_qos = slowpath(getenv("LIBDISPATCH_DISABLE_QOS")); +#endif + if (!disable_qos && !disable_wq) { + r = _pthread_workqueue_supported(); + int supported = r; + if (r & WORKQ_FEATURE_FINEPRIO) { + r = _pthread_workqueue_init(_dispatch_worker_thread3, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); + result = !r; + if (result) _dispatch_root_queues_init_qos(supported); + } + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - if (!disable_wq) { + if (!result && !disable_wq) { #if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218 pthread_workqueue_setdispatchoffset_np( offsetof(struct dispatch_queue_s, dq_serialnum)); @@ -509,11 +797,7 @@ _dispatch_root_queues_init_workq(void) dispatch_root_queue_context_t qc; qc = &_dispatch_root_queue_contexts[i]; #if 
DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (!disable_wq -#if DISPATCH_NO_BG_PRIORITY - && (qc->dgq_wq_priority != WORKQ_BG_PRIOQUEUE) -#endif - ) { + if (!disable_wq) { r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, qc->dgq_wq_priority); (void)dispatch_assume_zero(r); @@ -543,25 +827,41 @@ _dispatch_root_queues_init_workq(void) #if DISPATCH_USE_PTHREAD_POOL static inline void _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, - bool overcommit) + uint8_t pool_size, bool overcommit) { - qc->dgq_thread_pool_size = overcommit ? MAX_PTHREAD_COUNT : - _dispatch_hw_config.cc_max_active; + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + uint32_t thread_pool_size = overcommit ? MAX_PTHREAD_COUNT : + dispatch_hw_config(active_cpus); + if (slowpath(pool_size) && pool_size < thread_pool_size) { + thread_pool_size = pool_size; + } + qc->dgq_thread_pool_size = thread_pool_size; + if (qc->dgq_qos) { + (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); + (void)dispatch_assume_zero(pthread_attr_setdetachstate( + &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + (void)dispatch_assume_zero(pthread_attr_set_qos_class_np( + &pqc->dpq_thread_attr, qc->dgq_qos, 0)); +#endif + } #if USE_MACH_SEM // override the default FIFO behavior for the pool semaphores kern_return_t kr = semaphore_create(mach_task_self(), - &qc->dgq_thread_mediator->dsema_port, SYNC_POLICY_LIFO, 0); + &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); - (void)dispatch_assume(qc->dgq_thread_mediator->dsema_port); + (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); #elif USE_POSIX_SEM /* XXXRW: POSIX semaphores don't support LIFO? 
*/ - int ret = sem_init(&qc->dgq_thread_mediator->dsema_sem, 0, 0); + int ret = sem_init(&pqc->dpq_thread_mediator.dsema_sem), 0, 0); (void)dispatch_assume_zero(ret); #endif } #endif // DISPATCH_USE_PTHREAD_POOL +static dispatch_once_t _dispatch_root_queues_pred; + static void _dispatch_root_queues_init(void *context DISPATCH_UNUSED) { @@ -580,13 +880,12 @@ _dispatch_root_queues_init(void *context DISPATCH_UNUSED) } #endif _dispatch_root_queue_init_pthread_pool( - &_dispatch_root_queue_contexts[i], overcommit); + &_dispatch_root_queue_contexts[i], 0, overcommit); } #else DISPATCH_CRASH("Root queue initialization failed"); #endif // DISPATCH_ENABLE_THREAD_POOL } - } #define countof(x) (sizeof(x) / sizeof(x[0])) @@ -595,8 +894,8 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) { - dispatch_assert(DISPATCH_QUEUE_PRIORITY_COUNT == 4); - dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 8); + dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6); + dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12); dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH); @@ -604,16 +903,24 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_COUNT); dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); -#if HAVE_PTHREAD_WORKQUEUES + dispatch_assert(countof(_dispatch_priority2qos) == + DISPATCH_PRIORITY_COUNT); +#if HAVE_PTHREAD_WORKQUEUE_QOS + dispatch_assert(countof(_dispatch_priority2wq) == + DISPATCH_PRIORITY_COUNT); +#endif +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP dispatch_assert(sizeof(_dispatch_wq2root_queues) / sizeof(_dispatch_wq2root_queues[0][0]) == - DISPATCH_ROOT_QUEUE_COUNT); + WORKQ_NUM_PRIOQUEUE * 2); #endif #if DISPATCH_ENABLE_THREAD_POOL - dispatch_assert(countof(_dispatch_thread_mediator) == + dispatch_assert(countof(_dispatch_pthread_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); #endif + dispatch_assert(offsetof(struct dispatch_continuation_s, do_next) == + offsetof(struct dispatch_object_s, do_next)); 
dispatch_assert(sizeof(struct dispatch_apply_s) <= DISPATCH_CONTINUATION_SIZE); dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE @@ -622,20 +929,22 @@ libdispatch_init(void) DISPATCH_CACHELINE_SIZE == 0); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); -#if !DISPATCH_USE_OS_SEMAPHORE_CACHE - _dispatch_thread_key_create(&dispatch_sema4_key, - (void (*)(void *))_dispatch_thread_semaphore_dispose); -#endif + _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); _dispatch_thread_key_create(&dispatch_io_key, NULL); _dispatch_thread_key_create(&dispatch_apply_key, NULL); -#if DISPATCH_PERF_MON + _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); +#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif +#if !DISPATCH_USE_OS_SEMAPHORE_CACHE + _dispatch_thread_key_create(&dispatch_sema4_key, + (void (*)(void *))_dispatch_thread_semaphore_dispose); +#endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]; + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; #endif _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); @@ -649,9 +958,45 @@ libdispatch_init(void) _dispatch_hw_config_init(); _dispatch_vtable_init(); _os_object_init(); + _voucher_init(); _dispatch_introspection_init(); } +#if HAVE_MACH +static dispatch_once_t _dispatch_mach_host_port_pred; +static mach_port_t _dispatch_mach_host_port; + +static void +_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) +{ + kern_return_t kr; + mach_port_t mp, mhp = mach_host_self(); + kr = host_get_host_port(mhp, &mp); + DISPATCH_VERIFY_MIG(kr); + if (!kr) { + // mach_host_self returned the HOST_PRIV port + kr = mach_port_deallocate(mach_task_self(), mhp); + 
DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + mhp = mp; + } else if (kr != KERN_INVALID_ARGUMENT) { + (void)dispatch_assume_zero(kr); + } + if (!dispatch_assume(mhp)) { + DISPATCH_CRASH("Could not get unprivileged host port"); + } + _dispatch_mach_host_port = mhp; +} + +mach_port_t +_dispatch_get_mach_host_port(void) +{ + dispatch_once_f(&_dispatch_mach_host_port_pred, NULL, + _dispatch_mach_host_port_init); + return _dispatch_mach_host_port; +} +#endif + DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void) @@ -659,6 +1004,11 @@ dispatch_atfork_child(void) void *crash = (void *)0x100; size_t i; +#if HAVE_MACH + _dispatch_mach_host_port_pred = 0; + _dispatch_mach_host_port = MACH_VOUCHER_NULL; +#endif + _voucher_atfork_child(); if (_dispatch_safe_fork) { return; } @@ -676,6 +1026,94 @@ dispatch_atfork_child(void) } } +#pragma mark - +#pragma mark dispatch_queue_attr_t + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) +{ + qos_class_t qos = (qos_class_t)qos_class; + switch (qos) { + case _DISPATCH_QOS_CLASS_MAINTENANCE: + case _DISPATCH_QOS_CLASS_BACKGROUND: + case _DISPATCH_QOS_CLASS_UTILITY: + case _DISPATCH_QOS_CLASS_DEFAULT: + case _DISPATCH_QOS_CLASS_USER_INITIATED: + case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: + case _DISPATCH_QOS_CLASS_UNSPECIFIED: + break; + default: + return false; + } + if (relative_priority > 0 || relative_priority < QOS_MIN_RELATIVE_PRIORITY){ + return false; + } + return true; +} + +#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \ + [_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos + +static const +_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED), + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE), + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND), + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY), + 
DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT), + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED), + DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE), +}; + +#define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \ + (overcommit ? DQA_INDEX_OVERCOMMIT : DQA_INDEX_NON_OVERCOMMIT) + +#define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \ + (concurrent ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) + +#define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) + +#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) + +static inline dispatch_queue_attr_t +_dispatch_get_queue_attr(qos_class_t qos, int prio, bool overcommit, + bool concurrent) +{ + return (dispatch_queue_attr_t)&_dispatch_queue_attrs + [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)] + [DISPATCH_QUEUE_ATTR_PRIO2IDX(prio)] + [DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit)] + [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)]; +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, + dispatch_qos_class_t qos_class, int relative_priority) +{ + if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!slowpath(dqa)) { + dqa = _dispatch_get_queue_attr(0, 0, false, false); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + } + return _dispatch_get_queue_attr(qos_class, relative_priority, + dqa->dqa_overcommit, dqa->dqa_concurrent); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, + bool overcommit) +{ + if (!slowpath(dqa)) { + dqa = _dispatch_get_queue_attr(0, 0, false, false); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + } + return _dispatch_get_queue_attr(dqa->dqa_qos_class, + dqa->dqa_relative_priority, overcommit, dqa->dqa_concurrent); +} + #pragma mark - #pragma mark dispatch_queue_t @@ -683,38 +1121,80 @@ dispatch_atfork_child(void) // 1 - 
main_q // 2 - mgr_q // 3 - mgr_root_q -// 4,5,6,7,8,9,10,11 - global queues +// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues // we use 'xadd' on Intel, so the initial value == next assigned -unsigned long volatile _dispatch_queue_serial_numbers = 12; +unsigned long volatile _dispatch_queue_serial_numbers = 16; dispatch_queue_t -dispatch_queue_create_with_target(const char *label, - dispatch_queue_attr_t attr, dispatch_queue_t tq) +dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq) { - dispatch_queue_t dq; - - dq = _dispatch_alloc(DISPATCH_VTABLE(queue), +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + // Be sure the root queue priorities are set + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init); +#endif + bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT); + if (!slowpath(dqa)) { + dqa = _dispatch_get_queue_attr(0, 0, false, false); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + } + dispatch_queue_t dq = _dispatch_alloc(DISPATCH_VTABLE(queue), sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); - _dispatch_queue_init(dq); if (label) { dq->dq_label = strdup(label); } + qos_class_t qos = dqa->dqa_qos_class; + bool overcommit = dqa->dqa_overcommit; +#if HAVE_PTHREAD_WORKQUEUE_QOS + dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority, + overcommit); +#endif + if (dqa->dqa_concurrent) { + dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; + } else { + // Default serial queue target queue is overcommit! 
+ overcommit = true; + } + if (!tq) { + if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + qos = _DISPATCH_QOS_CLASS_DEFAULT; + } +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && + !_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_USER_INITIATED; + } +#endif + bool maintenance_fallback = false; +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + maintenance_fallback = true; +#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (maintenance_fallback) { + if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && + !_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_BACKGROUND; + } + } - if (attr == DISPATCH_QUEUE_CONCURRENT) { - dq->dq_width = UINT32_MAX; - if (!tq) { - tq = _dispatch_get_root_queue(0, false); + tq = _dispatch_get_root_queue(qos, overcommit); + if (slowpath(!tq)) { + DISPATCH_CLIENT_CRASH("Invalid queue attribute"); } } else { - if (!tq) { - // Default target queue is overcommit! - tq = _dispatch_get_root_queue(0, true); - } - if (slowpath(attr)) { - dispatch_debug_assert(!attr, "Invalid attribute"); + _dispatch_retain(tq); + if (disallow_tq) { + // TODO: override target queue's qos/overcommit ? + DISPATCH_CLIENT_CRASH("Invalid combination of target queue & " + "queue attribute"); } + _dispatch_queue_priority_inherit_from_target(dq, tq); } + _dispatch_queue_set_override_priority(dq); dq->do_targetq = tq; _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); @@ -770,6 +1250,22 @@ dispatch_queue_get_label(dispatch_queue_t dq) return dq->dq_label ? 
dq->dq_label : ""; } +qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +{ + qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; + int relative_priority = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t dqp = dq->dq_priority; + if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; + qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); +#else + (void)dq; +#endif + if (relative_priority_ptr) *relative_priority_ptr = relative_priority; + return qos; +} + static void _dispatch_queue_set_width2(void *ctxt) { @@ -786,20 +1282,23 @@ _dispatch_queue_set_width2(void *ctxt) tmp = (unsigned int)w; } else switch (w) { case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = _dispatch_hw_config.cc_max_physical; + tmp = dispatch_hw_config(physical_cpus); break; case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = _dispatch_hw_config.cc_max_active; + tmp = dispatch_hw_config(active_cpus); break; default: // fall through case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - tmp = _dispatch_hw_config.cc_max_logical; + tmp = dispatch_hw_config(logical_cpus); break; } + if (tmp > DISPATCH_QUEUE_WIDTH_MAX / 2) { + tmp = DISPATCH_QUEUE_WIDTH_MAX / 2; + } // multiply by two since the running count is inc/dec by two // (the low bit == barrier) - dq->dq_width = tmp * 2; + dq->dq_width = (typeof(dq->dq_width))(tmp * 2); _dispatch_object_debug(dq, "%s", __func__); } @@ -819,12 +1318,20 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width) static void _dispatch_set_target_queue2(void *ctxt) { - dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(); + dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt; + mach_port_t th; + while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL, + _dispatch_thread_port(), &th, acquire)) { + _dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION, + DISPATCH_CONTENTION_USLEEP_START); + } + _dispatch_queue_priority_inherit_from_target(dq, tq); prev_dq = dq->do_targetq; - 
dq->do_targetq = ctxt; + dq->do_targetq = tq; _dispatch_release(prev_dq); _dispatch_object_debug(dq, "%s", __func__); + dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, release); } void @@ -839,7 +1346,8 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) if (slowpath(!dq)) { bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && slowpath(dou._dq->dq_width > 1)); - dq = _dispatch_get_root_queue(0, !is_concurrent_q); + dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + !is_concurrent_q); } // TODO: put into the vtable switch(type) { @@ -864,14 +1372,6 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) #pragma mark - #pragma mark dispatch_pthread_root_queue -struct dispatch_pthread_root_queue_context_s { - pthread_attr_t dpq_thread_attr; - dispatch_block_t dpq_thread_configure; - struct dispatch_semaphore_s dpq_thread_mediator; -}; -typedef struct dispatch_pthread_root_queue_context_s * - dispatch_pthread_root_queue_context_t; - #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_pthread_root_queue_context_s _dispatch_mgr_root_queue_pthread_context; @@ -891,11 +1391,12 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue = { .do_ctxt = &_dispatch_mgr_root_queue_context, .dq_label = "com.apple.root.libdispatch-manager", .dq_running = 2, - .dq_width = UINT32_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_MAX, .dq_serialnum = 3, }; static struct { volatile int prio; + int default_prio; int policy; pthread_t tid; } _dispatch_mgr_sched; @@ -911,8 +1412,9 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, &_dispatch_mgr_sched.policy)); (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); - // high-priority workq threads are at priority 2 above default - _dispatch_mgr_sched.prio = param.sched_priority + 2; + // legacy priority calls allowed when requesting above default priority + _dispatch_mgr_sched.default_prio = 
param.sched_priority; + _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; } DISPATCH_NOINLINE @@ -927,9 +1429,19 @@ _dispatch_mgr_root_queue_init(void) PTHREAD_CREATE_DETACHED)); #if !DISPATCH_DEBUG (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (_dispatch_set_qos_class_enabled) { + qos_class_t qos = qos_class_main(); + (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); + _dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0); + _dispatch_queue_set_override_priority(&_dispatch_mgr_q); + } #endif param.sched_priority = _dispatch_mgr_sched.prio; - (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); + if (param.sched_priority > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); + } return &_dispatch_mgr_sched.tid; } @@ -939,8 +1451,11 @@ _dispatch_mgr_priority_apply(void) struct sched_param param; do { param.sched_priority = _dispatch_mgr_sched.prio; - (void)dispatch_assume_zero(pthread_setschedparam( - _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, ¶m)); + if (param.sched_priority > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(pthread_setschedparam( + _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, + ¶m)); + } } while (_dispatch_mgr_sched.prio > param.sched_priority); } @@ -982,10 +1497,9 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, dispatch_root_queue_context_t qc; dispatch_pthread_root_queue_context_t pqc; size_t dqs; + uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? 
+ (uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; - if (slowpath(flags)) { - return NULL; - } dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs + sizeof(struct dispatch_root_queue_context_s) + @@ -1002,18 +1516,25 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, dq->do_ctxt = qc; dq->do_targetq = NULL; dq->dq_running = 2; - dq->dq_width = UINT32_MAX; + dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); - qc->dgq_thread_mediator = &pqc->dpq_thread_mediator; qc->dgq_ctxt = pqc; #if HAVE_PTHREAD_WORKQUEUES qc->dgq_kworkqueue = (void*)(~0ul); #endif - _dispatch_root_queue_init_pthread_pool(qc, true); // rdar://11352331 + _dispatch_root_queue_init_pthread_pool(qc, pool_size, true); if (attr) { memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = 0; + if (!pthread_attr_get_qos_class_np(&pqc->dpq_thread_attr, &qos, NULL) + && qos > _DISPATCH_QOS_CLASS_DEFAULT) { + DISPATCH_CLIENT_CRASH("pthread root queues do not support " + "explicit QoS attributes"); + } +#endif _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); } else { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); @@ -1040,11 +1561,13 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) dispatch_root_queue_context_t qc = dq->do_ctxt; dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - _dispatch_semaphore_dispose(qc->dgq_thread_mediator); + pthread_attr_destroy(&pqc->dpq_thread_attr); + _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator); if (pqc->dpq_thread_configure) { Block_release(pqc->dpq_thread_configure); } - dq->do_targetq = _dispatch_get_root_queue(0, false); + dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + false); #endif if (dq->dq_label) { free((void*)dq->dq_label); @@ -1078,7 +1601,7 @@ 
_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) { if (dqs->dqs_destructor) { dispatch_async_f(_dispatch_get_root_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt, + _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, dqs->dqs_destructor); } free(dqs); @@ -1095,9 +1618,9 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) sizeof(struct dispatch_queue_specific_queue_s)); _dispatch_queue_init((dispatch_queue_t)dqsq); dqsq->do_xref_cnt = -1; - dqsq->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, - true); - dqsq->dq_width = UINT32_MAX; + dqsq->do_targetq = _dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_USER_INITIATED, true); + dqsq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, @@ -1118,7 +1641,7 @@ _dispatch_queue_set_specific(void *ctxt) // Destroy previous context for existing key if (dqs->dqs_destructor) { dispatch_async_f(_dispatch_get_root_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, false), dqs->dqs_ctxt, + _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, dqs->dqs_destructor); } if (dqsn->dqs_ctxt) { @@ -1227,7 +1750,7 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) target->dq_label : "", target, dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); if (dq->dq_is_thread_bound) { - offset += dsnprintf(buf, bufsiz, ", thread = %p ", + offset += dsnprintf(buf, bufsiz, ", thread = 0x%x ", _dispatch_queue_get_bound_thread(dq)); } return offset; @@ -1256,7 +1779,7 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) { } #endif -#if DISPATCH_PERF_MON +#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION static OSSpinLock _dispatch_stats_lock; static struct { uint64_t time_total; @@ -1267,18 +1790,13 @@ static struct { static void _dispatch_queue_merge_stats(uint64_t start) { - 
uint64_t avg, delta = _dispatch_absolute_time() - start; - unsigned long count, bucket; + uint64_t delta = _dispatch_absolute_time() - start; + unsigned long count; - count = (size_t)_dispatch_thread_getspecific(dispatch_bcounter_key); + count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - if (count) { - avg = delta / count; - bucket = flsll(avg); - } else { - bucket = 0; - } + int bucket = flsl((long)count); // 64-bit counters on 32-bit require a lock or a queue OSSpinLockLock(&_dispatch_stats_lock); @@ -1328,7 +1846,8 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) dispatch_continuation_t next_dc; dc = _dispatch_thread_getspecific(dispatch_cache_key); int cnt; - if (!dc || (cnt = dc->do_ref_cnt-_dispatch_continuation_cache_limit) <= 0) { + if (!dc || (cnt = dc->dc_cache_cnt - + _dispatch_continuation_cache_limit) <= 0){ return; } do { @@ -1345,97 +1864,454 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) { dispatch_continuation_t dc = dou._dc; - _dispatch_trace_continuation_pop(dq, dou); (void)dispatch_atomic_add2o(dq, dq_running, 2, acquire); if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { + _dispatch_trace_continuation_pop(dq, dou); _dispatch_thread_semaphore_signal( (_dispatch_thread_semaphore_t)dc->dc_other); + _dispatch_introspection_queue_item_complete(dou); } else { - _dispatch_async_f_redirect(dq, dc); + _dispatch_async_f_redirect(dq, dc, + _dispatch_queue_get_override_priority(dq)); } + _dispatch_perfmon_workitem_inc(); } -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline void -_dispatch_continuation_pop(dispatch_object_t dou) -{ - dispatch_continuation_t dc = dou._dc, dc1; - dispatch_group_t dg; +#pragma mark - +#pragma mark dispatch_block_create - _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); - if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return 
dx_invoke(dou._do); +#if __BLOCKS__ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_flags_valid(dispatch_block_flags_t flags) +{ + return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_block_flags_t +_dispatch_block_normalize_flags(dispatch_block_flags_t flags) +{ + if (flags & (DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_DETACHED)) { + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } + if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) { + flags |= DISPATCH_BLOCK_HAS_PRIORITY; } + return flags; +} - // Add the item back to the cache before calling the function. This - // allows the 'hot' continuation to be used for a quick callback. - // - // The ccache version is per-thread. - // Therefore, the object has not been reused yet. - // This generates better assembly. - if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - dc1 = _dispatch_continuation_free_cacheonly(dc); - } else { - dc1 = NULL; +static inline dispatch_block_t +_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, + voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) +{ + flags = _dispatch_block_normalize_flags(flags); + voucher_t cv = NULL; + bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { + voucher = cv = voucher_copy(); + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } + if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { + pri = _dispatch_priority_propagate(); + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + } + dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); + if (cv) _voucher_release(cv); +#if DISPATCH_DEBUG + dispatch_assert(_dispatch_block_get_data(db)); +#endif + return db; +} + +dispatch_block_t +dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return NULL; + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, + 
block); +} + +dispatch_block_t +dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, + dispatch_qos_class_t qos_class, int relative_priority, + dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, + pri, block); +} + +dispatch_block_t +dispatch_block_create_with_voucher(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return NULL; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, + block); +} + +dispatch_block_t +dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_qos_class_t qos_class, + int relative_priority, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, + pri, block); +} + +void +dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) { + DISPATCH_CLIENT_CRASH("Invalid flags passed to " + "dispatch_block_perform()"); + } + flags = _dispatch_block_normalize_flags(flags); + struct dispatch_block_private_data_s dbpds = + DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, 0, block); + dbpds.dbpd_atomic_flags |= DBF_PERFORM; // no group_leave at end of 
invoke + return _dispatch_block_invoke(&dbpds); +} + +#define _dbpd_group(dbpd) ((dispatch_group_t)&(dbpd)->dbpd_group) + +void +_dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd) +{ + dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (slowpath(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH("A block object may not be both run more " + "than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; + + pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; + unsigned long override = 0; + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + op = _dispatch_get_priority(); + p = dbpd->dbpd_priority; + override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? + DISPATCH_PRIORITY_ENFORCE : 0; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + if (v) _voucher_retain(v); + } + ov = _dispatch_adopt_priority_and_voucher(p, v, override); + dbpd->dbpd_thread = _dispatch_thread_port(); + dbpd->dbpd_block(); + _dispatch_set_priority_and_replace_voucher(op, ov); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + dispatch_group_leave(_dbpd_group(dbpd)); + } + } +} + +static void +_dispatch_block_sync_invoke(void *block) +{ + dispatch_block_t b = block; + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (slowpath(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH("A block object may not be both run more " + "than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; + + pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; + unsigned long override = 0; + 
if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + op = _dispatch_get_priority(); + p = dbpd->dbpd_priority; + override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? + DISPATCH_PRIORITY_ENFORCE : 0; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + if (v) _voucher_retain(v); + } + ov = _dispatch_adopt_priority_and_voucher(p, v, override); + dbpd->dbpd_block(); + _dispatch_set_priority_and_replace_voucher(op, ov); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + dispatch_group_leave(_dbpd_group(dbpd)); + } + } + + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + // balances dispatch_{,barrier_,}sync + _dispatch_release(dq); } - if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { - dg = dc->dc_data; +} + +static void +_dispatch_block_async_invoke_and_release(void *block) +{ + dispatch_block_t b = block; + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (slowpath(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH("A block object may not be both run more " + "than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; + + pthread_priority_t p = DISPATCH_NO_PRIORITY; + unsigned long override = 0; + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ? 
+ DISPATCH_PRIORITY_ENFORCE : 0; + p = dbpd->dbpd_priority; + } + voucher_t v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + if (v) _voucher_retain(v); + } + _dispatch_adopt_priority_and_replace_voucher(p, v, override); + dbpd->dbpd_block(); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + dispatch_group_leave(_dbpd_group(dbpd)); + } + } + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + // balances dispatch_{,barrier_,group_}async + _dispatch_release(dq); + } + Block_release(b); +} + +void +dispatch_block_cancel(dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH("Invalid block object passed to " + "dispatch_block_cancel()"); + } + (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); +} + +long +dispatch_block_testcancel(dispatch_block_t db) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH("Invalid block object passed to " + "dispatch_block_testcancel()"); + } + return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); +} + +long +dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH("Invalid block object passed to " + "dispatch_block_wait()"); + } + + unsigned int flags = dispatch_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + DBF_WAITING, relaxed); + if (slowpath(flags & (DBF_WAITED | DBF_WAITING))) { + DISPATCH_CLIENT_CRASH("A block object may not be waited for " + "more than once"); + } + + // If we know the queue where this block is + // enqueued, or the thread that's executing it, then we should boost + // it here. 
+ + pthread_priority_t pp = _dispatch_get_priority(); + + dispatch_queue_t boost_dq; + boost_dq = dispatch_atomic_xchg2o(dbpd, dbpd_queue, NULL, acquire); + if (boost_dq) { + // release balances dispatch_{,barrier_,group_}async. + // Can't put the queue back in the timeout case: the block might + // finish after we fell out of group_wait and see our NULL, so + // neither of us would ever release. Side effect: After a _wait + // that times out, subsequent waits will not boost the qos of the + // still-running block. + _dispatch_queue_wakeup_with_qos_and_release(boost_dq, pp); + } + + mach_port_t boost_th = dbpd->dbpd_thread; + if (boost_th) { + _dispatch_thread_override_start(boost_th, pp); + } + + int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (slowpath(performed > 1 || (boost_th && boost_dq))) { + DISPATCH_CLIENT_CRASH("A block object may not be both run more " + "than once and waited for"); + } + + long ret = dispatch_group_wait(_dbpd_group(dbpd), timeout); + + if (boost_th) { + _dispatch_thread_override_end(boost_th); + } + + if (ret) { + // timed out: reverse our changes + (void)dispatch_atomic_and2o(dbpd, dbpd_atomic_flags, + ~DBF_WAITING, relaxed); } else { - dg = NULL; + (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, + DBF_WAITED, relaxed); + // don't need to re-test here: the second call would see + // the first call's WAITING } - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - if (dg) { - dispatch_group_leave(dg); - _dispatch_release(dg); + + return ret; +} + +void +dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, + dispatch_block_t notification_block) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH("Invalid block object passed to " + "dispatch_block_notify()"); } - if (slowpath(dc1)) { - _dispatch_continuation_free_to_cache_limit(dc1); + int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (slowpath(performed > 
1)) { + DISPATCH_CLIENT_CRASH("A block object may not be both run more " + "than once and observed"); } + + return dispatch_group_notify(_dbpd_group(dbpd), queue, notification_block); } +#endif // __BLOCKS__ + #pragma mark - #pragma mark dispatch_barrier_async DISPATCH_NOINLINE static void _dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, pthread_priority_t pp, + dispatch_block_flags_t flags) { dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); dc->dc_func = func; dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, flags); + _dispatch_continuation_priority_set(dc, pp, flags); + + pp = _dispatch_continuation_get_override_priority(dq, dc); - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); } -DISPATCH_NOINLINE -void -dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_barrier_async_f2(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp, + dispatch_block_flags_t flags) { dispatch_continuation_t dc; dc = fastpath(_dispatch_continuation_alloc_cacheonly()); if (!dc) { - return _dispatch_barrier_async_f_slow(dq, ctxt, func); + return _dispatch_barrier_async_f_slow(dq, ctxt, func, pp, flags); } dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); dc->dc_func = func; dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, flags); + _dispatch_continuation_priority_set(dc, pp, flags); + + pp = _dispatch_continuation_get_override_priority(dq, dc); - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp, + dispatch_block_flags_t flags) +{ + return _dispatch_barrier_async_f2(dq, ctxt, 
func, pp, flags); +} + +DISPATCH_NOINLINE +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + return _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0); +} + +DISPATCH_NOINLINE +void +_dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + return _dispatch_barrier_async_f2(dq, ctxt, func, 0, + DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_NO_VOUCHER); } #ifdef __BLOCKS__ void dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), - _dispatch_call_block_and_release); + dispatch_function_t func = _dispatch_call_block_and_release; + pthread_priority_t pp = 0; + dispatch_block_flags_t flags = 0; + if (slowpath(_dispatch_block_has_private_data(work))) { + func = _dispatch_block_async_invoke_and_release; + pp = _dispatch_block_get_priority(work); + flags = _dispatch_block_get_flags(work); + // balanced in d_block_async_invoke_and_release or d_block_wait + if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, release)) { + _dispatch_retain(dq); + } + } + _dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); } #endif @@ -1451,25 +2327,28 @@ _dispatch_async_redirect_invoke(void *ctxt) old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); _dispatch_continuation_pop(other_dc); + _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) { - _dispatch_wakeup(rq); + _dispatch_queue_wakeup(rq); } rq = rq->do_targetq; } if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } _dispatch_release(dq); } static 
inline void -_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc) +_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc, + pthread_priority_t pp) { uint32_t running = 2; @@ -1488,13 +2367,13 @@ _dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc) dq = dq->do_targetq; } while (slowpath(dq->do_targetq)); - _dispatch_queue_push_wakeup(dq, dc, running == 0); + _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); } DISPATCH_NOINLINE static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t other_dc) + dispatch_continuation_t other_dc, pthread_priority_t pp) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); @@ -1503,19 +2382,22 @@ _dispatch_async_f_redirect(dispatch_queue_t dq, dc->dc_ctxt = dc; dc->dc_data = dq; dc->dc_other = other_dc; + dc->dc_priority = 0; + dc->dc_voucher = NULL; _dispatch_retain(dq); dq = dq->do_targetq; if (slowpath(dq->do_targetq)) { - return _dispatch_async_f_redirect2(dq, dc); + return _dispatch_async_f_redirect2(dq, dc, pp); } - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); } DISPATCH_NOINLINE static void -_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) +_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc, + pthread_priority_t pp) { uint32_t running = 2; @@ -1530,78 +2412,108 @@ _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) break; } if (!slowpath(running & 1)) { - return _dispatch_async_f_redirect(dq, dc); + return _dispatch_async_f_redirect(dq, dc, pp); } running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); // We might get lucky and find that the barrier has ended by now } while (!(running & 1)); - _dispatch_queue_push_wakeup(dq, dc, running == 0); + _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); } DISPATCH_NOINLINE static void _dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, 
pthread_priority_t pp, + dispatch_block_flags_t flags) { dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; dc->dc_func = func; dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, flags); + _dispatch_continuation_priority_set(dc, pp, flags); + + pp = _dispatch_continuation_get_override_priority(dq, dc); // No fastpath/slowpath hint because we simply don't know if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc); + return _dispatch_async_f2(dq, dc, pp); } - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); } -DISPATCH_NOINLINE -void -dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + pthread_priority_t pp, dispatch_block_flags_t flags) { dispatch_continuation_t dc; // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width == 1) { - return dispatch_barrier_async_f(dq, ctxt, func); + if (dq->dq_width == 1 || flags & DISPATCH_BLOCK_BARRIER) { + return _dispatch_barrier_async_f(dq, ctxt, func, pp, flags); } dc = fastpath(_dispatch_continuation_alloc_cacheonly()); if (!dc) { - return _dispatch_async_f_slow(dq, ctxt, func); + return _dispatch_async_f_slow(dq, ctxt, func, pp, flags); } dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; dc->dc_func = func; dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, flags); + _dispatch_continuation_priority_set(dc, pp, flags); + + pp = _dispatch_continuation_get_override_priority(dq, dc); // No fastpath/slowpath hint because we simply don't know if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc); + return _dispatch_async_f2(dq, dc, pp); } - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); +} + +DISPATCH_NOINLINE +void +dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + return _dispatch_async_f(dq, ctxt, func, 0, 
0); } #ifdef __BLOCKS__ void dispatch_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_async_f(dq, _dispatch_Block_copy(work), - _dispatch_call_block_and_release); + dispatch_function_t func = _dispatch_call_block_and_release; + dispatch_block_flags_t flags = 0; + pthread_priority_t pp = 0; + if (slowpath(_dispatch_block_has_private_data(work))) { + func = _dispatch_block_async_invoke_and_release; + pp = _dispatch_block_get_priority(work); + flags = _dispatch_block_get_flags(work); + // balanced in d_block_async_invoke_and_release or d_block_wait + if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, release)) { + _dispatch_retain(dq); + } + } + _dispatch_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); } #endif #pragma mark - #pragma mark dispatch_group_async -DISPATCH_NOINLINE -void -dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp, + dispatch_block_flags_t flags) { dispatch_continuation_t dc; @@ -1610,17 +2522,32 @@ dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT); + unsigned long barrier = (flags & DISPATCH_BLOCK_BARRIER) ? 
+ DISPATCH_OBJ_BARRIER_BIT : 0; + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT | + barrier); dc->dc_func = func; dc->dc_ctxt = ctxt; dc->dc_data = dg; + _dispatch_continuation_voucher_set(dc, flags); + _dispatch_continuation_priority_set(dc, pp, flags); + + pp = _dispatch_continuation_get_override_priority(dq, dc); // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width != 1 && dq->do_targetq) { - return _dispatch_async_f2(dq, dc); + if (dq->dq_width != 1 && !barrier && dq->do_targetq) { + return _dispatch_async_f2(dq, dc, pp); } - _dispatch_queue_push(dq, dc); + _dispatch_queue_push(dq, dc, pp); +} + +DISPATCH_NOINLINE +void +dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + return _dispatch_group_async_f(dg, dq, ctxt, func, 0, 0); } #ifdef __BLOCKS__ @@ -1628,14 +2555,29 @@ void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) { - dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), - _dispatch_call_block_and_release); + dispatch_function_t func = _dispatch_call_block_and_release; + dispatch_block_flags_t flags = 0; + pthread_priority_t pp = 0; + if (slowpath(_dispatch_block_has_private_data(db))) { + func = _dispatch_block_async_invoke_and_release; + pp = _dispatch_block_get_priority(db); + flags = _dispatch_block_get_flags(db); + // balanced in d_block_async_invoke_and_release or d_block_wait + if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(db), + dbpd_queue, NULL, dq, release)) { + _dispatch_retain(dq); + } + } + _dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), func, pp, flags); } #endif #pragma mark - #pragma mark dispatch_function_invoke +static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp); + DISPATCH_ALWAYS_INLINE static inline void _dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, @@ -1658,14 +2600,14 @@ 
_dispatch_sync_recurse_invoke(void *ctxt) DISPATCH_ALWAYS_INLINE static inline void _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, pthread_priority_t pp) { struct dispatch_continuation_s dc = { .dc_data = dq, .dc_func = func, .dc_ctxt = ctxt, }; - dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke); + _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp); } #pragma mark - @@ -1681,6 +2623,7 @@ _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, { _dispatch_thread_semaphore_t sema; dispatch_continuation_t dc = dou._dc; + mach_port_t th; if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != @@ -1690,6 +2633,7 @@ _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, _dispatch_trace_continuation_pop(dq, dc); _dispatch_perfmon_workitem_inc(); + th = (mach_port_t)dc->dc_data; dc = dc->dc_ctxt; dq = dc->dc_data; sema = (_dispatch_thread_semaphore_t)dc->dc_other; @@ -1700,6 +2644,9 @@ _dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, // returns (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); } + _dispatch_introspection_queue_item_complete(dou); + _dispatch_wqthread_override_start(th, + _dispatch_queue_get_override_priority(dq)); return sema ? sema : MACH_PORT_DEAD; } @@ -1715,7 +2662,8 @@ _dispatch_barrier_sync_f_slow_invoke(void *ctxt) #if DISPATCH_COCOA_COMPAT if (slowpath(dq->dq_is_thread_bound)) { // The queue is bound to a non-dispatch thread (e.g. 
main thread) - dc->dc_func(dc->dc_ctxt); + _dispatch_continuation_voucher_adopt(dc); + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); dispatch_atomic_store2o(dc, dc_func, NULL, release); _dispatch_thread_semaphore_signal(sema); // release return; @@ -1731,17 +2679,14 @@ _dispatch_barrier_sync_f_slow_invoke(void *ctxt) DISPATCH_NOINLINE static void _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, pthread_priority_t pp) { if (slowpath(!dq->do_targetq)) { // the global concurrent queues do not need strict ordering (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); return _dispatch_sync_f_invoke(dq, ctxt, func); } - // It's preferred to execute synchronous blocks on the current thread - // due to thread-local side effects, garbage collection, etc. However, - // blocks submitted to the main thread MUST be run on the main thread - + if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); struct dispatch_continuation_s dc = { .dc_data = dq, @@ -1751,16 +2696,24 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, #endif .dc_other = (void*)sema, }; +#if DISPATCH_COCOA_COMPAT + // It's preferred to execute synchronous blocks on the current thread + // due to thread-local side effects, garbage collection, etc. 
However, + // blocks submitted to the main thread MUST be run on the main thread + if (slowpath(dq->dq_is_thread_bound)) { + _dispatch_continuation_voucher_set(&dc, 0); + } +#endif struct dispatch_continuation_s dbss = { .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT), .dc_func = _dispatch_barrier_sync_f_slow_invoke, .dc_ctxt = &dc, -#if DISPATCH_INTROSPECTION - .dc_data = (void*)_dispatch_thread_self(), -#endif + .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), + .dc_priority = pp, }; - _dispatch_queue_push(dq, &dbss); + _dispatch_queue_push(dq, &dbss, + _dispatch_continuation_get_override_priority(dq, &dbss)); _dispatch_thread_semaphore_wait(sema); // acquire _dispatch_put_thread_semaphore(sema); @@ -1771,11 +2724,15 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, return; } #endif + + _dispatch_queue_set_thread(dq); if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func); + _dispatch_function_recurse(dq, ctxt, func, pp); } else { _dispatch_function_invoke(dq, ctxt, func); } + _dispatch_queue_clear_thread(dq); + if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && dq->dq_running == 2) { // rdar://problem/8290662 "lock transfer" @@ -1786,9 +2743,9 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, } } (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, release) == 0)) { - _dispatch_wakeup(dq); + DISPATCH_OBJECT_SUSPEND_INTERVAL, release); + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { + _dispatch_queue_wakeup(dq); } } @@ -1811,7 +2768,7 @@ _dispatch_barrier_sync_f2(dispatch_queue_t dq) } } if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } @@ -1820,27 +2777,52 @@ static void _dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, 
dispatch_function_t func) { + _dispatch_queue_set_thread(dq); _dispatch_function_invoke(dq, ctxt, func); + _dispatch_queue_clear_thread(dq); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } DISPATCH_NOINLINE static void _dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, pthread_priority_t pp) { - _dispatch_function_recurse(dq, ctxt, func); + _dispatch_queue_set_thread(dq); + _dispatch_function_recurse(dq, ctxt, func, pp); + _dispatch_queue_clear_thread(dq); if (slowpath(dq->dq_items_tail)) { return _dispatch_barrier_sync_f2(dq); } if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + // 1) ensure that this thread hasn't enqueued anything ahead of this call + // 2) the queue is not suspended + if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ + return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); + } + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { + // global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case + return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); } + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); + } + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } DISPATCH_NOINLINE @@ -1851,46 +2833,64 @@ dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, // 1) ensure that this thread hasn't enqueued anything ahead of this call // 2) the queue is not suspended if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return 
_dispatch_barrier_sync_f_slow(dq, ctxt, func); + return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); } if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { // global concurrent queues and queues bound to non-dispatch threads // always fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func); + return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); } if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func); + return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, 0); } _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } #ifdef __BLOCKS__ -#if DISPATCH_COCOA_COMPAT DISPATCH_NOINLINE static void _dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) { - // Blocks submitted to the main queue MUST be run on the main thread, - // therefore under GC we must Block_copy in order to notify the thread-local - // garbage collector that the objects are transferring to the main thread - // rdar://problem/7176237&7181849&7458685 - if (dispatch_begin_thread_4GC) { - dispatch_block_t block = _dispatch_Block_copy(work); - return dispatch_barrier_sync_f(dq, block, - _dispatch_call_block_and_release); + bool has_pd = _dispatch_block_has_private_data(work); + dispatch_function_t func = _dispatch_Block_invoke(work); + pthread_priority_t pp = 0; + if (has_pd) { + func = _dispatch_block_sync_invoke; + pp = _dispatch_block_get_priority(work); + dispatch_block_flags_t flags = _dispatch_block_get_flags(work); + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pthread_priority_t tp = _dispatch_get_priority(); + if (pp < tp) { + pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + } + // balanced in d_block_sync_invoke or d_block_wait + if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, release)) { + _dispatch_retain(dq); + } +#if DISPATCH_COCOA_COMPAT + } else 
if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { + // Blocks submitted to the main queue MUST be run on the main thread, + // under GC we must Block_copy in order to notify the thread-local + // garbage collector that the objects are transferring to another thread + // rdar://problem/7176237&7181849&7458685 + work = _dispatch_Block_copy(work); + func = _dispatch_call_block_and_release; } - dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); -} #endif + _dispatch_barrier_sync_f(dq, work, func, pp); +} void dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) { -#if DISPATCH_COCOA_COMPAT - if (slowpath(dq->dq_is_thread_bound)) { + if (slowpath(dq->dq_is_thread_bound) || + slowpath(_dispatch_block_has_private_data(work))) { return _dispatch_barrier_sync_slow(dq, work); } -#endif dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -1900,9 +2900,11 @@ static void _dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { + _dispatch_queue_set_thread(dq); _dispatch_function_invoke(dq, ctxt, func); + _dispatch_queue_clear_thread(dq); if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } @@ -1916,7 +2918,7 @@ _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) || slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - return dispatch_barrier_async_f(dq, ctxt, func); + return _dispatch_barrier_async_detached_f(dq, ctxt, func); } _dispatch_barrier_trysync_f_invoke(dq, ctxt, func); } @@ -1927,30 +2929,34 @@ _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, DISPATCH_NOINLINE static void _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - bool wakeup) + pthread_priority_t pp, bool wakeup) { + if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); 
_dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_continuation_s dss = { + struct dispatch_continuation_s dc = { .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, #if DISPATCH_INTROSPECTION .dc_func = func, .dc_ctxt = ctxt, - .dc_data = (void*)_dispatch_thread_self(), + .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), #endif .dc_other = (void*)sema, + .dc_priority = pp, }; - _dispatch_queue_push_wakeup(dq, &dss, wakeup); + _dispatch_queue_push_wakeup(dq, &dc, + _dispatch_continuation_get_override_priority(dq, &dc), wakeup); _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func); + _dispatch_function_recurse(dq, ctxt, func, pp); } else { _dispatch_function_invoke(dq, ctxt, func); } + if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } @@ -1961,40 +2967,58 @@ _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, { _dispatch_function_invoke(dq, ctxt, func); if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } DISPATCH_NOINLINE static void _dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) + dispatch_function_t func, pthread_priority_t pp) { - _dispatch_function_recurse(dq, ctxt, func); + _dispatch_function_recurse(dq, ctxt, func, pp); if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } static inline void -_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + pthread_priority_t pp) { // 1) ensure that this thread hasn't enqueued anything ahead of this call // 2) the queue is not suspended if (slowpath(dq->dq_items_tail) || 
slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_sync_f_slow(dq, ctxt, func, false); + return _dispatch_sync_f_slow(dq, ctxt, func, pp, false); } uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - if (slowpath(running & 1)) { + // re-check suspension after barrier check + if (slowpath(running & 1) || _dispatch_object_suspended(dq)) { running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_slow(dq, ctxt, func, running == 0); + return _dispatch_sync_f_slow(dq, ctxt, func, pp, running == 0); } if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_sync_f_recurse(dq, ctxt, func); + return _dispatch_sync_f_recurse(dq, ctxt, func, pp); } _dispatch_sync_f_invoke(dq, ctxt, func); } +DISPATCH_NOINLINE +static void +_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + pthread_priority_t pp) +{ + if (fastpath(dq->dq_width == 1)) { + return _dispatch_barrier_sync_f(dq, ctxt, func, pp); + } + if (slowpath(!dq->do_targetq)) { + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_invoke(dq, ctxt, func); + } + _dispatch_sync_f2(dq, ctxt, func, pp); +} + DISPATCH_NOINLINE void dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) @@ -2007,35 +3031,65 @@ dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); return _dispatch_sync_f_invoke(dq, ctxt, func); } - _dispatch_sync_f2(dq, ctxt, func); + _dispatch_sync_f2(dq, ctxt, func, 0); } #ifdef __BLOCKS__ -#if DISPATCH_COCOA_COMPAT DISPATCH_NOINLINE static void _dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) { - // Blocks submitted to the main queue MUST be run on the main thread, - // therefore under GC we must Block_copy in order to notify the thread-local - // garbage collector that the objects are transferring to the main thread - // 
rdar://problem/7176237&7181849&7458685 - if (dispatch_begin_thread_4GC) { - dispatch_block_t block = _dispatch_Block_copy(work); - return dispatch_sync_f(dq, block, _dispatch_call_block_and_release); + bool has_pd = _dispatch_block_has_private_data(work); + if (has_pd && (_dispatch_block_get_flags(work) & DISPATCH_BLOCK_BARRIER)) { + return _dispatch_barrier_sync_slow(dq, work); } - dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); -} + dispatch_function_t func = _dispatch_Block_invoke(work); + pthread_priority_t pp = 0; + if (has_pd) { + func = _dispatch_block_sync_invoke; + pp = _dispatch_block_get_priority(work); + dispatch_block_flags_t flags = _dispatch_block_get_flags(work); + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pthread_priority_t tp = _dispatch_get_priority(); + if (pp < tp) { + pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + } + // balanced in d_block_sync_invoke or d_block_wait + if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, release)) { + _dispatch_retain(dq); + } +#if DISPATCH_COCOA_COMPAT + } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { + // Blocks submitted to the main queue MUST be run on the main thread, + // under GC we must Block_copy in order to notify the thread-local + // garbage collector that the objects are transferring to another thread + // rdar://problem/7176237&7181849&7458685 + work = _dispatch_Block_copy(work); + func = _dispatch_call_block_and_release; #endif + } + if (slowpath(!dq->do_targetq)) { + // the global concurrent queues do not need strict ordering + (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); + return _dispatch_sync_f_invoke(dq, work, func); + } + _dispatch_sync_f2(dq, work, func, pp); +} void dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { -#if DISPATCH_COCOA_COMPAT - if (slowpath(dq->dq_is_thread_bound)) { + if 
(fastpath(dq->dq_width == 1)) { + return dispatch_barrier_sync(dq, work); + } + if (slowpath(dq->dq_is_thread_bound) || + slowpath(_dispatch_block_has_private_data(work)) ) { return _dispatch_sync_slow(dq, work); } -#endif dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif @@ -2085,8 +3139,9 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); dispatch_assert(ds); + // TODO: don't use a separate continuation & voucher dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); dc->dc_func = func; dc->dc_ctxt = ctxt; dc->dc_data = ds; @@ -2118,10 +3173,10 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, #pragma mark - #pragma mark dispatch_queue_push -DISPATCH_NOINLINE -static void -_dispatch_queue_push_list_slow2(dispatch_queue_t dq, - struct dispatch_object_s *obj) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_list_slow2(dispatch_queue_t dq, pthread_priority_t pp, + struct dispatch_object_s *obj, bool retained) { // The queue must be retained before dq_items_head is written in order // to ensure that the reference is still valid when _dispatch_wakeup is @@ -2129,34 +3184,35 @@ _dispatch_queue_push_list_slow2(dispatch_queue_t dq, // dq_items_head and _dispatch_wakeup, the blocks submitted to the // queue may release the last reference to the queue when invoked by // _dispatch_queue_drain. 
- _dispatch_retain(dq); + if (!retained) _dispatch_retain(dq); dq->dq_items_head = obj; - _dispatch_wakeup(dq); - _dispatch_release(dq); + return _dispatch_queue_wakeup_with_qos_and_release(dq, pp); } DISPATCH_NOINLINE void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, - struct dispatch_object_s *obj, unsigned int n) +_dispatch_queue_push_list_slow(dispatch_queue_t dq, pthread_priority_t pp, + struct dispatch_object_s *obj, unsigned int n, bool retained) { if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_assert(!retained); dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global2(dq, n); } - _dispatch_queue_push_list_slow2(dq, obj); + _dispatch_queue_push_list_slow2(dq, pp, obj, retained); } DISPATCH_NOINLINE void -_dispatch_queue_push_slow(dispatch_queue_t dq, - struct dispatch_object_s *obj) +_dispatch_queue_push_slow(dispatch_queue_t dq, pthread_priority_t pp, + struct dispatch_object_s *obj, bool retained) { if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { + dispatch_assert(!retained); dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); return _dispatch_queue_wakeup_global(dq); } - _dispatch_queue_push_list_slow2(dq, obj); + _dispatch_queue_push_list_slow2(dq, pp, obj, retained); } #pragma mark - @@ -2165,14 +3221,14 @@ _dispatch_queue_push_slow(dispatch_queue_t dq, unsigned long _dispatch_queue_probe(dispatch_queue_t dq) { - return (unsigned long)slowpath(dq->dq_items_tail != NULL); + return _dispatch_queue_class_probe(dq); } #if DISPATCH_COCOA_COMPAT unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq) { - if (_dispatch_queue_probe(dq)) { + if (_dispatch_queue_class_probe(dq)) { if (dq->do_xref_cnt == -1) return true; // return _dispatch_runloop_queue_wakeup(dq); } @@ -2183,7 +3239,7 @@ _dispatch_runloop_queue_probe(dispatch_queue_t dq) unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq) { - if (_dispatch_queue_probe(dq)) { + if 
(_dispatch_queue_class_probe(dq)) { return _dispatch_mgr_wakeup(dq); } return false; @@ -2204,24 +3260,23 @@ _dispatch_root_queue_probe(dispatch_queue_t dq) dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou) { - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { + unsigned long type = dx_metatype(dou._do); + if (type == _DISPATCH_QUEUE_TYPE || type == _DISPATCH_SOURCE_TYPE) { + return _dispatch_queue_wakeup(dou._dq); + } + if (_dispatch_object_suspended(dou)) { return NULL; } if (!dx_probe(dou._do)) { return NULL; } if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK, release)) { -#if DISPATCH_COCOA_COMPAT - if (dou._dq == &_dispatch_main_q) { - return _dispatch_main_queue_wakeup(); - } -#endif + DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { return NULL; } _dispatch_retain(dou._do); dispatch_queue_t tq = dou._do->do_targetq; - _dispatch_queue_push(tq, dou._do); + _dispatch_queue_push(tq, dou._do, 0); return tq; // libdispatch does not need this, but the Instrument DTrace // probe does } @@ -2273,13 +3328,13 @@ DISPATCH_NOINLINE static void _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) { - static dispatch_once_t pred; dispatch_root_queue_context_t qc = dq->do_ctxt; uint32_t i = n; int r; _dispatch_debug_root_queue(dq, __func__); - dispatch_once_f(&pred, NULL, _dispatch_root_queues_init); + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init); #if HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL @@ -2294,29 +3349,39 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) unsigned int gen_cnt; do { r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, - _dispatch_worker_thread3, dq, &wh, &gen_cnt); + _dispatch_worker_thread4, dq, &wh, &gen_cnt); (void)dispatch_assume_zero(r); } while (--i); return; } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - 
qc->dgq_wq_options, (int)i); + if (!dq->dq_priority) { + r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, + qc->dgq_wq_options, (int)i); + (void)dispatch_assume_zero(r); + return; + } +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + r = _pthread_workqueue_addthreads((int)i, dq->dq_priority); (void)dispatch_assume_zero(r); #endif return; } #endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL - if (fastpath(qc->dgq_thread_mediator)) { - while (dispatch_semaphore_signal(qc->dgq_thread_mediator)) { + dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + if (fastpath(pqc->dpq_thread_mediator.do_vtable)) { + while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { if (!--i) { return; } } } - uint32_t j, t_count = qc->dgq_thread_pool_size; + uint32_t j, t_count; + // seq_cst with atomic store to tail + t_count = dispatch_atomic_load2o(qc, dgq_thread_pool_size, seq_cst); do { if (!t_count) { _dispatch_root_queue_debug("pthread pool is full for root queue: " @@ -2325,10 +3390,9 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } j = i > t_count ? t_count : i; } while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, - t_count - j, &t_count, relaxed)); + t_count - j, &t_count, acquire)); - dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - pthread_attr_t *attr = pqc ? 
&pqc->dpq_thread_attr : NULL; + pthread_attr_t *attr = &pqc->dpq_thread_attr; pthread_t tid, *pthr = &tid; #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES if (slowpath(dq == &_dispatch_mgr_root_queue)) { @@ -2343,10 +3407,6 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } _dispatch_temporary_resource_shortage(); } - if (!attr) { - r = pthread_detach(*pthr); - (void)dispatch_assume_zero(r); - } } while (--j); #endif // DISPATCH_USE_PTHREAD_POOL } @@ -2354,7 +3414,7 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) { - if (!dq->dq_items_tail) { + if (!_dispatch_queue_class_probe(dq)) { return; } #if HAVE_PTHREAD_WORKQUEUES @@ -2388,6 +3448,12 @@ dispatch_queue_invoke2(dispatch_object_t dou, { dispatch_queue_t dq = dou._dq; dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); + + if (slowpath(cq != otq)) { + return otq; + } + *sema_ptr = _dispatch_queue_drain(dq); if (slowpath(otq != dq->do_targetq)) { @@ -2414,9 +3480,7 @@ static inline struct dispatch_object_s* _dispatch_queue_head(dispatch_queue_t dq) { struct dispatch_object_s *dc; - while (!(dc = fastpath(dq->dq_items_head))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(dc = fastpath(dq->dq_items_head)); return dc; } @@ -2429,10 +3493,7 @@ _dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) dq->dq_items_head = next_dc; if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL, relaxed)) { - // Enqueue is TIGHTLY controlled, we won't wait long. 
- while (!(next_dc = fastpath(dc->do_next))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(next_dc = fastpath(dc->do_next)); dq->dq_items_head = next_dc; } return next_dc; @@ -2452,6 +3513,15 @@ _dispatch_queue_drain(dispatch_object_t dou) orig_tq = dq->do_targetq; _dispatch_thread_setspecific(dispatch_queue_key, dq); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); + + pthread_priority_t op = _dispatch_queue_get_override_priority(dq); + pthread_priority_t dp = _dispatch_get_defaultpriority(); + dp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (op > dp) { + _dispatch_wqthread_override_start(dq->dq_thread, op); + } + //dispatch_debug_queue(dq, __func__); while (dq->dq_items_tail) { @@ -2491,6 +3561,7 @@ _dispatch_queue_drain(dispatch_object_t dou) } out: + _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); return sema; } @@ -2507,11 +3578,14 @@ _dispatch_main_queue_drain(void) .do_vtable = NULL, }; struct dispatch_object_s *dmarker = (void*)▮ - _dispatch_queue_push_notrace(dq, dmarker); + _dispatch_queue_push_notrace(dq, dmarker, 0); _dispatch_perfmon_start(); dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); + pthread_priority_t old_pri = _dispatch_get_priority(); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + voucher_t voucher = _voucher_copy(); struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); @@ -2529,6 +3603,10 @@ _dispatch_main_queue_drain(void) if (next_dc) { _dispatch_main_queue_wakeup(); } + _dispatch_voucher_debug("main queue restore", voucher); + _dispatch_set_priority_and_replace_voucher(old_pri, voucher); + _dispatch_queue_reset_override_priority(dq); + _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); @@ -2543,6 +3621,9 @@ 
_dispatch_runloop_queue_drain_one(dispatch_queue_t dq) _dispatch_perfmon_start(); dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); + pthread_priority_t old_pri = _dispatch_get_priority(); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + voucher_t voucher = _voucher_copy(); struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); @@ -2550,6 +3631,9 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) _dispatch_continuation_pop(dc); _dispatch_perfmon_workitem_inc(); + _dispatch_voucher_debug("runloop queue restore", voucher); + _dispatch_set_priority_and_replace_voucher(old_pri, voucher); + _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); @@ -2586,28 +3670,219 @@ _dispatch_mgr_queue_drain(void) if (slowpath(_dispatch_queue_drain(dq))) { DISPATCH_CRASH("Sync onto manager queue"); } + _dispatch_voucher_debug("mgr queue clear", NULL); + _voucher_clear(); + _dispatch_queue_reset_override_priority(dq); + _dispatch_reset_defaultpriority_override(); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); } #pragma mark - -#pragma mark dispatch_root_queue_drain +#pragma mark _dispatch_queue_wakeup_with_qos -#ifndef DISPATCH_CONTENTION_USE_RAND -#define DISPATCH_CONTENTION_USE_RAND (!TARGET_OS_EMBEDDED) -#endif -#ifndef DISPATCH_CONTENTION_SPINS_MAX -#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1) +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp, + bool retained) +{ + if (!dx_probe(dq) && (dq->dq_is_thread_bound || !dq->dq_thread)) { + if (retained) _dispatch_release(dq); + return NULL; + } + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool override = _dispatch_queue_override_priority(dq, pp); + if (override && dq->dq_running > 1) { + override = false; + } + + if 
(!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0, + DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { +#if DISPATCH_COCOA_COMPAT + if (dq == &_dispatch_main_q && dq->dq_is_thread_bound) { + return _dispatch_main_queue_wakeup(); + } #endif -#ifndef DISPATCH_CONTENTION_SPINS_MIN -#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) + if (override) { + mach_port_t th; + // to traverse the tq chain safely we must + // lock it to ensure it cannot change, unless the queue is running + // and we can just override the thread itself + if (dq->dq_thread) { + _dispatch_wqthread_override_start(dq->dq_thread, pp); + } else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, + MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) { + // already locked, override the owner, trysync will do a queue + // wakeup when it returns. + _dispatch_wqthread_override_start(th, pp); + } else { + dispatch_queue_t tq = dq->do_targetq; + if (_dispatch_queue_prepare_override(dq, tq, pp)) { + _dispatch_queue_push_override(dq, tq, pp); + } else { + _dispatch_queue_wakeup_with_qos(tq, pp); + } + dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, + release); + } + } + if (retained) _dispatch_release(dq); + return NULL; + } + dispatch_queue_t tq = dq->do_targetq; + if (!retained) _dispatch_retain(dq); + if (override) { + override = _dispatch_queue_prepare_override(dq, tq, pp); + } + _dispatch_queue_push(tq, dq, pp); + if (override) { + _dispatch_queue_push_override(dq, tq, pp); + } + return tq; // libdispatch does not need this, but the Instrument DTrace + // probe does +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_wakeup_with_qos2(dispatch_queue_t dq, pthread_priority_t pp, + bool retained) +{ + if (_dispatch_object_suspended(dq)) { + _dispatch_queue_override_priority(dq, pp); + if (retained) _dispatch_release(dq); + return NULL; + } + return _dispatch_queue_wakeup_with_qos_slow(dq, pp, retained); +} + +DISPATCH_NOINLINE +void 
+_dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, + pthread_priority_t pp) +{ + (void)_dispatch_queue_wakeup_with_qos2(dq, pp, true); +} + +DISPATCH_NOINLINE +void +_dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp) +{ + (void)_dispatch_queue_wakeup_with_qos2(dq, pp, false); +} + +DISPATCH_NOINLINE +dispatch_queue_t +_dispatch_queue_wakeup(dispatch_queue_t dq) +{ + return _dispatch_queue_wakeup_with_qos2(dq, + _dispatch_queue_get_override_priority(dq), false); +} + +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void +_dispatch_queue_override_invoke(void *ctxt) +{ + dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; + dispatch_queue_t dq = dc->dc_data; + pthread_priority_t p = 0; + + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && + fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { + _dispatch_queue_set_thread(dq); + + _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", + dq->dq_thread, _dispatch_get_defaultpriority()); + + pthread_priority_t old_dp = _dispatch_get_defaultpriority(); + _dispatch_reset_defaultpriority(dc->dc_priority); + + dispatch_queue_t tq = NULL; + _dispatch_thread_semaphore_t sema = 0; + tq = dispatch_queue_invoke2(dq, &sema); + + _dispatch_queue_clear_thread(dq); + _dispatch_reset_defaultpriority(old_dp); + + uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release); + if (sema) { + _dispatch_thread_semaphore_signal(sema); + } else if (!tq && running == 0) { + p = _dispatch_queue_reset_override_priority(dq); + if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_reset(); + } + } + _dispatch_introspection_queue_item_complete(dq); + if (running == 0) { + return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + } + } else { + mach_port_t th = dq->dq_thread; + if (th) { + p = _dispatch_queue_get_override_priority(dq); + _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", + th, p); + 
_dispatch_wqthread_override_start(th, p); + } + } + _dispatch_release(dq); // added when we pushed the override block +} #endif -#ifndef DISPATCH_CONTENTION_USLEEP_START -#define DISPATCH_CONTENTION_USLEEP_START 500 + +static inline bool +_dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, + pthread_priority_t p) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (dx_type(tq) != DISPATCH_QUEUE_ROOT_TYPE || !tq->dq_priority) { + return false; + } + if (p <= (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + return false; + } + if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + return false; + } + _dispatch_retain(dq); + return true; +#else + (void)dq; (void)tq; (void)p; + return false; #endif -#ifndef DISPATCH_CONTENTION_USLEEP_MAX -#define DISPATCH_CONTENTION_USLEEP_MAX 100000 +} + +static inline void +_dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, + pthread_priority_t p) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + unsigned int qosbit, idx, overcommit; + overcommit = (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 
1 : 0; + qosbit = (p & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + idx = (unsigned int)__builtin_ffs((int)qosbit); + if (!idx || idx > DISPATCH_QUEUE_QOS_COUNT) { + DISPATCH_CRASH("Corrupted override priority"); + } + dispatch_queue_t rq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; + + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); + dc->dc_func = _dispatch_queue_override_invoke; + dc->dc_ctxt = dc; + dc->dc_priority = tq->dq_priority; + dc->dc_voucher = NULL; + dc->dc_data = dq; + // dq retained by _dispatch_queue_prepare_override + + _dispatch_queue_push(rq, dc, 0); +#else + (void)dq; (void)tq; (void)p; #endif +} + +#pragma mark - +#pragma mark dispatch_root_queue_drain DISPATCH_NOINLINE static bool @@ -2616,27 +3891,15 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) dispatch_root_queue_context_t qc = dq->do_ctxt; struct dispatch_object_s *const mediator = (void *)~0ul; bool pending = false, available = true; - unsigned int spins, sleep_time = DISPATCH_CONTENTION_USLEEP_START; + unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; do { // Spin for a short while in case the contention is temporary -- e.g. // when starting up after dispatch_apply, or when executing a few // short continuations in a row. -#if DISPATCH_CONTENTION_USE_RAND - // Use randomness to prevent threads from resonating at the same - // frequency and permanently contending. All threads sharing the same - // seed value is safe with the FreeBSD rand_r implementation. 
- static unsigned int seed; - spins = (rand_r(&seed) & DISPATCH_CONTENTION_SPINS_MAX) | - DISPATCH_CONTENTION_SPINS_MIN; -#else - spins = DISPATCH_CONTENTION_SPINS_MIN + - (DISPATCH_CONTENTION_SPINS_MAX-DISPATCH_CONTENTION_SPINS_MIN)/2; -#endif - while (spins--) { - dispatch_hardware_pause(); - if (fastpath(dq->dq_items_head != mediator)) goto out; - }; + if (_dispatch_contention_wait_until(dq->dq_items_head != mediator)) { + goto out; + } // Since we have serious contention, we need to back off. if (!pending) { // Mark this queue as pending to avoid requests for further threads @@ -2653,12 +3916,25 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) // Create a new pending thread and then exit this thread. // The kernel will grant a new thread when the load subsides. _dispatch_debug("contention on global queue: %p", dq); - _dispatch_queue_wakeup_global(dq); available = false; out: if (pending) { (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); } + if (!available) { + _dispatch_queue_wakeup_global(dq); + } + return available; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq) +{ + // Wait for queue head and tail to be both non-empty or both empty + bool available; // + _dispatch_wait_until((dq->dq_items_head != NULL) == + (available = (dq->dq_items_tail != NULL))); return available; } @@ -2676,8 +3952,14 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. 
- (void)dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL, - relaxed); + if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, + NULL, relaxed))) { + goto start; + } + if (slowpath(dq->dq_items_tail) && // + _dispatch_queue_concurrent_drain_one2(dq)) { + goto start; + } _dispatch_root_queue_debug("no work on global queue: %p", dq); return NULL; } @@ -2701,11 +3983,8 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) // both head and tail are NULL now goto out; } - - // There must be a next item now. This thread won't wait long. - while (!(next = head->do_next)) { - dispatch_hardware_pause(); - } + // There must be a next item now. + _dispatch_wait_until(next = head->do_next); } dispatch_atomic_store2o(dq, dq_items_head, next, relaxed); @@ -2723,6 +4002,9 @@ _dispatch_root_queue_drain(dispatch_queue_t dq) } #endif _dispatch_thread_setspecific(dispatch_queue_key, dq); + pthread_priority_t old_pri = _dispatch_get_priority(); + pthread_priority_t pri = dq->dq_priority ? 
dq->dq_priority : old_pri; + pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri); #if DISPATCH_COCOA_COMPAT // ensure that high-level memory management techniques do not leak/crash @@ -2734,9 +4016,15 @@ _dispatch_root_queue_drain(dispatch_queue_t dq) _dispatch_perfmon_start(); struct dispatch_object_s *item; + bool reset = false; while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { + if (reset) _dispatch_wqthread_override_reset(); _dispatch_continuation_pop(item); + reset = _dispatch_reset_defaultpriority_override(); } + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_set_priority_and_replace_voucher(old_pri, NULL); + _dispatch_reset_defaultpriority(old_dp); _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT @@ -2754,18 +4042,41 @@ _dispatch_root_queue_drain(dispatch_queue_t dq) #if HAVE_PTHREAD_WORKQUEUES static void -_dispatch_worker_thread3(void *context) +_dispatch_worker_thread4(void *context) { dispatch_queue_t dq = context; dispatch_root_queue_context_t qc = dq->do_ctxt; _dispatch_introspection_thread_add(); - - (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + int pending = (int)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + dispatch_assert(pending >= 0); _dispatch_root_queue_drain(dq); __asm__(""); // prevent tailcall (for Instrument DTrace probe) +} +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void +_dispatch_worker_thread3(pthread_priority_t priority) +{ + // Reset priority TSD to workaround + _dispatch_thread_setspecific(dispatch_priority_key, + (void*)(uintptr_t)(priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)); + unsigned int overcommit, qosbit, idx; + overcommit = (priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0; + qosbit = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]. + dq_priority) { + // If kernel doesn't support maintenance, bottom bit is background. 
+ // Shift to our idea of where background bit is. + qosbit <<= 1; + } + idx = (unsigned int)__builtin_ffs((int)qosbit); + dispatch_assert(idx > 0 && idx < DISPATCH_QUEUE_QOS_COUNT+1); + dispatch_queue_t dq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; + return _dispatch_worker_thread4(dq); } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP // 6618342 Contact the team that owns the Instrument DTrace probe before @@ -2778,7 +4089,7 @@ _dispatch_worker_thread2(int priority, int options, dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT)); dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options]; - return _dispatch_worker_thread3(dq); + return _dispatch_worker_thread4(dq); } #endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #endif // HAVE_PTHREAD_WORKQUEUES @@ -2793,7 +4104,7 @@ _dispatch_worker_thread(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - if (pqc && pqc->dpq_thread_configure) { + if (pqc->dpq_thread_configure) { pqc->dpq_thread_configure(); } @@ -2806,16 +4117,13 @@ _dispatch_worker_thread(void *context) (void)dispatch_assume_zero(r); _dispatch_introspection_thread_add(); - // Non-pthread-root-queue pthreads use a 65 second timeout in case there - // are any timers that run once a minute - const int64_t timeout = (pqc ? 
5ull : 65ull) * NSEC_PER_SEC; - + const int64_t timeout = 5ull * NSEC_PER_SEC; do { _dispatch_root_queue_drain(dq); - } while (dispatch_semaphore_wait(qc->dgq_thread_mediator, + } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, dispatch_time(0, timeout)) == 0); - (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, relaxed); + (void)dispatch_atomic_inc2o(qc, dgq_thread_pool_size, release); _dispatch_queue_wakeup_global(dq); _dispatch_release(dq); @@ -2871,7 +4179,7 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); _dispatch_queue_init(dq); - dq->do_targetq = _dispatch_get_root_queue(0, true); + dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true); dq->dq_label = label ? label : "runloop-queue"; // no-copy contract dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; dq->dq_running = 1; @@ -2891,7 +4199,7 @@ _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) DISPATCH_OBJECT_SUSPEND_LOCK, release); _dispatch_queue_clear_bound_thread(dq); if (suspend_cnt == 0) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } } @@ -3062,15 +4370,15 @@ _dispatch_queue_cleanup2(void) DISPATCH_OBJECT_SUSPEND_LOCK, release); dq->dq_is_thread_bound = 0; if (suspend_cnt == 0) { - _dispatch_wakeup(dq); + _dispatch_queue_wakeup(dq); } // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called // this has to run before the DISPATCH_COCOA_COMPAT below if (_dispatch_program_is_probably_callback_driven) { - dispatch_async_f(_dispatch_get_root_queue(0, true), NULL, - _dispatch_sig_thread); + _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } diff --git a/src/queue_internal.h b/src/queue_internal.h index 4f42d24fa..d76b15f05 100644 --- 
a/src/queue_internal.h +++ b/src/queue_internal.h @@ -38,77 +38,15 @@ /* x86 & cortex-a8 have a 64 byte cacheline */ #define DISPATCH_CACHELINE_SIZE 64u -#define DISPATCH_CONTINUATION_SIZE DISPATCH_CACHELINE_SIZE #define ROUND_UP_TO_CACHELINE_SIZE(x) \ (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \ ~(DISPATCH_CACHELINE_SIZE - 1u)) -#define ROUND_UP_TO_CONTINUATION_SIZE(x) \ - (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ - ~(DISPATCH_CONTINUATION_SIZE - 1u)) -#define ROUND_UP_TO_VECTOR_SIZE(x) \ - (((x) + 15u) & ~15u) #define DISPATCH_CACHELINE_ALIGN \ __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) -#define DISPATCH_QUEUE_CACHELINE_PADDING \ - char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] -#ifdef __LP64__ -#define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (3*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ - + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) -#else -#define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ - + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) -#if !DISPATCH_INTROSPECTION -// No padding, DISPATCH_QUEUE_CACHELINE_PAD == 0 -#undef DISPATCH_QUEUE_CACHELINE_PADDING -#define DISPATCH_QUEUE_CACHELINE_PADDING -#endif -#endif - -// If dc_vtable is less than 127, then the object is a continuation. -// Otherwise, the object has a private layout and memory management rules. The -// layout until after 'do_next' must align with normal objects. 
-#define DISPATCH_CONTINUATION_HEADER(x) \ - _OS_OBJECT_HEADER( \ - const void *do_vtable, \ - do_ref_cnt, \ - do_xref_cnt); \ - struct dispatch_##x##_s *volatile do_next; \ - dispatch_function_t dc_func; \ - void *dc_ctxt; \ - void *dc_data; \ - void *dc_other; - -#define DISPATCH_OBJ_ASYNC_BIT 0x1 -#define DISPATCH_OBJ_BARRIER_BIT 0x2 -#define DISPATCH_OBJ_GROUP_BIT 0x4 -#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 -// vtables are pointers far away from the low page in memory -#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 127ul) - -struct dispatch_continuation_s { - DISPATCH_CONTINUATION_HEADER(continuation); -}; - -typedef struct dispatch_continuation_s *dispatch_continuation_t; - -struct dispatch_apply_s { - size_t volatile da_index, da_todo; - size_t da_iterations, da_nested; - dispatch_continuation_t da_dc; - _dispatch_thread_semaphore_t da_sema; - uint32_t da_thr_cnt; -}; - -typedef struct dispatch_apply_s *dispatch_apply_t; - -DISPATCH_CLASS_DECL(queue_attr); -struct dispatch_queue_attr_s { - DISPATCH_STRUCT_HEADER(queue_attr); -}; +#pragma mark - +#pragma mark dispatch_queue_t #define DISPATCH_QUEUE_HEADER \ uint32_t volatile dq_running; \ @@ -116,12 +54,30 @@ struct dispatch_queue_attr_s { /* LP64 global queue cacheline boundary */ \ struct dispatch_object_s *volatile dq_items_tail; \ dispatch_queue_t dq_specific_q; \ - uint32_t dq_width; \ - unsigned int dq_is_thread_bound:1; \ + uint16_t dq_width; \ + uint16_t dq_is_thread_bound:1; \ + pthread_priority_t dq_priority; \ + mach_port_t dq_thread; \ + mach_port_t volatile dq_tqthread; \ + uint32_t volatile dq_override; \ unsigned long dq_serialnum; \ const char *dq_label; \ DISPATCH_INTROSPECTION_QUEUE_LIST; +#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX + +#define DISPATCH_QUEUE_CACHELINE_PADDING \ + char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] +#ifdef __LP64__ +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % 
DISPATCH_CACHELINE_SIZE) +#else +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (13*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#endif + DISPATCH_CLASS_DECL(queue); struct dispatch_queue_s { DISPATCH_STRUCT_HEADER(queue); @@ -136,17 +92,21 @@ DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, dispatch_queue); DISPATCH_CLASS_DECL(queue_specific_queue); -extern struct dispatch_queue_s _dispatch_mgr_q; - void _dispatch_queue_destroy(dispatch_object_t dou); void _dispatch_queue_dispose(dispatch_queue_t dq); void _dispatch_queue_invoke(dispatch_queue_t dq); void _dispatch_queue_push_list_slow(dispatch_queue_t dq, - struct dispatch_object_s *obj, unsigned int n); + pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n, + bool retained); void _dispatch_queue_push_slow(dispatch_queue_t dq, - struct dispatch_object_s *obj); + pthread_priority_t pp, struct dispatch_object_s *obj, bool retained); unsigned long _dispatch_queue_probe(dispatch_queue_t dq); dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); +dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq); +void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, + pthread_priority_t pp); +void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, + pthread_priority_t pp); _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); @@ -167,6 +127,8 @@ void _dispatch_async_redirect_invoke(void *ctxt); void _dispatch_sync_recurse_invoke(void *ctxt); void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); +void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); @@ -181,192 +143,136 @@ size_t 
dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz); -#define DISPATCH_QUEUE_PRIORITY_COUNT 4 -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_PRIORITY_COUNT * 2) +#define DISPATCH_QUEUE_QOS_COUNT 6 +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2) -// overcommit priority index values need bit 1 set +// must be in lowest to highest qos order (as encoded in pthread_priority_t) +// overcommit qos index values need bit 1 set enum { - DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY = 0, - DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY, - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY, + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS, + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS, + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, }; extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; +extern struct dispatch_queue_s _dispatch_mgr_q; -#if !(USE_OBJC && __OBJC2__) - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, - struct dispatch_object_s *tail) -{ - struct dispatch_object_s *prev; - tail->do_next = NULL; - prev = dispatch_atomic_xchg2o(dq, dq_items_tail, 
tail, release); - if (fastpath(prev)) { - // if we crash here with a value less than 0x1000, then we are at a - // known bug in client code for example, see _dispatch_queue_dispose - // or _dispatch_atfork_child - prev->do_next = head; - } - return (prev != NULL); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, unsigned int n) -{ - struct dispatch_object_s *head = _head._do, *tail = _tail._do; - if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) { - _dispatch_queue_push_list_slow(dq, head, n); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) -{ - struct dispatch_object_s *tail = _tail._do; - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, tail); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - bool wakeup) -{ - struct dispatch_object_s *tail = _tail._do; - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, tail); - } else if (slowpath(wakeup)) { - _dispatch_wakeup(dq); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_queue_t (*invoke)(dispatch_object_t, - _dispatch_thread_semaphore_t*)) -{ - dispatch_queue_t dq = dou._dq; - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ - dispatch_queue_t tq = NULL; - _dispatch_thread_semaphore_t sema = 0; - tq = invoke(dq, &sema); - // We do not need to check the result. - // When the suspend-count lock is dropped, then the check will happen. 
- (void)dispatch_atomic_dec2o(dq, dq_running, release); - if (sema) { - _dispatch_thread_semaphore_signal(sema); - } else if (tq) { - return _dispatch_queue_push(tq, dq); - } - } - dq->do_next = DISPATCH_OBJECT_LISTLESS; - if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, release)) { - dispatch_atomic_barrier(seq_cst); // - if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { - _dispatch_wakeup(dq); // verify that the queue is idle - } - } - _dispatch_release(dq); // added when the queue is put on the list -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_queue_get_current(void) -{ - return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); -} - -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t -_dispatch_get_root_queue(long priority, bool overcommit) -{ - if (overcommit) switch (priority) { - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: -#if !DISPATCH_NO_BG_PRIORITY - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_OVERCOMMIT_PRIORITY]; +#if HAVE_PTHREAD_WORKQUEUE_QOS +extern pthread_priority_t _dispatch_background_priority; +extern pthread_priority_t _dispatch_user_initiated_priority; +#endif + +#pragma mark - +#pragma mark dispatch_queue_attr_t + +DISPATCH_CLASS_DECL(queue_attr); +struct dispatch_queue_attr_s { + DISPATCH_STRUCT_HEADER(queue_attr); + qos_class_t dqa_qos_class; + int dqa_relative_priority; + unsigned int dqa_overcommit:1, dqa_concurrent:1; +}; + +enum { + DQA_INDEX_NON_OVERCOMMIT = 0, + DQA_INDEX_OVERCOMMIT, +}; + +enum { + DQA_INDEX_CONCURRENT = 0, + DQA_INDEX_SERIAL, +}; + +#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY) + +typedef enum { + DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0, + DQA_INDEX_QOS_CLASS_MAINTENANCE, + DQA_INDEX_QOS_CLASS_BACKGROUND, + DQA_INDEX_QOS_CLASS_UTILITY, + DQA_INDEX_QOS_CLASS_DEFAULT, + DQA_INDEX_QOS_CLASS_USER_INITIATED, + DQA_INDEX_QOS_CLASS_USER_INTERACTIVE, +} 
_dispatch_queue_attr_index_qos_class_t; + +extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] + [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2]; + +#pragma mark - +#pragma mark dispatch_continuation_t + +// If dc_vtable is less than 127, then the object is a continuation. +// Otherwise, the object has a private layout and memory management rules. The +// layout until after 'do_next' must align with normal objects. +#if __LP64__ +#define DISPATCH_CONTINUATION_HEADER(x) \ + const void *do_vtable; \ + union { \ + pthread_priority_t dc_priority; \ + int dc_cache_cnt; \ + uintptr_t dc_pad; \ + }; \ + struct dispatch_##x##_s *volatile do_next; \ + struct voucher_s *dc_voucher; \ + dispatch_function_t dc_func; \ + void *dc_ctxt; \ + void *dc_data; \ + void *dc_other; +#define _DISPATCH_SIZEOF_PTR 8 +#else +#define DISPATCH_CONTINUATION_HEADER(x) \ + const void *do_vtable; \ + union { \ + pthread_priority_t dc_priority; \ + int dc_cache_cnt; \ + uintptr_t dc_pad; \ + }; \ + struct voucher_s *dc_voucher; \ + struct dispatch_##x##_s *volatile do_next; \ + dispatch_function_t dc_func; \ + void *dc_ctxt; \ + void *dc_data; \ + void *dc_other; +#define _DISPATCH_SIZEOF_PTR 4 #endif - case DISPATCH_QUEUE_PRIORITY_LOW: - case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_LOW_OVERCOMMIT_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_OVERCOMMIT_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_HIGH: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY]; - } - switch (priority) { - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: -#if !DISPATCH_NO_BG_PRIORITY - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_PRIORITY]; +#define _DISPATCH_CONTINUATION_PTRS 8 +#if DISPATCH_HW_CONFIG_UP +// UP devices don't contend on continuations so we don't need to force them to +// occupy a whole cacheline (which is intended to avoid 
contention) +#define DISPATCH_CONTINUATION_SIZE \ + (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR) +#else +#define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \ + (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)) #endif - case DISPATCH_QUEUE_PRIORITY_LOW: - case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_LOW_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_PRIORITY]; - case DISPATCH_QUEUE_PRIORITY_HIGH: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_PRIORITY]; - default: - return NULL; - } -} - -// Note to later developers: ensure that any initialization changes are -// made for statically allocated queues (i.e. _dispatch_main_q). -static inline void -_dispatch_queue_init(dispatch_queue_t dq) -{ - dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; - - dq->dq_running = 0; - dq->dq_width = 1; - dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, - relaxed); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_set_bound_thread(dispatch_queue_t dq) -{ - //Tag thread-bound queues with the owning thread - dispatch_assert(dq->dq_is_thread_bound); - dq->do_finalizer = (void*)_dispatch_thread_self(); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) -{ - dispatch_assert(dq->dq_is_thread_bound); - dq->do_finalizer = NULL; -} - -DISPATCH_ALWAYS_INLINE -static inline pthread_t -_dispatch_queue_get_bound_thread(dispatch_queue_t dq) -{ - dispatch_assert(dq->dq_is_thread_bound); - return (pthread_t)dq->do_finalizer; -} +#define ROUND_UP_TO_CONTINUATION_SIZE(x) \ + (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ + ~(DISPATCH_CONTINUATION_SIZE - 1u)) + +#define DISPATCH_OBJ_ASYNC_BIT 0x1 +#define DISPATCH_OBJ_BARRIER_BIT 0x2 +#define DISPATCH_OBJ_GROUP_BIT 0x4 +#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 +#define 
DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10 +#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20 +#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80 +// vtables are pointers far away from the low page in memory +#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful) + +struct dispatch_continuation_s { + DISPATCH_CONTINUATION_HEADER(continuation); +}; +typedef struct dispatch_continuation_s *dispatch_continuation_t; #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT #if TARGET_OS_EMBEDDED @@ -390,56 +296,63 @@ void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); _dispatch_continuation_free_to_heap(c) #endif -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_continuation_alloc_cacheonly(void) -{ - dispatch_continuation_t dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); - } - return dc; -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_continuation_alloc(void) -{ - dispatch_continuation_t dc = - fastpath(_dispatch_continuation_alloc_cacheonly()); - if(!dc) { - return _dispatch_continuation_alloc_from_heap(); - } - return dc; -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) -{ - dispatch_continuation_t prev_dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - int cnt = prev_dc ? 
prev_dc->do_ref_cnt + 1 : 1; - // Cap continuation cache - if (slowpath(cnt > _dispatch_continuation_cache_limit)) { - return dc; - } - dc->do_next = prev_dc; - dc->do_ref_cnt = cnt; - _dispatch_thread_setspecific(dispatch_cache_key, dc); - return NULL; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_free(dispatch_continuation_t dc) -{ - dc = _dispatch_continuation_free_cacheonly(dc); - if (slowpath(dc)) { - _dispatch_continuation_free_to_cache_limit(dc); - } -} -#endif // !(USE_OBJC && __OBJC2__) +#pragma mark - +#pragma mark dispatch_apply_t + +struct dispatch_apply_s { + size_t volatile da_index, da_todo; + size_t da_iterations, da_nested; + dispatch_continuation_t da_dc; + _dispatch_thread_semaphore_t da_sema; + uint32_t da_thr_cnt; +}; +typedef struct dispatch_apply_s *dispatch_apply_t; + +#pragma mark - +#pragma mark dispatch_block_t + +#ifdef __BLOCKS__ + +#define DISPATCH_BLOCK_API_MASK (0x80u - 1) +#define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31) +#define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30) + +struct dispatch_block_private_data_s { + unsigned long dbpd_magic; + dispatch_block_flags_t dbpd_flags; + unsigned int volatile dbpd_atomic_flags; + int volatile dbpd_performed; + pthread_priority_t dbpd_priority; + voucher_t dbpd_voucher; + dispatch_block_t dbpd_block; + struct dispatch_semaphore_s dbpd_group; + dispatch_queue_t volatile dbpd_queue; + mach_port_t dbpd_thread; +}; +typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; + +// dbpd_atomic_flags bits +#define DBF_CANCELED 1u // block has been cancelled +#define DBF_WAITING 2u // dispatch_block_wait has begun +#define DBF_WAITED 4u // dispatch_block_wait has finished without timeout +#define DBF_PERFORM 8u // dispatch_block_perform: don't group_leave + +#define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk + +#define DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, prio, block) \ + { \ + .dbpd_magic = 
DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \ + .dbpd_flags = (flags), \ + .dbpd_priority = (prio), \ + .dbpd_voucher = (voucher), \ + .dbpd_block = (block), \ + .dbpd_group = DISPATCH_GROUP_INITIALIZER(1), \ + } + +dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags, + voucher_t voucher, pthread_priority_t priority, dispatch_block_t block); +void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd); + +#endif /* __BLOCKS__ */ #endif diff --git a/src/semaphore.c b/src/semaphore.c index 20d9ae54a..f9bfdbec6 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -23,14 +23,23 @@ // semaphores are too fundamental to use the dispatch_assume*() macros #if USE_MACH_SEM #define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (slowpath(x)) { \ - DISPATCH_CRASH("flawed group/semaphore logic"); \ + if (slowpath((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_semaphore_t"); \ + } else if (slowpath(x)) { \ + DISPATCH_CRASH("mach semaphore API failure"); \ + } \ + } while (0) +#define DISPATCH_GROUP_VERIFY_KR(x) do { \ + if (slowpath((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_group_t"); \ + } else if (slowpath(x)) { \ + DISPATCH_CRASH("mach semaphore API failure"); \ } \ } while (0) #elif USE_POSIX_SEM #define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ if (slowpath((x) == -1)) { \ - DISPATCH_CRASH("flawed group/semaphore logic"); \ + DISPATCH_CRASH("POSIX semaphore API failure"); \ } \ } while (0) #endif @@ -95,8 +104,8 @@ _dispatch_semaphore_init(long value, dispatch_object_t dou) dispatch_semaphore_t dsema = dou._dsema; dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS; - dsema->do_targetq = dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + false); dsema->dsema_value = value; dsema->dsema_orig = value; #if USE_POSIX_SEM @@ -152,6 +161,7 @@ 
_dispatch_semaphore_create_port(semaphore_t *s4) if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) { kr = semaphore_destroy(mach_task_self(), tmp); + DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } } @@ -191,8 +201,10 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) kern_return_t kr; if (dsema->dsema_port) { kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); + DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); } + dsema->dsema_port = MACH_PORT_DEAD; #elif USE_POSIX_SEM int ret = sem_destroy(&dsema->dsema_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); @@ -442,7 +454,7 @@ _dispatch_group_wake(dispatch_semaphore_t dsema) _dispatch_semaphore_create_port(&dsema->dsema_port); do { kern_return_t kr = semaphore_signal(dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); + DISPATCH_GROUP_VERIFY_KR(kr); } while (--rval); #elif USE_POSIX_SEM do { @@ -463,9 +475,7 @@ _dispatch_group_wake(dispatch_semaphore_t dsema) do { next = fastpath(head->do_next); if (!next && head != tail) { - while (!(next = fastpath(head->do_next))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(next = fastpath(head->do_next)); } dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; dc = _dispatch_continuation_free_cacheonly(head); @@ -552,7 +562,7 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) } while (kr == KERN_ABORTED); if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_SEMAPHORE_VERIFY_KR(kr); + DISPATCH_GROUP_VERIFY_KR(kr); break; } #elif USE_POSIX_SEM @@ -599,7 +609,7 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) do { kr = semaphore_wait(dsema->dsema_port); } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); + DISPATCH_GROUP_VERIFY_KR(kr); #elif USE_POSIX_SEM do { ret = sem_wait(&dsema->dsema_sem); @@ -651,7 +661,7 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, } else { _dispatch_retain(dg); dispatch_atomic_store2o(dsema, 
dsema_notify_head, dsn, seq_cst); - dispatch_atomic_barrier(seq_cst); // + // seq_cst with atomic store to notify_head if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { _dispatch_group_wake(dsema); } @@ -710,6 +720,7 @@ _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) #elif USE_MACH_SEM semaphore_t s4 = (semaphore_t)sema; kern_return_t kr = semaphore_destroy(mach_task_self(), s4); + DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); #elif USE_POSIX_SEM sem_t s4 = (sem_t)sema; diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index c8174b6b4..01179cb66 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -53,6 +53,15 @@ struct dispatch_semaphore_s { DISPATCH_CLASS_DECL(group); +#define DISPATCH_GROUP_INITIALIZER(s) \ + { \ + .do_vtable = (const void*)DISPATCH_VTABLE(group), \ + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .dsema_value = LONG_MAX - (s), \ + .dsema_orig = LONG_MAX, \ + } + void _dispatch_semaphore_dispose(dispatch_object_t dou); size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); diff --git a/src/shims.h b/src/shims.h index 32376eea4..ae7f1c3d7 100644 --- a/src/shims.h +++ b/src/shims.h @@ -28,9 +28,73 @@ #define __DISPATCH_OS_SHIMS__ #include +#if HAVE_PTHREAD_QOS_H && __has_include() +#include +#if __has_include() +#include +#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE +#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED +#ifndef QOS_CLASS_LEGACY +#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_LEGACY +#else +#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT +#endif +#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY +#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND +#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED +#else // pthread/qos_private.h +typedef unsigned long pthread_priority_t; +#endif // 
pthread/qos_private.h +#if __has_include() +#include +#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE +#else // sys/qos_private.h +#define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05 +#endif // sys/qos_private.h +#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG +#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 +#endif +#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 +#endif +#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG +#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 +#endif +#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#endif +#else // HAVE_PTHREAD_QOS_H +typedef unsigned int qos_class_t; +typedef unsigned long pthread_priority_t; +#define QOS_MIN_RELATIVE_PRIORITY (-15) +#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 +#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#endif // HAVE_PTHREAD_QOS_H +#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE +enum { + _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21, + _DISPATCH_QOS_CLASS_USER_INITIATED = 0x19, + _DISPATCH_QOS_CLASS_DEFAULT = 0x15, + _DISPATCH_QOS_CLASS_UTILITY = 0x11, + _DISPATCH_QOS_CLASS_BACKGROUND = 0x09, + _DISPATCH_QOS_CLASS_MAINTENANCE = 0x05, + _DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00, +}; +#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE #if HAVE_PTHREAD_WORKQUEUES +#if __has_include() +#include +#else #include #endif +#ifndef WORKQ_FEATURE_MAINTENANCE +#define WORKQ_FEATURE_MAINTENANCE 0x10 +#endif +#endif // HAVE_PTHREAD_WORKQUEUES + #if HAVE_PTHREAD_NP_H #include #endif @@ -54,6 +118,33 @@ inline size_t strlcpy(char *dst, const char *src, size_t size) { } #endif // TARGET_OS_WIN32 +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20140716 +static inline int +_pthread_workqueue_override_start_direct(mach_port_t thread, + pthread_priority_t priority) +{ + 
(void)thread; (void)priority; + return 0; +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140716 + +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 +static inline int +_pthread_override_qos_class_start_direct(pthread_t thread, + pthread_priority_t priority) +{ + (void)thread; (void)priority; + return 0; +} + +static inline int +_pthread_override_qos_class_end_direct(mach_port_t thread) +{ + (void)thread; + return 0; +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -71,6 +162,8 @@ void __builtin_trap(void); #include "shims/atomic.h" #include "shims/atomic_sfb.h" #include "shims/tsd.h" +#include "shims/yield.h" + #include "shims/hw_config.h" #include "shims/perfmon.h" diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 2f44775e1..24c113b97 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -183,23 +183,25 @@ typedef enum _dispatch_atomic_memory_order default: \ _dispatch_atomic_full_barrier(); break; \ } }) -// Only emulate store seq_cst -> load seq_cst +// seq_cst: only emulate explicit store(seq_cst) -> load(seq_cst) #define dispatch_atomic_load(p, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_relaxed: \ + ({ typeof(*(p)) _r = *(p); \ + switch(dispatch_atomic_memory_order_##m) { \ case _dispatch_atomic_memory_order_seq_cst: \ + _dispatch_atomic_barrier(m); /* fallthrough */ \ + case _dispatch_atomic_memory_order_relaxed: \ break; \ default: \ _dispatch_atomic_unimplemented(); break; \ - }; *(p); }) + } _r; }) #define dispatch_atomic_store(p, v, m) \ ({ switch(dispatch_atomic_memory_order_##m) { \ case _dispatch_atomic_memory_order_release: \ + case _dispatch_atomic_memory_order_seq_cst: \ _dispatch_atomic_barrier(m); /* fallthrough */ \ case _dispatch_atomic_memory_order_relaxed: \ - case _dispatch_atomic_memory_order_seq_cst: \ *(p) = (v); break; \ - default:\ + default: \ 
_dispatch_atomic_unimplemented(); break; \ } switch(dispatch_atomic_memory_order_##m) { \ case _dispatch_atomic_memory_order_seq_cst: \ @@ -251,6 +253,15 @@ typedef enum _dispatch_atomic_memory_order ({ __asm__ __volatile__( \ "mfence" \ : : : "memory"); }) +#undef dispatch_atomic_load +#define dispatch_atomic_load(p, m) \ + ({ switch(dispatch_atomic_memory_order_##m) { \ + case _dispatch_atomic_memory_order_seq_cst: \ + case _dispatch_atomic_memory_order_relaxed: \ + break; \ + default: \ + _dispatch_atomic_unimplemented(); break; \ + } *(p); }) // xchg is faster than store + mfence #undef dispatch_atomic_store #define dispatch_atomic_store(p, v, m) \ @@ -272,7 +283,6 @@ typedef enum _dispatch_atomic_memory_order #pragma mark - #pragma mark generic -#define dispatch_hardware_pause() ({ __asm__(""); }) // assume atomic builtins provide barriers #define dispatch_atomic_barrier(m) // see comment in dispatch_once.c @@ -342,9 +352,6 @@ typedef enum _dispatch_atomic_memory_order #pragma mark - #pragma mark x86 -#undef dispatch_hardware_pause -#define dispatch_hardware_pause() ({ __asm__("pause"); }) - #undef dispatch_atomic_maximally_synchronizing_barrier #ifdef __LP64__ #define dispatch_atomic_maximally_synchronizing_barrier() \ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index c5e7be3f2..087d98c80 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -34,18 +34,18 @@ // Returns UINT_MAX if all the bits in p were already set. #define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m) -// TODO: rdar://11477843 DISPATCH_ALWAYS_INLINE static inline unsigned int -_dispatch_atomic_set_first_bit(volatile uint32_t *p, unsigned int max_index) +_dispatch_atomic_set_first_bit(volatile unsigned long *p, + unsigned int max_index) { unsigned int index; - typeof(*p) b, mask, b_masked; + unsigned long b, mask, b_masked; for (;;) { b = *p; // ffs returns 1 + index, or 0 if none set. 
- index = (unsigned int)__builtin_ffs((int)~b); + index = (unsigned int)__builtin_ffsl((long)~b); if (slowpath(index == 0)) { return UINT_MAX; } @@ -64,12 +64,11 @@ _dispatch_atomic_set_first_bit(volatile uint32_t *p, unsigned int max_index) #if defined(__x86_64__) || defined(__i386__) #undef dispatch_atomic_set_first_bit -// TODO: rdar://11477843 uint64_t -> long DISPATCH_ALWAYS_INLINE static inline unsigned int -dispatch_atomic_set_first_bit(volatile uint64_t *p, unsigned int max) +dispatch_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) { - typeof(*p) val, bit; + unsigned long val, bit; if (max > (sizeof(val) * 8)) { __asm__ ( "1: \n\t" diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index ede0d4800..2b85d4a60 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2011 Apple Inc. All rights reserved. + * Copyright (c) 2011-2013 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,84 +27,103 @@ #ifndef __DISPATCH_SHIMS_HW_CONFIG__ #define __DISPATCH_SHIMS_HW_CONFIG__ -#if defined(__APPLE__) -#define DISPATCH_SYSCTL_LOGICAL_CPUS "hw.logicalcpu_max" -#define DISPATCH_SYSCTL_PHYSICAL_CPUS "hw.physicalcpu_max" -#define DISPATCH_SYSCTL_ACTIVE_CPUS "hw.activecpu" -#elif defined(__FreeBSD__) -#define DISPATCH_SYSCTL_LOGICAL_CPUS "kern.smp.cpus" -#define DISPATCH_SYSCTL_PHYSICAL_CPUS "kern.smp.cpus" -#define DISPATCH_SYSCTL_ACTIVE_CPUS "kern.smp.cpus" +#if !TARGET_OS_WIN32 + +typedef enum { + _dispatch_hw_config_logical_cpus, + _dispatch_hw_config_physical_cpus, + _dispatch_hw_config_active_cpus, +} _dispatch_hw_config_t; + +#if !defined(DISPATCH_HAVE_HW_CONFIG_COMMPAGE) && \ + defined(_COMM_PAGE_LOGICAL_CPUS) && \ + defined(_COMM_PAGE_PHYSICAL_CPUS) && defined(_COMM_PAGE_ACTIVE_CPUS) +#define DISPATCH_HAVE_HW_CONFIG_COMMPAGE 1 #endif -#if !TARGET_OS_WIN32 +#if DISPATCH_HAVE_HW_CONFIG_COMMPAGE +DISPATCH_ALWAYS_INLINE static inline uint32_t 
-_dispatch_get_logicalcpu_max(void) +_dispatch_hw_get_config(_dispatch_hw_config_t c) { - uint32_t val = 1; -#if defined(_COMM_PAGE_LOGICAL_CPUS) - uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_LOGICAL_CPUS; - val = (uint32_t)*u8val; -#elif defined(DISPATCH_SYSCTL_LOGICAL_CPUS) - size_t valsz = sizeof(val); - int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS, - &val, &valsz, NULL, 0); - (void)dispatch_assume_zero(ret); - (void)dispatch_assume(valsz == sizeof(uint32_t)); -#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) - int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); - val = ret < 0 ? 1 : ret; -#else -#warning "no supported way to query logical CPU count" -#endif - return val; + uintptr_t p; + switch (c) { + case _dispatch_hw_config_logical_cpus: + p = _COMM_PAGE_LOGICAL_CPUS; break; + case _dispatch_hw_config_physical_cpus: + p = _COMM_PAGE_PHYSICAL_CPUS; break; + case _dispatch_hw_config_active_cpus: + p = _COMM_PAGE_ACTIVE_CPUS; break; + } + return *(uint8_t*)p; } +#define dispatch_hw_config(c) \ + _dispatch_hw_get_config(_dispatch_hw_config_##c) + +#define DISPATCH_HW_CONFIG() +#define _dispatch_hw_config_init() + +#else // DISPATCH_HAVE_HW_CONFIG_COMMPAGE + +extern struct _dispatch_hw_configs_s { + uint32_t logical_cpus; + uint32_t physical_cpus; + uint32_t active_cpus; +} _dispatch_hw_config; + +#define DISPATCH_HW_CONFIG() struct _dispatch_hw_configs_s _dispatch_hw_config +#define dispatch_hw_config(c) (_dispatch_hw_config.c) + +DISPATCH_ALWAYS_INLINE static inline uint32_t -_dispatch_get_physicalcpu_max(void) +_dispatch_hw_get_config(_dispatch_hw_config_t c) { uint32_t val = 1; -#if defined(_COMM_PAGE_PHYSICAL_CPUS) - uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_PHYSICAL_CPUS; - val = (uint32_t)*u8val; -#elif defined(DISPATCH_SYSCTL_PHYSICAL_CPUS) - size_t valsz = sizeof(val); - int ret = sysctlbyname(DISPATCH_SYSCTL_LOGICAL_CPUS, - &val, &valsz, NULL, 0); - (void)dispatch_assume_zero(ret); - (void)dispatch_assume(valsz == sizeof(uint32_t)); 
-#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) - int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); - val = ret < 0 ? 1 : ret; -#else -#warning "no supported way to query physical CPU count" + const char *name = NULL; + int r; +#if defined(__APPLE__) + switch (c) { + case _dispatch_hw_config_logical_cpus: + name = "hw.logicalcpu_max"; break; + case _dispatch_hw_config_physical_cpus: + name = "hw.physicalcpu_max"; break; + case _dispatch_hw_config_active_cpus: + name = "hw.activecpu"; break; + } +#elif defined(__FreeBSD__) + (void)c; name = "kern.smp.cpus"; #endif + if (name) { + size_t valsz = sizeof(val); + r = sysctlbyname(name, &val, &valsz, NULL, 0); + (void)dispatch_assume_zero(r); + dispatch_assert(valsz == sizeof(uint32_t)); + } else { +#if HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) + r = (int)sysconf(_SC_NPROCESSORS_ONLN); + if (r > 0) val = (uint32_t)r; +#endif + } return val; } -static inline uint32_t -_dispatch_get_activecpu(void) +#define dispatch_hw_config_init(c) \ + _dispatch_hw_get_config(_dispatch_hw_config_##c) + +static inline void +_dispatch_hw_config_init(void) { - uint32_t val = 1; -#if defined(_COMM_PAGE_ACTIVE_CPUS) - uint8_t* u8val = (uint8_t*)(uintptr_t)_COMM_PAGE_ACTIVE_CPUS; - val = (uint32_t)*u8val; -#elif defined(DISPATCH_SYSCTL_ACTIVE_CPUS) - size_t valsz = sizeof(val); - int ret = sysctlbyname(DISPATCH_SYSCTL_ACTIVE_CPUS, - &val, &valsz, NULL, 0); - (void)dispatch_assume_zero(ret); - (void)dispatch_assume(valsz == sizeof(uint32_t)); -#elif HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) - int ret = (int)sysconf(_SC_NPROCESSORS_ONLN); - val = ret < 0 ? 
1 : ret; -#else -#warning "no supported way to query active CPU count" -#endif - return val; + dispatch_hw_config(logical_cpus) = dispatch_hw_config_init(logical_cpus); + dispatch_hw_config(physical_cpus) = dispatch_hw_config_init(physical_cpus); + dispatch_hw_config(active_cpus) = dispatch_hw_config_init(active_cpus); } +#undef dispatch_hw_config_init + +#endif // DISPATCH_HAVE_HW_CONFIG_COMMPAGE + #else // TARGET_OS_WIN32 static inline long @@ -118,7 +137,6 @@ _dispatch_count_bits(unsigned long value) return bits; } - static inline uint32_t _dispatch_get_ncpus(void) { diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index f73900689..8af33ead9 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -27,7 +27,7 @@ #ifndef __DISPATCH_SHIMS_PERFMON__ #define __DISPATCH_SHIMS_PERFMON__ -#if DISPATCH_PERF_MON +#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION #if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ (defined(__i386__) || defined(__x86_64__)) @@ -63,32 +63,6 @@ _dispatch_perfmon_workitem_dec(void) } #endif /* USE_APPLE_TSD_OPTIMIZATIONS */ -// C99 doesn't define flsll() or ffsll() -#ifdef __LP64__ -#define flsll(x) flsl(x) -#else -static inline unsigned int -flsll(uint64_t val) -{ - union { - struct { -#ifdef __BIG_ENDIAN__ - unsigned int hi, low; -#else - unsigned int low, hi; -#endif - } words; - uint64_t word; - } _bucket = { - .word = val, - }; - if (_bucket.words.hi) { - return fls(_bucket.words.hi) + 32; - } - return fls(_bucket.words.low); -} -#endif - #define _dispatch_perfmon_start() \ uint64_t start = _dispatch_absolute_time() #define _dispatch_perfmon_end() \ diff --git a/src/shims/time.h b/src/shims/time.h index b30b9893c..7b34bc7af 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -31,22 +31,6 @@ #error "Please #include instead of this file directly." 
#endif -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline void -_dispatch_contention_usleep(unsigned int us) -{ -#if HAVE_MACH -#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) - thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DISPATCH_CONTENTION, us); -#else - thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, ((us-1)/1000)+1); -#endif -#else - usleep(us); -#endif -} - #if TARGET_OS_WIN32 static inline unsigned int sleep(unsigned int seconds) diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 2a0ab2290..25de7e474 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -36,32 +36,44 @@ #if USE_APPLE_TSD_OPTIMIZATIONS && HAVE_PTHREAD_KEY_INIT_NP && \ !defined(DISPATCH_USE_DIRECT_TSD) #define DISPATCH_USE_DIRECT_TSD 1 +#if __has_include() +#include +#endif #endif #if DISPATCH_USE_DIRECT_TSD static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; -#if DISPATCH_USE_OS_SEMAPHORE_CACHE -static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; -#else -static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY1; -#endif +static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY1; static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION -static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY5; +static const unsigned long dispatch_introspection_key =__PTK_LIBDISPATCH_KEY5+1; #elif DISPATCH_PERF_MON -static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5; +static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5+1; +#endif +#if DISPATCH_USE_OS_SEMAPHORE_CACHE +static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; 
+#else +static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY5+2; #endif +#ifndef __TSD_THREAD_QOS_CLASS +#define __TSD_THREAD_QOS_CLASS 4 +#endif +static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; + DISPATCH_TSD_INLINE static inline void _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) { + if (!*k || !d) return; dispatch_assert_zero(pthread_key_init_np((int)*k, d)); } #else extern pthread_key_t dispatch_queue_key; +extern pthread_key_t dispatch_voucher_key; #if DISPATCH_USE_OS_SEMAPHORE_CACHE #error "Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration" #else @@ -70,6 +82,7 @@ extern pthread_key_t dispatch_sema4_key; extern pthread_key_t dispatch_cache_key; extern pthread_key_t dispatch_io_key; extern pthread_key_t dispatch_apply_key; +extern pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION extern pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON @@ -124,7 +137,35 @@ _dispatch_thread_getspecific(pthread_key_t k) #endif #endif +#if TARGET_OS_WIN32 +#define _dispatch_thread_port() ((mach_port_t)0) +#else +#if DISPATCH_USE_DIRECT_TSD +#define _dispatch_thread_port() ((mach_port_t)_dispatch_thread_getspecific(\ + _PTHREAD_TSD_SLOT_MACH_THREAD_SELF)) +#else +#define _dispatch_thread_port() (pthread_mach_thread_np(_dispatch_thread_self())) +#endif +#endif + DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { +#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 + return 0; +#elif __has_include() + return _os_cpu_number(); +#elif defined(__x86_64__) || defined(__i386__) + struct { uintptr_t p1, p2; } p; + __asm__("sidt %[p]" : [p] "=&m" (p)); + return (unsigned int)(p.p1 & 0xfff); +#else + // Not yet implemented. 
+ return 0; +#endif +} + +#undef DISPATCH_TSD_INLINE + +#endif diff --git a/src/shims/yield.h b/src/shims/yield.h new file mode 100644 index 000000000..2a884d6a9 --- /dev/null +++ b/src/shims/yield.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_YIELD__ +#define __DISPATCH_SHIMS_YIELD__ + +#pragma mark - +#pragma mark _dispatch_wait_until + +#if DISPATCH_HW_CONFIG_UP +#define _dispatch_wait_until(c) do { \ + int _spins = 0; \ + while (!(c)) { \ + _spins++; \ + _dispatch_preemption_yield(_spins); \ + } } while (0) +#elif TARGET_OS_EMBEDDED +// +#ifndef DISPATCH_WAIT_SPINS +#define DISPATCH_WAIT_SPINS 1024 +#endif +#define _dispatch_wait_until(c) do { \ + int _spins = -(DISPATCH_WAIT_SPINS); \ + while (!(c)) { \ + if (slowpath(_spins++ >= 0)) { \ + _dispatch_preemption_yield(_spins); \ + } else { \ + dispatch_hardware_pause(); \ + } \ + } } while (0) +#else +#define _dispatch_wait_until(c) do { \ + while (!(c)) { \ + dispatch_hardware_pause(); \ + } } while (0) +#endif + +#pragma mark - +#pragma mark _dispatch_contention_wait_until + +#if DISPATCH_HW_CONFIG_UP +#define _dispatch_contention_wait_until(c) false +#else +#ifndef DISPATCH_CONTENTION_SPINS_MAX +#define DISPATCH_CONTENTION_SPINS_MAX (128 - 1) +#endif +#ifndef DISPATCH_CONTENTION_SPINS_MIN +#define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) +#endif +#if TARGET_OS_EMBEDDED +#define _dispatch_contention_spins() \ + ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ + (DISPATCH_CONTENTION_SPINS_MIN)) / 2) +#else +// Use randomness to prevent threads from resonating at the same +// frequency and permanently contending. All threads sharing the same +// seed value is safe with the FreeBSD rand_r implementation. 
+#define _dispatch_contention_spins() ({ \ + static unsigned int _seed; \ + ((unsigned int)rand_r(&_seed) & (DISPATCH_CONTENTION_SPINS_MAX)) | \ + (DISPATCH_CONTENTION_SPINS_MIN); }) +#endif +#define _dispatch_contention_wait_until(c) ({ \ + bool _out = false; \ + unsigned int _spins = _dispatch_contention_spins(); \ + while (_spins--) { \ + dispatch_hardware_pause(); \ + if ((_out = fastpath(c))) break; \ + }; _out; }) +#endif + +#pragma mark - +#pragma mark dispatch_hardware_pause + +#if defined(__x86_64__) || defined(__i386__) +#define dispatch_hardware_pause() __asm__("pause") +#elif (defined(__arm__) && defined(_ARM_ARCH_7) && defined(__thumb__)) || \ + defined(__arm64__) +#define dispatch_hardware_pause() __asm__("yield") +#define dispatch_hardware_wfe() __asm__("wfe") +#else +#define dispatch_hardware_pause() __asm__("") +#endif + +#pragma mark - +#pragma mark _dispatch_preemption_yield + +#if HAVE_MACH +#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS +#else +#define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS +#endif +#define _dispatch_preemption_yield(n) _dispatch_thread_switch(MACH_PORT_NULL, \ + DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) +#else +#define _dispatch_preemption_yield(n) pthread_yield_np() +#endif // HAVE_MACH + +#pragma mark - +#pragma mark _dispatch_contention_usleep + +#ifndef DISPATCH_CONTENTION_USLEEP_START +#define DISPATCH_CONTENTION_USLEEP_START 500 +#endif +#ifndef DISPATCH_CONTENTION_USLEEP_MAX +#define DISPATCH_CONTENTION_USLEEP_MAX 100000 +#endif + +#if HAVE_MACH +#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ + SWITCH_OPTION_DISPATCH_CONTENTION, (u)) +#else +#define 
_dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ + SWITCH_OPTION_WAIT, (((u)-1)/1000)+1) +#endif +#else +#define _dispatch_contention_usleep(u) usleep((u)) +#endif // HAVE_MACH + +#pragma mark - +#pragma mark _dispatch_thread_switch + +#if HAVE_MACH +#define _dispatch_thread_switch(thread_name, option, option_time) \ + thread_switch((thread_name), (option), (option_time)) + +#endif // HAVE_MACH + +#endif // __DISPATCH_SHIMS_YIELD__ diff --git a/src/source.c b/src/source.c index 067c5baf9..b593ae04a 100644 --- a/src/source.c +++ b/src/source.c @@ -202,23 +202,46 @@ dispatch_source_testcancel(dispatch_source_t ds) return (bool)(ds->ds_atomic_flags & DSF_CANCELED); } - unsigned long dispatch_source_get_mask(dispatch_source_t ds) { - return ds->ds_pending_data_mask; + unsigned long mask = ds->ds_pending_data_mask; + if (ds->ds_vmpressure_override) { + mask = NOTE_VM_PRESSURE; + } +#if TARGET_IPHONE_SIMULATOR + else if (ds->ds_memorystatus_override) { + mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; + } +#endif + return mask; } uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { - return (unsigned int)ds->ds_ident_hack; + unsigned int handle = (unsigned int)ds->ds_ident_hack; +#if TARGET_IPHONE_SIMULATOR + if (ds->ds_memorystatus_override) { + handle = 0; + } +#endif + return handle; } unsigned long dispatch_source_get_data(dispatch_source_t ds) { - return ds->ds_data; + unsigned long data = ds->ds_data; + if (ds->ds_vmpressure_override) { + data = NOTE_VM_PRESSURE; + } +#if TARGET_IPHONE_SIMULATOR + else if (ds->ds_memorystatus_override) { + data = NOTE_MEMORYSTATUS_PRESSURE_WARN; + } +#endif + return data; } void @@ -239,157 +262,172 @@ dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) #pragma mark - #pragma mark dispatch_source_handler -#ifdef __BLOCKS__ -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -static void -_dispatch_source_set_event_handler2(void *context) 
+DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_source_handler_alloc(dispatch_source_t ds, void *handler, long kind, + bool block) { - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; - - if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { - Block_release(dr->ds_handler_ctxt); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + if (handler) { + dc->do_vtable = (void *)((block ? DISPATCH_OBJ_BLOCK_RELEASE_BIT : + DISPATCH_OBJ_CTXT_FETCH_BIT) | (kind != DS_EVENT_HANDLER ? + DISPATCH_OBJ_ASYNC_BIT : 0l)); + dc->dc_priority = 0; + dc->dc_voucher = NULL; + if (block) { +#ifdef __BLOCKS__ + if (slowpath(_dispatch_block_has_private_data(handler))) { + // sources don't propagate priority by default + dispatch_block_flags_t flags = DISPATCH_BLOCK_NO_QOS_CLASS; + flags |= _dispatch_block_get_flags(handler); + _dispatch_continuation_priority_set(dc, + _dispatch_block_get_priority(handler), flags); + } + if (kind != DS_EVENT_HANDLER) { + dc->dc_func = _dispatch_call_block_and_release; + } else { + dc->dc_func = _dispatch_Block_invoke(handler); + } + dc->dc_ctxt = _dispatch_Block_copy(handler); +#endif /* __BLOCKS__ */ + } else { + dc->dc_func = handler; + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_trace_continuation_push((dispatch_queue_t)ds, dc); + } else { + dc->dc_func = NULL; } - dr->ds_handler_func = context ? 
_dispatch_Block_invoke(context) : NULL; - dr->ds_handler_ctxt = context; - ds->ds_handler_is_block = true; -} - -void -dispatch_source_set_event_handler(dispatch_source_t ds, - dispatch_block_t handler) -{ - handler = _dispatch_Block_copy(handler); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_event_handler2); + dc->dc_data = (void*)kind; + return dc; } -#endif /* __BLOCKS__ */ -static void -_dispatch_source_set_event_handler_f(void *context) +static inline void +_dispatch_source_handler_replace(dispatch_source_refs_t dr, long kind, + dispatch_continuation_t dc_new) { - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; - + dispatch_continuation_t dc = dr->ds_handler[kind]; + if (dc) { #ifdef __BLOCKS__ - if (ds->ds_handler_is_block && dr->ds_handler_ctxt) { - Block_release(dr->ds_handler_ctxt); + if ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT) { + Block_release(dc->dc_ctxt); + } +#endif /* __BLOCKS__ */ + if (dc->dc_voucher) { + _voucher_release(dc->dc_voucher); + dc->dc_voucher = NULL; + } + _dispatch_continuation_free(dc); } -#endif - dr->ds_handler_func = context; - dr->ds_handler_ctxt = ds->do_ctxt; - ds->ds_handler_is_block = false; + dr->ds_handler[kind] = dc_new; } -void -dispatch_source_set_event_handler_f(dispatch_source_t ds, - dispatch_function_t handler) +static inline void +_dispatch_source_handler_free(dispatch_source_refs_t dr, long kind) { - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_event_handler_f); + _dispatch_source_handler_replace(dr, kind, NULL); } -#ifdef __BLOCKS__ -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol static void -_dispatch_source_set_cancel_handler2(void *context) +_dispatch_source_set_handler(void *context) { dispatch_source_t ds = 
(dispatch_source_t)_dispatch_queue_get_current(); dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; - - if (ds->ds_cancel_is_block && dr->ds_cancel_handler) { - Block_release(dr->ds_cancel_handler); + dispatch_continuation_t dc = context; + long kind = (long)dc->dc_data; + dc->dc_data = 0; + if (!dc->dc_func) { + _dispatch_continuation_free(dc); + dc = NULL; + } else if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_source_handler_replace(ds->ds_refs, kind, dc); + if (kind == DS_EVENT_HANDLER && dc && dc->dc_priority) { +#if HAVE_PTHREAD_WORKQUEUE_QOS + ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_queue_set_override_priority((dispatch_queue_t)ds); +#endif } - dr->ds_cancel_handler = context; - ds->ds_cancel_is_block = true; } +#ifdef __BLOCKS__ void -dispatch_source_set_cancel_handler(dispatch_source_t ds, - dispatch_block_t handler) +dispatch_source_set_event_handler(dispatch_source_t ds, + dispatch_block_t handler) { - handler = _dispatch_Block_copy(handler); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_cancel_handler2); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, true); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } #endif /* __BLOCKS__ */ -static void -_dispatch_source_set_cancel_handler_f(void *context) +void +dispatch_source_set_event_handler_f(dispatch_source_t ds, + dispatch_function_t handler) { - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; - -#ifdef __BLOCKS__ - if (ds->ds_cancel_is_block && dr->ds_cancel_handler) { - Block_release(dr->ds_cancel_handler); - } -#endif - dr->ds_cancel_handler = context; - ds->ds_cancel_is_block = false; + 
dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } void -dispatch_source_set_cancel_handler_f(dispatch_source_t ds, - dispatch_function_t handler) +_dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, + void *ctxt, dispatch_function_t handler) { - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_cancel_handler_f); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); + dc->do_vtable = (void *)((long)dc->do_vtable &~DISPATCH_OBJ_CTXT_FETCH_BIT); + dc->dc_other = dc->dc_ctxt; + dc->dc_ctxt = ctxt; + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } #ifdef __BLOCKS__ -static void -_dispatch_source_set_registration_handler2(void *context) -{ - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; - - if (ds->ds_registration_is_block && dr->ds_registration_handler) { - Block_release(dr->ds_registration_handler); - } - dr->ds_registration_handler = context; - ds->ds_registration_is_block = true; -} - void -dispatch_source_set_registration_handler(dispatch_source_t ds, - dispatch_block_t handler) +dispatch_source_set_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) { - handler = _dispatch_Block_copy(handler); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_registration_handler2); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } #endif /* __BLOCKS__ */ -static void -_dispatch_source_set_registration_handler_f(void *context) +void 
+dispatch_source_set_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) { - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - dispatch_source_refs_t dr = ds->ds_refs; + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, false); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); +} #ifdef __BLOCKS__ - if (ds->ds_registration_is_block && dr->ds_registration_handler) { - Block_release(dr->ds_registration_handler); - } -#endif - dr->ds_registration_handler = context; - ds->ds_registration_is_block = false; +void +dispatch_source_set_registration_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, true); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } +#endif /* __BLOCKS__ */ void dispatch_source_set_registration_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, handler, - _dispatch_source_set_registration_handler_f); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, false); + _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, + _dispatch_source_set_handler); } #pragma mark - @@ -399,68 +437,43 @@ static void _dispatch_source_registration_callout(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; - + dispatch_continuation_t dc = dr->ds_handler[DS_REGISTN_HANDLER]; if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { // no registration callout if source is canceled rdar://problem/8955246 -#ifdef __BLOCKS__ - if (ds->ds_registration_is_block) { - Block_release(dr->ds_registration_handler); - } - } else if (ds->ds_registration_is_block) { - dispatch_block_t b = 
dr->ds_registration_handler; - _dispatch_client_callout_block(b); - Block_release(dr->ds_registration_handler); -#endif - } else { - dispatch_function_t f = dr->ds_registration_handler; - _dispatch_client_callout(ds->do_ctxt, f); + return _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); } - ds->ds_registration_is_block = false; - dr->ds_registration_handler = NULL; + pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); + if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_continuation_pop(dc); + dr->ds_handler[DS_REGISTN_HANDLER] = NULL; + _dispatch_reset_defaultpriority(old_dp); } static void _dispatch_source_cancel_callout(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; - + dispatch_continuation_t dc = dr->ds_handler[DS_CANCEL_HANDLER]; ds->ds_pending_data_mask = 0; ds->ds_pending_data = 0; ds->ds_data = 0; - -#ifdef __BLOCKS__ - if (ds->ds_handler_is_block) { - Block_release(dr->ds_handler_ctxt); - ds->ds_handler_is_block = false; - dr->ds_handler_func = NULL; - dr->ds_handler_ctxt = NULL; - } - if (ds->ds_registration_is_block) { - Block_release(dr->ds_registration_handler); - ds->ds_registration_is_block = false; - dr->ds_registration_handler = NULL; - } -#endif - - if (!dr->ds_cancel_handler) { + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + if (!dc) { return; } - if (ds->ds_cancel_is_block) { -#ifdef __BLOCKS__ - dispatch_block_t b = dr->ds_cancel_handler; - if (ds->ds_atomic_flags & DSF_CANCELED) { - _dispatch_client_callout_block(b); - } - Block_release(dr->ds_cancel_handler); - ds->ds_cancel_is_block = false; -#endif - } else { - dispatch_function_t f = dr->ds_cancel_handler; - if (ds->ds_atomic_flags & DSF_CANCELED) { - _dispatch_client_callout(ds->do_ctxt, f); - } + if (!(ds->ds_atomic_flags & DSF_CANCELED)) { + return _dispatch_source_handler_free(dr, DS_CANCEL_HANDLER); } - 
dr->ds_cancel_handler = NULL; + pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); + if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_continuation_pop(dc); + dr->ds_handler[DS_CANCEL_HANDLER] = NULL; + _dispatch_reset_defaultpriority(old_dp); } static void @@ -472,6 +485,7 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) return; } dispatch_source_refs_t dr = ds->ds_refs; + dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); if (ds->ds_is_level) { ds->ds_data = ~prev; @@ -480,9 +494,17 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) } else { ds->ds_data = prev; } - if (dispatch_assume(prev) && dr->ds_handler_func) { - _dispatch_client_callout(dr->ds_handler_ctxt, dr->ds_handler_func); + if (!dispatch_assume(prev) || !dc) { + return; } + pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); + _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dc); + voucher_t voucher = dc->dc_voucher ? _voucher_retain(dc->dc_voucher) : NULL; + _dispatch_continuation_voucher_adopt(dc); // consumes voucher reference + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_introspection_queue_item_complete(dc); + if (voucher) dc->dc_voucher = voucher; + _dispatch_reset_defaultpriority(old_dp); } static void @@ -568,7 +590,7 @@ _dispatch_source_invoke2(dispatch_object_t dou, } _dispatch_source_kevent_register(ds); ds->ds_is_installed = true; - if (dr->ds_registration_handler) { + if (dr->ds_handler[DS_REGISTN_HANDLER]) { return ds->do_targetq; } if (slowpath(ds->do_xref_cnt == -1)) { @@ -577,7 +599,7 @@ _dispatch_source_invoke2(dispatch_object_t dou, } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { // Source suspended by an item drained from the source queue. 
return NULL; - } else if (dr->ds_registration_handler) { + } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { // The source has been registered and the registration handler needs // to be delivered on the target queue. if (dq != ds->do_targetq) { @@ -598,8 +620,9 @@ _dispatch_source_invoke2(dispatch_object_t dou, } _dispatch_source_kevent_unregister(ds); } - if (dr->ds_cancel_handler || ds->ds_handler_is_block || - ds->ds_registration_is_block) { + if (dr->ds_handler[DS_EVENT_HANDLER] || + dr->ds_handler[DS_CANCEL_HANDLER] || + dr->ds_handler[DS_REGISTN_HANDLER]) { if (dq != ds->do_targetq) { return ds->do_targetq; } @@ -645,18 +668,16 @@ _dispatch_source_probe(dispatch_source_t ds) if (!ds->ds_is_installed) { // The source needs to be installed on the manager queue. return true; - } else if (dr->ds_registration_handler) { + } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { // The registration handler needs to be delivered to the target queue. return true; } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){ // The source needs to be uninstalled from the manager queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. - if (ds->ds_dkev || dr->ds_cancel_handler -#ifdef __BLOCKS__ - || ds->ds_handler_is_block || ds->ds_registration_is_block -#endif - ) { + if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] || + dr->ds_handler[DS_CANCEL_HANDLER] || + dr->ds_handler[DS_REGISTN_HANDLER]) { return true; } } else if (ds->ds_pending_data) { @@ -666,7 +687,7 @@ _dispatch_source_probe(dispatch_source_t ds) // The source needs to be rearmed on the manager queue. 
return true; } - return (ds->dq_items_tail != NULL); + return _dispatch_queue_class_probe(ds); } static void @@ -957,18 +978,6 @@ _dispatch_kevent_drain(struct kevent64_s *ke) } else if (ke->data == ESRCH) { return _dispatch_kevent_proc_exit(ke); } -#if DISPATCH_USE_VM_PRESSURE - } else if (ke->filter == EVFILT_VM && ke->data == ENOTSUP) { - // Memory pressure kevent is not supported on all platforms - // - return; -#endif -#if DISPATCH_USE_MEMORYSTATUS - } else if (ke->filter == EVFILT_MEMORYSTATUS && - (ke->data == EINVAL || ke->data == ENOTSUP)) { - // Memory status kevent is not supported on all platforms - return; -#endif } return _dispatch_kevent_error(ke); } @@ -1060,7 +1069,7 @@ _dispatch_kevent_unguard(dispatch_kevent_t dk) #pragma mark - #pragma mark dispatch_source_timer -#if DISPATCH_USE_DTRACE && DISPATCH_USE_DTRACE_INTROSPECTION +#if DISPATCH_USE_DTRACE static dispatch_source_refs_t _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; #define _dispatch_trace_next_timer_set(x, q) \ @@ -1175,7 +1184,7 @@ _dispatch_source_set_timer2(void *context) // Called on the source queue struct dispatch_set_timer_params *params = context; dispatch_suspend(params->ds); - dispatch_barrier_async_f(&_dispatch_mgr_q, params, + _dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params, _dispatch_source_set_timer3); } @@ -1464,8 +1473,9 @@ _dispatch_timers_init(void) DISPATCH_KEVENT_TIMER_UDATA(tidx); } #endif // __LP64__ - _dispatch_timers_force_max_leeway = - getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"); + if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) { + _dispatch_timers_force_max_leeway = true; + } } static inline void @@ -1517,8 +1527,9 @@ _dispatch_timers_update(dispatch_source_t ds) if (tidx != DISPATCH_TIMER_INDEX_DISARM) { (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); } - free(dk); _dispatch_object_debug(ds, "%s", __func__); + ds->ds_dkev = NULL; + free(dk); } else { _dispatch_timers_unregister(ds, dk); } @@ -1796,9 
+1807,9 @@ dispatch_timer_aggregate_create(void) dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), sizeof(struct dispatch_timer_aggregate_s)); _dispatch_queue_init((dispatch_queue_t)dta); - dta->do_targetq = _dispatch_get_root_queue(DISPATCH_QUEUE_PRIORITY_HIGH, - true); - dta->dq_width = UINT32_MAX; + dta->do_targetq = _dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_USER_INITIATED, true); + dta->dq_width = DISPATCH_QUEUE_WIDTH_MAX; //FIXME: aggregates need custom vtable //dta->dq_label = "timer-aggregate"; for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { @@ -1869,7 +1880,7 @@ _dispatch_timer_aggregates_configure(void) } dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); - dispatch_barrier_async_f((dispatch_queue_t)dta, dtau, + _dispatch_barrier_async_detached_f((dispatch_queue_t)dta, dtau, _dispatch_timer_aggregate_update); } } @@ -2099,8 +2110,25 @@ _dispatch_kq_init(void *context DISPATCH_UNUSED) _dispatch_kq = kqueue(); #endif if (_dispatch_kq == -1) { - DISPATCH_CLIENT_CRASH("kqueue() create failed: " - "probably out of file descriptors"); + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH("kqueue() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH("kqueue() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH("kqueue() failure: " + "kernel is out of memory"); + break; + default: + (void)dispatch_assume_zero(err); + DISPATCH_CRASH("kqueue() failure"); + break; + } } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { // in case we fall back to select() FD_SET(_dispatch_kq, &_dispatch_rfds); @@ -2108,7 +2136,7 @@ _dispatch_kq_init(void *context DISPATCH_UNUSED) (void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0, NULL)); - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); + 
_dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); } static int @@ -2239,6 +2267,7 @@ _dispatch_mgr_invoke(void) poll = _dispatch_mgr_select(poll); if (!poll) continue; } + poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); r = kevent64(_dispatch_kq, _dispatch_kevent_enable, _dispatch_kevent_enable ? 1 : 0, &kev, 1, 0, poll ? &timeout_immediately : NULL); @@ -2295,10 +2324,12 @@ _dispatch_memorystatus_handler(void *context DISPATCH_UNUSED) memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source); if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) { _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; + _voucher_activity_heap_pressure_normal(); return; } _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN; + _voucher_activity_heap_pressure_warn(); #endif malloc_zone_pressure_relief(0,0); } @@ -2309,7 +2340,7 @@ _dispatch_memorystatus_init(void) _dispatch_memorystatus_source = dispatch_source_create( DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0, DISPATCH_MEMORYSTATUS_SOURCE_MASK, - _dispatch_get_root_queue(0, true)); + _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true)); dispatch_source_set_event_handler_f(_dispatch_memorystatus_source, _dispatch_memorystatus_handler); dispatch_resume(_dispatch_memorystatus_source); @@ -2352,11 +2383,15 @@ static inline void _dispatch_memorystatus_init(void) {} #ifndef MACH_RCV_LARGE_IDENTITY #define MACH_RCV_LARGE_IDENTITY 0x00000008 #endif +#ifndef MACH_RCV_VOUCHER +#define MACH_RCV_VOUCHER 0x00000800 +#endif #define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX #define DISPATCH_MACH_RCV_OPTIONS ( \ MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \ + MACH_RCV_VOUCHER #define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) @@ -2373,18 
+2408,21 @@ static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, bool disconnected); -static void _dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, +static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); +static inline void _dispatch_mach_msg_set_options(dispatch_object_t dou, + mach_msg_option_t options); +static void _dispatch_mach_msg_recv(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr, mach_msg_size_t siz); static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke); -static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); +static inline mach_msg_option_t _dispatch_mach_checkin_options(void); static const size_t _dispatch_mach_recv_msg_size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; static const size_t dispatch_mach_trailer_size = sizeof(dispatch_mach_trailer_t); -static const size_t _dispatch_mach_recv_msg_buf_size = mach_vm_round_page( - _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); +static mach_msg_size_t _dispatch_mach_recv_msg_buf_size; static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; static mach_port_t _dispatch_mach_notify_port; static struct kevent64_s _dispatch_mach_recv_kevent = { @@ -2405,7 +2443,9 @@ struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { static void _dispatch_mach_recv_msg_buf_init(void) { - mach_vm_size_t vm_size = _dispatch_mach_recv_msg_buf_size; + mach_vm_size_t vm_size = mach_vm_round_page( + _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); + _dispatch_mach_recv_msg_buf_size = (mach_msg_size_t)vm_size; mach_vm_address_t vm_addr = vm_page_size; kern_return_t kr; @@ -2419,7 +2459,7 @@ _dispatch_mach_recv_msg_buf_init(void) vm_addr = vm_page_size; } 
_dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; - _dispatch_mach_recv_kevent.ext[1] = _dispatch_mach_recv_msg_buf_size; + _dispatch_mach_recv_kevent.ext[1] = vm_size; } static inline void* @@ -2457,8 +2497,11 @@ _dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) _dispatch_mach_notify_source = dispatch_source_create( &_dispatch_source_type_mach_recv_direct, _dispatch_mach_notify_port, 0, &_dispatch_mgr_q); - _dispatch_mach_notify_source->ds_refs->ds_handler_func = - (void*)_dispatch_mach_notify_source_invoke; + static const struct dispatch_continuation_s dc = { + .dc_func = (void*)_dispatch_mach_notify_source_invoke, + }; + _dispatch_mach_notify_source->ds_refs->ds_handler[DS_EVENT_HANDLER] = + (dispatch_continuation_t)&dc; dispatch_assert(_dispatch_mach_notify_source); dispatch_resume(_dispatch_mach_notify_source); } @@ -2772,11 +2815,11 @@ _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, _dispatch_mach_notify_source_invoke(hdr); return _dispatch_kevent_mach_msg_destroy(hdr); } + dispatch_mach_reply_refs_t dmr = NULL; if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) { - _dispatch_mach_reply_kevent_unregister((dispatch_mach_t)ds, - (dispatch_mach_reply_refs_t)dr, false); + dmr = (dispatch_mach_reply_refs_t)dr; } - return _dispatch_mach_msg_recv((dispatch_mach_t)ds, hdr, siz); + return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz); } DISPATCH_ALWAYS_INLINE @@ -2911,7 +2954,7 @@ _dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) { (void)_dispatch_get_mach_recv_portset(); _dispatch_debug("registering for calendar-change notification"); - kern_return_t kr = host_request_notification(mach_host_self(), + kern_return_t kr = host_request_notification(_dispatch_get_mach_host_port(), HOST_NOTIFY_CALENDAR_CHANGE, _dispatch_mach_notify_port); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); @@ -2994,17 +3037,20 @@ _dispatch_mach_notify_send_possible(mach_port_t notify 
DISPATCH_UNUSED, #pragma mark dispatch_mach_t #define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2) -#define DISPATCH_MACH_PSEUDO_RECEIVED 0x1 #define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 #define DISPATCH_MACH_OPTIONS_MASK 0xffff static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, mach_port_t remote_port); +static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( + dispatch_object_t dou, dispatch_mach_reply_refs_t dmr); static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou); static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( dispatch_mach_msg_t dmsg); +static void _dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, + pthread_priority_t pp); static dispatch_mach_t _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, @@ -3028,7 +3074,7 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dr->dm_handler_func = handler; dr->dm_handler_ctxt = context; dm->ds_refs = dr; - dm->ds_handler_is_block = handler_is_block; + dm->dm_handler_is_block = handler_is_block; dm->dm_refs = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_send_refs_s)); @@ -3063,7 +3109,7 @@ _dispatch_mach_dispose(dispatch_mach_t dm) { _dispatch_object_debug(dm, "%s", __func__); dispatch_mach_refs_t dr = dm->ds_refs; - if (dm->ds_handler_is_block && dr->dm_handler_ctxt) { + if (dm->dm_handler_is_block && dr->dm_handler_ctxt) { Block_release(dr->dm_handler_ctxt); } free(dr); @@ -3093,6 +3139,8 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, if (MACH_PORT_VALID(send)) { if (checkin) { dispatch_retain(checkin); + mach_msg_option_t options = _dispatch_mach_checkin_options(); + _dispatch_mach_msg_set_options(checkin, options); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } dr->dm_checkin = checkin; @@ -3112,21 +3160,23 @@ static void 
_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, bool disconnected) { - dispatch_kevent_t dk = dmr->dm_dkev; - mach_port_t local_port = (mach_port_t)dk->dk_kevent.ident; + dispatch_mach_msg_t dmsgr = NULL; + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); + } + dispatch_kevent_t dk = dmr->dmr_dkev; TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE); - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dm_list); + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher); free(dmr); - if (disconnected) { - _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); - } + if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); } DISPATCH_NOINLINE static void _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, - void *ctxt) + dispatch_mach_msg_t dmsg) { dispatch_kevent_t dk; dispatch_mach_reply_refs_t dmr; @@ -3137,22 +3187,26 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; dk->dk_kevent.udata = (uintptr_t)dk; - // make reply context visible to leaks rdar://11777199 - dk->dk_kevent.ext[1] = (uintptr_t)ctxt; TAILQ_INIT(&dk->dk_sources); dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); dmr->dr_source_wref = _dispatch_ptr2wref(dm); - dmr->dm_dkev = dk; + dmr->dmr_dkev = dk; + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher =_voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = dmsg->dmsg_priority; + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply, - ctxt); + dmsg->do_ctxt); uint32_t flags; - bool do_resume = 
_dispatch_kevent_register(&dmr->dm_dkev, &flags); - TAILQ_INSERT_TAIL(&dmr->dm_dkev->dk_sources, (dispatch_source_refs_t)dmr, + bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, &flags); + TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dm_list); - if (do_resume && _dispatch_kevent_resume(dmr->dm_dkev, flags, 0)) { + TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); + if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) { _dispatch_mach_reply_kevent_unregister(dm, dmr, true); } } @@ -3198,9 +3252,10 @@ _dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) } static inline void -_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou) +_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, + pthread_priority_t pp) { - return _dispatch_queue_push(dm._dq, dou); + return _dispatch_queue_push(dm._dq, dou, pp); } static inline void @@ -3239,8 +3294,8 @@ _dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) } static void -_dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, - mach_msg_size_t siz) +_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, + mach_msg_header_t *hdr, mach_msg_size_t siz) { _dispatch_debug_machport(hdr->msgh_remote_port); _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", @@ -3249,13 +3304,32 @@ _dispatch_mach_msg_recv(dispatch_mach_t dm, mach_msg_header_t *hdr, return _dispatch_kevent_mach_msg_destroy(hdr); } dispatch_mach_msg_t dmsg; + voucher_t voucher; + pthread_priority_t priority; + void *ctxt = NULL; + if (dmr) { + _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher + voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + priority = dmr->dmr_priority; + ctxt = dmr->dmr_ctxt; + _dispatch_mach_reply_kevent_unregister(dm, dmr, false); + } else { + voucher = 
voucher_create_with_mach_msg(hdr); + priority = _voucher_get_priority(voucher); + } dispatch_mach_msg_destructor_t destructor; destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ? DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : DISPATCH_MACH_MSG_DESTRUCTOR_FREE; dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + dmsg->dmsg_voucher = voucher; + dmsg->dmsg_priority = priority; + dmsg->do_ctxt = ctxt; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); - return _dispatch_mach_push(dm, dmsg); + _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); + _dispatch_voucher_ktrace_dmsg_push(dmsg); + return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); } static inline mach_port_t @@ -3266,24 +3340,6 @@ _dispatch_mach_msg_get_remote_port(dispatch_object_t dou) return remote; } -static inline mach_port_t -_dispatch_mach_msg_get_reply_port(dispatch_mach_t dm, dispatch_object_t dou) -{ - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t reply = MACH_PORT_NULL; - mach_msg_option_t msg_opts = _dispatch_mach_msg_get_options(dou); - if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { - reply = hdr->msgh_reserved; - hdr->msgh_reserved = 0; - } else if (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == - MACH_MSG_TYPE_MAKE_SEND_ONCE && - MACH_PORT_VALID(hdr->msgh_local_port) && (!dm->ds_dkev || - dm->ds_dkev->dk_kevent.ident != hdr->msgh_local_port)) { - reply = hdr->msgh_local_port; - } - return reply; -} - static inline void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, mach_port_t remote_port) @@ -3295,19 +3351,45 @@ _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, if (local_port) hdr->msgh_local_port = local_port; if (remote_port) hdr->msgh_remote_port = remote_port; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); - return _dispatch_mach_push(dm, dmsg); + return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); +} + +static inline 
dispatch_mach_msg_t +_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr) +{ + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + if (dmsg && !dmsg->dmsg_reply) return NULL; + mach_msg_header_t *hdr; + dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + if (dmsg) { + hdr->msgh_local_port = dmsg->dmsg_reply; + if (dmsg->dmsg_voucher) { + dmsgr->dmsg_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmsgr->dmsg_priority = dmsg->dmsg_priority; + dmsgr->do_ctxt = dmsg->do_ctxt; + } else { + hdr->msgh_local_port = (mach_port_t)dmr->dmr_dkev->dk_kevent.ident; + dmsgr->dmsg_voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + dmsgr->dmsg_priority = dmr->dmr_priority; + dmsgr->do_ctxt = dmr->dmr_ctxt; + } + _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED); + return dmsgr; } DISPATCH_NOINLINE static void _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) { - mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dou); - _dispatch_mach_msg_set_reason(dou, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); - _dispatch_mach_push(dm, dou); - if (reply) { - _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); - } + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); + _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); } DISPATCH_NOINLINE @@ -3315,7 +3397,10 @@ static dispatch_object_t _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) { dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_mach_msg_t dmsg = dou._dmsg; + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; + voucher_t voucher = dmsg->dmsg_voucher; + mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; + bool clear_voucher = false, 
kvoucher_move_send = false; dr->dm_needs_mgr = 0; if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) { // send initial checkin message @@ -3332,10 +3417,10 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); mach_msg_return_t kr = 0; - mach_port_t reply = _dispatch_mach_msg_get_reply_port(dm, dmsg); + mach_port_t reply = dmsg->dmsg_reply; mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg); if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { - opts = MACH_SEND_MSG | (msg_opts & DISPATCH_MACH_OPTIONS_MASK); + opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) != MACH_MSG_TYPE_MOVE_SEND_ONCE) { if (dmsg != dr->dm_checkin) { @@ -3353,16 +3438,38 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } } opts |= MACH_SEND_TIMEOUT; + if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + ipc_kvoucher = _voucher_create_mach_voucher_with_priority( + voucher, dmsg->dmsg_priority); + } + _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); + if (ipc_kvoucher) { + kvoucher_move_send = true; + clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, + ipc_kvoucher, kvoucher_move_send); + } else { + clear_voucher = _voucher_mach_msg_set(msg, voucher); + } } + _voucher_activity_trace_msg(voucher, msg, send); _dispatch_debug_machport(msg->msgh_remote_port); if (reply) _dispatch_debug_machport(reply); kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, MACH_PORT_NULL); + _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " + "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " + "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + opts, msg_opts, msg->msgh_voucher_port, reply, + mach_error_string(kr), kr); + if (clear_voucher) { + if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { + DISPATCH_CRASH("Voucher port 
corruption"); + } + mach_voucher_t kv; + kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); + if (kvoucher_move_send) ipc_kvoucher = kv; + } } - _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, opts 0x%x, " - "msg_opts 0x%x, reply on 0x%08x: %s - 0x%x", msg->msgh_remote_port, - msg->msgh_id, dmsg->do_ctxt, opts, msg_opts, reply, - mach_error_string(kr), kr); if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { if (opts & MACH_SEND_NOTIFY) { _dispatch_debug("machport[0x%08x]: send-possible notification " @@ -3372,42 +3479,50 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) // send kevent must be installed on the manager queue dr->dm_needs_mgr = 1; } - if (reply) { - _dispatch_mach_msg_set_options(dmsg, msg_opts | - DISPATCH_MACH_PSEUDO_RECEIVED); - msg->msgh_reserved = reply; // Remember the original reply port + if (ipc_kvoucher) { + _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); + voucher_t ipc_voucher; + ipc_voucher = _voucher_create_with_priority_and_mach_voucher( + voucher, dmsg->dmsg_priority, ipc_kvoucher); + _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", + ipc_voucher, dmsg, voucher); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = ipc_voucher; } goto out; + } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { + _voucher_dealloc_mach_voucher(ipc_kvoucher); } - if (fastpath(!kr) && reply) { + if (fastpath(!kr) && reply && + !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply)) { if (_dispatch_queue_get_current() != &_dispatch_mgr_q) { // reply receive kevent must be installed on the manager queue dr->dm_needs_mgr = 1; _dispatch_mach_msg_set_options(dmsg, msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY); - if (msg_opts & DISPATCH_MACH_PSEUDO_RECEIVED) { - msg->msgh_reserved = reply; // Remember the original reply port - } goto out; } - _dispatch_mach_reply_kevent_register(dm, reply, dmsg->do_ctxt); + _dispatch_mach_reply_kevent_register(dm, 
reply, dmsg); } if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) { _dispatch_mach_kevent_unregister(dm); } - _dispatch_mach_msg_set_reason(dmsg, kr, 0); - _dispatch_mach_push(dm, dmsg); - dmsg = NULL; - if (slowpath(kr) && reply) { + if (slowpath(kr)) { // Send failed, so reply was never connected - _dispatch_mach_msg_disconnected(dm, reply, MACH_PORT_NULL); + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); } + _dispatch_mach_msg_set_reason(dmsg, kr, 0); + _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); + dmsg = NULL; out: return (dispatch_object_t)dmsg; } -static void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_send_push_wakeup(dispatch_mach_t dm, dispatch_object_t dou, + bool wakeup) { dispatch_mach_send_refs_t dr = dm->dm_refs; struct dispatch_object_s *prev, *dc = dou._do; @@ -3416,10 +3531,19 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release); if (fastpath(prev)) { prev->do_next = dc; - return; + } else { + dr->dm_head = dc; + } + if (wakeup || !prev) { + _dispatch_wakeup(dm); } - dr->dm_head = dc; - _dispatch_wakeup(dm); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) +{ + return _dispatch_mach_send_push_wakeup(dm, dou, false); } DISPATCH_NOINLINE @@ -3429,25 +3553,21 @@ _dispatch_mach_send_drain(dispatch_mach_t dm) dispatch_mach_send_refs_t dr = dm->dm_refs; struct dispatch_object_s *dc = NULL, *next_dc = NULL; while (dr->dm_tail) { - while (!(dc = fastpath(dr->dm_head))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(dc = fastpath(dr->dm_head)); do { next_dc = fastpath(dc->do_next); dr->dm_head = next_dc; if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL, relaxed)) { - // Enqueue is TIGHTLY controlled, we won't 
wait long. - while (!(next_dc = fastpath(dc->do_next))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(next_dc = fastpath(dc->do_next)); dr->dm_head = next_dc; } if (!DISPATCH_OBJ_IS_VTABLE(dc)) { if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { // send barrier // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc); + return _dispatch_mach_push(dm, dc, + ((dispatch_continuation_t)dc)->dc_priority); } #if DISPATCH_MACH_SEND_SYNC if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){ @@ -3461,6 +3581,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm) } continue; } + _dispatch_voucher_ktrace_dmsg_pop((dispatch_mach_msg_t)dc); if (slowpath(dr->dm_disconnect_cnt) || slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { _dispatch_mach_msg_not_sent(dm, dc); @@ -3477,9 +3598,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm) if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) { // wait for enqueue slow path to finish - while (!(next_dc = fastpath(dr->dm_head))) { - dispatch_hardware_pause(); - } + _dispatch_wait_until(next_dc = fastpath(dr->dm_head)); dc->do_next = next_dc; } dr->dm_head = dc; @@ -3510,6 +3629,24 @@ _dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke) _dispatch_mach_send(dm); } +static inline mach_msg_option_t +_dispatch_mach_checkin_options(void) +{ + mach_msg_option_t options = 0; +#if DISPATCH_USE_CHECKIN_NOIMPORTANCE + options = MACH_SEND_NOIMPORTANCE; // +#endif + return options; +} + + +static inline mach_msg_option_t +_dispatch_mach_send_options(void) +{ + mach_msg_option_t options = 0; + return options; +} + DISPATCH_NOINLINE void dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, @@ -3521,18 +3658,32 @@ dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, } dispatch_retain(dmsg); dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options |= _dispatch_mach_send_options(); _dispatch_mach_msg_set_options(dmsg, 
options & ~DISPATCH_MACH_OPTIONS_MASK); - if (slowpath(dr->dm_tail) || slowpath(dr->dm_disconnect_cnt) || + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + dmsg->dmsg_reply = (MACH_MSGH_BITS_LOCAL(msg->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE && + MACH_PORT_VALID(msg->msgh_local_port) ? msg->msgh_local_port : + MACH_PORT_NULL); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + dmsg->dmsg_priority = _dispatch_priority_propagate(); + dmsg->dmsg_voucher = _voucher_copy(); + _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); + if ((!is_reply && slowpath(dr->dm_tail)) || + slowpath(dr->dm_disconnect_cnt) || slowpath(dm->ds_atomic_flags & DSF_CANCELED) || slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, acquire))) { + _dispatch_voucher_ktrace_dmsg_push(dmsg); return _dispatch_mach_send_push(dm, dmsg); } if (slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) { (void)dispatch_atomic_dec2o(dr, dm_sending, release); - return _dispatch_mach_send_push(dm, dmsg); + _dispatch_voucher_ktrace_dmsg_push(dmsg); + return _dispatch_mach_send_push_wakeup(dm, dmsg, true); } - if (slowpath(dr->dm_tail)) { + if (!is_reply && slowpath(dr->dm_tail)) { return _dispatch_mach_send_drain(dm); } (void)dispatch_atomic_dec2o(dr, dm_sending, release); @@ -3556,7 +3707,7 @@ _dispatch_mach_disconnect(dispatch_mach_t dm) } if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { dispatch_mach_reply_refs_t dmr, tmp; - TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dm_list, tmp){ + TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp){ _dispatch_mach_reply_kevent_unregister(dm, dmr, true); } } @@ -3610,6 +3761,8 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, (void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); if (MACH_PORT_VALID(send) && checkin) { dispatch_retain(checkin); + mach_msg_option_t options = _dispatch_mach_checkin_options(); + _dispatch_mach_msg_set_options(checkin, 
options); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } else { checkin = NULL; @@ -3672,12 +3825,18 @@ _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg) dmsg->do_next = DISPATCH_OBJECT_LISTLESS; _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); + _dispatch_adopt_priority_and_replace_voucher(dmsg->dmsg_priority, + dmsg->dmsg_voucher, DISPATCH_PRIORITY_ENFORCE); + dmsg->dmsg_voucher = NULL; if (slowpath(!dm->dm_connect_handler_called)) { _dispatch_mach_connect_invoke(dm); } _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, dr->dm_handler_func); _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + _dispatch_introspection_queue_item_complete(dmsg); dispatch_release(dmsg); } @@ -3716,6 +3875,8 @@ dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, dc->dc_ctxt = dc; dc->dc_data = context; dc->dc_other = barrier; + _dispatch_continuation_voucher_set(dc, 0); + _dispatch_continuation_priority_set(dc, 0, 0); dispatch_mach_send_refs_t dr = dm->dm_refs; if (slowpath(dr->dm_tail) || slowpath(!dispatch_atomic_cmpxchg2o(dr, @@ -3723,7 +3884,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, return _dispatch_mach_send_push(dm, dc); } // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc); + return _dispatch_mach_push(dm, dc, dc->dc_priority); } DISPATCH_NOINLINE @@ -3737,7 +3898,10 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, dc->dc_ctxt = dc; dc->dc_data = context; dc->dc_other = barrier; - return _dispatch_mach_push(dm, dc); + _dispatch_continuation_voucher_set(dc, 0); + _dispatch_continuation_priority_set(dc, 0, 0); + + return _dispatch_mach_push(dm, dc, dc->dc_priority); } DISPATCH_NOINLINE @@ -3823,6 +3987,10 @@ _dispatch_mach_invoke2(dispatch_object_t dou, // An item on the channel changed 
the target queue return dm->do_targetq; } + } else if (dr->dm_sending) { + // Sending and uninstallation below require the send lock, the channel + // will be woken up when the lock is dropped + return NULL; } else if (dr->dm_tail) { if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) && (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) { @@ -3877,9 +4045,13 @@ _dispatch_mach_probe(dispatch_mach_t dm) if (slowpath(!dm->ds_is_installed)) { // The channel needs to be installed on the manager queue. return true; - } else if (dm->dq_items_tail) { + } else if (_dispatch_queue_class_probe(dm)) { // The source has pending messages to deliver to the target queue. return true; + } else if (dr->dm_sending) { + // Sending and uninstallation below require the send lock, the channel + // will be woken up when the lock is dropped + return false; } else if (dr->dm_tail && (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) { @@ -3912,16 +4084,17 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, } dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), sizeof(struct dispatch_mach_msg_s) + - (destructor ? 0 : size - sizeof(dmsg->msg))); + (destructor ? 
0 : size - sizeof(dmsg->dmsg_msg))); if (destructor) { - dmsg->msg = msg; + dmsg->dmsg_msg = msg; } else if (msg) { - memcpy(dmsg->buf, msg, size); + memcpy(dmsg->dmsg_buf, msg, size); } dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dmsg->do_targetq = _dispatch_get_root_queue(0, false); - dmsg->destructor = destructor; - dmsg->size = size; + dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + false); + dmsg->dmsg_destructor = destructor; + dmsg->dmsg_size = size; if (msg_ptr) { *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); } @@ -3931,15 +4104,19 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) { - switch (dmsg->destructor) { + if (dmsg->dmsg_voucher) { + _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + } + switch (dmsg->dmsg_destructor) { case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: break; case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: - free(dmsg->msg); + free(dmsg->dmsg_msg); break; case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { - mach_vm_size_t vm_size = dmsg->size; - mach_vm_address_t vm_addr = (uintptr_t)dmsg->msg; + mach_vm_size_t vm_size = dmsg->dmsg_size; + mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg; (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), vm_addr, vm_size)); break; @@ -3949,14 +4126,15 @@ _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) static inline mach_msg_header_t* _dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) { - return dmsg->destructor ? dmsg->msg : (mach_msg_header_t*)dmsg->buf; + return dmsg->dmsg_destructor ? 
dmsg->dmsg_msg : + (mach_msg_header_t*)dmsg->dmsg_buf; } mach_msg_header_t* dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) { if (size_ptr) { - *size_ptr = dmsg->size; + *size_ptr = dmsg->dmsg_size; } return _dispatch_mach_msg_get_msg(dmsg); } @@ -3970,7 +4148,7 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " - "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->buf); + "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->dmsg_buf); mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); if (hdr->msgh_id) { offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", @@ -4015,7 +4193,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, { mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) - | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0); + | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; mach_msg_return_t kr = 0; @@ -4125,7 +4303,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, (void)dispatch_assume_zero(r); } #endif - + _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { @@ -4263,7 +4441,8 @@ _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) { size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dm->dq_label ? dm->dq_label : dx_kind(dm), dm); + dm->dq_label && !dm->dm_cancel_handler_called ? 
dm->dq_label : + dx_kind(dm), dm); offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); @@ -4482,8 +4661,8 @@ dispatch_debug_machport(mach_port_t name, const char* str) MACH_PORT_RIGHT_DEAD_NAME, &nd)); } if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { - (void)dispatch_assume_zero(mach_port_dnrequest_info(mach_task_self(), - name, &dnrsiz, &dnreqs)); + kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs); + if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr); } if (type & MACH_PORT_TYPE_RECEIVE) { mach_port_status_t status = { .mps_pset = 0, }; diff --git a/src/source_internal.h b/src/source_internal.h index 1a023cf2d..12ccdda97 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -123,15 +123,18 @@ struct dispatch_timer_source_s { unsigned long missed; }; +enum { + DS_EVENT_HANDLER = 0, + DS_CANCEL_HANDLER, + DS_REGISTN_HANDLER, +}; + // Source state which may contain references to the source object // Separately allocated so that 'leaks' can see sources typedef struct dispatch_source_refs_s { TAILQ_ENTRY(dispatch_source_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t - dispatch_function_t ds_handler_func; - void *ds_handler_ctxt; - void *ds_cancel_handler; - void *ds_registration_handler; + dispatch_continuation_t ds_handler[3]; } *dispatch_source_refs_t; typedef struct dispatch_timer_source_refs_s { @@ -176,9 +179,9 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) ds_is_installed:1, \ ds_needs_rearm:1, \ ds_is_timer:1, \ - ds_cancel_is_block:1, \ - ds_handler_is_block:1, \ - ds_registration_is_block:1, \ + ds_vmpressure_override:1, \ + ds_memorystatus_override:1, \ + dm_handler_is_block:1, \ dm_connect_handler_called:1, \ dm_cancel_handler_called:1; \ unsigned long ds_pending_data_mask; @@ -206,8 +209,11 @@ typedef struct 
dispatch_mach_refs_s *dispatch_mach_refs_t; struct dispatch_mach_reply_refs_s { TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_kevent_t dm_dkev; - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dm_list; + dispatch_kevent_t dmr_dkev; + void *dmr_ctxt; + pthread_priority_t dmr_priority; + voucher_t dmr_voucher; + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; }; typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; @@ -237,11 +243,14 @@ struct dispatch_mach_s { DISPATCH_CLASS_DECL(mach_msg); struct dispatch_mach_msg_s { DISPATCH_STRUCT_HEADER(mach_msg); - dispatch_mach_msg_destructor_t destructor; - size_t size; + mach_port_t dmsg_reply; + pthread_priority_t dmsg_priority; + voucher_t dmsg_voucher; + dispatch_mach_msg_destructor_t dmsg_destructor; + size_t dmsg_size; union { - mach_msg_header_t *msg; - char buf[0]; + mach_msg_header_t *dmsg_msg; + char dmsg_buf[0]; }; }; @@ -257,6 +266,8 @@ void _dispatch_source_invoke(dispatch_source_t ds); unsigned long _dispatch_source_probe(dispatch_source_t ds); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); +void _dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, + void *ctxt, dispatch_function_t handler); void _dispatch_mach_dispose(dispatch_mach_t dm); void _dispatch_mach_invoke(dispatch_mach_t dm); diff --git a/src/time.c b/src/time.c index a1a89242a..35b0e5201 100644 --- a/src/time.c +++ b/src/time.c @@ -44,7 +44,9 @@ _dispatch_get_nanoseconds(void) #if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \ || TARGET_OS_WIN32 -DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data; +DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = { + .ratio_1_to_1 = true, +}; void _dispatch_get_host_time_init(void *context DISPATCH_UNUSED) diff --git 
a/src/trace.h b/src/trace.h index 9a0f15289..df27ca81b 100644 --- a/src/trace.h +++ b/src/trace.h @@ -27,16 +27,17 @@ #ifndef __DISPATCH_TRACE__ #define __DISPATCH_TRACE__ -#if DISPATCH_USE_DTRACE && !__OBJC2__ +#if !__OBJC2__ +#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { int64_t deadline, interval, leeway; } *dispatch_trace_timer_params_t; #include "provider.h" +#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION #if DISPATCH_USE_DTRACE_INTROSPECTION - #define _dispatch_trace_callout(_c, _f, _dcc) do { \ if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \ @@ -51,7 +52,12 @@ typedef struct dispatch_trace_timer_params_s { _dcc; \ } \ } while (0) +#elif DISPATCH_INTROSPECTION +#define _dispatch_trace_callout(_c, _f, _dcc) \ + do { (void)(_c); (void)(_f); _dcc; } while (0) +#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION +#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_client_callout(void *ctxt, dispatch_function_t f) @@ -73,40 +79,33 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_introspection_callout_return(ctxt, func); } -#ifdef __BLOCKS__ -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_trace_client_callout_block(dispatch_block_t b) -{ - dispatch_function_t func = _dispatch_Block_invoke(b); - _dispatch_introspection_callout_entry(b, func); - _dispatch_trace_callout(b, func, _dispatch_client_callout(b, func)); - _dispatch_introspection_callout_return(b, func); -} -#endif - #define _dispatch_client_callout _dispatch_trace_client_callout #define _dispatch_client_callout2 _dispatch_trace_client_callout2 -#define _dispatch_client_callout_block _dispatch_trace_client_callout_block +#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION +#if DISPATCH_USE_DTRACE_INTROSPECTION #define 
_dispatch_trace_continuation(_q, _o, _t) do { \ dispatch_queue_t _dq = (_q); \ const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \ struct dispatch_object_s *_do = (_o); \ + dispatch_continuation_t _dc; \ char *_kind; \ dispatch_function_t _func; \ void *_ctxt; \ if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ - _ctxt = _do->do_ctxt; \ _kind = (char*)dx_kind(_do); \ if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ - _func = ((dispatch_source_t)_do)->ds_refs->ds_handler_func; \ + dispatch_source_t _ds = (dispatch_source_t)_do; \ + _dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \ + _func = _dc->dc_func; \ + _ctxt = _dc->dc_ctxt; \ } else { \ _func = (dispatch_function_t)_dispatch_queue_invoke; \ + _ctxt = _do->do_ctxt; \ } \ } else { \ - struct dispatch_continuation_s *_dc = (void*)(_do); \ + _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ _kind = "semaphore"; \ @@ -121,11 +120,18 @@ _dispatch_trace_client_callout_block(dispatch_block_t b) } \ _t(_dq, _label, _do, _kind, _func, _ctxt); \ } while (0) - +#elif DISPATCH_INTROSPECTION +#define _dispatch_trace_continuation(_q, _o, _t) \ + do { (void)(_q); (void)(_o); } while(0) +#define DISPATCH_QUEUE_PUSH_ENABLED() 0 +#define DISPATCH_QUEUE_POP_ENABLED() 0 +#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION + +#if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, unsigned int n) + dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _head._do; @@ -134,39 +140,50 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, } while (dou != _tail._do && (dou = dou->do_next)); } _dispatch_introspection_queue_push_list(dq, 
_head, _tail); - _dispatch_queue_push_list(dq, _head, _tail, n); + _dispatch_queue_push_list(dq, _head, _tail, pp, n); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail) +_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, pthread_priority_t pp) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push(dq, _tail); + _dispatch_queue_push(dq, _tail, pp); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - bool wakeup) + pthread_priority_t pp, bool wakeup) +{ + if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + struct dispatch_object_s *dou = _tail._do; + _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + } + _dispatch_introspection_queue_push(dq, _tail); + _dispatch_queue_push_wakeup(dq, _tail, pp, wakeup); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_wakeup(dq, _tail, wakeup); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) +_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou, pthread_priority_t pp) { - _dispatch_queue_push(dq, dou); + _dispatch_queue_push(dq, dou, pp); } #define _dispatch_queue_push_list _dispatch_trace_queue_push_list @@ -175,23 +192,30 @@ _dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_continuation_pop(dispatch_queue_t dq, - dispatch_object_t 
dou) +_dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) { if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) { _dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP); } _dispatch_introspection_queue_pop(dq, dou); } +#else +#define _dispatch_queue_push_notrace _dispatch_queue_push +#define _dispatch_trace_continuation_push(dq, dou) \ + do { (void)(dq); (void)(dou); } while(0) +#define _dispatch_trace_continuation_pop(dq, dou) \ + do { (void)(dq); (void)(dou); } while(0) +#endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION -#endif // DISPATCH_USE_DTRACE_INTROSPECTION - +#if DISPATCH_USE_DTRACE static inline dispatch_function_t _dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr) { - dispatch_function_t func = dr->ds_handler_func; - if (func == _dispatch_after_timer_callback) { - dispatch_continuation_t dc = ds->do_ctxt; + dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; + dispatch_function_t func = dc ? dc->dc_func : NULL; + if (func == _dispatch_after_timer_callback && + !(ds->ds_atomic_flags & DSF_CANCELED)) { + dc = ds->do_ctxt; func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func : dc->dc_ctxt ? 
_dispatch_Block_invoke(dc->dc_ctxt) : NULL; } @@ -295,14 +319,8 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #define _dispatch_trace_timer_fire(dr, data, missed) \ do { (void)(dr); (void)(data); (void)(missed); } while(0) -#endif // DISPATCH_USE_DTRACE && !__OBJC2__ - -#if !DISPATCH_USE_DTRACE_INTROSPECTION - -#define _dispatch_queue_push_notrace _dispatch_queue_push -#define _dispatch_trace_continuation_pop(dq, dou) \ - do { (void)(dq); (void)(dou); } while(0) +#endif // DISPATCH_USE_DTRACE -#endif // !DISPATCH_USE_DTRACE_INTROSPECTION +#endif // !__OBJC2__ #endif // __DISPATCH_TRACE__ diff --git a/src/voucher.c b/src/voucher.c new file mode 100644 index 000000000..e886fafbe --- /dev/null +++ b/src/voucher.c @@ -0,0 +1,2774 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if VOUCHER_USE_MACH_VOUCHER + +#include + +// +#ifndef VM_MEMORY_GENEALOGY +#define VM_MEMORY_GENEALOGY 78 +#endif + +#ifndef VOUCHER_ATM_COLLECT_THRESHOLD +#define VOUCHER_ATM_COLLECT_THRESHOLD 1 +#endif +#define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2) +static volatile long _voucher_atm_collect_level; +static long _voucher_atm_collect_threshold = + VATM_COLLECT_THRESHOLD_VALUE(VOUCHER_ATM_COLLECT_THRESHOLD); +static unsigned long _voucher_atm_subid_bits; + +typedef struct _voucher_atm_s *_voucher_atm_t; + +static void _voucher_activity_atfork_child(void); +static inline mach_voucher_t _voucher_get_atm_mach_voucher(voucher_t voucher); +static inline mach_voucher_t _voucher_activity_get_atm_mach_voucher( + _voucher_activity_t act); +static inline _voucher_activity_t _voucher_activity_get(voucher_t voucher); +static _voucher_activity_t _voucher_activity_copy_from_mach_voucher( + mach_voucher_t kv, voucher_activity_id_t va_id); +static inline _voucher_activity_t _voucher_activity_retain( + _voucher_activity_t act); +static inline void _voucher_activity_release(_voucher_activity_t act); + +#pragma mark - +#pragma mark voucher_t + +#if USE_OBJC +OS_OBJECT_OBJC_CLASS_DECL(voucher); +#define VOUCHER_CLASS OS_OBJECT_OBJC_CLASS(voucher) +#else +const _os_object_class_s _voucher_class = { + ._os_obj_xref_dispose = (void(*)(_os_object_t))_voucher_xref_dispose, + ._os_obj_dispose = (void(*)(_os_object_t))_voucher_dispose, +}; +#define VOUCHER_CLASS &_voucher_class +#endif // USE_OBJC + +static const voucher_activity_trace_id_t _voucher_activity_trace_id_release = + (voucher_activity_trace_id_t)voucher_activity_tracepoint_type_release << + _voucher_activity_trace_id_type_shift; +static const unsigned int _voucher_max_activities = 16; + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_recipes_init(mach_voucher_attr_recipe_data_t *recipes, + mach_voucher_attr_content_size_t bits_size) +{ + 
static const mach_voucher_attr_recipe_data_t base_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + }; + _voucher_recipes_base(recipes) = base_recipe; + static const mach_voucher_attr_recipe_data_t atm_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_ATM, + .command = MACH_VOUCHER_ATTR_COPY, + }; + _voucher_recipes_atm(recipes) = atm_recipe; + static const mach_voucher_attr_recipe_data_t bits_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, + }; + _voucher_recipes_bits(recipes) = bits_recipe; + if (!bits_size) return; + _voucher_recipes_bits(recipes).content_size = bits_size; + *_voucher_recipes_magic(recipes) = _voucher_magic_v1; +} + +static inline voucher_t +_voucher_alloc(unsigned int activities, pthread_priority_t priority, + mach_voucher_attr_recipe_size_t extra) +{ + if (activities > _voucher_max_activities) { + activities = _voucher_max_activities; + } + voucher_t voucher; + size_t voucher_size, recipes_size; + mach_voucher_attr_content_size_t bits_size; + recipes_size = (priority||activities||extra) ? _voucher_recipes_size() : 0; + bits_size = recipes_size ? _voucher_bits_size(activities) : 0; + voucher_size = sizeof(voucher_s) + recipes_size + bits_size + extra; + voucher = (voucher_t)_os_object_alloc_realized(VOUCHER_CLASS, voucher_size); +#if VOUCHER_ENABLE_RECIPE_OBJECTS + voucher->v_recipe_extra_size = extra; + voucher->v_recipe_extra_offset = voucher_size - extra; +#else + dispatch_assert(!extra); +#endif + voucher->v_has_priority = priority ? 1 : 0; + voucher->v_activities = activities; + if (!recipes_size) return voucher; + _voucher_recipes_init(voucher->v_recipes, bits_size); + *_voucher_priority(voucher) = (_voucher_priority_t)priority; + _dispatch_voucher_debug("alloc", voucher); + return voucher; +} + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +voucher_t +voucher_create(voucher_recipe_t recipe) +{ + // TODO: capture current activities or current kvoucher ? 
+ mach_voucher_attr_recipe_size_t extra = recipe ? recipe->vr_size : 0; + voucher_t voucher = _voucher_alloc(0, 0, extra); + if (extra) { + memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra); + } + return voucher; +} +#endif + +voucher_t +voucher_adopt(voucher_t voucher) +{ + return _voucher_adopt(voucher); +} + +voucher_t +voucher_copy(void) +{ + return _voucher_copy(); +} + +voucher_t +voucher_copy_without_importance(void) +{ + return _voucher_copy_without_importance(); +} + +void +_voucher_thread_cleanup(void *voucher) +{ + _voucher_swap(voucher, NULL); +} + +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; +#define _vouchers(kv) (&_vouchers[VL_HASH((kv))]) +static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT; +#define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock) +#define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock) + +static voucher_t +_voucher_find_and_retain(mach_voucher_t kv) +{ + voucher_t v; + if (!kv) return NULL; + _vouchers_lock_lock(); + TAILQ_FOREACH(v, _vouchers(kv), v_list) { + if (v->v_ipc_kvoucher == kv) { + int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed); + _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); + if (slowpath(xref_cnt < 0)) { + _dispatch_voucher_debug("overrelease", v); + DISPATCH_CRASH("Voucher overrelease"); + } + if (xref_cnt == 0) { + // resurrection: raced with _voucher_remove + (void)dispatch_atomic_inc2o(v, os_obj_ref_cnt, relaxed); + } + break; + } + } + _vouchers_lock_unlock(); + return v; +} + +static void +_voucher_insert(voucher_t v) +{ + mach_voucher_t kv = v->v_ipc_kvoucher; + if (!kv) return; + _vouchers_lock_lock(); + if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) { + _dispatch_voucher_debug("corruption", v); + DISPATCH_CRASH("Voucher corruption"); + } + TAILQ_INSERT_TAIL(_vouchers(kv), v, v_list); + _vouchers_lock_unlock(); +} + +static void +_voucher_remove(voucher_t v) +{ + mach_voucher_t kv = v->v_ipc_kvoucher; + if 
(!_TAILQ_IS_ENQUEUED(v, v_list)) return; + _vouchers_lock_lock(); + if (slowpath(!kv)) { + _dispatch_voucher_debug("corruption", v); + DISPATCH_CRASH("Voucher corruption"); + } + // check for resurrection race with _voucher_find_and_retain + if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 && + _TAILQ_IS_ENQUEUED(v, v_list)) { + TAILQ_REMOVE(_vouchers(kv), v, v_list); + _TAILQ_MARK_NOT_ENQUEUED(v, v_list); + v->v_list.tqe_next = (void*)~0ull; + } + _vouchers_lock_unlock(); +} + +void +_voucher_dealloc_mach_voucher(mach_voucher_t kv) +{ + _dispatch_kvoucher_debug("dealloc", kv); + _dispatch_voucher_debug_machport(kv); + kern_return_t kr = mach_voucher_deallocate(kv); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +static inline kern_return_t +_voucher_create_mach_voucher(const mach_voucher_attr_recipe_data_t *recipes, + size_t recipes_size, mach_voucher_t *kvp) +{ + kern_return_t kr; + mach_port_t mhp = _dispatch_get_mach_host_port(); + mach_voucher_t kv = MACH_VOUCHER_NULL; + mach_voucher_attr_raw_recipe_array_t kvr; + mach_voucher_attr_recipe_size_t kvr_size; + kvr = (mach_voucher_attr_raw_recipe_array_t)recipes; + kvr_size = (mach_voucher_attr_recipe_size_t)recipes_size; + kr = host_create_mach_voucher(mhp, kvr, kvr_size, &kv); + DISPATCH_VERIFY_MIG(kr); + if (!kr) { + _dispatch_kvoucher_debug("create", kv); + _dispatch_voucher_debug_machport(kv); + } + *kvp = kv; + return kr; +} + +#if __has_include() && !defined(VOUCHER_USE_ATTR_BANK) +#include +#define VOUCHER_USE_ATTR_BANK 1 +mach_voucher_t _voucher_default_task_mach_voucher; +#endif + +void +_voucher_task_mach_voucher_init(void* ctxt DISPATCH_UNUSED) +{ +#if VOUCHER_USE_ATTR_BANK + kern_return_t kr; + mach_voucher_t kv; + static const mach_voucher_attr_recipe_data_t task_create_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_BANK_CREATE, + }; + kr = _voucher_create_mach_voucher(&task_create_recipe, + sizeof(task_create_recipe), &kv); + if 
(dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH("Could not create task mach voucher"); + } + _voucher_default_task_mach_voucher = kv; + _voucher_task_mach_voucher = kv; +#endif +} + +void +voucher_replace_default_voucher(void) +{ +#if VOUCHER_USE_ATTR_BANK + (void)_voucher_get_task_mach_voucher(); // initalize task mach voucher + mach_voucher_t kv, tkv = MACH_VOUCHER_NULL; + voucher_t v = _voucher_get(); + if (v && v->v_kvoucher) { + kern_return_t kr; + kv = v->v_ipc_kvoucher ? v->v_ipc_kvoucher : v->v_kvoucher; + const mach_voucher_attr_recipe_data_t task_copy_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kv, + }; + kr = _voucher_create_mach_voucher(&task_copy_recipe, + sizeof(task_copy_recipe), &tkv); + if (dispatch_assume_zero(kr)) { + tkv = MACH_VOUCHER_NULL; + } + } + if (!tkv) tkv = _voucher_default_task_mach_voucher; + kv = dispatch_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed); + if (kv && kv != _voucher_default_task_mach_voucher) { + _voucher_dealloc_mach_voucher(kv); + } + _dispatch_voucher_debug("kvoucher[0x%08x] replace default voucher", v, tkv); +#endif +} + +static inline mach_voucher_t +_voucher_get_atm_mach_voucher(voucher_t voucher) +{ + _voucher_activity_t act = _voucher_activity_get(voucher); + return _voucher_activity_get_atm_mach_voucher(act); +} + +mach_voucher_t +_voucher_get_mach_voucher(voucher_t voucher) +{ + if (!voucher) return MACH_VOUCHER_NULL; + if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher; + mach_voucher_t kvb = voucher->v_kvoucher; + if (!kvb) kvb = _voucher_get_task_mach_voucher(); + if (!voucher->v_has_priority && !voucher->v_activities && + !_voucher_extra_size(voucher)) { + return kvb; + } + kern_return_t kr; + mach_voucher_t kv, kvo; + _voucher_base_recipe(voucher).previous_voucher = kvb; + _voucher_atm_recipe(voucher).previous_voucher = + _voucher_get_atm_mach_voucher(voucher); + kr = _voucher_create_mach_voucher(voucher->v_recipes, + 
_voucher_recipes_size() + _voucher_extra_size(voucher) + + _voucher_bits_recipe(voucher).content_size, &kv); + if (dispatch_assume_zero(kr) || !kv){ + return MACH_VOUCHER_NULL; + } + if (!dispatch_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, + kv, &kvo, relaxed)) { + _voucher_dealloc_mach_voucher(kv); + kv = kvo; + } else { + if (kv == voucher->v_kvoucher) { + // if v_kvoucher == v_ipc_kvoucher we keep only one reference + _voucher_dealloc_mach_voucher(kv); + } + _voucher_insert(voucher); + _dispatch_voucher_debug("kvoucher[0x%08x] create", voucher, kv); + } + return kv; +} + +mach_voucher_t +_voucher_create_mach_voucher_with_priority(voucher_t voucher, + pthread_priority_t priority) +{ + if (priority == _voucher_get_priority(voucher)) { + return MACH_VOUCHER_NULL; // caller will use _voucher_get_mach_voucher + } + kern_return_t kr; + mach_voucher_t kv, kvb = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; + if (!kvb) kvb = _voucher_get_task_mach_voucher(); + mach_voucher_attr_recipe_data_t *recipes; + size_t recipes_size = _voucher_recipes_size(); + if (voucher && (voucher->v_has_priority || voucher->v_activities || + _voucher_extra_size(voucher))) { + recipes_size += _voucher_bits_recipe(voucher).content_size + + _voucher_extra_size(voucher); + recipes = alloca(recipes_size); + memcpy(recipes, voucher->v_recipes, recipes_size); + _voucher_recipes_atm(recipes).previous_voucher = + _voucher_get_atm_mach_voucher(voucher); + } else { + mach_voucher_attr_content_size_t bits_size = _voucher_bits_size(0); + recipes_size += bits_size; + recipes = alloca(recipes_size); + _voucher_recipes_init(recipes, bits_size); + } + _voucher_recipes_base(recipes).previous_voucher = kvb; + *_voucher_recipes_priority(recipes) = (_voucher_priority_t)priority; + kr = _voucher_create_mach_voucher(recipes, recipes_size, &kv); + if (dispatch_assume_zero(kr) || !kv){ + return MACH_VOUCHER_NULL; + } + _dispatch_kvoucher_debug("create with priority from voucher[%p]", kv, + 
voucher); + return kv; +} + +static voucher_t +_voucher_create_with_mach_voucher(mach_voucher_t kv) +{ + if (!kv) return NULL; + kern_return_t kr; + mach_voucher_t rkv; + mach_voucher_attr_recipe_t vr; + size_t vr_size; + mach_voucher_attr_recipe_size_t kvr_size = 0; + const mach_voucher_attr_recipe_data_t redeem_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kv, + }, +#if VOUCHER_USE_ATTR_BANK + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_REDEEM, + }, +#endif + }; + kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe), + &rkv); + if (!dispatch_assume_zero(kr)) { + _voucher_dealloc_mach_voucher(kv); + } else { + _dispatch_voucher_debug_machport(kv); + rkv = kv; + } + voucher_t v = _voucher_find_and_retain(rkv); + if (v) { + _dispatch_voucher_debug("kvoucher[0x%08x] find with 0x%08x", v, rkv,kv); + _voucher_dealloc_mach_voucher(rkv); + return v; + } + vr_size = sizeof(*vr) + _voucher_bits_size(_voucher_max_activities); + vr = alloca(vr_size); + if (rkv) { + kvr_size = (mach_voucher_attr_recipe_size_t)vr_size; + kr = mach_voucher_extract_attr_recipe(rkv, + MACH_VOUCHER_ATTR_KEY_USER_DATA, (void*)vr, &kvr_size); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) kvr_size = 0; + } + mach_voucher_attr_content_size_t content_size = vr->content_size; + uint8_t *content = vr->content; + bool valid = false, has_priority = false; + unsigned int activities = 0; + if (kvr_size >= sizeof(*vr) + sizeof(_voucher_magic_t)) { + valid = (*(_voucher_magic_t*)content == _voucher_magic_v1); + content += sizeof(_voucher_magic_t); + content_size -= sizeof(_voucher_magic_t); + } + if (valid) { + has_priority = (content_size >= sizeof(_voucher_priority_t)); + activities = has_priority ? 
			(content_size - sizeof(_voucher_priority_t))
			/ sizeof(voucher_activity_id_t) : 0;
	}
	pthread_priority_t priority = 0;
	if (has_priority) {
		priority = (pthread_priority_t)*(_voucher_priority_t*)content;
		content += sizeof(_voucher_priority_t);
		content_size -= sizeof(_voucher_priority_t);
	}
	voucher_activity_id_t va_id = 0, va_base_id = 0;
	_voucher_activity_t act = NULL;
	if (activities) {
		va_id = *(voucher_activity_id_t*)content;
		act = _voucher_activity_copy_from_mach_voucher(rkv, va_id);
		if (!act && _voucher_activity_default) {
			activities++;
			// default to _voucher_activity_default base activity
			va_base_id = _voucher_activity_default->va_id;
		} else if (act && act->va_id != va_id) {
			activities++;
			va_base_id = act->va_id;
		}
	}
	v = _voucher_alloc(activities, priority, 0);
	v->v_activity = act;
	voucher_activity_id_t *activity_ids = _voucher_activity_ids(v);
	if (activities && va_base_id) {
		// prepend the base activity id before the ids copied from the recipe
		*activity_ids++ = va_base_id;
		activities--;
	}
	if (activities) {
		memcpy(activity_ids, content, content_size);
	}
	v->v_ipc_kvoucher = v->v_kvoucher = rkv;
	_voucher_insert(v);
	_dispatch_voucher_debug("kvoucher[0x%08x] create with 0x%08x", v, rkv, kv);
	return v;
}

// Build a voucher equivalent to 'ov' but carrying 'priority'.
// Consumes the caller's reference on 'kv': it is either adopted as the new
// voucher's kernel voucher or deallocated when an equivalent voucher is
// found (or when no priority change is needed). Returns a retained voucher.
voucher_t
_voucher_create_with_priority_and_mach_voucher(voucher_t ov,
		pthread_priority_t priority, mach_voucher_t kv)
{
	if (priority == _voucher_get_priority(ov)) {
		// no change in priority: reuse 'ov' as-is
		if (kv) _voucher_dealloc_mach_voucher(kv);
		return ov ? _voucher_retain(ov) : NULL;
	}
	voucher_t v = _voucher_find_and_retain(kv);
	if (v) {
		_dispatch_voucher_debug("kvoucher[0x%08x] find", v, kv);
		_voucher_dealloc_mach_voucher(kv);
		return v;
	}
	unsigned int activities = ov ? ov->v_activities : 0;
	mach_voucher_attr_recipe_size_t extra = ov ?
			_voucher_extra_size(ov) : 0;
	v = _voucher_alloc(activities, priority, extra);
	if (extra) {
		memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra);
	}
	if (activities) {
		if (ov->v_activity) {
			v->v_activity = _voucher_activity_retain(ov->v_activity);
		}
		memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
				activities * sizeof(voucher_activity_id_t));
	}
	if (kv) {
		v->v_ipc_kvoucher = v->v_kvoucher = kv;
		_voucher_insert(v);
		_dispatch_voucher_debug("kvoucher[0x%08x] create with priority from "
				"voucher[%p]", v, kv, ov);
		_dispatch_voucher_debug_machport(kv);
	} else if (ov && ov->v_kvoucher) {
		// no new kernel voucher: borrow the old voucher's kernel voucher
		// through its base voucher (keeps the port alive via v_kvbase)
		voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov;
		v->v_kvbase = _voucher_retain(kvb);
		v->v_kvoucher = kvb->v_kvoucher;
	}
	return v;
}

// Return a retained voucher equivalent to 'ov' with the importance
// attribute stripped from its kernel voucher (possibly 'ov' itself when
// removal turns out to be a no-op).
voucher_t
_voucher_create_without_importance(voucher_t ov)
{
	// Nothing to do unless the old voucher has a kernel voucher. If it
	// doesn't, it can't have any importance, now or in the future.
	if (!ov) return NULL;
	// TODO: 17487167: track presence of importance attribute
	if (!ov->v_kvoucher) return _voucher_retain(ov);
	kern_return_t kr;
	mach_voucher_t kv, okv;
	// Copy kernel voucher, removing importance.
	okv = ov->v_ipc_kvoucher ?
			ov->v_ipc_kvoucher : ov->v_kvoucher;
	const mach_voucher_attr_recipe_data_t importance_remove_recipe[] = {
		[0] = {
			.key = MACH_VOUCHER_ATTR_KEY_ALL,
			.command = MACH_VOUCHER_ATTR_COPY,
			.previous_voucher = okv,
		},
		[1] = {
			.key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
			.command = MACH_VOUCHER_ATTR_REMOVE,
		},
	};
	kr = _voucher_create_mach_voucher(importance_remove_recipe,
			sizeof(importance_remove_recipe), &kv);
	if (dispatch_assume_zero(kr) || !kv){
		if (ov->v_ipc_kvoucher) return NULL;
		kv = MACH_VOUCHER_NULL;
	}
	if (kv == okv) {
		// kernel returned the same voucher: importance removal was a no-op
		_voucher_dealloc_mach_voucher(kv);
		return _voucher_retain(ov);
	}
	voucher_t v = _voucher_find_and_retain(kv);
	if (v && ov->v_ipc_kvoucher) {
		_dispatch_voucher_debug("kvoucher[0x%08x] find without importance "
				"from voucher[%p]", v, kv, ov);
		_voucher_dealloc_mach_voucher(kv);
		return v;
	}
	voucher_t kvbase = v;
	// Copy userspace contents
	unsigned int activities = ov->v_activities;
	pthread_priority_t priority = _voucher_get_priority(ov);
	mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(ov);
	v = _voucher_alloc(activities, priority, extra);
	if (extra) {
		memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra);
	}
	if (activities) {
		if (ov->v_activity) {
			v->v_activity = _voucher_activity_retain(ov->v_activity);
		}
		memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov),
				activities * sizeof(voucher_activity_id_t));
	}
	v->v_kvoucher = kv;
	if (ov->v_ipc_kvoucher) {
		v->v_ipc_kvoucher = kv;
		_voucher_insert(v);
	} else if (kvbase) {
		v->v_kvbase = kvbase;
		_voucher_dealloc_mach_voucher(kv); // borrow base reference
	}
	if (!kvbase) {
		_dispatch_voucher_debug("kvoucher[0x%08x] create without importance "
				"from voucher[%p]", v, kv, ov);
	}
	return v;
}

// Create a voucher from the kernel voucher carried by a received Mach
// message, tracing the receive into the activity log.
voucher_t
voucher_create_with_mach_msg(mach_msg_header_t *msg)
{
	voucher_t v = _voucher_create_with_mach_voucher(_voucher_mach_msg_get(msg));
	_voucher_activity_trace_msg(v, msg,
			receive);
	return v;
}

#ifndef MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL
#define MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL 2
#endif

// Drop one external importance count on the voucher's kernel voucher.
// (Exported SPI; the "4CF" suffix follows the convention for
// CoreFoundation-only entry points — confirm against CF usage.)
void
voucher_decrement_importance_count4CF(voucher_t v)
{
	if (!v || !v->v_kvoucher) return;
	// TODO: 17487167: track presence of importance attribute
	kern_return_t kr;
	mach_voucher_t kv = v->v_ipc_kvoucher ? v->v_ipc_kvoucher : v->v_kvoucher;
	uint32_t dec = 1;
	mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&dec;
	mach_voucher_attr_content_size_t kvc_in_size = sizeof(dec);
	mach_voucher_attr_content_t kvc_out = NULL;
	mach_voucher_attr_content_size_t kvc_out_size = 0;
#if DISPATCH_DEBUG
	// in debug builds, ask the kernel to report the resulting count
	uint32_t count = UINT32_MAX;
	kvc_out = (mach_voucher_attr_content_t)&count;
	kvc_out_size = sizeof(count);
#endif
	kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
			MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL, kvc_in, kvc_in_size,
			kvc_out, &kvc_out_size);
	DISPATCH_VERIFY_MIG(kr);
#if DISPATCH_DEBUG
	_dispatch_voucher_debug("kvoucher[0x%08x] decrement importance count to %u:"
			" %s - 0x%x", v, kv, count, mach_error_string(kr), kr);
#endif
	if (kr != KERN_INVALID_ARGUMENT &&
			dispatch_assume_zero(kr) == KERN_FAILURE) {
		// TODO: 17487167: skip KERN_INVALID_ARGUMENT check
		DISPATCH_CLIENT_CRASH("Voucher importance count underflow");
	}
}

#if VOUCHER_ENABLE_GET_MACH_VOUCHER
mach_voucher_t
voucher_get_mach_voucher(voucher_t voucher)
{
	return _voucher_get_mach_voucher(voucher);
}
#endif

// External-refcount disposal: unregister the voucher from the global hash
// and drop an internal reference.
void
_voucher_xref_dispose(voucher_t voucher)
{
	_dispatch_voucher_debug("xref_dispose", voucher);
	_voucher_remove(voucher);
	return _os_object_release_internal_inline((_os_object_t)voucher);
}

// Final teardown: release kernel voucher port(s), the base voucher and the
// associated activity, then free the object.
void
_voucher_dispose(voucher_t voucher)
{
	_dispatch_voucher_debug("dispose", voucher);
	if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) {
		// must have been unhashed before disposal
		_dispatch_voucher_debug("corruption", voucher);
		DISPATCH_CRASH("Voucher corruption");
	}
	voucher->v_list.tqe_next =
			DISPATCH_OBJECT_LISTLESS;
	if (voucher->v_ipc_kvoucher) {
		if (voucher->v_ipc_kvoucher != voucher->v_kvoucher) {
			_voucher_dealloc_mach_voucher(voucher->v_ipc_kvoucher);
		}
		voucher->v_ipc_kvoucher = MACH_VOUCHER_NULL;
	}
	if (voucher->v_kvoucher) {
		// only deallocate when owned (not borrowed through a base voucher)
		if (!voucher->v_kvbase) {
			_voucher_dealloc_mach_voucher(voucher->v_kvoucher);
		}
		voucher->v_kvoucher = MACH_VOUCHER_NULL;
	}
	if (voucher->v_kvbase) {
		_voucher_release(voucher->v_kvbase);
		voucher->v_kvbase = NULL;
	}
	if (voucher->v_activity) {
		_voucher_activity_release(voucher->v_activity);
		voucher->v_activity = NULL;
	}
	voucher->v_has_priority = 0;
	voucher->v_activities = 0;
#if VOUCHER_ENABLE_RECIPE_OBJECTS
	voucher->v_recipe_extra_size = 0;
	voucher->v_recipe_extra_offset = 0;
#endif
	return _os_object_dealloc((_os_object_t)voucher);
}

// Reset voucher state in the child after fork(); the adopted voucher and
// the cached task kernel voucher are not inherited.
void
_voucher_atfork_child(void)
{
	_voucher_activity_atfork_child();
	_dispatch_thread_setspecific(dispatch_voucher_key, NULL);
	_voucher_task_mach_voucher_pred = 0;
	_voucher_task_mach_voucher = MACH_VOUCHER_NULL;

	// TODO: voucher/activity inheritance on fork ?
}

#pragma mark -
#pragma mark _voucher_init

// Attach the current thread's voucher to an outgoing Mach message.
// Returns the result of _voucher_mach_msg_set() (presumably whether the
// caller must clear the message's voucher after send — confirm).
boolean_t
voucher_mach_msg_set(mach_msg_header_t *msg)
{
	voucher_t v = _voucher_get();
	bool clear_voucher = _voucher_mach_msg_set(msg, v);
	if (clear_voucher) _voucher_activity_trace_msg(v, msg, send);
	return clear_voucher;
}

// Remove any voucher from the message header (best-effort, result ignored).
void
voucher_mach_msg_clear(mach_msg_header_t *msg)
{
	(void)_voucher_mach_msg_clear(msg, false);
}

// Adopt the voucher carried by a received message on the current thread;
// returns the previously adopted voucher as opaque state for revert.
voucher_mach_msg_state_t
voucher_mach_msg_adopt(mach_msg_header_t *msg)
{
	mach_voucher_t kv = _voucher_mach_msg_get(msg);
	if (!kv) return VOUCHER_MACH_MSG_STATE_UNCHANGED;
	voucher_t v = _voucher_create_with_mach_voucher(kv);
	_voucher_activity_trace_msg(v, msg, receive);
	return (voucher_mach_msg_state_t)_voucher_adopt(v);
}

// Restore the voucher that was current before voucher_mach_msg_adopt().
void
voucher_mach_msg_revert(voucher_mach_msg_state_t state)
{
	if (state == VOUCHER_MACH_MSG_STATE_UNCHANGED) return;
	_voucher_replace((voucher_t)state);
}

#if DISPATCH_USE_LIBKERNEL_VOUCHER_INIT
#include <_libkernel_init.h>

// Hook table handed to libsystem_kernel so mach_msg() can manage vouchers.
static const struct _libkernel_voucher_functions _voucher_libkernel_functions =
{
	.version = 1,
	.voucher_mach_msg_set = voucher_mach_msg_set,
	.voucher_mach_msg_clear = voucher_mach_msg_clear,
	.voucher_mach_msg_adopt = voucher_mach_msg_adopt,
	.voucher_mach_msg_revert = voucher_mach_msg_revert,
};

static void
_voucher_libkernel_init(void)
{
	kern_return_t kr = __libkernel_voucher_init(&_voucher_libkernel_functions);
	dispatch_assert(!kr);
}
#else
#define _voucher_libkernel_init()
#endif

// Process-wide voucher subsystem initialization: voucher hash table,
// activity mode selection (OS_ACTIVITY_MODE), ATM tunables from the
// environment, and an optional default task activity.
void
_voucher_init(void)
{
	_voucher_libkernel_init();
	char *e, *end;
	unsigned int i;
	for (i = 0; i < VL_HASH_SIZE; i++) {
		TAILQ_INIT(&_vouchers[i]);
	}
	voucher_activity_mode_t mode;
	mode = DISPATCH_DEBUG ?
			voucher_activity_mode_debug
			: voucher_activity_mode_release;
	e = getenv("OS_ACTIVITY_MODE");
	if (e) {
		if (strcmp(e, "release") == 0) {
			mode = voucher_activity_mode_release;
		} else if (strcmp(e, "debug") == 0) {
			mode = voucher_activity_mode_debug;
		} else if (strcmp(e, "stream") == 0) {
			mode = voucher_activity_mode_stream;
		} else if (strcmp(e, "disable") == 0) {
			mode = voucher_activity_mode_disable;
		}
	}
	_voucher_activity_mode = mode;
	if (_voucher_activity_disabled()) return;

	e = getenv("LIBDISPATCH_ACTIVITY_ATM_SUBID_BITS");
	if (e) {
		unsigned long v = strtoul(e, &end, 0);
		if (v && !*end) {
			_voucher_atm_subid_bits = v;
		}
	}
	e = getenv("LIBDISPATCH_ACTIVITY_ATM_COLLECT_THRESHOLD");
	if (e) {
		unsigned long v = strtoul(e, &end, 0);
		if (v && v < LONG_MAX/2 && !*end) {
			_voucher_atm_collect_threshold =
					VATM_COLLECT_THRESHOLD_VALUE((long)v);
		}
	}
	// default task activity
	bool default_task_activity = DISPATCH_DEBUG;
	e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY");
	if (e) default_task_activity = atoi(e);
	if (default_task_activity) {
		(void)voucher_activity_start(_voucher_activity_trace_id_release, 0);
	}
}

#pragma mark -
#pragma mark _voucher_activity_lock_s

// Thin wrappers around the os_lock handoff lock used by the activity heap.

DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_lock_init(_voucher_activity_lock_s *lock) {
	static const os_lock_handoff_s _os_lock_handoff_init = OS_LOCK_HANDOFF_INIT;
	*lock = _os_lock_handoff_init;
}

DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_lock_lock(_voucher_activity_lock_s *lock) {
	return os_lock_lock(lock);
}

DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_lock_unlock(_voucher_activity_lock_s *lock) {
	return os_lock_unlock(lock);
}

#pragma mark -
#pragma mark _voucher_activity_heap

// Compile-time layout checks for the activity tracing structures; the
// on-disk/shared-memory layout depends on these exact sizes and offsets.
#if __has_extension(c_static_assert)
_Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64,
		"Tracepoint too large");
_Static_assert(sizeof(struct
		_voucher_activity_buffer_header_s) <=
		sizeof(struct _voucher_activity_tracepoint_s),
		"Buffer header too large");
_Static_assert(offsetof(struct _voucher_activity_s, va_flags2) ==
		sizeof(struct _voucher_activity_tracepoint_s),
		"Extended activity object misaligned");
#if __LP64__
_Static_assert(sizeof(struct _voucher_activity_s) ==
		3 * sizeof(struct _voucher_activity_tracepoint_s),
		"Activity object too large");
_Static_assert(offsetof(struct _voucher_activity_s, va_flags3) ==
		2 * sizeof(struct _voucher_activity_tracepoint_s),
		"Extended activity object misaligned");
_Static_assert(offsetof(struct _voucher_atm_s, vatm_activities_lock) % 64 == 0,
		"Bad ATM padding");
_Static_assert(sizeof(struct _voucher_atm_s) <= 128,
		"ATM too large");
#else
_Static_assert(sizeof(struct _voucher_activity_s) ==
		2 * sizeof(struct _voucher_activity_tracepoint_s),
		"Activity object too large");
_Static_assert(sizeof(struct _voucher_atm_s) <= 64,
		"ATM too large");
#endif
_Static_assert(sizeof(_voucher_activity_buffer_t) ==
		sizeof(struct {char x[_voucher_activity_buffer_size];}),
		"Buffer too large");
_Static_assert(sizeof(struct _voucher_activity_metadata_s) <=
		sizeof(struct _voucher_activity_metadata_opaque_s),
		"Metadata too large");
_Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0,
		"Bad metadata bitmap size");
_Static_assert(offsetof(struct _voucher_activity_metadata_s,
		vam_atm_mbox_bitmap) % 64 == 0,
		"Bad metadata padding");
_Static_assert(offsetof(struct _voucher_activity_metadata_s,
		vam_base_atm_subid) % 64 == 0,
		"Bad metadata padding");
_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_base_atm_lock)
		% 32 == 0,
		"Bad metadata padding");
_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_atms) % 64 ==0,
		"Bad metadata padding");
_Static_assert(sizeof(_voucher_activity_bitmap_t) * 8 *
		sizeof(atm_mailbox_offset_t) <=
		sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata),
		"Bad kernel metadata bitmap");
_Static_assert(sizeof(atm_mailbox_offset_t) == 2 * sizeof(atm_subaid32_t),
		"Bad kernel ATM mailbox sizes");
#endif

// Number of ATM mailbox slots available in the kernel metadata area.
static const size_t _voucher_atm_mailboxes =
		sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata) /
		sizeof(atm_mailbox_offset_t);

// Shorthand accessors for the heap-resident locks, hash buckets, bitmaps
// and buffers of the global _voucher_activity_heap.
#define va_buffers_lock(va) (&(va)->va_buffers_lock)
#define vatm_activities_lock(vatm) (&(vatm)->vatm_activities_lock)
#define vatm_activities(vatm) (&(vatm)->vatm_activities)
#define vatm_used_activities(vatm) (&(vatm)->vatm_used_activities)
#define vam_base_atm_lock() (&_voucher_activity_heap->vam_base_atm_lock)
#define vam_nested_atm_lock() (&_voucher_activity_heap->vam_nested_atm_lock)
#define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock)
#define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock)
#define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash])
#define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash])
#define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap)
#define vam_atm_mbox_bitmap() (_voucher_activity_heap->vam_atm_mbox_bitmap)
#define vam_pressure_locked_bitmap() \
		(_voucher_activity_heap->vam_pressure_locked_bitmap)
#define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \
		(i) * _voucher_activity_buffer_size))

static _voucher_activity_t _voucher_activity_create_with_atm(
		_voucher_atm_t vatm, voucher_activity_id_t va_id,
		voucher_activity_trace_id_t trace_id, uint64_t location,
		_voucher_activity_buffer_header_t buffer);
static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id);
static voucher_activity_id_t _voucher_atm_nested_atm_id_make(void);

DISPATCH_ALWAYS_INLINE
static inline uint32_t
_voucher_default_activity_buffer_limit()
{
	switch (_voucher_activity_mode) {
	case voucher_activity_mode_debug:
	case voucher_activity_mode_stream:
		// High-profile modes: Default activity can use 1/32nd of the heap
		// (twice
		// as much as non-default activities)
		return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1;
	}
#if TARGET_OS_EMBEDDED
	// Low-profile modes: Default activity can use a total of 3 buffers.
	return 2;
#else
	// Low-profile modes: Default activity can use a total of 8 buffers.
	return 7;
#endif
}

// Per-activity buffer quota (additional buffers beyond the first).
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_voucher_activity_buffer_limit()
{
	switch (_voucher_activity_mode) {
	case voucher_activity_mode_debug:
	case voucher_activity_mode_stream:
		// High-profile modes: 64 activities, each of which can use 1/64th
		// of the entire heap.
		return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1;
	}
#if TARGET_OS_EMBEDDED
	// Low-profile modes: Each activity can use a total of 2 buffers.
	return 1;
#else
	// Low-profile modes: Each activity can use a total of 4 buffers.
	return 3;
#endif
}

// The two functions above return the number of *additional* buffers activities
// may allocate, hence the gymnastics with - 1.

// Total number of heap buffers usable in the current activity mode.
DISPATCH_ALWAYS_INLINE
static inline uint32_t
_voucher_heap_buffer_limit()
{
	switch (_voucher_activity_mode) {
	case voucher_activity_mode_debug:
	case voucher_activity_mode_stream:
		// High-profile modes: Use it all.
		return _voucher_activity_buffers_per_heap;
	}
#if TARGET_OS_EMBEDDED
	// Low-profile modes: 3 activities, each of which can use 2 buffers;
	// plus the default activity, which can use 3; plus 3 buffers of overhead.
	return 12;
#else
	// Low-profile modes: 13 activities, each of which can use 4 buffers;
	// plus the default activity, which can use 8; plus 3 buffers of overhead.
	return 64;
#endif
}

#define NO_BITS_WERE_UNSET (UINT_MAX)

// Atomically claim (set) the first unset bit below 'max_index' in the
// multi-word bitmap; returns the claimed bit index or NO_BITS_WERE_UNSET.
DISPATCH_ALWAYS_INLINE
static inline size_t
_voucher_activity_bitmap_set_first_unset_bit_upto(
		_voucher_activity_bitmap_t volatile bitmap,
		unsigned int max_index)
{
	dispatch_assert(max_index != 0);
	unsigned int index = NO_BITS_WERE_UNSET, max_map, max_bit, i;
	max_map = max_index / _voucher_activity_bits_per_bitmap_base_t;
	max_map = MIN(max_map, _voucher_activity_bitmaps_per_heap - 1);
	max_bit = max_index % _voucher_activity_bits_per_bitmap_base_t;
	for (i = 0; i < max_map; i++) {
		index = dispatch_atomic_set_first_bit(&bitmap[i], UINT_MAX);
		if (fastpath(index < NO_BITS_WERE_UNSET)) {
			return index + i * _voucher_activity_bits_per_bitmap_base_t;
		}
	}
	// last (possibly partial) word: only bits below max_bit are eligible
	index = dispatch_atomic_set_first_bit(&bitmap[i], max_bit);
	if (fastpath(index < NO_BITS_WERE_UNSET)) {
		return index + i * _voucher_activity_bits_per_bitmap_base_t;
	}
	return index;
}

DISPATCH_ALWAYS_INLINE
static inline size_t
_voucher_activity_bitmap_set_first_unset_bit(
		_voucher_activity_bitmap_t volatile bitmap)
{
	return _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap, UINT_MAX);
}

// Atomically clear a bit previously claimed by the allocator; crashes if
// the bit was not set (double-free detection).
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_bitmap_clear_bit(
		_voucher_activity_bitmap_t volatile bitmap, size_t index)
{
	size_t i = index / _voucher_activity_bits_per_bitmap_base_t;
	_voucher_activity_bitmap_base_t mask = ((typeof(mask))1) <<
			(index % _voucher_activity_bits_per_bitmap_base_t);
	if (slowpath((bitmap[i] & mask) == 0)) {
		DISPATCH_CRASH("Corruption: failed to clear bit exclusively");
	}
	(void)dispatch_atomic_and(&bitmap[i], ~mask, release);
}

_voucher_activity_metadata_t _voucher_activity_heap;
static dispatch_once_t _voucher_activity_heap_pred;

// Once-initializer: map the activity tracing heap and register it with the
// kernel; on any failure the activity mode is set to 'disable'.
static void
_voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED)
{
	if (_voucher_activity_disabled()) return;
	kern_return_t kr;
	mach_vm_size_t vm_size = _voucher_activity_buffer_size *
			_voucher_activity_buffers_per_heap;
	mach_vm_address_t vm_addr = vm_page_size;
	while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size,
			0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_GENEALOGY),
			MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_NONE))) {
		if (kr != KERN_NO_SPACE) {
			(void)dispatch_assume_zero(kr);
			_voucher_activity_mode = voucher_activity_mode_disable;
			return;
		}
		// address space shortage may be transient: back off and retry
		_dispatch_temporary_resource_shortage();
		vm_addr = vm_page_size;
	}
	_voucher_activity_metadata_t heap;
	task_trace_memory_info_data_t trace_memory_info = {
		.user_memory_address = vm_addr,
		.buffer_size = vm_size,
		.mailbox_array_size = sizeof(heap->vam_kernel_metadata),
	};
	// hand the heap to the kernel for trace-memory bookkeeping
	kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO,
			(task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT);
	DISPATCH_VERIFY_MIG(kr);
	if (kr) {
		if (kr != KERN_NOT_SUPPORTED) (void)dispatch_assume_zero(kr);
		kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size);
		(void)dispatch_assume_zero(kr);
		_voucher_activity_mode = voucher_activity_mode_disable;
		return;
	}
	heap = (void*)vm_addr;
	heap->vam_self_metadata.vasm_baseaddr = (void*)vm_addr;
	heap->vam_buffer_bitmap[0] = 0xf; // first four buffers are reserved
	uint32_t i;
	for (i = 0; i < _voucher_activity_hash_size; i++) {
		TAILQ_INIT(&heap->vam_activities[i]);
		TAILQ_INIT(&heap->vam_atms[i]);
	}
	uint32_t subid_max = VATM_SUBID_MAX;
	if (_voucher_atm_subid_bits &&
			_voucher_atm_subid_bits < VATM_SUBID_MAXBITS) {
		subid_max = MIN(VATM_SUBID_BITS2MAX(_voucher_atm_subid_bits),
				VATM_SUBID_MAX);
	}
	heap->vam_base_atm_subid_max = subid_max;
	_voucher_activity_lock_init(&heap->vam_base_atm_lock);
	_voucher_activity_lock_init(&heap->vam_nested_atm_lock);
	_voucher_activity_lock_init(&heap->vam_atms_lock);
	_voucher_activity_lock_init(&heap->vam_activities_lock);
	_voucher_activity_heap = heap;

	_voucher_atm_t vatm = _voucher_atm_create(0, 0);
	dispatch_assert(vatm->vatm_kvoucher);
	heap->vam_default_activity_atm = vatm;
	_voucher_activity_buffer_header_t buffer = vam_buffer(3); // reserved index
	// consumes vatm reference:
	_voucher_activity_t va = _voucher_activity_create_with_atm(vatm,
			VATM_ACTID(vatm, _voucher_default_activity_subid), 0, 0, buffer);
	dispatch_assert(va);
	va->va_buffer_limit = _voucher_default_activity_buffer_limit();
	_voucher_activity_default = va;
	heap->vam_base_atm = _voucher_atm_create(0, 0);
	heap->vam_nested_atm_id = _voucher_atm_nested_atm_id_make();
}

// Reset activity state in the fork child; the heap mapping itself is gone.
static void
_voucher_activity_atfork_child(void)
{
	_voucher_activity_heap_pred = 0;
	_voucher_activity_heap = NULL; // activity heap is VM_INHERIT_NONE
	_voucher_activity_default = NULL;
}

// Expose the client metadata area of the activity heap (lazily
// initializing the heap); *length is 0 when activities are disabled.
void*
voucher_activity_get_metadata_buffer(size_t *length)
{
	dispatch_once_f(&_voucher_activity_heap_pred, NULL,
			_voucher_activity_heap_init);
	if (_voucher_activity_disabled()) {
		*length = 0;
		return NULL;
	}
	*length = sizeof(_voucher_activity_heap->vam_client_metadata);
	return _voucher_activity_heap->vam_client_metadata;
}

// Claim a free buffer slot from the heap bitmap; NULL when exhausted.
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_buffer_header_t
_voucher_activity_heap_buffer_alloc(void)
{
	_voucher_activity_buffer_header_t buffer = NULL;
	size_t index;
	index = _voucher_activity_bitmap_set_first_unset_bit_upto(
			vam_buffer_bitmap(), _voucher_heap_buffer_limit() - 1);
	if (index < NO_BITS_WERE_UNSET) {
		buffer = vam_buffer(index);
	}
#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
	_dispatch_debug("activity heap alloc %zd (%p)", index, buffer);
#endif
	return buffer;
}

// Mark a buffer empty and return its slot to the heap bitmap.
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_heap_buffer_free(_voucher_activity_buffer_header_t buffer)
{
	buffer->vabh_flags = _voucher_activity_trace_flag_buffer_empty;
	size_t index = (size_t)((char*)buffer - (char*)_voucher_activity_heap) /
			_voucher_activity_buffer_size;
#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
	_dispatch_debug("activity heap free %zd (%p)", index, buffer);
#endif
	_voucher_activity_bitmap_clear_bit(vam_buffer_bitmap(), index);
}

// madvise() is only usable when buffers are exactly page-sized.
#define _voucher_activity_heap_can_madvise() \
		(PAGE_SIZE == _voucher_activity_buffer_size) //

// MADV_FREE a run of 'len' buffers starting at bit 'start' of bitmap word
// 'bitmap_num' (debug builds poison the buffers first).
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_heap_madvise(size_t bitmap_num, unsigned int start,
		unsigned int len)
{
	size_t base = bitmap_num * _voucher_activity_bits_per_bitmap_base_t;
#if DISPATCH_DEBUG
#if DISPATCH_VOUCHER_ACTIVITY_DEBUG
	_dispatch_debug("activity heap madvise %zd (%p) -> %zd (%p)", base + start,
			vam_buffer(base + start), base + start + len,
			vam_buffer(base + start + len));
#endif
	dispatch_assert(!(len * _voucher_activity_buffer_size % vm_page_size));
	const uint64_t pattern = 0xFACEFACEFACEFACE;
	_voucher_activity_buffer_header_t buffer = vam_buffer(base + start);
	for (unsigned int i = 0; i < len; i++, buffer++) {
		memset_pattern8((char*)buffer + sizeof(buffer->vabh_flags), &pattern,
				_voucher_activity_buffer_size - sizeof(buffer->vabh_flags));
	}
#endif
	(void)dispatch_assume_zero(madvise(vam_buffer(base + start),
			len * _voucher_activity_buffer_size, MADV_FREE));
}

// Walk the set bits of one bitmap word and madvise each contiguous run.
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_heap_madvise_contiguous(size_t bitmap_num,
		_voucher_activity_bitmap_base_t bits)
{
	// TODO: x86 has fast ctz; arm has fast clz; haswell has fast ctz
	dispatch_assert(_voucher_activity_heap_can_madvise());
	if (bits == 0) {
		return;
	} else if (~bits == 0) {
		_voucher_activity_heap_madvise(bitmap_num, 0,
				_voucher_activity_bits_per_bitmap_base_t);
	} else while (bits != 0) {
		unsigned int start = (typeof(start))__builtin_ctzl(bits), len;
		typeof(bits) inverse = ~bits >> start;
		if (inverse) {
			len = (typeof(len))__builtin_ctzl(inverse);
		} else {
			len = _voucher_activity_bits_per_bitmap_base_t - start;
		}
		typeof(bits) mask = ((((typeof(bits))1) << len) - 1) << start;
		bits &= ~mask;
		_voucher_activity_heap_madvise(bitmap_num, start, len);
	}
}

// Memory-pressure warning: claim every currently-free buffer, return its
// pages to the kernel, and remember what was claimed so it can be undone.
void
_voucher_activity_heap_pressure_warn(void)
{
	if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
		return;
	}
	volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
	bitmap = vam_buffer_bitmap();
	pressure_locked_bitmap = vam_pressure_locked_bitmap();

	// number of bitmaps needed to map the current buffer limit =
	// ceil(buffer limit / bits per bitmap)
	size_t nbuffers = _voucher_heap_buffer_limit();
	size_t nbitmaps_quot = nbuffers / _voucher_activity_bits_per_bitmap_base_t;
	size_t nbitmaps_rem = nbuffers % _voucher_activity_bits_per_bitmap_base_t;
	size_t nbitmaps = nbitmaps_quot + ((nbitmaps_rem == 0) ? 0 : 1);

	for (size_t i = 0; i < nbitmaps; i++) {
		_voucher_activity_bitmap_base_t got_bits;
		got_bits = dispatch_atomic_or_orig(&bitmap[i], ~((typeof(bitmap[i]))0),
				relaxed);
		got_bits = ~got_bits; // Now 1 means 'acquired this one, madvise it'
		_voucher_activity_heap_madvise_contiguous(i, got_bits);
		pressure_locked_bitmap[i] |= got_bits;
	}
}

// Memory-pressure back to normal: release the buffers that were locked out
// by the pressure warning back to the allocator.
void
_voucher_activity_heap_pressure_normal(void)
{
	if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) {
		return;
	}
	volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap;
	bitmap = vam_buffer_bitmap();
	pressure_locked_bitmap = vam_pressure_locked_bitmap();
	for (size_t i = 0; i < _voucher_activity_bitmaps_per_heap; i++) {
		_voucher_activity_bitmap_base_t free_bits = pressure_locked_bitmap[i];
		pressure_locked_bitmap[i] = 0;
		if (free_bits != 0) {
			(void)dispatch_atomic_and(&bitmap[i], ~free_bits, release);
		}
	}
}

// (Re)initialize a tracing buffer for 'act'; when 'reuse' is set the
// header identity fields are kept and only the per-use fields are reset.
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_buffer_init(_voucher_activity_t act,
		_voucher_activity_buffer_header_t buffer, bool reuse)
{
	if (!reuse) {
		buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header;
		buffer->vabh_activity_id = act->va_id;
	}
	buffer->vabh_timestamp
			= _voucher_activity_timestamp();
	buffer->vabh_next_tracepoint_idx = 1;
	buffer->vabh_sequence_no = dispatch_atomic_inc2o(act, va_max_sequence_no,
			relaxed);
}

// Slow path when the heap has no free buffer: recycle the oldest buffer on
// the activity's own buffer list (moving it to the tail), racing against
// concurrent installers of va_current_buffer.
static _voucher_activity_buffer_header_t
_voucher_activity_buffer_alloc_slow(_voucher_activity_t act,
		_voucher_activity_buffer_header_t current)
{
	_voucher_activity_buffer_header_t buffer;
	_voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking
	buffer = act->va_current_buffer;
	if (buffer != current) goto out; // someone else already replaced it
	buffer = TAILQ_FIRST(&act->va_buffers);
	if (buffer) {
		_voucher_activity_buffer_init(act, buffer, true);
		if (buffer != TAILQ_LAST(&act->va_buffers,
				_voucher_activity_buffer_list_s)) {
			TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
			TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
		}
	}
	// NOTE(review): '&current' restored from mojibake in the imported text
	if (!dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
			&current, release)) {
		if (buffer) {
			TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
			_voucher_activity_heap_buffer_free(buffer);
		}
		buffer = current;
	}
out:
	_voucher_activity_lock_unlock(va_buffers_lock(act));
	_dispatch_voucher_activity_debug("buffer reuse %p", act, buffer);
	return buffer;
}

// Install a fresh heap buffer as the activity's current buffer, falling
// back to recycling (alloc_slow) when the per-activity quota or the heap
// is exhausted. Returns the buffer that ended up current.
static _voucher_activity_buffer_header_t
_voucher_activity_buffer_alloc(_voucher_activity_t act,
		_voucher_activity_buffer_header_t current)
{
	_voucher_activity_buffer_header_t buffer = NULL;
	if (act->va_max_sequence_no < act->va_buffer_limit) {
		buffer = _voucher_activity_heap_buffer_alloc();
	}
	if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current);
	_voucher_activity_buffer_init(act, buffer, false);
	if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer,
			&current, release)) {
		_voucher_activity_lock_lock(va_buffers_lock(act));
		TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list);
		_voucher_activity_lock_unlock(va_buffers_lock(act));
	} else {
		// lost the race: give the buffer back and use the winner's
		_voucher_activity_heap_buffer_free(buffer);
		buffer = current;
	}
	_dispatch_voucher_activity_debug("buffer alloc %p", act, buffer);
	return buffer;
}

#pragma mark -
#pragma mark _voucher_activity_t

#define _voucher_activity_ordered_insert(_act, head, field) do { \
		typeof(_act) _vai; \
		TAILQ_FOREACH(_vai, (head), field) { \
			if (_act->va_id < _vai->va_id) break; \
		} \
		if (_vai) { \
			TAILQ_INSERT_BEFORE(_vai, _act, field); \
		} else { \
			TAILQ_INSERT_TAIL((head), _act, field); \
		} } while (0);
// NOTE(review): trailing ';' after the do/while(0) above makes the macro
// unusable inside an unbraced if/else — confirm call sites before changing.

static void _voucher_activity_dispose(_voucher_activity_t act);
static _voucher_activity_t _voucher_atm_activity_mark_used(
		_voucher_activity_t act);
static void _voucher_atm_activity_mark_unused(_voucher_activity_t act);
static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id);
static inline void _voucher_atm_release(_voucher_atm_t vatm);
static void _voucher_atm_activity_insert(_voucher_atm_t vatm,
		_voucher_activity_t act);
static void _voucher_atm_activity_remove(_voucher_activity_t act);
static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv);

// Take a use-count reference on 'act'; returns true when this reference
// resurrected an activity whose use count had dropped to the unused state.
DISPATCH_ALWAYS_INLINE
static inline bool
_voucher_activity_copy(_voucher_activity_t act)
{
	int use_cnt = dispatch_atomic_inc2o(act, va_use_count, relaxed);
	_dispatch_voucher_activity_debug("retain -> %d", act, use_cnt + 1);
	if (slowpath(use_cnt < 0)) {
		_dispatch_voucher_activity_debug("overrelease", act);
		DISPATCH_CRASH("Activity overrelease");
	}
	return (use_cnt == 0);
}

// Like _voucher_activity_copy(), but resurrection is invalid here.
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_t
_voucher_activity_retain(_voucher_activity_t act)
{
	if (_voucher_activity_copy(act)) {
		_dispatch_voucher_activity_debug("invalid resurrection", act);
		DISPATCH_CRASH("Invalid activity resurrection");
	}
	return act;
}

// Drop a use-count reference; the last release marks the activity unused
// on its ATM (it is not freed here — see the ATM refcount below).
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_release(_voucher_activity_t act)
{
	int use_cnt = dispatch_atomic_dec2o(act, va_use_count, relaxed);
	_dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1);
	if (fastpath(use_cnt >= 0)) {
		return;
	}
	if (slowpath(use_cnt < -1)) {
		_dispatch_voucher_activity_debug("overrelease", act);
		DISPATCH_CRASH("Activity overrelease");
	}
	return _voucher_atm_activity_mark_unused(act);
}

// Take an ATM-side reference on the activity (va_refcnt, distinct from the
// user-visible va_use_count).
DISPATCH_ALWAYS_INLINE
static inline _voucher_activity_t
_voucher_activity_atm_retain(_voucher_activity_t act)
{
	int refcnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed);
	_dispatch_voucher_activity_debug("atm retain -> %d", act, refcnt + 1);
	if (slowpath(refcnt <= 0)) {
		_dispatch_voucher_activity_debug("atm resurrection", act);
		DISPATCH_CRASH("Activity ATM resurrection");
	}
	return act;
}

// Drop an ATM-side reference; the last release disposes of the activity.
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_activity_atm_release(_voucher_activity_t act)
{
	int refcnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed);
	_dispatch_voucher_activity_debug("atm release -> %d", act, refcnt + 1);
	if (fastpath(refcnt >= 0)) {
		return;
	}
	if (slowpath(refcnt < -1)) {
		_dispatch_voucher_activity_debug("atm overrelease", act);
		DISPATCH_CRASH("Activity ATM overrelease");
	}
	return _voucher_activity_dispose(act);
}

// The activity associated with a voucher, falling back to the default
// activity when the voucher carries none.
static inline _voucher_activity_t
_voucher_activity_get(voucher_t v)
{
	_voucher_activity_t act;
	act = v && v->v_activity ?
			v->v_activity : _voucher_activity_default;
	return act;
}

// Linear search of one hash bucket for an activity id.
static _voucher_activity_t
_voucher_activity_find(voucher_activity_id_t va_id, uint32_t hash)
{
	// assumes vam_activities_lock held
	_voucher_activity_t act;
	TAILQ_FOREACH(act, vam_activities(hash), va_list){
		if (act->va_id == va_id) break;
	}
	return act;
}

// Look up an activity by id and take a use-count reference on it,
// re-marking it used on its ATM when the reference resurrected it.
static _voucher_activity_t
_voucher_activity_copy_from_id(voucher_activity_id_t va_id)
{
	bool resurrect = false;
	uint32_t hash = VACTID_HASH(va_id);
	_voucher_activity_lock_lock(vam_activities_lock());
	_voucher_activity_t act = _voucher_activity_find(va_id, hash);
	if (act) {
		resurrect = _voucher_activity_copy(act);
		_dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id);
	}
	_voucher_activity_lock_unlock(vam_activities_lock());
	if (resurrect) return _voucher_atm_activity_mark_used(act);
	return act;
}

// Insert 'act_new' into the activity hash unless an activity with the same
// id already exists; returns the pre-existing (retained) activity on
// collision, NULL on successful insertion.
static _voucher_activity_t
_voucher_activity_try_insert(_voucher_activity_t act_new)
{
	bool resurrect = false;
	voucher_activity_id_t va_id = act_new->va_id;
	uint32_t hash = VACTID_HASH(va_id);
	_voucher_activity_lock_lock(vam_activities_lock());
	_voucher_activity_t act = _voucher_activity_find(va_id, hash);
	if (act) {
		resurrect = _voucher_activity_copy(act);
		_dispatch_voucher_activity_debug("try insert: failed (%p)", act,act_new);
	} else {
		if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) {
			_dispatch_voucher_activity_debug("corruption", act_new);
			DISPATCH_CRASH("Activity corruption");
		}
		TAILQ_INSERT_TAIL(vam_activities(hash), act_new, va_list);
		_dispatch_voucher_activity_debug("try insert: succeeded", act_new);
	}
	_voucher_activity_lock_unlock(vam_activities_lock());
	if (resurrect) return _voucher_atm_activity_mark_used(act);
	return act;
}

// Remove 'act' from the activity hash if (and only if) it is still unused;
// returns whether the removal happened.
static bool
_voucher_activity_try_remove(_voucher_activity_t act)
{
	bool r;
	voucher_activity_id_t va_id = act->va_id;
	uint32_t hash = VACTID_HASH(va_id);
	_voucher_activity_lock_lock(vam_activities_lock());
	if
	(slowpath(!va_id)) {
		_dispatch_voucher_activity_debug("corruption", act);
		DISPATCH_CRASH("Activity corruption");
	}
	// only remove while the use count is negative (unused) and the entry is
	// actually on the hash list
	if ((r = (dispatch_atomic_load2o(act, va_use_count, seq_cst) < 0 &&
			_TAILQ_IS_ENQUEUED(act, va_list)))) {
		TAILQ_REMOVE(vam_activities(hash), act, va_list);
		_TAILQ_MARK_NOT_ENQUEUED(act, va_list);
		act->va_list.tqe_next = (void*)~0ull;
	}
	_dispatch_voucher_activity_debug("try remove: %s", act, r ? "succeeded" :
			"failed");
	_voucher_activity_lock_unlock(vam_activities_lock());
	return r;
}

// Carve a new activity object out of a tracing buffer (allocating one when
// 'buffer' is NULL). Consumes the caller's reference on 'vatm': it is
// transferred into va_atm, or released on failure / id collision.
static _voucher_activity_t
_voucher_activity_create_with_atm(_voucher_atm_t vatm,
		voucher_activity_id_t va_id, voucher_activity_trace_id_t trace_id,
		uint64_t location, _voucher_activity_buffer_header_t buffer)
{
	if (!buffer) buffer = _voucher_activity_heap_buffer_alloc();
	if (!buffer) {
		_dispatch_voucher_atm_debug("no buffer", vatm);
		_voucher_atm_release(vatm); // consume vatm reference
		return NULL;
	}
	if (!trace_id) trace_id = _voucher_activity_trace_id_release;
	// the activity object doubles as the buffer's first tracepoint(s)
	_voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer;
	_voucher_activity_tracepoint_init_with_id(vat, trace_id, ~1ull);
	_voucher_activity_t act = (_voucher_activity_t)buffer;
	act->va_flags = _voucher_activity_trace_flag_buffer_header |
			_voucher_activity_trace_flag_activity |
			_voucher_activity_trace_flag_start |
			_voucher_activity_trace_flag_wide_first;
	act->vabh_next_tracepoint_idx = sizeof(*act)/sizeof(*vat);
	act->va_max_sequence_no = 0;
	act->va_id = va_id ?
			va_id : VATM_ACTID(vatm, 0);
	act->va_use_count = 0;
	act->va_buffer_limit = _voucher_activity_buffer_limit();
	TAILQ_INIT(&act->va_buffers);
	act->va_flags2 = _voucher_activity_trace_flag_activity |
			_voucher_activity_trace_flag_wide_second;
#if __LP64__
	act->va_flags3 = act->va_flags2;
#endif
	act->va_refcnt = 0;
	act->va_location = location;
	act->va_current_buffer = buffer;
	act->va_atm = vatm; // transfer vatm reference
	_voucher_activity_lock_init(va_buffers_lock(act));
	_TAILQ_MARK_NOT_ENQUEUED(act, va_list);
	_TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list);
	_TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list);
	_voucher_activity_t actx = _voucher_activity_try_insert(act);
	if (actx) {
		// lost an insertion race: discard ours, use the existing activity
		_voucher_activity_dispose(act);
		act = actx;
	} else {
		_voucher_atm_activity_insert(vatm, act);
	}
	_dispatch_voucher_activity_debug("create", act);
	return act;
}

// Final teardown of an activity: release its ATM and return all of its
// buffers (including the one embedding the activity itself) to the heap.
static void
_voucher_activity_dispose(_voucher_activity_t act)
{
	_dispatch_voucher_activity_debug("dispose", act);
	_voucher_atm_release(act->va_atm);
	if (slowpath(_TAILQ_IS_ENQUEUED(act, va_list))) {
		_dispatch_voucher_activity_debug("corruption", act);
		DISPATCH_CRASH("Activity corruption");
	}
	act->va_list.tqe_next = DISPATCH_OBJECT_LISTLESS;
	dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_list));
	dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list));
	_voucher_activity_buffer_header_t buffer, tmp;
	TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) {
		_dispatch_voucher_activity_debug("buffer free %p", act, buffer);
		TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list);
		_voucher_activity_heap_buffer_free(buffer);
	}
	buffer = (_voucher_activity_buffer_header_t)act;
	_voucher_activity_heap_buffer_free(buffer);
}

// Garbage-collect an unused activity: unhash it, then detach it from its
// ATM (which drops the ATM-side reference).
static void
_voucher_activity_collect(_voucher_activity_t act)
{
	_dispatch_voucher_activity_debug("collect", act);
	if (_voucher_activity_try_remove(act)) {
		_voucher_atm_activity_remove(act);
	}
}

// Materialize the activity referenced by a kernel voucher: by id when it
// is already known locally, otherwise by (copying or creating) the ATM
// identified by the voucher and creating a new activity on it.
static _voucher_activity_t
_voucher_activity_copy_from_mach_voucher(mach_voucher_t kv,
		voucher_activity_id_t va_id)
{
	dispatch_once_f(&_voucher_activity_heap_pred, NULL,
			_voucher_activity_heap_init);
	if (_voucher_activity_disabled()) return NULL;
	_voucher_activity_t act = NULL;
	if (dispatch_assume(va_id)) {
		if ((act = _voucher_activity_copy_from_id(va_id))) return act;
	}
	atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv);
	if (!dispatch_assume(atm_id)) return NULL;
	_voucher_activity_buffer_header_t buffer;
	buffer = _voucher_activity_heap_buffer_alloc();
	if (!buffer) return NULL;
	_dispatch_kvoucher_debug("atm copy/create from <%lld>", kv, atm_id);
	_voucher_atm_t vatm = _voucher_atm_copy(atm_id);
	if (!vatm) vatm = _voucher_atm_create(kv, atm_id);
	if (!vatm) {
		_voucher_activity_heap_buffer_free(buffer);
		return NULL;
	}
	// discard ids that don't belong to this ATM
	if (VACTID_BASEID(va_id) != VATMID2ACTID(atm_id)) va_id = 0;
	// consumes vatm reference:
	act = _voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer);
	_dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv);
	return act;
}

#pragma mark -
#pragma mark _voucher_atm_mailbox

// Claim a free ATM mailbox slot; returns its byte offset into the kernel
// metadata area, or MAILBOX_OFFSET_UNSET when none is available.
DISPATCH_ALWAYS_INLINE
static inline atm_mailbox_offset_t
_voucher_atm_mailbox_alloc(void)
{
	atm_mailbox_offset_t mailbox_offset = MAILBOX_OFFSET_UNSET;
	size_t index;
	index = _voucher_activity_bitmap_set_first_unset_bit(vam_atm_mbox_bitmap());
	if (index < NO_BITS_WERE_UNSET) {
		mailbox_offset = index * sizeof(atm_mailbox_offset_t);
#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
		_dispatch_debug("mailbox alloc %zd (%lld)", index, mailbox_offset);
#endif
	}
	return mailbox_offset;
}

// Return a mailbox slot (identified by byte offset) to the bitmap.
DISPATCH_ALWAYS_INLINE
static inline void
_voucher_atm_mailbox_free(atm_mailbox_offset_t mailbox_offset)
{
	if (mailbox_offset == MAILBOX_OFFSET_UNSET) return;
	size_t index = (size_t)mailbox_offset / sizeof(atm_mailbox_offset_t);
#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG
	_dispatch_debug("mailbox free %zd
(%lld)", index, mailbox_offset); +#endif + _voucher_activity_bitmap_clear_bit(vam_atm_mbox_bitmap(), index); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_atm_mailbox_set(atm_mailbox_offset_t mailbox_offset, + atm_subaid32_t subaid, bool max_present) +{ + if (mailbox_offset == MAILBOX_OFFSET_UNSET) return false; + char *mailbox_base = (char*)_voucher_activity_heap->vam_kernel_metadata; + atm_subaid32_t *mailbox = (atm_subaid32_t*)(mailbox_base + mailbox_offset); + if (max_present) mailbox++; // second atm_subaid32_t in atm_mailbox_offset_t + if (*mailbox == subaid) return false; + *mailbox = subaid; + return true; +} + +#pragma mark - +#pragma mark _voucher_atm_t + +static bool _voucher_atm_try_remove(_voucher_atm_t vatm); +static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister); +static inline void _voucher_atm_collect_if_needed(bool updated); + +DISPATCH_ALWAYS_INLINE +static inline _voucher_atm_t +_voucher_atm_retain(_voucher_atm_t vatm) +{ + // assumes vam_atms_lock or vam_base_atm_lock held + int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed); + _dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1); + if (slowpath(refcnt < 0)) { + _dispatch_voucher_atm_debug("overrelease", vatm); + DISPATCH_CRASH("ATM overrelease"); + } + return vatm; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_atm_release(_voucher_atm_t vatm) +{ + int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); + _dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1); + if (fastpath(refcnt >= 0)) { + return; + } + if (slowpath(refcnt < -1)) { + _dispatch_voucher_atm_debug("overrelease", vatm); + DISPATCH_CRASH("ATM overrelease"); + } + if (_voucher_atm_try_remove(vatm)) { + _voucher_atm_dispose(vatm, true); + } +} + +static _voucher_atm_t +_voucher_atm_find(atm_aid_t atm_id, uint32_t hash) +{ + // assumes vam_atms_lock held + _voucher_atm_t vatm; + TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){ + if (vatm->vatm_id == 
atm_id) break; + } + return vatm; +} + +static _voucher_atm_t +_voucher_atm_copy(atm_aid_t atm_id) +{ + uint32_t hash = VATMID_HASH(atm_id); + _voucher_activity_lock_lock(vam_atms_lock()); + _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash); + if (vatm) { + _voucher_atm_retain(vatm); + _dispatch_voucher_atm_debug("copy", vatm); + } + _voucher_activity_lock_unlock(vam_atms_lock()); + return vatm; +} + +static _voucher_atm_t +_voucher_atm_try_insert(_voucher_atm_t vatm_new) +{ + atm_aid_t atm_id = vatm_new->vatm_id; + uint32_t hash = VATMID_HASH(atm_id); + _voucher_activity_lock_lock(vam_atms_lock()); + _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash); + if (vatm) { + _voucher_atm_retain(vatm); + _dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new); + } else { + if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) { + _dispatch_voucher_atm_debug("corruption", vatm_new); + DISPATCH_CRASH("ATM corruption"); + } + TAILQ_INSERT_TAIL(vam_atms(hash), vatm_new, vatm_list); + _dispatch_voucher_atm_debug("try insert: succeeded", vatm_new); + } + _voucher_activity_lock_unlock(vam_atms_lock()); + return vatm; +} + +static bool +_voucher_atm_try_remove(_voucher_atm_t vatm) +{ + bool r; + atm_aid_t atm_id = vatm->vatm_id; + uint32_t hash = VATMID_HASH(atm_id); + _voucher_activity_lock_lock(vam_atms_lock()); + if (slowpath(!atm_id)) { + _dispatch_voucher_atm_debug("corruption", vatm); + DISPATCH_CRASH("ATM corruption"); + } + if ((r = (dispatch_atomic_load2o(vatm, vatm_refcnt, seq_cst) < 0 && + _TAILQ_IS_ENQUEUED(vatm, vatm_list)))) { + TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list); + _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list); + vatm->vatm_list.tqe_next = (void*)~0ull; + } + _dispatch_voucher_atm_debug("try remove: %s", vatm, r ? 
"succeeded" : + "failed"); + _voucher_activity_lock_unlock(vam_atms_lock()); + return r; +} + +static bool +_voucher_atm_update_mailbox(_voucher_atm_t vatm) +{ + // Update kernel mailbox with largest allocated subaid for this atm_id + // assumes atm_activities_lock held + _voucher_activity_t act = TAILQ_LAST(vatm_activities(vatm), + _voucher_atm_activities_s); + atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : 0; + bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, true); + if (r) { + _dispatch_voucher_atm_debug("update max-present subaid 0x%x", vatm, + subaid); + } + return r; +} + +static bool +_voucher_atm_update_used_mailbox(_voucher_atm_t vatm) +{ + // Update kernel mailbox with smallest in-use subaid for this atm_id + // assumes atm_activities_lock held + _voucher_activity_t act = TAILQ_FIRST(vatm_used_activities(vatm)); + atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : ATM_SUBAID32_MAX; + bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, false); + if (r) { + _dispatch_voucher_atm_debug("update min-used subaid 0x%x", vatm, + subaid); + } + return r; +} + +static void +_voucher_atm_activity_insert(_voucher_atm_t vatm, _voucher_activity_t act) +{ + _voucher_activity_lock_lock(vatm_activities_lock(vatm)); + if (!_TAILQ_IS_ENQUEUED(act, va_atm_list)) { + _voucher_activity_ordered_insert(act, vatm_activities(vatm), + va_atm_list); + _voucher_atm_update_mailbox(vatm); + } + if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { + _voucher_activity_ordered_insert(act, vatm_used_activities(vatm), + va_atm_used_list); + _voucher_atm_update_used_mailbox(vatm); + } + _dispatch_voucher_activity_debug("atm insert", act); + _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); +} + +static void +_voucher_atm_activity_remove(_voucher_activity_t act) +{ + _voucher_atm_t vatm = act->va_atm; + _voucher_activity_lock_lock(vatm_activities_lock(vatm)); + _dispatch_voucher_activity_debug("atm remove", act); + if 
(_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { + TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_used_list); + _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); + _voucher_atm_update_used_mailbox(vatm); + } + if (_TAILQ_IS_ENQUEUED(act, va_atm_list)) { + TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_list); + _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list); + _voucher_atm_update_mailbox(vatm); + // Balance initial creation refcnt. Caller must hold additional + // reference to ensure this does not release vatm before the unlock, + // see _voucher_atm_activity_collect + _voucher_activity_atm_release(act); + } + _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); +} + +static _voucher_activity_t +_voucher_atm_activity_mark_used(_voucher_activity_t act) +{ + _voucher_atm_t vatm = act->va_atm; + _voucher_activity_lock_lock(vatm_activities_lock(vatm)); + if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { + _voucher_activity_ordered_insert(act, vatm_used_activities(vatm), + va_atm_used_list); + _voucher_atm_update_used_mailbox(vatm); + _dispatch_voucher_activity_debug("mark used", act); + } + _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); + return act; +} + +static void +_voucher_atm_activity_mark_unused(_voucher_activity_t act) +{ + bool atm_collect = false, updated = false; + _voucher_atm_t vatm = act->va_atm; + _voucher_activity_lock_lock(vatm_activities_lock(vatm)); + if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { + _dispatch_voucher_activity_debug("mark unused", act); + TAILQ_REMOVE(&vatm->vatm_used_activities, act, va_atm_used_list); + _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); + atm_collect = true; + _voucher_atm_retain(vatm); + updated = _voucher_atm_update_used_mailbox(vatm); + } + _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); + if (atm_collect) { + _voucher_atm_release(vatm); + _voucher_atm_collect_if_needed(updated); + } +} + +static void +_voucher_atm_activity_collect(_voucher_atm_t vatm, atm_subaid32_t min_subaid) +{ + 
_dispatch_voucher_atm_debug("collect min subaid 0x%x", vatm, min_subaid); + voucher_activity_id_t min_va_id = VATM_ACTID(vatm, min_subaid); + _voucher_activity_t act; + do { + _voucher_activity_lock_lock(vatm_activities_lock(vatm)); + TAILQ_FOREACH(act, vatm_activities(vatm), va_atm_list) { + if (act->va_id >= min_va_id) { + act = NULL; + break; + } + if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { + _voucher_activity_atm_retain(act); + break; + } + } + _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); + if (act) { + _voucher_activity_collect(act); + _voucher_activity_atm_release(act); + } + } while (act); +} + +DISPATCH_NOINLINE +static void +_voucher_atm_collect(void) +{ + _voucher_atm_t vatms[_voucher_atm_mailboxes], vatm; + atm_aid_t aids[_voucher_atm_mailboxes]; + mach_atm_subaid_t subaids[_voucher_atm_mailboxes]; + uint32_t i, a = 0, s; + + _voucher_activity_lock_lock(vam_atms_lock()); + for (i = 0; i < _voucher_activity_hash_size; i++) { + TAILQ_FOREACH(vatm, vam_atms(i), vatm_list){ + if (vatm == _voucher_activity_heap->vam_default_activity_atm || + vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) continue; + _dispatch_voucher_atm_debug("find min subaid", vatm); + vatms[a] = _voucher_atm_retain(vatm); + aids[a] = vatm->vatm_id; + if (++a == _voucher_atm_mailboxes) goto out; + } + } +out: + _voucher_activity_lock_unlock(vam_atms_lock()); + if (!a) return; + kern_return_t kr; + mach_voucher_t kv = vatms[0]->vatm_kvoucher; + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&aids; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(atm_aid_t) * a; + mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaids; + mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t)*a; + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, + ATM_FIND_MIN_SUB_AID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + s = kvc_out_size / 
sizeof(mach_atm_subaid_t); +#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG + _dispatch_debug("found min subaids (%u out of %u)", s, a); +#endif + for (i = 0; i < a; i++) { + if (i < s) _voucher_atm_activity_collect(vatms[i], + (atm_subaid32_t)subaids[i]); + _voucher_atm_release(vatms[i]); + } +} + +static inline void +_voucher_atm_collect_if_needed(bool updated) +{ + long level; + if (updated) { + level = dispatch_atomic_add(&_voucher_atm_collect_level, 2ul, relaxed); + } else { + level = _voucher_atm_collect_level; + if (!level) return; + } + if (level & 1 || level <= _voucher_atm_collect_threshold) return; + if (!dispatch_atomic_cmpxchg(&_voucher_atm_collect_level, level, level + 1, + acquire)) return; +#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG + _dispatch_debug("atm collect: reached level %ld", level/2); +#endif + if (slowpath(level < 0)) { + DISPATCH_CRASH("ATM collection level corruption"); + } + _voucher_atm_collect(); + dispatch_atomic_sub(&_voucher_atm_collect_level, level + 1, release); +} + +DISPATCH_NOINLINE +static void +_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd) +{ + _voucher_activity_t act = _voucher_activity_get(_voucher_get()); + mach_voucher_t kv = _voucher_activity_get_atm_mach_voucher(act); + if (!kv) return; + + kern_return_t kr; + mach_atm_subaid_t subaid = VACTID_SUBID(act->va_id); + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t); + mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid; + mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t); + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, + kvc_cmd, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +static atm_aid_t +_voucher_mach_voucher_get_atm_id(mach_voucher_t kv) +{ + kern_return_t kr; + atm_aid_t atm_id = 0; + mach_voucher_attr_content_t 
kvc = (mach_voucher_attr_content_t)&atm_id; + mach_voucher_attr_content_size_t kvc_size = sizeof(atm_id); + kr = mach_voucher_extract_attr_content(kv, MACH_VOUCHER_ATTR_KEY_ATM, kvc, + &kvc_size); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + return atm_id; +} + +static mach_voucher_t +_voucher_atm_mach_voucher_create(atm_aid_t *atm_id_ptr) +{ + kern_return_t kr; + mach_voucher_t kv; + static const mach_voucher_attr_recipe_data_t atm_create_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_ATM, + .command = MACH_VOUCHER_ATTR_ATM_CREATE, + }; + kr = _voucher_create_mach_voucher(&atm_create_recipe, + sizeof(atm_create_recipe), &kv); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH("Could not create ATM mach voucher"); + } + atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv); + if (!dispatch_assume(atm_id)) { + DISPATCH_CLIENT_CRASH("Could not extract ATM ID"); + } + _dispatch_kvoucher_debug("atm create <%lld>", kv, atm_id); + *atm_id_ptr = atm_id; + return kv; +} + +static void +_voucher_atm_mailbox_mach_voucher_register(_voucher_atm_t vatm, + mach_voucher_t kv) +{ + _dispatch_voucher_atm_debug("mailbox register %lld with kvoucher[0x%08x]", + vatm, vatm->vatm_mailbox_offset, kv); + kern_return_t kr; + mach_voucher_t akv; + atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset; + mach_voucher_attr_recipe_t vr; + size_t vr_size; + static const mach_voucher_attr_recipe_data_t atm_register_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_ATM, + .command = MACH_VOUCHER_ATTR_ATM_REGISTER, + .content_size = sizeof(offset), + }; + vr_size = sizeof(atm_register_recipe) + atm_register_recipe.content_size; + vr = alloca(vr_size); + *vr = atm_register_recipe; + vr->previous_voucher = kv; + memcpy(&vr->content, &offset, sizeof(offset)); + kr = _voucher_create_mach_voucher(vr, vr_size, &akv); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH("Could not register ATM ID"); + } + if (!vatm->vatm_kvoucher) { + vatm->vatm_kvoucher = akv; + } else { +#if 
!RDAR_17510224 + if (akv != vatm->vatm_kvoucher) { + DISPATCH_CRASH("Unexpected mach voucher returned by ATM ID " + "registration"); + } + _voucher_dealloc_mach_voucher(akv); +#else + DISPATCH_CRASH("Registered invalid ATM object"); +#endif + } + _dispatch_voucher_atm_debug("mailbox registered %lld", vatm, + vatm->vatm_mailbox_offset); +} + +static void +_voucher_atm_mailbox_register(_voucher_atm_t vatm) +{ + mach_voucher_t kv = vatm->vatm_kvoucher; + if (!kv) return; +#if !RDAR_17510224 + _voucher_atm_mailbox_mach_voucher_register(vatm, kv); +#else // RDAR_17510224 + _dispatch_voucher_atm_debug("mailbox register %lld", vatm, + vatm->vatm_mailbox_offset); + kern_return_t kr; + atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset; + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset); + mach_voucher_attr_content_t kvc_out = NULL; + mach_voucher_attr_content_size_t kvc_out_size = 0; + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, + ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out, + &kvc_out_size); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + DISPATCH_CLIENT_CRASH("Could not register ATM ID"); + } + _dispatch_voucher_atm_debug("mailbox registered %lld", vatm, + vatm->vatm_mailbox_offset); +#endif // RDAR_17510224 +} + +static bool +_voucher_atm_mailbox_unregister(_voucher_atm_t vatm) +{ + if (vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) return false; + _dispatch_voucher_atm_debug("mailbox unregister %lld", vatm, + vatm->vatm_mailbox_offset); + mach_voucher_t kv = vatm->vatm_kvoucher; + dispatch_assert(kv); + kern_return_t kr; + atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset; + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset); + mach_voucher_attr_content_t kvc_out = NULL; + mach_voucher_attr_content_size_t kvc_out_size = 0; + kr = 
mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, + ATM_ACTION_UNREGISTER, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); + DISPATCH_VERIFY_MIG(kr); + if (kr && kr != KERN_INVALID_VALUE) { + (void)dispatch_assume_zero(kr); + DISPATCH_CLIENT_CRASH("Could not unregister ATM ID"); + } + _dispatch_voucher_atm_debug("mailbox unregistered %lld", vatm, + vatm->vatm_mailbox_offset); + return true; +} + +static _voucher_atm_t +_voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id) +{ + atm_mailbox_offset_t mailbox_offset = _voucher_atm_mailbox_alloc(); + if (kv && mailbox_offset == MAILBOX_OFFSET_UNSET) return NULL; + _voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s)); + if (!kv) { + kv = _voucher_atm_mach_voucher_create(&atm_id); + if (mailbox_offset == MAILBOX_OFFSET_UNSET) { + _voucher_dealloc_mach_voucher(kv); + } else { + vatm->vatm_kvoucher = kv; + } + kv = MACH_VOUCHER_NULL; + } + vatm->vatm_id = atm_id; + vatm->vatm_mailbox_offset = mailbox_offset; + _voucher_activity_lock_init(vatm_activities_lock(vatm)); + TAILQ_INIT(&vatm->vatm_activities); + TAILQ_INIT(&vatm->vatm_used_activities); + _voucher_atm_mailbox_set(mailbox_offset, 0, true); + _voucher_atm_mailbox_set(mailbox_offset, ATM_SUBAID32_MAX, false); + _voucher_atm_t vatmx = _voucher_atm_try_insert(vatm); + if (vatmx) { + _voucher_atm_dispose(vatm, false); + vatm = vatmx; + } else if (kv) { + _voucher_atm_mailbox_mach_voucher_register(vatm, kv); + } else { + _voucher_atm_mailbox_register(vatm); + } + _dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv); + return vatm; +} + +static void +_voucher_atm_dispose(_voucher_atm_t vatm, bool unregister) +{ + _dispatch_voucher_atm_debug("dispose", vatm); + dispatch_assert(TAILQ_EMPTY(&vatm->vatm_activities)); + dispatch_assert(TAILQ_EMPTY(&vatm->vatm_used_activities)); + if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { + _dispatch_voucher_atm_debug("corruption", vatm); + DISPATCH_CRASH("ATM corruption"); + } + 
vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS; + bool free_mailbox = (vatm->vatm_mailbox_offset != MAILBOX_OFFSET_UNSET); + if (vatm->vatm_kvoucher) { + if (unregister) free_mailbox = _voucher_atm_mailbox_unregister(vatm); + _voucher_dealloc_mach_voucher(vatm->vatm_kvoucher); + vatm->vatm_kvoucher = MACH_VOUCHER_NULL; + } + if (free_mailbox) { + _voucher_atm_mailbox_free(vatm->vatm_mailbox_offset); + vatm->vatm_mailbox_offset = MAILBOX_OFFSET_UNSET; + } + free(vatm); +} + +static inline mach_voucher_t +_voucher_activity_get_atm_mach_voucher(_voucher_activity_t act) +{ + mach_voucher_t kv; + kv = act && act->va_atm ? act->va_atm->vatm_kvoucher : MACH_VOUCHER_NULL; + return kv; +} + +DISPATCH_NOINLINE +static _voucher_atm_t +_voucher_atm_base_copy_and_activity_id_make(voucher_activity_id_t *va_id_ptr) +{ + _voucher_atm_subid_t subid; + _voucher_atm_t vatm, vatm_old = NULL, vatm_new = NULL; + if (_voucher_activity_heap->vam_base_atm_subid_max == 1) { + vatm = _voucher_atm_create(0, 0); + subid = 1; + goto out; + } + _voucher_activity_lock_lock(vam_base_atm_lock()); + vatm = _voucher_activity_heap->vam_base_atm; +retry: + _voucher_atm_retain(vatm); + subid = _voucher_activity_heap->vam_base_atm_subid; + if (subid++ >= _voucher_activity_heap->vam_base_atm_subid_max) { + _voucher_activity_lock_unlock(vam_base_atm_lock()); + if (!vatm_new) vatm_new = _voucher_atm_create(0, 0); + _voucher_activity_lock_lock(vam_base_atm_lock()); + _voucher_atm_release(vatm); + vatm_old = vatm; + vatm = _voucher_activity_heap->vam_base_atm; + if (vatm != vatm_old) { + vatm_old = NULL; + goto retry; + } + _voucher_activity_heap->vam_base_atm = vatm = vatm_new; + _voucher_activity_heap->vam_base_atm_subid = subid = 1; + vatm_new = NULL; + _voucher_atm_retain(vatm); + _dispatch_voucher_atm_debug("base replace", vatm); + } else { + _voucher_activity_heap->vam_base_atm_subid = subid; + _dispatch_voucher_atm_debug("base copy", vatm); + } + _voucher_activity_lock_unlock(vam_base_atm_lock()); 
+ if (vatm_old) _voucher_atm_release(vatm_old); + if (vatm_new) _voucher_atm_release(vatm_new); +out: + *va_id_ptr = VATM_ACTID(vatm, subid); + return vatm; +} + +static voucher_activity_id_t +_voucher_atm_nested_atm_id_make(void) +{ + atm_aid_t atm_id; + mach_voucher_t kv = _voucher_atm_mach_voucher_create(&atm_id); + _voucher_dealloc_mach_voucher(kv); // just need the unique ID + return VATMID2ACTID(atm_id); +} + +static voucher_activity_id_t +_voucher_atm_nested_activity_id_make(void) +{ + voucher_activity_id_t va_id, va_id_old, va_id_new; + _voucher_atm_subid_t subid; + _voucher_activity_lock_lock(vam_nested_atm_lock()); + va_id = _voucher_activity_heap->vam_nested_atm_id; +retry: + subid = _voucher_activity_heap->vam_nested_atm_subid; + if (subid++ >= VATM_SUBID_MAX) { + _voucher_activity_lock_unlock(vam_nested_atm_lock()); + va_id_new = _voucher_atm_nested_atm_id_make(); + va_id_old = va_id; + _voucher_activity_lock_lock(vam_nested_atm_lock()); + va_id = _voucher_activity_heap->vam_nested_atm_id; + if (va_id != va_id_old) goto retry; + _voucher_activity_heap->vam_nested_atm_id = va_id = va_id_new; + subid = 1; + } + _voucher_activity_heap->vam_nested_atm_subid = subid; + _voucher_activity_lock_unlock(vam_nested_atm_lock()); + return va_id + subid; +} + +#pragma mark - +#pragma mark voucher_activity_id_t + +voucher_activity_id_t +voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, + uint64_t location, voucher_activity_flag_t flags) +{ + dispatch_once_f(&_voucher_activity_heap_pred, NULL, + _voucher_activity_heap_init); + if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; + voucher_activity_id_t va_id = 0, va_base_id = 0; + _voucher_atm_t vatm = NULL; + _voucher_activity_t act = NULL; + _voucher_activity_tracepoint_t vat = NULL; + unsigned int activities = 1, oactivities = 0; + voucher_t ov = _voucher_get(); + if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) { + oactivities = ov->v_activities; + activities 
+= oactivities; + if (activities > _voucher_max_activities) { + va_id = _voucher_atm_nested_activity_id_make(); + goto out; + } + } + if (activities == 1) { + vatm = _voucher_atm_base_copy_and_activity_id_make(&va_id); + if (vatm->vatm_kvoucher) { + // consumes vatm reference: + act = _voucher_activity_create_with_atm(vatm, va_id, trace_id, + location, NULL); + vat = (_voucher_activity_tracepoint_t)act; + } else { + _voucher_atm_release(vatm); + } + if (!act) { + activities++; + // default to _voucher_activity_default base activity + va_base_id = _voucher_activity_default->va_id; + } + } + pthread_priority_t priority = _voucher_get_priority(ov); + mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0; + voucher_t v = _voucher_alloc(activities, priority, extra); + if (extra) { + memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); + } + if (ov && ov->v_kvoucher) { + voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; + v->v_kvbase = _voucher_retain(kvb); + v->v_kvoucher = kvb->v_kvoucher; + } + voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); + if (oactivities) { + memcpy(activity_ids, _voucher_activity_ids(ov), + oactivities * sizeof(voucher_activity_id_t)); + } + if (!va_id) { + va_id = _voucher_atm_nested_activity_id_make(); + if (ov && ov->v_activity) { + act = _voucher_activity_retain(ov->v_activity); + } + } + if (va_base_id) activity_ids[0] = va_base_id; + activity_ids[activities-1] = va_id; + v->v_activity = act; + _voucher_swap(ov, v); + if (vat) return va_id; // new _voucher_activity_s contains trace info +out: + vat = _voucher_activity_trace_with_id(trace_id); + if (vat) { + vat->vat_flags |= _voucher_activity_trace_flag_activity | + _voucher_activity_trace_flag_start; + vat->vat_data[0] = va_id; + } + return va_id; +} + +voucher_activity_id_t +voucher_activity_start(voucher_activity_trace_id_t trace_id, + voucher_activity_flag_t flags) +{ + return voucher_activity_start_with_location(trace_id, 0, flags); 
+} + +void +voucher_activity_end(voucher_activity_id_t va_id) +{ + if (!va_id) return; + _voucher_activity_tracepoint_t vat; + vat = _voucher_activity_trace_with_id(_voucher_activity_trace_id_release); + if (vat) { + vat->vat_flags |= _voucher_activity_trace_flag_activity | + _voucher_activity_trace_flag_end; + vat->vat_data[0] = va_id; + } + voucher_t v = _voucher_get(); + if (!v) return; + unsigned int activities = v->v_activities, act_idx = activities; + voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); + while (act_idx) { + if (activity_ids[act_idx-1] == va_id) break; + act_idx--; + } + if (!act_idx) return; // activity_id not found + pthread_priority_t priority = _voucher_get_priority(v); + mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); + voucher_t nv = NULL; + if (act_idx > 1 || activities == 1) --activities; + if (priority || activities || extra || v->v_kvoucher) { + nv = _voucher_alloc(activities, priority, extra); + if (extra) { + memcpy(_voucher_extra_recipes(nv), _voucher_extra_recipes(v),extra); + } + } + if (v->v_kvoucher) { + voucher_t kvb = v->v_kvbase ? 
v->v_kvbase : v; + nv->v_kvbase = _voucher_retain(kvb); + nv->v_kvoucher = kvb->v_kvoucher; + } + bool atm_collect = !activities; + if (activities) { + voucher_activity_id_t *new_activity_ids = _voucher_activity_ids(nv); + if (act_idx == 1 && _voucher_activity_default) { + atm_collect = true; + // default to _voucher_activity_default base activity + new_activity_ids[0] = _voucher_activity_default->va_id; + memcpy(&new_activity_ids[1], &activity_ids[1], + (activities - 1) * sizeof(voucher_activity_id_t)); + } else { + if (v->v_activity) { + nv->v_activity = _voucher_activity_retain(v->v_activity); + } + memcpy(new_activity_ids, activity_ids, + --act_idx * sizeof(voucher_activity_id_t)); + if (act_idx < activities) { + memcpy(&new_activity_ids[act_idx], &activity_ids[act_idx+1], + (activities - act_idx) * sizeof(voucher_activity_id_t)); + } + } + } + _voucher_swap(v, nv); +} + +unsigned int +voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) +{ + voucher_t v = _voucher_get(); + if (!v || !count) return 0; + unsigned int activities = v->v_activities; + if (*count < activities) activities = *count; + *count = v->v_activities; + voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); + if (activities && entries) { + memcpy(entries, activity_ids, activities * + sizeof(voucher_activity_id_t)); + } + return activities; +} + +uint8_t +voucher_activity_get_namespace(void) +{ + voucher_t v = _voucher_get(); + if (!v || !v->v_activity) return 0; + return v->v_activity->va_namespace; +} + +DISPATCH_NOINLINE +_voucher_activity_tracepoint_t +_voucher_activity_tracepoint_get_slow(unsigned int slots) +{ + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; + _voucher_activity_tracepoint_t vat = NULL; + voucher_t v = _voucher_get(); + if (v && v->v_activity) { + act = v->v_activity; + } else { + dispatch_once_f(&_voucher_activity_heap_pred, NULL, + _voucher_activity_heap_init); + if (_voucher_activity_disabled()) return NULL; + act = 
_voucher_activity_default; + } + vab = act->va_current_buffer; + if (vab && vab->vabh_next_tracepoint_idx <= + _voucher_activity_tracepoints_per_buffer) { + goto retry; // another slowpath raced us + } + do { + vab = _voucher_activity_buffer_alloc(act, vab); + if (!vab) break; +retry: + vat = _voucher_activity_buffer_tracepoint_get(vab, slots); + } while (!vat); + return vat; +} + +static inline void +_voucher_activity_trace_fault(voucher_activity_trace_id_t trace_id) +{ + if (!slowpath(_voucher_activity_trace_id_is_subtype(trace_id, error))) { + return; + } + mach_voucher_attr_command_t atm_cmd = ATM_ACTION_COLLECT; + if (_voucher_activity_trace_id_is_subtype(trace_id, fault)) { + atm_cmd = ATM_ACTION_LOGFAIL; + } + return _voucher_atm_fault(atm_cmd); +} + +uint64_t +voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, + void *buffer, size_t length) +{ + if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; + _voucher_activity_tracepoint_t vat; + const unsigned int slots = length <= sizeof(vat->vat_data) ? 
1 : 2; + vat = _voucher_activity_tracepoint_get(slots); + if (!vat) vat = _voucher_activity_tracepoint_get_slow(slots); + if (!vat) return 0; + uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, + trace_id, location); + void *tbuf = vat->vat_data; + size_t tlen = sizeof(vat->vat_data); + if (length < tlen) { + memcpy(tbuf, buffer, length); + } else { + memcpy(tbuf, buffer, tlen); + } + if (length > tlen) { + vat->vat_flags |= _voucher_activity_trace_flag_wide_first; + buffer += tlen; + length -= tlen; + (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | + _voucher_activity_trace_flag_wide_second; + vat->vat_type = 0; vat->vat_namespace = 0; + tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); + tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); + if (length < tlen) { + memcpy(tbuf, buffer, length); + } else { + memcpy(tbuf, buffer, tlen); + } + } + _voucher_activity_trace_fault(trace_id); + return timestamp; +} + +uint64_t +voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, + uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, + uintptr_t arg4) +{ + if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; + _voucher_activity_tracepoint_t vat; + vat = _voucher_activity_tracepoint_get(1); + if (!vat) vat = _voucher_activity_tracepoint_get_slow(1); + if (!vat) return 0; + uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, + trace_id, location); + vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; + vat->vat_data[0] = arg1; + vat->vat_data[1] = arg2; + vat->vat_data[2] = arg3; + vat->vat_data[3] = arg4; + _voucher_activity_trace_fault(trace_id); + return timestamp; +} + +#pragma mark - +#pragma mark _voucher_debug + +size_t +_voucher_debug(voucher_t v, char* buf, size_t bufsiz) +{ + size_t offset = 0; + #define bufprintf(...) 
\ + offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__) + bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x, ", v, + v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1); + + if (v->v_kvbase) { + bufprintf("base voucher %p, ", v->v_kvbase); + } + if (v->v_kvoucher) { + bufprintf("kvoucher%s 0x%x, ", v->v_kvoucher == v->v_ipc_kvoucher ? + " & ipc kvoucher" : "", v->v_kvoucher); + } + if (v->v_ipc_kvoucher && v->v_ipc_kvoucher != v->v_kvoucher) { + bufprintf("ipc kvoucher 0x%x, ", v->v_ipc_kvoucher); + } + if (v->v_has_priority) { + bufprintf("QOS 0x%x, ", *_voucher_priority(v)); + } + if (v->v_activities) { + voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); + bufprintf("activity IDs = { "); + unsigned int i; + for (i = 0; i < v->v_activities; i++) { + bufprintf("0x%llx, ", *activity_ids++); + } + bufprintf("}, "); + } + if (v->v_activity) { + _voucher_activity_t va = v->v_activity; + _voucher_atm_t vatm = va->va_atm; + bufprintf("activity[%p] = { ID 0x%llx, use %d, atm[%p] = { " + "AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id, + va->va_use_count + 1, va->va_atm, vatm->vatm_id, + vatm->vatm_refcnt + 1, vatm->vatm_kvoucher); + } + bufprintf("}"); + return offset; +} + +#else // VOUCHER_USE_MACH_VOUCHER + +#pragma mark - +#pragma mark Simulator / vouchers disabled + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +voucher_t +voucher_create(voucher_recipe_t recipe) +{ + (void)recipe; + return NULL; +} +#endif + +voucher_t +voucher_adopt(voucher_t voucher) +{ + return voucher; +} + +voucher_t +voucher_copy(void) +{ + return NULL; +} + +voucher_t +voucher_copy_without_importance(void) +{ + return NULL; +} + +void +voucher_replace_default_voucher(void) +{ +} + +void +voucher_decrement_importance_count4CF(voucher_t v) +{ + (void)v; +} + +void +_voucher_thread_cleanup(void *voucher) +{ + (void)voucher; +} + +void +_voucher_dealloc_mach_voucher(mach_voucher_t kv) +{ + (void)kv; +} + +mach_voucher_t 
+_voucher_create_mach_voucher_with_priority(voucher_t voucher, + pthread_priority_t priority) +{ + (void)voucher; (void)priority; + return MACH_VOUCHER_NULL; +} + +voucher_t +_voucher_create_with_priority_and_mach_voucher(voucher_t voucher, + pthread_priority_t priority, mach_voucher_t kv) +{ + (void)voucher; (void)priority; (void)kv; + return NULL; +} + +voucher_t +voucher_create_with_mach_msg(mach_msg_header_t *msg) +{ + (void)msg; + return NULL; +} + +#if VOUCHER_ENABLE_GET_MACH_VOUCHER +mach_voucher_t +voucher_get_mach_voucher(voucher_t voucher) +{ + (void)voucher; + return 0; +} +#endif + +void +_voucher_xref_dispose(voucher_t voucher) +{ + (void)voucher; +} + +void +_voucher_dispose(voucher_t voucher) +{ + (void)voucher; +} + +void +_voucher_atfork_child(void) +{ +} + +void +_voucher_init(void) +{ +} + +void* +voucher_activity_get_metadata_buffer(size_t *length) +{ + *length = 0; + return NULL; +} + +void +_voucher_activity_heap_pressure_normal(void) +{ +} + +void +_voucher_activity_heap_pressure_warn(void) +{ +} + +voucher_activity_id_t +voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, + uint64_t location, voucher_activity_flag_t flags) +{ + (void)trace_id; (void)location; (void)flags; + return 0; +} + +voucher_activity_id_t +voucher_activity_start(voucher_activity_trace_id_t trace_id, + voucher_activity_flag_t flags) +{ + (void)trace_id; (void)flags; + return 0; +} + +void +voucher_activity_end(voucher_activity_id_t activity_id) +{ + (void)activity_id; +} + +unsigned int +voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) +{ + (void)entries; (void)count; + return 0; +} + +uint8_t +voucher_activity_get_namespace(void) +{ + return 0; +} + +uint64_t +voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, + void *buffer, size_t length) +{ + (void)trace_id; (void)location; (void)buffer; (void)length; + return 0; +} + +uint64_t +voucher_activity_trace_args(voucher_activity_trace_id_t 
trace_id, + uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, + uintptr_t arg4) +{ + (void)trace_id; (void)location; + (void)arg1; (void)arg2; (void)arg3; (void)arg4; + return 0; +} + +size_t +_voucher_debug(voucher_t v, char* buf, size_t bufsiz) +{ + (void)v; (void)buf; (void)bufsiz; + return 0; +} + +#endif // VOUCHER_USE_MACH_VOUCHER diff --git a/src/voucher_internal.h b/src/voucher_internal.h new file mode 100644 index 000000000..6fa1538cf --- /dev/null +++ b/src/voucher_internal.h @@ -0,0 +1,929 @@ +/* + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_VOUCHER_INTERNAL__ +#define __DISPATCH_VOUCHER_INTERNAL__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +#pragma mark - +#pragma mark voucher_recipe_t (disabled) + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +/*! + * @group Voucher Creation SPI + * SPI intended for clients that need to create vouchers. + */ + +#if OS_OBJECT_USE_OBJC +OS_OBJECT_DECL(voucher_recipe); +#else +typedef struct voucher_recipe_s *voucher_recipe_t; +#endif + +/*! 
+ * @function voucher_create + * + * @abstract + * Creates a new voucher object from a recipe. + * + * @discussion + * Error handling TBD + * + * @result + * The newly created voucher object. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW +voucher_t +voucher_create(voucher_recipe_t recipe); +#endif // VOUCHER_ENABLE_RECIPE_OBJECTS + +#if VOUCHER_ENABLE_GET_MACH_VOUCHER +/*! + * @function voucher_get_mach_voucher + * + * @abstract + * Returns the mach voucher port underlying the specified voucher object. + * + * @discussion + * The caller must either maintain a reference on the voucher object while the + * returned mach voucher port is in use to ensure it stays valid for the + * duration, or it must retain the mach voucher port with mach_port_mod_refs(). + * + * @param voucher + * The voucher object to query. + * + * @result + * A mach voucher port. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +mach_voucher_t +voucher_get_mach_voucher(voucher_t voucher); +#endif // VOUCHER_ENABLE_GET_MACH_VOUCHER + +#pragma mark - +#pragma mark voucher_t + +#if TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 +#undef VOUCHER_USE_MACH_VOUCHER +#define VOUCHER_USE_MACH_VOUCHER 0 +#endif +#ifndef VOUCHER_USE_MACH_VOUCHER +#if __has_include() +#define VOUCHER_USE_MACH_VOUCHER 1 +#endif +#endif + +#if VOUCHER_USE_MACH_VOUCHER +#undef DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#else +#undef MACH_RCV_VOUCHER +#define MACH_RCV_VOUCHER 0 +#endif // VOUCHER_USE_MACH_VOUCHER + +void _voucher_init(void); +void _voucher_atfork_child(void); +void _voucher_activity_heap_pressure_warn(void); +void _voucher_activity_heap_pressure_normal(void); +void _voucher_xref_dispose(voucher_t voucher); +void _voucher_dispose(voucher_t voucher); +size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz); +void 
_voucher_thread_cleanup(void *voucher); +mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher); +voucher_t _voucher_create_without_importance(voucher_t voucher); +mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher, + pthread_priority_t priority); +voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher, + pthread_priority_t priority, mach_voucher_t kv); +void _voucher_dealloc_mach_voucher(mach_voucher_t kv); + +#if OS_OBJECT_USE_OBJC +_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher, object) +#if VOUCHER_ENABLE_RECIPE_OBJECTS +_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object) +#endif +#endif + +#define _TAILQ_IS_ENQUEUED(elm, field) \ + ((elm)->field.tqe_prev != NULL) +#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ + do { (elm)->field.tqe_prev = NULL; } while (0) + +#define VOUCHER_NO_MACH_VOUCHER MACH_PORT_DEAD + +#if VOUCHER_USE_MACH_VOUCHER + +#if DISPATCH_DEBUG +#define DISPATCH_VOUCHER_DEBUG 1 +#define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1 +#endif + +typedef struct voucher_s { + _OS_OBJECT_HEADER( + void *os_obj_isa, + os_obj_ref_cnt, + os_obj_xref_cnt); + TAILQ_ENTRY(voucher_s) v_list; + mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference + voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference + _voucher_activity_t v_activity; +#if VOUCHER_ENABLE_RECIPE_OBJECTS + size_t v_recipe_extra_offset; + mach_voucher_attr_recipe_size_t v_recipe_extra_size; +#endif + unsigned int v_has_priority:1; + unsigned int v_activities; + mach_voucher_attr_recipe_data_t v_recipes[]; +} voucher_s; + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +typedef struct voucher_recipe_s { + _OS_OBJECT_HEADER( + const _os_object_class_s *os_obj_isa, + os_obj_ref_cnt, + os_obj_xref_cnt); + size_t vr_allocation_size; + mach_voucher_attr_recipe_size_t volatile vr_size; + mach_voucher_attr_recipe_t vr_data; +} voucher_recipe_s; +#endif + +#define _voucher_recipes_base(r) (r[0]) +#define _voucher_recipes_atm(r) (r[1]) 
+#define _voucher_recipes_bits(r) (r[2]) +#define _voucher_base_recipe(v) (_voucher_recipes_base((v)->v_recipes)) +#define _voucher_atm_recipe(v) (_voucher_recipes_atm((v)->v_recipes)) +#define _voucher_bits_recipe(v) (_voucher_recipes_bits((v)->v_recipes)) +#define _voucher_recipes_size() (3 * sizeof(mach_voucher_attr_recipe_data_t)) + +#if TARGET_OS_EMBEDDED +#define VL_HASH_SIZE 64u // must be a power of two +#else +#define VL_HASH_SIZE 256u // must be a power of two +#endif +#define VL_HASH(kv) (MACH_PORT_INDEX(kv) & (VL_HASH_SIZE - 1)) + +typedef uint32_t _voucher_magic_t; +const _voucher_magic_t _voucher_magic_v1 = 0x0190cefa; // little-endian FACE9001 +#define _voucher_recipes_magic(r) ((_voucher_magic_t*) \ + (_voucher_recipes_bits(r).content)) +#define _voucher_magic(v) _voucher_recipes_magic((v)->v_recipes) +typedef uint32_t _voucher_priority_t; +#define _voucher_recipes_priority(r) ((_voucher_priority_t*) \ + (_voucher_recipes_bits(r).content + sizeof(_voucher_magic_t))) +#define _voucher_priority(v) _voucher_recipes_priority((v)->v_recipes) +#define _voucher_activity_ids(v) ((voucher_activity_id_t*) \ + (_voucher_bits_recipe(v).content + sizeof(_voucher_magic_t) + \ + sizeof(_voucher_priority_t))) +#define _voucher_bits_size(activities) \ + (sizeof(_voucher_magic_t) + sizeof(_voucher_priority_t) + \ + (activities) * sizeof(voucher_activity_id_t)) + +#if VOUCHER_ENABLE_RECIPE_OBJECTS +#define _voucher_extra_size(v) ((v)->v_recipe_extra_size) +#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) +#else +#define _voucher_extra_size(v) 0 +#define _voucher_extra_recipes(v) NULL +#endif + +#if DISPATCH_DEBUG && DISPATCH_VOUCHER_DEBUG +#define _dispatch_voucher_debug(msg, v, ...) \ + _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__) +#define _dispatch_kvoucher_debug(msg, kv, ...) 
\ + _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__) +#define _dispatch_voucher_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_voucher_debug(msg, v, ...) +#define _dispatch_kvoucher_debug(msg, kv, ...) +#define _dispatch_voucher_debug_machport(name) ((void)(name)) +#endif + +#if !(USE_OBJC && __OBJC2__) + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_retain(voucher_t voucher) +{ +#if !DISPATCH_VOUCHER_OBJC_DEBUG + int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); + if (slowpath(xref_cnt <= 0)) { + _dispatch_voucher_debug("resurrection", voucher); + DISPATCH_CRASH("Voucher resurrection"); + } +#else + os_retain(voucher); + _dispatch_voucher_debug("retain -> %d", voucher, + voucher->os_obj_xref_cnt + 1); +#endif // DISPATCH_DEBUG + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release(voucher_t voucher) +{ +#if !DISPATCH_VOUCHER_OBJC_DEBUG + int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); + if (fastpath(xref_cnt >= 0)) { + return; + } + if (slowpath(xref_cnt < -1)) { + _dispatch_voucher_debug("overrelease", voucher); + DISPATCH_CRASH("Voucher overrelease"); + } + return _os_object_xref_dispose((_os_object_t)voucher); +#else + _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); + return os_release(voucher); +#endif // DISPATCH_DEBUG +} + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_get(void) +{ + return _dispatch_thread_getspecific(dispatch_voucher_key); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_voucher_copy(void) +{ + voucher_t voucher = _voucher_get(); + if (voucher) _voucher_retain(voucher); + return voucher; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t 
+_voucher_copy_without_importance(void) +{ + voucher_t voucher = _voucher_get(); + if (voucher) voucher = _voucher_create_without_importance(voucher); + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_mach_voucher_set(mach_voucher_t kv) +{ + if (kv == VOUCHER_NO_MACH_VOUCHER) return; + _dispatch_set_priority_and_mach_voucher(0, kv); +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) +{ + if (ov == voucher) return VOUCHER_NO_MACH_VOUCHER; + _dispatch_voucher_debug("swap from voucher[%p]", voucher, ov); + _dispatch_thread_setspecific(dispatch_voucher_key, voucher); + mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; + mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL; + return (kv != okv) ? kv : VOUCHER_NO_MACH_VOUCHER; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_swap(voucher_t ov, voucher_t voucher) +{ + _voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher)); + if (ov) _voucher_release(ov); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_voucher_adopt(voucher_t voucher) +{ + voucher_t ov = _voucher_get(); + _voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher)); + return ov; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_replace(voucher_t voucher) +{ + voucher_t ov = _voucher_get(); + _voucher_swap(ov, voucher); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_clear(void) +{ + _voucher_replace(NULL); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_voucher_get_priority(voucher_t voucher) +{ + return voucher && voucher->v_has_priority ? 
+ (pthread_priority_t)*_voucher_priority(voucher) : 0; +} + +void _voucher_task_mach_voucher_init(void* ctxt); +extern dispatch_once_t _voucher_task_mach_voucher_pred; +extern mach_voucher_t _voucher_task_mach_voucher; + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_get_task_mach_voucher(void) +{ + dispatch_once_f(&_voucher_task_mach_voucher_pred, NULL, + _voucher_task_mach_voucher_init); + return _voucher_task_mach_voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_mach_msg_set_mach_voucher(mach_msg_header_t *msg, mach_voucher_t kv, + bool move_send) +{ + if (MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return false; + if (!kv) return false; + msg->msgh_voucher_port = kv; + msg->msgh_bits |= MACH_MSGH_BITS_SET_PORTS(0, 0, move_send ? + MACH_MSG_TYPE_MOVE_SEND : MACH_MSG_TYPE_COPY_SEND); + _dispatch_kvoucher_debug("msg[%p] set %s", kv, msg, move_send ? + "move-send" : "copy-send"); + _dispatch_voucher_debug_machport(kv); + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) +{ + if (MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return false; + mach_voucher_t kv; + if (voucher) { + kv = _voucher_get_mach_voucher(voucher); + } else { + kv = _voucher_get_task_mach_voucher(); + } + return _voucher_mach_msg_set_mach_voucher(msg, kv, false); +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_mach_msg_get(mach_msg_header_t *msg) +{ + if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return MACH_VOUCHER_NULL; + mach_voucher_t kv = msg->msgh_voucher_port; + msg->msgh_voucher_port = MACH_VOUCHER_NULL; + msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK; + return kv; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) +{ + mach_msg_bits_t kvbits = MACH_MSGH_BITS_VOUCHER(msg->msgh_bits); + mach_voucher_t kv = msg->msgh_voucher_port, kvm = MACH_VOUCHER_NULL; + if 
((kvbits == MACH_MSG_TYPE_COPY_SEND || + kvbits == MACH_MSG_TYPE_MOVE_SEND) && kv) { + _dispatch_kvoucher_debug("msg[%p] clear %s", kv, msg, move_send ? + "move-send" : "copy-send"); + _dispatch_voucher_debug_machport(kv); + if (kvbits == MACH_MSG_TYPE_MOVE_SEND) { + // return/drop received or pseudo-received + // voucher reference (e.g. due to send failure). + if (move_send) { + kvm = kv; + } else { + _voucher_dealloc_mach_voucher(kv); + } + } + msg->msgh_voucher_port = MACH_VOUCHER_NULL; + msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK; + } + return kvm; +} + +#pragma mark - +#pragma mark dispatch_continuation_t + voucher_t + +#if DISPATCH_USE_KDEBUG_TRACE +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_voucher_ktrace(int code, natural_t voucher, void *container) +{ + if (!voucher) return; + __kdebug_trace(APPSDBG_CODE(DBG_MACH_CHUD, (0xfac >> 2)) | DBG_FUNC_NONE, + code, (int)voucher, (int)(uintptr_t)container, +#ifdef __LP64__ + (int)((uintptr_t)container >> 32) +#else + 0 +#endif + ); +} +#define _dispatch_voucher_ktrace_dc_push(dc) \ + _dispatch_voucher_ktrace(0x1, (dc)->dc_voucher ? \ + (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) +#define _dispatch_voucher_ktrace_dc_pop(dc) \ + _dispatch_voucher_ktrace(0x2, (dc)->dc_voucher ? \ + (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) +#define _dispatch_voucher_ktrace_dmsg_push(dmsg) \ + _dispatch_voucher_ktrace(0x3, (dmsg)->dmsg_voucher ? \ + (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) +#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) \ + _dispatch_voucher_ktrace(0x4, (dmsg)->dmsg_voucher ? 
\ + (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) +#else +#define _dispatch_voucher_ktrace_dc_push(dc) +#define _dispatch_voucher_ktrace_dc_pop(dc) +#define _dispatch_voucher_ktrace_dmsg_push(dmsg) +#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) +#endif // DISPATCH_USE_KDEBUG_TRACE + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_voucher_set(dispatch_continuation_t dc, + dispatch_block_flags_t flags) +{ + unsigned long bits = (unsigned long)dc->do_vtable; + voucher_t v = NULL; + + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + bits |= DISPATCH_OBJ_HAS_VOUCHER_BIT; + } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { + v = _voucher_copy(); + } + dc->do_vtable = (void*)bits; + dc->dc_voucher = v; + _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); + _dispatch_voucher_ktrace_dc_push(dc); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) +{ + unsigned long bits = (unsigned long)dc->do_vtable; + voucher_t v = DISPATCH_NO_VOUCHER; + if (!(bits & DISPATCH_OBJ_HAS_VOUCHER_BIT)) { + _dispatch_voucher_ktrace_dc_pop(dc); + _dispatch_voucher_debug("continuation[%p] adopt", dc->dc_voucher, dc); + v = dc->dc_voucher; + dc->dc_voucher = NULL; + } + _dispatch_adopt_priority_and_replace_voucher(dc->dc_priority, v, 0); +} + +#pragma mark - +#pragma mark _voucher_activity_heap + +typedef uint32_t _voucher_atm_subid_t; +static const size_t _voucher_activity_hash_bits = 6; +static const size_t _voucher_activity_hash_size = + 1 << _voucher_activity_hash_bits; +#define VACTID_HASH(x) ((((uint32_t)((x) >> 32) + (uint32_t)(x)) * \ + 2654435761u) >> (32-_voucher_activity_hash_bits)) +#define VATMID_HASH(x) \ + (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) +#define VATMID2ACTID(x) ((uint64_t)(x) << 32) +#define VACTID_BASEID(x) ((uint64_t)(x) & (((uint64_t)UINT32_MAX) << 32)) +#define VACTID_SUBID(x) ((uint32_t)(x)) +#define VATM_ACTID(vatm, subid) 
(VATMID2ACTID((vatm)->vatm_id) + (subid)) +#define VATM_SUBID_BITS2MAX(bits) ((1u << (bits)) - 1) +#define VATM_SUBID_MAXBITS (32) +#define VATM_SUBID_MAX (ATM_SUBAID32_MAX) +#define MAILBOX_OFFSET_UNSET UINT64_MAX + +static const size_t _voucher_activity_buffers_per_heap = 512; +typedef unsigned long _voucher_activity_bitmap_base_t; +static const size_t _voucher_activity_bits_per_bitmap_base_t = + 8 * sizeof(_voucher_activity_bitmap_base_t); +static const size_t _voucher_activity_bitmaps_per_heap = + _voucher_activity_buffers_per_heap / + _voucher_activity_bits_per_bitmap_base_t; +typedef _voucher_activity_bitmap_base_t + _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap]; + +typedef struct _voucher_activity_metadata_s { + _voucher_activity_buffer_t vam_kernel_metadata; + _voucher_activity_buffer_t vam_client_metadata; + struct _voucher_activity_self_metadata_s vam_self_metadata; +#if __LP64__ + uintptr_t vam_pad0[7]; +#else + uintptr_t vam_pad0[15]; +#endif + // cacheline + _voucher_activity_bitmap_t volatile vam_atm_mbox_bitmap; + _voucher_activity_bitmap_t volatile vam_buffer_bitmap; + _voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap; + // cacheline + _voucher_atm_subid_t vam_base_atm_subid; + _voucher_atm_subid_t vam_base_atm_subid_max; + _voucher_atm_subid_t vam_nested_atm_subid; + _voucher_atm_t vam_default_activity_atm; + _voucher_atm_t volatile vam_base_atm; + voucher_activity_id_t volatile vam_nested_atm_id; +#if __LP64__ + uintptr_t vam_pad2[3]; +#else + uintptr_t vam_pad2[1]; +#endif + _voucher_activity_lock_s vam_base_atm_lock; + _voucher_activity_lock_s vam_nested_atm_lock; + _voucher_activity_lock_s vam_atms_lock; + _voucher_activity_lock_s vam_activities_lock; + // cacheline + TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size]; + TAILQ_HEAD(, _voucher_activity_s) + vam_activities[_voucher_activity_hash_size]; +} *_voucher_activity_metadata_t; + +#pragma mark - +#pragma mark _voucher_activity_t + 
+_voucher_activity_tracepoint_t _voucher_activity_tracepoint_get_slow( + unsigned int slots); +extern _voucher_activity_t _voucher_activity_default; +extern voucher_activity_mode_t _voucher_activity_mode; + +#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG +#define _dispatch_voucher_activity_debug(msg, act, ...) \ + _dispatch_debug("activity[%p] <0x%x>: atm[%p] <%lld>: " msg, (act), \ + (act) ? VACTID_SUBID((act)->va_id) : 0, (act) ? (act)->va_atm : NULL, \ + (act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__) +#define _dispatch_voucher_atm_debug(msg, atm, ...) \ + _dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \ + (atm) ? (atm)->vatm_id : 0, (atm) ? (atm)->vatm_kvoucher : 0, \ + ##__VA_ARGS__) +#else +#define _dispatch_voucher_activity_debug(msg, act, ...) +#define _dispatch_voucher_atm_debug(msg, atm, ...) +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_voucher_activity_timestamp(void) +{ +#if TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 + return mach_absolute_time(); +#else + return mach_approximate_time(); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_voucher_activity_thread_id(void) +{ + uint64_t thread_id; + pthread_threadid_np(NULL, &thread_id); // TODO: 15923074: use TSD thread_id + return thread_id; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_buffer_tracepoint_get(_voucher_activity_buffer_header_t vab, + unsigned int slots) +{ + uint32_t idx = dispatch_atomic_add2o(vab, vabh_next_tracepoint_idx, + slots, relaxed); + if (idx <= _voucher_activity_tracepoints_per_buffer) { + return (_voucher_activity_tracepoint_t)vab + (idx - slots); + } + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_tracepoint_get_from_activity(_voucher_activity_t va, + unsigned int slots) +{ + _voucher_activity_buffer_header_t vab = va ? 
va->va_current_buffer : NULL; + return vab ? _voucher_activity_buffer_tracepoint_get(vab, slots) : NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_tracepoint_get(unsigned int slots) +{ + _voucher_activity_t va; + voucher_t v = _voucher_get(); + va = v && v->v_activity ? v->v_activity : _voucher_activity_default; + return _voucher_activity_tracepoint_get_from_activity(va, slots); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat, + uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location) +{ + if (!location) location = (uint64_t)__builtin_return_address(0); + uint64_t timestamp = _voucher_activity_timestamp(); + vat->vat_flags = _voucher_activity_trace_flag_tracepoint, + vat->vat_type = type, + vat->vat_namespace = code_namespace, + vat->vat_code = code, + vat->vat_timestamp = timestamp, + vat->vat_thread = _voucher_activity_thread_id(), + vat->vat_location = location; + return timestamp; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat, + voucher_activity_trace_id_t trace_id, uint64_t location) +{ + uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift); + uint8_t cns = (uint8_t)(trace_id >> + _voucher_activity_trace_id_code_namespace_shift); + uint32_t code = (uint32_t)trace_id; + return _voucher_activity_tracepoint_init(vat, type, cns, code, location); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_trace_id_is_subtype(voucher_activity_trace_id_t trace_id, + uint8_t type) +{ + voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0); + return (trace_id & type_id) == type_id; +} +#define _voucher_activity_trace_id_is_subtype(trace_id, name) \ + _voucher_activity_trace_id_is_subtype(trace_id, \ + voucher_activity_tracepoint_type_ ## name) + +DISPATCH_ALWAYS_INLINE +static inline bool 
+_voucher_activity_trace_id_enabled(voucher_activity_trace_id_t trace_id) +{ + switch (_voucher_activity_mode) { + case voucher_activity_mode_release: + return _voucher_activity_trace_id_is_subtype(trace_id, release); + case voucher_activity_mode_stream: + case voucher_activity_mode_debug: + return _voucher_activity_trace_id_is_subtype(trace_id, debug) || + _voucher_activity_trace_id_is_subtype(trace_id, release); + } + return false; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_trace_type_enabled(uint8_t type) +{ + voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0); + return _voucher_activity_trace_id_enabled(type_id); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_disabled(void) +{ + return slowpath(_voucher_activity_mode == voucher_activity_mode_disable); +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_trace_args_inline(uint8_t type, uint8_t code_namespace, + uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, + uintptr_t arg4) +{ + if (!_voucher_activity_trace_type_enabled(type)) return NULL; + _voucher_activity_tracepoint_t vat; + vat = _voucher_activity_tracepoint_get(1); + if (!vat) return NULL; + _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0); + vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; + vat->vat_data[0] = arg1; + vat->vat_data[1] = arg2; + vat->vat_data[2] = arg3; + vat->vat_data[3] = arg4; + return vat; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_trace_with_id_inline(voucher_activity_trace_id_t trace_id) +{ + _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1); + if (!vat) return NULL; + _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0); + return vat; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_tracepoint_t +_voucher_activity_trace_with_id(voucher_activity_trace_id_t trace_id) +{ + 
_voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1); + if (!vat) vat = _voucher_activity_tracepoint_get_slow(1); + if (!vat) return NULL; + _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0); + return vat; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code) +{ + if (!v || !v->v_activity) return; // Don't use default activity for IPC + const uint8_t type = voucher_activity_tracepoint_type_release; + const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc; + if (!_voucher_activity_trace_type_enabled(type)) return; + _voucher_activity_tracepoint_t vat; + vat = _voucher_activity_tracepoint_get_from_activity(v->v_activity, 1); + if (!vat) return; // TODO: slowpath ? + _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0); + vat->vat_flags |= _voucher_activity_trace_flag_libdispatch; +#if __has_extension(c_static_assert) + _Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data), + "mach_msg_header_t too large"); +#endif + memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t)); +} +#define _voucher_activity_trace_msg(v, msg, type) \ + _voucher_activity_trace_msg(v, msg, \ + _voucher_activity_tracepoint_namespace_ipc_ ## type) + +#endif // !(USE_OBJC && __OBJC2__) + +#else // VOUCHER_USE_MACH_VOUCHER + +#pragma mark - +#pragma mark Simulator / vouchers disabled + +#define _dispatch_voucher_debug(msg, v, ...) +#define _dispatch_kvoucher_debug(msg, kv, ...) 
+ +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_retain(voucher_t voucher) +{ + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release(voucher_t voucher) +{ + (void)voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_get(void) +{ + return NULL; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_voucher_copy(void) +{ + return NULL; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_voucher_copy_without_importance(void) +{ + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) +{ + (void)ov; (void)voucher; + return MACH_VOUCHER_NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_adopt(voucher_t voucher) +{ + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_replace(voucher_t voucher) +{ + (void)voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_clear(void) +{ +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_voucher_get_priority(voucher_t voucher) +{ + (void)voucher; + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_mach_msg_set_mach_voucher(mach_msg_header_t *msg, mach_voucher_t kv, + bool move_send) +{ + (void)msg; (void)kv; (void)move_send; + return false; + +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) +{ + (void)msg; (void)voucher; + return false; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_mach_msg_get(mach_msg_header_t *msg) +{ + (void)msg; + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_t +_voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) +{ + (void)msg; (void)move_send; + return MACH_VOUCHER_NULL; +} + +#define _dispatch_voucher_ktrace_dmsg_push(dmsg) +#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) + +DISPATCH_ALWAYS_INLINE +static inline 
void +_dispatch_continuation_voucher_set(dispatch_continuation_t dc, + dispatch_block_flags_t flags) +{ + (void)dc; (void)flags; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) +{ + (void)dc; +} + +#define _voucher_activity_trace_msg(v, msg, type) + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_disabled(void) +{ + return true; +} + +#endif // VOUCHER_USE_MACH_VOUCHER + +#endif /* __DISPATCH_VOUCHER_INTERNAL__ */ diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig index d0f431deb..1644ea90f 100644 --- a/xcodeconfig/libdispatch-introspection.xcconfig +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ -19,8 +19,8 @@ // BUILD_VARIANTS = normal -INSTALL_PATH = /usr/lib/system/introspection -INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system/introspection +INSTALL_PATH_ACTUAL = /usr/lib/system/introspection + GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-resolver_iphoneos.order b/xcodeconfig/libdispatch-resolver_iphoneos.order new file mode 100644 index 000000000..eea98459d --- /dev/null +++ b/xcodeconfig/libdispatch-resolver_iphoneos.order @@ -0,0 +1,20 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index 5877e5019..c29b16337 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -1,5 +1,5 @@ # -# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# Copyright (c) 2013-2014 Apple Inc. All rights reserved. # # @APPLE_APACHE_LICENSE_HEADER_START@ # @@ -18,19 +18,6 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # -_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable -_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable -_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable -_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable -_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable -_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable -_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable -_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable -_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable -_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable -_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable -_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable - __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap +__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure +__dispatch_queue_attrs __dispatch_queue_attr_concurrent diff --git 
a/xcodeconfig/libdispatch.interposable b/xcodeconfig/libdispatch.interposable index f3377617b..52a126468 100644 --- a/xcodeconfig/libdispatch.interposable +++ b/xcodeconfig/libdispatch.interposable @@ -24,5 +24,6 @@ _dispatch_introspection_hook_queue_create _dispatch_introspection_hook_queue_destroy _dispatch_introspection_hook_queue_item_enqueue _dispatch_introspection_hook_queue_item_dequeue +_dispatch_introspection_hook_queue_item_complete _dispatch_introspection_hook_queue_callout_begin _dispatch_introspection_hook_queue_callout_end diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 8870ea9e4..8bb455055 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -50,8 +50,11 @@ _OBJC_CLASS_$_OS_dispatch_operation __OS_dispatch_operation_vtable _OBJC_CLASS_$_OS_dispatch_disk __OS_dispatch_disk_vtable -# non-dispatch_object_t classes +# os_object_t classes _OBJC_CLASS_$_OS_object +_OBJC_CLASS_$_OS_voucher +#_OBJC_CLASS_$_OS_voucher_recipe +# non-os_object_t classes _OBJC_CLASS_$_OS_dispatch_data _OBJC_CLASS_$_OS_dispatch_data_empty # metaclasses @@ -71,5 +74,7 @@ _OBJC_METACLASS_$_OS_dispatch_io _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object +_OBJC_METACLASS_$_OS_voucher +_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 4904b9d64..1d2293318 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -19,25 +19,25 @@ // #include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" -SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator +#include "/AppleInternal/XcodeConfig/SimulatorSupport.xcconfig" + +// Set INSTALL_PATH[sdk=macosx*] when SimulatorSupport.xcconfig is unavailable +INSTALL_PATH[sdk=macosx*] = $(INSTALL_PATH_ACTUAL) + +SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator 
iphoneosnano iphonesimulatornano ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS PRODUCT_NAME = libdispatch EXECUTABLE_PREFIX = -LD_DYLIB_INSTALL_NAME = /usr/lib/system/$(EXECUTABLE_NAME) -INSTALL_PATH = /usr/lib/system -INSTALL_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch -PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os -OS_PUBLIC_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH[sdk=iphonesimulator*] = $(SDKROOT)/usr/local/include/os -HEADER_SEARCH_PATHS = $(PROJECT_DIR) +INSTALL_PATH_ACTUAL = /usr/lib/system +PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/dispatch +OS_PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/os +HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO +USE_HEADERMAP = NO BUILD_VARIANTS = normal debug profile ONLY_ACTIVE_ARCH = NO CLANG_LINK_OBJC_RUNTIME = NO @@ -71,11 +71,13 @@ OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) -UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -Wl,-upward-lsystem_asl 
+UMBRELLA_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem -OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport +OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto OBJC_LDFLAGS[arch=i386][sdk=macosx*] = OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m +ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(PLATFORM_LDFLAGS) +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) +OTHER_MIGFLAGS = -novouchers diff --git a/xcodeconfig/libdispatch_iphoneos.order b/xcodeconfig/libdispatch_iphoneos.order new file mode 100644 index 000000000..eea98459d --- /dev/null +++ b/xcodeconfig/libdispatch_iphoneos.order @@ -0,0 +1,20 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + diff --git a/xcodeconfig/libdispatch_macosx.aliases b/xcodeconfig/libdispatch_macosx.aliases index a7f61c5f0..66b24a31e 100644 --- a/xcodeconfig/libdispatch_macosx.aliases +++ b/xcodeconfig/libdispatch_macosx.aliases @@ -17,5 +17,3 @@ # # @APPLE_APACHE_LICENSE_HEADER_END@ # - -__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure diff --git a/xcodeconfig/libdispatch_objc.aliases b/xcodeconfig/libdispatch_objc.aliases new file mode 100644 index 000000000..ad104a190 --- /dev/null +++ b/xcodeconfig/libdispatch_objc.aliases @@ -0,0 +1,34 @@ +# +# Copyright (c) 2012-2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable +_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable +_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable +_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable +_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable +_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable +_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable +_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable +_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh index cb5e80495..1610b81ad 100755 --- a/xcodescripts/install-headers.sh +++ b/xcodescripts/install-headers.sh @@ -27,3 +27,5 @@ mkdir -p "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" || true mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh index 3669ec237..f81eb856f 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -23,7 +23,7 @@ export MIGCC="$(xcrun -find cc)" export MIGCOM="$(xcrun -find migcom)" export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}" for a in ${ARCHS}; do - xcrun mig -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ + xcrun mig 
${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ -sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \ -server /dev/null "${SCRIPT_INPUT_FILE_0}" done From e24356c67c8a18052185298b7f7d6e57ac68ce0b Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Fri, 4 Sep 2015 17:28:44 +0000 Subject: [PATCH 06/18] libdispatch-500.1.5 Imported from libdispatch-500.1.5.tar.gz --- dispatch/dispatch.h | 2 +- dispatch/object.h | 6 +- dispatch/queue.h | 7 +- dispatch/source.h | 3 +- libdispatch.xcodeproj/project.pbxproj | 813 ++++++++++- man/dispatch_source_create.3 | 4 +- os/object.h | 6 +- os/object_private.h | 5 + private/private.h | 2 +- private/queue_private.h | 42 + private/source_private.h | 4 + private/voucher_activity_private.h | 219 ++- private/voucher_private.h | 64 +- src/allocator.c | 6 + src/allocator_internal.h | 15 +- src/block.cpp | 110 ++ src/data.c | 412 ++++-- src/data.m | 19 + src/data_internal.h | 89 +- src/init.c | 188 +-- src/inline_internal.h | 270 +++- src/internal.h | 89 +- src/introspection.c | 3 +- src/introspection_internal.h | 6 +- src/io.c | 23 +- src/io_internal.h | 11 +- src/object.c | 26 +- src/object.m | 64 +- src/object_internal.h | 86 +- src/queue.c | 402 ++++-- src/queue_internal.h | 66 +- src/semaphore.c | 27 +- src/semaphore_internal.h | 10 +- src/shims/tsd.h | 10 +- src/source.c | 716 +++++++--- src/source_internal.h | 21 +- src/trace.h | 8 +- src/voucher.c | 1255 +++++++---------- src/voucher_internal.h | 300 ++-- .../libdispatch-introspection.xcconfig | 2 +- xcodeconfig/libdispatch-resolved.xcconfig | 2 +- xcodeconfig/libdispatch.xcconfig | 27 +- xcodescripts/install-dtrace.sh | 3 +- 43 files changed, 3752 insertions(+), 1691 deletions(-) create mode 100644 src/block.cpp diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 722b0c90d..bb32bdf31 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -37,7 +37,7 @@ #define __OSX_AVAILABLE_STARTING(x, 
y) #endif -#define DISPATCH_API_VERSION 20140804 +#define DISPATCH_API_VERSION 20141121 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/object.h b/dispatch/object.h index c6371899a..a9b805e75 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -56,7 +56,7 @@ _dispatch_object_validate(dispatch_object_t object) { void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object; (void)isa; } -#elif defined(__cplusplus) +#elif defined(__cplusplus) && !defined(__DISPATCH_BUILDING_DISPATCH__) /* * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ * aware of type compatibility. @@ -99,6 +99,7 @@ typedef union { #define DISPATCH_RETURNS_RETAINED #endif +#ifdef __BLOCKS__ /*! * @typedef dispatch_block_t * @@ -141,6 +142,7 @@ typedef union { * function or by sending it a -[copy] message. */ typedef void (^dispatch_block_t)(void); +#endif // __BLOCKS__ __BEGIN_DECLS @@ -295,6 +297,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); +#ifdef __BLOCKS__ /*! * @function dispatch_wait * @@ -427,6 +430,7 @@ dispatch_testcancel(void *object); dispatch_source_t:dispatch_source_testcancel \ )((object)) #endif +#endif // __BLOCKS__ /*! * @function dispatch_debug diff --git a/dispatch/queue.h b/dispatch/queue.h index cc3ca941e..b3cb54f9a 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -930,7 +930,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, * is NULL. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, void *context, dispatch_function_t destructor); @@ -959,7 +959,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, * The context for the specified key or NULL if no context was found. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void * dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); @@ -986,8 +986,7 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); * The context for the specified key or NULL if no context was found. */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT -DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void * dispatch_get_specific(const void *key); diff --git a/dispatch/source.h b/dispatch/source.h index 411ed0611..f01fd9392 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -54,6 +54,8 @@ */ DISPATCH_DECL(dispatch_source); +__BEGIN_DECLS + /*! * @typedef dispatch_source_type_t * @@ -319,7 +321,6 @@ typedef unsigned long dispatch_source_vnode_flags_t; typedef unsigned long dispatch_source_timer_flags_t; -__BEGIN_DECLS /*! 
* @function dispatch_source_create diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index ff12a47f2..898ffcaf9 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -18,6 +18,17 @@ name = libdispatch_Sim; productName = libdispatch_Sim; }; + 4552540A19B1389700B88766 /* libdispatch_tests */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4552540B19B1389700B88766 /* Build configuration list for PBXAggregateTarget "libdispatch_tests" */; + buildPhases = ( + ); + dependencies = ( + 4552540F19B138B700B88766 /* PBXTargetDependency */, + ); + name = libdispatch_tests; + productName = libdispatch_tests; + }; C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = { isa = PBXAggregateTarget; buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */; @@ -84,6 +95,12 @@ E43570B9126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43570BA126E93380097AB9F /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E43A710615783F7E0012D38D /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43A72501AF85BBC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43A72861AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 
E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43A72881AF85BE900BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; @@ -267,6 +284,356 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ + 455253A819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = DF80F67E10B5C71600FAB5AE; + remoteInfo = dispatch_test; + }; + 455253AA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01C78108E68D400FAA873; + remoteInfo = dispatch_apply; + }; + 455253AC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4EB36CD1088F0B000C33AD4; + remoteInfo = dispatch_api; + }; + 455253AE19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CA7108E6C5000FAA873; + remoteInfo = dispatch_c99; + }; + 455253B019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + 
remoteGlobalIDString = E4C72A26115C3F65009F3CE1; + remoteInfo = dispatch_cf_main; + }; + 455253B219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CB9108E6C7200FAA873; + remoteInfo = dispatch_deadname; + }; + 455253B419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CC3108E6CC300FAA873; + remoteInfo = dispatch_debug; + }; + 455253B619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CCC108E6CD400FAA873; + remoteInfo = dispatch_group; + }; + 455253B819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CD5108E6CE300FAA873; + remoteInfo = dispatch_overcommit; + }; + 455253BA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CDE108E6CF300FAA873; + remoteInfo = dispatch_pingpong; + }; + 455253BC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CE7108E6D0500FAA873; + remoteInfo = dispatch_plusplus; + }; + 455253BE19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CF0108E6D2900FAA873; + remoteInfo = 
dispatch_priority; + }; + 455253C019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CF9108E6D3800FAA873; + remoteInfo = dispatch_priority2; + }; + 455253C219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E46D768811D0365F00615518; + remoteInfo = dispatch_concur; + }; + 455253C419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4324AAC12250F0800A3CAD5; + remoteInfo = dispatch_context_for_key; + }; + 455253C619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D02108E6D5600FAA873; + remoteInfo = dispatch_proc; + }; + 455253C819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D0B108E6D6000FAA873; + remoteInfo = dispatch_queue_finalizer; + }; + 455253CA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D14108E6D7300FAA873; + remoteInfo = dispatch_read; + }; + 455253CC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D1D108E6D8B00FAA873; + remoteInfo = dispatch_read2; + }; + 455253CE19B1384900B88766 /* 
PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D26108E6D9A00FAA873; + remoteInfo = dispatch_after; + }; + 455253D019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D2F108E6DA700FAA873; + remoteInfo = dispatch_timer; + }; + 455253D219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4CE9BC31151AB2A00D710C0; + remoteInfo = dispatch_timer_short; + }; + 455253D419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 5A2BA66D11D0369E0081FF89; + remoteInfo = dispatch_timer_timeout; + }; + 455253D619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D38108E6DB200FAA873; + remoteInfo = dispatch_suspend_timer; + }; + 455253D819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D41108E6DBF00FAA873; + remoteInfo = dispatch_sema; + }; + 455253DA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D53108E6DDC00FAA873; + remoteInfo = dispatch_timer_bit31; + }; + 455253DC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + 
containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D5C108E6E0400FAA873; + remoteInfo = dispatch_timer_bit63; + }; + 455253DE19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D74108E6E4B00FAA873; + remoteInfo = dispatch_timer_set_time; + }; + 455253E019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D7D108E6E6600FAA873; + remoteInfo = dispatch_drift; + }; + 455253E219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D86108E6E7200FAA873; + remoteInfo = dispatch_starfish; + }; + 455253E419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D8F108E6E7E00FAA873; + remoteInfo = dispatch_cascade; + }; + 455253E619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01D98108E6E9500FAA873; + remoteInfo = dispatch_readsync; + }; + 455253E819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4E24A0710E0020B00C3C692; + remoteInfo = dispatch_sync_on_main; + }; + 455253EA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* 
libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4E24A1810E0021C00C3C692; + remoteInfo = dispatch_sync_gc; + }; + 455253EC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4E24C3210E01DF800C3C692; + remoteInfo = dispatch_apply_gc; + }; + 455253EE19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 5AAB464A10D330C5004407EA; + remoteInfo = dispatch_data; + }; + 455253F019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 5A11B20E10DB124C000FAD7A; + remoteInfo = dispatch_io; + }; + 455253F219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 5AA78BAB114821D0009A233B; + remoteInfo = dispatch_io_net; + }; + 455253F419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 5AF00EF51135FA1300CA14CE; + remoteInfo = dispatch_vm; + }; + 455253F619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4E33EB6121C9C9400F4B71C; + remoteInfo = dispatch_vnode; + }; + 455253F819B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 
C9E804AF1963EC5F00C2B970; + remoteInfo = dispatch_qos; + }; + 455253FA19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = C9B1FF84113F458A00843414; + remoteInfo = dispatch_select; + }; + 455253FC19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = C985912B143D584100718FE3; + remoteInfo = dispatch_transform; + }; + 455253FE19B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01DA1108E6EE000FAA873; + remoteInfo = nsoperation; + }; + 4552540019B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CB0108E6C6300FAA873; + remoteInfo = cffd; + }; + 4552540219B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D020B4108F73E000FAA873; + remoteInfo = bench; + }; + 4552540419B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E454823616C1D8E50042EC2D; + remoteInfo = jsgc_bench; + }; + 4552540619B1384900B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E454824516C1F0EF0042EC2D; + remoteInfo = async_bench; + }; + 4552540819B1384900B88766 /* PBXContainerItemProxy */ = { 
+ isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E454824F16C1F0FE0042EC2D; + remoteInfo = apply_bench; + }; + 4552540E19B138B700B88766 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = E4D01DC5108E708E00FAA873; + remoteInfo = all; + }; C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; @@ -322,6 +689,7 @@ 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; 2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; 2BE17C6318EA305E002CA4E8 /* layout_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layout_private.h; sourceTree = ""; }; + 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = libdispatchtest.xcodeproj; path = tests/libdispatchtest.xcodeproj; sourceTree = ""; }; 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; @@ -369,6 +737,7 @@ E422DA3614D2A7E7003C6EE4 /* 
libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; + E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = ""; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; @@ -463,6 +832,8 @@ 08FB7795FE84155DC02AAC07 /* Source */, C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, + C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, + 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */, ); indentWidth = 4; name = libdispatch; @@ -476,6 +847,7 @@ 2BBF5A62154B64F5002B20F9 /* allocator.c */, 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, + E43A724F1AF85BBC00BAA921 /* block.cpp */, 5AAB45BF10D30B79004407EA /* data.c */, E420866F16027AE500EEE210 /* data.m */, E44EBE3B1251659900645D88 /* init.c */, @@ -510,6 +882,62 @@ name = Products; sourceTree = ""; }; + 4552536F19B1384900B88766 /* Products */ = { + isa = PBXGroup; + children = ( + 455253A919B1384900B88766 /* libdispatch_test.a */, + 455253AB19B1384900B88766 /* 
dispatch_apply */, + 455253AD19B1384900B88766 /* dispatch_api */, + 455253AF19B1384900B88766 /* dispatch_c99 */, + 455253B119B1384900B88766 /* dispatch_cf_main */, + 455253B319B1384900B88766 /* dispatch_deadname */, + 455253B519B1384900B88766 /* dispatch_debug */, + 455253B719B1384900B88766 /* dispatch_group */, + 455253B919B1384900B88766 /* dispatch_overcommit */, + 455253BB19B1384900B88766 /* dispatch_pingpong */, + 455253BD19B1384900B88766 /* dispatch_plusplus */, + 455253BF19B1384900B88766 /* dispatch_priority */, + 455253C119B1384900B88766 /* dispatch_priority2 */, + 455253C319B1384900B88766 /* dispatch_concur */, + 455253C519B1384900B88766 /* dispatch_context_for_key */, + 455253C719B1384900B88766 /* dispatch_proc */, + 455253C919B1384900B88766 /* dispatch_queue_finalizer */, + 455253CB19B1384900B88766 /* dispatch_read */, + 455253CD19B1384900B88766 /* dispatch_read2 */, + 455253CF19B1384900B88766 /* dispatch_after */, + 455253D119B1384900B88766 /* dispatch_timer */, + 455253D319B1384900B88766 /* dispatch_timer_short */, + 455253D519B1384900B88766 /* dispatch_timer_timeout */, + 455253D719B1384900B88766 /* dispatch_suspend_timer */, + 455253D919B1384900B88766 /* dispatch_sema */, + 455253DB19B1384900B88766 /* dispatch_timer_bit31 */, + 455253DD19B1384900B88766 /* dispatch_timer_bit63 */, + 455253DF19B1384900B88766 /* dispatch_timer_set_time */, + 455253E119B1384900B88766 /* dispatch_drift */, + 455253E319B1384900B88766 /* dispatch_starfish */, + 455253E519B1384900B88766 /* dispatch_cascade */, + 455253E719B1384900B88766 /* dispatch_readsync */, + 455253E919B1384900B88766 /* dispatch_sync_on_main */, + 455253EB19B1384900B88766 /* dispatch_sync_gc */, + 455253ED19B1384900B88766 /* dispatch_apply_gc */, + 455253EF19B1384900B88766 /* dispatch_data */, + 455253F119B1384900B88766 /* dispatch_io */, + 455253F319B1384900B88766 /* dispatch_io_net */, + 455253F519B1384900B88766 /* dispatch_vm */, + 455253F719B1384900B88766 /* dispatch_vnode */, + 
455253F919B1384900B88766 /* dispatch_qos */, + 455253FB19B1384900B88766 /* dispatch_select */, + 455253FD19B1384900B88766 /* dispatch_transform */, + 455253FF19B1384900B88766 /* nsoperation */, + 4552540119B1384900B88766 /* cffd */, + 4552540319B1384900B88766 /* bench */, + 4552540519B1384900B88766 /* jsgc_bench */, + 4552540719B1384900B88766 /* async_bench */, + 4552540919B1384900B88766 /* apply_bench */, + ); + name = Products; + sourceTree = ""; + }; C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( @@ -570,7 +998,6 @@ E40041E4125E71150022B135 /* xcodeconfig */, E49F259C125D664F0057C971 /* xcodescripts */, E47D6BCA125F10F70070D91C /* resolver */, - C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, ); name = "Build Support"; sourceTree = ""; @@ -957,7 +1384,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0600; + LastUpgradeCheck = 0700; }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; compatibilityVersion = "Xcode 3.2"; @@ -976,6 +1403,10 @@ ProductGroup = C927F36010FD7F1000C5AB8B /* Products */; ProjectRef = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; }, + { + ProductGroup = 4552536F19B1384900B88766 /* Products */; + ProjectRef = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + }, ); projectRoot = ""; targets = ( @@ -987,11 +1418,355 @@ E46DBC1A14EE10C80001F9F6 /* libdispatch static */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, + 4552540A19B1389700B88766 /* libdispatch_tests */, ); }; /* End PBXProject section */ /* Begin PBXReferenceProxy section */ + 455253A919B1384900B88766 /* libdispatch_test.a */ = { + isa = PBXReferenceProxy; + fileType = archive.ar; + path = libdispatch_test.a; + remoteRef = 455253A819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253AB19B1384900B88766 /* dispatch_apply */ = { + isa = 
PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_apply; + remoteRef = 455253AA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253AD19B1384900B88766 /* dispatch_api */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_api; + remoteRef = 455253AC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253AF19B1384900B88766 /* dispatch_c99 */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_c99; + remoteRef = 455253AE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253B119B1384900B88766 /* dispatch_cf_main */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_cf_main; + remoteRef = 455253B019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253B319B1384900B88766 /* dispatch_deadname */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_deadname; + remoteRef = 455253B219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253B519B1384900B88766 /* dispatch_debug */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_debug; + remoteRef = 455253B419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253B719B1384900B88766 /* dispatch_group */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_group; + remoteRef = 455253B619B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253B919B1384900B88766 /* dispatch_overcommit */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_overcommit; + remoteRef = 455253B819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 
455253BB19B1384900B88766 /* dispatch_pingpong */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_pingpong; + remoteRef = 455253BA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253BD19B1384900B88766 /* dispatch_plusplus */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_plusplus; + remoteRef = 455253BC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253BF19B1384900B88766 /* dispatch_priority */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_priority; + remoteRef = 455253BE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253C119B1384900B88766 /* dispatch_priority2 */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_priority2; + remoteRef = 455253C019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253C319B1384900B88766 /* dispatch_concur */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_concur; + remoteRef = 455253C219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253C519B1384900B88766 /* dispatch_context_for_key */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_context_for_key; + remoteRef = 455253C419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253C719B1384900B88766 /* dispatch_proc */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_proc; + remoteRef = 455253C619B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253C919B1384900B88766 /* dispatch_queue_finalizer */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_queue_finalizer; + 
remoteRef = 455253C819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253CB19B1384900B88766 /* dispatch_read */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_read; + remoteRef = 455253CA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253CD19B1384900B88766 /* dispatch_read2 */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_read2; + remoteRef = 455253CC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253CF19B1384900B88766 /* dispatch_after */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_after; + remoteRef = 455253CE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253D119B1384900B88766 /* dispatch_timer */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer; + remoteRef = 455253D019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253D319B1384900B88766 /* dispatch_timer_short */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer_short; + remoteRef = 455253D219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253D519B1384900B88766 /* dispatch_timer_timeout */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer_timeout; + remoteRef = 455253D419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253D719B1384900B88766 /* dispatch_suspend_timer */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_suspend_timer; + remoteRef = 455253D619B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253D919B1384900B88766 /* dispatch_sema */ = { + isa = 
PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_sema; + remoteRef = 455253D819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253DB19B1384900B88766 /* dispatch_timer_bit31 */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer_bit31; + remoteRef = 455253DA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253DD19B1384900B88766 /* dispatch_timer_bit63 */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer_bit63; + remoteRef = 455253DC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253DF19B1384900B88766 /* dispatch_timer_set_time */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_timer_set_time; + remoteRef = 455253DE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253E119B1384900B88766 /* dispatch_drift */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_drift; + remoteRef = 455253E019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253E319B1384900B88766 /* dispatch_starfish */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_starfish; + remoteRef = 455253E219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253E519B1384900B88766 /* dispatch_cascade */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_cascade; + remoteRef = 455253E419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253E719B1384900B88766 /* dispatch_readsync */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_readsync; + remoteRef = 455253E619B1384900B88766 /* PBXContainerItemProxy */; + 
sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253E919B1384900B88766 /* dispatch_sync_on_main */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_sync_on_main; + remoteRef = 455253E819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253EB19B1384900B88766 /* dispatch_sync_gc */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_sync_gc; + remoteRef = 455253EA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253ED19B1384900B88766 /* dispatch_apply_gc */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_apply_gc; + remoteRef = 455253EC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253EF19B1384900B88766 /* dispatch_data */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_data; + remoteRef = 455253EE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253F119B1384900B88766 /* dispatch_io */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_io; + remoteRef = 455253F019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253F319B1384900B88766 /* dispatch_io_net */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_io_net; + remoteRef = 455253F219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253F519B1384900B88766 /* dispatch_vm */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_vm; + remoteRef = 455253F419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253F719B1384900B88766 /* dispatch_vnode */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_vnode; + remoteRef = 
455253F619B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253F919B1384900B88766 /* dispatch_qos */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_qos; + remoteRef = 455253F819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253FB19B1384900B88766 /* dispatch_select */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_select; + remoteRef = 455253FA19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253FD19B1384900B88766 /* dispatch_transform */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_transform; + remoteRef = 455253FC19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 455253FF19B1384900B88766 /* nsoperation */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = nsoperation; + remoteRef = 455253FE19B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 4552540119B1384900B88766 /* cffd */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = cffd; + remoteRef = 4552540019B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 4552540319B1384900B88766 /* bench */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = bench; + remoteRef = 4552540219B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 4552540519B1384900B88766 /* jsgc_bench */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = jsgc_bench; + remoteRef = 4552540419B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 4552540719B1384900B88766 /* async_bench */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = async_bench; + remoteRef = 
4552540619B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; + 4552540919B1384900B88766 /* apply_bench */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = apply_bench; + remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1223,6 +1998,7 @@ 96032E4B0F5CC8C700241C5F /* time.c in Sources */, 5AAB45C010D30B79004407EA /* data.c in Sources */, 5A27262610F26F1900751FBC /* io.c in Sources */, + E43A72501AF85BBC00BAA921 /* block.cpp in Sources */, C9C5F80E143C1771006DC718 /* transform.c in Sources */, E4FC3264145F46C9002FBDDB /* object.m in Sources */, 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, @@ -1239,6 +2015,7 @@ E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, E46DBC4214EE10C80001F9F6 /* init.c in Sources */, E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + E43A72881AF85BE900BAA921 /* block.cpp in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, E46DBC4514EE10C80001F9F6 /* once.c in Sources */, E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, @@ -1272,6 +2049,7 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */, E49F24D3125D57FA0057C971 /* data.c in Sources */, E49F24D4125D57FA0057C971 /* io.c in Sources */, + E43A72841AF85BCB00BAA921 /* block.cpp in Sources */, C93D6165143E190E00EB9023 /* transform.c in Sources */, E4FC3265145F46C9002FBDDB /* object.m in Sources */, 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, @@ -1291,6 +2069,7 @@ E4B515C1164B2DA300E003AF /* queue.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, E4B515C3164B2DA300E003AF /* once.c in Sources */, + E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, E4B515C4164B2DA300E003AF /* apply.c in Sources */, E4B515C5164B2DA300E003AF /* object.c in Sources */, E4B515C6164B2DA300E003AF /* benchmark.c in Sources 
*/, @@ -1325,6 +2104,7 @@ E4EC11B512514302000DDBD1 /* time.c in Sources */, E4EC11B712514302000DDBD1 /* data.c in Sources */, E4EC11B812514302000DDBD1 /* io.c in Sources */, + E43A72861AF85BCC00BAA921 /* block.cpp in Sources */, C93D6166143E190F00EB9023 /* transform.c in Sources */, E4FC3266145F46C9002FBDDB /* object.m in Sources */, 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, @@ -1351,6 +2131,7 @@ E4EC122112514715000DDBD1 /* time.c in Sources */, E4EC122312514715000DDBD1 /* data.c in Sources */, E4EC122412514715000DDBD1 /* io.c in Sources */, + E43A72851AF85BCC00BAA921 /* block.cpp in Sources */, C93D6167143E190F00EB9023 /* transform.c in Sources */, E4FC3267145F46C9002FBDDB /* object.m in Sources */, 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, @@ -1362,6 +2143,11 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ + 4552540F19B138B700B88766 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = all; + targetProxy = 4552540E19B138B700B88766 /* PBXContainerItemProxy */; + }; C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = ddt; @@ -1421,6 +2207,20 @@ }; name = Debug; }; + 4552540C19B1389700B88766 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4552540D19B1389700B88766 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; buildSettings = { @@ -1551,6 +2351,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 4552540B19B1389700B88766 /* Build configuration list for PBXAggregateTarget "libdispatch_tests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4552540C19B1389700B88766 /* Release */, + 4552540D19B1389700B88766 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + 
defaultConfigurationName = Release; + }; C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index a17e8681f..b954bcff5 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -297,7 +297,9 @@ The data returned by .Fn dispatch_source_get_data indicates which of the events in the .Fa mask -were observed. +were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of +.Fn mach_port_request_notification +on the same port. .Pp .Vt DISPATCH_SOURCE_TYPE_MACH_RECV .Pp diff --git a/os/object.h b/os/object.h index 944d3313d..e07aaec67 100644 --- a/os/object.h +++ b/os/object.h @@ -74,12 +74,14 @@ #if OS_OBJECT_USE_OBJC #import #define OS_OBJECT_CLASS(name) OS_##name -#define OS_OBJECT_DECL(name, ...) \ +#define OS_OBJECT_DECL_IMPL(name, ...) \ @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ @end \ typedef NSObject *name##_t +#define OS_OBJECT_DECL(name, ...) 
\ + OS_OBJECT_DECL_IMPL(name, __VA_ARGS__) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ - OS_OBJECT_DECL(name, ) + OS_OBJECT_DECL_IMPL(name, ) #if defined(__has_attribute) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) diff --git a/os/object_private.h b/os/object_private.h index f5d326823..0f2f01dff 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -121,6 +121,11 @@ OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW _os_object_t _os_object_retain(_os_object_t object); +__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +_os_object_t +_os_object_retain_with_resurrect(_os_object_t obj); + __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void diff --git a/private/private.h b/private/private.h index 8fd5abc06..46d0e5d48 100644 --- a/private/private.h +++ b/private/private.h @@ -64,7 +64,7 @@ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20140804 // Keep in sync with +#if DISPATCH_API_VERSION != 20141121 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif diff --git a/private/queue_private.h b/private/queue_private.h index e8130360d..f2bb69132 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -79,6 +79,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t attr, * This priority level is intended for user-initiated application activity that * is long-running and CPU or IO intensive and that the user is actively waiting * on, but that should not interfere with interactive use of the application. + * + * This global queue priority level is mapped to QOS_CLASS_UTILITY. 
*/ #define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN @@ -237,6 +239,8 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, * is no guarantee that the specified number will be reached. * Pass 0 to specify that a default pool size determined by the system should * be used. + * NOTE: passing pool_size == 1 does NOT make the pthread root queue equivalent + * to a serial queue. * * @result * The flags argument to pass to dispatch_pthread_root_queue_create(). @@ -328,6 +332,44 @@ dispatch_assert_queue_not(dispatch_queue_t queue); #define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) #endif +/*! + * @function dispatch_async_enforce_qos_class_f + * + * @abstract + * Submits a function for asynchronous execution on a dispatch queue. + * + * @discussion + * See dispatch_async() for details. The QOS will be enforced as if + * this was called: + * + * dispatch_async(queue, dispatch_block_create(DISPATCH_BLOCK_ENFORCE_QOS_CLASS, ^{ + * work(context); + * }); + * + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The system will hold a reference on the target queue until the function + * has returned. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_async_f(). + * The result of passing NULL in this parameter is undefined. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, + void *context, + dispatch_function_t work); + + __END_DECLS #endif diff --git a/private/source_private.h b/private/source_private.h index c4ce1d452..e8373ba26 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +__BEGIN_DECLS + /*! * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE * @discussion A dispatch timer source that is part of a timer aggregate. @@ -108,6 +110,8 @@ DISPATCH_EXPORT const struct dispatch_source_type_s __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; +__END_DECLS + /*! * @enum dispatch_source_sock_flags_t * diff --git a/private/voucher_activity_private.h b/private/voucher_activity_private.h index c02b23653..8a13e769f 100644 --- a/private/voucher_activity_private.h +++ b/private/voucher_activity_private.h @@ -27,7 +27,7 @@ #include #endif -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20140708 +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20150318 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -77,6 +77,9 @@ OS_ENUM(voucher_activity_tracepoint_type, uint8_t, OS_ENUM(voucher_activity_flag, unsigned long, voucher_activity_flag_default = 0, voucher_activity_flag_force = 0x1, + voucher_activity_flag_debug = 0x2, + voucher_activity_flag_persist = 0x4, + voucher_activity_flag_stream = 0x8, ); /*! @@ -277,6 +280,47 @@ uint64_t voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, void *buffer, size_t length); +/*! + * @function voucher_activity_trace_strings + * + * @abstract + * Add a tracepoint with strings data to trace buffer of the current activity. 
+ * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id() + * + * @param location + * Tracepoint location. + * + * @param buffer + * Pointer to packed buffer of tracepoint data. + * + * @param length + * Length of data at 'buffer'. + * + * @param strings + * NULL-terminated array of strings data. + * + * @param string_lengths + * Array of string lengths (required to have the same number of elements as the + * 'strings' array): string_lengths[i] is the maximum number of characters to + * copy from strings[i], excluding the NUL-terminator (may be smaller than the + * length of the string present in strings[i]). + * + * @param total_strings_size + * Total size of all strings data to be copied from strings array (including + * all NUL-terminators). + * + * @result + * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +OS_VOUCHER_EXPORT OS_NOTHROW +uint64_t +voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, + uint64_t location, void *buffer, size_t length, const char *strings[], + size_t string_lengths[], size_t total_strings_size); + /*! * @function voucher_activity_trace_args * @@ -349,7 +393,7 @@ voucher_activity_mode_t voucher_activity_get_mode(void); /*! - * @function voucher_activity_set_mode_4libtrace(void) + * @function voucher_activity_set_mode_4libtrace * * @abstract * Set the current mode of voucher activity subsystem. 
@@ -400,6 +444,7 @@ OS_ENUM(_voucher_activity_tracepoint_flag, uint16_t, _voucher_activity_trace_flag_buffer_empty = 0, _voucher_activity_trace_flag_tracepoint = (1u << 0), _voucher_activity_trace_flag_tracepoint_args = (1u << 1), + _voucher_activity_trace_flag_tracepoint_strings = (1u << 2), _voucher_activity_trace_flag_wide_first = (1u << 6), _voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7), _voucher_activity_trace_flag_start = (1u << 8), @@ -426,7 +471,13 @@ typedef struct _voucher_activity_tracepoint_s { uint64_t vat_thread; // pthread_t uint64_t vat_timestamp; // absolute time uint64_t vat_location; // tracepoint PC - uint64_t vat_data[4]; // trace data + union { + uint64_t vat_data[4]; // trace data + struct { + uint16_t vats_offset; // offset to string data (from buffer end) + uint8_t vats_data[30]; // trace data + } vat_stroff; // iff _vat_flag_tracepoint_strings present + }; } *_voucher_activity_tracepoint_t; /*! @@ -439,20 +490,33 @@ typedef struct _voucher_activity_tracepoint_s { #include #include -static const atm_subaid32_t _voucher_default_activity_subid = - ATM_SUBAID32_MAX-1; - static const size_t _voucher_activity_buffer_size = 4096; static const size_t _voucher_activity_tracepoints_per_buffer = _voucher_activity_buffer_size / sizeof(struct _voucher_activity_tracepoint_s); +static const size_t _voucher_activity_buffer_header_size = + sizeof(struct _voucher_activity_tracepoint_s); +static const size_t _voucher_activity_strings_header_size = 0; // TODO + typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size]; +static const size_t _voucher_activity_buffers_per_heap = 512; +typedef unsigned long _voucher_activity_bitmap_base_t; +static const size_t _voucher_activity_bits_per_bitmap_base_t = + 8 * sizeof(_voucher_activity_bitmap_base_t); +static const size_t _voucher_activity_bitmaps_per_heap = + _voucher_activity_buffers_per_heap / + _voucher_activity_bits_per_bitmap_base_t; +typedef _voucher_activity_bitmap_base_t + 
_voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap] + __attribute__((__aligned__(64))); + struct _voucher_activity_self_metadata_s { struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; + _voucher_activity_bitmap_t volatile vam_buffer_bitmap; }; + typedef struct _voucher_activity_metadata_opaque_s { - _voucher_activity_buffer_t vam_kernel_metadata; _voucher_activity_buffer_t vam_client_metadata; union { struct _voucher_activity_self_metadata_s vam_self_metadata; @@ -462,80 +526,91 @@ typedef struct _voucher_activity_metadata_opaque_s { typedef os_lock_handoff_s _voucher_activity_lock_s; -typedef struct _voucher_atm_s { - int32_t volatile vatm_refcnt; - mach_voucher_t vatm_kvoucher; - atm_aid_t vatm_id; - atm_mailbox_offset_t vatm_mailbox_offset; - TAILQ_ENTRY(_voucher_atm_s) vatm_list; -#if __LP64__ - uintptr_t vatm_pad[3]; - // cacheline -#endif - _voucher_activity_lock_s vatm_activities_lock; - TAILQ_HEAD(_voucher_atm_activities_s, _voucher_activity_s) vatm_activities; - TAILQ_HEAD(, _voucher_activity_s) vatm_used_activities; -} *_voucher_atm_t; +OS_ENUM(_voucher_activity_buffer_atomic_flags, uint8_t, + _voucher_activity_buffer_full = (1u << 0), + _voucher_activity_buffer_pushing = (1u << 1), +); + +typedef union { + uint64_t vabp_atomic_pos; + struct { + uint16_t vabp_refcnt; + uint8_t vabp_flags; + uint8_t vabp_unused; + uint16_t vabp_next_tracepoint_idx; + uint16_t vabp_string_offset; // offset from the _end_ of the buffer + } vabp_pos; +} _voucher_activity_buffer_position_u; // must match layout of _voucher_activity_tracepoint_s typedef struct _voucher_activity_buffer_header_s { uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header - uint8_t vabh_unused[6]; - uint64_t vabh_thread; - uint64_t vabh_timestamp; - uint32_t volatile vabh_next_tracepoint_idx; - uint32_t vabh_sequence_no; + uint8_t vat_type; + uint8_t vat_namespace; + uint32_t vat_code; + uint64_t vat_thread; + uint64_t vat_timestamp; + uint64_t vat_location; 
voucher_activity_id_t vabh_activity_id; - uint64_t vabh_reserved; + _voucher_activity_buffer_position_u volatile vabh_pos; TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list; } *_voucher_activity_buffer_header_t; -// must match layout of _voucher_activity_buffer_header_s -typedef struct _voucher_activity_s { - // first tracepoint entry - // must match layout of _voucher_activity_tracepoint_s - uint16_t va_flags; // _voucher_activity_trace_flag_buffer_header | - // _voucher_activity_trace_flag_activity | - // _voucher_activity_trace_flag_start | - // _voucher_activity_trace_flag_wide_first - uint8_t va_type; - uint8_t va_namespace; - uint32_t va_code; - uint64_t va_thread; - uint64_t va_timestamp; - uint32_t volatile vabh_next_tracepoint_idx; - uint32_t volatile va_max_sequence_no; - voucher_activity_id_t va_id; - int32_t volatile va_use_count; - uint32_t va_buffer_limit; - TAILQ_HEAD(_voucher_activity_buffer_list_s, - _voucher_activity_buffer_header_s) va_buffers; -#if !__LP64__ - uint64_t va_pad; -#endif +/*! + * @enum _voucher_activity_buffer_hook_reason + * + * @constant _voucher_activity_buffer_hook_reason_full + * Specified activity buffer is full. + * Will be reported reused or freed later. + * + * @constant _voucher_activity_buffer_hook_reason_reuse + * Specified activity buffer is about to be reused. + * Was previously reported as full. + * + * @constant _voucher_activity_buffer_hook_reason_free + * Specified activity buffer is about to be freed. + * May have been previously reported as full or may be only partially filled. 
+ */ +typedef enum _voucher_activity_buffer_hook_reason { + _voucher_activity_buffer_hook_reason_full = 0x1, + _voucher_activity_buffer_hook_reason_reuse = 0x2, + _voucher_activity_buffer_hook_reason_free = 0x4, +} _voucher_activity_buffer_hook_reason; - // second tracepoint entry - // must match layout of _voucher_activity_tracepoint_s - uint16_t va_flags2; - uint8_t va_unused2[2]; - int32_t volatile va_refcnt; - uint64_t va_location; - _voucher_activity_buffer_header_t volatile va_current_buffer; - _voucher_atm_t va_atm; - _voucher_activity_lock_s va_buffers_lock; - uintptr_t va_pad2[2]; - -#if __LP64__ - // third tracepoint entry - // must match layout of _voucher_activity_tracepoint_s - uint16_t va_flags3; - uint8_t va_unused3[6]; - uintptr_t va_pad3; -#endif - TAILQ_ENTRY(_voucher_activity_s) va_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list; -} *_voucher_activity_t; +/*! + * @typedef _voucher_activity_buffer_hook_t + * + * @abstract + * A function pointer called when an activity buffer is full or being freed. + * NOTE: callbacks occur under an activity-wide handoff lock and work done + * inside the callback function must not block or otherwise cause that lock to + * be held for a extended period of time. + * + * @param reason + * Reason for callback. + * + * @param buffer + * Pointer to activity buffer. + */ +typedef void (*_voucher_activity_buffer_hook_t)( + _voucher_activity_buffer_hook_reason reason, + _voucher_activity_buffer_header_t buffer); + +/*! + * @function voucher_activity_buffer_hook_install_4libtrace + * + * @abstract + * Install activity buffer hook callback function. + * Must be called from the libtrace initializer, and at most once. + * + * @param hook + * Hook function to install. 
+ */ +__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +OS_VOUCHER_EXPORT OS_NOTHROW +void +voucher_activity_buffer_hook_install_4libtrace( + _voucher_activity_buffer_hook_t hook); #endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI diff --git a/private/voucher_private.h b/private/voucher_private.h index fcc28f13c..e4c31a696 100644 --- a/private/voucher_private.h +++ b/private/voucher_private.h @@ -24,7 +24,7 @@ #include #include -#define OS_VOUCHER_SPI_VERSION 20140425 +#define OS_VOUCHER_SPI_VERSION 20141203 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -161,7 +161,7 @@ void voucher_decrement_importance_count4CF(voucher_t voucher); /*! - * @group Dispatch block objects + * @group Voucher dispatch block SPI */ #ifndef __DISPATCH_BUILDING_DISPATCH__ @@ -334,6 +334,66 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, voucher_t voucher, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block); +/*! + * @group Voucher dispatch queue SPI + */ + +/*! + * @function dispatch_queue_create_with_accounting_override_voucher + * + * @abstract + * Creates a new dispatch queue with an accounting override voucher created + * from the specified voucher. + * + * @discussion + * See dispatch_queue_create() headerdoc for generic details on queue creation. + * + * The resource accounting attributes of the specified voucher are extracted + * and used to create an accounting override voucher for the new queue. + * + * Every block executed on the returned queue will initially have this override + * voucher adopted, any voucher automatically associated with or explicitly + * assigned to the block will NOT be used and released immediately before block + * execution starts. + * + * The accounting override voucher will be automatically propagated to any + * asynchronous work generated from the queue following standard voucher + * propagation rules. 
+ * + * NOTE: this SPI should only be used in special circumstances when a subsystem + * has complete control over all workitems submitted to a queue (e.g. no client + * block is ever submitted to the queue) and if and only if such queues have a + * one-to-one mapping with resource accounting identities. + * + * CAUTION: use of this SPI represents a potential voucher propagation hole. It + * is the responsibility of the caller to ensure that any callbacks into client + * code from the queue have the correct client voucher applied (rather than the + * automatically propagated accounting override voucher), e.g. by use of the + * dispatch_block_create() API to capture client state at the time the callback + * is registered. + * + * @param label + * A string label to attach to the queue. + * This parameter is optional and may be NULL. + * + * @param attr + * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * the function dispatch_queue_attr_make_with_qos_class(). + * + * @param voucher + * A voucher whose resource accounting attributes are used to create the + * accounting override voucher attached to the queue. + * + * @result + * The newly created dispatch queue. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create_with_accounting_override_voucher(const char *label, + dispatch_queue_attr_t attr, voucher_t voucher); + /*! 
* @group Voucher Mach SPI * SPI intended for clients that need to interact with mach messages or mach diff --git a/src/allocator.c b/src/allocator.c index af1f3c115..d6db272cb 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -643,6 +643,12 @@ _dispatch_alloc_init(void) sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == DISPATCH_ALLOCATOR_PAGE_SIZE); #endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS + // Make sure our alignment will be correct: that is, that we are correctly + // aligning to both. + dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == + ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)); + dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == + ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)); } #elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \ || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) diff --git a/src/allocator_internal.h b/src/allocator_internal.h index f4c8ba0de..893ba8283 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -141,6 +141,16 @@ typedef unsigned long bitmap_t; #define HEAP_MASK (~(uintptr_t)(BYTES_PER_HEAP - 1)) #define MAGAZINE_MASK (~(uintptr_t)(BYTES_PER_MAGAZINE - 1)) +// this will round up such that first_bitmap_in_same_page() can mask the address +// of a bitmap_t in the maps to obtain the first bitmap for that same page +#define ROUND_UP_TO_BITMAP_ALIGNMENT(x) \ + (((x) + ((BITMAPS_PER_PAGE * BYTES_PER_BITMAP) - 1u)) & \ + ~((BITMAPS_PER_PAGE * BYTES_PER_BITMAP) - 1u)) +// Since these are both powers of two, we end up with not only the max alignment, +// but happily the least common multiple, which will be the greater of the two. 
+#define ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT(x))) +#define PADDING_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) (ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(x) - (x)) + #define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x)) #if defined(__LP64__) @@ -155,8 +165,11 @@ typedef unsigned long bitmap_t; // header is expected to end on supermap's required alignment #define HEADER_TO_SUPERMAPS_PADDING 0 -#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_CONTINUATION_SIZE( \ +// we want to align the maps to a continuation size, but we must also have proper padding +// so that we can perform first_bitmap_in_same_page() +#define SUPERMAPS_TO_MAPS_PADDING (PADDING_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE( \ SIZEOF_SUPERMAPS + HEADER_TO_SUPERMAPS_PADDING + SIZEOF_HEADER)) + #define MAPS_TO_FPMAPS_PADDING (PADDING_TO_CONTINUATION_SIZE(SIZEOF_MAPS)) #define BYTES_LEFT_IN_FIRST_PAGE (BYTES_PER_PAGE - \ diff --git a/src/block.cpp b/src/block.cpp new file mode 100644 index 000000000..83fff54ed --- /dev/null +++ b/src/block.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifdef __BLOCKS__ + +#if __cplusplus < 201103L +#error Must build with C++11 or later +#endif + +#if __has_feature(cxx_exceptions) +#error Must build without C++ exceptions +#endif + +extern "C" { +#include "internal.h" +} + +#if DISPATCH_DEBUG && DISPATCH_BLOCK_PRIVATE_DATA_DEBUG +#define _dispatch_block_private_data_debug(msg, ...) \ + _dispatch_debug("block_private[%p]: " msg, (this), ##__VA_ARGS__) +#else +#define _dispatch_block_private_data_debug(msg, ...) +#endif + +#pragma mark - +#pragma mark _dispatch_block_create + +// rdar://20766742 C++ helpers to enable block capture of vouchers and groups + +struct dispatch_block_private_data_s { + DISPATCH_BLOCK_PRIVATE_DATA_HEADER(); + static void* operator new(size_t) = delete; + static void* operator new [] (size_t) = delete; + explicit inline DISPATCH_ALWAYS_INLINE dispatch_block_private_data_s( + dispatch_block_flags_t flags, voucher_t voucher, + pthread_priority_t priority, dispatch_block_t block) noexcept : + dbpd_magic(), dbpd_flags(flags), dbpd_atomic_flags(), + dbpd_performed(), dbpd_priority(priority), dbpd_voucher(voucher), + dbpd_block(block), dbpd_group(), dbpd_queue(), dbpd_thread() + { + // stack structure constructor, no releases on destruction + _dispatch_block_private_data_debug("create, block: %p", dbpd_block); + } + inline DISPATCH_ALWAYS_INLINE dispatch_block_private_data_s( + dispatch_block_private_data_s const &o) noexcept : + dbpd_magic(DISPATCH_BLOCK_PRIVATE_DATA_MAGIC), + dbpd_flags(o.dbpd_flags), dbpd_atomic_flags(), dbpd_performed(), + dbpd_priority(o.dbpd_priority), dbpd_voucher(o.dbpd_voucher), + dbpd_block(), dbpd_group(), dbpd_queue(), dbpd_thread() + { + // copy constructor, create copy with retained references + if (dbpd_voucher) voucher_retain(dbpd_voucher); + if (o.dbpd_block) dbpd_block = _dispatch_Block_copy(o.dbpd_block); + _dispatch_block_private_data_debug("copy from %p, block: %p from %p", + &o, dbpd_block, o.dbpd_block); + 
if (!o.dbpd_magic) return; // No group in initial copy of stack object + dbpd_group = _dispatch_group_create_and_enter(); + } + inline DISPATCH_ALWAYS_INLINE ~dispatch_block_private_data_s() noexcept + { + _dispatch_block_private_data_debug("destroy%s, block: %p", + dbpd_magic ? "" : " (stack)", dbpd_block); + if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return; + if (dbpd_group) { + if (!dbpd_performed) dispatch_group_leave(dbpd_group); + ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group); + } + if (dbpd_block) Block_release(dbpd_block); + if (dbpd_voucher) voucher_release(dbpd_voucher); + } +}; + +dispatch_block_t +_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, + pthread_priority_t pri, dispatch_block_t block) +{ + struct dispatch_block_private_data_s dbpds(flags, voucher, pri, block); + return _dispatch_Block_copy(^{ + // Capture stack object: invokes copy constructor (17094902) + (void)dbpds; + _dispatch_block_invoke(&dbpds); + }); +} + +extern "C" { +// The compiler hides the name of the function it generates, and changes it if +// we try to reference it directly, but the linker still sees it. +extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) + asm("____dispatch_block_create_block_invoke"); +void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; +} + +#endif // __BLOCKS__ diff --git a/src/data.c b/src/data.c index feb601281..e65399fdc 100644 --- a/src/data.c +++ b/src/data.c @@ -20,14 +20,77 @@ #include "internal.h" -// Dispatch data objects are dispatch objects with standard retain/release -// memory management. A dispatch data object either points to a number of other -// dispatch data objects or is a leaf data object. A leaf data object contains -// a pointer to represented memory. A composite data object specifies the total -// size of data it represents and list of constituent records. 
-// -// A leaf data object always points to a full represented buffer, a composite -// dispatch data object is needed to represent a subrange of a memory region. +/* + * Dispatch data objects are dispatch objects with standard retain/release + * memory management. A dispatch data object either points to a number of other + * dispatch data objects or is a leaf data object. + * A composite data object specifies the total size of data it represents + * and list of constituent records. + * + ******************************************************************************* + * + * CURRENT IMPLEMENTATION DETAILS + * + * There are actually 3 kinds of composite objects + * - trivial subranges + * - unflattened composite data objects + * - flattened composite data objects + * + * LEAVES (num_records == 0, destructor != nil) + * + * Those objects have a pointer to represented memory in `buf`. + * + * UNFLATTENED (num_records > 1, buf == nil, destructor == nil) + * + * This is the generic case of a composite object. + * + * FLATTENED (num_records > 1, buf != nil, destructor == nil) + * + * Those objects are non trivial composite objects whose `buf` pointer + * is a contiguous representation (copied) of the memory it represents. + * + * Such objects are created when used as an NSData and -bytes is called and + * where the dispatch data object is an unflattened composite object. + * The underlying implementation is _dispatch_data_get_flattened_bytes + * + * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil) + * + * Those objects point to a single leaf, never to flattened objects. + * + ******************************************************************************* + * + * Non trivial invariants: + * + * It is forbidden to point into a composite data object and ignore entire + * records from it. (for example by having `from` longer than the first + * record length). + * + * dispatch_data_t's are either leaves, or composite objects pointing to + * leaves. 
Depth is never greater than 1. + * + ******************************************************************************* + * + * There are 4 dispatch_data_t constructors who may create non leaf objects, + * and ensure proper invariants. + * + * dispatch_data_copy_region() + * This function first sees through trivial subranges, and may in turn + * generate new trivial subranges. + * + * dispatch_data_create_map() + * This function either returns existing data objects, or a leaf. + * + * dispatch_data_create_subrange() + * This function treats flattened objects like unflattened ones, + * and recurses into trivial subranges, it can create trivial subranges. + * + * dispatch_data_create_concat() + * This function unwraps the top-level composite objects, trivial or not, + * and else concatenates the two arguments range lists, hence always creating + * unflattened objects, unless one of the arguments was empty. + * + ******************************************************************************* + */ #if USE_OBJC #define _dispatch_data_retain(x) _dispatch_objc_retain(x) @@ -68,7 +131,7 @@ _dispatch_data_alloc(size_t n, size_t extra) { dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS, sizeof(struct dispatch_data_s) + extra + - (n ? 
n * sizeof(range_record) - sizeof(data->buf) : 0)); + n * sizeof(range_record)); data->num_records = n; #if !USE_OBJC data->do_targetq = dispatch_get_global_queue( @@ -107,10 +170,6 @@ _dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, data->buf = buffer; data->size = size; data->destructor = destructor; -#if DISPATCH_DATA_USE_LEAF_MEMBER - data->leaf = true; - data->num_records = 1; -#endif if (queue) { _dispatch_retain(queue); data->do_targetq = queue; @@ -210,15 +269,15 @@ dispatch_data_create_alloc(size_t size, void** buffer_ptr) void _dispatch_data_dispose(dispatch_data_t dd) { - dispatch_block_t destructor = dd->destructor; - if (destructor == NULL) { + if (_dispatch_data_leaf(dd)) { + _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq, + dd->destructor); + } else { size_t i; for (i = 0; i < _dispatch_data_num_records(dd); ++i) { _dispatch_data_release(dd->records[i].data_object); } - } else { - _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq, - destructor); + free((void *)dd->buf); } } @@ -234,6 +293,10 @@ _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "composite, size = %zd, num_records = %zd ", dd->size, _dispatch_data_num_records(dd)); + if (dd->buf) { + offset += dsnprintf(&buf[offset], bufsiz - offset, + ", flatbuf = %p ", dd->buf); + } size_t i; for (i = 0; i < _dispatch_data_num_records(dd); ++i) { range_record r = dd->records[i]; @@ -264,6 +327,7 @@ dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) _dispatch_data_retain(dd1); return dd1; } + data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) + _dispatch_data_num_records(dd2), 0); data->size = dd1->size + dd2->size; @@ -305,6 +369,13 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, _dispatch_data_retain(dd); return dd; } + /* + * we must only optimize leaves and not flattened objects + * because lots of users want to keep the end of 
a buffer and release + * as much memory as they can from the beginning of it + * + * Using the flatbuf here would be very wrong with respect to that goal + */ if (_dispatch_data_leaf(dd)) { data = _dispatch_data_alloc(1, 0); data->size = length; @@ -314,36 +385,88 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, _dispatch_data_retain(dd); return data; } - // Subrange of a composite dispatch data object: find the record containing - // the specified offset - data = dispatch_data_empty; - size_t i = 0, bytes_left = length; - while (i < _dispatch_data_num_records(dd) && - offset >= dd->records[i].length) { + + // Subrange of a composite dispatch data object + const size_t dd_num_records = _dispatch_data_num_records(dd); + bool to_the_end = (offset + length == dd->size); + size_t i = 0; + + // find the record containing the specified offset + while (i < dd_num_records && offset >= dd->records[i].length) { offset -= dd->records[i++].length; } - while (i < _dispatch_data_num_records(dd)) { - size_t record_len = dd->records[i].length - offset; - if (record_len > bytes_left) { - record_len = bytes_left; - } - dispatch_data_t subrange = dispatch_data_create_subrange( - dd->records[i].data_object, dd->records[i].from + offset, - record_len); - dispatch_data_t concat = dispatch_data_create_concat(data, subrange); - _dispatch_data_release(data); - _dispatch_data_release(subrange); - data = concat; - bytes_left -= record_len; - if (!bytes_left) { - return data; + + // Crashing here indicates memory corruption of passed in data object + if (slowpath(i >= dd_num_records)) { + DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); + return NULL; + } + + // if everything is from a single dispatch data object, avoid boxing it + if (offset + length <= dd->records[i].length) { + return dispatch_data_create_subrange(dd->records[i].data_object, + dd->records[i].from + offset, length); + } + + // find the record containing the end of the current range + // and 
optimize the case when you just remove bytes at the origin + size_t count, last_length; + + if (to_the_end) { + count = dd_num_records - i; + } else { + last_length = length - (dd->records[i].length - offset); + count = 1; + + while (i + count < dd_num_records) { + size_t record_length = dd->records[i + count++].length; + + if (last_length <= record_length) { + break; + } + last_length -= record_length; + + // Crashing here indicates memory corruption of passed in data object + if (slowpath(i + count >= dd_num_records)) { + DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); + return NULL; + } } - offset = 0; - i++; } - // Crashing here indicates memory corruption of passed in data object - DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); - return NULL; + + data = _dispatch_data_alloc(count, 0); + data->size = length; + memcpy(data->records, dd->records + i, count * sizeof(range_record)); + + if (offset) { + data->records[0].from += offset; + data->records[0].length -= offset; + } + if (!to_the_end) { + data->records[count - 1].length = last_length; + } + + for (i = 0; i < count; i++) { + _dispatch_data_retain(data->records[i].data_object); + } + return data; +} + +static void* +_dispatch_data_flatten(dispatch_data_t dd) +{ + void *buffer = malloc(dd->size); + + // Composite data object, copy the represented buffers + if (buffer) { + dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, + size_t off, const void* buf, size_t len) { + memcpy(buffer + off, buf, len); + return (bool)true; + }); + } + + return buffer; } // When mapping a leaf object or a subrange of a leaf object, return a direct @@ -354,37 +477,30 @@ dispatch_data_t dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, size_t *size_ptr) { - dispatch_data_t data = dd; + dispatch_data_t data = NULL; const void *buffer = NULL; - size_t size = dd->size, offset = 0; + size_t size = dd->size; + if (!size) { data = dispatch_data_empty; goto out; } - if 
(!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && - _dispatch_data_leaf(dd->records[0].data_object)) { - offset = dd->records[0].from; - dd = dd->records[0].data_object; - } - if (_dispatch_data_leaf(dd)) { - _dispatch_data_retain(data); - buffer = dd->buf + offset; + + buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL); + if (buffer) { + _dispatch_data_retain(dd); + data = dd; goto out; } - // Composite data object, copy the represented buffers - buffer = malloc(size); - if (!buffer) { - data = NULL; + + buffer = _dispatch_data_flatten(dd); + if (fastpath(buffer)) { + data = dispatch_data_create(buffer, size, NULL, + DISPATCH_DATA_DESTRUCTOR_FREE); + } else { size = 0; - goto out; } - dispatch_data_apply(dd, ^(dispatch_data_t region DISPATCH_UNUSED, - size_t off, const void* buf, size_t len) { - memcpy((void*)buffer + off, buf, len); - return (bool)true; - }); - data = dispatch_data_create(buffer, size, NULL, - DISPATCH_DATA_DESTRUCTOR_FREE); + out: if (buffer_ptr) { *buffer_ptr = buffer; @@ -395,24 +511,63 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, return data; } +const void * +_dispatch_data_get_flattened_bytes(dispatch_data_t dd) +{ + const void *buffer; + size_t offset = 0; + + if (slowpath(!dd->size)) { + return NULL; + } + + buffer = _dispatch_data_map_direct(dd, 0, &dd, &offset); + if (buffer) { + return buffer; + } + + void *flatbuf = _dispatch_data_flatten(dd); + if (fastpath(flatbuf)) { + // we need a release so that readers see the content of the buffer + if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, + &buffer, release))) { + free(flatbuf); + } else { + buffer = flatbuf; + } + } else { + return NULL; + } + + return buffer + offset; +} + +#if DISPATCH_USE_CLIENT_CALLOUT +DISPATCH_NOINLINE +#else +DISPATCH_ALWAYS_INLINE +#endif +static bool +_dispatch_data_apply_client_callout(void *ctxt, dispatch_data_t region, size_t offset, + const void *buffer, size_t size, 
dispatch_data_applier_function_t f) +{ + return f(ctxt, region, offset, buffer, size); +} + + static bool _dispatch_data_apply(dispatch_data_t dd, size_t offset, size_t from, size_t size, void *ctxt, dispatch_data_applier_function_t applier) { bool result = true; - dispatch_data_t data = dd; const void *buffer; - dispatch_assert(dd->size); - if (!_dispatch_data_leaf(dd) && _dispatch_data_num_records(dd) == 1 && - _dispatch_data_leaf(dd->records[0].data_object)) { - from = dd->records[0].from; - dd = dd->records[0].data_object; - } - if (_dispatch_data_leaf(dd)) { - buffer = dd->buf + from; - return _dispatch_client_callout3(ctxt, data, offset, buffer, size, - applier); + + buffer = _dispatch_data_map_direct(dd, 0, NULL, NULL); + if (buffer) { + return _dispatch_data_apply_client_callout(ctxt, dd, + offset, buffer + from, size, applier); } + size_t i; for (i = 0; i < _dispatch_data_num_records(dd) && result; ++i) { result = _dispatch_data_apply(dd->records[i].data_object, @@ -443,58 +598,73 @@ dispatch_data_apply(dispatch_data_t dd, dispatch_data_applier_t applier) (dispatch_data_applier_function_t)_dispatch_Block_invoke(applier)); } +static dispatch_data_t +_dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size, + size_t location, size_t *offset_ptr) +{ + dispatch_data_t reusable_dd = NULL; + size_t offset = 0; + + if (from == 0 && size == dd->size) { + reusable_dd = dd; + } + + if (_dispatch_data_map_direct(dd, from, &dd, &from)) { + if (reusable_dd) { + _dispatch_data_retain(reusable_dd); + return reusable_dd; + } + + _dispatch_data_retain(dd); + if (from == 0 && size == dd->size) { + return dd; + } + + dispatch_data_t data = _dispatch_data_alloc(1, 0); + data->size = size; + data->records[0].from = from; + data->records[0].length = size; + data->records[0].data_object = dd; + return data; + } + + size_t i; + for (i = 0; i < _dispatch_data_num_records(dd); ++i) { + size_t length = dd->records[i].length; + + if (from >= length) { + from -= length; 
+ continue; + } + + length -= from; + if (location >= offset + length) { + offset += length; + from = 0; + continue; + } + + from += dd->records[i].from; + dd = dd->records[i].data_object; + *offset_ptr += offset; + location -= offset; + return _dispatch_data_copy_region(dd, from, length, location, offset_ptr); + } + + DISPATCH_CRASH("dispatch_data_copy_region out of bounds"); +} + // Returs either a leaf object or an object composed of a single leaf object dispatch_data_t dispatch_data_copy_region(dispatch_data_t dd, size_t location, size_t *offset_ptr) { if (location >= dd->size) { - *offset_ptr = 0; + *offset_ptr = dd->size; return dispatch_data_empty; } - dispatch_data_t data; - size_t size = dd->size, offset = 0, from = 0; - while (true) { - if (_dispatch_data_leaf(dd)) { - _dispatch_data_retain(dd); - *offset_ptr = offset; - if (size == dd->size) { - return dd; - } else { - // Create a new object for the requested subrange of the leaf - data = _dispatch_data_alloc(1, 0); - data->size = size; - data->records[0].from = from; - data->records[0].length = size; - data->records[0].data_object = dd; - return data; - } - } else { - // Find record at the specified location - size_t i, pos; - for (i = 0; i < _dispatch_data_num_records(dd); ++i) { - pos = offset + dd->records[i].length; - if (location < pos) { - size = dd->records[i].length; - from = dd->records[i].from; - data = dd->records[i].data_object; - if (_dispatch_data_num_records(dd) == 1 && - _dispatch_data_leaf(data)) { - // Return objects composed of a single leaf node - *offset_ptr = offset; - _dispatch_data_retain(dd); - return dd; - } else { - // Drill down into other objects - dd = data; - break; - } - } else { - offset = pos; - } - } - } - } + *offset_ptr = 0; + return _dispatch_data_copy_region(dd, 0, dd->size, location, offset_ptr); } #if HAVE_MACH diff --git a/src/data.m b/src/data.m index c76f26a52..92bc1e28c 100644 --- a/src/data.m +++ b/src/data.m @@ -32,9 +32,13 @@ #include @interface 
DISPATCH_CLASS(data) () +@property (readonly) NSUInteger length; +@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER; + - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm; - (BOOL)_bytesAreVM; +- (BOOL)_isCompact; @end @interface DISPATCH_CLASS(data_empty) : DISPATCH_CLASS(data) @@ -132,6 +136,21 @@ - (NSString *)debugDescription { class_getName([self class]), buf]; } +- (NSUInteger)length { + struct dispatch_data_s *dd = (void*)self; + return dd->size; +} + +- (const void *)bytes { + struct dispatch_data_s *dd = (void*)self; + return _dispatch_data_get_flattened_bytes(dd); +} + +- (BOOL)_isCompact { + struct dispatch_data_s *dd = (void*)self; + return !dd->size || _dispatch_data_map_direct(dd, 0, NULL, NULL) != NULL; +} + @end @implementation DISPATCH_CLASS(data_empty) diff --git a/src/data_internal.h b/src/data_internal.h index d0de8bb9c..40a780ce9 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -32,11 +32,6 @@ #include // for HeaderDoc #endif -#if defined(__LP64__) && !defined(DISPATCH_DATA_USE_LEAF_MEMBER) && !USE_OBJC -// explicit leaf member is free on 64bit due to padding -#define DISPATCH_DATA_USE_LEAF_MEMBER 1 -#endif - typedef struct range_record_s { dispatch_data_t data_object; size_t from; @@ -67,25 +62,31 @@ struct dispatch_data_s { #else // USE_OBJC DISPATCH_STRUCT_HEADER(data); #endif // USE_OBJC -#if DISPATCH_DATA_USE_LEAF_MEMBER - bool leaf; -#endif + const void *buf; dispatch_block_t destructor; size_t size, num_records; - union { - const void* buf; - range_record records[0]; - }; + range_record records[0]; }; -#if DISPATCH_DATA_USE_LEAF_MEMBER -#define _dispatch_data_leaf(d) ((d)->leaf) -#define _dispatch_data_num_records(d) ((d)->num_records) -#else -#define _dispatch_data_leaf(d) ((d)->num_records ? 0 : ((d)->size ? 1 : 0)) -#define _dispatch_data_num_records(d) \ - (_dispatch_data_leaf(d) ? 
1 : (d)->num_records) -#endif // DISPATCH_DATA_USE_LEAF_MEMBER +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_data_leaf(struct dispatch_data_s *dd) +{ + return dd->num_records == 0; +} + +/* + * This is about the number of records required to hold that dispatch data + * if it's not a leaf. Callers either want that value, or have to special + * case the case when the dispatch data *is* a leaf before (and that the actual + * embeded record count of that dispatch data is 0) + */ +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_data_num_records(struct dispatch_data_s *dd) +{ + return dd->num_records ?: 1; +} typedef dispatch_data_t (*dispatch_transform_t)(dispatch_data_t data); @@ -101,27 +102,49 @@ void dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, dispatch_block_t destructor); void _dispatch_data_dispose(dispatch_data_t data); size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); -const dispatch_block_t _dispatch_data_destructor_inline; -#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) +const void* +_dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); +#if !defined(__cplusplus) #if !__OBJC2__ +const dispatch_block_t _dispatch_data_destructor_inline; +#define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) +#endif // !__OBJC2__ +/* + * the out parameters are about seeing "through" trivial subranges + * so for something like this: dd = { subrange [ dd1, offset1 ] }, + * this will return { dd1, offset + offset1 } + * + * If the dispatch object isn't a trivial subrange, it returns { dd, offset } + */ +DISPATCH_ALWAYS_INLINE static inline const void* -_dispatch_data_map_direct(dispatch_data_t dd) +_dispatch_data_map_direct(struct dispatch_data_s *dd, size_t offset, + struct dispatch_data_s **dd_out, size_t *from_out) { - size_t offset = 0; - if (slowpath(!dd->size)) { - return NULL; - } + const void *buffer = NULL; + + dispatch_assert(dd->size); if 
(slowpath(!_dispatch_data_leaf(dd)) && - _dispatch_data_num_records(dd) == 1 && - _dispatch_data_leaf(dd->records[0].data_object)) { - offset = dd->records[0].from; - dd = dd->records[0].data_object; + _dispatch_data_num_records(dd) == 1) { + offset += dd->records[0].from; + dd = (struct dispatch_data_s *)dd->records[0].data_object; } - return fastpath(_dispatch_data_leaf(dd)) ? (dd->buf + offset) : NULL; + + if (fastpath(_dispatch_data_leaf(dd))) { + buffer = dd->buf + offset; + } else { + buffer = dispatch_atomic_load((void **)&dd->buf, relaxed); + if (buffer) { + buffer += offset; + } + } + if (dd_out) *dd_out = dd; + if (from_out) *from_out = offset; + return buffer; } -#endif // !__OBJC2__ +#endif // !defined(__cplusplus) #endif // __DISPATCH_DATA_INTERNAL__ diff --git a/src/init.c b/src/init.c index 5cbf8057f..0aff191f0 100644 --- a/src/init.c +++ b/src/init.c @@ -82,6 +82,7 @@ pthread_key_t dispatch_bcounter_key; #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; +_voucher_atm_t _voucher_task_atm; _voucher_activity_t _voucher_activity_default; #endif voucher_activity_mode_t _voucher_activity_mode; @@ -180,6 +181,7 @@ struct dispatch_queue_s _dispatch_main_q = { .dq_running = 1, .dq_width = 1, .dq_is_thread_bound = 1, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 1, }; @@ -194,20 +196,24 @@ struct dispatch_queue_s _dispatch_main_q = { .do_next = DISPATCH_OBJECT_LISTLESS, \ .dqa_qos_class = (qos), \ .dqa_relative_priority = (qos) ? 
(prio) : 0, \ - .dqa_overcommit = (overcommit), \ + .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ .dqa_concurrent = (concurrent), \ } #define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \ { \ [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 1), \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 1), \ [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 0, 0), \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 0), \ [DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 1), \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 1), \ [DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, 1, 0), \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 0), \ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 1),\ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 0),\ } #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ @@ -237,8 +243,12 @@ struct dispatch_queue_s _dispatch_main_q = { [DQA_INDEX_QOS_CLASS_##qos] = \ DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) +// DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased +// to array member [0][0][0][0] and their properties must match! 
const struct dispatch_queue_attr_s _dispatch_queue_attrs[] - [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2] = { + [DISPATCH_QUEUE_ATTR_PRIO_COUNT] + [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] = { DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND), @@ -466,6 +476,9 @@ _dispatch_abort(size_t line, long val) static int dispatch_logfile = -1; static bool dispatch_log_disabled; +#if DISPATCH_DEBUG +static uint64_t dispatch_log_basetime; +#endif static dispatch_once_t _dispatch_logv_pred; static void @@ -502,6 +515,9 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) if (dispatch_logfile != -1) { struct timeval tv; gettimeofday(&tv, NULL); +#if DISPATCH_DEBUG + dispatch_log_basetime = mach_absolute_time(); +#endif dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), tv.tv_sec, tv.tv_usec); @@ -527,13 +543,20 @@ static void _dispatch_logv_file(const char *msg, va_list ap) { char buf[2048]; - int r = vsnprintf(buf, sizeof(buf), msg, ap); + size_t bufsiz = sizeof(buf), offset = 0; + int r; + +#if DISPATCH_DEBUG + offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t", + mach_absolute_time() - dispatch_log_basetime); +#endif + r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); if (r < 0) return; - size_t len = (size_t)r; - if (len > sizeof(buf) - 1) { - len = sizeof(buf) - 1; + offset += (size_t)r; + if (offset > bufsiz - 1) { + offset = bufsiz - 1; } - _dispatch_log_file(buf, len); + _dispatch_log_file(buf, offset); } #if DISPATCH_USE_SIMPLE_ASL @@ -618,22 +641,27 @@ static void _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) { char buf[2048]; + size_t bufsiz = sizeof(buf), offset = 0; int r; - size_t offs; +#if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG + offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t", + mach_absolute_time() - 
dispatch_log_basetime, + (void *)_dispatch_thread_self()); +#endif if (dou._do) { - offs = _dispatch_object_debug2(dou, buf, sizeof(buf)); - dispatch_assert(offs + 2 < sizeof(buf)); - buf[offs++] = ':'; - buf[offs++] = ' '; - buf[offs] = '\0'; + offset += _dispatch_object_debug2(dou, &buf[offset], bufsiz - offset); + dispatch_assert(offset + 2 < bufsiz); + buf[offset++] = ':'; + buf[offset++] = ' '; + buf[offset] = '\0'; } else { - offs = strlcpy(buf, "NULL: ", sizeof(buf)); + offset += strlcpy(&buf[offset], "NULL: ", bufsiz - offset); } - r = vsnprintf(buf + offs, sizeof(buf) - offs, msg, ap); + r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); #if !DISPATCH_USE_OS_DEBUG_LOG - size_t len = offs + (r < 0 ? 0 : (size_t)r); - if (len > sizeof(buf) - 1) { - len = sizeof(buf) - 1; + size_t len = offset + (r < 0 ? 0 : (size_t)r); + if (len > bufsiz - 1) { + len = bufsiz - 1; } _dispatch_logv(buf, len, NULL); #else @@ -720,40 +748,6 @@ _dispatch_call_block_and_release(void *block) Block_release(b); } -#pragma mark - -#pragma mark _dispatch_block_create no_objc - -#if !USE_OBJC - -// The compiler hides the name of the function it generates, and changes it if -// we try to reference it directly, but the linker still sees it. -extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) - asm("____dispatch_block_create_block_invoke"); -void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; - -dispatch_block_t -_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, - pthread_priority_t pri, dispatch_block_t block) -{ - dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902 - (void)voucher; // No voucher capture! (requires ObjC runtime) - struct dispatch_block_private_data_s dbpds = - DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, pri, copy_block); - dispatch_block_t new_block = _dispatch_Block_copy(^{ - // Capture object references, which retains copy_block. - // All retained objects must be captured by the *block*. 
We - // cannot borrow any references, because the block might be - // called zero or several times, so Block_release() is the - // only place that can release retained objects. - (void)copy_block; - _dispatch_block_invoke(&dbpds); - }); - Block_release(copy_block); - return new_block; -} - -#endif // !USE_OBJC - #endif // __BLOCKS__ #pragma mark - @@ -797,21 +791,6 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_set_unwind_tsd(u); } -#undef _dispatch_client_callout3 -bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f) -{ - _dispatch_get_tsd_base(); - void *u = _dispatch_get_unwind_tsd(); - if (fastpath(!u)) return f(ctxt, region, offset, buffer, size); - _dispatch_set_unwind_tsd(NULL); - bool res = f(ctxt, region, offset, buffer, size); - _dispatch_free_unwind_tsd(); - _dispatch_set_unwind_tsd(u); - return res; -} - #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -871,6 +850,7 @@ _os_object_dealloc(_os_object_t obj) void _os_object_xref_dispose(_os_object_t obj) { + _os_object_xrefcnt_dispose_barrier(obj); if (fastpath(obj->os_obj_isa->_os_obj_xref_dispose)) { return obj->os_obj_isa->_os_obj_xref_dispose(obj); } @@ -880,6 +860,7 @@ _os_object_xref_dispose(_os_object_t obj) void _os_object_dispose(_os_object_t obj) { + _os_object_refcnt_dispose_barrier(obj); if (fastpath(obj->os_obj_isa->_os_obj_dispose)) { return obj->os_obj_isa->_os_obj_dispose(obj); } @@ -1003,11 +984,30 @@ const struct dispatch_source_type_s _dispatch_source_type_interval = { .init = dispatch_source_type_interval_init, }; +#if !DISPATCH_USE_SELECT_FALLBACK || DISPATCH_DYNAMIC_SELECT_FALLBACK +static void +dispatch_source_type_readwrite_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + 
dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_dkev->dk_kevent.flags |= EV_UDATA_SPECIFIC; + ds->ds_is_direct_kevent = true; + // bypass kernel check for device kqueue support rdar://19004921 + ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; + ds->ds_dkev->dk_kevent.data = 1; +} +#else +#define dispatch_source_type_readwrite_init NULL +#endif + const struct dispatch_source_type_s _dispatch_source_type_read = { .ke = { .filter = EVFILT_READ, .flags = EV_DISPATCH, }, + .init = dispatch_source_type_readwrite_init, }; const struct dispatch_source_type_s _dispatch_source_type_write = { @@ -1015,6 +1015,7 @@ const struct dispatch_source_type_s _dispatch_source_type_write = { .filter = EVFILT_WRITE, .flags = EV_DISPATCH, }, + .init = dispatch_source_type_readwrite_init, }; #if DISPATCH_USE_MEMORYSTATUS @@ -1063,7 +1064,7 @@ dispatch_source_type_memorystatus_init(dispatch_source_t ds, const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { .ke = { .filter = EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH, + .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP, @@ -1088,7 +1089,7 @@ dispatch_source_type_vm_init(dispatch_source_t ds, const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { .filter = EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH, + .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_VM_PRESSURE, .init = dispatch_source_type_vm_init, @@ -1109,7 +1110,7 @@ dispatch_source_type_vm_init(dispatch_source_t ds, const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { .filter = EVFILT_VM, - .flags = EV_DISPATCH, + .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_VM_PRESSURE, .init = dispatch_source_type_vm_init, @@ -1117,10 +1118,20 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { #endif // DISPATCH_USE_VM_PRESSURE +static void 
+dispatch_source_type_proc_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831 +} + const struct dispatch_source_type_s _dispatch_source_type_proc = { .ke = { .filter = EVFILT_PROC, - .flags = EV_CLEAR, + .flags = EV_CLEAR|EV_UDATA_SPECIFIC, }, .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC #if HAVE_DECL_NOTE_SIGNAL @@ -1130,18 +1141,20 @@ const struct dispatch_source_type_s _dispatch_source_type_proc = { |NOTE_REAP #endif , + .init = dispatch_source_type_proc_init, }; const struct dispatch_source_type_s _dispatch_source_type_signal = { .ke = { .filter = EVFILT_SIGNAL, + .flags = EV_UDATA_SPECIFIC, }, }; const struct dispatch_source_type_s _dispatch_source_type_vnode = { .ke = { .filter = EVFILT_VNODE, - .flags = EV_CLEAR, + .flags = EV_CLEAR|EV_UDATA_SPECIFIC, }, .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| NOTE_RENAME|NOTE_REVOKE @@ -1154,7 +1167,7 @@ const struct dispatch_source_type_s _dispatch_source_type_vnode = { const struct dispatch_source_type_s _dispatch_source_type_vfs = { .ke = { .filter = EVFILT_FS, - .flags = EV_CLEAR, + .flags = EV_CLEAR|EV_UDATA_SPECIFIC, }, .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD| VQ_ASSIST|VQ_NOTRESPLOCK @@ -1171,7 +1184,7 @@ const struct dispatch_source_type_s _dispatch_source_type_sock = { #ifdef EVFILT_SOCK .ke = { .filter = EVFILT_SOCK, - .flags = EV_CLEAR, + .flags = EV_CLEAR|EV_UDATA_SPECIFIC, }, .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | @@ -1186,18 +1199,35 @@ const struct dispatch_source_type_s _dispatch_source_type_sock = { #endif // EVFILT_SOCK }; +#if DISPATCH_USE_EV_UDATA_SPECIFIC +static void +dispatch_source_type_data_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + 
uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_needs_rearm = false; // not registered with kevent +} +#else +#define dispatch_source_type_data_init NULL +#endif + const struct dispatch_source_type_s _dispatch_source_type_data_add = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_ADD, + .flags = EV_UDATA_SPECIFIC, }, + .init = dispatch_source_type_data_init, }; const struct dispatch_source_type_s _dispatch_source_type_data_or = { .ke = { .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, + .flags = EV_CLEAR|EV_UDATA_SPECIFIC, .fflags = ~0u, }, + .init = dispatch_source_type_data_init, }; #if HAVE_MACH diff --git a/src/inline_internal.h b/src/inline_internal.h index ea6953cd0..5cc4cd884 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -38,9 +38,6 @@ DISPATCH_NOTHROW void _dispatch_client_callout(void *ctxt, dispatch_function_t f); DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); -DISPATCH_NOTHROW bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f); DISPATCH_NOTHROW void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error, @@ -62,14 +59,6 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) return f(ctxt, i); } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f) -{ - return f(ctxt, region, offset, buffer, size); -} - DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -81,7 +70,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, #endif // !DISPATCH_USE_CLIENT_CALLOUT -#if !(USE_OBJC && __OBJC2__) +#if !(USE_OBJC && 
__OBJC2__) && !defined(__cplusplus) #pragma mark - #pragma mark _os_object_t & dispatch_object_t @@ -90,11 +79,7 @@ DISPATCH_ALWAYS_INLINE static inline _os_object_t _os_object_retain_internal_inline(_os_object_t obj) { - int ref_cnt = obj->os_obj_ref_cnt; - if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return obj; // global object - } - ref_cnt = dispatch_atomic_inc2o(obj, os_obj_ref_cnt, relaxed); + int ref_cnt = _os_object_refcnt_inc(obj); if (slowpath(ref_cnt <= 0)) { DISPATCH_CRASH("Resurrection of an object"); } @@ -105,11 +90,7 @@ DISPATCH_ALWAYS_INLINE static inline void _os_object_release_internal_inline(_os_object_t obj) { - int ref_cnt = obj->os_obj_ref_cnt; - if (slowpath(ref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return; // global object - } - ref_cnt = dispatch_atomic_dec2o(obj, os_obj_ref_cnt, relaxed); + int ref_cnt = _os_object_refcnt_dec(obj); if (fastpath(ref_cnt >= 0)) { return; } @@ -121,6 +102,7 @@ _os_object_release_internal_inline(_os_object_t obj) DISPATCH_CRASH("Release while external references exist"); } #endif + // _os_object_refcnt_dispose_barrier() is in _os_object_dispose() return _os_object_dispose(obj); } @@ -204,8 +186,16 @@ static inline pthread_priority_t _dispatch_queue_reset_override_priority( static inline pthread_priority_t _dispatch_get_defaultpriority(void); static inline void _dispatch_set_defaultpriority_override(void); static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority); +static inline pthread_priority_t _dispatch_get_priority(void); static inline void _dispatch_set_priority(pthread_priority_t priority); +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_get_current(void) +{ + return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_set_thread(dispatch_queue_t dq) @@ -286,44 +276,135 @@ _dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, } } +struct 
_dispatch_identity_s { + pthread_priority_t old_pri; + pthread_priority_t old_pp; + dispatch_queue_t old_dq; +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, + dispatch_queue_t assumed_rq) +{ + di->old_dq = _dispatch_queue_get_current(); + di->old_pri = _dispatch_get_priority(); + di->old_pp = _dispatch_get_defaultpriority(); + + dispatch_assert(dx_type(di->old_dq) == DISPATCH_QUEUE_ROOT_TYPE); + dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_ROOT_TYPE); + + _dispatch_wqthread_override_start(_dispatch_thread_port(), di->old_pri); + _dispatch_set_priority(assumed_rq->dq_priority); + _dispatch_reset_defaultpriority(assumed_rq->dq_priority); + _dispatch_thread_setspecific(dispatch_queue_key, assumed_rq); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di) +{ + _dispatch_thread_setspecific(dispatch_queue_key, di->old_dq); + _dispatch_set_priority(di->old_pri); + _dispatch_reset_defaultpriority(di->old_pp); + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); +} + +typedef dispatch_queue_t +_dispatch_queue_class_invoke_handler_t(dispatch_object_t, + _dispatch_thread_semaphore_t*); + DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_queue_t (*invoke)(dispatch_object_t, - _dispatch_thread_semaphore_t*)) + dispatch_continuation_t dc, dispatch_invoke_flags_t flags, + _dispatch_queue_class_invoke_handler_t invoke) { pthread_priority_t p = 0; dispatch_queue_t dq = dou._dq; + bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING); + bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING); + if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ _dispatch_queue_set_thread(dq); + dispatch_queue_t tq = NULL; _dispatch_thread_semaphore_t sema = 0; + struct _dispatch_identity_s di; + + if (overriding) { + _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", + dq->dq_thread, _dispatch_get_defaultpriority()); + _dispatch_root_queue_identity_assume(&di, dc->dc_other); + } + tq = invoke(dq, &sema); _dispatch_queue_clear_thread(dq); - p = _dispatch_queue_reset_override_priority(dq); - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + + if (!owning && !sema && tq && tq != dq->do_targetq) { + /* + * When (tq && tq != dq->do_targetq) this is a source or mach + * channel asking to get to their manager queue. + * + * Since stealers cannot call _dispatch_queue_push_queue and + * retarget those, they need ot destroy the override so that + * when waking those sources or mach channels on their target queue + * we don't risk a stealer taking them over and not be able to + * retarget again, effectively live-locking them. 
+ * + * Also, we're in the `overriding` case so the thread will be marked + * dirty by _dispatch_root_queue_identity_restore anyway + * so forgetting about p is fine. + */ + (void)_dispatch_queue_reset_override_priority(dq); + p = 0; + } else if (sema || tq || DISPATCH_OBJECT_SUSPENDED(dq)) { + p = _dispatch_queue_get_override_priority(dq); + } else { + p = _dispatch_queue_reset_override_priority(dq); + } + if (overriding) { + _dispatch_root_queue_identity_restore(&di); + } else { + if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } } - // We do not need to check the result. - // When the suspend-count lock is dropped, then the check will happen. - (void)dispatch_atomic_dec2o(dq, dq_running, release); + + uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release); if (sema) { _dispatch_thread_semaphore_signal(sema); - } else if (tq) { + } else if (owning && tq) { + _dispatch_introspection_queue_item_complete(dq); + return _dispatch_queue_push_queue(tq, dq, p); + } + if (!owning && running == 0) { _dispatch_introspection_queue_item_complete(dq); - return _dispatch_queue_push(tq, dq, p); + return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + } + } else if (overriding) { + mach_port_t th = dq->dq_thread; + if (th) { + p = _dispatch_queue_get_override_priority(dq); + _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", + th, p); + _dispatch_wqthread_override_start(th, p); } } - dq->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_introspection_queue_item_complete(dq); - if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) { - // seq_cst with atomic store to suspend_cnt - if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { - // verify that the queue is idle - return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + if (owning) { + dq->do_next = 
DISPATCH_OBJECT_LISTLESS; + if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, + DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) { + // seq_cst with atomic store to suspend_cnt + if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { + // verify that the queue is idle + return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + } } } _dispatch_release(dq); // added when the queue is put on the list @@ -351,13 +432,6 @@ _dispatch_object_suspended(dispatch_object_t dou) return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL); } -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_queue_get_current(void) -{ - return (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); -} - DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_t _dispatch_get_root_queue(qos_class_t priority, bool overcommit) @@ -409,6 +483,7 @@ _dispatch_queue_init(dispatch_queue_t dq) dq->dq_running = 0; dq->dq_width = 1; + dq->dq_override_voucher = DISPATCH_NO_VOUCHER; dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); } @@ -438,6 +513,23 @@ _dispatch_queue_get_bound_thread(dispatch_queue_t dq) return dq->dq_thread; } +DISPATCH_ALWAYS_INLINE +static inline dispatch_pthread_root_queue_observer_hooks_t +_dispatch_get_pthread_root_queue_observer_hooks(void) +{ + return _dispatch_thread_getspecific( + dispatch_pthread_root_queue_observer_hooks_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_pthread_root_queue_observer_hooks( + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + _dispatch_thread_setspecific(dispatch_pthread_root_queue_observer_hooks_key, + observer_hooks); +} + #pragma mark - #pragma mark dispatch_priority @@ -516,7 +608,8 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; pthread_priority_t dqp = 
dq->dq_priority, tqp = tq->dq_priority; - if ((!dqp || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) { + if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) && + (tqp & rootqueue_flag)) { dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; } #else @@ -646,13 +739,31 @@ _dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority, DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _dispatch_adopt_priority_and_voucher(pthread_priority_t priority, - voucher_t voucher, unsigned long flags) + voucher_t v, unsigned long flags) { pthread_priority_t p = 0; if (priority != DISPATCH_NO_PRIORITY) { p = _dispatch_priority_adopt(priority, flags); } - return _dispatch_set_priority_and_adopt_voucher(p, voucher); + if (!(flags & DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE)) { + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (dq && dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { + if (v != DISPATCH_NO_VOUCHER && v) _voucher_release(v); + v = dq->dq_override_voucher; + if (v) _voucher_retain(v); + } + } + return _dispatch_set_priority_and_adopt_voucher(p, v); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline voucher_t +_dispatch_adopt_queue_override_voucher(dispatch_queue_t dq) +{ + voucher_t v = dq->dq_override_voucher; + if (v == DISPATCH_NO_VOUCHER) return DISPATCH_NO_VOUCHER; + if (v) _voucher_retain(v); + return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY, v); } DISPATCH_ALWAYS_INLINE @@ -667,7 +778,7 @@ _dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_set_priority_and_replace_voucher(pthread_priority_t priority, +_dispatch_reset_priority_and_voucher(pthread_priority_t priority, voucher_t voucher) { voucher_t ov; @@ -675,6 +786,13 @@ _dispatch_set_priority_and_replace_voucher(pthread_priority_t priority, if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); } +DISPATCH_ALWAYS_INLINE +static 
inline void +_dispatch_reset_voucher(voucher_t voucher) +{ + return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY, voucher); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_set_priority(pthread_priority_t priority) @@ -716,11 +834,25 @@ _dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t *pp, + bool *was_overridden) { - uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); uint32_t o = dq->dq_override; - if (o < p) o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed); + uint32_t p = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + if (o < p) { + o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed); + if (was_overridden) { + o = (uint32_t)_dispatch_priority_normalize(o); + } + *pp = _dispatch_priority_normalize(o | p); + } else { + o = (uint32_t)_dispatch_priority_normalize(o); + *pp = o; + } + if (was_overridden) { + *was_overridden = + (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) < o; + } return (o < p); } @@ -812,13 +944,7 @@ _dispatch_block_get_data(const dispatch_block_t db) uint8_t *x = (uint8_t *)db; // x points to base of struct Block_layout x += sizeof(struct Block_layout); - // x points to addresss of captured block - x += sizeof(dispatch_block_t); -#if USE_OBJC - // x points to addresss of captured voucher - x += sizeof(voucher_t); -#endif - // x points to base of captured dispatch_block_private_data_s structure + // x points to base of captured dispatch_block_private_data_s object dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x; if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) { DISPATCH_CRASH("Corruption of dispatch block object"); @@ -905,16 +1031,16 @@ _dispatch_continuation_free(dispatch_continuation_t dc) #include "trace.h" -DISPATCH_ALWAYS_INLINE_NDEBUG 
+DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_pop(dispatch_object_t dou) +_dispatch_continuation_invoke(dispatch_object_t dou, dispatch_queue_t dq) { dispatch_continuation_t dc = dou._dc, dc1; dispatch_group_t dg; - _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dou); + _dispatch_trace_continuation_pop(dq, dou); if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return dx_invoke(dou._do); + return dx_invoke(dou._do, NULL, DISPATCH_INVOKE_NONE); } // Add the item back to the cache before calling the function. This @@ -945,6 +1071,18 @@ _dispatch_continuation_pop(dispatch_object_t dou) } } +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_continuation_pop(dispatch_object_t dou) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_pthread_root_queue_observer_hooks_t observer_hooks = + _dispatch_get_pthread_root_queue_observer_hooks(); + if (observer_hooks) observer_hooks->queue_will_execute(dq); + _dispatch_continuation_invoke(dou, dq); + if (observer_hooks) observer_hooks->queue_did_execute(dq); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_priority_set(dispatch_continuation_t dc, @@ -991,6 +1129,6 @@ _dispatch_continuation_get_override_priority(dispatch_queue_t dq, #endif } -#endif // !(USE_OBJC && __OBJC2__) +#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index 33fcedb41..98626c643 100644 --- a/src/internal.h +++ b/src/internal.h @@ -302,6 +302,20 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define slowpath(x) (x) #endif // __GNUC__ +#if DISPATCH_DEBUG +// sys/queue.h debugging +#undef TRASHIT +#define TRASHIT(x) do {(x) = (void *)-1;} while (0) +#endif // DISPATCH_DEBUG +#define _TAILQ_TRASH_ENTRY(elm, field) do { \ + TRASHIT((elm)->field.tqe_next); \ + TRASHIT((elm)->field.tqe_prev); \ + } while (0) +#define _TAILQ_TRASH_HEAD(head) do { \ + 
TRASHIT((head)->tqh_first); \ + TRASHIT((head)->tqh_last); \ + } while (0) + DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); @@ -346,6 +360,14 @@ void _dispatch_log(const char *msg, ...); #define dsnprintf(...) \ ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) +#if __GNUC__ +#define dispatch_static_assert(e) ({ \ + char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ + }) +#else +#define dispatch_static_assert(e) +#endif + /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. @@ -353,7 +375,7 @@ void _dispatch_log(const char *msg, ...); #if __GNUC__ #define dispatch_assert(e) do { \ if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ + dispatch_static_assert(e); \ } else { \ typeof(e) _e = fastpath(e); /* always eval 'e' */ \ if (DISPATCH_DEBUG && !_e) { \ @@ -375,7 +397,7 @@ static inline void _dispatch_assert(long e, long line) { */ #define dispatch_assert_zero(e) do { \ if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? -1 : 1] DISPATCH_UNUSED; \ + dispatch_static_assert(e); \ } else { \ typeof(e) _e = slowpath(e); /* always eval 'e' */ \ if (DISPATCH_DEBUG && _e) { \ @@ -401,8 +423,7 @@ static inline void _dispatch_assert_zero(long e, long line) { typeof(e) _e = fastpath(e); /* always eval 'e' */ \ if (!_e) { \ if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 1 : -1]; \ - (void)__compile_time_assert__; \ + dispatch_static_assert(e); \ } \ _dispatch_bug(__LINE__, (long)_e); \ } \ @@ -425,8 +446,7 @@ static inline long _dispatch_assume(long e, long line) { typeof(e) _e = slowpath(e); /* always eval 'e' */ \ if (_e) { \ if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 
-1 : 1]; \ - (void)__compile_time_assert__; \ + dispatch_static_assert(e); \ } \ _dispatch_bug(__LINE__, (long)_e); \ } \ @@ -446,7 +466,7 @@ static inline long _dispatch_assume_zero(long e, long line) { #if __GNUC__ #define dispatch_debug_assert(e, msg, args...) do { \ if (__builtin_constant_p(e)) { \ - char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ + dispatch_static_assert(e); \ } else { \ typeof(e) _e = fastpath(e); /* always eval 'e' */ \ if (DISPATCH_DEBUG && !_e) { \ @@ -602,6 +622,56 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #endif #endif // HAVE_DECL_NOTE_REAP +#if !defined(EV_UDATA_SPECIFIC) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ + (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#undef DISPATCH_USE_EV_UDATA_SPECIFIC +#define DISPATCH_USE_EV_UDATA_SPECIFIC 0 +#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC) +#define DISPATCH_USE_EV_UDATA_SPECIFIC 1 +#endif // EV_UDATA_SPECIFIC + +#if !DISPATCH_USE_EV_UDATA_SPECIFIC +#undef EV_UDATA_SPECIFIC +#define EV_UDATA_SPECIFIC 0 +#undef DISPATCH_DYNAMIC_SELECT_FALLBACK +#define DISPATCH_DYNAMIC_SELECT_FALLBACK 0 +#undef DISPATCH_USE_SELECT_FALLBACK +#define DISPATCH_USE_SELECT_FALLBACK 1 +#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC + +#if !defined(EV_SET_QOS) || (TARGET_IPHONE_SIMULATOR && \ + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ + (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#undef DISPATCH_USE_KEVENT_QOS +#define DISPATCH_USE_KEVENT_QOS 0 +#elif !defined(DISPATCH_USE_KEVENT_QOS) +#define DISPATCH_USE_KEVENT_QOS 1 +#endif // EV_SET_QOS + +#if DISPATCH_USE_KEVENT_QOS +typedef struct kevent_qos_s _dispatch_kevent_qos_s; +#else // DISPATCH_USE_KEVENT_QOS +#ifndef KEVENT_FLAG_IMMEDIATE +#define KEVENT_FLAG_NONE 0x00 +#define KEVENT_FLAG_IMMEDIATE 0x01 +#define KEVENT_FLAG_ERROR_EVENTS 0x02 +#endif // KEVENT_FLAG_IMMEDIATE +typedef struct kevent64_s 
_dispatch_kevent_qos_s; +#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \ + _data_out, _data_available, _flags) \ + ({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \ + const _dispatch_kevent_qos_s *_cl = (_changelist); \ + int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \ + dispatch_static_assert(!(_data_out) && !(_data_available)); \ + if (_f & KEVENT_FLAG_ERROR_EVENTS) { \ + dispatch_static_assert(_n == 1); \ + _kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \ + kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \ + (_eventlist), (_nevents), 0, \ + _f & KEVENT_FLAG_IMMEDIATE ? &_timeout_immediately : NULL); }) +#endif // DISPATCH_USE_KEVENT_QOS + #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) #if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 #undef DISPATCH_USE_SETNOSIGPIPE @@ -616,6 +686,9 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #ifndef DISPATCH_USE_CHECKIN_NOIMPORTANCE #define DISPATCH_USE_CHECKIN_NOIMPORTANCE 1 // rdar://problem/16996737 #endif +#ifndef DISPATCH_USE_NOIMPORTANCE_QOS +#define DISPATCH_USE_NOIMPORTANCE_QOS 1 // rdar://problem/21414476 +#endif #endif // MACH_SEND_NOIMPORTANCE @@ -671,6 +744,7 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; __asm__(""); __builtin_trap() // #define _dispatch_set_crash_log_message(msg) +#define _dispatch_set_crash_log_message_dynamic(msg) #if HAVE_MACH // MIG_REPLY_MISMATCH means either: @@ -704,6 +778,7 @@ extern int _dispatch_set_qos_class_enabled; #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) #define DISPATCH_PRIORITY_ENFORCE 0x1 +#define DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE 0x2 static inline void _dispatch_adopt_priority_and_replace_voucher( pthread_priority_t priority, voucher_t voucher, unsigned long flags); #if HAVE_MACH diff --git a/src/introspection.c b/src/introspection.c 
index e907f857b..35b0b573f 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -311,7 +311,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, type != DISPATCH_QUEUE_SPECIFIC_TYPE) { diqi.type = dispatch_introspection_queue_item_type_queue; diqi.queue = dispatch_introspection_queue_get_info(dou._dq); - } else if (metatype == _DISPATCH_SOURCE_TYPE) { + } else if (metatype == _DISPATCH_SOURCE_TYPE && + type != DISPATCH_MACH_CHANNEL_TYPE) { diqi.type = dispatch_introspection_queue_item_type_source; diqi.source = _dispatch_introspection_source_get_info(dou._ds); } else { diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 7b015aa0e..4ed951e7d 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -46,7 +46,7 @@ void _dispatch_introspection_queue_item_complete(dispatch_object_t dou); void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); -#if !__OBJC2__ +#if !__OBJC2__ && !defined(__cplusplus) DISPATCH_ALWAYS_INLINE static inline void @@ -70,9 +70,9 @@ _dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { _dispatch_introspection_queue_item_dequeue(dq, dou); }; -#endif +#endif // !__OBJC2__ && !defined(__cplusplus) -#else +#else // DISPATCH_INTROSPECTION #define DISPATCH_INTROSPECTION_QUEUE_LIST #define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0 diff --git a/src/io.c b/src/io.c index d66873ba7..0ad5b5373 100644 --- a/src/io.c +++ b/src/io.c @@ -122,13 +122,6 @@ enum { #pragma mark - #pragma mark dispatch_io_hashtables -#if TARGET_OS_EMBEDDED -#define DIO_HASH_SIZE 64u // must be a power of two -#else -#define DIO_HASH_SIZE 256u // must be a power of two -#endif -#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1)) - // Global hashtable of dev_t -> disk_s mappings DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, dispatch_disk_s) _dispatch_io_devs[DIO_HASH_SIZE]; @@ 
-173,10 +166,10 @@ enum { }; static struct dispatch_io_defaults_s { - size_t chunk_pages, low_water_chunks, max_pending_io_reqs; + size_t chunk_size, low_water_chunks, max_pending_io_reqs; bool initial_delivery; } dispatch_io_defaults = { - .chunk_pages = DIO_MAX_CHUNK_PAGES, + .chunk_size = DIO_MAX_CHUNK_SIZE, .low_water_chunks = DIO_DEFAULT_LOW_WATER_CHUNKS, .max_pending_io_reqs = DIO_MAX_PENDING_IO_REQS, }; @@ -190,7 +183,7 @@ _dispatch_iocntl(uint32_t param, uint64_t value) { switch (param) { case DISPATCH_IOCNTL_CHUNK_PAGES: - _dispatch_iocntl_set_default(chunk_pages, value); + _dispatch_iocntl_set_default(chunk_size, value * PAGE_SIZE); break; case DISPATCH_IOCNTL_LOW_WATER_CHUNKS: _dispatch_iocntl_set_default(low_water_chunks, value); @@ -217,7 +210,7 @@ _dispatch_io_create(dispatch_io_type_t type) channel->params.type = type; channel->params.high = SIZE_MAX; channel->params.low = dispatch_io_defaults.low_water_chunks * - dispatch_io_defaults.chunk_pages * PAGE_SIZE; + dispatch_io_defaults.chunk_size; channel->queue = dispatch_queue_create("com.apple.libdispatch-io.channelq", NULL); return channel; @@ -371,7 +364,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, void (^cleanup_handler)(int error)) { if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || - !(path && *path == '/')) { + !(*path == '/')) { return NULL; } size_t pathlen = strlen(path); @@ -1992,7 +1985,7 @@ static void _dispatch_disk_perform(void *ctxt) { dispatch_disk_t disk = ctxt; - size_t chunk_size = dispatch_io_defaults.chunk_pages * PAGE_SIZE; + size_t chunk_size = dispatch_io_defaults.chunk_size; _dispatch_fd_debug("disk perform", -1); dispatch_operation_t op; size_t i = disk->advise_idx, j = disk->free_idx; @@ -2109,10 +2102,10 @@ _dispatch_operation_perform(dispatch_operation_t op) _dispatch_object_debug(op, "%s", __func__); if (!op->buf) { size_t max_buf_siz = op->params.high; - size_t chunk_siz = dispatch_io_defaults.chunk_pages * PAGE_SIZE; + 
size_t chunk_siz = dispatch_io_defaults.chunk_size; if (op->direction == DOP_DIR_READ) { // If necessary, create a buffer for the ongoing operation, large - // enough to fit chunk_pages but at most high-water + // enough to fit chunk_size but at most high-water size_t data_siz = dispatch_data_get_size(op->data); if (data_siz) { dispatch_assert(data_siz < max_buf_siz); diff --git a/src/io_internal.h b/src/io_internal.h index fbb27c570..ecdc77583 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -35,11 +35,15 @@ #define _DISPATCH_IO_LABEL_SIZE 16 #if TARGET_OS_EMBEDDED // rdar://problem/9032036 -#define DIO_MAX_CHUNK_PAGES 128u // 512kB chunk size +#define DIO_MAX_CHUNK_SIZE (512u * 1024) +#define DIO_HASH_SIZE 64u // must be a power of two #else -#define DIO_MAX_CHUNK_PAGES 256u // 1024kB chunk size +#define DIO_MAX_CHUNK_SIZE (1024u * 1024) +#define DIO_HASH_SIZE 256u // must be a power of two #endif +#define DIO_HASH(x) ((uintptr_t)(x) & (DIO_HASH_SIZE - 1)) + #define DIO_DEFAULT_LOW_WATER_CHUNKS 1u // default low-water mark #define DIO_MAX_PENDING_IO_REQS 6u // Pending I/O read advises @@ -93,7 +97,6 @@ struct dispatch_stat_s { DISPATCH_CLASS_DECL(disk); struct dispatch_disk_s { DISPATCH_STRUCT_HEADER(disk); - dev_t dev; TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations; dispatch_operation_t cur_rq; dispatch_queue_t pick_queue; @@ -101,8 +104,8 @@ struct dispatch_disk_s { size_t free_idx; size_t req_idx; size_t advise_idx; + dev_t dev; bool io_active; - int err; TAILQ_ENTRY(dispatch_disk_s) disk_list; size_t advise_list_depth; dispatch_operation_t advise_list[]; diff --git a/src/object.c b/src/object.c index 5b09de716..4089ba0c5 100644 --- a/src/object.c +++ b/src/object.c @@ -51,26 +51,32 @@ DISPATCH_NOINLINE _os_object_t _os_object_retain(_os_object_t obj) { - int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return obj; // global object - } - xref_cnt = dispatch_atomic_inc2o(obj, 
os_obj_xref_cnt, relaxed); + int xref_cnt = _os_object_xrefcnt_inc(obj); if (slowpath(xref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_NOINLINE +_os_object_t +_os_object_retain_with_resurrect(_os_object_t obj) +{ + int xref_cnt = _os_object_xrefcnt_inc(obj); + if (slowpath(xref_cnt < 0)) { + _OS_OBJECT_CLIENT_CRASH("Resurrection of an overreleased object"); + } + if (slowpath(xref_cnt == 0)) { + _os_object_retain_internal(obj); + } + return obj; +} + DISPATCH_NOINLINE void _os_object_release(_os_object_t obj) { - int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return; // global object - } - xref_cnt = dispatch_atomic_dec2o(obj, os_obj_xref_cnt, relaxed); + int xref_cnt = _os_object_xrefcnt_dec(obj); if (fastpath(xref_cnt >= 0)) { return; } diff --git a/src/object.m b/src/object.m index 953cb0bd6..1a98d7e0e 100644 --- a/src/object.m +++ b/src/object.m @@ -142,6 +142,10 @@ return obj; } +#if DISPATCH_COCOA_COMPAT +static bool _os_object_debug_missing_pools; +#endif + void _os_object_init(void) { @@ -155,6 +159,10 @@ (void (*)(const void *))&_os_objc_destructInstance }; _Block_use_RR2(&callbacks); +#if DISPATCH_COCOA_COMPAT + const char *v = getenv("OBJC_DEBUG_MISSING_POOLS"); + _os_object_debug_missing_pools = v && !strcmp(v, "YES"); +#endif } _os_object_t @@ -181,12 +189,16 @@ void _os_object_xref_dispose(_os_object_t obj) { + struct _os_object_s *o = (struct _os_object_s *)obj; + _os_object_xrefcnt_dispose_barrier(o); [obj _xref_dispose]; } void _os_object_dispose(_os_object_t obj) { + struct _os_object_s *o = (struct _os_object_s *)obj; + _os_object_refcnt_dispose_barrier(o); [obj _dispose]; } @@ -463,12 +475,17 @@ - (NSString *)debugDescription { void * _dispatch_autorelease_pool_push(void) { - return objc_autoreleasePoolPush(); + if (!slowpath(_os_object_debug_missing_pools)) { + return objc_autoreleasePoolPush(); + } + return NULL; } void 
_dispatch_autorelease_pool_pop(void *context) { - return objc_autoreleasePoolPop(context); + if (!slowpath(_os_object_debug_missing_pools)) { + return objc_autoreleasePoolPop(context); + } } #endif // DISPATCH_COCOA_COMPAT @@ -507,19 +524,6 @@ - (NSString *)debugDescription { } } -#undef _dispatch_client_callout3 -bool -_dispatch_client_callout3(void *ctxt, dispatch_data_t region, size_t offset, - const void *buffer, size_t size, dispatch_data_applier_function_t f) -{ - @try { - return f(ctxt, region, offset, buffer, size); - } - @catch (...) { - objc_terminate(); - } -} - #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -536,34 +540,4 @@ - (NSString *)debugDescription { #endif // DISPATCH_USE_CLIENT_CALLOUT -#pragma mark - -#pragma mark _dispatch_block_create - -// The compiler hides the name of the function it generates, and changes it if -// we try to reference it directly, but the linker still sees it. -extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) - asm("____dispatch_block_create_block_invoke"); -void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; - -dispatch_block_t -_dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, - pthread_priority_t pri, dispatch_block_t block) -{ - dispatch_block_t copy_block = _dispatch_Block_copy(block); // 17094902 - struct dispatch_block_private_data_s dbpds = - DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, pri, copy_block); - dispatch_block_t new_block = _dispatch_Block_copy(^{ - // Capture object references, which retains copy_block and voucher. - // All retained objects must be captured by the *block*. We - // cannot borrow any references, because the block might be - // called zero or several times, so Block_release() is the - // only place that can release retained objects. 
- (void)copy_block; - (void)voucher; - _dispatch_block_invoke(&dbpds); - }); - Block_release(copy_block); - return new_block; -} - #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index c0d17ae49..4778f4c1f 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -40,6 +40,18 @@ #define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) #endif // OS_OBJECT_USE_OBJC +DISPATCH_ENUM(dispatch_invoke_flags, unsigned long, + DISPATCH_INVOKE_NONE = 0x00, + /* This invoke is a stealer, meaning that it doesn't own the + * enqueue lock, and is not allowed to requeue elsewhere + */ + DISPATCH_INVOKE_STEALING = 0x01, + /* The `dc` argument is a dispatch continuation wrapper + * created by _dispatch_queue_push_override + */ + DISPATCH_INVOKE_OVERRIDING = 0x02, +); + #if USE_OBJC #define DISPATCH_CLASS(name) OS_OBJECT_CLASS(dispatch_##name) // ObjC classes and dispatch vtables are co-located via linker order and alias @@ -84,7 +96,8 @@ unsigned long const do_type; \ const char *const do_kind; \ size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*const do_invoke)(struct dispatch_##x##_s *); \ + void (*const do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ + dispatch_invoke_flags_t); \ unsigned long (*const do_probe)(struct dispatch_##x##_s *); \ void (*const do_dispose)(struct dispatch_##x##_s *); #else @@ -93,7 +106,8 @@ unsigned long do_type; \ const char *do_kind; \ size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*do_invoke)(struct dispatch_##x##_s *); \ + void (*do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ + dispatch_invoke_flags_t); \ unsigned long (*do_probe)(struct dispatch_##x##_s *); \ void (*do_dispose)(struct dispatch_##x##_s *); #endif @@ -103,7 +117,7 @@ #define dx_kind(x) (x)->do_vtable->do_kind #define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) #define dx_dispose(x) (x)->do_vtable->do_dispose(x) -#define dx_invoke(x) 
(x)->do_vtable->do_invoke(x) +#define dx_invoke(x, y, z) (x)->do_vtable->do_invoke(x, y, z) #define dx_probe(x) (x)->do_vtable->do_probe(x) #define DISPATCH_STRUCT_HEADER(x) \ @@ -243,6 +257,72 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #pragma mark - #pragma mark _os_object_s +/* + * Low level _os_atomic_refcnt_* actions + * + * _os_atomic_refcnt_inc2o(o, f): + * performs a refcount increment and returns the new refcount value + * + * _os_atomic_refcnt_dec2o(o, f): + * performs a refcount decrement and returns the new refcount value + * + * _os_atomic_refcnt_dispose_barrier2o(o, f): + * a barrier to perform prior to tearing down an object when the refcount + * reached -1. + */ +#define _os_atomic_refcnt_perform2o(o, f, op, m) ({ \ + typeof(o) _o = (o); \ + int _ref_cnt = _o->f; \ + if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ + _ref_cnt = dispatch_atomic_##op##2o(_o, f, m); \ + } \ + _ref_cnt; \ + }) + +#define _os_atomic_refcnt_inc2o(o, m) \ + _os_atomic_refcnt_perform2o(o, m, inc, relaxed) + +#define _os_atomic_refcnt_dec2o(o, m) \ + _os_atomic_refcnt_perform2o(o, m, dec, release) + +#define _os_atomic_refcnt_dispose_barrier2o(o, m) \ + (void)dispatch_atomic_load2o(o, m, acquire) + + +/* + * Higher level _os_object_{x,}refcnt_* actions + * + * _os_atomic_{x,}refcnt_inc(o): + * increment the external (resp. internal) refcount and + * returns the new refcount value + * + * _os_atomic_{x,}refcnt_dec(o): + * decrement the external (resp. internal) refcount and + * returns the new refcount value + * + * _os_atomic_{x,}refcnt_dispose_barrier(o): + * performs the pre-teardown barrier for the external + * (resp. 
internal) refcount + * + */ +#define _os_object_xrefcnt_inc(o) \ + _os_atomic_refcnt_inc2o(o, os_obj_xref_cnt) + +#define _os_object_xrefcnt_dec(o) \ + _os_atomic_refcnt_dec2o(o, os_obj_xref_cnt) + +#define _os_object_xrefcnt_dispose_barrier(o) \ + _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt) + +#define _os_object_refcnt_inc(o) \ + _os_atomic_refcnt_inc2o(o, os_obj_ref_cnt) + +#define _os_object_refcnt_dec(o) \ + _os_atomic_refcnt_dec2o(o, os_obj_ref_cnt) + +#define _os_object_refcnt_dispose_barrier(o) \ + _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) + typedef struct _os_object_class_s { _OS_OBJECT_CLASS_HEADER(); } _os_object_class_s; diff --git a/src/queue.c b/src/queue.c index b8b4ad94f..5868e8799 100644 --- a/src/queue.c +++ b/src/queue.c @@ -60,7 +60,7 @@ static inline _dispatch_thread_semaphore_t static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, pthread_priority_t p); static inline void _dispatch_queue_push_override(dispatch_queue_t dq, - dispatch_queue_t tq, pthread_priority_t p); + dispatch_queue_t tq, pthread_priority_t p, bool owning); #if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -93,6 +93,7 @@ struct dispatch_pthread_root_queue_context_s { pthread_attr_t dpq_thread_attr; dispatch_block_t dpq_thread_configure; struct dispatch_semaphore_s dpq_thread_mediator; + dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks; }; typedef struct dispatch_pthread_root_queue_context_s * dispatch_pthread_root_queue_context_t; @@ -349,6 +350,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.maintenance-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 4, }, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { @@ -361,6 +363,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = 
"com.apple.root.maintenance-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 5, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { @@ -373,6 +376,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.background-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 6, }, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { @@ -385,6 +389,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.background-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 7, }, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { @@ -397,6 +402,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.utility-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 8, }, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { @@ -409,6 +415,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.utility-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 9, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { @@ -421,6 +428,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.default-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 10, }, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { @@ -433,6 +441,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.default-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 11, }, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { @@ -445,6 +454,7 @@ struct 
dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.user-initiated-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 12, }, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { @@ -457,6 +467,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.user-initiated-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 13, }, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { @@ -469,6 +480,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.user-interactive-qos", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 14, }, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { @@ -481,6 +493,7 @@ struct dispatch_queue_s _dispatch_root_queues[] = { .dq_label = "com.apple.root.user-interactive-qos.overcommit", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 15, }, }; @@ -559,6 +572,7 @@ struct dispatch_queue_s _dispatch_mgr_q = { .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, .dq_is_thread_bound = 1, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 2, }; @@ -572,7 +586,7 @@ dispatch_get_global_queue(long priority, unsigned long flags) _dispatch_root_queues_init); qos_class_t qos; switch (priority) { -#if !RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK case _DISPATCH_QOS_CLASS_MAINTENANCE: if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] .dq_priority) { @@ -582,7 +596,7 @@ dispatch_get_global_queue(long priority, unsigned long flags) qos = (qos_class_t)priority; } break; -#endif // RDAR_17878963 || DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK +#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK case 
DISPATCH_QUEUE_PRIORITY_BACKGROUND: qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; break; @@ -653,7 +667,7 @@ _dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) expected ? "Expected" : "Unexpected", dq, dq->dq_label ? dq->dq_label : ""); _dispatch_log("%s", msg); - _dispatch_set_crash_log_message(msg); + _dispatch_set_crash_log_message_dynamic(msg); _dispatch_hardware_crash(); free(msg); } @@ -661,7 +675,7 @@ _dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) void dispatch_assert_queue(dispatch_queue_t dq) { - if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { DISPATCH_CLIENT_CRASH("invalid queue passed to " "dispatch_assert_queue()"); } @@ -675,7 +689,7 @@ dispatch_assert_queue(dispatch_queue_t dq) void dispatch_assert_queue_not(dispatch_queue_t dq) { - if (slowpath(!dq) || slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { + if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { DISPATCH_CLIENT_CRASH("invalid queue passed to " "dispatch_assert_queue_not()"); } @@ -934,6 +948,8 @@ libdispatch_init(void) _dispatch_thread_key_create(&dispatch_io_key, NULL); _dispatch_thread_key_create(&dispatch_apply_key, NULL); _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); + _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, + NULL); #if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif @@ -1067,18 +1083,21 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { }; #define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \ - (overcommit ? DQA_INDEX_OVERCOMMIT : DQA_INDEX_NON_OVERCOMMIT) + ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? \ + DQA_INDEX_NON_OVERCOMMIT : \ + ((overcommit) == _dispatch_queue_attr_overcommit_enabled ? 
\ + DQA_INDEX_OVERCOMMIT : DQA_INDEX_UNSPECIFIED_OVERCOMMIT)) #define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \ - (concurrent ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) + ((concurrent) ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) #define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) static inline dispatch_queue_attr_t -_dispatch_get_queue_attr(qos_class_t qos, int prio, bool overcommit, - bool concurrent) +_dispatch_get_queue_attr(qos_class_t qos, int prio, + _dispatch_queue_attr_overcommit_t overcommit, bool concurrent) { return (dispatch_queue_attr_t)&_dispatch_queue_attrs [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)] @@ -1093,7 +1112,8 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, { if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, false, false); + dqa = _dispatch_get_queue_attr(0, 0, + _dispatch_queue_attr_overcommit_unspecified, false); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH("Invalid queue attribute"); } @@ -1106,12 +1126,15 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, bool overcommit) { if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, false, false); + dqa = _dispatch_get_queue_attr(0, 0, + _dispatch_queue_attr_overcommit_unspecified, false); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH("Invalid queue attribute"); } return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, overcommit, dqa->dqa_concurrent); + dqa->dqa_relative_priority, overcommit ? 
+ _dispatch_queue_attr_overcommit_enabled : + _dispatch_queue_attr_overcommit_disabled, dqa->dqa_concurrent); } #pragma mark - @@ -1136,7 +1159,8 @@ dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, #endif bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT); if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, false, false); + dqa = _dispatch_get_queue_attr(0, 0, + _dispatch_queue_attr_overcommit_unspecified, false); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH("Invalid queue attribute"); } @@ -1147,16 +1171,20 @@ dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dq->dq_label = strdup(label); } qos_class_t qos = dqa->dqa_qos_class; - bool overcommit = dqa->dqa_overcommit; + _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + // Serial queues default to overcommit! + overcommit = dqa->dqa_concurrent ? + _dispatch_queue_attr_overcommit_disabled : + _dispatch_queue_attr_overcommit_enabled; + } #if HAVE_PTHREAD_WORKQUEUE_QOS dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority, - overcommit); + overcommit == _dispatch_queue_attr_overcommit_enabled ? + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); #endif if (dqa->dqa_concurrent) { dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; - } else { - // Default serial queue target queue is overcommit! 
- overcommit = true; } if (!tq) { if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { @@ -1181,7 +1209,8 @@ dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } - tq = _dispatch_get_root_queue(qos, overcommit); + tq = _dispatch_get_root_queue(qos, overcommit == + _dispatch_queue_attr_overcommit_enabled); if (slowpath(!tq)) { DISPATCH_CLIENT_CRASH("Invalid queue attribute"); } @@ -1207,6 +1236,16 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) DISPATCH_TARGET_QUEUE_DEFAULT); } +dispatch_queue_t +dispatch_queue_create_with_accounting_override_voucher(const char *label, + dispatch_queue_attr_t attr, voucher_t voucher) +{ + dispatch_queue_t dq = dispatch_queue_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT); + dq->dq_override_voucher = _voucher_create_accounting_voucher(voucher); + return dq; +} + void _dispatch_queue_destroy(dispatch_object_t dou) { @@ -1226,6 +1265,10 @@ _dispatch_queue_destroy(dispatch_object_t dou) if (dqsq) { _dispatch_release(dqsq); } + if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { + if (dq->dq_override_voucher) _voucher_release(dq->dq_override_voucher); + dq->dq_override_voucher = DISPATCH_NO_VOUCHER; + } } // 6618342 Contact the team that owns the Instrument DTrace probe before @@ -1319,13 +1362,17 @@ static void _dispatch_set_target_queue2(void *ctxt) { dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt; +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_wakeup_with_qos_slow + mach_msg_timeout_t timeout = 1; mach_port_t th; while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) { _dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION, - DISPATCH_CONTENTION_USLEEP_START); + timeout++); } +#endif _dispatch_queue_priority_inherit_from_target(dq, tq); prev_dq = dq->do_targetq; dq->do_targetq = tq; @@ -1383,6 +1430,7 @@ static struct dispatch_root_queue_context_s .dgq_ctxt = 
&_dispatch_mgr_root_queue_pthread_context, .dgq_thread_pool_size = 1, }}}; + static struct dispatch_queue_s _dispatch_mgr_root_queue = { .do_vtable = DISPATCH_VTABLE(queue_root), .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, @@ -1392,16 +1440,32 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue = { .dq_label = "com.apple.root.libdispatch-manager", .dq_running = 2, .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 3, }; + static struct { volatile int prio; + volatile qos_class_t qos; int default_prio; int policy; pthread_t tid; } _dispatch_mgr_sched; + static dispatch_once_t _dispatch_mgr_sched_pred; +// TODO: switch to "event-reflector thread" property + +// Must be kept in sync with list of qos classes in sys/qos.h +static const int _dispatch_mgr_sched_qos2prio[] = { + [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4, + [_DISPATCH_QOS_CLASS_BACKGROUND] = 4, + [_DISPATCH_QOS_CLASS_UTILITY] = 20, + [_DISPATCH_QOS_CLASS_DEFAULT] = 31, + [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37, + [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47, +}; + static void _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) { @@ -1412,7 +1476,16 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, &_dispatch_mgr_sched.policy)); (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param)); - // legacy priority calls allowed when requesting above default priority +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = qos_class_main(); + if (qos == _DISPATCH_QOS_CLASS_DEFAULT) { + qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 + } + if (qos) { + _dispatch_mgr_sched.qos = qos; + param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; + } +#endif _dispatch_mgr_sched.default_prio = param.sched_priority; _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; } @@ -1431,9 +1504,12 @@ _dispatch_mgr_root_queue_init(void)
(void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); #endif #if HAVE_PTHREAD_WORKQUEUE_QOS - if (_dispatch_set_qos_class_enabled) { - qos_class_t qos = qos_class_main(); - (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); + qos_class_t qos = _dispatch_mgr_sched.qos; + if (qos) { + if (_dispatch_set_qos_class_enabled) { + (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, + qos, 0)); + } _dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0); _dispatch_queue_set_override_priority(&_dispatch_mgr_q); } @@ -1467,6 +1543,17 @@ _dispatch_mgr_priority_init(void) pthread_attr_t *attr; attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = 0; + (void)pthread_attr_get_qos_class_np(attr, &qos, NULL); + if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) { + (void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0); + int p = _dispatch_mgr_sched_qos2prio[_dispatch_mgr_sched.qos]; + if (p > param.sched_priority) { + param.sched_priority = p; + } + } +#endif if (slowpath(_dispatch_mgr_sched.prio > param.sched_priority)) { return _dispatch_mgr_priority_apply(); } @@ -1479,6 +1566,18 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); struct sched_param param; (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, &param)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = 0; + (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL); + if (qos) { + param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; + qos_class_t q = _dispatch_mgr_sched.qos; + do if (q >= qos) { + break; + } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, + qos, q, qos, &q, relaxed))); + } +#endif int p = _dispatch_mgr_sched.prio; do if (p >= param.sched_priority) { return;
@@ -1489,9 +1588,10 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) } } -dispatch_queue_t -dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure) +static dispatch_queue_t +_dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) { dispatch_queue_t dq; dispatch_root_queue_context_t qc; @@ -1527,14 +1627,6 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, if (attr) { memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = 0; - if (!pthread_attr_get_qos_class_np(&pqc->dpq_thread_attr, &qos, NULL) - && qos > _DISPATCH_QOS_CLASS_DEFAULT) { - DISPATCH_CLIENT_CRASH("pthread root queues do not support " - "explicit QoS attributes"); - } -#endif _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); } else { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); @@ -1544,10 +1636,23 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, if (configure) { pqc->dpq_thread_configure = _dispatch_Block_copy(configure); } + if (observer_hooks) { + pqc->dpq_observer_hooks = *observer_hooks; + } _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); } -#endif + +dispatch_queue_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure) +{ + return _dispatch_pthread_root_queue_create(label, flags, attr, configure, + NULL); +} + + +#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) @@ -1737,6 +1842,7 @@ dispatch_get_specific(const void *key) return ctxt; } + #pragma mark - #pragma mark dispatch_queue_debug @@ -1745,12 +1851,12 @@ 
_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; dispatch_queue_t target = dq->do_targetq; - offset += dsnprintf(buf, bufsiz, "target = %s[%p], width = 0x%x, " - "running = 0x%x, barrier = %d ", target && target->dq_label ? - target->dq_label : "", target, dq->dq_width / 2, - dq->dq_running / 2, dq->dq_running & 1); + offset += dsnprintf(&buf[offset], bufsiz - offset, "target = %s[%p], " + "width = 0x%x, running = 0x%x, barrier = %d ", + target && target->dq_label ? target->dq_label : "", target, + dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); if (dq->dq_is_thread_bound) { - offset += dsnprintf(buf, bufsiz, ", thread = 0x%x ", + offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", _dispatch_queue_get_bound_thread(dq)); } return offset; @@ -1868,6 +1974,8 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) if (!DISPATCH_OBJ_IS_VTABLE(dc) && (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { _dispatch_trace_continuation_pop(dq, dou); + _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, + _dispatch_queue_get_override_priority(dq)); _dispatch_thread_semaphore_signal( (_dispatch_thread_semaphore_t)dc->dc_other); _dispatch_introspection_queue_item_complete(dou); @@ -1985,12 +2093,11 @@ dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) } flags = _dispatch_block_normalize_flags(flags); struct dispatch_block_private_data_s dbpds = - DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, NULL, 0, block); - dbpds.dbpd_atomic_flags |= DBF_PERFORM; // no group_leave at end of invoke + DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block); return _dispatch_block_invoke(&dbpds); } -#define _dbpd_group(dbpd) ((dispatch_group_t)&(dbpd)->dbpd_group) +#define _dbpd_group(dbpd) ((dbpd)->dbpd_group) void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd) @@ -2005,11 +2112,11 @@ _dispatch_block_invoke(const struct 
dispatch_block_private_data_s *dbcpd) if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - unsigned long override = 0; + unsigned long override = DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE; if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { op = _dispatch_get_priority(); p = dbpd->dbpd_priority; - override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + override |= (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? DISPATCH_PRIORITY_ENFORCE : 0; } @@ -2020,8 +2127,9 @@ _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd) } ov = _dispatch_adopt_priority_and_voucher(p, v, override); dbpd->dbpd_thread = _dispatch_thread_port(); - dbpd->dbpd_block(); - _dispatch_set_priority_and_replace_voucher(op, ov); + _dispatch_client_callout(dbpd->dbpd_block, + _dispatch_Block_invoke(dbpd->dbpd_block)); + _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { @@ -2059,7 +2167,7 @@ _dispatch_block_sync_invoke(void *block) } ov = _dispatch_adopt_priority_and_voucher(p, v, override); dbpd->dbpd_block(); - _dispatch_set_priority_and_replace_voucher(op, ov); + _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { @@ -2485,6 +2593,15 @@ dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) return _dispatch_async_f(dq, ctxt, func, 0, 0); } +DISPATCH_NOINLINE +void +dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + return _dispatch_async_f(dq, ctxt, func, 0, + DISPATCH_BLOCK_ENFORCE_QOS_CLASS); +} + #ifdef __BLOCKS__ void dispatch_async(dispatch_queue_t dq, void (^work)(void)) @@ -2578,11 +2695,28 @@ dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, static void 
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp); +DISPATCH_NOINLINE +static void +_dispatch_function_invoke_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); + _dispatch_thread_setspecific(dispatch_queue_key, dq); + voucher_t ov = _dispatch_adopt_queue_override_voucher(dq); + _dispatch_client_callout(ctxt, func); + _dispatch_perfmon_workitem_inc(); + _dispatch_reset_voucher(ov); + _dispatch_thread_setspecific(dispatch_queue_key, old_dq); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { + if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) { + return _dispatch_function_invoke_slow(dq, ctxt, func); + } dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); _dispatch_thread_setspecific(dispatch_queue_key, dq); _dispatch_client_callout(ctxt, func); @@ -2718,6 +2852,12 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, _dispatch_thread_semaphore_wait(sema); // acquire _dispatch_put_thread_semaphore(sema); + pthread_priority_t p = _dispatch_queue_get_override_priority(dq); + if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + #if DISPATCH_COCOA_COMPAT // Queue bound to a non-dispatch thread if (dc.dc_func == NULL) { @@ -2949,6 +3089,12 @@ _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, _dispatch_thread_semaphore_wait(sema); _dispatch_put_thread_semaphore(sema); + pthread_priority_t p = _dispatch_queue_get_override_priority(dq); + if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + } + if (slowpath(dq->do_targetq->do_targetq)) { _dispatch_function_recurse(dq, ctxt, func, pp); } else { @@ -3467,9 +3613,10 @@ dispatch_queue_invoke2(dispatch_object_t dou, // renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_invoke(dispatch_queue_t dq) +_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dq, dispatch_queue_invoke2); + _dispatch_queue_class_invoke(dq, dou._dc, flags, dispatch_queue_invoke2); } #pragma mark - @@ -3602,10 +3749,15 @@ _dispatch_main_queue_drain(void) out: if (next_dc) { _dispatch_main_queue_wakeup(); + } else { + pthread_priority_t p = _dispatch_queue_reset_override_priority(dq); + + if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_thread_override_end(dq->dq_thread); + } } _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_set_priority_and_replace_voucher(old_pri, voucher); - _dispatch_queue_reset_override_priority(dq); + _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); _dispatch_perfmon_end(); @@ -3632,7 +3784,7 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) _dispatch_perfmon_workitem_inc(); _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_set_priority_and_replace_voucher(old_pri, voucher); + _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); _dispatch_thread_setspecific(dispatch_queue_key, old_dq); _dispatch_perfmon_end(); @@ -3690,20 +3842,28 @@ _dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp, if (retained) _dispatch_release(dq); return NULL; } - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool override = _dispatch_queue_override_priority(dq, pp); - if (override && dq->dq_running > 1) { - override = false; - } - if 
(!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0, DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { + bool was_overridden, override; + + override = _dispatch_queue_override_priority(dq, &pp, &was_overridden); + if (override && dq->dq_running > 1) { + override = false; + } + #if DISPATCH_COCOA_COMPAT if (dq == &_dispatch_main_q && dq->dq_is_thread_bound) { + if (override) { + _dispatch_thread_override_start(dq->dq_thread, pp); + if (was_overridden) { + _dispatch_thread_override_end(dq->dq_thread); + } + } return _dispatch_main_queue_wakeup(); } #endif if (override) { +#if HAVE_PTHREAD_WORKQUEUE_QOS mach_port_t th; // to traverse the tq chain safely we must // lock it to ensure it cannot change, unless the queue is running @@ -3713,31 +3873,27 @@ _dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp, } else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) { // already locked, override the owner, trysync will do a queue - // wakeup when it returns. 
+ // wakeup when it returns, see _dispatch_set_target_queue2 _dispatch_wqthread_override_start(th, pp); } else { dispatch_queue_t tq = dq->do_targetq; if (_dispatch_queue_prepare_override(dq, tq, pp)) { - _dispatch_queue_push_override(dq, tq, pp); + _dispatch_queue_push_override(dq, tq, pp, false); } else { _dispatch_queue_wakeup_with_qos(tq, pp); } dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, release); } +#endif } if (retained) _dispatch_release(dq); return NULL; } + dispatch_queue_t tq = dq->do_targetq; if (!retained) _dispatch_retain(dq); - if (override) { - override = _dispatch_queue_prepare_override(dq, tq, pp); - } - _dispatch_queue_push(tq, dq, pp); - if (override) { - _dispatch_queue_push_override(dq, tq, pp); - } + _dispatch_queue_push_queue(tq, dq, pp); return tq; // libdispatch does not need this, but the Instrument DTrace // probe does } @@ -3748,7 +3904,7 @@ _dispatch_queue_wakeup_with_qos2(dispatch_queue_t dq, pthread_priority_t pp, bool retained) { if (_dispatch_object_suspended(dq)) { - _dispatch_queue_override_priority(dq, pp); + _dispatch_queue_override_priority(dq, &pp, NULL); if (retained) _dispatch_release(dq); return NULL; } @@ -3770,6 +3926,14 @@ _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp) (void)_dispatch_queue_wakeup_with_qos2(dq, pp, false); } +DISPATCH_NOINLINE +void +_dispatch_queue_wakeup_and_release(dispatch_queue_t dq) +{ + (void)_dispatch_queue_wakeup_with_qos2(dq, + _dispatch_queue_get_override_priority(dq), true); +} + DISPATCH_NOINLINE dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq) @@ -3779,53 +3943,26 @@ _dispatch_queue_wakeup(dispatch_queue_t dq) } #if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_NOINLINE static void -_dispatch_queue_override_invoke(void *ctxt) +_dispatch_queue_override_invoke_stealing(void *ctxt) { dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; dispatch_queue_t dq = dc->dc_data; - pthread_priority_t p = 0; - if 
(!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - _dispatch_queue_set_thread(dq); - - _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - dq->dq_thread, _dispatch_get_defaultpriority()); - - pthread_priority_t old_dp = _dispatch_get_defaultpriority(); - _dispatch_reset_defaultpriority(dc->dc_priority); - - dispatch_queue_t tq = NULL; - _dispatch_thread_semaphore_t sema = 0; - tq = dispatch_queue_invoke2(dq, &sema); + dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING | DISPATCH_INVOKE_STEALING); +} - _dispatch_queue_clear_thread(dq); - _dispatch_reset_defaultpriority(old_dp); +DISPATCH_NOINLINE +static void +_dispatch_queue_override_invoke_owning(void *ctxt) +{ + dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; + dispatch_queue_t dq = dc->dc_data; - uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release); - if (sema) { - _dispatch_thread_semaphore_signal(sema); - } else if (!tq && running == 0) { - p = _dispatch_queue_reset_override_priority(dq); - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_reset(); - } - } - _dispatch_introspection_queue_item_complete(dq); - if (running == 0) { - return _dispatch_queue_wakeup_with_qos_and_release(dq, p); - } - } else { - mach_port_t th = dq->dq_thread; - if (th) { - p = _dispatch_queue_get_override_priority(dq); - _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", - th, p); - _dispatch_wqthread_override_start(th, p); - } - } - _dispatch_release(dq); // added when we pushed the override block + // balance the fake continuation push in _dispatch_queue_push_override + _dispatch_trace_continuation_pop(dc->dc_other, dc->dc_data); + dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING); } #endif @@ -3843,7 +3980,6 @@ _dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { return false; } - 
_dispatch_retain(dq); return true; #else (void)dq; (void)tq; (void)p; @@ -3853,7 +3989,7 @@ _dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, static inline void _dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, - pthread_priority_t p) + pthread_priority_t p, bool owning) { #if HAVE_PTHREAD_WORKQUEUE_QOS unsigned int qosbit, idx, overcommit; @@ -3868,12 +4004,19 @@ _dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = _dispatch_queue_override_invoke; + if (owning) { + // fake that we queued `dq` on `tq` for introspection purposes + _dispatch_trace_continuation_push(tq, dq); + dc->dc_func = _dispatch_queue_override_invoke_owning; + } else { + dc->dc_func = _dispatch_queue_override_invoke_stealing; + _dispatch_retain(dq); + } dc->dc_ctxt = dc; - dc->dc_priority = tq->dq_priority; + dc->dc_priority = 0; + dc->dc_other = tq; dc->dc_voucher = NULL; dc->dc_data = dq; - // dq retained by _dispatch_queue_prepare_override _dispatch_queue_push(rq, dc, 0); #else @@ -3881,6 +4024,18 @@ _dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, #endif } +void +_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, + pthread_priority_t pp) +{ + _dispatch_queue_override_priority(dq, &pp, NULL); + if (_dispatch_queue_prepare_override(dq, tq, pp)) { + _dispatch_queue_push_override(dq, tq, pp, true); + } else { + _dispatch_queue_push(tq, dq, pp); + } +} + #pragma mark - #pragma mark dispatch_root_queue_drain @@ -4023,7 +4178,7 @@ _dispatch_root_queue_drain(dispatch_queue_t dq) reset = _dispatch_reset_defaultpriority_override(); } _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_set_priority_and_replace_voucher(old_pri, NULL); + _dispatch_reset_priority_and_voucher(old_pri, NULL); _dispatch_reset_defaultpriority(old_dp); 
_dispatch_perfmon_end(); @@ -4104,6 +4259,10 @@ _dispatch_worker_thread(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + if (pqc->dpq_observer_hooks.queue_will_execute) { + _dispatch_set_pthread_root_queue_observer_hooks( + &pqc->dpq_observer_hooks); + } if (pqc->dpq_thread_configure) { pqc->dpq_thread_configure(); } @@ -4366,12 +4525,13 @@ _dispatch_queue_cleanup2(void) { dispatch_queue_t dq = &_dispatch_main_q; (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); - unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, + (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_LOCK, release); + _dispatch_queue_clear_bound_thread(dq); dq->dq_is_thread_bound = 0; - if (suspend_cnt == 0) { - _dispatch_queue_wakeup(dq); - } + // no need to drop the override, the thread will die anyway + _dispatch_queue_wakeup_with_qos(dq, + _dispatch_queue_reset_override_priority(dq)); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called diff --git a/src/queue_internal.h b/src/queue_internal.h index d76b15f05..143ab1e2a 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -56,10 +56,11 @@ dispatch_queue_t dq_specific_q; \ uint16_t dq_width; \ uint16_t dq_is_thread_bound:1; \ + uint32_t volatile dq_override; \ pthread_priority_t dq_priority; \ mach_port_t dq_thread; \ mach_port_t volatile dq_tqthread; \ - uint32_t volatile dq_override; \ + voucher_t dq_override_voucher; \ unsigned long dq_serialnum; \ const char *dq_label; \ DISPATCH_INTROSPECTION_QUEUE_LIST; @@ -74,16 +75,18 @@ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #else #define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (13*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #endif DISPATCH_CLASS_DECL(queue); +#if !(defined(__cplusplus) && 
DISPATCH_INTROSPECTION) struct dispatch_queue_s { DISPATCH_STRUCT_HEADER(queue); DISPATCH_QUEUE_HEADER; DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only }; +#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION) DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); @@ -94,7 +97,8 @@ DISPATCH_CLASS_DECL(queue_specific_queue); void _dispatch_queue_destroy(dispatch_object_t dou); void _dispatch_queue_dispose(dispatch_queue_t dq); -void _dispatch_queue_invoke(dispatch_queue_t dq); +void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_invoke_flags_t flags); void _dispatch_queue_push_list_slow(dispatch_queue_t dq, pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n, bool retained); @@ -103,10 +107,13 @@ void _dispatch_queue_push_slow(dispatch_queue_t dq, unsigned long _dispatch_queue_probe(dispatch_queue_t dq); dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq); +void _dispatch_queue_wakeup_and_release(dispatch_queue_t dq); void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp); void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, pthread_priority_t pp); +void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, + pthread_priority_t pp); _dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); @@ -175,25 +182,34 @@ extern pthread_priority_t _dispatch_user_initiated_priority; #pragma mark - #pragma mark dispatch_queue_attr_t +typedef enum { + _dispatch_queue_attr_overcommit_unspecified = 0, + _dispatch_queue_attr_overcommit_enabled, + _dispatch_queue_attr_overcommit_disabled, +} _dispatch_queue_attr_overcommit_t; + DISPATCH_CLASS_DECL(queue_attr); struct dispatch_queue_attr_s { DISPATCH_STRUCT_HEADER(queue_attr); qos_class_t 
dqa_qos_class; int dqa_relative_priority; - unsigned int dqa_overcommit:1, dqa_concurrent:1; + unsigned int dqa_overcommit:2, dqa_concurrent:1; }; enum { DQA_INDEX_NON_OVERCOMMIT = 0, DQA_INDEX_OVERCOMMIT, + DQA_INDEX_UNSPECIFIED_OVERCOMMIT, }; +#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3 + enum { DQA_INDEX_CONCURRENT = 0, DQA_INDEX_SERIAL, }; -#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY) +#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2 typedef enum { DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0, @@ -205,8 +221,12 @@ typedef enum { DQA_INDEX_QOS_CLASS_USER_INTERACTIVE, } _dispatch_queue_attr_index_qos_class_t; +#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY) + extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] - [DISPATCH_QUEUE_ATTR_PRIO_COUNT][2][2]; + [DISPATCH_QUEUE_ATTR_PRIO_COUNT] + [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]; #pragma mark - #pragma mark dispatch_continuation_t @@ -279,7 +299,7 @@ typedef struct dispatch_continuation_s *dispatch_continuation_t; #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16 #else -#define DISPATCH_CONTINUATION_CACHE_LIMIT 65536 +#define DISPATCH_CONTINUATION_CACHE_LIMIT 1024 #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128 #endif #endif @@ -317,18 +337,23 @@ typedef struct dispatch_apply_s *dispatch_apply_t; #define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31) #define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30) -struct dispatch_block_private_data_s { - unsigned long dbpd_magic; - dispatch_block_flags_t dbpd_flags; - unsigned int volatile dbpd_atomic_flags; - int volatile dbpd_performed; - pthread_priority_t dbpd_priority; - voucher_t dbpd_voucher; - dispatch_block_t dbpd_block; - struct dispatch_semaphore_s dbpd_group; - dispatch_queue_t volatile dbpd_queue; +#define DISPATCH_BLOCK_PRIVATE_DATA_HEADER() \ + unsigned long 
dbpd_magic; \ + dispatch_block_flags_t dbpd_flags; \ + unsigned int volatile dbpd_atomic_flags; \ + int volatile dbpd_performed; \ + pthread_priority_t dbpd_priority; \ + voucher_t dbpd_voucher; \ + dispatch_block_t dbpd_block; \ + dispatch_group_t dbpd_group; \ + dispatch_queue_t volatile dbpd_queue; \ mach_port_t dbpd_thread; + +#if !defined(__cplusplus) +struct dispatch_block_private_data_s { + DISPATCH_BLOCK_PRIVATE_DATA_HEADER(); }; +#endif typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; // dbpd_atomic_flags bits @@ -339,14 +364,13 @@ typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; #define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk -#define DISPATCH_BLOCK_PRIVATE_DATA_INITIALIZER(flags, voucher, prio, block) \ +// struct for synchronous perform: no group_leave at end of invoke +#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \ { \ .dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \ .dbpd_flags = (flags), \ - .dbpd_priority = (prio), \ - .dbpd_voucher = (voucher), \ + .dbpd_atomic_flags = DBF_PERFORM, \ .dbpd_block = (block), \ - .dbpd_group = DISPATCH_GROUP_INITIALIZER(1), \ } dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags, diff --git a/src/semaphore.c b/src/semaphore.c index f9bfdbec6..f356fb876 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -415,15 +415,28 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) #pragma mark - #pragma mark dispatch_group_t -dispatch_group_t -dispatch_group_create(void) +DISPATCH_ALWAYS_INLINE +static inline dispatch_group_t +_dispatch_group_create_with_count(long count) { dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); - _dispatch_semaphore_init(LONG_MAX, dg); + _dispatch_semaphore_init(LONG_MAX - count, dg); return dg; } +dispatch_group_t +dispatch_group_create(void) +{ + return 
_dispatch_group_create_with_count(0); +} + +dispatch_group_t +_dispatch_group_create_and_enter(void) +{ + return _dispatch_group_create_with_count(1); +} + void dispatch_group_enter(dispatch_group_t dg) { @@ -507,7 +520,7 @@ DISPATCH_NOINLINE static long _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long orig; + long orig, value; #if USE_MACH_SEM mach_timespec_t _timeout; @@ -525,7 +538,8 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) again: // check before we cause another signal to be sent by incrementing // dsema->dsema_group_waiters - if (dsema->dsema_value == LONG_MAX) { + value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 + if (value == LONG_MAX) { return _dispatch_group_wake(dsema); } // Mach semaphores appear to sometimes spuriously wake up. Therefore, @@ -533,7 +547,8 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) // signaled (6880961). (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed); // check the values again in case we need to wake any threads - if (dsema->dsema_value == LONG_MAX) { + value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 + if (value == LONG_MAX) { return _dispatch_group_wake(dsema); } diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 01179cb66..11261c3c9 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -53,15 +53,7 @@ struct dispatch_semaphore_s { DISPATCH_CLASS_DECL(group); -#define DISPATCH_GROUP_INITIALIZER(s) \ - { \ - .do_vtable = (const void*)DISPATCH_VTABLE(group), \ - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .dsema_value = LONG_MAX - (s), \ - .dsema_orig = LONG_MAX, \ - } - +dispatch_group_t _dispatch_group_create_and_enter(void); void _dispatch_semaphore_dispose(dispatch_object_t dou); size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); 
diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 25de7e474..cf5238512 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -49,15 +49,17 @@ static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION -static const unsigned long dispatch_introspection_key =__PTK_LIBDISPATCH_KEY5+1; +static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; #elif DISPATCH_PERF_MON -static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY5+1; +static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif #if DISPATCH_USE_OS_SEMAPHORE_CACHE static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; #else -static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY5+2; +static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7; #endif +static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = + __PTK_LIBDISPATCH_KEY8; #ifndef __TSD_THREAD_QOS_CLASS #define __TSD_THREAD_QOS_CLASS 4 @@ -88,7 +90,7 @@ extern pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; #endif - +exern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; DISPATCH_TSD_INLINE static inline void diff --git a/src/source.c b/src/source.c index b593ae04a..dde7db9af 100644 --- a/src/source.c +++ b/src/source.c @@ -25,15 +25,19 @@ #endif #include +#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 +#define DKEV_DISPOSE_IGNORE_ENOENT 0x2 + static void _dispatch_source_merge_kevent(dispatch_source_t ds, - const struct kevent64_s *ke); + const _dispatch_kevent_qos_s *ke); static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp); -static void _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg); -static bool 
_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, +static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, + int options); +static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static void _dispatch_kevent_drain(struct kevent64_s *ke); -static void _dispatch_kevent_merge(struct kevent64_s *ke); -static void _dispatch_timers_kevent(struct kevent64_s *ke); +static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke); +static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke); +static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke); static void _dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk); static void _dispatch_timers_update(dispatch_source_t ds); @@ -45,7 +49,7 @@ static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx); static inline unsigned long _dispatch_source_timer_data( dispatch_source_refs_t dr, unsigned long prev); -static long _dispatch_kq_update(const struct kevent64_s *); +static long _dispatch_kq_update(const _dispatch_kevent_qos_s *); static void _dispatch_memorystatus_init(void); #if HAVE_MACH static void _dispatch_mach_host_calendar_change_register(void); @@ -54,23 +58,34 @@ static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static inline void _dispatch_kevent_mach_portset(struct kevent64_s *ke); +static inline void _dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke); #else static inline void _dispatch_mach_host_calendar_change_register(void) {} static inline void _dispatch_mach_recv_msg_buf_init(void) {} #endif static const char * _evfiltstr(short filt); #if DISPATCH_DEBUG -static void _dispatch_kevent_debug(struct kevent64_s* kev, const char* str); +static void _dispatch_kevent_debug(const 
_dispatch_kevent_qos_s* kev, + const char* str); static void _dispatch_kevent_debugger(void *context); #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) #else static inline void -_dispatch_kevent_debug(struct kevent64_s* kev DISPATCH_UNUSED, +_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED, const char* str DISPATCH_UNUSED) {} #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif +#ifndef DISPATCH_MGR_QUEUE_DEBUG +#define DISPATCH_MGR_QUEUE_DEBUG 0 +#endif +#if DISPATCH_MGR_QUEUE_DEBUG +#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug +#else +static inline void +_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED, + const char* str DISPATCH_UNUSED) {} +#endif #pragma mark - #pragma mark dispatch_source_t @@ -79,9 +94,9 @@ dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) + dispatch_queue_t dq) { - const struct kevent64_s *proto_kev = &type->ke; + const _dispatch_kevent_qos_s *proto_kev = &type->ke; dispatch_source_t ds; dispatch_kevent_t dk; @@ -127,9 +142,6 @@ dispatch_source_create(dispatch_source_type_t type, ds->do_ref_cnt++; // the reference the manager queue holds ds->do_ref_cnt++; // since source is created suspended ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - // The initial target queue is the manager queue, in order to get - // the source installed. 
- ds->do_targetq = &_dispatch_mgr_q; dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); dk->dk_kevent = *proto_kev; @@ -149,9 +161,15 @@ dispatch_source_create(dispatch_source_type_t type, // we cheat and use EV_CLEAR to mean a "flag thingy" ds->ds_is_adder = true; } + if (EV_UDATA_SPECIFIC & proto_kev->flags) { + dispatch_assert(!(EV_ONESHOT & proto_kev->flags)); + dk->dk_kevent.flags |= EV_DISPATCH; + ds->ds_is_direct_kevent = true; + ds->ds_needs_rearm = true; + } // Some sources require special processing if (type->init != NULL) { - type->init(ds, type, handle, mask, q); + type->init(ds, type, handle, mask, dq); } dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); @@ -161,12 +179,36 @@ dispatch_source_create(dispatch_source_type_t type, } ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); - // First item on the queue sets the user-specified target queue - dispatch_set_target_queue(ds, q); + if (!ds->ds_is_direct_kevent) { + // The initial target queue is the manager queue, in order to get + // the source installed. 
+ ds->do_targetq = &_dispatch_mgr_q; + // First item on the queue sets the user-specified target queue + dispatch_set_target_queue(ds, dq); + } else { + if (slowpath(!dq)) { + dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + } else { + _dispatch_retain(dq); + } + ds->do_targetq = dq; + _dispatch_queue_priority_inherit_from_target((dispatch_queue_t)ds, dq); + _dispatch_queue_set_override_priority(dq); + } _dispatch_object_debug(ds, "%s", __func__); return ds; } +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_source_get_kevent_queue(dispatch_source_t ds) +{ + if (ds->ds_is_direct_kevent) { + return ds->do_targetq; + } + return &_dispatch_mgr_q; +} + void _dispatch_source_dispose(dispatch_source_t ds) { @@ -247,7 +289,7 @@ dispatch_source_get_data(dispatch_source_t ds) void dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - struct kevent64_s kev = { + _dispatch_kevent_qos_s kev = { .fflags = (typeof(kev.fflags))val, .data = (typeof(kev.data))val, }; @@ -498,11 +540,9 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) return; } pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - _dispatch_trace_continuation_pop(_dispatch_queue_get_current(), dc); voucher_t voucher = dc->dc_voucher ? 
_voucher_retain(dc->dc_voucher) : NULL; _dispatch_continuation_voucher_adopt(dc); // consumes voucher reference - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_introspection_queue_item_complete(dc); + _dispatch_continuation_pop(dc); if (voucher) dc->dc_voucher = voucher; _dispatch_reset_defaultpriority(old_dp); } @@ -511,19 +551,45 @@ static void _dispatch_source_kevent_unregister(dispatch_source_t ds) { _dispatch_object_debug(ds, "%s", __func__); + uint32_t flags = (uint32_t)ds->ds_pending_data_mask; dispatch_kevent_t dk = ds->ds_dkev; - ds->ds_dkev = NULL; - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: + if (ds->ds_atomic_flags & DSF_DELETED) { + dk->dk_kevent.flags |= EV_DELETE; // already deleted + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + } + if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) { + ds->ds_dkev = NULL; _dispatch_timers_unregister(ds, dk); - break; - default: + } else if (!ds->ds_is_direct_kevent) { + ds->ds_dkev = NULL; TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); - _dispatch_kevent_unregister(dk, (uint32_t)ds->ds_pending_data_mask); - break; + _dispatch_kevent_unregister(dk, flags, 0); + } else { + int dkev_dispose_options = 0; + if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; + } + if (ds->ds_needs_mgr) { + dkev_dispose_options |= DKEV_DISPOSE_IGNORE_ENOENT; + ds->ds_needs_mgr = false; + } + long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options); + if (r == EINPROGRESS) { + _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", + ds, dk); + ds->ds_pending_delete = true; + return; // deferred unregistration + } else if (r == ENOENT) { + _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]", + ds, dk); + ds->ds_needs_mgr = true; + return; // potential concurrent EV_DELETE delivery rdar://22047283 + } + ds->ds_dkev = NULL; + _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list); } - 
(void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev); ds->ds_needs_rearm = false; // re-arm is pointless and bad now _dispatch_release(ds); // the retain is done at creation time } @@ -533,14 +599,19 @@ _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) { switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: - return _dispatch_timers_update(ds); + _dispatch_timers_update(ds); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, + ds->ds_dkev); + return; case EVFILT_MACHPORT: if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH } break; } - if (_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { + if ((ds->ds_atomic_flags & DSF_DELETED) || + _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { _dispatch_source_kevent_unregister(ds); } } @@ -551,15 +622,19 @@ _dispatch_source_kevent_register(dispatch_source_t ds) dispatch_assert_zero(ds->ds_is_installed); switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: - return _dispatch_timers_update(ds); + _dispatch_timers_update(ds); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + return; } uint32_t flags; bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags); TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); if (do_resume || ds->ds_needs_rearm) { _dispatch_source_kevent_resume(ds, flags); } - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); _dispatch_object_debug(ds, "%s", __func__); } @@ -569,8 +644,10 @@ 
_dispatch_source_invoke2(dispatch_object_t dou, _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) { dispatch_source_t ds = dou._ds; - if (slowpath(_dispatch_queue_drain(ds))) { - DISPATCH_CLIENT_CRASH("Sync onto source"); + if (_dispatch_queue_class_probe(ds)) { + if (slowpath(_dispatch_queue_drain(ds))) { + DISPATCH_CLIENT_CRASH("Sync onto source"); + } } // This function performs all source actions. Each action is responsible @@ -581,12 +658,13 @@ _dispatch_source_invoke2(dispatch_object_t dou, // The order of tests here in invoke and in probe should be consistent. dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_queue_t dkq = _dispatch_source_get_kevent_queue(ds); dispatch_source_refs_t dr = ds->ds_refs; if (!ds->ds_is_installed) { - // The source needs to be installed on the manager queue. - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; + // The source needs to be installed on the kevent queue. + if (dq != dkq) { + return dkq; } _dispatch_source_kevent_register(ds); ds->ds_is_installed = true; @@ -594,7 +672,7 @@ _dispatch_source_invoke2(dispatch_object_t dou, return ds->do_targetq; } if (slowpath(ds->do_xref_cnt == -1)) { - return &_dispatch_mgr_q; // rdar://problem/9558246 + return dkq; // rdar://problem/9558246 } } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { // Source suspended by an item drained from the source queue. 
@@ -608,17 +686,56 @@ _dispatch_source_invoke2(dispatch_object_t dou, // clears ds_registration_handler _dispatch_source_registration_callout(ds); if (slowpath(ds->do_xref_cnt == -1)) { - return &_dispatch_mgr_q; // rdar://problem/9558246 + return dkq; // rdar://problem/9558246 + } + } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || + (ds->ds_atomic_flags & DSF_ONESHOT))) { + // Pending source kevent unregistration has been completed + if (ds->ds_needs_mgr) { + dkq = &_dispatch_mgr_q; } - } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){ + if (dq != dkq) { + return dkq; + } + ds->ds_pending_delete = false; + if (ds->ds_atomic_flags & DSF_ONESHOT) { + (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ONESHOT, + relaxed); + } + if (ds->ds_dkev) { + _dispatch_source_kevent_unregister(ds); + if (ds->ds_needs_mgr) { + return &_dispatch_mgr_q; + } + } + if (dr->ds_handler[DS_EVENT_HANDLER] || + dr->ds_handler[DS_CANCEL_HANDLER] || + dr->ds_handler[DS_REGISTN_HANDLER]) { + return ds->do_targetq; + } + } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) + && !ds->ds_pending_delete) { // The source has been cancelled and needs to be uninstalled from the - // manager queue. After uninstallation, the cancellation handler needs + // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. 
if (ds->ds_dkev) { - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; + if (ds->ds_needs_mgr) { + dkq = &_dispatch_mgr_q; + } + if (dq != dkq) { + return dkq; } _dispatch_source_kevent_unregister(ds); + if (ds->ds_needs_mgr) { + return &_dispatch_mgr_q; + } + if (ds->ds_pending_delete) { + // deferred unregistration + if (ds->ds_needs_rearm) { + return dkq; + } + return NULL; + } } if (dr->ds_handler[DS_EVENT_HANDLER] || dr->ds_handler[DS_CANCEL_HANDLER] || @@ -628,24 +745,26 @@ _dispatch_source_invoke2(dispatch_object_t dou, } } _dispatch_source_cancel_callout(ds); - } else if (ds->ds_pending_data) { + } else if (ds->ds_pending_data && !ds->ds_pending_delete) { // The source has pending data to deliver via the event handler callback - // on the target queue. Some sources need to be rearmed on the manager + // on the target queue. Some sources need to be rearmed on the kevent // queue after event delivery. if (dq != ds->do_targetq) { return ds->do_targetq; } _dispatch_source_latch_and_call(ds); if (ds->ds_needs_rearm) { - return &_dispatch_mgr_q; + return dkq; } } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { - // The source needs to be rearmed on the manager queue. - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; + // The source needs to be rearmed on the kevent queue. 
+ if (dq != dkq) { + return dkq; } - _dispatch_source_kevent_resume(ds, 0); (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, + ds->ds_dkev); + _dispatch_source_kevent_resume(ds, 0); } return NULL; @@ -653,9 +772,10 @@ _dispatch_source_invoke2(dispatch_object_t dou, DISPATCH_NOINLINE void -_dispatch_source_invoke(dispatch_source_t ds) +_dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(ds, _dispatch_source_invoke2); + _dispatch_queue_class_invoke(ds, dou._dc, flags, _dispatch_source_invoke2); } unsigned long @@ -666,13 +786,18 @@ _dispatch_source_probe(dispatch_source_t ds) dispatch_source_refs_t dr = ds->ds_refs; if (!ds->ds_is_installed) { - // The source needs to be installed on the manager queue. + // The source needs to be installed on the kevent queue. return true; } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { // The registration handler needs to be delivered to the target queue. return true; - } else if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)){ - // The source needs to be uninstalled from the manager queue, or the + } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || + (ds->ds_atomic_flags & DSF_ONESHOT))) { + // Pending source kevent unregistration has been completed + return true; + } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) + && !ds->ds_pending_delete) { + // The source needs to be uninstalled from the kevent queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. 
if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] || @@ -680,21 +805,37 @@ _dispatch_source_probe(dispatch_source_t ds) dr->ds_handler[DS_REGISTN_HANDLER]) { return true; } - } else if (ds->ds_pending_data) { + } else if (ds->ds_pending_data && !ds->ds_pending_delete) { // The source has pending data to deliver to the target queue. return true; } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { - // The source needs to be rearmed on the manager queue. + // The source needs to be rearmed on the kevent queue. return true; } return _dispatch_queue_class_probe(ds); } static void -_dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent64_s *ke) +_dispatch_source_merge_kevent(dispatch_source_t ds, + const _dispatch_kevent_qos_s *ke) { + _dispatch_object_debug(ds, "%s", __func__); + bool retained = false; + if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && + !(ke->flags & EV_DELETE)) { + _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", + ds, (void*)ke->udata); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ONESHOT, relaxed); + } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) { + _dispatch_debug("kevent-source[%p]: delete kevent[%p]", + ds, (void*)ke->udata); + retained = true; + _dispatch_retain(ds); + (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_DELETED, relaxed); + if (ke->flags & EV_DELETE) goto done; + } if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { - return; + goto done; // rdar://20204025 } if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense @@ -710,12 +851,22 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, const struct kevent64_s *ke) (void)dispatch_atomic_or2o(ds, ds_pending_data, ke->fflags & ds->ds_pending_data_mask, relaxed); } +done: // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery if (ds->ds_needs_rearm) { + if (!retained) { + retained = true; + 
_dispatch_retain(ds); // rdar://20382435 + } (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", + ds, (void*)ke->udata); + } + if (retained) { + _dispatch_queue_wakeup_and_release((dispatch_queue_t)ds); + } else { + _dispatch_queue_wakeup((dispatch_queue_t)ds); } - - _dispatch_wakeup(ds); } #pragma mark - @@ -729,6 +880,7 @@ static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } #endif +#if !DISPATCH_USE_EV_UDATA_SPECIFIC static struct dispatch_kevent_s _dispatch_kevent_data_or = { .dk_kevent = { .filter = DISPATCH_EVFILT_CUSTOM_OR, @@ -742,6 +894,7 @@ static struct dispatch_kevent_s _dispatch_kevent_data_add = { }, .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), }; +#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC #define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) @@ -756,6 +909,7 @@ _dispatch_kevent_init() TAILQ_INIT(&_dispatch_sources[i]); } +#if !DISPATCH_USE_EV_UDATA_SPECIFIC TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_or, dk_list); TAILQ_INSERT_TAIL(&_dispatch_sources[0], @@ -764,6 +918,7 @@ _dispatch_kevent_init() (uintptr_t)&_dispatch_kevent_data_or; _dispatch_kevent_data_add.dk_kevent.udata = (uintptr_t)&_dispatch_kevent_data_add; +#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC } static inline uintptr_t @@ -797,6 +952,7 @@ _dispatch_kevent_find(uint64_t ident, short filter) static void _dispatch_kevent_insert(dispatch_kevent_t dk) { + if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return; _dispatch_kevent_guard(dk); uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, dk->dk_kevent.filter); @@ -807,12 +963,14 @@ _dispatch_kevent_insert(dispatch_kevent_t dk) static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) { - dispatch_kevent_t dk, ds_dkev = *dkp; + dispatch_kevent_t dk = NULL, ds_dkev = *dkp; uint32_t 
new_flags; bool do_resume = false; - dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, - ds_dkev->dk_kevent.filter); + if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, + ds_dkev->dk_kevent.filter); + } if (dk) { // If an existing dispatch kevent is found, check to see if new flags // need to be added to the existing kevent @@ -836,7 +994,7 @@ _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) return do_resume; } -static bool +static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) { @@ -853,31 +1011,36 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, case DISPATCH_EVFILT_MACH_NOTIFICATION: return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); #endif - case EVFILT_PROC: - if (dk->dk_kevent.flags & EV_ONESHOT) { + default: + if (dk->dk_kevent.flags & EV_DELETE) { return 0; } - // fall through - default: r = _dispatch_kq_update(&dk->dk_kevent); - if (dk->dk_kevent.flags & EV_DISPATCH) { + if (r && (dk->dk_kevent.flags & EV_ADD) && + (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + dk->dk_kevent.flags |= EV_DELETE; + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + } else if (dk->dk_kevent.flags & EV_DISPATCH) { dk->dk_kevent.flags &= ~EV_ADD; } return r; } } -static void -_dispatch_kevent_dispose(dispatch_kevent_t dk) +static long +_dispatch_kevent_dispose(dispatch_kevent_t dk, int options) { - uintptr_t hash; - + long r = 0; switch (dk->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: - // these sources live on statically allocated lists - return; + if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { + free(dk); + } else { + // these sources live on statically allocated lists + } + return r; #if HAVE_MACH case EVFILT_MACHPORT: _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); @@ -886,35 +1049,56 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk) 
_dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags); break; #endif - case EVFILT_PROC: - if (dk->dk_kevent.flags & EV_ONESHOT) { - break; // implicitly deleted - } - // fall through default: if (~dk->dk_kevent.flags & EV_DELETE) { dk->dk_kevent.flags |= EV_DELETE; dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); - _dispatch_kq_update(&dk->dk_kevent); + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags |= EV_ENABLE; + } + r = _dispatch_kq_update(&dk->dk_kevent); + if (r == ENOENT && (options & DKEV_DISPOSE_IGNORE_ENOENT)) { + r = 0; + } + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags &= ~EV_ENABLE; + } } break; } - - hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); - _dispatch_kevent_unguard(dk); - free(dk); + if ((r == EINPROGRESS || r == ENOENT) && + (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + // deferred EV_DELETE or concurrent EV_DELETE delivery + dk->dk_kevent.flags &= ~EV_DELETE; + dk->dk_kevent.flags |= EV_ENABLE; + } else { + if ((dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { +#if DISPATCH_DEBUG + // zero/trash dr linkage + dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); + TAILQ_REMOVE(&dk->dk_sources, dr, dr_list); +#endif + } else { + uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, + dk->dk_kevent.filter); + TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + } + _dispatch_kevent_unguard(dk); + free(dk); + } + return r; } -static void -_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg) +static long +_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, int options) { dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; + long r = 0; - if (TAILQ_EMPTY(&dk->dk_sources)) { - _dispatch_kevent_dispose(dk); + if (TAILQ_EMPTY(&dk->dk_sources) || + (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + r = _dispatch_kevent_dispose(dk, options); } else { TAILQ_FOREACH(dri, 
&dk->dk_sources, dr_list) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); @@ -924,20 +1108,21 @@ _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg) del_flags = flg & ~fflags; if (del_flags) { dk->dk_kevent.flags |= EV_ADD; - dk->dk_kevent.fflags = fflags; - _dispatch_kevent_resume(dk, 0, del_flags); + dk->dk_kevent.fflags &= ~del_flags; + r = _dispatch_kevent_resume(dk, 0, del_flags); } } + return r; } DISPATCH_NOINLINE static void -_dispatch_kevent_proc_exit(struct kevent64_s *ke) +_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke) { // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie // . As a workaround, we simulate an exit event for // any EVFILT_PROC with an invalid pid . - struct kevent64_s fake; + _dispatch_kevent_qos_s fake; fake = *ke; fake.flags &= ~EV_ERROR; fake.fflags = NOTE_EXIT; @@ -947,7 +1132,7 @@ _dispatch_kevent_proc_exit(struct kevent64_s *ke) DISPATCH_NOINLINE static void -_dispatch_kevent_error(struct kevent64_s *ke) +_dispatch_kevent_error(_dispatch_kevent_qos_s *ke) { _dispatch_kevent_debug(ke, __func__); if (ke->data) { @@ -961,27 +1146,30 @@ _dispatch_kevent_error(struct kevent64_s *ke) } static void -_dispatch_kevent_drain(struct kevent64_s *ke) +_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) { #if DISPATCH_DEBUG static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); #endif if (ke->filter == EVFILT_USER) { + _dispatch_kevent_mgr_debug(ke, __func__); return; } if (slowpath(ke->flags & EV_ERROR)) { - if (ke->filter == EVFILT_PROC) { + if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { + ke->data = 0; // don't return error from caller if (ke->flags & EV_DELETE) { - // Process exited while monitored + _dispatch_debug("kevent[0x%llx]: ignoring ESRCH from " + "EVFILT_PROC EV_DELETE", ke->udata); return; - } else if (ke->data == ESRCH) { - return _dispatch_kevent_proc_exit(ke); } + _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: " + "generating fake 
NOTE_EXIT", ke->udata); + return _dispatch_kevent_proc_exit(ke); } return _dispatch_kevent_error(ke); } - _dispatch_kevent_debug(ke, __func__); if (ke->filter == EVFILT_TIMER) { return _dispatch_timers_kevent(ke); } @@ -995,18 +1183,16 @@ _dispatch_kevent_drain(struct kevent64_s *ke) DISPATCH_NOINLINE static void -_dispatch_kevent_merge(struct kevent64_s *ke) +_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) { + _dispatch_kevent_debug(ke, __func__); dispatch_kevent_t dk; - dispatch_source_refs_t dri; + dispatch_source_refs_t dri, dr_next; dk = (void*)ke->udata; dispatch_assert(dk); - if (ke->flags & EV_ONESHOT) { - dk->dk_kevent.flags |= EV_ONESHOT; - } - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { + TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } } @@ -1168,6 +1354,7 @@ _dispatch_source_set_timer3(void *context) ds->ds_pending_data = 0; // Re-arm in case we got disarmed because of pending set_timer suspension (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); dispatch_resume(ds); // Must happen after resume to avoid getting disarmed due to suspension _dispatch_timers_update(ds); @@ -1385,7 +1572,7 @@ struct dispatch_kevent_s _dispatch_kevent_timer[] = { #define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \ DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note) -struct kevent64_s _dispatch_kevent_timeout[] = { +_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0), DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL), DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND), @@ -1516,6 +1703,8 @@ _dispatch_timers_update(dispatch_source_t ds) ds->ds_pending_data) { tidx = DISPATCH_TIMER_INDEX_DISARM; (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, + 
ds->ds_dkev); } else { tidx = _dispatch_source_timer_idx(dr); } @@ -1526,6 +1715,8 @@ _dispatch_timers_update(dispatch_source_t ds) ds->ds_is_installed = true; if (tidx != DISPATCH_TIMER_INDEX_DISARM) { (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, + ds->ds_dkev); } _dispatch_object_debug(ds, "%s", __func__); ds->ds_dkev = NULL; @@ -1668,7 +1859,7 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], } static bool -_dispatch_timers_program2(uint64_t nows[], struct kevent64_s *ke, +_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, unsigned int qos) { unsigned int tidx; @@ -1739,8 +1930,9 @@ _dispatch_timers_calendar_change(void) } static void -_dispatch_timers_kevent(struct kevent64_s *ke) +_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) { + _dispatch_kevent_debug(ke, __func__); dispatch_assert(ke->data > 0); dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); @@ -1933,6 +2125,8 @@ _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) static int _dispatch_kq; +#if DISPATCH_USE_SELECT_FALLBACK + static unsigned int _dispatch_select_workaround; static fd_set _dispatch_rfds; static fd_set _dispatch_wfds; @@ -1941,9 +2135,8 @@ static uint64_t*_dispatch_wfd_ptrs; DISPATCH_NOINLINE static bool -_dispatch_select_register(struct kevent64_s *kev) +_dispatch_select_register(const _dispatch_kevent_qos_s *kev) { - // Must execute on manager queue DISPATCH_ASSERT_ON_MANAGER_QUEUE(); @@ -1965,8 +2158,9 @@ _dispatch_select_register(struct kevent64_s *kev) _dispatch_debug("select workaround used to read fd %d: 0x%lx", (int)kev->ident, (long)kev->data); } + return true; } - return true; + break; case EVFILT_WRITE: if ((kev->data == EINVAL || kev->data == ENOENT) && dispatch_assume(kev->ident < FD_SETSIZE)) { @@ -1981,15 +2175,16 @@ _dispatch_select_register(struct 
kevent64_s *kev) _dispatch_debug("select workaround used to write fd %d: 0x%lx", (int)kev->ident, (long)kev->data); } + return true; } - return true; + break; } return false; } DISPATCH_NOINLINE static bool -_dispatch_select_unregister(const struct kevent64_s *kev) +_dispatch_select_unregister(const _dispatch_kevent_qos_s *kev) { // Must execute on manager queue DISPATCH_ASSERT_ON_MANAGER_QUEUE(); @@ -2023,7 +2218,6 @@ _dispatch_mgr_select(bool poll) { static const struct timeval timeout_immediately = { 0, 0 }; fd_set tmp_rfds, tmp_wfds; - struct kevent64_s kev; int err, i, r; bool kevent_avail = false; @@ -2073,16 +2267,24 @@ _dispatch_mgr_select(bool poll) continue; } FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH - EV_SET64(&kev, i, EVFILT_READ, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_rfd_ptrs[i], 0, 0); + _dispatch_kevent_qos_s kev = { + .ident = (uint64_t)i, + .filter = EVFILT_READ, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .data = 1, + .udata = _dispatch_rfd_ptrs[i], + }; _dispatch_kevent_drain(&kev); } if (FD_ISSET(i, &tmp_wfds)) { FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH - EV_SET64(&kev, i, EVFILT_WRITE, - EV_ADD|EV_ENABLE|EV_DISPATCH, 0, 1, - _dispatch_wfd_ptrs[i], 0, 0); + _dispatch_kevent_qos_s kev = { + .ident = (uint64_t)i, + .filter = EVFILT_WRITE, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .data = 1, + .udata = _dispatch_wfd_ptrs[i], + }; _dispatch_kevent_drain(&kev); } } @@ -2090,13 +2292,15 @@ _dispatch_mgr_select(bool poll) return kevent_avail; } +#endif // DISPATCH_USE_SELECT_FALLBACK + #pragma mark - #pragma mark dispatch_kqueue static void _dispatch_kq_init(void *context DISPATCH_UNUSED) { - static const struct kevent64_s kev = { + static const _dispatch_kevent_qos_s kev = { .ident = 1, .filter = EVFILT_USER, .flags = EV_ADD|EV_CLEAR, @@ -2129,13 +2333,16 @@ _dispatch_kq_init(void *context DISPATCH_UNUSED) DISPATCH_CRASH("kqueue() failure"); break; } - } else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { + } +#if 
DISPATCH_USE_SELECT_FALLBACK + else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { // in case we fall back to select() FD_SET(_dispatch_kq, &_dispatch_rfds); } +#endif // DISPATCH_USE_SELECT_FALLBACK - (void)dispatch_assume_zero(kevent64(_dispatch_kq, &kev, 1, NULL, 0, 0, - NULL)); + (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, + NULL, 0)); _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); } @@ -2151,23 +2358,24 @@ _dispatch_get_kq(void) DISPATCH_NOINLINE static long -_dispatch_kq_update(const struct kevent64_s *kev) +_dispatch_kq_update(const _dispatch_kevent_qos_s *kev) { int r; - struct kevent64_s kev_copy; + _dispatch_kevent_qos_s kev_error; +#if DISPATCH_USE_SELECT_FALLBACK if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) { if (_dispatch_select_unregister(kev)) { return 0; } } - kev_copy = *kev; - // This ensures we don't get a pending kevent back while registering - // a new kevent - kev_copy.flags |= EV_RECEIPT; +#endif // DISPATCH_USE_SELECT_FALLBACK + if (kev->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug(kev, __func__); + } retry: - r = dispatch_assume(kevent64(_dispatch_get_kq(), &kev_copy, 1, - &kev_copy, 1, 0, NULL)); + r = kevent_qos(_dispatch_get_kq(), kev, 1, &kev_error, + 1, NULL, NULL, KEVENT_FLAG_ERROR_EVENTS); if (slowpath(r == -1)) { int err = errno; switch (err) { @@ -2182,34 +2390,59 @@ _dispatch_kq_update(const struct kevent64_s *kev) } return err; } - switch (kev_copy.data) { - case 0: + if (r == 0) { return 0; - case EBADF: - case EPERM: - case EINVAL: + } + if (kev_error.flags & EV_ERROR && kev_error.data) { + _dispatch_kevent_debug(&kev_error, __func__); + } + r = (int)kev_error.data; + switch (r) { + case 0: + _dispatch_kevent_mgr_debug(&kev_error, __func__); + break; + case EINPROGRESS: + // deferred EV_DELETE + break; case ENOENT: + if ((kev->flags & EV_DELETE) && (kev->flags & EV_UDATA_SPECIFIC)) { + // potential concurrent 
EV_DELETE delivery + break; + } + // fall through + case EINVAL: if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) { - if (_dispatch_select_register(&kev_copy)) { - return 0; +#if DISPATCH_USE_SELECT_FALLBACK + if (_dispatch_select_register(&kev_error)) { + r = 0; + break; + } +#elif DISPATCH_DEBUG + if (kev->filter == EVFILT_READ || kev->filter == EVFILT_WRITE) { + DISPATCH_CRASH("Unsupported fd for EVFILT_READ or EVFILT_WRITE " + "kevent"); } +#endif // DISPATCH_USE_SELECT_FALLBACK } // fall through + case EBADF: + case EPERM: default: - kev_copy.flags |= kev->flags; - _dispatch_kevent_drain(&kev_copy); + kev_error.flags |= kev->flags; + _dispatch_kevent_drain(&kev_error); + r = (int)kev_error.data; break; } - return (long)kev_copy.data; + return r; } #pragma mark - #pragma mark dispatch_mgr -static struct kevent64_s *_dispatch_kevent_enable; +static _dispatch_kevent_qos_s *_dispatch_kevent_enable; static void inline -_dispatch_mgr_kevent_reenable(struct kevent64_s *ke) +_dispatch_mgr_kevent_reenable(_dispatch_kevent_qos_s *ke) { dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke); _dispatch_kevent_enable = ke; @@ -2222,7 +2455,7 @@ _dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) return false; } - static const struct kevent64_s kev = { + static const _dispatch_kevent_qos_s kev = { .ident = 1, .filter = EVFILT_USER, .fflags = NOTE_TRIGGER, @@ -2255,22 +2488,23 @@ DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_mgr_invoke(void) { - static const struct timespec timeout_immediately = { 0, 0 }; - struct kevent64_s kev; + _dispatch_kevent_qos_s kev; bool poll; int r; for (;;) { _dispatch_mgr_queue_drain(); poll = _dispatch_mgr_timers(); +#if DISPATCH_USE_SELECT_FALLBACK if (slowpath(_dispatch_select_workaround)) { poll = _dispatch_mgr_select(poll); if (!poll) continue; } +#endif // DISPATCH_USE_SELECT_FALLBACK poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - r = kevent64(_dispatch_kq, 
_dispatch_kevent_enable, - _dispatch_kevent_enable ? 1 : 0, &kev, 1, 0, - poll ? &timeout_immediately : NULL); + r = kevent_qos(_dispatch_kq, _dispatch_kevent_enable, + _dispatch_kevent_enable ? 1 : 0, &kev, 1, NULL, NULL, + poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); _dispatch_kevent_enable = NULL; if (slowpath(r == -1)) { int err = errno; @@ -2292,7 +2526,9 @@ _dispatch_mgr_invoke(void) DISPATCH_NORETURN void -_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED) +_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_object_t dou DISPATCH_UNUSED, + dispatch_invoke_flags_t flags DISPATCH_UNUSED) { _dispatch_mgr_init(); // never returns, so burn bridges behind us & clear stack 2k ahead @@ -2395,8 +2631,8 @@ static inline void _dispatch_memorystatus_init(void) {} #define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) -static void _dispatch_kevent_machport_drain(struct kevent64_s *ke); -static void _dispatch_kevent_mach_msg_drain(struct kevent64_s *ke); +static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke); +static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke); static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr); static void _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr); static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, @@ -2415,7 +2651,7 @@ static void _dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr, mach_msg_size_t siz); static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, - const struct kevent64_s *ke); + const _dispatch_kevent_qos_s *ke); static inline mach_msg_option_t _dispatch_mach_checkin_options(void); static const size_t _dispatch_mach_recv_msg_size = @@ -2425,7 +2661,7 @@ static const size_t dispatch_mach_trailer_size = static mach_msg_size_t _dispatch_mach_recv_msg_buf_size; static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; static mach_port_t 
_dispatch_mach_notify_port; -static struct kevent64_s _dispatch_mach_recv_kevent = { +static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { .filter = EVFILT_MACHPORT, .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, .fflags = DISPATCH_MACH_RCV_OPTIONS, @@ -2517,7 +2753,7 @@ _dispatch_get_mach_recv_portset(void) static void _dispatch_mach_portset_init(void *context DISPATCH_UNUSED) { - struct kevent64_s kev = { + _dispatch_kevent_qos_s kev = { .filter = EVFILT_MACHPORT, .flags = EV_ADD, }; @@ -2575,14 +2811,14 @@ _dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) } static void -_dispatch_kevent_mach_recv_reenable(struct kevent64_s *ke DISPATCH_UNUSED) +_dispatch_kevent_mach_recv_reenable(_dispatch_kevent_qos_s *ke DISPATCH_UNUSED) { #if (TARGET_IPHONE_SIMULATOR && \ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \ (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090) // delete and re-add kevent to workaround if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) { - struct kevent64_s kev = _dispatch_mach_recv_kevent; + _dispatch_kevent_qos_s kev = _dispatch_mach_recv_kevent; kev.flags = EV_DELETE; _dispatch_kq_update(&kev); } @@ -2636,7 +2872,7 @@ _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, } static inline void -_dispatch_kevent_mach_portset(struct kevent64_s *ke) +_dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke) { if (ke->ident == _dispatch_mach_recv_portset) { return _dispatch_kevent_mach_msg_drain(ke); @@ -2649,11 +2885,11 @@ _dispatch_kevent_mach_portset(struct kevent64_s *ke) DISPATCH_NOINLINE static void -_dispatch_kevent_machport_drain(struct kevent64_s *ke) +_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) { + _dispatch_kevent_debug(ke, __func__); mach_port_t name = (mach_port_name_t)ke->data; dispatch_kevent_t dk; - struct kevent64_s kev; _dispatch_debug_machport(name); dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); @@ -2662,16 +2898,22 @@ 
_dispatch_kevent_machport_drain(struct kevent64_s *ke) } _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH - EV_SET64(&kev, name, EVFILT_MACHPORT, EV_ADD|EV_ENABLE|EV_DISPATCH, - DISPATCH_MACH_RECV_MESSAGE, 0, (uintptr_t)dk, 0, 0); + _dispatch_kevent_qos_s kev = { + .ident = name, + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RECV_MESSAGE, + .udata = (uintptr_t)dk, + }; _dispatch_kevent_debug(&kev, __func__); _dispatch_kevent_merge(&kev); } DISPATCH_NOINLINE static void -_dispatch_kevent_mach_msg_drain(struct kevent64_s *ke) +_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) { + _dispatch_kevent_debug(ke, __func__); mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0]; mach_msg_size_t siz, msgsiz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; @@ -2822,13 +3064,12 @@ _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz); } -DISPATCH_ALWAYS_INLINE -static inline void +DISPATCH_NOINLINE +static void _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) { dispatch_source_refs_t dri, dr_next; dispatch_kevent_t dk; - struct kevent64_s kev; bool unreg; dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); @@ -2838,8 +3079,13 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) // Update notification registration state. 
dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS; - EV_SET64(&kev, name, DISPATCH_EVFILT_MACH_NOTIFICATION, EV_ADD|EV_ENABLE, - flag, 0, (uintptr_t)dk, 0, 0); + _dispatch_kevent_qos_s kev = { + .ident = name, + .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, + .flags = EV_ADD|EV_ENABLE, + .fflags = flag, + .udata = (uintptr_t)dk, + }; if (final) { // This can never happen again unreg = true; @@ -3166,7 +3412,7 @@ _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, } dispatch_kevent_t dk = dmr->dmr_dkev; TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE); + _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE, 0); TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher); free(dmr); @@ -3222,7 +3468,7 @@ _dispatch_mach_kevent_unregister(dispatch_mach_t dm) dm->ds_pending_data_mask &= ~(unsigned long) (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); _dispatch_kevent_unregister(dk, - DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); + DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0); } DISPATCH_NOINLINE @@ -3619,9 +3865,9 @@ _dispatch_mach_send(dispatch_mach_t dm) _dispatch_mach_send_drain(dm); } -DISPATCH_NOINLINE static void -_dispatch_mach_merge_kevent(dispatch_mach_t dm, const struct kevent64_s *ke) +_dispatch_mach_merge_kevent(dispatch_mach_t dm, + const _dispatch_kevent_qos_s *ke) { if (!(ke->fflags & dm->ds_pending_data_mask)) { return; @@ -3647,6 +3893,18 @@ _dispatch_mach_send_options(void) return options; } +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_mach_priority_propagate(mach_msg_option_t options) +{ +#if DISPATCH_USE_NOIMPORTANCE_QOS + if (options & MACH_SEND_NOIMPORTANCE) return 0; +#else + (void)options; +#endif + return _dispatch_priority_propagate(); +} + DISPATCH_NOINLINE void dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, @@ 
-3658,6 +3916,7 @@ dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, } dispatch_retain(dmsg); dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + pthread_priority_t priority = _dispatch_mach_priority_propagate(options); options |= _dispatch_mach_send_options(); _dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK); mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); @@ -3667,7 +3926,7 @@ dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, MACH_PORT_NULL); bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); - dmsg->dmsg_priority = _dispatch_priority_propagate(); + dmsg->dmsg_priority = priority; dmsg->dmsg_voucher = _voucher_copy(); _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); if ((!is_reply && slowpath(dr->dm_tail)) || @@ -3816,7 +4075,9 @@ _dispatch_mach_connect_invoke(dispatch_mach_t dm) DISPATCH_NOINLINE void -_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg) +_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, + dispatch_object_t dou DISPATCH_UNUSED, + dispatch_invoke_flags_t flags DISPATCH_UNUSED) { dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); dispatch_mach_refs_t dr = dm->ds_refs; @@ -4029,9 +4290,10 @@ _dispatch_mach_invoke2(dispatch_object_t dou, DISPATCH_NOINLINE void -_dispatch_mach_invoke(dispatch_mach_t dm) +_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dm, _dispatch_mach_invoke2); + _dispatch_queue_class_invoke(dm, dou._dc, flags, _dispatch_mach_invoke2); } unsigned long @@ -4362,18 +4624,21 @@ _evfiltstr(short filt) _evfilt2(EVFILT_PROC); _evfilt2(EVFILT_SIGNAL); _evfilt2(EVFILT_TIMER); -#ifdef EVFILT_VM - _evfilt2(EVFILT_VM); -#endif -#ifdef EVFILT_MEMORYSTATUS - _evfilt2(EVFILT_MEMORYSTATUS); -#endif #if HAVE_MACH _evfilt2(EVFILT_MACHPORT); _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); #endif _evfilt2(EVFILT_FS); 
_evfilt2(EVFILT_USER); +#ifdef EVFILT_VM + _evfilt2(EVFILT_VM); +#endif +#ifdef EVFILT_SOCK + _evfilt2(EVFILT_SOCK); +#endif +#ifdef EVFILT_MEMORYSTATUS + _evfilt2(EVFILT_MEMORYSTATUS); +#endif _evfilt2(DISPATCH_EVFILT_TIMER); _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); @@ -4383,14 +4648,60 @@ _evfiltstr(short filt) } } +#if DISPATCH_DEBUG +static const char * +_evflagstr2(uint16_t *flagsp) +{ +#define _evflag2(f) \ + if ((*flagsp & (f)) == (f) && (f)) { \ + *flagsp &= ~(f); \ + return #f "|"; \ + } + _evflag2(EV_ADD); + _evflag2(EV_DELETE); + _evflag2(EV_ENABLE); + _evflag2(EV_DISABLE); + _evflag2(EV_ONESHOT); + _evflag2(EV_CLEAR); + _evflag2(EV_RECEIPT); + _evflag2(EV_DISPATCH); + _evflag2(EV_UDATA_SPECIFIC); + _evflag2(EV_POLL); + _evflag2(EV_OOBAND); + _evflag2(EV_ERROR); + _evflag2(EV_EOF); + *flagsp = 0; + return "EV_UNKNOWN "; +} + +DISPATCH_NOINLINE +static const char * +_evflagstr(uint16_t flags, char *str, size_t strsize) +{ + str[0] = 0; + while (flags) { + strlcat(str, _evflagstr2(&flags), strsize); + } + size_t sz = strlen(str); + if (sz) str[sz-1] = 0; + return str; +} +#endif + static size_t _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_queue_t target = ds->do_targetq; return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " - "pending_data = 0x%lx, pending_data_mask = 0x%lx, ", + "mask = 0x%lx, pending_data = 0x%lx, registered = %d, " + "armed = %d, deleted = %d%s%s, canceled = %d, needs_mgr = %d, ", target && target->dq_label ? target->dq_label : "", target, - ds->ds_ident_hack, ds->ds_pending_data, ds->ds_pending_data_mask); + ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data, + ds->ds_is_installed, (bool)(ds->ds_atomic_flags & DSF_ARMED), + (bool)(ds->ds_atomic_flags & DSF_DELETED), ds->ds_pending_delete ? + " (pending)" : "", (ds->ds_atomic_flags & DSF_ONESHOT) ? 
+ " (oneshot)" : "", (bool)(ds->ds_atomic_flags & DSF_CANCELED), + ds->ds_needs_mgr); } static size_t @@ -4414,8 +4725,10 @@ _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } - offset += dsnprintf(&buf[offset], bufsiz - offset, "filter = %s }", - ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : "????"); + offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " + "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)" + : "", ds->ds_dkev ? _evfiltstr(ds->ds_dkev->dk_kevent.filter) : + "????"); return offset; } @@ -4450,14 +4763,17 @@ _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) } #if DISPATCH_DEBUG +DISPATCH_NOINLINE static void -_dispatch_kevent_debug(struct kevent64_s* kev, const char* str) -{ - _dispatch_log("kevent[%p] = { ident = 0x%llx, filter = %s, flags = 0x%x, " - "fflags = 0x%x, data = 0x%llx, udata = 0x%llx, ext[0] = 0x%llx, " - "ext[1] = 0x%llx }: %s", kev, kev->ident, _evfiltstr(kev->filter), - kev->flags, kev->fflags, kev->data, kev->udata, kev->ext[0], - kev->ext[1], str); +_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, const char* str) +{ + char flagstr[256]; + _dispatch_debug("kevent[%p] = { ident = 0x%llx, filter = %s, " + "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " + "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s", kev, kev->ident, + _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, + sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, + kev->ext[0], kev->ext[1], str); } static void diff --git a/src/source_internal.h b/src/source_internal.h index 12ccdda97..6e8f40f5a 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -101,13 +101,13 @@ enum { struct dispatch_kevent_s { TAILQ_ENTRY(dispatch_kevent_s) dk_list; TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; - struct kevent64_s dk_kevent; + _dispatch_kevent_qos_s 
dk_kevent; }; typedef struct dispatch_kevent_s *dispatch_kevent_t; struct dispatch_source_type_s { - struct kevent64_s ke; + _dispatch_kevent_qos_s ke; uint64_t mask; void (*init)(dispatch_source_t ds, dispatch_source_type_t type, uintptr_t handle, unsigned long mask, dispatch_queue_t q); @@ -168,6 +168,8 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) // ds_atomic_flags bits #define DSF_CANCELED 1u // cancellation has been requested #define DSF_ARMED 2u // source is armed +#define DSF_DELETED 4u // source received EV_DELETE event +#define DSF_ONESHOT 8u // source received EV_ONESHOT event #define DISPATCH_SOURCE_HEADER(refs) \ dispatch_kevent_t ds_dkev; \ @@ -177,7 +179,10 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) ds_is_level:1, \ ds_is_adder:1, \ ds_is_installed:1, \ + ds_is_direct_kevent:1, \ ds_needs_rearm:1, \ + ds_pending_delete:1, \ + ds_needs_mgr:1, \ ds_is_timer:1, \ ds_vmpressure_override:1, \ ds_memorystatus_override:1, \ @@ -262,7 +267,8 @@ struct dispatch_mach_msg_s { void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds); -void _dispatch_source_invoke(dispatch_source_t ds); +void _dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, + dispatch_invoke_flags_t flags); unsigned long _dispatch_source_probe(dispatch_source_t ds); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); @@ -270,17 +276,20 @@ void _dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, void *ctxt, dispatch_function_t handler); void _dispatch_mach_dispose(dispatch_mach_t dm); -void _dispatch_mach_invoke(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_invoke_flags_t flags); unsigned long _dispatch_mach_probe(dispatch_mach_t dm); size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); void 
_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); -void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg); +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_object_t dou, + dispatch_invoke_flags_t flags); size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz); void _dispatch_mach_barrier_invoke(void *ctxt); unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq); -void _dispatch_mgr_thread(dispatch_queue_t dq); +void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_invoke_flags_t flags); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/trace.h b/src/trace.h index df27ca81b..ebab27c88 100644 --- a/src/trace.h +++ b/src/trace.h @@ -27,7 +27,7 @@ #ifndef __DISPATCH_TRACE__ #define __DISPATCH_TRACE__ -#if !__OBJC2__ +#if !__OBJC2__ && !defined(__cplusplus) #if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { @@ -98,8 +98,8 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ dispatch_source_t _ds = (dispatch_source_t)_do; \ _dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \ - _func = _dc->dc_func; \ - _ctxt = _dc->dc_ctxt; \ + _func = _dc ? _dc->dc_func : NULL; \ + _ctxt = _dc ? 
_dc->dc_ctxt : NULL; \ } else { \ _func = (dispatch_function_t)_dispatch_queue_invoke; \ _ctxt = _do->do_ctxt; \ @@ -321,6 +321,6 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #endif // DISPATCH_USE_DTRACE -#endif // !__OBJC2__ +#endif // !__OBJC2__ && !defined(__cplusplus) #endif // __DISPATCH_TRACE__ diff --git a/src/voucher.c b/src/voucher.c index e886fafbe..6f28c24e3 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -33,23 +33,19 @@ #define VOUCHER_ATM_COLLECT_THRESHOLD 1 #endif #define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2) -static volatile long _voucher_atm_collect_level; -static long _voucher_atm_collect_threshold = - VATM_COLLECT_THRESHOLD_VALUE(VOUCHER_ATM_COLLECT_THRESHOLD); -static unsigned long _voucher_atm_subid_bits; +static uint64_t volatile _voucher_atm_generation; typedef struct _voucher_atm_s *_voucher_atm_t; static void _voucher_activity_atfork_child(void); -static inline mach_voucher_t _voucher_get_atm_mach_voucher(voucher_t voucher); -static inline mach_voucher_t _voucher_activity_get_atm_mach_voucher( - _voucher_activity_t act); -static inline _voucher_activity_t _voucher_activity_get(voucher_t voucher); static _voucher_activity_t _voucher_activity_copy_from_mach_voucher( mach_voucher_t kv, voucher_activity_id_t va_id); static inline _voucher_activity_t _voucher_activity_retain( _voucher_activity_t act); static inline void _voucher_activity_release(_voucher_activity_t act); +static void _voucher_activity_remove(_voucher_activity_t act); +static inline _voucher_atm_t _voucher_atm_retain(_voucher_atm_t vatm); +static inline void _voucher_atm_release(_voucher_atm_t vatm); #pragma mark - #pragma mark voucher_t @@ -156,6 +152,18 @@ voucher_copy_without_importance(void) return _voucher_copy_without_importance(); } +voucher_t +voucher_retain(voucher_t voucher) +{ + return _voucher_retain(voucher); +} + +void +voucher_release(voucher_t voucher) +{ + return _voucher_release(voucher); +} + void 
_voucher_thread_cleanup(void *voucher) { @@ -164,7 +172,7 @@ _voucher_thread_cleanup(void *voucher) DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; -#define _vouchers(kv) (&_vouchers[VL_HASH((kv))]) +#define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))]) static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT; #define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock) #define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock) @@ -175,7 +183,7 @@ _voucher_find_and_retain(mach_voucher_t kv) voucher_t v; if (!kv) return NULL; _vouchers_lock_lock(); - TAILQ_FOREACH(v, _vouchers(kv), v_list) { + TAILQ_FOREACH(v, _vouchers_head(kv), v_list) { if (v->v_ipc_kvoucher == kv) { int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); @@ -204,7 +212,7 @@ _voucher_insert(voucher_t v) _dispatch_voucher_debug("corruption", v); DISPATCH_CRASH("Voucher corruption"); } - TAILQ_INSERT_TAIL(_vouchers(kv), v, v_list); + TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list); _vouchers_lock_unlock(); } @@ -221,7 +229,7 @@ _voucher_remove(voucher_t v) // check for resurrection race with _voucher_find_and_retain if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 && _TAILQ_IS_ENQUEUED(v, v_list)) { - TAILQ_REMOVE(_vouchers(kv), v, v_list); + TAILQ_REMOVE(_vouchers_head(kv), v, v_list); _TAILQ_MARK_NOT_ENQUEUED(v, v_list); v->v_list.tqe_next = (void*)~0ull; } @@ -315,11 +323,20 @@ voucher_replace_default_voucher(void) #endif } +static inline _voucher_atm_t +_voucher_get_atm(voucher_t voucher) +{ + _voucher_atm_t vatm; + vatm = voucher && voucher->v_atm ? 
voucher->v_atm : _voucher_task_atm; + return vatm; +} + static inline mach_voucher_t _voucher_get_atm_mach_voucher(voucher_t voucher) { - _voucher_activity_t act = _voucher_activity_get(voucher); - return _voucher_activity_get_atm_mach_voucher(act); + _voucher_atm_t vatm = _voucher_get_atm(voucher); + mach_voucher_t kv = vatm ? vatm->vatm_kvoucher : MACH_VOUCHER_NULL; + return kv; } mach_voucher_t @@ -463,6 +480,7 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv) } voucher_activity_id_t va_id = 0, va_base_id = 0; _voucher_activity_t act = NULL; + _voucher_atm_t vatm = NULL; if (activities) { va_id = *(voucher_activity_id_t*)content; act = _voucher_activity_copy_from_mach_voucher(rkv, va_id); @@ -474,8 +492,12 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv) activities++; va_base_id = act->va_id; } + if (act) { + vatm = _voucher_atm_retain(act->va_atm); + } } v = _voucher_alloc(activities, priority, 0); + v->v_atm = vatm; v->v_activity = act; voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); if (activities && va_base_id) { @@ -514,6 +536,7 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, if (activities) { if (ov->v_activity) { v->v_activity = _voucher_activity_retain(ov->v_activity); + v->v_atm = _voucher_atm_retain(ov->v_atm); } memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), activities * sizeof(voucher_activity_id_t)); @@ -584,6 +607,7 @@ _voucher_create_without_importance(voucher_t ov) if (activities) { if (ov->v_activity) { v->v_activity = _voucher_activity_retain(ov->v_activity); + v->v_atm = _voucher_atm_retain(ov->v_atm); } memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), activities * sizeof(voucher_activity_id_t)); @@ -603,6 +627,46 @@ _voucher_create_without_importance(voucher_t ov) return v; } +voucher_t +_voucher_create_accounting_voucher(voucher_t ov) +{ + // Nothing to do unless the old voucher has a kernel voucher. If it does + // doesn't, it can't have any accounting attributes. 
+ if (!ov || !ov->v_kvoucher) return NULL; + kern_return_t kr = KERN_SUCCESS; + mach_voucher_t okv, kv = MACH_VOUCHER_NULL; + okv = ov->v_ipc_kvoucher ? ov->v_ipc_kvoucher : ov->v_kvoucher; +#if VOUCHER_USE_ATTR_BANK + const mach_voucher_attr_recipe_data_t accounting_copy_recipe = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = okv, + }; + kr = _voucher_create_mach_voucher(&accounting_copy_recipe, + sizeof(accounting_copy_recipe), &kv); +#endif + if (dispatch_assume_zero(kr) || !kv){ + return NULL; + } + voucher_t v = _voucher_find_and_retain(kv); + if (v) { + _dispatch_voucher_debug("kvoucher[0x%08x] find accounting voucher " + "from voucher[%p]", v, kv, ov); + _voucher_dealloc_mach_voucher(kv); + return v; + } + v = _voucher_alloc(0, 0, 0); + v->v_ipc_kvoucher = v->v_kvoucher = kv; + if (kv == okv) { + v->v_kvbase = _voucher_retain(ov); + _voucher_dealloc_mach_voucher(kv); // borrow base reference + } + _voucher_insert(v); + _dispatch_voucher_debug("kvoucher[0x%08x] create accounting voucher " + "from voucher[%p]", v, kv, ov); + return v; +} + voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { @@ -692,7 +756,11 @@ _voucher_dispose(voucher_t voucher) _voucher_activity_release(voucher->v_activity); voucher->v_activity = NULL; } - voucher->v_has_priority= 0; + if (voucher->v_atm) { + _voucher_atm_release(voucher->v_atm); + voucher->v_atm = NULL; + } + voucher->v_has_priority = 0; voucher->v_activities = 0; #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = 0; @@ -773,7 +841,7 @@ void _voucher_init(void) { _voucher_libkernel_init(); - char *e, *end; + char *e; unsigned int i; for (i = 0; i < VL_HASH_SIZE; i++) { TAILQ_INIT(&_vouchers[i]); @@ -796,21 +864,6 @@ _voucher_init(void) _voucher_activity_mode = mode; if (_voucher_activity_disabled()) return; - e = getenv("LIBDISPATCH_ACTIVITY_ATM_SUBID_BITS"); - if (e) { - unsigned long v = strtoul(e, &end, 0); - if (v && !*end) { - 
_voucher_atm_subid_bits = v; - } - } - e = getenv("LIBDISPATCH_ACTIVITY_ATM_COLLECT_THRESHOLD"); - if (e) { - unsigned long v = strtoul(e, &end, 0); - if (v && v < LONG_MAX/2 && !*end) { - _voucher_atm_collect_threshold = - VATM_COLLECT_THRESHOLD_VALUE((long)v); - } - } // default task activity bool default_task_activity = DISPATCH_DEBUG; e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY"); @@ -851,24 +904,12 @@ _Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64, _Static_assert(sizeof(struct _voucher_activity_buffer_header_s) <= sizeof(struct _voucher_activity_tracepoint_s), "Buffer header too large"); -_Static_assert(offsetof(struct _voucher_activity_s, va_flags2) == - sizeof(struct _voucher_activity_tracepoint_s), - "Extended activity object misaligned"); #if __LP64__ -_Static_assert(sizeof(struct _voucher_activity_s) == - 3 * sizeof(struct _voucher_activity_tracepoint_s), - "Activity object too large"); -_Static_assert(offsetof(struct _voucher_activity_s, va_flags3) == - 2 * sizeof(struct _voucher_activity_tracepoint_s), - "Extended activity object misaligned"); -_Static_assert(offsetof(struct _voucher_atm_s, vatm_activities_lock) % 64 == 0, - "Bad ATM padding"); +_Static_assert(offsetof(struct _voucher_activity_s, va_buffers_lock) % 64 == 0, + "Bad activity padding"); _Static_assert(sizeof(struct _voucher_atm_s) <= 128, "ATM too large"); #else -_Static_assert(sizeof(struct _voucher_activity_s) == - 2 * sizeof(struct _voucher_activity_tracepoint_s), - "Activity object too large"); _Static_assert(sizeof(struct _voucher_atm_s) <= 64, "ATM too large"); #endif @@ -880,41 +921,15 @@ _Static_assert(sizeof(struct _voucher_activity_metadata_s) <= "Metadata too large"); _Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0, "Bad metadata bitmap size"); -_Static_assert(offsetof(struct _voucher_activity_metadata_s, - vam_atm_mbox_bitmap) % 64 == 0, - "Bad metadata padding"); -_Static_assert(offsetof(struct _voucher_activity_metadata_s, - 
vam_base_atm_subid) % 64 == 0, - "Bad metadata padding"); -_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_base_atm_lock) - % 32 == 0, - "Bad metadata padding"); -_Static_assert(offsetof(struct _voucher_activity_metadata_s, vam_atms) % 64 ==0, - "Bad metadata padding"); -_Static_assert(sizeof(_voucher_activity_bitmap_t) * 8 * - sizeof(atm_mailbox_offset_t) <= - sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata), - "Bad kernel metadata bitmap"); -_Static_assert(sizeof(atm_mailbox_offset_t) == 2 * sizeof(atm_subaid32_t), - "Bad kernel ATM mailbox sizes"); #endif -static const size_t _voucher_atm_mailboxes = - sizeof(((_voucher_activity_metadata_t)NULL)->vam_kernel_metadata) / - sizeof(atm_mailbox_offset_t); - #define va_buffers_lock(va) (&(va)->va_buffers_lock) -#define vatm_activities_lock(vatm) (&(vatm)->vatm_activities_lock) #define vatm_activities(vatm) (&(vatm)->vatm_activities) -#define vatm_used_activities(vatm) (&(vatm)->vatm_used_activities) -#define vam_base_atm_lock() (&_voucher_activity_heap->vam_base_atm_lock) -#define vam_nested_atm_lock() (&_voucher_activity_heap->vam_nested_atm_lock) #define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock) #define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock) #define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash]) #define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash]) #define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap) -#define vam_atm_mbox_bitmap() (_voucher_activity_heap->vam_atm_mbox_bitmap) #define vam_pressure_locked_bitmap() \ (_voucher_activity_heap->vam_pressure_locked_bitmap) #define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \ @@ -925,12 +940,14 @@ static _voucher_activity_t _voucher_activity_create_with_atm( voucher_activity_trace_id_t trace_id, uint64_t location, _voucher_activity_buffer_header_t buffer); static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t 
atm_id); -static voucher_activity_id_t _voucher_atm_nested_atm_id_make(void); +static void _voucher_activity_firehose_wait(_voucher_activity_t act, + _voucher_activity_buffer_header_t buffer); DISPATCH_ALWAYS_INLINE static inline uint32_t _voucher_default_activity_buffer_limit() { +#if 0 // FIXME: tune buffer chain sizes switch (_voucher_activity_mode) { case voucher_activity_mode_debug: case voucher_activity_mode_stream: @@ -938,19 +955,16 @@ _voucher_default_activity_buffer_limit() // (twice as much as non-default activities) return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1; } -#if TARGET_OS_EMBEDDED - // Low-profile modes: Default activity can use a total of 3 buffers. - return 2; -#else - // Low-profile modes: Default activity can use a total of 8 buffers. - return 7; #endif + // Low-profile modes: Default activity can use a total of 4 buffers. + return 3; } DISPATCH_ALWAYS_INLINE static inline uint32_t _voucher_activity_buffer_limit() { +#if 0 // FIXME: tune buffer chain sizes switch (_voucher_activity_mode) { case voucher_activity_mode_debug: case voucher_activity_mode_stream: @@ -958,13 +972,9 @@ _voucher_activity_buffer_limit() // of the entire heap. return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1; } -#if TARGET_OS_EMBEDDED +#endif // Low-profile modes: Each activity can use a total of 2 buffers. return 1; -#else - // Low-profile modes: Each activity can use a total of 4 buffers. 
- return 3; -#endif } // The two functions above return the number of *additional* buffers activities @@ -1017,7 +1027,7 @@ _voucher_activity_bitmap_set_first_unset_bit_upto( return index; } -DISPATCH_ALWAYS_INLINE +DISPATCH_ALWAYS_INLINE DISPATCH_UNUSED static inline size_t _voucher_activity_bitmap_set_first_unset_bit( _voucher_activity_bitmap_t volatile bitmap) @@ -1025,7 +1035,6 @@ _voucher_activity_bitmap_set_first_unset_bit( return _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap, UINT_MAX); } - DISPATCH_ALWAYS_INLINE static inline void _voucher_activity_bitmap_clear_bit( @@ -1067,7 +1076,6 @@ _voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED) task_trace_memory_info_data_t trace_memory_info = { .user_memory_address = vm_addr, .buffer_size = vm_size, - .mailbox_array_size = sizeof(heap->vam_kernel_metadata), }; kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO, (task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT); @@ -1080,38 +1088,29 @@ _voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED) return; } heap = (void*)vm_addr; - heap->vam_self_metadata.vasm_baseaddr = (void*)vm_addr; - heap->vam_buffer_bitmap[0] = 0xf; // first four buffers are reserved + heap->vasm_baseaddr = (void*)vm_addr; + heap->vam_buffer_bitmap[0] = 0x7; // first three buffers are reserved uint32_t i; for (i = 0; i < _voucher_activity_hash_size; i++) { TAILQ_INIT(&heap->vam_activities[i]); TAILQ_INIT(&heap->vam_atms[i]); } - uint32_t subid_max = VATM_SUBID_MAX; - if (_voucher_atm_subid_bits && - _voucher_atm_subid_bits < VATM_SUBID_MAXBITS) { - subid_max = MIN(VATM_SUBID_BITS2MAX(_voucher_atm_subid_bits), - VATM_SUBID_MAX); - } - heap->vam_base_atm_subid_max = subid_max; - _voucher_activity_lock_init(&heap->vam_base_atm_lock); - _voucher_activity_lock_init(&heap->vam_nested_atm_lock); _voucher_activity_lock_init(&heap->vam_atms_lock); _voucher_activity_lock_init(&heap->vam_activities_lock); _voucher_activity_heap = heap; _voucher_atm_t vatm = 
_voucher_atm_create(0, 0); dispatch_assert(vatm->vatm_kvoucher); - heap->vam_default_activity_atm = vatm; - _voucher_activity_buffer_header_t buffer = vam_buffer(3); // reserved index + _voucher_atm_retain(vatm); + + _voucher_activity_buffer_header_t buffer = vam_buffer(2); // reserved index // consumes vatm reference: - _voucher_activity_t va = _voucher_activity_create_with_atm(vatm, - VATM_ACTID(vatm, _voucher_default_activity_subid), 0, 0, buffer); + _voucher_activity_t va = _voucher_activity_create_with_atm(vatm, 0, 0, 0, + buffer); dispatch_assert(va); va->va_buffer_limit = _voucher_default_activity_buffer_limit(); _voucher_activity_default = va; - heap->vam_base_atm = _voucher_atm_create(0, 0); - heap->vam_nested_atm_id = _voucher_atm_nested_atm_id_make(); + _voucher_task_atm = vatm; } static void @@ -1135,6 +1134,32 @@ voucher_activity_get_metadata_buffer(size_t *length) return _voucher_activity_heap->vam_client_metadata; } +static _voucher_activity_buffer_hook_t _voucher_activity_buffer_hook; + +void +voucher_activity_buffer_hook_install_4libtrace( + _voucher_activity_buffer_hook_t hook) +{ + if (dispatch_atomic_cmpxchg(&_voucher_activity_buffer_hook, NULL, + (void*)hook, release)) return; + DISPATCH_CLIENT_CRASH("_voucher_activity_buffer_hook_install_4libtrace " \ + "called more than once"); +} + +#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG +#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) \ + _dispatch_debug("activity buffer %s (%p)", #reason, buffer) +#else +#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) +#endif + +#define VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(reason, buffer) \ + if (buffer) { VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer); \ + if (slowpath(_voucher_activity_buffer_hook)) { \ + _voucher_activity_buffer_hook( \ + _voucher_activity_buffer_hook_reason_##reason, (buffer)); \ + } } + DISPATCH_ALWAYS_INLINE static inline _voucher_activity_buffer_header_t _voucher_activity_heap_buffer_alloc(void) @@ -1266,16 +1291,17 @@ 
_voucher_activity_heap_pressure_normal(void) DISPATCH_ALWAYS_INLINE static inline void _voucher_activity_buffer_init(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer, bool reuse) + _voucher_activity_buffer_header_t buffer, bool initial) { - if (!reuse) { - buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header; - buffer->vabh_activity_id = act->va_id; - } - buffer->vabh_timestamp = _voucher_activity_timestamp(); - buffer->vabh_next_tracepoint_idx = 1; - buffer->vabh_sequence_no = dispatch_atomic_inc2o(act, va_max_sequence_no, - relaxed); + _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer; + _voucher_activity_tracepoint_init_with_id(vat, act->va_trace_id, + act->va_location, !initial); + buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header | + _voucher_activity_trace_flag_activity | + (initial ? _voucher_activity_trace_flag_start : 0); + buffer->vabh_activity_id = act->va_id; + buffer->vabh_pos.vabp_atomic_pos = 0; + buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx = 1; } static _voucher_activity_buffer_header_t @@ -1285,27 +1311,29 @@ _voucher_activity_buffer_alloc_slow(_voucher_activity_t act, _voucher_activity_buffer_header_t buffer; _voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking buffer = act->va_current_buffer; - if (buffer != current) goto out; + if (buffer != current) { + _voucher_activity_lock_unlock(va_buffers_lock(act)); + return buffer; + } buffer = TAILQ_FIRST(&act->va_buffers); - if (buffer) { - _voucher_activity_buffer_init(act, buffer, true); - if (buffer != TAILQ_LAST(&act->va_buffers, + if (buffer != TAILQ_LAST(&act->va_buffers, _voucher_activity_buffer_list_s)) { - TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); - TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); - } + TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); + TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); } - if (!dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, 
buffer, + _voucher_activity_lock_unlock(va_buffers_lock(act)); + if (_voucher_activity_buffer_is_full(buffer)) { + _voucher_activity_firehose_wait(act, buffer); + } + if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer, ¤t, release)) { - if (buffer) { - TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); - _voucher_activity_heap_buffer_free(buffer); + if (_voucher_activity_buffer_mark_full(current)) { + _voucher_activity_firehose_push(act, current); } + _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer); + } else { buffer = current; } -out: - _voucher_activity_lock_unlock(va_buffers_lock(act)); - _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer); return buffer; } @@ -1314,8 +1342,14 @@ _voucher_activity_buffer_alloc(_voucher_activity_t act, _voucher_activity_buffer_header_t current) { _voucher_activity_buffer_header_t buffer = NULL; - if (act->va_max_sequence_no < act->va_buffer_limit) { + if (act->va_buffer_count < act->va_buffer_limit) { buffer = _voucher_activity_heap_buffer_alloc(); + if (buffer && dispatch_atomic_inc2o(act, va_buffer_count, relaxed) > + act->va_buffer_limit) { + dispatch_atomic_dec2o(act, va_buffer_count, relaxed); + _voucher_activity_heap_buffer_free(buffer); + buffer = NULL; + } } if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current); _voucher_activity_buffer_init(act, buffer, false); @@ -1324,11 +1358,15 @@ _voucher_activity_buffer_alloc(_voucher_activity_t act, _voucher_activity_lock_lock(va_buffers_lock(act)); TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); _voucher_activity_lock_unlock(va_buffers_lock(act)); + if (_voucher_activity_buffer_mark_full(current)) { + _voucher_activity_firehose_push(act, current); + } + _dispatch_voucher_activity_debug("buffer alloc %p", act, buffer); } else { + dispatch_atomic_dec2o(act, va_buffer_count, relaxed); _voucher_activity_heap_buffer_free(buffer); buffer = current; } - _dispatch_voucher_activity_debug("buffer alloc %p", act, 
buffer); return buffer; } @@ -1347,36 +1385,32 @@ _voucher_activity_buffer_alloc(_voucher_activity_t act, } } while (0); static void _voucher_activity_dispose(_voucher_activity_t act); -static _voucher_activity_t _voucher_atm_activity_mark_used( - _voucher_activity_t act); -static void _voucher_atm_activity_mark_unused(_voucher_activity_t act); static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id); static inline void _voucher_atm_release(_voucher_atm_t vatm); -static void _voucher_atm_activity_insert(_voucher_atm_t vatm, - _voucher_activity_t act); -static void _voucher_atm_activity_remove(_voucher_activity_t act); static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv); DISPATCH_ALWAYS_INLINE static inline bool -_voucher_activity_copy(_voucher_activity_t act) +_voucher_activity_try_retain(_voucher_activity_t act) { - int use_cnt = dispatch_atomic_inc2o(act, va_use_count, relaxed); + // not using _os_object_refcnt* because we don't need barriers: + // activities are immutable and are in a hash table with a lock + int use_cnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed); _dispatch_voucher_activity_debug("retain -> %d", act, use_cnt + 1); if (slowpath(use_cnt < 0)) { _dispatch_voucher_activity_debug("overrelease", act); DISPATCH_CRASH("Activity overrelease"); } - return (use_cnt == 0); + return use_cnt > 0; } DISPATCH_ALWAYS_INLINE static inline _voucher_activity_t _voucher_activity_retain(_voucher_activity_t act) { - if (_voucher_activity_copy(act)) { - _dispatch_voucher_activity_debug("invalid resurrection", act); - DISPATCH_CRASH("Invalid activity resurrection"); + if (slowpath(!_voucher_activity_try_retain(act))) { + _dispatch_voucher_activity_debug("resurrection", act); + DISPATCH_CRASH("Activity resurrection"); } return act; } @@ -1385,7 +1419,9 @@ DISPATCH_ALWAYS_INLINE static inline void _voucher_activity_release(_voucher_activity_t act) { - int use_cnt = dispatch_atomic_dec2o(act, va_use_count, relaxed); + // not using _os_object_refcnt* 
because we don't need barriers: + // activities are immutable and are in a hash table with a lock + int use_cnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed); _dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1); if (fastpath(use_cnt >= 0)) { return; @@ -1394,83 +1430,53 @@ _voucher_activity_release(_voucher_activity_t act) _dispatch_voucher_activity_debug("overrelease", act); DISPATCH_CRASH("Activity overrelease"); } - return _voucher_atm_activity_mark_unused(act); -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_t -_voucher_activity_atm_retain(_voucher_activity_t act) -{ - int refcnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("atm retain -> %d", act, refcnt + 1); - if (slowpath(refcnt <= 0)) { - _dispatch_voucher_activity_debug("atm resurrection", act); - DISPATCH_CRASH("Activity ATM resurrection"); - } - return act; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_atm_release(_voucher_activity_t act) -{ - int refcnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("atm release -> %d", act, refcnt + 1); - if (fastpath(refcnt >= 0)) { - return; - } - if (slowpath(refcnt < -1)) { - _dispatch_voucher_activity_debug("atm overrelease", act); - DISPATCH_CRASH("Activity ATM overrelease"); - } - return _voucher_activity_dispose(act); -} - -static inline _voucher_activity_t -_voucher_activity_get(voucher_t v) -{ - _voucher_activity_t act; - act = v && v->v_activity ? 
v->v_activity : _voucher_activity_default; - return act; + _voucher_activity_remove(act); + _voucher_activity_dispose(act); } static _voucher_activity_t -_voucher_activity_find(voucher_activity_id_t va_id, uint32_t hash) +_voucher_activity_find_and_retain(voucher_activity_id_t va_id, uint32_t hash) { + // not using _os_object_refcnt* because we don't need barriers: + // activities are immutable and are in a hash table with a lock + // // assumes vam_activities_lock held _voucher_activity_t act; - TAILQ_FOREACH(act, vam_activities(hash), va_list){ - if (act->va_id == va_id) break; + TAILQ_FOREACH(act, vam_activities(hash), va_list) { + if (act->va_id == va_id) { + if (fastpath(_voucher_activity_try_retain(act))) { + return act; + } + + // disallow resurrection + dispatch_atomic_dec2o(act, va_refcnt, relaxed); + _dispatch_voucher_activity_debug("undo resurrection", act); + } } - return act; + return NULL; } static _voucher_activity_t _voucher_activity_copy_from_id(voucher_activity_id_t va_id) { - bool resurrect = false; uint32_t hash = VACTID_HASH(va_id); _voucher_activity_lock_lock(vam_activities_lock()); - _voucher_activity_t act = _voucher_activity_find(va_id, hash); + _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); if (act) { - resurrect = _voucher_activity_copy(act); _dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id); } _voucher_activity_lock_unlock(vam_activities_lock()); - if (resurrect) return _voucher_atm_activity_mark_used(act); return act; } static _voucher_activity_t _voucher_activity_try_insert(_voucher_activity_t act_new) { - bool resurrect = false; voucher_activity_id_t va_id = act_new->va_id; uint32_t hash = VACTID_HASH(va_id); _voucher_activity_lock_lock(vam_activities_lock()); - _voucher_activity_t act = _voucher_activity_find(va_id, hash); + _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); if (act) { - resurrect = _voucher_activity_copy(act); _dispatch_voucher_activity_debug("try 
insert: failed (%p)", act,act_new); } else { if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) { @@ -1481,31 +1487,25 @@ _voucher_activity_try_insert(_voucher_activity_t act_new) _dispatch_voucher_activity_debug("try insert: succeeded", act_new); } _voucher_activity_lock_unlock(vam_activities_lock()); - if (resurrect) return _voucher_atm_activity_mark_used(act); return act; } -static bool -_voucher_activity_try_remove(_voucher_activity_t act) +static void +_voucher_activity_remove(_voucher_activity_t act) { - bool r; voucher_activity_id_t va_id = act->va_id; uint32_t hash = VACTID_HASH(va_id); + _voucher_activity_lock_lock(vam_activities_lock()); - if (slowpath(!va_id)) { + if (slowpath(!va_id || !_TAILQ_IS_ENQUEUED(act, va_list))) { _dispatch_voucher_activity_debug("corruption", act); DISPATCH_CRASH("Activity corruption"); } - if ((r = (dispatch_atomic_load2o(act, va_use_count, seq_cst) < 0 && - _TAILQ_IS_ENQUEUED(act, va_list)))) { - TAILQ_REMOVE(vam_activities(hash), act, va_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_list); - act->va_list.tqe_next = (void*)~0ull; - } - _dispatch_voucher_activity_debug("try remove: %s", act, r ? 
"succeeded" : - "failed"); + TAILQ_REMOVE(vam_activities(hash), act, va_list); + _TAILQ_MARK_NOT_ENQUEUED(act, va_list); + act->va_list.tqe_next = (void*)~0ull; + _dispatch_voucher_activity_debug("remove", act); _voucher_activity_lock_unlock(vam_activities_lock()); - return r; } static _voucher_activity_t @@ -1519,39 +1519,30 @@ _voucher_activity_create_with_atm(_voucher_atm_t vatm, _voucher_atm_release(vatm); // consume vatm reference return NULL; } - if (!trace_id) trace_id = _voucher_activity_trace_id_release; - _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer; - _voucher_activity_tracepoint_init_with_id(vat, trace_id, ~1ull); - _voucher_activity_t act = (_voucher_activity_t)buffer; - act->va_flags = _voucher_activity_trace_flag_buffer_header | - _voucher_activity_trace_flag_activity | - _voucher_activity_trace_flag_start | - _voucher_activity_trace_flag_wide_first; - act->vabh_next_tracepoint_idx = sizeof(*act)/sizeof(*vat); - act->va_max_sequence_no = 0; - act->va_id = va_id ? va_id : VATM_ACTID(vatm, 0); - act->va_use_count = 0; + _voucher_activity_t act = _dispatch_calloc(1ul, + sizeof(struct _voucher_activity_s)); + act->va_id = va_id; + act->va_trace_id = trace_id ? 
trace_id : _voucher_activity_trace_id_release; + act->va_location = location; act->va_buffer_limit = _voucher_activity_buffer_limit(); TAILQ_INIT(&act->va_buffers); - act->va_flags2 = _voucher_activity_trace_flag_activity | - _voucher_activity_trace_flag_wide_second; -#if __LP64__ - act->va_flags3 = act->va_flags2; -#endif - act->va_refcnt = 0; - act->va_location = location; act->va_current_buffer = buffer; act->va_atm = vatm; // transfer vatm reference _voucher_activity_lock_init(va_buffers_lock(act)); + if (dispatch_assume_zero(pthread_mutex_init(&act->va_mutex, NULL)) || + dispatch_assume_zero(pthread_cond_init(&act->va_cond, NULL))) { + DISPATCH_CLIENT_CRASH("Could not initialize activity"); + } _TAILQ_MARK_NOT_ENQUEUED(act, va_list); _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list); _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); + + _voucher_activity_buffer_init(act, buffer, true); + TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); _voucher_activity_t actx = _voucher_activity_try_insert(act); if (actx) { _voucher_activity_dispose(act); act = actx; - } else { - _voucher_atm_activity_insert(vatm, act); } _dispatch_voucher_activity_debug("create", act); return act; @@ -1571,20 +1562,56 @@ _voucher_activity_dispose(_voucher_activity_t act) dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)); _voucher_activity_buffer_header_t buffer, tmp; TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) { - _dispatch_voucher_activity_debug("buffer free %p", act, buffer); + if (buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx > 1) { + dispatch_assert(_voucher_activity_buffer_mark_full(buffer)); + _voucher_activity_firehose_push(act, buffer); + } TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); + _dispatch_voucher_activity_debug("buffer free %p", act, buffer); _voucher_activity_heap_buffer_free(buffer); } - buffer = (_voucher_activity_buffer_header_t)act; - _voucher_activity_heap_buffer_free(buffer); + 
(void)dispatch_assume_zero(pthread_mutex_destroy(&act->va_mutex)); + (void)dispatch_assume_zero(pthread_cond_destroy(&act->va_cond)); + free(act); } +DISPATCH_NOINLINE +void +_voucher_activity_firehose_push(_voucher_activity_t act, + _voucher_activity_buffer_header_t buffer) +{ + if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { + DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); + } + _dispatch_voucher_activity_debug("firehose push %p", act, buffer); + // TODO: call firehose_push + VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(full, buffer); + _voucher_activity_buffer_init(act, buffer, false); + if (dispatch_assume_zero(pthread_cond_broadcast(&act->va_cond))) { + DISPATCH_CLIENT_CRASH("Activity corruption: cond_broadcast"); + } + if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { + DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); + } +} + +DISPATCH_NOINLINE static void -_voucher_activity_collect(_voucher_activity_t act) +_voucher_activity_firehose_wait(_voucher_activity_t act, + _voucher_activity_buffer_header_t buffer) { - _dispatch_voucher_activity_debug("collect", act); - if (_voucher_activity_try_remove(act)) { - _voucher_atm_activity_remove(act); + if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { + DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); + } + while (_voucher_activity_buffer_is_full(buffer)) { + _dispatch_voucher_activity_debug("firehose wait %p", act, buffer); + if (dispatch_assume_zero(pthread_cond_wait(&act->va_cond, + &act->va_mutex))){ + DISPATCH_CLIENT_CRASH("Activity corruption: cond_wait"); + } + } + if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { + DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); } } @@ -1611,76 +1638,43 @@ _voucher_activity_copy_from_mach_voucher(mach_voucher_t kv, _voucher_activity_heap_buffer_free(buffer); return NULL; } - if (VACTID_BASEID(va_id) != VATMID2ACTID(atm_id)) va_id = 0; // consumes vatm reference: act = 
_voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer); _dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv); return act; } -#pragma mark - -#pragma mark _voucher_atm_mailbox - -DISPATCH_ALWAYS_INLINE -static inline atm_mailbox_offset_t -_voucher_atm_mailbox_alloc(void) -{ - atm_mailbox_offset_t mailbox_offset = MAILBOX_OFFSET_UNSET; - size_t index; - index = _voucher_activity_bitmap_set_first_unset_bit(vam_atm_mbox_bitmap()); - if (index < NO_BITS_WERE_UNSET) { - mailbox_offset = index * sizeof(atm_mailbox_offset_t); -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("mailbox alloc %zd (%lld)", index, mailbox_offset); -#endif - } - return mailbox_offset; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_atm_mailbox_free(atm_mailbox_offset_t mailbox_offset) -{ - if (mailbox_offset == MAILBOX_OFFSET_UNSET) return; - size_t index = (size_t)mailbox_offset / sizeof(atm_mailbox_offset_t); -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("mailbox free %zd (%lld)", index, mailbox_offset); -#endif - _voucher_activity_bitmap_clear_bit(vam_atm_mbox_bitmap(), index); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_atm_mailbox_set(atm_mailbox_offset_t mailbox_offset, - atm_subaid32_t subaid, bool max_present) -{ - if (mailbox_offset == MAILBOX_OFFSET_UNSET) return false; - char *mailbox_base = (char*)_voucher_activity_heap->vam_kernel_metadata; - atm_subaid32_t *mailbox = (atm_subaid32_t*)(mailbox_base + mailbox_offset); - if (max_present) mailbox++; // second atm_subaid32_t in atm_mailbox_offset_t - if (*mailbox == subaid) return false; - *mailbox = subaid; - return true; -} - #pragma mark - #pragma mark _voucher_atm_t -static bool _voucher_atm_try_remove(_voucher_atm_t vatm); +static void _voucher_atm_remove(_voucher_atm_t vatm); static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister); -static inline void _voucher_atm_collect_if_needed(bool updated); 
DISPATCH_ALWAYS_INLINE -static inline _voucher_atm_t -_voucher_atm_retain(_voucher_atm_t vatm) +static inline bool +_voucher_atm_try_retain(_voucher_atm_t vatm) { - // assumes vam_atms_lock or vam_base_atm_lock held + // not using _os_object_refcnt* because we don't need barriers: + // vouchers atm are immutable and are in a hash table with a lock + // + // assumes vam_atms_lock held int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed); _dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1); if (slowpath(refcnt < 0)) { _dispatch_voucher_atm_debug("overrelease", vatm); DISPATCH_CRASH("ATM overrelease"); } + return refcnt > 0; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_atm_t +_voucher_atm_retain(_voucher_atm_t vatm) +{ + if (slowpath(!_voucher_atm_try_retain(vatm))) { + _dispatch_voucher_atm_debug("resurrection", vatm); + DISPATCH_CRASH("ATM resurrection"); + } return vatm; } @@ -1688,6 +1682,8 @@ DISPATCH_ALWAYS_INLINE static inline void _voucher_atm_release(_voucher_atm_t vatm) { + // not using _os_object_refcnt* because we don't need barriers: + // vouchers atm are immutable are into a hash table with a lock int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); _dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1); if (fastpath(refcnt >= 0)) { @@ -1697,20 +1693,30 @@ _voucher_atm_release(_voucher_atm_t vatm) _dispatch_voucher_atm_debug("overrelease", vatm); DISPATCH_CRASH("ATM overrelease"); } - if (_voucher_atm_try_remove(vatm)) { - _voucher_atm_dispose(vatm, true); - } + _voucher_atm_remove(vatm); + _voucher_atm_dispose(vatm, true); } static _voucher_atm_t -_voucher_atm_find(atm_aid_t atm_id, uint32_t hash) +_voucher_atm_find_and_retain(atm_aid_t atm_id, uint32_t hash) { + // not using _os_object_refcnt* because we don't need barriers: + // vouchers atm are immutable are into a hash table with a lock + // // assumes vam_atms_lock held _voucher_atm_t vatm; TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){ - if 
(vatm->vatm_id == atm_id) break; + if (vatm->vatm_id == atm_id) { + if (fastpath(_voucher_atm_try_retain(vatm))) { + return vatm; + } + + // disallow resurrection + dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); + _dispatch_voucher_atm_debug("undo resurrection", vatm); + } } - return vatm; + return NULL; } static _voucher_atm_t @@ -1718,9 +1724,8 @@ _voucher_atm_copy(atm_aid_t atm_id) { uint32_t hash = VATMID_HASH(atm_id); _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash); + _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); if (vatm) { - _voucher_atm_retain(vatm); _dispatch_voucher_atm_debug("copy", vatm); } _voucher_activity_lock_unlock(vam_atms_lock()); @@ -1733,9 +1738,8 @@ _voucher_atm_try_insert(_voucher_atm_t vatm_new) atm_aid_t atm_id = vatm_new->vatm_id; uint32_t hash = VATMID_HASH(atm_id); _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find(atm_id, hash); + _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); if (vatm) { - _voucher_atm_retain(vatm); _dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new); } else { if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) { @@ -1749,240 +1753,42 @@ _voucher_atm_try_insert(_voucher_atm_t vatm_new) return vatm; } -static bool -_voucher_atm_try_remove(_voucher_atm_t vatm) +static void +_voucher_atm_remove(_voucher_atm_t vatm) { - bool r; atm_aid_t atm_id = vatm->vatm_id; uint32_t hash = VATMID_HASH(atm_id); + _voucher_activity_lock_lock(vam_atms_lock()); - if (slowpath(!atm_id)) { + if (slowpath(!atm_id || !_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { _dispatch_voucher_atm_debug("corruption", vatm); DISPATCH_CRASH("ATM corruption"); } - if ((r = (dispatch_atomic_load2o(vatm, vatm_refcnt, seq_cst) < 0 && - _TAILQ_IS_ENQUEUED(vatm, vatm_list)))) { - TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list); - _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list); - vatm->vatm_list.tqe_next = (void*)~0ull; - } 
- _dispatch_voucher_atm_debug("try remove: %s", vatm, r ? "succeeded" : - "failed"); + TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list); + _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list); + vatm->vatm_list.tqe_next = (void*)~0ull; + _dispatch_voucher_atm_debug("remove", vatm); _voucher_activity_lock_unlock(vam_atms_lock()); - return r; -} - -static bool -_voucher_atm_update_mailbox(_voucher_atm_t vatm) -{ - // Update kernel mailbox with largest allocated subaid for this atm_id - // assumes atm_activities_lock held - _voucher_activity_t act = TAILQ_LAST(vatm_activities(vatm), - _voucher_atm_activities_s); - atm_subaid32_t subaid = act ? VACTID_SUBID(act->va_id) : 0; - bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, true); - if (r) { - _dispatch_voucher_atm_debug("update max-present subaid 0x%x", vatm, - subaid); - } - return r; -} - -static bool -_voucher_atm_update_used_mailbox(_voucher_atm_t vatm) -{ - // Update kernel mailbox with smallest in-use subaid for this atm_id - // assumes atm_activities_lock held - _voucher_activity_t act = TAILQ_FIRST(vatm_used_activities(vatm)); - atm_subaid32_t subaid = act ? 
VACTID_SUBID(act->va_id) : ATM_SUBAID32_MAX; - bool r = _voucher_atm_mailbox_set(vatm->vatm_mailbox_offset, subaid, false); - if (r) { - _dispatch_voucher_atm_debug("update min-used subaid 0x%x", vatm, - subaid); - } - return r; -} - -static void -_voucher_atm_activity_insert(_voucher_atm_t vatm, _voucher_activity_t act) -{ - _voucher_activity_lock_lock(vatm_activities_lock(vatm)); - if (!_TAILQ_IS_ENQUEUED(act, va_atm_list)) { - _voucher_activity_ordered_insert(act, vatm_activities(vatm), - va_atm_list); - _voucher_atm_update_mailbox(vatm); - } - if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { - _voucher_activity_ordered_insert(act, vatm_used_activities(vatm), - va_atm_used_list); - _voucher_atm_update_used_mailbox(vatm); - } - _dispatch_voucher_activity_debug("atm insert", act); - _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); -} - -static void -_voucher_atm_activity_remove(_voucher_activity_t act) -{ - _voucher_atm_t vatm = act->va_atm; - _voucher_activity_lock_lock(vatm_activities_lock(vatm)); - _dispatch_voucher_activity_debug("atm remove", act); - if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { - TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_used_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); - _voucher_atm_update_used_mailbox(vatm); - } - if (_TAILQ_IS_ENQUEUED(act, va_atm_list)) { - TAILQ_REMOVE(vatm_activities(vatm), act, va_atm_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list); - _voucher_atm_update_mailbox(vatm); - // Balance initial creation refcnt. 
Caller must hold additional - // reference to ensure this does not release vatm before the unlock, - // see _voucher_atm_activity_collect - _voucher_activity_atm_release(act); - } - _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); -} - -static _voucher_activity_t -_voucher_atm_activity_mark_used(_voucher_activity_t act) -{ - _voucher_atm_t vatm = act->va_atm; - _voucher_activity_lock_lock(vatm_activities_lock(vatm)); - if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { - _voucher_activity_ordered_insert(act, vatm_used_activities(vatm), - va_atm_used_list); - _voucher_atm_update_used_mailbox(vatm); - _dispatch_voucher_activity_debug("mark used", act); - } - _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); - return act; -} - -static void -_voucher_atm_activity_mark_unused(_voucher_activity_t act) -{ - bool atm_collect = false, updated = false; - _voucher_atm_t vatm = act->va_atm; - _voucher_activity_lock_lock(vatm_activities_lock(vatm)); - if (_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { - _dispatch_voucher_activity_debug("mark unused", act); - TAILQ_REMOVE(&vatm->vatm_used_activities, act, va_atm_used_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); - atm_collect = true; - _voucher_atm_retain(vatm); - updated = _voucher_atm_update_used_mailbox(vatm); - } - _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); - if (atm_collect) { - _voucher_atm_release(vatm); - _voucher_atm_collect_if_needed(updated); - } -} - -static void -_voucher_atm_activity_collect(_voucher_atm_t vatm, atm_subaid32_t min_subaid) -{ - _dispatch_voucher_atm_debug("collect min subaid 0x%x", vatm, min_subaid); - voucher_activity_id_t min_va_id = VATM_ACTID(vatm, min_subaid); - _voucher_activity_t act; - do { - _voucher_activity_lock_lock(vatm_activities_lock(vatm)); - TAILQ_FOREACH(act, vatm_activities(vatm), va_atm_list) { - if (act->va_id >= min_va_id) { - act = NULL; - break; - } - if (!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)) { - 
_voucher_activity_atm_retain(act); - break; - } - } - _voucher_activity_lock_unlock(vatm_activities_lock(vatm)); - if (act) { - _voucher_activity_collect(act); - _voucher_activity_atm_release(act); - } - } while (act); } DISPATCH_NOINLINE static void -_voucher_atm_collect(void) +_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd) { - _voucher_atm_t vatms[_voucher_atm_mailboxes], vatm; - atm_aid_t aids[_voucher_atm_mailboxes]; - mach_atm_subaid_t subaids[_voucher_atm_mailboxes]; - uint32_t i, a = 0, s; + mach_voucher_t kv = _voucher_get_atm_mach_voucher(_voucher_get()); + if (!kv) return; - _voucher_activity_lock_lock(vam_atms_lock()); - for (i = 0; i < _voucher_activity_hash_size; i++) { - TAILQ_FOREACH(vatm, vam_atms(i), vatm_list){ - if (vatm == _voucher_activity_heap->vam_default_activity_atm || - vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) continue; - _dispatch_voucher_atm_debug("find min subaid", vatm); - vatms[a] = _voucher_atm_retain(vatm); - aids[a] = vatm->vatm_id; - if (++a == _voucher_atm_mailboxes) goto out; + mach_atm_subaid_t subaid = 0; + voucher_t v = _voucher_get(); + if (v) { + unsigned int activities = v->v_activities; + voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); + if (activities) { + subaid = activity_ids[0]; } } -out: - _voucher_activity_lock_unlock(vam_atms_lock()); - if (!a) return; - kern_return_t kr; - mach_voucher_t kv = vatms[0]->vatm_kvoucher; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&aids; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(atm_aid_t) * a; - mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaids; - mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t)*a; - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_FIND_MIN_SUB_AID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - s = kvc_out_size / sizeof(mach_atm_subaid_t); -#if DISPATCH_DEBUG && 
DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("found min subaids (%u out of %u)", s, a); -#endif - for (i = 0; i < a; i++) { - if (i < s) _voucher_atm_activity_collect(vatms[i], - (atm_subaid32_t)subaids[i]); - _voucher_atm_release(vatms[i]); - } -} - -static inline void -_voucher_atm_collect_if_needed(bool updated) -{ - long level; - if (updated) { - level = dispatch_atomic_add(&_voucher_atm_collect_level, 2ul, relaxed); - } else { - level = _voucher_atm_collect_level; - if (!level) return; - } - if (level & 1 || level <= _voucher_atm_collect_threshold) return; - if (!dispatch_atomic_cmpxchg(&_voucher_atm_collect_level, level, level + 1, - acquire)) return; -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("atm collect: reached level %ld", level/2); -#endif - if (slowpath(level < 0)) { - DISPATCH_CRASH("ATM collection level corruption"); - } - _voucher_atm_collect(); - dispatch_atomic_sub(&_voucher_atm_collect_level, level + 1, release); -} - -DISPATCH_NOINLINE -static void -_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd) -{ - _voucher_activity_t act = _voucher_activity_get(_voucher_get()); - mach_voucher_t kv = _voucher_activity_get_atm_mach_voucher(act); - if (!kv) return; kern_return_t kr; - mach_atm_subaid_t subaid = VACTID_SUBID(act->va_id); mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid; mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t); mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid; @@ -2030,88 +1836,61 @@ _voucher_atm_mach_voucher_create(atm_aid_t *atm_id_ptr) return kv; } -static void -_voucher_atm_mailbox_mach_voucher_register(_voucher_atm_t vatm, - mach_voucher_t kv) +static mach_voucher_t +_voucher_atm_mach_voucher_copy(mach_voucher_t akv) { - _dispatch_voucher_atm_debug("mailbox register %lld with kvoucher[0x%08x]", - vatm, vatm->vatm_mailbox_offset, kv); kern_return_t kr; - mach_voucher_t akv; - atm_mailbox_offset_t offset = 
vatm->vatm_mailbox_offset; - mach_voucher_attr_recipe_t vr; - size_t vr_size; - static const mach_voucher_attr_recipe_data_t atm_register_recipe = { + mach_voucher_t kv; + const mach_voucher_attr_recipe_data_t atm_copy_recipe = { .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_ATM_REGISTER, - .content_size = sizeof(offset), + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = akv, }; - vr_size = sizeof(atm_register_recipe) + atm_register_recipe.content_size; - vr = alloca(vr_size); - *vr = atm_register_recipe; - vr->previous_voucher = kv; - memcpy(&vr->content, &offset, sizeof(offset)); - kr = _voucher_create_mach_voucher(vr, vr_size, &akv); + kr = _voucher_create_mach_voucher(&atm_copy_recipe, + sizeof(atm_copy_recipe), &kv); if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not register ATM ID"); + DISPATCH_CLIENT_CRASH("Could not copy ATM mach voucher"); } - if (!vatm->vatm_kvoucher) { - vatm->vatm_kvoucher = akv; - } else { -#if !RDAR_17510224 - if (akv != vatm->vatm_kvoucher) { - DISPATCH_CRASH("Unexpected mach voucher returned by ATM ID " - "registration"); - } - _voucher_dealloc_mach_voucher(akv); -#else - DISPATCH_CRASH("Registered invalid ATM object"); -#endif - } - _dispatch_voucher_atm_debug("mailbox registered %lld", vatm, - vatm->vatm_mailbox_offset); + _dispatch_kvoucher_debug("copy atm voucher from [0x%08x]", kv, akv); + return kv; } static void -_voucher_atm_mailbox_register(_voucher_atm_t vatm) +_voucher_atm_register(_voucher_atm_t vatm) { mach_voucher_t kv = vatm->vatm_kvoucher; if (!kv) return; -#if !RDAR_17510224 - _voucher_atm_mailbox_mach_voucher_register(vatm, kv); -#else // RDAR_17510224 - _dispatch_voucher_atm_debug("mailbox register %lld", vatm, - vatm->vatm_mailbox_offset); kern_return_t kr; - atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset); + atm_guard_t 
gen = + dispatch_atomic_inc(&_voucher_atm_generation, relaxed); + _dispatch_voucher_atm_debug("atm register %lld", vatm, gen); + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); mach_voucher_attr_content_t kvc_out = NULL; mach_voucher_attr_content_size_t kvc_out_size = 0; kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { + if (kr) { DISPATCH_CLIENT_CRASH("Could not register ATM ID"); } - _dispatch_voucher_atm_debug("mailbox registered %lld", vatm, - vatm->vatm_mailbox_offset); -#endif // RDAR_17510224 + vatm->vatm_generation = gen; + _dispatch_voucher_atm_debug("atm registered %lld", vatm, + vatm->vatm_generation); } -static bool -_voucher_atm_mailbox_unregister(_voucher_atm_t vatm) +static void +_voucher_atm_unregister(_voucher_atm_t vatm) { - if (vatm->vatm_mailbox_offset == MAILBOX_OFFSET_UNSET) return false; - _dispatch_voucher_atm_debug("mailbox unregister %lld", vatm, - vatm->vatm_mailbox_offset); + _dispatch_voucher_atm_debug("atm unregister %lld", vatm, + vatm->vatm_generation); mach_voucher_t kv = vatm->vatm_kvoucher; dispatch_assert(kv); kern_return_t kr; - atm_mailbox_offset_t offset = vatm->vatm_mailbox_offset; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&offset; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(offset); + atm_guard_t gen = vatm->vatm_generation; + mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; + mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); mach_voucher_attr_content_t kvc_out = NULL; mach_voucher_attr_content_size_t kvc_out_size = 0; kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, @@ -2119,43 +1898,25 @@ _voucher_atm_mailbox_unregister(_voucher_atm_t vatm) DISPATCH_VERIFY_MIG(kr); if (kr && kr != KERN_INVALID_VALUE) { 
(void)dispatch_assume_zero(kr); - DISPATCH_CLIENT_CRASH("Could not unregister ATM ID"); } - _dispatch_voucher_atm_debug("mailbox unregistered %lld", vatm, - vatm->vatm_mailbox_offset); - return true; + _dispatch_voucher_atm_debug("atm unregistered %lld", vatm, + vatm->vatm_generation); } static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id) { - atm_mailbox_offset_t mailbox_offset = _voucher_atm_mailbox_alloc(); - if (kv && mailbox_offset == MAILBOX_OFFSET_UNSET) return NULL; _voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s)); - if (!kv) { - kv = _voucher_atm_mach_voucher_create(&atm_id); - if (mailbox_offset == MAILBOX_OFFSET_UNSET) { - _voucher_dealloc_mach_voucher(kv); - } else { - vatm->vatm_kvoucher = kv; - } - kv = MACH_VOUCHER_NULL; - } + kv = kv ? _voucher_atm_mach_voucher_copy(kv) : + _voucher_atm_mach_voucher_create(&atm_id); + vatm->vatm_kvoucher = kv; vatm->vatm_id = atm_id; - vatm->vatm_mailbox_offset = mailbox_offset; - _voucher_activity_lock_init(vatm_activities_lock(vatm)); - TAILQ_INIT(&vatm->vatm_activities); - TAILQ_INIT(&vatm->vatm_used_activities); - _voucher_atm_mailbox_set(mailbox_offset, 0, true); - _voucher_atm_mailbox_set(mailbox_offset, ATM_SUBAID32_MAX, false); _voucher_atm_t vatmx = _voucher_atm_try_insert(vatm); if (vatmx) { _voucher_atm_dispose(vatm, false); vatm = vatmx; - } else if (kv) { - _voucher_atm_mailbox_mach_voucher_register(vatm, kv); } else { - _voucher_atm_mailbox_register(vatm); + _voucher_atm_register(vatm); } _dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv); return vatm; @@ -2165,114 +1926,48 @@ static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister) { _dispatch_voucher_atm_debug("dispose", vatm); - dispatch_assert(TAILQ_EMPTY(&vatm->vatm_activities)); - dispatch_assert(TAILQ_EMPTY(&vatm->vatm_used_activities)); if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { _dispatch_voucher_atm_debug("corruption", vatm); DISPATCH_CRASH("ATM 
corruption"); } vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS; - bool free_mailbox = (vatm->vatm_mailbox_offset != MAILBOX_OFFSET_UNSET); if (vatm->vatm_kvoucher) { - if (unregister) free_mailbox = _voucher_atm_mailbox_unregister(vatm); + if (unregister) _voucher_atm_unregister(vatm); _voucher_dealloc_mach_voucher(vatm->vatm_kvoucher); vatm->vatm_kvoucher = MACH_VOUCHER_NULL; } - if (free_mailbox) { - _voucher_atm_mailbox_free(vatm->vatm_mailbox_offset); - vatm->vatm_mailbox_offset = MAILBOX_OFFSET_UNSET; - } free(vatm); } -static inline mach_voucher_t -_voucher_activity_get_atm_mach_voucher(_voucher_activity_t act) -{ - mach_voucher_t kv; - kv = act && act->va_atm ? act->va_atm->vatm_kvoucher : MACH_VOUCHER_NULL; - return kv; -} - DISPATCH_NOINLINE -static _voucher_atm_t -_voucher_atm_base_copy_and_activity_id_make(voucher_activity_id_t *va_id_ptr) -{ - _voucher_atm_subid_t subid; - _voucher_atm_t vatm, vatm_old = NULL, vatm_new = NULL; - if (_voucher_activity_heap->vam_base_atm_subid_max == 1) { - vatm = _voucher_atm_create(0, 0); - subid = 1; - goto out; - } - _voucher_activity_lock_lock(vam_base_atm_lock()); - vatm = _voucher_activity_heap->vam_base_atm; -retry: - _voucher_atm_retain(vatm); - subid = _voucher_activity_heap->vam_base_atm_subid; - if (subid++ >= _voucher_activity_heap->vam_base_atm_subid_max) { - _voucher_activity_lock_unlock(vam_base_atm_lock()); - if (!vatm_new) vatm_new = _voucher_atm_create(0, 0); - _voucher_activity_lock_lock(vam_base_atm_lock()); - _voucher_atm_release(vatm); - vatm_old = vatm; - vatm = _voucher_activity_heap->vam_base_atm; - if (vatm != vatm_old) { - vatm_old = NULL; - goto retry; - } - _voucher_activity_heap->vam_base_atm = vatm = vatm_new; - _voucher_activity_heap->vam_base_atm_subid = subid = 1; - vatm_new = NULL; - _voucher_atm_retain(vatm); - _dispatch_voucher_atm_debug("base replace", vatm); - } else { - _voucher_activity_heap->vam_base_atm_subid = subid; - _dispatch_voucher_atm_debug("base copy", vatm); - } - 
_voucher_activity_lock_unlock(vam_base_atm_lock()); - if (vatm_old) _voucher_atm_release(vatm_old); - if (vatm_new) _voucher_atm_release(vatm_new); -out: - *va_id_ptr = VATM_ACTID(vatm, subid); - return vatm; -} - static voucher_activity_id_t -_voucher_atm_nested_atm_id_make(void) +_voucher_atm_subid_make(_voucher_atm_t vatm, voucher_activity_flag_t flags) { - atm_aid_t atm_id; - mach_voucher_t kv = _voucher_atm_mach_voucher_create(&atm_id); - _voucher_dealloc_mach_voucher(kv); // just need the unique ID - return VATMID2ACTID(atm_id); -} - -static voucher_activity_id_t -_voucher_atm_nested_activity_id_make(void) -{ - voucher_activity_id_t va_id, va_id_old, va_id_new; - _voucher_atm_subid_t subid; - _voucher_activity_lock_lock(vam_nested_atm_lock()); - va_id = _voucher_activity_heap->vam_nested_atm_id; -retry: - subid = _voucher_activity_heap->vam_nested_atm_subid; - if (subid++ >= VATM_SUBID_MAX) { - _voucher_activity_lock_unlock(vam_nested_atm_lock()); - va_id_new = _voucher_atm_nested_atm_id_make(); - va_id_old = va_id; - _voucher_activity_lock_lock(vam_nested_atm_lock()); - va_id = _voucher_activity_heap->vam_nested_atm_id; - if (va_id != va_id_old) goto retry; - _voucher_activity_heap->vam_nested_atm_id = va_id = va_id_new; - subid = 1; - } - _voucher_activity_heap->vam_nested_atm_subid = subid; - _voucher_activity_lock_unlock(vam_nested_atm_lock()); - return va_id + subid; + mach_voucher_t kv = vatm->vatm_kvoucher; + _dispatch_voucher_atm_debug("create subid from atm", vatm); + kern_return_t kr; + mach_atm_subaid_t naid; + mach_voucher_attr_content_t kvc_in = NULL; + mach_voucher_attr_content_size_t kvc_in_size = 0; + mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&naid; + mach_voucher_attr_content_size_t kvc_out_size = sizeof(naid); + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, + ATM_ACTION_GETSUBAID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + 
DISPATCH_CLIENT_CRASH("Could not get next ATM ID"); + } + _dispatch_voucher_atm_debug("created subid from atm %lld", vatm, naid); + return VATMID2ACTID(naid, flags); } #pragma mark - #pragma mark voucher_activity_id_t +static const size_t _voucher_activity_maxsize = + _voucher_activity_buffer_size - _voucher_activity_buffer_header_size - + _voucher_activity_strings_header_size; + voucher_activity_id_t voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, uint64_t location, voucher_activity_flag_t flags) @@ -2280,36 +1975,29 @@ voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, dispatch_once_f(&_voucher_activity_heap_pred, NULL, _voucher_activity_heap_init); if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - voucher_activity_id_t va_id = 0, va_base_id = 0; + voucher_activity_id_t va_id = 0; _voucher_atm_t vatm = NULL; _voucher_activity_t act = NULL; _voucher_activity_tracepoint_t vat = NULL; unsigned int activities = 1, oactivities = 0; voucher_t ov = _voucher_get(); + vatm = _voucher_get_atm(ov); if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) { oactivities = ov->v_activities; activities += oactivities; if (activities > _voucher_max_activities) { - va_id = _voucher_atm_nested_activity_id_make(); + va_id = _voucher_atm_subid_make(vatm, flags); goto out; } } - if (activities == 1) { - vatm = _voucher_atm_base_copy_and_activity_id_make(&va_id); - if (vatm->vatm_kvoucher) { - // consumes vatm reference: - act = _voucher_activity_create_with_atm(vatm, va_id, trace_id, - location, NULL); - vat = (_voucher_activity_tracepoint_t)act; - } else { - _voucher_atm_release(vatm); - } - if (!act) { - activities++; - // default to _voucher_activity_default base activity - va_base_id = _voucher_activity_default->va_id; - } - } + _voucher_atm_retain(vatm); + // required for v->v_atm = vatm below + _voucher_atm_retain(vatm); + va_id = _voucher_atm_subid_make(vatm, flags); + // consumes vatm reference: + 
act = _voucher_activity_create_with_atm(vatm, va_id, trace_id, location, + NULL); + vat = (_voucher_activity_tracepoint_t)act; pthread_priority_t priority = _voucher_get_priority(ov); mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0; voucher_t v = _voucher_alloc(activities, priority, extra); @@ -2326,24 +2014,13 @@ voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, memcpy(activity_ids, _voucher_activity_ids(ov), oactivities * sizeof(voucher_activity_id_t)); } - if (!va_id) { - va_id = _voucher_atm_nested_activity_id_make(); - if (ov && ov->v_activity) { - act = _voucher_activity_retain(ov->v_activity); - } - } - if (va_base_id) activity_ids[0] = va_base_id; activity_ids[activities-1] = va_id; + v->v_atm = vatm; v->v_activity = act; _voucher_swap(ov, v); - if (vat) return va_id; // new _voucher_activity_s contains trace info + return va_id; // new activity buffer contains trace info out: - vat = _voucher_activity_trace_with_id(trace_id); - if (vat) { - vat->vat_flags |= _voucher_activity_trace_flag_activity | - _voucher_activity_trace_flag_start; - vat->vat_data[0] = va_id; - } + _voucher_activity_trace_activity_event(trace_id, va_id, start); return va_id; } @@ -2358,13 +2035,8 @@ void voucher_activity_end(voucher_activity_id_t va_id) { if (!va_id) return; - _voucher_activity_tracepoint_t vat; - vat = _voucher_activity_trace_with_id(_voucher_activity_trace_id_release); - if (vat) { - vat->vat_flags |= _voucher_activity_trace_flag_activity | - _voucher_activity_trace_flag_end; - vat->vat_data[0] = va_id; - } + _voucher_activity_trace_activity_event(_voucher_activity_trace_id_release, + va_id, end); voucher_t v = _voucher_get(); if (!v) return; unsigned int activities = v->v_activities, act_idx = activities; @@ -2401,6 +2073,7 @@ voucher_activity_end(voucher_activity_id_t va_id) } else { if (v->v_activity) { nv->v_activity = _voucher_activity_retain(v->v_activity); + nv->v_atm = _voucher_atm_retain(v->v_atm); } 
memcpy(new_activity_ids, activity_ids, --act_idx * sizeof(voucher_activity_id_t)); @@ -2434,12 +2107,17 @@ voucher_activity_get_namespace(void) { voucher_t v = _voucher_get(); if (!v || !v->v_activity) return 0; - return v->v_activity->va_namespace; + voucher_activity_trace_id_t trace_id = v->v_activity->va_trace_id; + uint8_t cns = (uint8_t)(trace_id >> + _voucher_activity_trace_id_code_namespace_shift); + return cns; } DISPATCH_NOINLINE _voucher_activity_tracepoint_t -_voucher_activity_tracepoint_get_slow(unsigned int slots) +_voucher_activity_buffer_tracepoint_acquire_slow(_voucher_activity_t *vap, + _voucher_activity_buffer_header_t *vabp, unsigned int slots, + size_t strsize, uint16_t *stroffsetp) { _voucher_activity_t act; _voucher_activity_buffer_header_t vab; @@ -2454,16 +2132,18 @@ _voucher_activity_tracepoint_get_slow(unsigned int slots) act = _voucher_activity_default; } vab = act->va_current_buffer; - if (vab && vab->vabh_next_tracepoint_idx <= - _voucher_activity_tracepoints_per_buffer) { + if (act == *vap && vab != *vabp) { goto retry; // another slowpath raced us } do { vab = _voucher_activity_buffer_alloc(act, vab); if (!vab) break; retry: - vat = _voucher_activity_buffer_tracepoint_get(vab, slots); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strsize, + stroffsetp); } while (!vat); + *vap = act; + *vabp = vab; return vat; } @@ -2485,13 +2165,20 @@ voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, void *buffer, size_t length) { if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; _voucher_activity_tracepoint_t vat; const unsigned int slots = length <= sizeof(vat->vat_data) ? 
1 : 2; - vat = _voucher_activity_tracepoint_get(slots); - if (!vat) vat = _voucher_activity_tracepoint_get_slow(slots); + act = _voucher_activity_get(); + vab = _voucher_activity_buffer_get_from_activity(act); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, 0, NULL); + if (!vat) { + vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, + slots, 0, NULL); + } if (!vat) return 0; uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location); + trace_id, location, true); void *tbuf = vat->vat_data; size_t tlen = sizeof(vat->vat_data); if (length < tlen) { @@ -2515,6 +2202,72 @@ voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, } } _voucher_activity_trace_fault(trace_id); + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(act, vab); + } + return timestamp; +} + +uint64_t +voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, + uint64_t location, void *buffer, size_t length, const char *strings[], + size_t string_lengths[], size_t strings_size) +{ + if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; + _voucher_activity_tracepoint_t vat; + uint16_t offset; + const unsigned int slots = length <= sizeof(vat->vat_data) ? 
1 : 2; + strings_size = MIN(strings_size, _voucher_activity_maxsize - + slots * sizeof(struct _voucher_activity_tracepoint_s)); + act = _voucher_activity_get(); + vab = _voucher_activity_buffer_get_from_activity(act); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strings_size, + &offset); + if (!vat) { + vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, + slots, strings_size, &offset); + } + if (!vat) return 0; + uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, + trace_id, location, false); + vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_strings; + vat->vat_stroff.vats_offset = offset; + void *tbuf = vat->vat_stroff.vats_data; + size_t tlen = sizeof(vat->vat_stroff.vats_data); + if (length < tlen) { + memcpy(tbuf, buffer, length); + } else { + memcpy(tbuf, buffer, tlen); + } + if (length > tlen) { + vat->vat_flags |= _voucher_activity_trace_flag_wide_first; + buffer += tlen; + length -= tlen; + (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | + _voucher_activity_trace_flag_wide_second; + vat->vat_type = 0; vat->vat_namespace = 0; + tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); + tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); + if (length < tlen) { + memcpy(tbuf, buffer, length); + } else { + memcpy(tbuf, buffer, tlen); + } + } + const uint16_t offsetend = offset - (uint16_t)strings_size; + char *b = (char*)vab + _voucher_activity_buffer_size; + int i = 0; + while (offset > offsetend && strings[i]) { + size_t maxsize = MIN(string_lengths[i] + 1, offset - offsetend); + size_t len = strlcpy(b - offset, strings[i++], maxsize); + offset -= MIN(len + 1, maxsize); + } + _voucher_activity_trace_fault(trace_id); + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(act, vab); + } return timestamp; } @@ -2524,18 +2277,28 @@ voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, uintptr_t arg4) { if 
(!_voucher_activity_trace_id_enabled(trace_id)) return 0; + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; _voucher_activity_tracepoint_t vat; - vat = _voucher_activity_tracepoint_get(1); - if (!vat) vat = _voucher_activity_tracepoint_get_slow(1); + act = _voucher_activity_get(); + vab = _voucher_activity_buffer_get_from_activity(act); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); + if (!vat) { + vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, 1, + 0, NULL); + } if (!vat) return 0; uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location); + trace_id, location, true); vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; vat->vat_data[0] = arg1; vat->vat_data[1] = arg2; vat->vat_data[2] = arg3; vat->vat_data[3] = arg4; _voucher_activity_trace_fault(trace_id); + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(act, vab); + } return timestamp; } @@ -2576,9 +2339,9 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) if (v->v_activity) { _voucher_activity_t va = v->v_activity; _voucher_atm_t vatm = va->va_atm; - bufprintf("activity[%p] = { ID 0x%llx, use %d, atm[%p] = { " + bufprintf("activity[%p] = { ID 0x%llx, ref %d, atm[%p] = { " "AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id, - va->va_use_count + 1, va->va_atm, vatm->vatm_id, + va->va_refcnt + 1, va->va_atm, vatm->vatm_id, vatm->vatm_refcnt + 1, vatm->vatm_kvoucher); } bufprintf("}"); @@ -2617,6 +2380,18 @@ voucher_copy_without_importance(void) return NULL; } +voucher_t +voucher_retain(voucher_t voucher) +{ + return voucher; +} + +void +voucher_release(voucher_t voucher) +{ + (void)voucher; +} + void voucher_replace_default_voucher(void) { @@ -2656,6 +2431,13 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t voucher, return NULL; } +voucher_t +_voucher_create_accounting_voucher(voucher_t voucher) +{ + (void)voucher; + return NULL; +} + voucher_t 
voucher_create_with_mach_msg(mach_msg_header_t *msg) { @@ -2701,6 +2483,13 @@ voucher_activity_get_metadata_buffer(size_t *length) return NULL; } +void +voucher_activity_buffer_hook_install_4libtrace( + _voucher_activity_buffer_hook_t hook) +{ + (void)hook; +} + void _voucher_activity_heap_pressure_normal(void) { @@ -2754,6 +2543,16 @@ voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, return 0; } +uint64_t +voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, + uint64_t location, void *buffer, size_t length, const char *strings[], + size_t string_lengths[], size_t strings_size) +{ + (void)trace_id; (void)location; (void)buffer; (void)length; (void)strings; + (void)string_lengths; (void)strings_size; + return 0; +} + uint64_t voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 6fa1538cf..cc5ae2298 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -93,7 +93,7 @@ voucher_get_mach_voucher(voucher_t voucher); #pragma mark voucher_t #if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 + IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 #undef VOUCHER_USE_MACH_VOUCHER #define VOUCHER_USE_MACH_VOUCHER 0 #endif @@ -121,6 +121,7 @@ size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz); void _voucher_thread_cleanup(void *voucher); mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher); voucher_t _voucher_create_without_importance(voucher_t voucher); +voucher_t _voucher_create_accounting_voucher(voucher_t voucher); mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher, pthread_priority_t priority); voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher, @@ -134,6 +135,9 @@ _OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object) #endif #endif +voucher_t voucher_retain(voucher_t 
voucher); +void voucher_release(voucher_t voucher); + #define _TAILQ_IS_ENQUEUED(elm, field) \ ((elm)->field.tqe_prev != NULL) #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ @@ -156,7 +160,8 @@ typedef struct voucher_s { TAILQ_ENTRY(voucher_s) v_list; mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference - _voucher_activity_t v_activity; + struct _voucher_atm_s *v_atm; + struct _voucher_activity_s *v_activity; #if VOUCHER_ENABLE_RECIPE_OBJECTS size_t v_recipe_extra_offset; mach_voucher_attr_recipe_size_t v_recipe_extra_size; @@ -230,13 +235,15 @@ typedef uint32_t _voucher_priority_t; #define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif -#if !(USE_OBJC && __OBJC2__) +#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) DISPATCH_ALWAYS_INLINE static inline voucher_t _voucher_retain(voucher_t voucher) { #if !DISPATCH_VOUCHER_OBJC_DEBUG + // not using _os_object_refcnt* because we don't need barriers: + // vouchers are immutable and are in a hash table with a lock int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); if (slowpath(xref_cnt <= 0)) { @@ -256,6 +263,8 @@ static inline void _voucher_release(voucher_t voucher) { #if !DISPATCH_VOUCHER_OBJC_DEBUG + // not using _os_object_refcnt* because we don't need barriers: + // vouchers are immutable and are in a hash table with a lock int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (fastpath(xref_cnt >= 0)) { @@ -515,76 +524,81 @@ typedef uint32_t _voucher_atm_subid_t; static const size_t _voucher_activity_hash_bits = 6; static const size_t _voucher_activity_hash_size = 1 << _voucher_activity_hash_bits; -#define VACTID_HASH(x) ((((uint32_t)((x) >> 32) + (uint32_t)(x)) * \ - 2654435761u) >> (32-_voucher_activity_hash_bits)) +#define 
VACTID_HASH(x) \ + (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) #define VATMID_HASH(x) \ (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) -#define VATMID2ACTID(x) ((uint64_t)(x) << 32) -#define VACTID_BASEID(x) ((uint64_t)(x) & (((uint64_t)UINT32_MAX) << 32)) -#define VACTID_SUBID(x) ((uint32_t)(x)) -#define VATM_ACTID(vatm, subid) (VATMID2ACTID((vatm)->vatm_id) + (subid)) -#define VATM_SUBID_BITS2MAX(bits) ((1u << (bits)) - 1) -#define VATM_SUBID_MAXBITS (32) -#define VATM_SUBID_MAX (ATM_SUBAID32_MAX) -#define MAILBOX_OFFSET_UNSET UINT64_MAX - -static const size_t _voucher_activity_buffers_per_heap = 512; -typedef unsigned long _voucher_activity_bitmap_base_t; -static const size_t _voucher_activity_bits_per_bitmap_base_t = - 8 * sizeof(_voucher_activity_bitmap_base_t); -static const size_t _voucher_activity_bitmaps_per_heap = - _voucher_activity_buffers_per_heap / - _voucher_activity_bits_per_bitmap_base_t; -typedef _voucher_activity_bitmap_base_t - _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap]; +#define VATMID2ACTID(x, flags) \ + (((voucher_activity_id_t)(x) & 0xffffffffffffff) | \ + (((voucher_activity_id_t)(flags) & 0xfe) << 55)) typedef struct _voucher_activity_metadata_s { - _voucher_activity_buffer_t vam_kernel_metadata; _voucher_activity_buffer_t vam_client_metadata; - struct _voucher_activity_self_metadata_s vam_self_metadata; -#if __LP64__ - uintptr_t vam_pad0[7]; -#else - uintptr_t vam_pad0[15]; -#endif - // cacheline - _voucher_activity_bitmap_t volatile vam_atm_mbox_bitmap; + struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; _voucher_activity_bitmap_t volatile vam_buffer_bitmap; _voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap; - // cacheline - _voucher_atm_subid_t vam_base_atm_subid; - _voucher_atm_subid_t vam_base_atm_subid_max; - _voucher_atm_subid_t vam_nested_atm_subid; - _voucher_atm_t vam_default_activity_atm; - _voucher_atm_t volatile vam_base_atm; - 
voucher_activity_id_t volatile vam_nested_atm_id; -#if __LP64__ - uintptr_t vam_pad2[3]; -#else - uintptr_t vam_pad2[1]; -#endif - _voucher_activity_lock_s vam_base_atm_lock; - _voucher_activity_lock_s vam_nested_atm_lock; _voucher_activity_lock_s vam_atms_lock; _voucher_activity_lock_s vam_activities_lock; - // cacheline TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size]; TAILQ_HEAD(, _voucher_activity_s) vam_activities[_voucher_activity_hash_size]; } *_voucher_activity_metadata_t; +#pragma mark - +#pragma mark _voucher_atm_t + +typedef struct _voucher_atm_s { + int32_t volatile vatm_refcnt; + mach_voucher_t vatm_kvoucher; + atm_aid_t vatm_id; + atm_guard_t vatm_generation; + TAILQ_ENTRY(_voucher_atm_s) vatm_list; +#if __LP64__ + uintptr_t vatm_pad[3]; + // cacheline +#endif +} *_voucher_atm_t; + +extern _voucher_atm_t _voucher_task_atm; + #pragma mark - #pragma mark _voucher_activity_t -_voucher_activity_tracepoint_t _voucher_activity_tracepoint_get_slow( - unsigned int slots); +typedef struct _voucher_activity_s { + voucher_activity_id_t va_id; + voucher_activity_trace_id_t va_trace_id; + uint64_t va_location; + int32_t volatile va_refcnt; + uint32_t volatile va_buffer_count; + uint32_t va_buffer_limit; + _voucher_activity_buffer_header_t volatile va_current_buffer; + _voucher_atm_t va_atm; +#if __LP64__ + uint64_t va_unused; +#endif + // cacheline + _voucher_activity_lock_s va_buffers_lock; + TAILQ_HEAD(_voucher_activity_buffer_list_s, + _voucher_activity_buffer_header_s) va_buffers; + TAILQ_ENTRY(_voucher_activity_s) va_list; + TAILQ_ENTRY(_voucher_activity_s) va_atm_list; + TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list; + pthread_mutex_t va_mutex; + pthread_cond_t va_cond; +} *_voucher_activity_t; + +_voucher_activity_tracepoint_t _voucher_activity_buffer_tracepoint_acquire_slow( + _voucher_activity_t *vap, _voucher_activity_buffer_header_t *vabp, + unsigned int slots, size_t strsize, uint16_t *stroffsetp); +void 
_voucher_activity_firehose_push(_voucher_activity_t act, + _voucher_activity_buffer_header_t buffer); extern _voucher_activity_t _voucher_activity_default; extern voucher_activity_mode_t _voucher_activity_mode; #if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG #define _dispatch_voucher_activity_debug(msg, act, ...) \ - _dispatch_debug("activity[%p] <0x%x>: atm[%p] <%lld>: " msg, (act), \ - (act) ? VACTID_SUBID((act)->va_id) : 0, (act) ? (act)->va_atm : NULL, \ + _dispatch_debug("activity[%p] <0x%llx>: atm[%p] <%lld>: " msg, (act), \ + (act) ? (act)->va_id : 0, (act) ? (act)->va_atm : NULL, \ (act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__) #define _dispatch_voucher_atm_debug(msg, atm, ...) \ _dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \ @@ -597,13 +611,14 @@ extern voucher_activity_mode_t _voucher_activity_mode; DISPATCH_ALWAYS_INLINE static inline uint64_t -_voucher_activity_timestamp(void) +_voucher_activity_timestamp(bool approx) { #if TARGET_IPHONE_SIMULATOR && \ IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 + (void)approx; return mach_absolute_time(); #else - return mach_approximate_time(); + return approx ? 
mach_approximate_time() : mach_absolute_time(); #endif } @@ -616,45 +631,116 @@ _voucher_activity_thread_id(void) return thread_id; } +#define _voucher_activity_buffer_pos2length(pos) \ + ({ _voucher_activity_buffer_position_u _pos = (pos); \ + _pos.vabp_pos.vabp_next_tracepoint_idx * \ + sizeof(struct _voucher_activity_tracepoint_s) + \ + _pos.vabp_pos.vabp_string_offset; }) + DISPATCH_ALWAYS_INLINE static inline _voucher_activity_tracepoint_t -_voucher_activity_buffer_tracepoint_get(_voucher_activity_buffer_header_t vab, - unsigned int slots) +_voucher_activity_buffer_tracepoint_acquire( + _voucher_activity_buffer_header_t vab, unsigned int slots, + size_t strsize, uint16_t *stroffsetp) { - uint32_t idx = dispatch_atomic_add2o(vab, vabh_next_tracepoint_idx, - slots, relaxed); - if (idx <= _voucher_activity_tracepoints_per_buffer) { - return (_voucher_activity_tracepoint_t)vab + (idx - slots); - } - return NULL; + if (!vab) return NULL; + _voucher_activity_buffer_position_u pos_orig, pos; + pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; + do { + pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; + pos.vabp_pos.vabp_next_tracepoint_idx += slots; + pos.vabp_pos.vabp_string_offset += strsize; + size_t len = _voucher_activity_buffer_pos2length(pos); + if (len > _voucher_activity_buffer_size || pos.vabp_pos.vabp_flags) { + return NULL; + } + if (len == _voucher_activity_buffer_size) { + pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; + } + pos.vabp_pos.vabp_refcnt++; + } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, + pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, + &pos_orig.vabp_atomic_pos, relaxed)); + if (stroffsetp) *stroffsetp = pos.vabp_pos.vabp_string_offset; + return (_voucher_activity_tracepoint_t)vab + + pos_orig.vabp_pos.vabp_next_tracepoint_idx; } DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_tracepoint_get_from_activity(_voucher_activity_t va, - unsigned int slots) +static 
inline bool +_voucher_activity_buffer_tracepoint_release( + _voucher_activity_buffer_header_t vab) { - _voucher_activity_buffer_header_t vab = va ? va->va_current_buffer : NULL; - return vab ? _voucher_activity_buffer_tracepoint_get(vab, slots) : NULL; + _voucher_activity_buffer_position_u pos_orig, pos; + pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; + do { + pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; + pos.vabp_pos.vabp_refcnt--; + if (!pos.vabp_pos.vabp_refcnt && + (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full)) { + pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; + } + } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, + pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, + &pos_orig.vabp_atomic_pos, relaxed)); + return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); } DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_tracepoint_get(unsigned int slots) +static inline bool +_voucher_activity_buffer_mark_full(_voucher_activity_buffer_header_t vab) +{ + _voucher_activity_buffer_position_u pos_orig, pos; + pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; + do { + pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; + if (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full) { + return false; + } + pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; + if (!pos.vabp_pos.vabp_refcnt) { + pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; + } + } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, + pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, + &pos_orig.vabp_atomic_pos, relaxed)); + return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_buffer_is_full(_voucher_activity_buffer_header_t vab) +{ + _voucher_activity_buffer_position_u pos; + pos.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; + return (pos.vabp_pos.vabp_flags); +} + 
+DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_buffer_header_t +_voucher_activity_buffer_get_from_activity(_voucher_activity_t va) +{ + return va ? va->va_current_buffer : NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline _voucher_activity_t +_voucher_activity_get(void) { _voucher_activity_t va; voucher_t v = _voucher_get(); va = v && v->v_activity ? v->v_activity : _voucher_activity_default; - return _voucher_activity_tracepoint_get_from_activity(va, slots); + return va; } DISPATCH_ALWAYS_INLINE static inline uint64_t _voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat, - uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location) + uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location, + bool approx) { if (!location) location = (uint64_t)__builtin_return_address(0); - uint64_t timestamp = _voucher_activity_timestamp(); + uint64_t timestamp = _voucher_activity_timestamp(approx); vat->vat_flags = _voucher_activity_trace_flag_tracepoint, vat->vat_type = type, vat->vat_namespace = code_namespace, @@ -668,13 +754,14 @@ _voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat, DISPATCH_ALWAYS_INLINE static inline uint64_t _voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat, - voucher_activity_trace_id_t trace_id, uint64_t location) + voucher_activity_trace_id_t trace_id, uint64_t location, bool approx) { uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift); uint8_t cns = (uint8_t)(trace_id >> _voucher_activity_trace_id_code_namespace_shift); uint32_t code = (uint32_t)trace_id; - return _voucher_activity_tracepoint_init(vat, type, cns, code, location); + return _voucher_activity_tracepoint_init(vat, type, cns, code, location, + approx); } DISPATCH_ALWAYS_INLINE @@ -720,69 +807,82 @@ _voucher_activity_disabled(void) } DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t +static inline void _voucher_activity_trace_args_inline(uint8_t type, 
uint8_t code_namespace, uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) { - if (!_voucher_activity_trace_type_enabled(type)) return NULL; + if (!_voucher_activity_trace_type_enabled(type)) return; + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; _voucher_activity_tracepoint_t vat; - vat = _voucher_activity_tracepoint_get(1); - if (!vat) return NULL; - _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0); + act = _voucher_activity_get(); + vab = _voucher_activity_buffer_get_from_activity(act); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); + if (!vat) return; + _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; vat->vat_data[0] = arg1; vat->vat_data[1] = arg2; vat->vat_data[2] = arg3; vat->vat_data[3] = arg4; - return vat; -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_trace_with_id_inline(voucher_activity_trace_id_t trace_id) -{ - _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1); - if (!vat) return NULL; - _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0); - return vat; + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(act, vab); + } } DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_trace_with_id(voucher_activity_trace_id_t trace_id) +static inline void +_voucher_activity_trace_activity_event(voucher_activity_trace_id_t trace_id, + voucher_activity_id_t va_id, _voucher_activity_tracepoint_flag_t flags) { - _voucher_activity_tracepoint_t vat = _voucher_activity_tracepoint_get(1); - if (!vat) vat = _voucher_activity_tracepoint_get_slow(1); - if (!vat) return NULL; - _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0); - return vat; + _voucher_activity_t act; + _voucher_activity_buffer_header_t vab; + 
_voucher_activity_tracepoint_t vat; + act = _voucher_activity_get(); + vab = _voucher_activity_buffer_get_from_activity(act); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); + if (!vat) return; + _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0, false); + vat->vat_flags |= _voucher_activity_trace_flag_activity | flags; + vat->vat_data[0] = va_id; + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(act, vab); + } } +#define _voucher_activity_trace_activity_event(trace_id, va_id, type) \ + _voucher_activity_trace_activity_event(trace_id, va_id, \ + _voucher_activity_trace_flag_ ## type) DISPATCH_ALWAYS_INLINE static inline void _voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code) { if (!v || !v->v_activity) return; // Don't use default activity for IPC - const uint8_t type = voucher_activity_tracepoint_type_release; + const uint8_t type = voucher_activity_tracepoint_type_debug; const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc; if (!_voucher_activity_trace_type_enabled(type)) return; + _voucher_activity_buffer_header_t vab; _voucher_activity_tracepoint_t vat; - vat = _voucher_activity_tracepoint_get_from_activity(v->v_activity, 1); + vab = _voucher_activity_buffer_get_from_activity(v->v_activity); + vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); if (!vat) return; // TODO: slowpath ? 
- _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0); + _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); vat->vat_flags |= _voucher_activity_trace_flag_libdispatch; #if __has_extension(c_static_assert) _Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data), "mach_msg_header_t too large"); #endif memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t)); + if (_voucher_activity_buffer_tracepoint_release(vab)) { + _voucher_activity_firehose_push(v->v_activity, vab); + } } #define _voucher_activity_trace_msg(v, msg, type) \ _voucher_activity_trace_msg(v, msg, \ _voucher_activity_tracepoint_namespace_ipc_ ## type) -#endif // !(USE_OBJC && __OBJC2__) +#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) #else // VOUCHER_USE_MACH_VOUCHER diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig index 1644ea90f..a2f98f9ee 100644 --- a/xcodeconfig/libdispatch-introspection.xcconfig +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ -19,7 +19,7 @@ // BUILD_VARIANTS = normal -INSTALL_PATH_ACTUAL = /usr/lib/system/introspection +INSTALL_PATH = /usr/lib/system/introspection GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection diff --git a/xcodeconfig/libdispatch-resolved.xcconfig b/xcodeconfig/libdispatch-resolved.xcconfig index 70e405f84..a42add8ef 100644 --- a/xcodeconfig/libdispatch-resolved.xcconfig +++ b/xcodeconfig/libdispatch-resolved.xcconfig @@ -18,7 +18,7 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -SUPPORTED_PLATFORMS = iphoneos +SUPPORTED_PLATFORMS = iphoneos appletvos watchos PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT) OTHER_LDFLAGS = SKIP_INSTALL = YES diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 1d2293318..7fc525dc7 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -19,20 
+19,17 @@ // #include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" -#include "/AppleInternal/XcodeConfig/SimulatorSupport.xcconfig" +#include "/AppleInternal/XcodeConfig/PlatformSupport.xcconfig" -// Set INSTALL_PATH[sdk=macosx*] when SimulatorSupport.xcconfig is unavailable -INSTALL_PATH[sdk=macosx*] = $(INSTALL_PATH_ACTUAL) - -SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator iphoneosnano iphonesimulatornano -ARCHS[sdk=iphonesimulator*] = $(NATIVE_ARCH_32_BIT) // Override BSD.xcconfig ARCHS +SDKROOT = macosx.internal +SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator PRODUCT_NAME = libdispatch EXECUTABLE_PREFIX = -INSTALL_PATH_ACTUAL = /usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = $(INSTALL_PATH_PREFIX)/usr/local/include/os +INSTALL_PATH = /usr/lib/system +PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch +OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system INSTALLHDRS_SCRIPT_PHASE = YES @@ -42,6 +39,8 @@ BUILD_VARIANTS = normal debug profile ONLY_ACTIVE_ARCH = NO CLANG_LINK_OBJC_RUNTIME = NO GCC_C_LANGUAGE_STANDARD = gnu11 +CLANG_CXX_LANGUAGE_STANDARD = gnu++11 +GCC_ENABLE_CPP_EXCEPTIONS = NO GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported @@ -71,13 +70,13 @@ OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) -UMBRELLA_LDFLAGS = 
-umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem_kernel -lsystem_platform -lsystem_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -UMBRELLA_LDFLAGS[sdk=iphonesimulator*] = -umbrella System -nodefaultlibs -ldyld_sim -lcompiler_rt_sim -lsystem_sim_c -lsystem_sim_blocks -lunwind_sim -Wl,-upward-lSystem +SIM_SUFFIX[sdk=*simulator*] = _sim +DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto OBJC_LDFLAGS[arch=i386][sdk=macosx*] = OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(UMBRELLA_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) OTHER_MIGFLAGS = -novouchers diff --git a/xcodescripts/install-dtrace.sh b/xcodescripts/install-dtrace.sh index c0eb3647e..9397c0435 100644 --- a/xcodescripts/install-dtrace.sh +++ b/xcodescripts/install-dtrace.sh @@ -19,7 +19,8 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # -if [ "${PLATFORM_NAME}" = iphoneos ]; then exit 0; fi +# This check equates to "is macosx or a simulator platform" +if [ "${PLATFORM_NAME}" == "${DEVICE_PLATFORM_NAME}" ]; then exit 0; fi if [ "${DEPLOYMENT_LOCATION}" != YES ]; then DSTROOT="${CONFIGURATION_BUILD_DIR}" From fb10f25794403392ef1a9943519415a62860aaa4 Mon Sep 17 00:00:00 2001 
From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Wed, 9 Dec 2015 05:20:03 +0000 Subject: [PATCH 07/18] libdispatch-501.20.1 Imported from libdispatch-501.20.1.tar.gz --- src/semaphore.c | 26 +++++++++++++++++--------- src/voucher.c | 19 ++++++++++--------- 2 files changed, 27 insertions(+), 18 deletions(-) diff --git a/src/semaphore.c b/src/semaphore.c index f356fb876..b8c8971af 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -93,8 +93,6 @@ _pop_timer_resolution(DWORD ms) DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); -static long _dispatch_group_wake(dispatch_semaphore_t dsema); - #pragma mark - #pragma mark dispatch_semaphore_t @@ -422,6 +420,10 @@ _dispatch_group_create_with_count(long count) dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); _dispatch_semaphore_init(LONG_MAX - count, dg); + if (count) { + dispatch_atomic_store2o((dispatch_semaphore_t)dg, do_ref_cnt, 1, + relaxed); // + } return dg; } @@ -441,8 +443,11 @@ void dispatch_group_enter(dispatch_group_t dg) { dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire); - if (slowpath(value < 0)) { + long value = dispatch_atomic_dec_orig2o(dsema, dsema_value, acquire); + if (value == LONG_MAX) { + return _dispatch_retain(dg); // + } + if (slowpath(value <= 0)) { DISPATCH_CLIENT_CRASH( "Too many nested calls to dispatch_group_enter()"); } @@ -450,7 +455,7 @@ dispatch_group_enter(dispatch_group_t dg) DISPATCH_NOINLINE static long -_dispatch_group_wake(dispatch_semaphore_t dsema) +_dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) { dispatch_continuation_t next, head, tail = NULL, dc; long rval; @@ -500,6 +505,9 @@ _dispatch_group_wake(dispatch_semaphore_t dsema) } while ((head = next)); _dispatch_release(dsema); } + if (needs_release) { + 
_dispatch_release(dsema); // + } return 0; } @@ -512,7 +520,7 @@ dispatch_group_leave(dispatch_group_t dg) DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()"); } if (slowpath(value == LONG_MAX)) { - (void)_dispatch_group_wake(dsema); + return (void)_dispatch_group_wake(dsema, true); } } @@ -540,7 +548,7 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) // dsema->dsema_group_waiters value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 if (value == LONG_MAX) { - return _dispatch_group_wake(dsema); + return _dispatch_group_wake(dsema, false); } // Mach semaphores appear to sometimes spuriously wake up. Therefore, // we keep a parallel count of the number of times a Mach semaphore is @@ -549,7 +557,7 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) // check the values again in case we need to wake any threads value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 if (value == LONG_MAX) { - return _dispatch_group_wake(dsema); + return _dispatch_group_wake(dsema, false); } #if USE_MACH_SEM @@ -678,7 +686,7 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst); // seq_cst with atomic store to notify_head if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { - _dispatch_group_wake(dsema); + _dispatch_group_wake(dsema, false); } } } diff --git a/src/voucher.c b/src/voucher.c index 6f28c24e3..6967f19c1 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -1990,14 +1990,15 @@ voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, goto out; } } - _voucher_atm_retain(vatm); - // required for v->v_atm = vatm below - _voucher_atm_retain(vatm); va_id = _voucher_atm_subid_make(vatm, flags); - // consumes vatm reference: - act = _voucher_activity_create_with_atm(vatm, va_id, trace_id, location, - NULL); - vat = 
(_voucher_activity_tracepoint_t)act; + if (activities == 1) { + // consumes vatm reference: + act = _voucher_activity_create_with_atm(_voucher_atm_retain(vatm), + va_id, trace_id, location, NULL); + vat = (_voucher_activity_tracepoint_t)act; + } else if (ov && ov->v_activity) { + act = _voucher_activity_retain(ov->v_activity); + } pthread_priority_t priority = _voucher_get_priority(ov); mach_voucher_attr_recipe_size_t extra = ov ? _voucher_extra_size(ov) : 0; voucher_t v = _voucher_alloc(activities, priority, extra); @@ -2015,10 +2016,10 @@ voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, oactivities * sizeof(voucher_activity_id_t)); } activity_ids[activities-1] = va_id; - v->v_atm = vatm; + v->v_atm = _voucher_atm_retain(vatm); v->v_activity = act; _voucher_swap(ov, v); - return va_id; // new activity buffer contains trace info + if (vat) return va_id; // new activity buffer contains trace info out: _voucher_activity_trace_activity_event(trace_id, va_id, start); return va_id; From 9b07a7e742c04ed4b7c25bfff1860feade04e041 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Thu, 22 Sep 2016 17:57:50 +0000 Subject: [PATCH 08/18] libdispatch-703.1.4 Imported from libdispatch-703.1.4.tar.gz --- .gitmodules | 3 + INSTALL | 74 +- Makefile.am | 15 +- PATCHES | 67 +- autogen.sh | 0 config/config.h | 80 +- configure.ac | 201 +- dispatch/Makefile.am | 8 + dispatch/base.h | 118 +- dispatch/block.h | 9 +- dispatch/data.h | 14 +- dispatch/dispatch.h | 24 +- dispatch/group.h | 13 +- dispatch/introspection.h | 8 +- dispatch/io.h | 12 +- dispatch/module.map | 20 + dispatch/module.modulemap | 10 + dispatch/object.h | 109 +- dispatch/once.h | 25 +- dispatch/queue.h | 446 +- dispatch/semaphore.h | 6 +- dispatch/source.h | 69 +- dispatch/time.h | 8 +- libdispatch.xcodeproj/project.pbxproj | 1696 +++--- man/Makefile.am | 2 + man/dispatch_object.3 | 5 +- man/dispatch_queue_create.3 | 4 - 
man/dispatch_source_create.3 | 26 +- man/dispatch_time.3 | 2 +- os/Makefile.am | 11 +- os/firehose_buffer_private.h | 184 + os/firehose_server_private.h | 332 ++ os/linux_base.h | 94 + os/object.h | 128 +- os/object_private.h | 56 +- os/voucher_activity_private.h | 327 ++ {private => os}/voucher_private.h | 160 +- private/Makefile.am | 1 + private/benchmark.h | 9 +- private/data_private.h | 20 +- private/introspection_private.h | 2 +- private/io_private.h | 35 +- private/layout_private.h | 10 - private/mach_private.h | 277 +- private/module.modulemap | 11 + private/private.h | 93 +- private/queue_private.h | 155 +- private/source_private.h | 154 +- private/voucher_activity_private.h | 619 --- src/Makefile.am | 138 +- src/allocator.c | 27 +- src/allocator_internal.h | 10 +- src/apply.c | 194 +- src/block.cpp | 9 +- src/data.c | 68 +- src/data.m | 31 +- src/data_internal.h | 36 +- src/firehose/firehose.defs | 56 + src/firehose/firehose_buffer.c | 1147 ++++ src/firehose/firehose_buffer_internal.h | 211 + src/firehose/firehose_inline_internal.h | 502 ++ src/firehose/firehose_internal.h | 51 + src/firehose/firehose_reply.defs | 43 + src/firehose/firehose_server.c | 1137 ++++ src/firehose/firehose_server_internal.h | 71 + src/firehose/firehose_server_object.m | 47 + src/firehose/firehose_types.defs | 28 + src/init.c | 535 +- src/inline_internal.h | 2414 +++++++-- src/internal.h | 585 +- src/introspection.c | 530 +- src/introspection_internal.h | 81 +- src/io.c | 256 +- src/io_internal.h | 12 +- src/libdispatch.codes | 13 + src/object.c | 111 +- src/object.m | 195 +- src/object_internal.h | 572 +- src/once.c | 41 +- src/queue.c | 4824 +++++++++++------ src/queue_internal.h | 798 ++- src/semaphore.c | 470 +- src/semaphore_internal.h | 81 +- src/shims.h | 121 +- src/shims/atomic.h | 463 +- src/shims/atomic_sfb.h | 8 +- src/shims/hw_config.h | 10 + src/shims/linux_stubs.c | 53 + src/shims/linux_stubs.h | 101 + src/shims/lock.c | 421 ++ src/shims/lock.h | 539 ++ 
src/shims/time.h | 5 + src/shims/tsd.h | 216 +- src/shims/yield.h | 30 +- src/source.c | 4795 +++++++++++----- src/source_internal.h | 144 +- src/swift/Block.swift | 114 + src/swift/Data.swift | 277 + src/swift/Dispatch.apinotes | 328 ++ src/swift/Dispatch.swift | 211 + src/swift/DispatchStubs.cc | 207 + src/swift/IO.swift | 129 + src/swift/Private.swift | 474 ++ src/swift/Queue.swift | 421 ++ src/swift/Source.swift | 425 ++ src/swift/Time.swift | 110 + src/swift/Wrapper.swift | 319 ++ src/time.c | 13 + src/trace.h | 66 +- src/transform.c | 81 +- src/voucher.c | 2482 +++------ src/voucher_internal.h | 712 +-- xcodeconfig/libdispatch-dyld-stub.xcconfig | 28 + .../libdispatch-introspection.xcconfig | 2 +- xcodeconfig/libdispatch-mp-static.xcconfig | 30 + ...cconfig => libdispatch-up-static.xcconfig} | 2 +- xcodeconfig/libdispatch.aliases | 5 +- xcodeconfig/libdispatch.order | 11 +- xcodeconfig/libdispatch.unexport | 34 - xcodeconfig/libdispatch.xcconfig | 18 +- xcodeconfig/libdispatch_objc.aliases | 34 - xcodeconfig/libfirehose.xcconfig | 36 + xcodeconfig/libfirehose_kernel.xcconfig | 35 + xcodescripts/install-headers.sh | 1 + xcodescripts/mig-headers.sh | 6 + .../run-on-install.sh | 9 +- 126 files changed, 24799 insertions(+), 9803 deletions(-) create mode 100644 .gitmodules mode change 100644 => 100755 autogen.sh create mode 100644 dispatch/module.map create mode 100644 dispatch/module.modulemap create mode 100644 os/firehose_buffer_private.h create mode 100644 os/firehose_server_private.h create mode 100644 os/linux_base.h create mode 100644 os/voucher_activity_private.h rename {private => os}/voucher_private.h (80%) create mode 100644 private/module.modulemap delete mode 100644 private/voucher_activity_private.h create mode 100644 src/firehose/firehose.defs create mode 100644 src/firehose/firehose_buffer.c create mode 100644 src/firehose/firehose_buffer_internal.h create mode 100644 src/firehose/firehose_inline_internal.h create mode 100644 
src/firehose/firehose_internal.h create mode 100644 src/firehose/firehose_reply.defs create mode 100644 src/firehose/firehose_server.c create mode 100644 src/firehose/firehose_server_internal.h create mode 100644 src/firehose/firehose_server_object.m create mode 100644 src/firehose/firehose_types.defs create mode 100644 src/libdispatch.codes create mode 100644 src/shims/linux_stubs.c create mode 100644 src/shims/linux_stubs.h create mode 100644 src/shims/lock.c create mode 100644 src/shims/lock.h create mode 100644 src/swift/Block.swift create mode 100644 src/swift/Data.swift create mode 100644 src/swift/Dispatch.apinotes create mode 100644 src/swift/Dispatch.swift create mode 100644 src/swift/DispatchStubs.cc create mode 100644 src/swift/IO.swift create mode 100644 src/swift/Private.swift create mode 100644 src/swift/Queue.swift create mode 100644 src/swift/Source.swift create mode 100644 src/swift/Time.swift create mode 100644 src/swift/Wrapper.swift create mode 100644 xcodeconfig/libdispatch-dyld-stub.xcconfig create mode 100644 xcodeconfig/libdispatch-mp-static.xcconfig rename xcodeconfig/{libdispatch-static.xcconfig => libdispatch-up-static.xcconfig} (89%) delete mode 100644 xcodeconfig/libdispatch.unexport delete mode 100644 xcodeconfig/libdispatch_objc.aliases create mode 100644 xcodeconfig/libfirehose.xcconfig create mode 100644 xcodeconfig/libfirehose_kernel.xcconfig rename xcodeconfig/libdispatch_macosx.aliases => xcodescripts/run-on-install.sh (79%) diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 000000000..e6068b432 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "libpwq"] + path = libpwq + url = https://github.com/mheily/libpwq.git diff --git a/INSTALL b/INSTALL index faf66d231..9113e4a8f 100644 --- a/INSTALL +++ b/INSTALL @@ -4,8 +4,9 @@ GCD is a concurrent programming framework first shipped with Mac OS X Snow Leopard. This package is an open source bundling of libdispatch, the core user space library implementing GCD. 
At the time of writing, support for the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow -Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Other -systems are currently unsupported. +Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Support +for Linux is a work in progress (see Linux notes below). Other systems are +currently unsupported. Configuring and installing libdispatch @@ -20,10 +21,15 @@ An uncustomized install requires: The following configure options may be of general interest: ---with-apple-libc-source +--with-apple-libpthread-source - Specify the path to Apple's Libc package, so that appropriate headers can - be found and used. + Specify the path to Apple's libpthread package, so that appropriate headers + can be found and used. + +--with-apple-libplatform-source + + Specify the path to Apple's libplatform package, so that appropriate headers + can be found and used. --with-apple-libclosure-source @@ -38,52 +44,48 @@ The following configure options may be of general interest: --with-blocks-runtime On systems where -fblocks is supported, specify an additional library path - in which libBlocksRuntime can be found. This is not required on Mac OS X, + in which libBlocksRuntime can be found. This is not required on OS X, where the Blocks runtime is included in libSystem, but is required on FreeBSD. The following options are likely to only be useful when building libdispatch on -Mac OS X as a replacement for /usr/lib/system/libdispatch.dylib: +OS X as a replacement for /usr/lib/system/libdispatch.dylib: --with-apple-objc4-source Specify the path to Apple's objc4 package, so that appropriate headers can be found and used. ---with-apple-libauto-source - - Specify the path to Apple's libauto package, so that appropriate headers - can be found and used. 
- --disable-libdispatch-init-constructor Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the - default when building on Mac OS X. For /usr/lib/system/libdispatch.dylib + default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start. --enable-apple-tsd-optimizations Use a non-portable allocation scheme for pthread per-thread data (TSD) keys - when building libdispatch for /usr/lib/system on Mac OS X. This should not - be used on other OS's, or on Mac OS X when building a stand-alone library. + when building libdispatch for /usr/lib/system on OS X. This should not + be used on other OS's, or on OS X when building a stand-alone library. Typical configuration commands The following command lines create the configuration required to build -libdispatch for /usr/lib/system on OS X MountainLion: +libdispatch for /usr/lib/system on OS X El Capitan: - sh autogen.sh + clangpath=$(dirname `xcrun --find clang`) + sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" + LIBTOOLIZE=glibtoolize sh autogen.sh cflags='-arch x86_64 -arch i386 -g -Os' ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ - --prefix=/usr --libdir=/usr/lib/system \ - --disable-dependency-tracking --disable-static \ + --prefix=/usr --libdir=/usr/lib/system --disable-static \ --enable-apple-tsd-optimizations \ - --with-apple-libc-source=/path/to/10.8.0/Libc-825.24 \ - --with-apple-libclosure-source=/path/to/10.8.0/libclosure-59 \ - --with-apple-xnu-source=/path/to/10.8.0/xnu-2050.7.9 \ - --with-apple-objc4-source=/path/to/10.8.0/objc4-532 \ - --with-apple-libauto-source=/path/to/10.8.0/libauto-185.1 + --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ + --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ + --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \ + 
--with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ + --with-apple-objc4-source=/path/to/10.11.0/objc4-680 make check Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with @@ -92,3 +94,27 @@ clang and blocks support: sh autogen.sh ./configure CC=clang --with-blocks-runtime=/usr/local/lib make check + +Instructions for building on Linux. Initial focus is on ubuntu 15.04. +Prepare your system + 1. Install compiler, autotools + sudo apt-get install clang + sudo apt-get install autoconf libtool pkg-config + 2. Install dtrace (to generate provider.h) + sudo apt-get install systemtap-sdt-dev + 3. Install libdispatch pre-reqs + sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev + +Initialize git submodules: + We are using git submodules to incorporate a specific revision of the + upstream pthread_workqueue library into the build. + git submodule init + git submodule update + +Build: + sh autogen.sh + ./configure + make + +Note: the build currently fails building tests, but libdispatch.so should + build successfully. diff --git a/Makefile.am b/Makefile.am index 72f432242..cc01c7c27 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,14 +4,27 @@ ACLOCAL_AMFLAGS = -I m4 +if BUILD_OWN_PTHREAD_WORKQUEUES SUBDIRS= \ dispatch \ + libpwq \ man \ os \ private \ - src + src \ + tests +else +SUBDIRS= \ + dispatch \ + man \ + os \ + private \ + src \ + tests +endif EXTRA_DIST= \ + README.md \ LICENSE \ PATCHES \ autogen.sh \ diff --git a/PATCHES b/PATCHES index 4f88387f1..28f7c5248 100644 --- a/PATCHES +++ b/PATCHES @@ -1,14 +1,21 @@ The libdispatch project exists in a parallel open source repository at: - http://svn.macosforge.org/repository/libdispatch/trunk + http://github.com/apple/swift-corelibs-libdispatch + +Externally contributed changes are synchronized back to the internal repository +via pull request of the result of `git am` of the contributed patch series. 
-Externally committed revisions are periodically synchronized back to the -internal repository (this repository). +Internal changes are synchronized from the internal darwin/trunk branch to the +external repository via `git am` on the github darwin/trunk branch and merge to +github master. Key: APPLIED: change set was applied to internal repository. INTERNAL: change set originated internally (i.e. already applied). SKIPPED: change set was skipped. +svn revisions until r218 from legacy open source repository at + http://svn.macosforge.org/repository/libdispatch/trunk + [ 1] SKIPPED [ 2] SKIPPED [ 3] INTERNAL rdar://problem/7148356 @@ -192,3 +199,57 @@ Key: [ 181] [ 182] [ 183] INTERNAL rdar://problem/7581831 +[ 202] INTERNAL libdispatch-187.5 +[ 212] INTERNAL libdispatch-228.18 +[ 213] INTERNAL rdar://problem/11754320 +[ 216] INTERNAL libdispatch-339.1.9 +[ 217] INTERNAL libdispatch-442.1.4 +[ 218] INTERNAL libdispatch-500.1.5 + +github commits starting with 29bdc2f from + + http://github.com/apple/swift-corelibs-libdispatch + +[29bdc2f] INTERNAL libdispatch-500.1.5 +[a60acd6] APPLIED rdar://23661056 +[39ac720] APPLIED rdar://23705483 +[acd56f6] APPLIED rdar://23754944 +[394d9a1] APPLIED rdar://23772602 +[3691f26] APPLIED rdar://23868354 +[8904f45] APPLIED rdar://23868354 +[6dbebd6] APPLIED rdar://23868354 +[b2ccfeb] APPLIED rdar://23868354 +[e7ca00f] APPLIED rdar://23868354 +[35eb408] APPLIED rdar://25159995 +[32411c2] APPLIED rdar://25159995 +[31586d5] APPLIED rdar://25159995 +[50faff5] APPLIED rdar://25159995 +[3ce4e3d] APPLIED rdar://25159995 +[b647aee] APPLIED rdar://25159995 +[ab7e16c] APPLIED rdar://25159995 +[cef2960] APPLIED rdar://25159995 +[dfa43cd] APPLIED rdar://25159995 +[8b9c3a9] APPLIED rdar://25159995 +[fefb6cf] APPLIED rdar://25159995 +[1a9c57f] APPLIED rdar://25159995 +[c04488a] APPLIED rdar://25159995 +[f1d58d1] APPLIED rdar://25159995 +[be83e85] APPLIED rdar://25159995 +[79fbb13] APPLIED rdar://25159995 +[6ead519] APPLIED rdar://25159995
+[1fa1513] APPLIED rdar://25159995 +[4a6ec51] APPLIED rdar://25159995 +[bc16cc9] APPLIED rdar://25159995 +[954ace4] APPLIED rdar://25159995 +[5ea30b5] APPLIED rdar://26822213 +[9f1e778] APPLIED rdar://26822213 +[3339b81] APPLIED rdar://26822213 +[4fa8d8d] APPLIED rdar://26822213 +[e922531] APPLIED rdar://26822213 +[195cbcf] APPLIED rdar://27303844 +[5b893c8] APPLIED rdar://27303844 +[92689ed] APPLIED rdar://27303844 +[ecc14fa] APPLIED rdar://27303844 +[2dbf83c] APPLIED rdar://27303844 +[78b9e82] APPLIED rdar://27303844 +[2c0e5ee] APPLIED rdar://27303844 diff --git a/autogen.sh b/autogen.sh old mode 100644 new mode 100755 diff --git a/config/config.h b/config/config.h index 894428199..ca3a1dbb8 100644 --- a/config/config.h +++ b/config/config.h @@ -13,6 +13,10 @@ don't. */ #define HAVE_DECL_FD_COPY 1 +/* Define to 1 if you have the declaration of `NOTE_LOWAT', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_LOWAT 1 + /* Define to 1 if you have the declaration of `NOTE_NONE', and to 0 if you don't. */ #define HAVE_DECL_NOTE_NONE 1 @@ -21,6 +25,10 @@ don't. */ #define HAVE_DECL_NOTE_REAP 1 +/* Define to 1 if you have the declaration of `NOTE_REVOKE', and to 0 if you + don't. */ +#define HAVE_DECL_NOTE_REVOKE 1 + /* Define to 1 if you have the declaration of `NOTE_SIGNAL', and to 0 if you don't. */ #define HAVE_DECL_NOTE_SIGNAL 1 @@ -45,6 +53,10 @@ you don't. */ #define HAVE_DECL_VQ_VERYLOWDISK 1 +/* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_QUOTA 1 + /* Define to 1 if you have the header file. */ #define HAVE_DLFCN_H 1 @@ -75,6 +87,9 @@ /* Define to 1 if you have the `mach_absolute_time' function. */ #define HAVE_MACH_ABSOLUTE_TIME 1 +/* Define to 1 if you have the `mach_port_construct' function. */ +#define HAVE_MACH_PORT_CONSTRUCT 1 + /* Define to 1 if you have the `malloc_create_zone' function. */ #define HAVE_MALLOC_CREATE_ZONE 1 @@ -102,17 +117,20 @@ /* Define to 1 if you have the header file. 
*/ /* #undef HAVE_PTHREAD_NP_H */ +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_QOS_H 1 + /* Define if pthread work queues are present */ #define HAVE_PTHREAD_WORKQUEUES 1 -/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_WORKQUEUE_H 1 -/* Define to 1 if you have the `_pthread_workqueue_init' function. */ -#define HAVE__PTHREAD_WORKQUEUE_INIT 1 +/* Define to 1 if you have the header file. */ +#define HAVE_PTHREAD_WORKQUEUE_PRIVATE_H 1 -/* Define to 1 if you have the header file. */ -#define HAVE_PTHREAD_QOS_H 1 +/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ +#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 @@ -147,8 +165,10 @@ /* Define to 1 if you have the header file. */ #define HAVE_UNISTD_H 1 -/* Define to the sub-directory in which libtool stores uninstalled libraries. - */ +/* Define to 1 if you have the `_pthread_workqueue_init' function. */ +#define HAVE__PTHREAD_WORKQUEUE_INIT 1 + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ #define LT_OBJDIR ".libs/" /* Name of package */ @@ -161,13 +181,16 @@ #define PACKAGE_NAME "libdispatch" /* Define to the full name and version of this package. */ -#define PACKAGE_STRING "libdispatch 1.2" +#define PACKAGE_STRING "libdispatch 1.3" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "libdispatch" +/* Define to the home page for this package. */ +#define PACKAGE_URL "http://libdispatch.macosforge.org" + /* Define to the version of this package. */ -#define PACKAGE_VERSION "1.2" +#define PACKAGE_VERSION "1.3" /* Define to 1 if you have the ANSI C header files. 
*/ #define STDC_HEADERS 1 @@ -184,20 +207,30 @@ /* Define to use POSIX semaphores */ /* #undef USE_POSIX_SEM */ -/* Version number of package */ -#define VERSION "1.2" - -/* Define to 1 if on AIX 3. - System headers sometimes define this. - We just want to avoid a redefinition error message. */ +/* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE -/* # undef _ALL_SOURCE */ +# define _ALL_SOURCE 1 #endif - /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif +/* Enable threading extensions on Solaris. */ +#ifndef _POSIX_PTHREAD_SEMANTICS +# define _POSIX_PTHREAD_SEMANTICS 1 +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +# define _TANDEM_SOURCE 1 +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +# define __EXTENSIONS__ 1 +#endif + + +/* Version number of package */ +#define VERSION "1.3" /* Define to 1 if on MINIX. */ /* #undef _MINIX */ @@ -211,14 +244,3 @@ /* Define if using Darwin $NOCANCEL */ #define __DARWIN_NON_CANCELABLE 1 - -/* Enable extensions on Solaris. */ -#ifndef __EXTENSIONS__ -# define __EXTENSIONS__ 1 -#endif -#ifndef _POSIX_PTHREAD_SEMANTICS -# define _POSIX_PTHREAD_SEMANTICS 1 -#endif -#ifndef _TANDEM_SOURCE -# define _TANDEM_SOURCE 1 -#endif diff --git a/configure.ac b/configure.ac index 223084c61..e5c7c5ed7 100644 --- a/configure.ac +++ b/configure.ac @@ -2,11 +2,11 @@ # When this file changes, rerun autogen.sh. 
# -AC_PREREQ(2.59) -AC_INIT([libdispatch], [1.2], [libdispatch@macosforge.org], [libdispatch]) +AC_PREREQ(2.69) +AC_INIT([libdispatch], [1.3], [libdispatch@macosforge.org], [libdispatch], [http://libdispatch.macosforge.org]) AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) -AC_CONFIG_HEADER([config/config.h]) +AC_CONFIG_HEADER([config/config_ac.h]) AC_CONFIG_MACRO_DIR([m4]) ac_clean_files=a.out.dSYM AM_MAINTAINER_MODE @@ -14,32 +14,42 @@ AM_MAINTAINER_MODE AC_PROG_CC([clang gcc cc]) AC_PROG_CXX([clang++ g++ c++]) AC_PROG_OBJC([clang gcc cc]) +AC_PROG_OBJCXX([clang++ g++ c++]) # # On Mac OS X, some required header files come from other source packages; # allow specifying where those are. # -AC_ARG_WITH([apple-libc-source], - [AS_HELP_STRING([--with-apple-libc-source], - [Specify path to Apple Libc source])], [ - apple_libc_source_pthreads_path=${withval}/pthreads - CPPFLAGS="$CPPFLAGS -I$apple_libc_source_pthreads_path" +AC_ARG_WITH([apple-libpthread-source], + [AS_HELP_STRING([--with-apple-libpthread-source], + [Specify path to Apple libpthread source])], [ + apple_libpthread_source_path=${withval} + CPPFLAGS="$CPPFLAGS -isystem $apple_libpthread_source_path" +]) + +AC_ARG_WITH([apple-libplatform-source], + [AS_HELP_STRING([--with-apple-libplatform-source], + [Specify path to Apple libplatform source])], [ + apple_libplatform_source_include_path=${withval}/include + CPPFLAGS="$CPPFLAGS -isystem $apple_libplatform_source_include_path" ]) AC_ARG_WITH([apple-libclosure-source], [AS_HELP_STRING([--with-apple-libclosure-source], [Specify path to Apple libclosure source])], [ apple_libclosure_source_path=${withval} - CPPFLAGS="$CPPFLAGS -I$apple_libclosure_source_path" + CPPFLAGS="$CPPFLAGS -isystem $apple_libclosure_source_path" ]) AC_ARG_WITH([apple-xnu-source], [AS_HELP_STRING([--with-apple-xnu-source], [Specify path to Apple XNU source])], [ + apple_xnu_source_libsyscall_path=${withval}/libsyscall + apple_xnu_source_libproc_path=${withval}/libsyscall/wrappers/libproc 
apple_xnu_source_libkern_path=${withval}/libkern apple_xnu_source_bsd_path=${withval}/bsd apple_xnu_source_osfmk_path=${withval}/osfmk - CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path" + CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path -isystem $apple_xnu_source_libsyscall_path -isystem $apple_xnu_source_libproc_path " ]) AC_ARG_WITH([apple-objc4-source], @@ -48,19 +58,12 @@ AC_ARG_WITH([apple-objc4-source], apple_objc4_source_runtime_path=${withval}/runtime ]) -AC_ARG_WITH([apple-libauto-source], - [AS_HELP_STRING([--with-apple-libauto-source], - [Specify path to Apple libauto source])], [ - apple_libauto_source_path=${withval} - CPPFLAGS="$CPPFLAGS -I$apple_libauto_source_path" -]) - AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])] ) AS_IF([test "x$dispatch_cv_system_privateheaders" != "xno"], - [CPPFLAGS="$CPPFLAGS -I/System/Library/Frameworks/System.framework/PrivateHeaders"] + [CPPFLAGS="$CPPFLAGS -isystem /System/Library/Frameworks/System.framework/PrivateHeaders"] ) # @@ -91,16 +94,79 @@ AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], [Define to use non-portable pthread TSD optimizations for Mac OS X)])] ) +AC_CANONICAL_TARGET + +# +# Enable building Swift overlay support into libdispatch +# +AC_ARG_WITH([swift-toolchain], + [AS_HELP_STRING([--with-swift-toolchain], [Specify path to Swift toolchain])], + [swift_toolchain_path=${withval} + AC_DEFINE(HAVE_SWIFT, 1, [Define if building for Swift]) + SWIFTC="$swift_toolchain_path/bin/swiftc" + case $target_os in + linux*) + os_string="linux" + ;; + *) + os_string=$target_os + ;; + esac + SWIFT_LIBDIR="$swift_toolchain_path/lib/swift/$os_string/$target_cpu" + have_swift=true], + [have_swift=false] +) 
+AM_CONDITIONAL(HAVE_SWIFT, $have_swift) +AC_SUBST([SWIFTC]) +AC_SUBST([SWIFT_LIBDIR]) + +# +# Enable use of gold linker when building the Swift overlay +# to avoid a symbol relocation issue. +# Ultimately the request to use gold should be passed in as an arg +# +AC_CHECK_PROG(use_gold_linker, ld.gold, true, false) +AM_CONDITIONAL(USE_GOLD_LINKER, $use_gold_linker) + +# +# Enable __thread based TSD on platforms where it is efficient +# Allow override based on command line argument to configure +# +AC_ARG_ENABLE([thread-local-storage], + [AS_HELP_STRING([--enable-thread-local-storage], + [Enable usage of thread local storage via __thread])],, + [case $target_os in + linux*) + enable_thread_local_storage=yes + ;; + *) + enable_thread_local_storage=no + esac] +) +AS_IF([test "x$enable_thread_local_storage" = "xyes"], + [AC_DEFINE(DISPATCH_USE_THREAD_LOCAL_STORAGE, 1, + [Enable usage of thread local storage via __thread])] +) + AC_USE_SYSTEM_EXTENSIONS -AM_INIT_AUTOMAKE([foreign no-dependencies]) +AM_INIT_AUTOMAKE([foreign no-dependencies subdir-objects]) LT_INIT([disable-static]) AC_PROG_INSTALL AC_PATH_PROGS(MIG, mig) +AC_PATH_PROG(DTRACE, dtrace) +AS_IF([test "x$DTRACE" != "x"], [use_dtrace=true],[ + use_dtrace=false + CPPFLAGS="$CPPFLAGS -DDISPATCH_USE_DTRACE=0" +]) +AM_CONDITIONAL(USE_DTRACE, $use_dtrace) AC_PATH_PROG(LEAKS, leaks) AS_IF([test "x$LEAKS" != "x"], - [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present])] + [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present]) + have_leaks=true], + [have_leaks=false] ) +AM_CONDITIONAL(HAVE_LEAKS, $have_leaks) DISPATCH_C_ATOMIC_BUILTINS @@ -124,25 +190,66 @@ AC_CHECK_HEADER(sys/event.h, [], [PKG_CHECK_MODULES(KQUEUE, libkqueue)] ) +AC_CHECK_FUNCS([strlcpy getprogname], [], + [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ + AC_DEFINE(HAVE_STRLCPY, 1, []) + AC_DEFINE(HAVE_GETPROGNAME, 1, []) + ])], [#include ] +) + # # Checks for header files. 
# AC_HEADER_STDC -AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h libproc_internal.h]) +AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h fcntl.h]) -# hack for pthread_machdep.h's #include -AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ +# hack for pthread/private headers +AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ saveCPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I." + ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h pthread_machdep.h + ln -fsh "$apple_libpthread_source_path"/private pthread ln -fsh "$apple_xnu_source_osfmk_path" System + mkdir -p mach && ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h mach ]) -AC_CHECK_HEADERS([pthread_machdep.h]) -AS_IF([test -n "$apple_xnu_source_osfmk_path"], [ - rm -f System +AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h]) + +# pthread_workqueues. +# Look for own version first, then system version. 
+AS_IF([test -f $srcdir/libpwq/configure.ac], + [AC_DEFINE(BUILD_OWN_PTHREAD_WORKQUEUES, 1, [Define if building pthread work queues from source]) + ac_configure_args="--disable-libpwq-install $ac_configure_args" + AC_CONFIG_SUBDIRS([libpwq]) + build_own_pthread_workqueues=true + AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) + have_pthread_workqueues=true], + [build_own_pthread_workqueues=false + AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], + [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) + have_pthread_workqueues=true], + [have_pthread_workqueues=false] + )] +) +AM_CONDITIONAL(BUILD_OWN_PTHREAD_WORKQUEUES, $build_own_pthread_workqueues) +AM_CONDITIONAL(HAVE_PTHREAD_WORKQUEUES, $have_pthread_workqueues) + +AC_CHECK_HEADERS([libproc_internal.h], [], [], [#include ]) +AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np _pthread_workqueue_init]) +AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ + rm -f pthread_machdep.h pthread System mach/coalition.h CPPFLAGS="$saveCPPFLAGS" + AC_CONFIG_COMMANDS([src/pthread_machdep.h], + [ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h src/pthread_machdep.h], + [apple_libpthread_source_path="$apple_libpthread_source_path"]) + AC_CONFIG_COMMANDS([src/pthread], + [ln -fsh "$apple_libpthread_source_path"/private src/pthread], + [apple_libpthread_source_path="$apple_libpthread_source_path"]) AC_CONFIG_COMMANDS([src/System], [ln -fsh "$apple_xnu_source_osfmk_path" src/System], [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) + AC_CONFIG_COMMANDS([src/mach/coalition.h], + [ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h src/mach], + [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) ]) # hack for xnu/bsd/sys/event.h EVFILT_SOCK declaration AS_IF([test -n "$apple_xnu_source_bsd_path"], [ @@ -193,28 +300,20 @@ AC_CHECK_HEADER([mach/mach.h], [ have_mach=true], 
[have_mach=false] ) AM_CONDITIONAL(USE_MIG, $have_mach) - -# -# We use the availability of pthread_workqueue.h to decide whether to compile -# in support for pthread work queues. -# -AC_CHECK_HEADER([pthread_workqueue.h], - [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present])] -) -AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np]) +AC_CHECK_FUNCS([mach_port_construct]) # # Find functions and declarations we care about. # AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [], [[#include ]]) -AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_SIGNAL], [], [], +AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [], [[#include ]]) AC_CHECK_DECLS([FD_COPY], [], [], [[#include ]]) AC_CHECK_DECLS([SIGEMT], [], [], [[#include ]]) -AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK], [], [], [[#include ]]) +AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include ]]) AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include ]]) -AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf getprogname]) +AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf]) AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], @@ -273,15 +372,17 @@ AS_IF([test "x$dispatch_cv_cc_omit_leaf_fp" != "xno"], [ ]) AC_SUBST([OMIT_LEAF_FP_FLAGS]) -AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ - saveLDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6 -dead_strip" - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - extern int foo; int foo;], [foo = 0;])], - [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) - LDFLAGS="$saveLDFLAGS" +AS_IF([test "x$have_mach" = "xtrue"], [ + AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ + saveLDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 
1.2.3 -current_version 4.5.6 -dead_strip" + AC_LINK_IFELSE([AC_LANG_PROGRAM([ + extern int foo; int foo;], [foo = 0;])], + [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) + LDFLAGS="$saveLDFLAGS" + ]) ]) -AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" != "xno"]) +AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"]) # # Temporary: some versions of clang do not mark __builtin_trap() as @@ -295,5 +396,11 @@ AC_COMPILE_IFELSE( # # Generate Makefiles. # -AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile]) +AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile tests/Makefile]) + +# +# Generate testsuite links +# +AC_CONFIG_LINKS([tests/dispatch:$top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh]) + AC_OUTPUT diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am index 6dc850b21..53ea5986c 100644 --- a/dispatch/Makefile.am +++ b/dispatch/Makefile.am @@ -2,10 +2,15 @@ # # +if HAVE_SWIFT +dispatchdir=${prefix}/lib/swift/dispatch +else dispatchdir=$(includedir)/dispatch +endif dispatch_HEADERS= \ base.h \ + block.h \ data.h \ dispatch.h \ group.h \ @@ -18,3 +23,6 @@ dispatch_HEADERS= \ source.h \ time.h +if HAVE_SWIFT +dispatch_HEADERS+=module.map +endif diff --git a/dispatch/base.h b/dispatch/base.h index 01d5ec5db..8adfb0bdb 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -25,6 +25,22 @@ #error "Please #include instead of this file directly." 
#endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif +#ifndef __has_include +#define __has_include(x) 0 +#endif +#ifndef __has_feature +#define __has_feature(x) 0 +#endif +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif +#ifndef __has_extension +#define __has_extension(x) 0 +#endif + #if __GNUC__ #define DISPATCH_NORETURN __attribute__((__noreturn__)) #define DISPATCH_NOTHROW __attribute__((__nothrow__)) @@ -48,6 +64,7 @@ #define DISPATCH_MALLOC __attribute__((__malloc__)) #define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__)) #define DISPATCH_UNAVAILABLE __attribute__((__unavailable__)) +#define DISPATCH_UNAVAILABLE_MSG(msg) __attribute__((__unavailable__(msg))) #else /*! @parseOnly */ #define DISPATCH_NORETURN @@ -83,6 +100,24 @@ #define DISPATCH_ALWAYS_INLINE /*! @parseOnly */ #define DISPATCH_UNAVAILABLE +/*! @parseOnly */ +#define DISPATCH_UNAVAILABLE_MSG(msg) +#endif + +#ifdef __linux__ +#define DISPATCH_LINUX_UNAVAILABLE() \ + DISPATCH_UNAVAILABLE_MSG( \ + "This interface is unavailable on linux systems") +#else +#define DISPATCH_LINUX_UNAVAILABLE() +#endif + +#ifndef DISPATCH_ALIAS_V2 +#if TARGET_OS_MAC +#define DISPATCH_ALIAS_V2(sym) __asm__("_" #sym "$V2") +#else +#define DISPATCH_ALIAS_V2(sym) +#endif #endif #if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ @@ -108,23 +143,58 @@ #if __GNUC__ #define DISPATCH_EXPECT(x, v) __builtin_expect((x), (v)) +#define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") #else #define DISPATCH_EXPECT(x, v) (x) +#define dispatch_compiler_barrier() do { } while (0) +#endif + +#if __has_attribute(not_tail_called) +#define DISPATCH_NOT_TAIL_CALLED __attribute__((__not_tail_called__)) +#else +#define DISPATCH_NOT_TAIL_CALLED +#endif + +#if __has_builtin(__builtin_assume) +#define DISPATCH_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) +#else +#define DISPATCH_COMPILER_CAN_ASSUME(expr) ((void)(expr)) +#endif + +#if __has_attribute(noescape) +#define 
DISPATCH_NOESCAPE __attribute__((__noescape__)) +#else +#define DISPATCH_NOESCAPE +#endif + +#if __has_feature(assume_nonnull) +#define DISPATCH_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define DISPATCH_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define DISPATCH_ASSUME_NONNULL_BEGIN +#define DISPATCH_ASSUME_NONNULL_END +#endif + +#if !__has_feature(nullability) +#ifndef _Nullable +#define _Nullable +#endif +#ifndef _Nonnull +#define _Nonnull +#endif +#ifndef _Null_unspecified +#define _Null_unspecified +#endif #endif #ifndef DISPATCH_RETURNS_RETAINED_BLOCK -#if defined(__has_attribute) #if __has_attribute(ns_returns_retained) #define DISPATCH_RETURNS_RETAINED_BLOCK __attribute__((__ns_returns_retained__)) #else #define DISPATCH_RETURNS_RETAINED_BLOCK #endif -#else -#define DISPATCH_RETURNS_RETAINED_BLOCK -#endif #endif -#if defined(__has_feature) && defined(__has_extension) #if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) #define DISPATCH_ENUM(name, type, ...) \ typedef enum : type { __VA_ARGS__ } name##_t @@ -132,17 +202,47 @@ #define DISPATCH_ENUM(name, type, ...) \ enum { __VA_ARGS__ }; typedef type name##_t #endif + #if __has_feature(enumerator_attributes) #define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING +#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version) #else #define DISPATCH_ENUM_AVAILABLE_STARTING(...) +#define DISPATCH_ENUM_AVAILABLE(...) #endif + +#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ + SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#define DISPATCH_SWIFT3_OVERLAY 1 #else -#define DISPATCH_ENUM(name, type, ...) \ - enum { __VA_ARGS__ }; typedef type name##_t -#define DISPATCH_ENUM_AVAILABLE_STARTING(...) 
+#define DISPATCH_SWIFT3_OVERLAY 0 +#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 + +#if __has_feature(attribute_availability_swift) +#define DISPATCH_SWIFT_UNAVAILABLE(_msg) \ + __attribute__((__availability__(swift, unavailable, message=_msg))) +#else +#define DISPATCH_SWIFT_UNAVAILABLE(_msg) +#endif + +#if DISPATCH_SWIFT3_OVERLAY +#define DISPATCH_SWIFT3_UNAVAILABLE(_msg) DISPATCH_SWIFT_UNAVAILABLE(_msg) +#else +#define DISPATCH_SWIFT3_UNAVAILABLE(_msg) +#endif + +#if __has_attribute(swift_private) +#define DISPATCH_REFINED_FOR_SWIFT __attribute__((__swift_private__)) +#else +#define DISPATCH_REFINED_FOR_SWIFT +#endif + +#if __has_attribute(swift_name) +#define DISPATCH_SWIFT_NAME(_name) __attribute__((__swift_name__(#_name))) +#else +#define DISPATCH_SWIFT_NAME(_name) #endif -typedef void (*dispatch_function_t)(void *); +typedef void (*dispatch_function_t)(void *_Nullable); #endif diff --git a/dispatch/block.h b/dispatch/block.h index e82f665b3..cd56b230d 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -32,6 +32,8 @@ * @group Dispatch block objects */ +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -270,7 +272,8 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW void -dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block); +dispatch_block_perform(dispatch_block_flags_t flags, + DISPATCH_NOESCAPE dispatch_block_t block); /*! * @function dispatch_block_wait @@ -288,7 +291,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block); * dispatch block object may either be waited on once and executed once, * or it may be executed any number of times. The behavior of any other * combination is undefined. 
Submission to a dispatch queue counts as an - * execution, even if cancelation (dispatch_block_cancel) means the block's + * execution, even if cancellation (dispatch_block_cancel) means the block's * code never runs. * * The result of calling this function from multiple threads simultaneously @@ -417,6 +420,8 @@ dispatch_block_testcancel(dispatch_block_t block); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __BLOCKS__ #endif // __DISPATCH_BLOCK__ diff --git a/dispatch/data.h b/dispatch/data.h index d65658478..7ceee0647 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @header @@ -39,7 +41,7 @@ __BEGIN_DECLS * @typedef dispatch_data_t * A dispatch object representing memory regions. */ -DISPATCH_DECL(dispatch_data); +DISPATCH_DATA_DECL(dispatch_data); /*! * @var dispatch_data_empty @@ -120,8 +122,8 @@ DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create(const void *buffer, size_t size, - dispatch_queue_t queue, - dispatch_block_t destructor); + dispatch_queue_t _Nullable queue, + dispatch_block_t _Nullable destructor); #endif /* __BLOCKS__ */ /*! @@ -161,8 +163,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_map(dispatch_data_t data, - const void **buffer_ptr, - size_t *size_ptr); + const void *_Nullable *_Nullable buffer_ptr, + size_t *_Nullable size_ptr); /*! 
* @function dispatch_data_create_concat @@ -275,4 +277,6 @@ dispatch_data_copy_region(dispatch_data_t data, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_DATA__ */ diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index bb32bdf31..a26b95107 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -24,8 +24,22 @@ #ifdef __APPLE__ #include #include -#endif +#else +#define __OSX_AVAILABLE_STARTING(x, y) +#define __OSX_AVAILABLE_BUT_DEPRECATED(...) +#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...) +#define __OSX_AVAILABLE(...) +#define __IOS_AVAILABLE(...) +#define __TVOS_AVAILABLE(...) +#define __WATCHOS_AVAILABLE(...) +#define __OSX_DEPRECATED(...) +#define __IOS_DEPRECATED(...) +#define __TVOS_DEPRECATED(...) +#define __WATCHOS_DEPRECATED(...) +#endif // __APPLE__ + #include +#include #include #include #include @@ -33,11 +47,13 @@ #include #include -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x, y) +#if defined(__linux__) && defined(__has_feature) +#if __has_feature(modules) +#include // for off_t (to match Glibc.modulemap) +#endif #endif -#define DISPATCH_API_VERSION 20141121 +#define DISPATCH_API_VERSION 20160712 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/group.h b/dispatch/group.h index 77420c123..c50ad89d1 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @typedef dispatch_group_t * @abstract @@ -119,7 +121,7 @@ DISPATCH_NOTHROW void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -132,8 +134,7 @@ dispatch_group_async_f(dispatch_group_t group, * @discussion * This function waits for the completion of the blocks associated with the * given dispatch group, and returns after all blocks have completed or when - * the specified timeout has elapsed. 
When a timeout occurs, the group is - * restored to its original state. + * the specified timeout has elapsed. * * This function will return immediately if there are no blocks associated * with the dispatch group (i.e. the group is empty). @@ -229,7 +230,7 @@ DISPATCH_NOTHROW void dispatch_group_notify_f(dispatch_group_t group, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -260,7 +261,7 @@ dispatch_group_enter(dispatch_group_t group); * * @discussion * Calling this function indicates block has completed and left the dispatch - * groupJ by a means other than dispatch_group_async(). + * group by a means other than dispatch_group_async(). * * @param group * The dispatch group to update. @@ -273,4 +274,6 @@ dispatch_group_leave(dispatch_group_t group); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/introspection.h b/dispatch/introspection.h index d20d90ad8..9cfb4d1c0 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -23,6 +23,8 @@ #include +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @header * @@ -152,7 +154,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, - void *context, dispatch_function_t function); + void *_Nullable context, dispatch_function_t function); /*! * @function dispatch_introspection_hook_queue_callout_end @@ -177,8 +179,10 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, - void *context, dispatch_function_t function); + void *_Nullable context, dispatch_function_t function); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/io.h b/dispatch/io.h index d53d488f7..5814bc0f7 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! 
@header @@ -38,7 +40,7 @@ __BEGIN_DECLS * The application may set policies on the dispatch I/O channel to indicate the * desired frequency of I/O handlers for long-running operations. * - * Dispatch I/O also provides a memory managment model for I/O buffers that + * Dispatch I/O also provides a memory management model for I/O buffers that * avoids unnecessary copying of data when pipelined between channels. Dispatch * I/O monitors the overall memory pressure and I/O access patterns for the * application to optimize resource utilization. @@ -145,7 +147,7 @@ void dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, - void (^handler)(dispatch_data_t data, int error)); + void (^handler)(dispatch_data_t _Nullable data, int error)); #endif /* __BLOCKS__ */ /*! @@ -168,7 +170,7 @@ DISPATCH_DECL(dispatch_io); * bytes. Read and write operations on a channel of this type are performed * serially (in order of creation) and read/write data at the file pointer * position that is current at the time the operation starts executing. - * Operations of different type (read vs. write) may be perfomed simultaneously. + * Operations of different type (read vs. write) may be performed simultaneously. * Offsets passed to operations on a channel of this type are ignored. * * @const DISPATCH_IO_RANDOM A dispatch I/O channel representing a random @@ -302,7 +304,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, * @param data The data object to be handled. * @param error An errno condition for the operation. */ -typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data, +typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, int error); /*! 
@@ -586,4 +588,6 @@ dispatch_io_set_interval(dispatch_io_t channel, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_IO__ */ diff --git a/dispatch/module.map b/dispatch/module.map new file mode 100644 index 000000000..6f3c8aab8 --- /dev/null +++ b/dispatch/module.map @@ -0,0 +1,20 @@ +module Dispatch { + requires blocks + export * + link "dispatch" + link "BlocksRuntime" +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} + +module CDispatch [system] [extern_c] { + umbrella header "dispatch.h" + module * { export * } + export * + requires blocks + link "dispatch" + link "BlocksRuntime" +} diff --git a/dispatch/module.modulemap b/dispatch/module.modulemap new file mode 100644 index 000000000..addaae436 --- /dev/null +++ b/dispatch/module.modulemap @@ -0,0 +1,10 @@ +module Dispatch [system] [extern_c] { + umbrella header "dispatch.h" + module * { export * } + export * +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} diff --git a/dispatch/object.h b/dispatch/object.h index a9b805e75..8b2030138 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @typedef dispatch_object_t * @@ -46,16 +48,23 @@ * analyzer, and enables them to be added to Cocoa collections. * See for details. 
*/ -OS_OBJECT_DECL(dispatch_object); +OS_OBJECT_DECL_CLASS(dispatch_object); + +#if OS_OBJECT_SWIFT3 +#define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, dispatch_object) +#else // OS_OBJECT_SWIFT3 #define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object) -#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) -#define DISPATCH_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED + DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void _dispatch_object_validate(dispatch_object_t object) { void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object; (void)isa; } +#endif // OS_OBJECT_SWIFT3 + +#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) +#define DISPATCH_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED #elif defined(__cplusplus) && !defined(__DISPATCH_BUILDING_DISPATCH__) /* * Dispatch objects are NOT C++ objects. Nevertheless, we can at least keep C++ @@ -99,6 +108,38 @@ typedef union { #define DISPATCH_RETURNS_RETAINED #endif +#if OS_OBJECT_SWIFT3 && OS_OBJECT_USE_OBJC +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s \ + _dispatch_source_type_##name; \ + OS_OBJECT_DECL_PROTOCOL(dispatch_source_##name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL( \ + dispatch_source, dispatch_source_##name) +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); \ + OS_OBJECT_DECL_PROTOCOL(name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, name) +#ifndef DISPATCH_DATA_DECL +#define DISPATCH_DATA_DECL(name) OS_OBJECT_DECL_SWIFT(name) +#endif // DISPATCH_DATA_DECL +#elif !TARGET_OS_WIN32 +/*! @parseOnly */ +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); +/*! @parseOnly */ +#define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) +/*! 
@parseOnly */ +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT const struct dispatch_source_type_s \ + _dispatch_source_type_##name +#else +#define DISPATCH_SOURCE_DECL(name) \ + DISPATCH_DECL(name); +#define DISPATCH_SOURCE_TYPE_DECL(name) \ + DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name +#define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) +#endif + #ifdef __BLOCKS__ /*! * @typedef dispatch_block_t @@ -162,11 +203,13 @@ __BEGIN_DECLS */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void dispatch_retain(dispatch_object_t object); #if OS_OBJECT_USE_OBJC_RETAIN_RELEASE #undef dispatch_retain -#define dispatch_retain(object) ({ dispatch_object_t _o = (object); \ +#define dispatch_retain(object) \ + __extension__({ dispatch_object_t _o = (object); \ _dispatch_object_validate(_o); (void)[_o retain]; }) #endif @@ -188,11 +231,13 @@ dispatch_retain(dispatch_object_t object); */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void dispatch_release(dispatch_object_t object); #if OS_OBJECT_USE_OBJC_RETAIN_RELEASE #undef dispatch_release -#define dispatch_release(object) ({ dispatch_object_t _o = (object); \ +#define dispatch_release(object) \ + __extension__({ dispatch_object_t _o = (object); \ _dispatch_object_validate(_o); [_o release]; }) #endif @@ -211,7 +256,7 @@ dispatch_release(dispatch_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_get_context(dispatch_object_t object); /*! 
@@ -228,9 +273,9 @@ dispatch_get_context(dispatch_object_t object); * */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void -dispatch_set_context(dispatch_object_t object, void *context); +dispatch_set_context(dispatch_object_t object, void *_Nullable context); /*! * @function dispatch_set_finalizer_f @@ -254,10 +299,38 @@ dispatch_set_context(dispatch_object_t object, void *context); * context of the dispatch object at the time the finalizer call is made. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW //DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_finalizer_f(dispatch_object_t object, - dispatch_function_t finalizer); + dispatch_function_t _Nullable finalizer); + +/*! + * @function dispatch_activate + * + * @abstract + * Activates the specified dispatch object. + * + * @discussion + * Dispatch objects such as queues and sources may be created in an inactive + * state. Objects in this state have to be activated before any blocks + * associated with them will be invoked. + * + * The target queue of inactive objects can be changed using + * dispatch_set_target_queue(). Change of target queue is no longer permitted + * once an initially inactive object has been activated. + * + * Calling dispatch_activate() on an active object has no effect. + * Releasing the last reference count on an inactive object is undefined. + * + * @param object + * The object to be activated. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_activate(dispatch_object_t object); /*! * @function dispatch_suspend @@ -288,6 +361,20 @@ dispatch_suspend(dispatch_object_t object); * @abstract * Resumes the invocation of blocks on a dispatch object. 
* + * @discussion + * Dispatch objects can be suspended with dispatch_suspend(), which increments + * an internal suspension count. dispatch_resume() is the inverse operation, + * and consumes suspension counts. When the last suspension count is consumed, + * blocks associated with the object will be invoked again. + * + * For backward compatibility reasons, dispatch_resume() on an inactive and not + * otherwise suspended dispatch source object has the same effect as calling + * dispatch_activate(). For new code, using dispatch_activate() is preferred. + * + * If the specified object has zero suspension count and is not an inactive + * source, this function will result in an assertion and the process being + * terminated. + * * @param object * The object to be resumed. * The result of passing NULL in this parameter is undefined. @@ -468,4 +555,6 @@ dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/once.h b/dispatch/once.h index 32cf2e8de..a8f56441c 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -35,6 +37,7 @@ __BEGIN_DECLS * A predicate for use with dispatch_once(). It must be initialized to zero. * Note: static and global variables default to zero. */ +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") typedef long dispatch_once_t; /*! 
@@ -57,16 +60,23 @@ typedef long dispatch_once_t; #ifdef __BLOCKS__ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -dispatch_once(dispatch_once_t *predicate, dispatch_block_t block); +dispatch_once(dispatch_once_t *predicate, + DISPATCH_NOESCAPE dispatch_block_t block); DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -_dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) +_dispatch_once(dispatch_once_t *predicate, + DISPATCH_NOESCAPE dispatch_block_t block) { if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { dispatch_once(predicate, block); + } else { + dispatch_compiler_barrier(); } + DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l); } #undef dispatch_once #define dispatch_once _dispatch_once @@ -74,23 +84,30 @@ _dispatch_once(dispatch_once_t *predicate, dispatch_block_t block) __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -dispatch_once_f(dispatch_once_t *predicate, void *context, +dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function); DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void -_dispatch_once_f(dispatch_once_t *predicate, void *context, +_dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function) { if (DISPATCH_EXPECT(*predicate, ~0l) != ~0l) { dispatch_once_f(predicate, context, function); + } else { + dispatch_compiler_barrier(); } + DISPATCH_COMPILER_CAN_ASSUME(*predicate == ~0l); } #undef dispatch_once_f #define dispatch_once_f _dispatch_once_f __END_DECLS 
+DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/queue.h b/dispatch/queue.h index b3cb54f9a..264c34418 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -26,6 +26,12 @@ #include // for HeaderDoc #endif +#if __has_include() +#include +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @header * @@ -131,7 +137,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -168,7 +174,7 @@ dispatch_async_f(dispatch_queue_t queue, __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); +dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); #endif /*! @@ -197,7 +203,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -230,7 +236,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, - void (^block)(size_t)); + DISPATCH_NOESCAPE void (^block)(size_t)); #endif /*! @@ -263,8 +269,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, - void *context, - void (*work)(void *, size_t)); + void *_Nullable context, + void (*work)(void *_Nullable, size_t)); /*! * @function dispatch_get_current_queue @@ -288,7 +294,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * When dispatch_get_current_queue() is called on the main thread, it may * or may not return the same value as dispatch_get_main_queue(). 
Comparing * the two is not a valid way to test whether code is executing on the - * main thread. + * main thread (see dispatch_assert_queue() and dispatch_assert_queue_not()). * * This function is deprecated and will be removed in a future release. * @@ -365,7 +371,6 @@ typedef long dispatch_queue_priority_t; * Alias for qos_class_t type. */ #if __has_include() -#include typedef qos_class_t dispatch_qos_class_t; #else typedef unsigned int dispatch_qos_class_t; @@ -425,12 +430,24 @@ DISPATCH_DECL(dispatch_queue_attr); /*! * @const DISPATCH_QUEUE_SERIAL + * * @discussion A dispatch queue that invokes blocks serially in FIFO order. */ #define DISPATCH_QUEUE_SERIAL NULL +/*! + * @const DISPATCH_QUEUE_SERIAL_INACTIVE + * + * @discussion + * A dispatch queue that invokes blocks serially in FIFO order, and that is + * created initially inactive. See dispatch_queue_attr_make_initially_inactive(). + */ +#define DISPATCH_QUEUE_SERIAL_INACTIVE \ + dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL) + /*! * @const DISPATCH_QUEUE_CONCURRENT + * * @discussion A dispatch queue that may invoke blocks concurrently and supports * barrier blocks submitted with the dispatch barrier API. */ @@ -441,12 +458,173 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; +/*! + * @const DISPATCH_QUEUE_CONCURRENT_INACTIVE + * + * @discussion + * A dispatch queue that may invoke blocks concurrently and supports barrier + * blocks submitted with the dispatch barrier API, and that is created initially + * inactive. See dispatch_queue_attr_make_initially_inactive(). + */ +#define DISPATCH_QUEUE_CONCURRENT_INACTIVE \ + dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_CONCURRENT) + +/*! 
+ * @function dispatch_queue_attr_make_initially_inactive + * + * @abstract + * Returns an attribute value which may be provided to dispatch_queue_create() + * or dispatch_queue_create_with_target(), in order to make the created queue + * initially inactive. + * + * @discussion + * Dispatch queues may be created in an inactive state. Queues in this state + * have to be activated before any blocks associated with them will be invoked. + * + * A queue in inactive state cannot be deallocated, dispatch_activate() must be + * called before the last reference to a queue created with this attribute is + * released. + * + * The target queue of a queue in inactive state can be changed using + * dispatch_set_target_queue(). Change of target queue is no longer permitted + * once an initially inactive queue has been activated. + * + * @param attr + * A queue attribute value to be combined with the initially inactive attribute. + * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create() + * and dispatch_queue_create_with_target(). + * The new value combines the attributes specified by the 'attr' parameter with + * the initially inactive attribute. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_initially_inactive( + dispatch_queue_attr_t _Nullable attr); + +/*! + * @const DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL + * + * @discussion + * A dispatch queue created with this attribute invokes blocks serially in FIFO + * order, and surrounds execution of any block submitted asynchronously to it + * with the equivalent of an individual Objective-C @autoreleasepool + * scope. + * + * See dispatch_queue_attr_make_with_autorelease_frequency(). 
+ */ +#define DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL \ + dispatch_queue_attr_make_with_autorelease_frequency(\ + DISPATCH_QUEUE_SERIAL, DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) + +/*! + * @const DISPATCH_QUEUE_CONCURRENT_WITH_AUTORELEASE_POOL + * + * @discussion + * A dispatch queue created with this attribute may invoke blocks concurrently + * and supports barrier blocks submitted with the dispatch barrier API. It also + * surrounds execution of any block submitted asynchronously to it with the + * equivalent of an individual Objective-C @autoreleasepool + * + * See dispatch_queue_attr_make_with_autorelease_frequency(). + */ +#define DISPATCH_QUEUE_CONCURRENT_WITH_AUTORELEASE_POOL \ + dispatch_queue_attr_make_with_autorelease_frequency(\ + DISPATCH_QUEUE_CONCURRENT, DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) + +/*! + * @typedef dispatch_autorelease_frequency_t + * Values to pass to the dispatch_queue_attr_make_with_autorelease_frequency() + * function. + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + * Dispatch queues with this autorelease frequency inherit the behavior from + * their target queue. This is the default behavior for manually created queues. + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + * Dispatch queues with this autorelease frequency push and pop an autorelease + * pool around the execution of every block that was submitted to it + * asynchronously. + * @see dispatch_queue_attr_make_with_autorelease_frequency(). + * + * @const DISPATCH_AUTORELEASE_FREQUENCY_NEVER + * Dispatch queues with this autorelease frequency never set up an individual + * autorelease pool around the execution of a block that is submitted to it + * asynchronously. This is the behavior of the global concurrent queues. 
+ */ +DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0, + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1, + DISPATCH_AUTORELEASE_FREQUENCY_NEVER + DISPATCH_ENUM_AVAILABLE(OSX, 10.12) + DISPATCH_ENUM_AVAILABLE(IOS, 10.0) + DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) + DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2, +); + +/*! + * @function dispatch_queue_attr_make_with_autorelease_frequency + * + * @abstract + * Returns a dispatch queue attribute value with the autorelease frequency + * set to the specified value. + * + * @discussion + * When a queue uses the per-workitem autorelease frequency (either directly + * or inherited from its target queue), any block submitted asynchronously to + * this queue (via dispatch_async(), dispatch_barrier_async(), + * dispatch_group_notify(), etc...) is executed as if surrounded by an individual + * Objective-C @autoreleasepool scope. + * + * Autorelease frequency has no effect on blocks that are submitted + * synchronously to a queue (via dispatch_sync(), dispatch_barrier_sync()). + * + * The global concurrent queues have the DISPATCH_AUTORELEASE_FREQUENCY_NEVER + * behavior. Manually created dispatch queues use + * DISPATCH_AUTORELEASE_FREQUENCY_INHERIT by default. + * + * Queues created with this attribute cannot change target queues after having + * been activated. See dispatch_set_target_queue() and dispatch_activate(). + * + * @param attr + * A queue attribute value to be combined with the specified autorelease + * frequency or NULL. + * + * @param frequency + * The requested autorelease frequency. 
+ * + * @return + * Returns an attribute value which may be provided to dispatch_queue_create() + * or NULL if an invalid autorelease frequency was requested. + * This new value combines the attributes specified by the 'attr' parameter and + * the chosen autorelease frequency. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +dispatch_queue_attr_t +dispatch_queue_attr_make_with_autorelease_frequency( + dispatch_queue_attr_t _Nullable attr, + dispatch_autorelease_frequency_t frequency); + /*! * @function dispatch_queue_attr_make_with_qos_class * * @abstract * Returns an attribute value which may be provided to dispatch_queue_create() - * in order to assign a QOS class and relative priority to the queue. + * or dispatch_queue_create_with_target(), in order to assign a QOS class and + * relative priority to the queue. * * @discussion * When specified in this manner, the QOS class and relative priority take @@ -487,17 +665,86 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * results in NULL being returned. * * @return - * Returns an attribute value which may be provided to dispatch_queue_create(), - * or NULL if an invalid QOS class was requested. + * Returns an attribute value which may be provided to dispatch_queue_create() + * and dispatch_queue_create_with_target(), or NULL if an invalid QOS class was + * requested. * The new value combines the attributes specified by the 'attr' parameter and * the new QOS class and relative priority. */ __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t -dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr, +dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, dispatch_qos_class_t qos_class, int relative_priority); +/*! 
+ * @const DISPATCH_TARGET_QUEUE_DEFAULT + * @discussion Constant to pass to the dispatch_queue_create_with_target(), + * dispatch_set_target_queue() and dispatch_source_create() functions to + * indicate that the default target queue for the object type in question + * should be used. + */ +#define DISPATCH_TARGET_QUEUE_DEFAULT NULL + +/*! + * @function dispatch_queue_create_with_target + * + * @abstract + * Creates a new dispatch queue with a specified target queue. + * + * @discussion + * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute + * invoke blocks serially in FIFO order. + * + * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may + * invoke blocks concurrently (similarly to the global concurrent queues, but + * potentially with more overhead), and support barrier blocks submitted with + * the dispatch barrier API, which e.g. enables the implementation of efficient + * reader-writer schemes. + * + * When a dispatch queue is no longer needed, it should be released with + * dispatch_release(). Note that any pending blocks submitted to a queue will + * hold a reference to that queue. Therefore a queue will not be deallocated + * until all pending blocks have finished. + * + * When using a dispatch queue attribute @a attr specifying a QoS class (derived + * from the result of dispatch_queue_attr_make_with_qos_class()), passing the + * result of dispatch_get_global_queue() in @a target will ignore the QoS class + * of that global queue and will use the global queue with the QoS class + * specified by attr instead. + * + * Queues created with dispatch_queue_create_with_target() cannot have their + * target queue changed, unless created inactive (See + * dispatch_queue_attr_make_initially_inactive()), in which case the target + * queue can be changed until the newly created queue is activated with + * dispatch_activate(). + * + * @param label + * A string label to attach to the queue. 
+ * This parameter is optional and may be NULL. + * + * @param attr + * A predefined attribute such as DISPATCH_QUEUE_SERIAL, + * DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * a dispatch_queue_attr_make_with_* function. + * + * @param target + * The target queue for the newly created queue. The target queue is retained. + * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target + * queue to the default target queue for the given queue type. + * + * @result + * The newly created dispatch queue. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +dispatch_queue_create_with_target(const char *_Nullable label, + dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) + DISPATCH_ALIAS_V2(dispatch_queue_create_with_target); + /*! * @function dispatch_queue_create * @@ -534,8 +781,9 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t attr, * This parameter is optional and may be NULL. * * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). + * A predefined attribute such as DISPATCH_QUEUE_SERIAL, + * DISPATCH_QUEUE_CONCURRENT, or the result of a call to + * a dispatch_queue_attr_make_with_* function. * * @result * The newly created dispatch queue. @@ -544,7 +792,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); +dispatch_queue_create(const char *_Nullable label, + dispatch_queue_attr_t _Nullable attr); /*! 
* @const DISPATCH_CURRENT_QUEUE_LABEL @@ -572,7 +821,7 @@ dispatch_queue_create(const char *label, dispatch_queue_attr_t attr); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * -dispatch_queue_get_label(dispatch_queue_t queue); +dispatch_queue_get_label(dispatch_queue_t _Nullable queue); /*! * @function dispatch_queue_get_qos_class @@ -612,15 +861,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW dispatch_qos_class_t dispatch_queue_get_qos_class(dispatch_queue_t queue, - int *relative_priority_ptr); - -/*! - * @const DISPATCH_TARGET_QUEUE_DEFAULT - * @discussion Constant to pass to the dispatch_set_target_queue() and - * dispatch_source_create() functions to indicate that the default target queue - * for the given object type should be used. - */ -#define DISPATCH_TARGET_QUEUE_DEFAULT NULL + int *_Nullable relative_priority_ptr); /*! * @function dispatch_set_target_queue @@ -657,6 +898,20 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * For all other dispatch object types, the only function of the target queue * is to determine where an object's finalizer function is invoked. * + * In general, changing the target queue of an object is an asynchronous + * operation that doesn't take effect immediately, and doesn't affect blocks + * already associated with the specified object. + * + * However, if an object is inactive at the time dispatch_set_target_queue() is + * called, then the target queue change takes effect immediately, and will + * affect blocks already associated with the specified object. After an + * initially inactive object has been activated, calling + * dispatch_set_target_queue() results in an assertion and the process being + * terminated. + * + * If a dispatch queue is active and targeted by other dispatch objects, + * changing its target queue results in undefined behavior. 
+ * * @param object * The object to modify. * The result of passing NULL in this parameter is undefined. @@ -668,9 +923,10 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * to the default target queue for the given object type. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW // DISPATCH_NONNULL1 +DISPATCH_EXPORT DISPATCH_NOTHROW void -dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t queue); +dispatch_set_target_queue(dispatch_object_t object, + dispatch_queue_t _Nullable queue); /*! * @function dispatch_main @@ -751,7 +1007,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -831,7 +1087,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_async_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -858,7 +1114,8 @@ dispatch_barrier_async_f(dispatch_queue_t queue, __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_barrier_sync(dispatch_queue_t queue, dispatch_block_t block); +dispatch_barrier_sync(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); #endif /*! @@ -890,7 +1147,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_sync_f(dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_function_t work); /*! @@ -920,7 +1177,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, * The key to set the context for, typically a pointer to a static variable * specific to the subsystem. Keys are only compared as pointers and never * dereferenced. Passing a string constant directly is not recommended. 
- * The NULL key is reserved and attemps to set a context for it are ignored. + * The NULL key is reserved and attempts to set a context for it are ignored. * * @param context * The new subsystem-specific context for the object. This may be NULL. @@ -933,7 +1190,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, - void *context, dispatch_function_t destructor); + void *_Nullable context, dispatch_function_t _Nullable destructor); /*! * @function dispatch_queue_get_specific @@ -961,7 +1218,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); /*! @@ -987,9 +1244,128 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); */ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -void * +void *_Nullable dispatch_get_specific(const void *key); +/*! + * @functiongroup Dispatch assertion API + * + * This API asserts at runtime that code is executing in (or out of) the context + * of a given queue. It can be used to check that a block accessing a resource + * does so from the proper queue protecting the resource. It also can be used + * to verify that a block that could cause a deadlock if run on a given queue + * never executes on that queue. + */ + +/*! + * @function dispatch_assert_queue + * + * @abstract + * Verifies that the current block is executing on a given dispatch queue. + * + * @discussion + * Some code expects to be run on a specific dispatch queue. This function + * verifies that that expectation is true. 
+ * + * If the currently executing block was submitted to the specified queue or to + * any queue targeting it (see dispatch_set_target_queue()), this function + * returns. + * + * If the currently executing block was submitted with a synchronous API + * (dispatch_sync(), dispatch_barrier_sync(), ...), the context of the + * submitting block is also evaluated (recursively). + * If a synchronously submitting block is found that was itself submitted to + * the specified queue or to any queue targeting it, this function returns. + * + * Otherwise this function asserts: it logs an explanation to the system log and + * terminates the application. + * + * Passing the result of dispatch_get_main_queue() to this function verifies + * that the current block was submitted to the main queue, or to a queue + * targeting it, or is running on the main thread (in any context). + * + * When dispatch_assert_queue() is called outside of the context of a + * submitted block (for example from the context of a thread created manually + * with pthread_create()) then this function will also assert and terminate + * the application. + * + * The variant dispatch_assert_queue_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected to run on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue(dispatch_queue_t queue) + DISPATCH_ALIAS_V2(dispatch_assert_queue); + +/*! + * @function dispatch_assert_queue_barrier + * + * @abstract + * Verifies that the current block is executing on a given dispatch queue, + * and that the block acts as a barrier on that queue. 
+ * + * @discussion + * This behaves exactly like dispatch_assert_queue(), with the additional check + * that the current block acts as a barrier on the specified queue, which is + * always true if the specified queue is serial (see DISPATCH_BLOCK_BARRIER or + * dispatch_barrier_async() for details). + * + * The variant dispatch_assert_queue_barrier_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert()). + * + * @param queue + * The dispatch queue that the current block is expected to run as a barrier on. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue_barrier(dispatch_queue_t queue); + +/*! + * @function dispatch_assert_queue_not + * + * @abstract + * Verifies that the current block is not executing on a given dispatch queue. + * + * @discussion + * This function is the equivalent of dispatch_assert_queue() with the test for + * equality inverted. That means that it will terminate the application when + * dispatch_assert_queue() would return, and vice-versa. See discussion there. + * + * The variant dispatch_assert_queue_not_debug() is compiled out when the + * preprocessor macro NDEBUG is defined. (See also assert(3)). + * + * @param queue + * The dispatch queue that the current block is expected not to run on. + * The result of passing NULL in this parameter is undefined. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_assert_queue_not(dispatch_queue_t queue) + DISPATCH_ALIAS_V2(dispatch_assert_queue_not); + +#ifdef NDEBUG +#define dispatch_assert_queue_debug(q) ((void)(0 && (q))) +#define dispatch_assert_queue_barrier_debug(q) ((void)(0 && (q))) +#define dispatch_assert_queue_not_debug(q) ((void)(0 && (q))) +#else +#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q) +#define dispatch_assert_queue_barrier_debug(q) dispatch_assert_queue_barrier(q) +#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) +#endif + __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index 8f68407d7..b6139d70d 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -26,6 +26,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @typedef dispatch_semaphore_t * @@ -44,7 +46,7 @@ __BEGIN_DECLS * * @discussion * Passing zero for the value is useful for when two threads need to reconcile - * the completion of a particular event. Passing a value greather than zero is + * the completion of a particular event. Passing a value greater than zero is * useful for managing a finite pool of resources, where the pool size is equal * to the value. * @@ -110,4 +112,6 @@ dispatch_semaphore_signal(dispatch_semaphore_t dsema); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_SEMAPHORE__ */ diff --git a/dispatch/source.h b/dispatch/source.h index f01fd9392..63b3ff365 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -35,6 +35,8 @@ #include #endif +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @header * The dispatch framework provides a suite of interfaces for monitoring low- @@ -52,7 +54,7 @@ * Dispatch sources are used to automatically submit event handler blocks to * dispatch queues in response to external events. 
*/ -DISPATCH_DECL(dispatch_source); +DISPATCH_SOURCE_DECL(dispatch_source); __BEGIN_DECLS @@ -64,21 +66,11 @@ __BEGIN_DECLS * is being monitored by the dispatch source. Constants of this type are * passed as a parameter to dispatch_source_create() and determine how the * handle argument is interpreted (i.e. as a file descriptor, mach port, - * signal number, process identifer, etc.), and how the mask arugment is + * signal number, process identifier, etc.), and how the mask argument is * interpreted. */ typedef const struct dispatch_source_type_s *dispatch_source_type_t; -#if !TARGET_OS_WIN32 -/*! @parseOnly */ -#define DISPATCH_SOURCE_TYPE_DECL(name) \ - DISPATCH_EXPORT const struct dispatch_source_type_s \ - _dispatch_source_type_##name -#else -#define DISPATCH_SOURCE_TYPE_DECL(name) \ - DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name -#endif - /*! * @const DISPATCH_SOURCE_TYPE_DATA_ADD * @discussion A dispatch source that coalesces data obtained via calls to @@ -109,7 +101,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! @@ -119,7 +111,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_recv); /*! 
@@ -132,7 +124,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); */ #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) +__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! @@ -143,7 +135,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure); * The mask is a mask of desired events from dispatch_source_proc_flags_t. */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(proc); /*! @@ -186,7 +178,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer); * The mask is a mask of desired events from dispatch_source_vnode_flags_t. */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(vnode); /*! @@ -289,6 +281,9 @@ typedef unsigned long dispatch_source_proc_flags_t; * * @constant DISPATCH_VNODE_REVOKE * The filesystem object was revoked. + * + * @constant DISPATCH_VNODE_FUNLOCK + * The filesystem object was unlocked. */ #define DISPATCH_VNODE_DELETE 0x1 @@ -298,6 +293,7 @@ typedef unsigned long dispatch_source_proc_flags_t; #define DISPATCH_VNODE_LINK 0x10 #define DISPATCH_VNODE_RENAME 0x20 #define DISPATCH_VNODE_REVOKE 0x40 +#define DISPATCH_VNODE_FUNLOCK 0x100 typedef unsigned long dispatch_source_vnode_flags_t; @@ -321,7 +317,6 @@ typedef unsigned long dispatch_source_vnode_flags_t; typedef unsigned long dispatch_source_timer_flags_t; - /*! * @function dispatch_source_create * @@ -335,23 +330,36 @@ typedef unsigned long dispatch_source_timer_flags_t; * will be coalesced and delivered after the dispatch source is resumed or the * event handler block has returned. 
* - * Dispatch sources are created in a suspended state. After creating the + * Dispatch sources are created in an inactive state. After creating the * source and setting any desired attributes (i.e. the handler, context, etc.), - * a call must be made to dispatch_resume() in order to begin event delivery. + * a call must be made to dispatch_activate() in order to begin event delivery. + * + * Calling dispatch_set_target_queue() on a source once it has been activated + * is not allowed (see dispatch_activate() and dispatch_set_target_queue()). + * + * For backward compatibility reasons, dispatch_resume() on an inactive, + * and not otherwise suspended source has the same effect as calling + * dispatch_activate(). For new code, using dispatch_activate() is preferred. * * @param type * Declares the type of the dispatch source. Must be one of the defined * dispatch_source_type_t constants. + * * @param handle * The underlying system handle to monitor. The interpretation of this argument * is determined by the constant provided in the type parameter. + * * @param mask * A mask of flags specifying which events are desired. The interpretation of * this argument is determined by the constant provided in the type parameter. + * * @param queue * The dispatch queue to which the event handler block will be submitted. * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, the source will submit the event * handler block to the default priority global queue. + * + * @result + * The newly created dispatch source. Or NULL if invalid arguments are passed. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT @@ -360,7 +368,7 @@ dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t queue); + dispatch_queue_t _Nullable queue); /*! 
* @function dispatch_source_set_event_handler @@ -380,7 +388,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*! @@ -395,15 +403,14 @@ dispatch_source_set_event_handler(dispatch_source_t source, * * @param handler * The event handler function to submit to the source's target queue. - * The context parameter passed to the event handler function is the current - * context of the dispatch source at the time the handler call is made. - * The result of passing NULL in this parameter is undefined. + * The context parameter passed to the event handler function is the context of + * the dispatch source current at the time the event handler was set. */ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); /*! * @function dispatch_source_set_cancel_handler @@ -437,7 +444,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*! @@ -462,7 +469,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); /*! 
* @function dispatch_source_cancel @@ -712,7 +719,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler(dispatch_source_t source, - dispatch_block_t handler); + dispatch_block_t _Nullable handler); #endif /* __BLOCKS__ */ /*! @@ -737,8 +744,10 @@ __OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler_f(dispatch_source_t source, - dispatch_function_t handler); + dispatch_function_t _Nullable handler); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/dispatch/time.h b/dispatch/time.h index e0bc2f63a..c2152ea14 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -33,6 +33,8 @@ #include #endif +DISPATCH_ASSUME_NONNULL_BEGIN + #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC #endif @@ -102,7 +104,7 @@ dispatch_time(dispatch_time_t when, int64_t delta); * On Mac OS X the wall clock is based on gettimeofday(3). * * @param when - * A struct timespect to add time to. If NULL is passed, then + * A struct timespec to add time to. If NULL is passed, then * dispatch_walltime() will use the result of gettimeofday(3). 
* * @param delta @@ -114,8 +116,10 @@ dispatch_time(dispatch_time_t when, int64_t delta); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t -dispatch_walltime(const struct timespec *when, int64_t delta); +dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 898ffcaf9..fb0ba910f 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -24,11 +24,33 @@ buildPhases = ( ); dependencies = ( - 4552540F19B138B700B88766 /* PBXTargetDependency */, + 92F3FECF1BEC6F1000025962 /* PBXTargetDependency */, ); name = libdispatch_tests; productName = libdispatch_tests; }; + 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 6E2ECAFE1C49C30000A30A32 /* Build configuration list for PBXAggregateTarget "libdispatch_kernel" */; + buildPhases = ( + ); + dependencies = ( + 6E2ECB021C49C31200A30A32 /* PBXTargetDependency */, + ); + name = libdispatch_kernel; + productName = libdispatch_kernel; + }; + 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 92CBD7231BED924F006E0892 /* Build configuration list for PBXAggregateTarget "libdispatch_tests_legacy" */; + buildPhases = ( + ); + dependencies = ( + 92CBD75A1BED926C006E0892 /* PBXTargetDependency */, + ); + name = libdispatch_tests_legacy; + productName = libdispatch_tests; + }; C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = { isa = PBXAggregateTarget; buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */; @@ -58,6 +80,68 @@ 5AAB45C010D30B79004407EA /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; 5AAB45C410D30CC7004407EA /* 
io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES = (Public, ); }; }; 5AAB45C610D30D0C004407EA /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 6E040C731C499C6500411A2E /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; + 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; + 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; + 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; + 6E9955CF1C3B218E0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956011C3B21980071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956021C3B21990071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 
6E9956031C3B219A0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956041C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956051C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; + 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EBEC7E91BBDD325009B1596 /* 
firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 
72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B521BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6ED64B581BBD8A3E00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + 6EDF10B81BBB488A007F14BF /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + 6EF0B26D1BA8C527007FA4F6 /* firehose_server_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EF0B2711BA8C540007FA4F6 /* 
firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Client, ); }; }; + 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */; }; + 6EF2CAA51C88998A001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; 
settings = {ATTRIBUTES = (Public, ); }; }; 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -76,6 +160,48 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + C00B0DF41C5AEBBE000330B3 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + C00B0DFA1C5AEBBE000330B3 /* 
firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + C00B0E001C5AEBBE000330B3 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + C00B0E011C5AEBBE000330B3 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + C00B0E021C5AEBBE000330B3 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + C00B0E031C5AEBBE000330B3 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + C00B0E041C5AEBBE000330B3 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + C01866A61C5973210040FC07 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + C01866A71C5973210040FC07 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + C01866A81C5973210040FC07 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = 
E44EBE3B1251659900645D88 /* init.c */; }; + C01866A91C5973210040FC07 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + C01866AB1C5973210040FC07 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + C01866AC1C5973210040FC07 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + C01866AD1C5973210040FC07 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + C01866AF1C5973210040FC07 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + C01866B01C5973210040FC07 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + C01866B11C5973210040FC07 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + C01866B21C5973210040FC07 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + C01866B31C5973210040FC07 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + C01866B41C5973210040FC07 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + C01866B51C5973210040FC07 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + C01866B61C5973210040FC07 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + C01866B71C5973210040FC07 /* io.c in Sources 
*/ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + C01866B81C5973210040FC07 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + C01866B91C5973210040FC07 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + C90144651C73A8A3002638FC /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + C90144661C73A9F6002638FC /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; C913AC0F143BD34800B78976 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6166143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; @@ -208,9 +334,6 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E49F24D3125D57FA0057C971 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; E49F24D4125D57FA0057C971 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; - E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; - E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E4A2C9C4176019760000F809 /* atomic_llsc.h */; }; 
E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E4B515BD164B2DA300E003AF /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -284,321 +407,6 @@ /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ - 455253A819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = DF80F67E10B5C71600FAB5AE; - remoteInfo = dispatch_test; - }; - 455253AA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01C78108E68D400FAA873; - remoteInfo = dispatch_apply; - }; - 455253AC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4EB36CD1088F0B000C33AD4; - remoteInfo = dispatch_api; - }; - 455253AE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CA7108E6C5000FAA873; - remoteInfo = dispatch_c99; - }; - 455253B019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4C72A26115C3F65009F3CE1; - remoteInfo = dispatch_cf_main; - }; - 455253B219B1384900B88766 /* PBXContainerItemProxy */ = { - isa = 
PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CB9108E6C7200FAA873; - remoteInfo = dispatch_deadname; - }; - 455253B419B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CC3108E6CC300FAA873; - remoteInfo = dispatch_debug; - }; - 455253B619B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CCC108E6CD400FAA873; - remoteInfo = dispatch_group; - }; - 455253B819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CD5108E6CE300FAA873; - remoteInfo = dispatch_overcommit; - }; - 455253BA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CDE108E6CF300FAA873; - remoteInfo = dispatch_pingpong; - }; - 455253BC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CE7108E6D0500FAA873; - remoteInfo = dispatch_plusplus; - }; - 455253BE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CF0108E6D2900FAA873; - remoteInfo = dispatch_priority; - }; - 455253C019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* 
libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CF9108E6D3800FAA873; - remoteInfo = dispatch_priority2; - }; - 455253C219B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E46D768811D0365F00615518; - remoteInfo = dispatch_concur; - }; - 455253C419B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4324AAC12250F0800A3CAD5; - remoteInfo = dispatch_context_for_key; - }; - 455253C619B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D02108E6D5600FAA873; - remoteInfo = dispatch_proc; - }; - 455253C819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D0B108E6D6000FAA873; - remoteInfo = dispatch_queue_finalizer; - }; - 455253CA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D14108E6D7300FAA873; - remoteInfo = dispatch_read; - }; - 455253CC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D1D108E6D8B00FAA873; - remoteInfo = dispatch_read2; - }; - 455253CE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 
E4D01D26108E6D9A00FAA873; - remoteInfo = dispatch_after; - }; - 455253D019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D2F108E6DA700FAA873; - remoteInfo = dispatch_timer; - }; - 455253D219B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4CE9BC31151AB2A00D710C0; - remoteInfo = dispatch_timer_short; - }; - 455253D419B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 5A2BA66D11D0369E0081FF89; - remoteInfo = dispatch_timer_timeout; - }; - 455253D619B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D38108E6DB200FAA873; - remoteInfo = dispatch_suspend_timer; - }; - 455253D819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D41108E6DBF00FAA873; - remoteInfo = dispatch_sema; - }; - 455253DA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D53108E6DDC00FAA873; - remoteInfo = dispatch_timer_bit31; - }; - 455253DC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D5C108E6E0400FAA873; - remoteInfo = dispatch_timer_bit63; - }; 
- 455253DE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D74108E6E4B00FAA873; - remoteInfo = dispatch_timer_set_time; - }; - 455253E019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D7D108E6E6600FAA873; - remoteInfo = dispatch_drift; - }; - 455253E219B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D86108E6E7200FAA873; - remoteInfo = dispatch_starfish; - }; - 455253E419B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D8F108E6E7E00FAA873; - remoteInfo = dispatch_cascade; - }; - 455253E619B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01D98108E6E9500FAA873; - remoteInfo = dispatch_readsync; - }; - 455253E819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4E24A0710E0020B00C3C692; - remoteInfo = dispatch_sync_on_main; - }; - 455253EA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4E24A1810E0021C00C3C692; - remoteInfo = dispatch_sync_gc; - }; - 455253EC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = 
PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4E24C3210E01DF800C3C692; - remoteInfo = dispatch_apply_gc; - }; - 455253EE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 5AAB464A10D330C5004407EA; - remoteInfo = dispatch_data; - }; - 455253F019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 5A11B20E10DB124C000FAD7A; - remoteInfo = dispatch_io; - }; - 455253F219B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 5AA78BAB114821D0009A233B; - remoteInfo = dispatch_io_net; - }; - 455253F419B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 5AF00EF51135FA1300CA14CE; - remoteInfo = dispatch_vm; - }; - 455253F619B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4E33EB6121C9C9400F4B71C; - remoteInfo = dispatch_vnode; - }; - 455253F819B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = C9E804AF1963EC5F00C2B970; - remoteInfo = dispatch_qos; - }; - 455253FA19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; 
- proxyType = 2; - remoteGlobalIDString = C9B1FF84113F458A00843414; - remoteInfo = dispatch_select; - }; - 455253FC19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = C985912B143D584100718FE3; - remoteInfo = dispatch_transform; - }; - 455253FE19B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01DA1108E6EE000FAA873; - remoteInfo = nsoperation; - }; - 4552540019B1384900B88766 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CB0108E6C6300FAA873; - remoteInfo = cffd; - }; 4552540219B1384900B88766 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; @@ -627,13 +435,55 @@ remoteGlobalIDString = E454824F16C1F0FE0042EC2D; remoteInfo = apply_bench; }; - 4552540E19B138B700B88766 /* PBXContainerItemProxy */ = { + 6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 6E040C621C499B1B00411A2E; + remoteInfo = libfirehose_kernel; + }; + 6EF0B27D1BA8C5BF007FA4F6 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 6EB4E4081BA8BCAD00D7B9D2; + remoteInfo = libfirehose_server; + }; + 92CBD7591BED926C006E0892 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; proxyType = 1; remoteGlobalIDString = E4D01DC5108E708E00FAA873; 
remoteInfo = all; }; + 92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 92F3FECA1BEC69E500025962; + remoteInfo = darwintests; + }; + C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = E4D01CB9108E6C7200FAA873; + remoteInfo = dispatch_deadname; + }; + C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = C00B0DF01C5AEBBE000330B3; + remoteInfo = "libdispatch dyld stub"; + }; + C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = C01866A41C5973210040FC07; + remoteInfo = "libdispatch static"; + }; C927F36610FD7F1000C5AB8B /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; @@ -685,6 +535,20 @@ }; /* End PBXContainerItemProxy section */ +/* Begin PBXCopyFilesBuildPhase section */ + 6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 8; + dstPath = "$(INSTALL_PATH_PREFIX)/usr/local/share/misc"; + dstSubfolderSpec = 0; + files = ( + 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */, + ); + name = "Copy Trace Definitions"; + runOnlyForDeploymentPostprocessing = 1; + }; +/* End PBXCopyFilesBuildPhase section */ + /* Begin PBXFileReference section */ 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; 
2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; @@ -696,12 +560,84 @@ 5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; tabWidth = 8; }; 5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; tabWidth = 8; }; + 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_kernel.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose_kernel.xcconfig; sourceTree = ""; }; + 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = ""; }; + 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_server_internal.h; sourceTree = ""; }; + 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = firehose_server.c; sourceTree = ""; }; + 6E326A8F1C2245C4002A6505 /* dispatch_transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_transform.c; sourceTree = ""; }; + 6E326AB11C224830002A6505 /* dispatch_cascade.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.c; path = dispatch_cascade.c; sourceTree = ""; }; + 6E326AB31C224870002A6505 /* dispatch_qos.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_qos.c; sourceTree = ""; }; + 6E326AB51C225477002A6505 /* dispatch_proc.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_proc.c; sourceTree = ""; }; + 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_vnode.c; sourceTree = ""; }; + 6E326AB91C229866002A6505 /* dispatch_read.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read.c; sourceTree = ""; }; + 6E326ABB1C229895002A6505 /* dispatch_read2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read2.c; sourceTree = ""; }; + 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io_net.c; sourceTree = ""; }; + 6E326ABE1C22A577002A6505 /* dispatch_io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io.c; sourceTree = ""; }; + 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_sync_gc.m; sourceTree = ""; }; + 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sync_on_main.c; sourceTree = ""; }; + 6E326ADC1C234396002A6505 /* dispatch_readsync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_readsync.c; sourceTree = ""; }; + 6E326ADE1C23451A002A6505 /* dispatch_concur.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_concur.c; 
sourceTree = ""; }; + 6E326AE01C234780002A6505 /* dispatch_starfish.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_starfish.c; sourceTree = ""; }; + 6E326AE61C2392E8002A6505 /* dispatch_timer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer.c; sourceTree = ""; }; + 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_context_for_key.c; sourceTree = ""; }; + 6E326B121C239431002A6505 /* dispatch_suspend_timer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_suspend_timer.c; sourceTree = ""; }; + 6E326B131C239431002A6505 /* dispatch_timer_bit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_bit.c; sourceTree = ""; }; + 6E326B151C239431002A6505 /* dispatch_timer_set_time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_set_time.c; sourceTree = ""; }; + 6E326B161C239431002A6505 /* dispatch_timer_short.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_short.c; sourceTree = ""; }; + 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = ""; }; + 6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = ""; }; + 6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = ""; }; + 6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = ""; }; + 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = ""; }; + 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = ""; }; + 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = ""; }; + 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_debug.c; sourceTree = ""; }; + 6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_group.c; sourceTree = ""; }; + 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_finalizer.c; sourceTree = ""; }; + 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_overcommit.c; sourceTree = ""; }; + 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_pingpong.c; sourceTree = ""; }; + 6E67D9171C17BA7200FC98AC /* nsoperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = nsoperation.m; sourceTree = ""; }; + 6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_select.c; sourceTree = ""; }; + 6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.c; path = test_lib.c; sourceTree = ""; }; + 6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_lib.h; sourceTree = ""; }; + 6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sema.c; sourceTree = ""; }; + 6E8E4EC31C1A57760004F5CC /* dispatch_after.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_after.c; sourceTree = ""; }; + 6E8E4EC51C1A5D450004F5CC /* cf_file_descriptor.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = cf_file_descriptor.c; sourceTree = ""; }; + 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_data.m; sourceTree = ""; }; + 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_vm.c; sourceTree = ""; }; + 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_drift.c; sourceTree = ""; }; + 6E9926711D01295F000CB89A /* dispatch_block.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_block.c; sourceTree = ""; }; + 6E9955571C3AF7710071D40C /* venture_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_private.h; sourceTree = ""; }; + 6E9955CE1C3B218E0071D40C /* venture.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = venture.c; sourceTree = ""; }; + 6E9956061C3B21AA0071D40C /* venture_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_internal.h; sourceTree = ""; }; + 6E9B6B201BB4CC73009E324D 
/* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = ""; }; + 6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = ""; }; + 6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = ""; }; + 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = ""; }; + 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = ""; }; + 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = ""; }; + 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = ""; }; + 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = ""; }; + 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_internal.h; sourceTree = ""; }; + 6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; 
sourceTree = ""; }; + 6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = ""; }; 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = ""; }; 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = ""; }; + 72406A391AF9926000DF4E2B /* firehose_types.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_types.defs; sourceTree = ""; }; 72B54F690EB169EB00DBECBA /* dispatch_source_create.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_source_create.3; sourceTree = ""; }; 72CC940C0ECCD5720031B751 /* dispatch_object.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_object.3; sourceTree = ""; }; 72CC940D0ECCD5720031B751 /* dispatch.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch.3; sourceTree = ""; }; 72CC942F0ECCD8750031B751 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = ""; }; + 72DEAA971AE181D300289540 /* firehose_buffer.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = firehose_buffer.c; sourceTree = ""; }; + 72DEAA9B1AE1B0BD00289540 /* firehose.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose.defs; sourceTree = ""; }; + 72DEAA9D1AE1BB7300289540 /* 
firehose_server_object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = firehose_server_object.m; sourceTree = ""; }; + 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = firehose_server_private.h; sourceTree = ""; }; + 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_c99.c; sourceTree = ""; }; + 92F3FE8F1BEC686300025962 /* dispatch_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_api.c; sourceTree = ""; }; + 92F3FE921BEC686300025962 /* Makefile */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = ""; }; 96032E4A0F5CC8C700241C5F /* time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = time.c; sourceTree = ""; }; 96032E4C0F5CC8D100241C5F /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; 960F0E7D0F3FB232000D88BF /* dispatch_apply.3 */ = {isa = PBXFileReference; explicitFileType = text.man; fileEncoding = 4; path = dispatch_apply.3; sourceTree = ""; }; @@ -721,21 +657,27 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = 
archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; + C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; + C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; + C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = ""; }; + C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = ""; }; + C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; + C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; tabWidth = 8; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; + C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = ""; }; C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; D2AAC046055464E500DB518D /* libdispatch.dylib */ = {isa = PBXFileReference; 
explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolved.xcconfig"; sourceTree = ""; }; E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-resolver.xcconfig"; sourceTree = ""; }; E4128ED513BA9A1700ABB2CB /* hw_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hw_config.h; sourceTree = ""; }; - E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_macosx.aliases; sourceTree = ""; }; E420866F16027AE500EEE210 /* data.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = data.m; sourceTree = ""; }; E421E5F81716ADA10090DC9B /* introspection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection.h; sourceTree = ""; }; E421E5FB1716B8730090DC9B /* install-dtrace.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-dtrace.sh"; sourceTree = ""; }; E421E5FD1716BEA70090DC9B /* libdispatch.interposable */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch.interposable; sourceTree = ""; }; E422A0D412A557B5005E5BDB /* trace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trace.h; sourceTree = ""; }; E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; - E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.unexport; 
sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = ""; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; @@ -751,7 +693,7 @@ E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; - E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-static.xcconfig"; sourceTree = ""; }; + E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-up-static.xcconfig"; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = 
SOURCE_ROOT; tabWidth = 8; }; @@ -759,7 +701,6 @@ E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; - E4A2C9C4176019760000F809 /* atomic_llsc.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = ""; }; E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = ""; }; E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = ""; }; E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = ""; }; @@ -774,7 +715,6 @@ E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = ""; }; - E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch_objc.aliases; sourceTree = ""; 
}; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -804,7 +744,7 @@ /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ - D289987405E68DCB004EDB86 /* Frameworks */ = { + 6E040C601C499B1B00411A2E /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( @@ -825,11 +765,15 @@ isa = PBXGroup; children = ( E44DB71E11D2FF080074F2AD /* Build Support */, - E4EB4A2914C35F1800AA0FA9 /* OS Object */, - FC7BEDAA0E83625200161930 /* Public Headers */, - FC7BEDAF0E83626100161930 /* Private Headers */, - FC7BEDB60E8363DC00161930 /* Project Headers */, - 08FB7795FE84155DC02AAC07 /* Source */, + 6E9B6AE21BB39793009E324D /* OS Public Headers */, + E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */, + FC7BEDAA0E83625200161930 /* Dispatch Public Headers */, + FC7BEDAF0E83626100161930 /* Dispatch Private Headers */, + FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */, + 08FB7795FE84155DC02AAC07 /* Dispatch Source */, + 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */, + 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */, + 92F3FEC91BEC687200025962 /* Darwin Tests */, C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, @@ -841,7 +785,7 @@ tabWidth = 4; usesTabs = 1; }; - 08FB7795FE84155DC02AAC07 /* Source */ = { + 08FB7795FE84155DC02AAC07 /* Dispatch Source */ = { isa = PBXGroup; children = ( 2BBF5A62154B64F5002B20F9 /* allocator.c */, @@ -853,6 +797,7 @@ 
E44EBE3B1251659900645D88 /* init.c */, E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, + 6EF2CAAB1C8899D5001ABE83 /* lock.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, E4FC3263145F46C9002FBDDB /* object.m */, 96DF70BD0F38FE3C0074BD99 /* once.c */, @@ -861,11 +806,13 @@ 96A8AA860F41E7A400CD570B /* source.c */, 96032E4A0F5CC8C700241C5F /* time.c */, C9C5F80D143C1771006DC718 /* transform.c */, + 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, + 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, ); - name = Source; + name = "Dispatch Source"; path = src; sourceTree = ""; }; @@ -878,6 +825,10 @@ E49F24DF125D57FA0057C971 /* libdispatch.dylib */, E46DBC5714EE10C80001F9F6 /* libdispatch.a */, E4B515D6164B2DA300E003AF /* libdispatch.dylib */, + 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, + 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, + C01866BD1C5973210040FC07 /* libdispatch.a */, + C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, ); name = Products; sourceTree = ""; @@ -885,59 +836,108 @@ 4552536F19B1384900B88766 /* Products */ = { isa = PBXGroup; children = ( - 455253A919B1384900B88766 /* libdispatch_test.a */, - 455253AB19B1384900B88766 /* dispatch_apply */, - 455253AD19B1384900B88766 /* dispatch_api */, - 455253AF19B1384900B88766 /* dispatch_c99 */, - 455253B119B1384900B88766 /* dispatch_cf_main */, - 455253B319B1384900B88766 /* dispatch_deadname */, - 455253B519B1384900B88766 /* dispatch_debug */, - 455253B719B1384900B88766 /* dispatch_group */, - 455253B919B1384900B88766 /* dispatch_overcommit */, - 455253BB19B1384900B88766 /* dispatch_pingpong */, - 455253BD19B1384900B88766 /* dispatch_plusplus */, - 455253BF19B1384900B88766 /* dispatch_priority */, - 455253C119B1384900B88766 /* dispatch_priority2 */, - 455253C319B1384900B88766 /* dispatch_concur */, - 455253C519B1384900B88766 /* 
dispatch_context_for_key */, - 455253C719B1384900B88766 /* dispatch_proc */, - 455253C919B1384900B88766 /* dispatch_queue_finalizer */, - 455253CB19B1384900B88766 /* dispatch_read */, - 455253CD19B1384900B88766 /* dispatch_read2 */, - 455253CF19B1384900B88766 /* dispatch_after */, - 455253D119B1384900B88766 /* dispatch_timer */, - 455253D319B1384900B88766 /* dispatch_timer_short */, - 455253D519B1384900B88766 /* dispatch_timer_timeout */, - 455253D719B1384900B88766 /* dispatch_suspend_timer */, - 455253D919B1384900B88766 /* dispatch_sema */, - 455253DB19B1384900B88766 /* dispatch_timer_bit31 */, - 455253DD19B1384900B88766 /* dispatch_timer_bit63 */, - 455253DF19B1384900B88766 /* dispatch_timer_set_time */, - 455253E119B1384900B88766 /* dispatch_drift */, - 455253E319B1384900B88766 /* dispatch_starfish */, - 455253E519B1384900B88766 /* dispatch_cascade */, - 455253E719B1384900B88766 /* dispatch_readsync */, - 455253E919B1384900B88766 /* dispatch_sync_on_main */, - 455253EB19B1384900B88766 /* dispatch_sync_gc */, - 455253ED19B1384900B88766 /* dispatch_apply_gc */, - 455253EF19B1384900B88766 /* dispatch_data */, - 455253F119B1384900B88766 /* dispatch_io */, - 455253F319B1384900B88766 /* dispatch_io_net */, - 455253F519B1384900B88766 /* dispatch_vm */, - 455253F719B1384900B88766 /* dispatch_vnode */, - 455253F919B1384900B88766 /* dispatch_qos */, - 455253FB19B1384900B88766 /* dispatch_select */, - 455253FD19B1384900B88766 /* dispatch_transform */, - 455253FF19B1384900B88766 /* nsoperation */, - 4552540119B1384900B88766 /* cffd */, 4552540319B1384900B88766 /* bench */, 4552540519B1384900B88766 /* jsgc_bench */, 4552540719B1384900B88766 /* async_bench */, 4552540919B1384900B88766 /* apply_bench */, + C00B0E111C5AEBBE000330B3 /* dispatch_deadname */, ); name = Products; sourceTree = ""; }; + 6E9B6AE21BB39793009E324D /* OS Public Headers */ = { + isa = PBXGroup; + children = ( + E4EB4A2614C35ECE00AA0FA9 /* object.h */, + ); + name = "OS Public Headers"; + path = os; + 
sourceTree = ""; + }; + 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = { + isa = PBXGroup; + children = ( + 72406A391AF9926000DF4E2B /* firehose_types.defs */, + 72DEAA9B1AE1B0BD00289540 /* firehose.defs */, + 72406A031AF95DF800DF4E2B /* firehose_reply.defs */, + 72DEAA971AE181D300289540 /* firehose_buffer.c */, + 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */, + 72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */, + ); + name = "Firehose Source"; + path = src/firehose; + sourceTree = ""; + }; + 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = { + isa = PBXGroup; + children = ( + 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */, + 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */, + 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */, + 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */, + ); + name = "Firehose Project Headers"; + path = src/firehose; + sourceTree = ""; + }; + 92F3FEC91BEC687200025962 /* Darwin Tests */ = { + isa = PBXGroup; + children = ( + 6E8E4EC51C1A5D450004F5CC /* cf_file_descriptor.c */, + 6E8E4EC31C1A57760004F5CC /* dispatch_after.c */, + 92F3FE8F1BEC686300025962 /* dispatch_api.c */, + 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */, + 6E9926711D01295F000CB89A /* dispatch_block.c */, + 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */, + 6E326AB11C224830002A6505 /* dispatch_cascade.c */, + 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */, + 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, + 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, + 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, + 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, + 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */, + 6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */, + 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, + 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */, + 6E326ABE1C22A577002A6505 /* dispatch_io.c */, + C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */, + 6E67D9131C17676D00FC98AC /* 
dispatch_overcommit.c */, + 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, + 6E326B441C239B61002A6505 /* dispatch_priority.c */, + 6E326AB51C225477002A6505 /* dispatch_proc.c */, + 6E326AB31C224870002A6505 /* dispatch_qos.c */, + 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, + 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */, + 6E326AB91C229866002A6505 /* dispatch_read.c */, + 6E326ABB1C229895002A6505 /* dispatch_read2.c */, + 6E326ADC1C234396002A6505 /* dispatch_readsync.c */, + 6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */, + 6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */, + 6EA2CB841C005DEF0076794A /* dispatch_source.c */, + 6E326AE01C234780002A6505 /* dispatch_starfish.c */, + 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */, + 6E326B121C239431002A6505 /* dispatch_suspend_timer.c */, + 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */, + 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */, + 6E326B131C239431002A6505 /* dispatch_timer_bit.c */, + 6E326B151C239431002A6505 /* dispatch_timer_set_time.c */, + 6E326B161C239431002A6505 /* dispatch_timer_short.c */, + 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */, + 6E326AE61C2392E8002A6505 /* dispatch_timer.c */, + 6E326A8F1C2245C4002A6505 /* dispatch_transform.c */, + 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */, + 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */, + 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, + 6E67D9171C17BA7200FC98AC /* nsoperation.m */, + 6E4FC9D11C84123600520351 /* os_venture_basic.c */, + 92F3FE921BEC686300025962 /* Makefile */, + 6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */, + 6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */, + ); + name = "Darwin Tests"; + path = tests; + sourceTree = ""; + }; C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( @@ -974,18 +974,20 @@ E40041E4125E71150022B135 /* xcodeconfig */ = { isa = PBXGroup; children = ( + 6E4130C91B431697001A152D /* backward-compat.xcconfig */, E43D93F11097917E004F6A62 /* 
libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, - E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */, + E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */, + C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */, + C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, + 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */, + 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, - E4DC8D45191053EE0005C6F4 /* libdispatch_objc.aliases */, - E416F53F175D04B800B23711 /* libdispatch_macosx.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */, E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */, - E422DA3714D2AE11003C6EE4 /* libdispatch.unexport */, E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; @@ -1020,6 +1022,7 @@ E421E5FB1716B8730090DC9B /* install-dtrace.sh */, E49F251E125D631D0057C971 /* mig-headers.sh */, E482F1CD12DBAB590030614D /* postprocess-headers.sh */, + C01866BF1C5976C90040FC07 /* run-on-install.sh */, ); path = xcodescripts; sourceTree = ""; @@ -1032,13 +1035,17 @@ path = config; sourceTree = ""; }; - E4EB4A2914C35F1800AA0FA9 /* OS Object */ = { + E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */ = { isa = PBXGroup; children = ( - E4EB4A2614C35ECE00AA0FA9 /* object.h */, E454569214746F1B00106147 /* object_private.h */, + 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */, + 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */, + 6E9955571C3AF7710071D40C /* venture_private.h */, + E44A8E711805C473009FFDB6 /* voucher_private.h */, + E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, ); - name = "OS Object"; + name = "OS Private Headers"; path = os; sourceTree = ""; }; @@ 
-1046,10 +1053,10 @@ isa = PBXGroup; children = ( 96929D820F3EA1020041FF5D /* atomic.h */, - E4A2C9C4176019760000F809 /* atomic_llsc.h */, E463024F1761603C00E11F4C /* atomic_sfb.h */, E4BA743913A8911B0095BDF1 /* getprogname.h */, E4128ED513BA9A1700ABB2CB /* hw_config.h */, + 6EF2CAA41C88998A001ABE83 /* lock.h */, FC1832A2109923C7003403D5 /* perfmon.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, @@ -1058,7 +1065,7 @@ path = shims; sourceTree = ""; }; - FC7BEDAA0E83625200161930 /* Public Headers */ = { + FC7BEDAA0E83625200161930 /* Dispatch Public Headers */ = { isa = PBXGroup; children = ( 72CC942F0ECCD8750031B751 /* base.h */, @@ -1067,6 +1074,7 @@ FC7BED960E8361E600161930 /* dispatch.h */, FC5C9C1D0EADABE3006E462D /* group.h */, 5AAB45C310D30CC7004407EA /* io.h */, + C901445E1C73A7FE002638FC /* module.modulemap */, 961B994F0F3E85C30006BC96 /* object.h */, 96C9553A0F3EAEDD000D2CA4 /* once.h */, FC7BED8B0E8361E600161930 /* queue.h */, @@ -1075,11 +1083,11 @@ 96032E4C0F5CC8D100241C5F /* time.h */, E421E5F81716ADA10090DC9B /* introspection.h */, ); - name = "Public Headers"; + name = "Dispatch Public Headers"; path = dispatch; sourceTree = ""; }; - FC7BEDAF0E83626100161930 /* Private Headers */ = { + FC7BEDAF0E83626100161930 /* Dispatch Private Headers */ = { isa = PBXGroup; children = ( FC7BED930E8361E600161930 /* private.h */, @@ -1088,17 +1096,16 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */, FCEF047F0F5661960067401F /* source_private.h */, E4ECBAA415253C25002C313C /* mach_private.h */, - E44A8E711805C473009FFDB6 /* voucher_private.h */, - E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, + C90144641C73A845002638FC /* module.modulemap */, 961B99350F3E83980006BC96 /* benchmark.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, 2BE17C6318EA305E002CA4E8 /* layout_private.h */, ); - name = "Private Headers"; + name = "Dispatch Private Headers"; path = private; sourceTree = ""; }; - FC7BEDB60E8363DC00161930 /* 
Project Headers */ = { + FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */ = { isa = PBXGroup; children = ( 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, @@ -1110,43 +1117,68 @@ 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, + 6E9956061C3B21AA0071D40C /* venture_internal.h */, E44A8E7418066276009FFDB6 /* voucher_internal.h */, E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, FC1832A0109923B3003403D5 /* shims */, ); - name = "Project Headers"; + name = "Dispatch Project Headers"; path = src; sourceTree = ""; }; /* End PBXGroup section */ /* Begin PBXHeadersBuildPhase section */ - D2AAC043055464E500DB518D /* Headers */ = { + 6E040C611C499B1B00411A2E /* Headers */ = { isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( - FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, - 72CC94300ECCD8750031B751 /* base.h in Headers */, - 961B99500F3E85C30006BC96 /* object.h in Headers */, + 6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 6EB4E4071BA8BCAD00D7B9D2 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */, + 6EF0B2711BA8C540007FA4F6 /* firehose_internal.h in Headers */, + 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */, + 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */, + 6EF0B26D1BA8C527007FA4F6 /* firehose_server_private.h in Headers */, + 6EDF10B81BBB488A007F14BF /* firehose_buffer_private.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + D2AAC043055464E500DB518D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + FC7BEDA50E8361E600161930 /* dispatch.h in Headers 
*/, + 72CC94300ECCD8750031B751 /* base.h in Headers */, + 961B99500F3E85C30006BC96 /* object.h in Headers */, E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, + 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, + C90144651C73A8A3002638FC /* module.modulemap in Headers */, E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, + 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, E4D76A9318E325D200B1F98B /* block.h in Headers */, - E4A2C9C7176019840000F809 /* atomic_llsc.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, + C90144661C73A9F6002638FC /* module.modulemap in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, FC7BED9E0E8361E600161930 /* internal.h in Headers */, @@ -1161,12 +1193,16 @@ 96929D840F3EA1020041FF5D /* atomic.h in Headers */, 96929D850F3EA1020041FF5D /* shims.h in Headers */, FC1832A7109923C7003403D5 /* time.h in Headers */, + 6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */, E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */, 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */, FC1832A6109923C7003403D5 /* perfmon.h in Headers */, FC9C70E8105EC9620074F9CA /* config.h in Headers */, + 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */, + 
6EF2CAA51C88998A001ABE83 /* lock.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, + 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, @@ -1187,6 +1223,7 @@ E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, E49F24AF125D57FA0057C971 /* source.h in Headers */, + 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */, E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */, E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, E49F24B1125D57FA0057C971 /* group.h in Headers */, @@ -1198,7 +1235,6 @@ E49F24B5125D57FA0057C971 /* time.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, E4D76A9418E325D200B1F98B /* block.h in Headers */, - E4A2C9C6176019830000F809 /* atomic_llsc.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, @@ -1208,6 +1244,7 @@ E49F24BE125D57FA0057C971 /* source_internal.h in Headers */, E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */, E4C1ED701263E714000D3C8B /* data_internal.h in Headers */, + 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, E49F24C1125D57FA0057C971 /* tsd.h in Headers */, @@ -1215,12 +1252,16 @@ E49F24C3125D57FA0057C971 /* shims.h in Headers */, E49F24C4125D57FA0057C971 /* time.h in Headers */, E49F24C5125D57FA0057C971 /* perfmon.h in Headers */, + 6ED64B521BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */, E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */, 2BE17C6518EA305E002CA4E8 /* 
layout_private.h in Headers */, E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, + 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */, + 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, + 6ED64B581BBD8A3E00C35F4D /* firehose_inline_internal.h in Headers */, E454569414746F1B00106147 /* object_private.h in Headers */, E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, E4ECBAA615253D17002C313C /* mach_private.h in Headers */, @@ -1240,19 +1281,23 @@ E421E5F91716ADA10090DC9B /* introspection.h in Headers */, E44F9DB216544032001DCD38 /* object_internal.h in Headers */, E44F9DB316544037001DCD38 /* queue_internal.h in Headers */, + 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */, E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, + 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, E44F9DB816544053001DCD38 /* atomic.h in Headers */, + 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */, + 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */, E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */, + 6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */, E44F9DB71654404F001DCD38 /* shims.h in Headers */, E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, E44F9DBF165440EF001DCD38 /* config.h in Headers */, E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */, - E4A2C9C5176019820000F809 /* atomic_llsc.h in Headers */, E44F9DB616544043001DCD38 /* trace.h in Headers */, E44F9DB916544056001DCD38 /* getprogname.h in Headers */, 
E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */, @@ -1266,14 +1311,96 @@ }; /* End PBXHeadersBuildPhase section */ +/* Begin PBXLegacyTarget section */ + 92F3FECA1BEC69E500025962 /* darwintests */ = { + isa = PBXLegacyTarget; + buildArgumentsString = "$(ACTION)"; + buildConfigurationList = 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */; + buildPhases = ( + ); + buildToolPath = /usr/bin/make; + buildWorkingDirectory = tests/; + dependencies = ( + ); + name = darwintests; + passBuildSettingsInEnvironment = 1; + productName = darwintests; + }; +/* End PBXLegacyTarget section */ + /* Begin PBXNativeTarget section */ + 6E040C621C499B1B00411A2E /* libfirehose_kernel */ = { + isa = PBXNativeTarget; + buildConfigurationList = 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */; + buildPhases = ( + 6E040C5F1C499B1B00411A2E /* Sources */, + 6E040C601C499B1B00411A2E /* Frameworks */, + 6E040C611C499B1B00411A2E /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = libfirehose_kernel; + productName = libfirehose_kernel; + productReference = 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */; + productType = "com.apple.product-type.library.static"; + }; + 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */ = { + isa = PBXNativeTarget; + buildConfigurationList = 6EB4E40A1BA8BCAD00D7B9D2 /* Build configuration list for PBXNativeTarget "libfirehose_server" */; + buildPhases = ( + 6EB4E4051BA8BCAD00D7B9D2 /* Sources */, + 6EB4E4071BA8BCAD00D7B9D2 /* Headers */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = libfirehose_server; + productName = libfirehose_server; + productReference = 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */; + productType = "com.apple.product-type.library.static"; + }; + C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */ = { + isa = PBXNativeTarget; + buildConfigurationList = C00B0E071C5AEBBE000330B3 /* Build configuration list for 
PBXNativeTarget "libdispatch dyld stub" */; + buildPhases = ( + C00B0DF11C5AEBBE000330B3 /* Sources */, + C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch dyld stub"; + productName = libdispatch; + productReference = C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */; + productType = "com.apple.product-type.library.static"; + }; + C01866A41C5973210040FC07 /* libdispatch mp static */ = { + isa = PBXNativeTarget; + buildConfigurationList = C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */; + buildPhases = ( + C01866A51C5973210040FC07 /* Sources */, + C01866C01C59777B0040FC07 /* Symlink to the loaderd path */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch mp static"; + productName = libdispatch; + productReference = C01866BD1C5973210040FC07 /* libdispatch.a */; + productType = "com.apple.product-type.library.static"; + }; D2AAC045055464E500DB518D /* libdispatch */ = { isa = PBXNativeTarget; buildConfigurationList = 1DEB91EB08733DB70010E9CD /* Build configuration list for PBXNativeTarget "libdispatch" */; buildPhases = ( D2AAC043055464E500DB518D /* Headers */, D2AAC044055464E500DB518D /* Sources */, - D289987405E68DCB004EDB86 /* Frameworks */, + 6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */, E4EB4A2B14C3720B00AA0FA9 /* Install Headers */, E482F1C512DBAA110030614D /* Postprocess Headers */, 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */, @@ -1281,19 +1408,22 @@ buildRules = ( ); dependencies = ( + 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */, E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, E4B515DB164B317700E003AF /* PBXTargetDependency */, + C01866C21C597AEA0040FC07 /* PBXTargetDependency */, E437F0D614F7441F00F0B997 /* PBXTargetDependency */, + C00B0E141C5AEED6000330B3 /* PBXTargetDependency */, ); name = 
libdispatch; productName = libdispatch; productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; - E46DBC1A14EE10C80001F9F6 /* libdispatch static */ = { + E46DBC1A14EE10C80001F9F6 /* libdispatch up static */ = { isa = PBXNativeTarget; - buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */; + buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch up static" */; buildPhases = ( E46DBC3E14EE10C80001F9F6 /* Sources */, ); @@ -1301,7 +1431,7 @@ ); dependencies = ( ); - name = "libdispatch static"; + name = "libdispatch up static"; productName = libdispatch; productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */; productType = "com.apple.product-type.library.static"; @@ -1384,7 +1514,61 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0700; + LastUpgradeCheck = 0800; + TargetAttributes = { + 3F3C9326128E637B0042B1F7 = { + ProvisioningStyle = Manual; + }; + 4552540A19B1389700B88766 = { + ProvisioningStyle = Manual; + }; + 6E040C621C499B1B00411A2E = { + CreatedOnToolsVersion = 7.3; + ProvisioningStyle = Manual; + }; + 6E2ECAFD1C49C2FF00A30A32 = { + CreatedOnToolsVersion = 7.3; + ProvisioningStyle = Manual; + }; + 6EB4E4081BA8BCAD00D7B9D2 = { + CreatedOnToolsVersion = 7.0; + ProvisioningStyle = Manual; + }; + 92CBD7201BED924F006E0892 = { + ProvisioningStyle = Manual; + }; + 92F3FECA1BEC69E500025962 = { + CreatedOnToolsVersion = 7.1; + ProvisioningStyle = Manual; + }; + C00B0DF01C5AEBBE000330B3 = { + ProvisioningStyle = Manual; + }; + C01866A41C5973210040FC07 = { + ProvisioningStyle = Manual; + }; + C927F35A10FD7F0600C5AB8B = { + ProvisioningStyle = Manual; + }; + D2AAC045055464E500DB518D = { + ProvisioningStyle = Manual; + }; + E46DBC1A14EE10C80001F9F6 = { + ProvisioningStyle = Manual; + }; + E49F24A9125D57FA0057C971 = { + 
ProvisioningStyle = Manual; + }; + E4B51595164B2DA300E003AF = { + ProvisioningStyle = Manual; + }; + E4EC118F12514302000DDBD1 = { + ProvisioningStyle = Manual; + }; + E4EC121612514715000DDBD1 = { + ProvisioningStyle = Manual; + }; + }; }; buildConfigurationList = 1DEB91EF08733DB70010E9CD /* Build configuration list for PBXProject "libdispatch" */; compatibilityVersion = "Xcode 3.2"; @@ -1415,330 +1599,22 @@ E4EC121612514715000DDBD1 /* libdispatch mp resolved */, E4EC118F12514302000DDBD1 /* libdispatch up resolved */, E4B51595164B2DA300E003AF /* libdispatch introspection */, - E46DBC1A14EE10C80001F9F6 /* libdispatch static */, + E46DBC1A14EE10C80001F9F6 /* libdispatch up static */, + C01866A41C5973210040FC07 /* libdispatch mp static */, + C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, + 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, 4552540A19B1389700B88766 /* libdispatch_tests */, + 6E040C621C499B1B00411A2E /* libfirehose_kernel */, + 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */, + 92F3FECA1BEC69E500025962 /* darwintests */, + 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, ); }; /* End PBXProject section */ /* Begin PBXReferenceProxy section */ - 455253A919B1384900B88766 /* libdispatch_test.a */ = { - isa = PBXReferenceProxy; - fileType = archive.ar; - path = libdispatch_test.a; - remoteRef = 455253A819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253AB19B1384900B88766 /* dispatch_apply */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_apply; - remoteRef = 455253AA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253AD19B1384900B88766 /* dispatch_api */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_api; - remoteRef = 455253AC19B1384900B88766 /* PBXContainerItemProxy */; - 
sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253AF19B1384900B88766 /* dispatch_c99 */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_c99; - remoteRef = 455253AE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253B119B1384900B88766 /* dispatch_cf_main */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_cf_main; - remoteRef = 455253B019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253B319B1384900B88766 /* dispatch_deadname */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_deadname; - remoteRef = 455253B219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253B519B1384900B88766 /* dispatch_debug */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_debug; - remoteRef = 455253B419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253B719B1384900B88766 /* dispatch_group */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_group; - remoteRef = 455253B619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253B919B1384900B88766 /* dispatch_overcommit */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_overcommit; - remoteRef = 455253B819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253BB19B1384900B88766 /* dispatch_pingpong */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_pingpong; - remoteRef = 455253BA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253BD19B1384900B88766 /* dispatch_plusplus */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_plusplus; - 
remoteRef = 455253BC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253BF19B1384900B88766 /* dispatch_priority */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_priority; - remoteRef = 455253BE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253C119B1384900B88766 /* dispatch_priority2 */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_priority2; - remoteRef = 455253C019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253C319B1384900B88766 /* dispatch_concur */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_concur; - remoteRef = 455253C219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253C519B1384900B88766 /* dispatch_context_for_key */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_context_for_key; - remoteRef = 455253C419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253C719B1384900B88766 /* dispatch_proc */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_proc; - remoteRef = 455253C619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253C919B1384900B88766 /* dispatch_queue_finalizer */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_queue_finalizer; - remoteRef = 455253C819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253CB19B1384900B88766 /* dispatch_read */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_read; - remoteRef = 455253CA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253CD19B1384900B88766 /* dispatch_read2 */ = { - 
isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_read2; - remoteRef = 455253CC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253CF19B1384900B88766 /* dispatch_after */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_after; - remoteRef = 455253CE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253D119B1384900B88766 /* dispatch_timer */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer; - remoteRef = 455253D019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253D319B1384900B88766 /* dispatch_timer_short */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_short; - remoteRef = 455253D219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253D519B1384900B88766 /* dispatch_timer_timeout */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_timeout; - remoteRef = 455253D419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253D719B1384900B88766 /* dispatch_suspend_timer */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_suspend_timer; - remoteRef = 455253D619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253D919B1384900B88766 /* dispatch_sema */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_sema; - remoteRef = 455253D819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253DB19B1384900B88766 /* dispatch_timer_bit31 */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_bit31; - remoteRef = 455253DA19B1384900B88766 /* PBXContainerItemProxy */; 
- sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253DD19B1384900B88766 /* dispatch_timer_bit63 */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_bit63; - remoteRef = 455253DC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253DF19B1384900B88766 /* dispatch_timer_set_time */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_timer_set_time; - remoteRef = 455253DE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E119B1384900B88766 /* dispatch_drift */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_drift; - remoteRef = 455253E019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E319B1384900B88766 /* dispatch_starfish */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_starfish; - remoteRef = 455253E219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E519B1384900B88766 /* dispatch_cascade */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_cascade; - remoteRef = 455253E419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E719B1384900B88766 /* dispatch_readsync */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_readsync; - remoteRef = 455253E619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253E919B1384900B88766 /* dispatch_sync_on_main */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_sync_on_main; - remoteRef = 455253E819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253EB19B1384900B88766 /* dispatch_sync_gc */ = { - isa = PBXReferenceProxy; - fileType = 
"compiled.mach-o.executable"; - path = dispatch_sync_gc; - remoteRef = 455253EA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253ED19B1384900B88766 /* dispatch_apply_gc */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_apply_gc; - remoteRef = 455253EC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253EF19B1384900B88766 /* dispatch_data */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_data; - remoteRef = 455253EE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F119B1384900B88766 /* dispatch_io */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_io; - remoteRef = 455253F019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F319B1384900B88766 /* dispatch_io_net */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_io_net; - remoteRef = 455253F219B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F519B1384900B88766 /* dispatch_vm */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_vm; - remoteRef = 455253F419B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F719B1384900B88766 /* dispatch_vnode */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_vnode; - remoteRef = 455253F619B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253F919B1384900B88766 /* dispatch_qos */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_qos; - remoteRef = 455253F819B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FB19B1384900B88766 /* dispatch_select */ = { - isa 
= PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_select; - remoteRef = 455253FA19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FD19B1384900B88766 /* dispatch_transform */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_transform; - remoteRef = 455253FC19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 455253FF19B1384900B88766 /* nsoperation */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = nsoperation; - remoteRef = 455253FE19B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; - 4552540119B1384900B88766 /* cffd */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = cffd; - remoteRef = 4552540019B1384900B88766 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; 4552540319B1384900B88766 /* bench */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1767,6 +1643,13 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; + C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = dispatch_deadname; + remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1793,6 +1676,40 @@ shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/run-on-install.sh", + ); + name = "Symlink libdispatch.a -> libdispatch_dyld_target.a"; + outputPaths = ( + "${DSTROOT}${INSTALL_PATH}/libdispatch.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ${PRODUCT_NAME}.a ${SCRIPT_OUTPUT_FILE_0}"; + showEnvVarsInLog = 0; + }; + C01866C01C59777B0040FC07 /* Symlink to the loaderd path */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/run-on-install.sh", + ); + name = "Symlink to the loaderd path"; + outputPaths = ( + "${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/${PRODUCT_NAME}.a ${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a"; + showEnvVarsInLog = 0; + }; E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -1868,13 +1785,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/private/voucher_private.h", - "$(SRCROOT)/private/voucher_activity_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); @@ -1892,13 +1811,15 @@ "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/private/voucher_private.h", - "$(SRCROOT)/private/voucher_activity_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", ); name = "Install Headers"; outputPaths = ( "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); @@ -1930,16 +1851,22 @@ ); inputPaths = ( "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/src/firehose/firehose.defs", + 
"$(SRCROOT)/src/firehose/firehose_reply.defs", "$(SRCROOT)/xcodescripts/mig-headers.sh", ); name = "Mig Headers"; outputPaths = ( "$(DERIVED_FILE_DIR)/protocol.h", "$(DERIVED_FILE_DIR)/protocolServer.h", + "$(DERIVED_FILE_DIR)/firehose.h", + "$(DERIVED_FILE_DIR)/firehoseServer.h", + "$(DERIVED_FILE_DIR)/firehose_reply.h", + "$(DERIVED_FILE_DIR)/firehose_replyServer.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_1}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_3}\""; showEnvVarsInLog = 0; }; E4EC121712514715000DDBD1 /* Mig Headers */ = { @@ -1949,16 +1876,22 @@ ); inputPaths = ( "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/src/firehose/firehose.defs", + "$(SRCROOT)/src/firehose/firehose_reply.defs", "$(SRCROOT)/xcodescripts/mig-headers.sh", ); name = "Mig Headers"; outputPaths = ( "$(DERIVED_FILE_DIR)/protocol.h", "$(DERIVED_FILE_DIR)/protocolServer.h", + "$(DERIVED_FILE_DIR)/firehose.h", + "$(DERIVED_FILE_DIR)/firehoseServer.h", + "$(DERIVED_FILE_DIR)/firehose_reply.h", + "$(DERIVED_FILE_DIR)/firehose_replyServer.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_1}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_3}\""; showEnvVarsInLog = 0; }; E4EC122512514715000DDBD1 /* Symlink normal variant */ = { @@ -1980,16 +1913,96 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + 6E040C5F1C499B1B00411A2E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6E040C731C499C6500411A2E /* firehose_buffer.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 6EB4E4051BA8BCAD00D7B9D2 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */, + 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */, + 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */, + 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + C00B0DF11C5AEBBE000330B3 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, + C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, + 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, + C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, + C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, + C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, + C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, + C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, + C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, + C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, + C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, + C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, + C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */, + C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, + C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, + C00B0E001C5AEBBE000330B3 /* source.c in Sources */, + C00B0E011C5AEBBE000330B3 /* time.c in Sources */, + C00B0E021C5AEBBE000330B3 /* data.c in Sources 
*/, + C00B0E031C5AEBBE000330B3 /* io.c in Sources */, + C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, + C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + C01866A51C5973210040FC07 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + C01866A61C5973210040FC07 /* protocol.defs in Sources */, + C01866A71C5973210040FC07 /* resolver.c in Sources */, + 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */, + C01866A81C5973210040FC07 /* init.c in Sources */, + C01866A91C5973210040FC07 /* queue.c in Sources */, + C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */, + C01866AB1C5973210040FC07 /* firehose.defs in Sources */, + C01866AC1C5973210040FC07 /* block.cpp in Sources */, + C01866AD1C5973210040FC07 /* semaphore.c in Sources */, + C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, + C01866AF1C5973210040FC07 /* once.c in Sources */, + C01866B01C5973210040FC07 /* voucher.c in Sources */, + C01866B11C5973210040FC07 /* apply.c in Sources */, + C01866B21C5973210040FC07 /* object.c in Sources */, + C01866B31C5973210040FC07 /* benchmark.c in Sources */, + C01866B41C5973210040FC07 /* source.c in Sources */, + C01866B51C5973210040FC07 /* time.c in Sources */, + C01866B61C5973210040FC07 /* data.c in Sources */, + C01866B71C5973210040FC07 /* io.c in Sources */, + C01866B81C5973210040FC07 /* transform.c in Sources */, + C01866B91C5973210040FC07 /* allocator.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; D2AAC044055464E500DB518D /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( E43570B9126E93380097AB9F /* provider.d in Sources */, FC7BEDA40E8361E600161930 /* protocol.defs in Sources */, + 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, + 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */, + 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */, E49F2499125D48D80057C971 /* resolver.c in 
Sources */, E44EBE3E1251659900645D88 /* init.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, + 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, + 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, @@ -2013,10 +2026,15 @@ files = ( E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, + 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, E46DBC4214EE10C80001F9F6 /* init.c in Sources */, E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, + 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, + 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, E43A72881AF85BE900BAA921 /* block.cpp in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, + 6E9956011C3B21980071D40C /* venture.c in Sources */, + 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */, E46DBC4514EE10C80001F9F6 /* once.c in Sources */, E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, @@ -2037,10 +2055,15 @@ files = ( E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, + 6E9956051C3B219B0071D40C /* venture.c in Sources */, + 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, + 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */, E49F24C9125D57FA0057C971 /* resolver.c in Sources */, E49F24CA125D57FA0057C971 /* init.c in Sources */, E49F24CB125D57FA0057C971 /* queue.c in Sources */, E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, + 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */, + 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */, E49F24CD125D57FA0057C971 /* once.c in Sources */, E49F24CE125D57FA0057C971 /* apply.c in Sources */, 
E49F24CF125D57FA0057C971 /* object.c in Sources */, @@ -2065,13 +2088,17 @@ E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, E4B515BF164B2DA300E003AF /* resolver.c in Sources */, + 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */, + 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, E4B515C0164B2DA300E003AF /* init.c in Sources */, E4B515C1164B2DA300E003AF /* queue.c in Sources */, + 6E9956021C3B21990071D40C /* venture.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, E4B515C3164B2DA300E003AF /* once.c in Sources */, E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, E4B515C4164B2DA300E003AF /* apply.c in Sources */, E4B515C5164B2DA300E003AF /* object.c in Sources */, + 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */, E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, E4B515C7164B2DA300E003AF /* source.c in Sources */, E4B515C8164B2DA300E003AF /* time.c in Sources */, @@ -2079,6 +2106,7 @@ E4B515CA164B2DA300E003AF /* io.c in Sources */, E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */, E4B515CB164B2DA300E003AF /* transform.c in Sources */, + 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, E4B515CC164B2DA300E003AF /* object.m in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, E4B515CE164B2DA300E003AF /* data.m in Sources */, @@ -2092,10 +2120,15 @@ files = ( E417A38412A472C4004D659D /* provider.d in Sources */, E44EBE5412517EBE00645D88 /* protocol.defs in Sources */, + 6E9956031C3B219A0071D40C /* venture.c in Sources */, + 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */, + 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, E49F2424125D3C970057C971 /* resolver.c in Sources */, E44EBE5512517EBE00645D88 /* init.c in Sources */, E4EC11AE12514302000DDBD1 /* queue.c in Sources */, E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, + 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */, + 6EBEC7E91BBDD325009B1596 
/* firehose_reply.defs in Sources */, E4EC11B012514302000DDBD1 /* once.c in Sources */, E4EC11B112514302000DDBD1 /* apply.c in Sources */, E4EC11B212514302000DDBD1 /* object.c in Sources */, @@ -2119,10 +2152,15 @@ files = ( E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, + 6E9956041C3B219B0071D40C /* venture.c in Sources */, + 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, + 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */, E49F2423125D3C960057C971 /* resolver.c in Sources */, E44EBE5712517EBE00645D88 /* init.c in Sources */, E4EC121A12514715000DDBD1 /* queue.c in Sources */, E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, + 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */, + 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */, E4EC121C12514715000DDBD1 /* once.c in Sources */, E4EC121D12514715000DDBD1 /* apply.c in Sources */, E4EC121E12514715000DDBD1 /* object.c in Sources */, @@ -2143,10 +2181,35 @@ /* End PBXSourcesBuildPhase section */ /* Begin PBXTargetDependency section */ - 4552540F19B138B700B88766 /* PBXTargetDependency */ = { + 6E2ECB021C49C31200A30A32 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 6E040C621C499B1B00411A2E /* libfirehose_kernel */; + targetProxy = 6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */; + }; + 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */; + targetProxy = 6EF0B27D1BA8C5BF007FA4F6 /* PBXContainerItemProxy */; + }; + 92CBD75A1BED926C006E0892 /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = all; - targetProxy = 4552540E19B138B700B88766 /* PBXContainerItemProxy */; + targetProxy = 92CBD7591BED926C006E0892 /* PBXContainerItemProxy */; + }; + 92F3FECF1BEC6F1000025962 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 92F3FECA1BEC69E500025962 /* darwintests */; + targetProxy = 
92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */; + }; + C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; + targetProxy = C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */; + }; + C01866C21C597AEA0040FC07 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = C01866A41C5973210040FC07 /* libdispatch mp static */; + targetProxy = C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */; }; C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */ = { isa = PBXTargetDependency; @@ -2160,7 +2223,7 @@ }; E437F0D614F7441F00F0B997 /* PBXTargetDependency */ = { isa = PBXTargetDependency; - target = E46DBC1A14EE10C80001F9F6 /* libdispatch static */; + target = E46DBC1A14EE10C80001F9F6 /* libdispatch up static */; targetProxy = E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */; }; E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { @@ -2221,6 +2284,106 @@ }; name = Debug; }; + 6E040C641C499B1B00411A2E /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 6E040C651C499B1B00411A2E /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 6E2ECAFF1C49C30000A30A32 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6E2ECB001C49C30000A30A32 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 6EB4E40B1BA8BCAD00D7B9D2 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 6EB4E40C1BA8BCAD00D7B9D2 /* Debug */ = { + isa = 
XCBuildConfiguration; + baseConfigurationReference = 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 92CBD7241BED924F006E0892 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 92CBD7251BED924F006E0892 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 92F3FECC1BEC69E500025962 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Release; + }; + 92F3FECD1BEC69E500025962 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + }; + name = Debug; + }; + C00B0E081C5AEBBE000330B3 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Release; + }; + C00B0E091C5AEBBE000330B3 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Debug; + }; + C01866BB1C5973210040FC07 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Release; + }; + C01866BC1C5973210040FC07 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + buildSettings = { + PRODUCT_NAME = "$(PRODUCT_NAME)"; + }; + name = Debug; + }; C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; buildSettings = { @@ -2235,14 +2398,14 @@ }; E46DBC5514EE10C80001F9F6 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + 
baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */; buildSettings = { }; name = Release; }; E46DBC5614EE10C80001F9F6 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-static.xcconfig */; + baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */; buildSettings = { }; name = Debug; @@ -2277,8 +2440,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = E43D93F11097917E004F6A62 /* libdispatch.xcconfig */; buildSettings = { - BUILD_VARIANTS = debug; - ONLY_ACTIVE_ARCH = YES; }; name = Debug; }; @@ -2360,6 +2521,69 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6E040C641C499B1B00411A2E /* Release */, + 6E040C651C499B1B00411A2E /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6E2ECAFE1C49C30000A30A32 /* Build configuration list for PBXAggregateTarget "libdispatch_kernel" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6E2ECAFF1C49C30000A30A32 /* Release */, + 6E2ECB001C49C30000A30A32 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6EB4E40A1BA8BCAD00D7B9D2 /* Build configuration list for PBXNativeTarget "libfirehose_server" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6EB4E40B1BA8BCAD00D7B9D2 /* Release */, + 6EB4E40C1BA8BCAD00D7B9D2 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 92CBD7231BED924F006E0892 /* Build configuration list for PBXAggregateTarget "libdispatch_tests_legacy" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 92CBD7241BED924F006E0892 /* Release */, + 92CBD7251BED924F006E0892 /* Debug */, + ); + 
defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 92F3FECC1BEC69E500025962 /* Release */, + 92F3FECD1BEC69E500025962 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C00B0E081C5AEBBE000330B3 /* Release */, + C00B0E091C5AEBBE000330B3 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + C01866BB1C5973210040FC07 /* Release */, + C01866BC1C5973210040FC07 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -2369,7 +2593,7 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch static" */ = { + E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch up static" */ = { isa = XCConfigurationList; buildConfigurations = ( E46DBC5514EE10C80001F9F6 /* Release */, diff --git a/man/Makefile.am b/man/Makefile.am index 0d58d141f..3ca6946ce 100644 --- a/man/Makefile.am +++ b/man/Makefile.am @@ -2,6 +2,7 @@ # # +if !HAVE_SWIFT dist_man3_MANS= \ dispatch.3 \ dispatch_after.3 \ @@ -148,3 +149,4 @@ uninstall-hook: dispatch_io_barrier.3 \ dispatch_io_write.3 \ dispatch_write.3 +endif diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 
21b3d95db..95ba1c348 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -101,9 +101,8 @@ lifetime is tracked by the Objective-C static analyzer. .El .Pp Integration of dispatch objects with Objective-C requires targeting Mac\ OS\ X -10.8 or later, and is disabled when building with Objective-C Garbage -Collection or for the legacy Objective-C runtime. It can also be disabled -manually by using compiler options to define the +10.8 or later, and is disabled when building for the legacy Objective-C runtime. +It can also be disabled manually by using compiler options to define the .Dv OS_OBJECT_USE_OBJC preprocessor macro to .Li 0 . diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 0ca0648e9..f3c305145 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -298,10 +298,6 @@ and only if they restore the thread to its original state before returning: .Fn pthread_sigmask .It .Fn pthread_setugid_np -.It -.Fn pthread_chdir -.It -.Fn pthread_fchdir .El .Pp Applications diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index b954bcff5..4da708cfb 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -271,8 +271,8 @@ Sources of this type allow applications to manually trigger the source's event handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or -logic OR (based on the source's type), and the event handler block will be -submitted to the source's target queue. The +atomic bitwise OR (based on the source's type), and the event handler block will +be submitted to the source's target queue. The .Fa data is application defined. These sources have no .Fa handle @@ -297,7 +297,8 @@ The data returned by .Fn dispatch_source_get_data indicates which of the events in the .Fa mask -were observed. 
Note that because this source type will request notifications on the provided port, it should not be mixed with the use of +were observed. Note that because this source type will request notifications on +the provided port, it should not be mixed with the use of .Fn mach_port_request_notification on the same port. .Pp @@ -314,8 +315,8 @@ on the mach port is waiting to be received. .Pp .Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE .Pp -Sources of this type monitor the system memory pressure condition for state changes. -The +Sources of this type monitor the system memory pressure condition for state +changes. The .Fa handle is unused and should be zero. The .Fa mask @@ -525,19 +526,24 @@ may be one or more of the following: The referenced node was removed from the filesystem namespace via .Xr unlink 2 . .It \(bu DISPATCH_VNODE_WRITE -A write to the referenced file occurred +A write to the referenced file occurred. .It \(bu DISPATCH_VNODE_EXTEND -The referenced file was extended +The referenced file was extended. .It \(bu DISPATCH_VNODE_ATTRIB -The metadata attributes of the referenced node have changed +The metadata attributes of the referenced node have changed. .It \(bu DISPATCH_VNODE_LINK -The link count on the referenced node has changed +The link count on the referenced node has changed. .It \(bu DISPATCH_VNODE_RENAME -The referenced node was renamed +The referenced node was renamed. .It \(bu DISPATCH_VNODE_REVOKE Access to the referenced node was revoked via .Xr revoke 2 or the underlying fileystem was unmounted. +.It \(bu DISPATCH_VNODE_FUNLOCK +The referenced file was unlocked by +.Xr flock 2 +or +.Xr close 2 . .El .Pp The data returned by diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index e318e90e9..4b4f9d863 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -49,7 +49,7 @@ Otherwise, if .Fa base is .Vt DISPATCH_TIME_NOW , -then the the current time of the default host clock is used. +then the current time of the default host clock is used. 
.Pp The .Fn dispatch_walltime diff --git a/os/Makefile.am b/os/Makefile.am index 2189f16b1..d009a3753 100644 --- a/os/Makefile.am +++ b/os/Makefile.am @@ -2,10 +2,17 @@ # # +if HAVE_SWIFT +osdir=${prefix}/lib/swift/os +else osdir=$(includedir)/os +endif os_HEADERS= \ - object.h + object.h \ + linux_base.h noinst_HEADERS= \ - object_private.h + object_private.h \ + voucher_activity_private.h \ + voucher_private.h diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h new file mode 100644 index 000000000..2c6466f94 --- /dev/null +++ b/os/firehose_buffer_private.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_BUFFER_PRIVATE__ +#define __FIREHOSE_BUFFER_PRIVATE__ + +#if OS_FIREHOSE_SPI +#ifdef KERNEL +#include +#else +#include +#include +#include +#endif + +#define OS_FIREHOSE_SPI_VERSION 20160318 + +/*! 
+ * @group Firehose SPI + * SPI intended for logd only + * Layout of structs is subject to change without notice + */ + +#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul +#define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul +#define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 + +typedef union { + uint64_t fbc_atomic_pos; +#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0) +#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16) +#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32) +#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56) +#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \ + ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream) + struct { + uint16_t fbc_next_entry_offs; + uint16_t fbc_private_offs; + uint8_t fbc_refcnt; + uint8_t fbc_qos_bits; + uint8_t fbc_stream; + uint8_t fbc_flag_full : 1; + uint8_t fbc_flag_io : 1; + uint8_t _fbc_flag_unused : 6; + }; +} firehose_buffer_pos_u; + +typedef struct firehose_buffer_chunk_s { + uint8_t fbc_start[0]; + firehose_buffer_pos_u volatile fbc_pos; + uint64_t fbc_timestamp; + uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE + - sizeof(firehose_buffer_pos_u) + - sizeof(uint64_t)]; +} __attribute__((aligned(8))) *firehose_buffer_chunk_t; + +typedef struct firehose_buffer_range_s { + uint16_t fbr_offset; // offset from the start of the buffer + uint16_t fbr_length; +} *firehose_buffer_range_t; + +#ifdef KERNEL + +// implemented by the kernel +extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); +extern void __firehose_critical_region_enter(void); +extern void __firehose_critical_region_leave(void); +extern void __firehose_allocate(vm_offset_t *addr, vm_size_t size); + +// exported for the kernel +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr); + +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, + uint64_t stamp, 
firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr); + +void +__firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, + firehose_tracepoint_id_u vatid); + +void +__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, + firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); + +firehose_buffer_t +__firehose_buffer_create(size_t *size); + +void +__firehose_merge_updates(firehose_push_reply_t update); + +#else + +#define __firehose_critical_region_enter() +#define __firehose_critical_region_leave() + +OS_EXPORT +const uint32_t _firehose_spi_version; + +OS_ALWAYS_INLINE +static inline const uint8_t * +_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc, + const uint8_t **endptr) +{ + const uint8_t *start = fbc->fbc_data; + const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs; + + if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) { + end = start; + } + *endptr = end; + return start; +} + +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +_firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + firehose_tracepoint_t ft; + + do { + ft = (firehose_tracepoint_t)*ptr; + if (ft->ft_data >= end) { + // reached the end + return NULL; + } + if (!ft->ft_length) { + // tracepoint write didn't even start + return NULL; + } + if (ft->ft_length > end - ft->ft_data) { + // invalid length + return NULL; + } + *ptr += roundup(ft_size + ft->ft_length, 8); + // test whether write of the tracepoint was finished + } while (os_unlikely(ft->ft_id.ftid_value == 0)); + + return ft; +} + +#define firehose_tracepoint_foreach(ft, fbc) \ + for (const uint8_t *end, *p = _firehose_tracepoint_reader_init(fbc, &end); \ + ((ft) = _firehose_tracepoint_reader_next(&p, end)); ) + +OS_ALWAYS_INLINE +static inline bool +firehose_buffer_range_validate(firehose_buffer_chunk_t fbc, + firehose_tracepoint_t ft, 
firehose_buffer_range_t range) +{ + if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) { + return false; + } + if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { + return false; + } + return true; +} + +#endif // !KERNEL + +#endif // OS_FIREHOSE_SPI + +#endif // __FIREHOSE_BUFFER_PRIVATE__ diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h new file mode 100644 index 000000000..4bff8abc1 --- /dev/null +++ b/os/firehose_server_private.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_SERVER_PRIVATE__ +#define __FIREHOSE_SERVER_PRIVATE__ + +#include +#include +#include "firehose_buffer_private.h" + +#if OS_FIREHOSE_SPI +/*! + * @group Firehose SPI + * SPI intended for logd only + */ + +#pragma mark - Firehose Client + +/*! + * @typedef firehose_client_t + * + * @abstract + * Represents a firehose client. + * + * @discussion + * Firehose client objects are os_object_t's, and it's legal to retain/release + * them with os_retain / os_release. + */ +OS_OBJECT_DECL_CLASS(firehose_client); + +/*! + * @typedef firehose_event_t + * + * @const FIREHOSE_EVENT_NONE + * Never passed to callbacks, meaningful for + * firehose_client_metadata_stream_peek. 
+ * + * @const FIREHOSE_EVENT_CLIENT_CONNECTED + * A new client has connected + * + * This is the first event delivered, and no event is delivered until + * the handler of that event returns + * + * The `page` argument really is really a firehose_client_connected_info_t. + * + * @const FIREHOSE_EVENT_CLIENT_DIED + * The specified client is gone and will not flush new buffers + * + * This is the last event delivered, it is never called before all other + * event handlers have returned. This event is generated even when a + * FIREHOSE_EVENT_CLIENT_CORRUPTED event has been generated. + * + * @const FIREHOSE_EVENT_IO_BUFFER_RECEIVED + * A new buffer needs to be pushed, `page` is set to that buffer. + * + * This event can be sent concurrently wrt FIREHOSE_EVENT_MEM_BUFFER_RECEIVED + * events. + * + * @const FIREHOSE_EVENT_MEM_BUFFER_RECEIVED + * A new buffer needs to be pushed, `page` is set to that buffer. + * + * This event can be sent concurrently wrt FIREHOSE_EVENT_IO_BUFFER_RECEIVED + * events. + * + * @const FIREHOSE_EVENT_CLIENT_CORRUPTED + * This event is received when a client is found being corrupted. + * `page` is set to the buffer header page. When this event is received, + * logs have likely been lost for this client. + * + * This buffer isn't really a proper firehose buffer page, but its content may + * be useful for debugging purposes. + * + * @const FIREHOSE_EVENT_CLIENT_FINALIZE + * This event is received when a firehose client structure is about to be + * destroyed. Only firehose_client_get_context() can ever be called with + * the passed firehose client. The `page` argument is NULL for this event. + * + * The event is sent from the context that is dropping the last refcount + * of the client. 
+ */ +OS_ENUM(firehose_event, unsigned long, + FIREHOSE_EVENT_NONE = 0, + FIREHOSE_EVENT_CLIENT_CONNECTED, + FIREHOSE_EVENT_CLIENT_DIED, + FIREHOSE_EVENT_IO_BUFFER_RECEIVED, + FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, + FIREHOSE_EVENT_CLIENT_CORRUPTED, + FIREHOSE_EVENT_CLIENT_FINALIZE, +); + +#define FIREHOSE_CLIENT_CONNECTED_INFO_VERSION 1 + +/*! + * @typedef firehose_client_connected_info + * + * @abstract + * Type of the data passed to CLIENT_CONNECTED events. + */ +typedef struct firehose_client_connected_info_s { + unsigned long fcci_version; + // version 1 + const void *fcci_data; + size_t fcci_size; +} *firehose_client_connected_info_t; + +/*! + * @function firehose_client_get_unique_pid + * + * @abstract + * Returns the unique pid of the specified firehose client + * + * @param client + * The specified client. + * + * @param pid + * The pid for this client. + * + * @returns + * The unique pid of the specified client. + */ +OS_NOTHROW OS_NONNULL1 +uint64_t +firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid); + +/*! + * @function firehose_client_get_metadata_buffer + * + * @abstract + * Returns the metadata buffer for the specified firehose client + * + * @param client + * The specified client. + * + * @param size + * The size of the metadata buffer. + * + * @returns + * The pointer to the buffer. + */ +OS_NOTHROW OS_NONNULL_ALL +void * +firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size); + +/*! + * @function firehose_client_get_context + * + * @abstract + * Gets the context for the specified client. + * + * @param client + * The specified client. + * + * @returns + * The context set for the client with firehose_client_set_context + */ +OS_NOTHROW OS_NONNULL1 +void * +firehose_client_get_context(firehose_client_t client); + +/*! + * @function firehose_client_set_context + * + * @abstract + * Sets the context for the specified client. 
+ * + * @discussion + * Setting the context exchanges the context pointer, but the client must + * ensure proper synchronization with possible getters. + * + * The lifetime of the context is under the control of the API user, + * it is suggested to destroy the context when the CLIENT_DIED event is + * received. + * + * @param client + * The specified client. + * + * @param ctxt + * The new context to set. + * + * @returns + * The previous context set for the client. + */ +OS_NOTHROW OS_NONNULL1 +void * +firehose_client_set_context(firehose_client_t client, void *ctxt); + +/*! + * @function firehose_client_metadata_stream_peek + * + * @abstract + * Peek at the metadata stream in flight buffers for a given client + * + * @discussion + * This function should never be called from the context of a snapshot + * handler. + * + * @param client + * The specified client + * + * @param context + * If this function is called synchronously from the handler passed to + * firehose_server_init, then `context` should be the event being processed. + * Else pass FIREHOSE_EVENT_NONE. + * + * @param peek_should_start + * Handler that is called prior to peeking to solve the race of metadata + * buffers not beeing processed yet at first lookup time, and being processed + * before the peek enumeration starts. + * + * If the handler returns false, then the enumeration doesn't start. + * If the race cannot happen, pass NULL. + * + * @param peek + * Handler that will receive all the live metadata buffers for this process. + * If the handler returns false, the enumeration is interrupted. + */ +OS_NOTHROW OS_NONNULL1 OS_NONNULL4 +void +firehose_client_metadata_stream_peek(firehose_client_t client, + firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void), + OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc)); + +#pragma mark - Firehose Server + +/*! 
+ * @typedef firehose_handler_t + * + * @abstract + * Type of the handler block for firehose_server_init() + */ +typedef void (^firehose_handler_t)(firehose_client_t client, + firehose_event_t event, firehose_buffer_chunk_t page); + +/*! + * @function firehose_server_init + * + * @abstract + * Initializes the firehose MiG server + * + * @discussion + * Initializes the firehose MiG server by boostrap registering the services + * and creating dispatch_sources for the same. + */ +OS_NOTHROW +void +firehose_server_init(mach_port_t firehose_comm_port, + firehose_handler_t handler); + +/*! + * @function firehose_server_assert_spi_version + * + * @abstract + * Checks that libdispatch and firehose components all match + * + * @discussion + * Will assert that all the components have the same SPI versions + */ +OS_NOTHROW +void +firehose_server_assert_spi_version(uint32_t spi_version); + +/*! + * @function firehose_server_resume + * + * @abstract + * Allows firehose events to flow + * + * @discussion + * Must be called after firehose_server_init() + */ +OS_NOTHROW +void +firehose_server_resume(void); + +#pragma mark - Firehose Snapshot + +/*! + * @typedef firehose_snapshot_event + * + */ +OS_ENUM(firehose_snapshot_event, unsigned long, + FIREHOSE_SNAPSHOT_EVENT_IO_START = 1, + FIREHOSE_SNAPSHOT_EVENT_MEM_START, + FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, + FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, + FIREHOSE_SNAPSHOT_EVENT_COMPLETE, +); + +/*! + * @typedef firehose_snapshot_handler_t + * + * @abstract + * Type of the handler block for firehose_snapshot + */ +typedef void (^firehose_snapshot_handler_t)(firehose_client_t client, + firehose_snapshot_event_t event, firehose_buffer_chunk_t page); + +/*! + * @function firehose_snapshot + * + * @abstract + * Gather a snapshot for the current firehose state. + * + * @discussion + * This function can be called several times, in which case snapshots are taken + * one after the other. 
If coalescing is desired, it has to be built around this + * call. + */ +OS_NOTHROW +void +firehose_snapshot(firehose_snapshot_handler_t handler); + +#endif // OS_FIREHOSE_SPI + +#endif // __FIREHOSE_SERVER_PRIVATE__ diff --git a/os/linux_base.h b/os/linux_base.h new file mode 100644 index 000000000..96a3c825b --- /dev/null +++ b/os/linux_base.h @@ -0,0 +1,94 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +#ifndef __OS_LINUX_BASE__ +#define __OS_LINUX_BASE__ + +#include +#include + +#if __GNUC__ +#define OS_EXPECT(x, v) __builtin_expect((x), (v)) +#else +#define OS_EXPECT(x, v) (x) +#endif + +#ifndef os_likely +#define os_likely(x) OS_EXPECT(!!(x), 1) +#endif +#ifndef os_unlikely +#define os_unlikely(x) OS_EXPECT(!!(x), 0) +#endif + +#if __has_feature(assume_nonnull) +#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_ASSUME_NONNULL_BEGIN +#define OS_ASSUME_NONNULL_END +#endif + +#if __has_builtin(__builtin_assume) +#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) +#else +#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr)) +#endif + +#if __has_feature(attribute_availability_swift) +// equivalent to __SWIFT_UNAVAILABLE from Availability.h +#define OS_SWIFT_UNAVAILABLE(_msg) \ + __attribute__((__availability__(swift, unavailable, message=_msg))) +#else +#define OS_SWIFT_UNAVAILABLE(_msg) +#endif + +#if __has_attribute(swift_private) +# define OS_REFINED_FOR_SWIFT __attribute__((__swift_private__)) +#else +# define OS_REFINED_FOR_SWIFT +#endif + +#if __has_attribute(swift_name) +# define OS_SWIFT_NAME(_name) 
__attribute__((__swift_name__(#_name))) +#else +# define OS_SWIFT_NAME(_name) +#endif + +#define __OS_STRINGIFY(s) #s +#define OS_STRINGIFY(s) __OS_STRINGIFY(s) +#define __OS_CONCAT(x, y) x ## y +#define OS_CONCAT(x, y) __OS_CONCAT(x, y) + +/* + * Stub out misc linking and compilation attributes + */ + +#ifdef OS_EXPORT +#undef OS_EXPORT +#endif +#define OS_EXPORT + +#ifdef OS_WARN_RESULT_NEEDS_RELEASE +#undef OS_WARN_RESULT_NEEDS_RELEASE +#endif + +#ifdef OS_WARN_RESULT +#undef OS_WARN_RESULT +#endif +#define OS_WARN_RESULT + +#ifdef OS_NOTHROW +#undef OS_NOTHROW +#endif +#define OS_NOTHROW + +#endif /* __OS_LINUX_BASE__ */ diff --git a/os/object.h b/os/object.h index e07aaec67..f3faa62fd 100644 --- a/os/object.h +++ b/os/object.h @@ -23,8 +23,13 @@ #ifdef __APPLE__ #include +#include #endif +#ifndef __linux__ #include +#else +#include +#endif /*! * @header @@ -50,14 +55,24 @@ */ #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT -#if defined(__OBJC__) && defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \ - __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \ - __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0) -#define OS_OBJECT_HAVE_OBJC_SUPPORT 1 +#if !defined(__OBJC__) || defined(__OBJC_GC__) +# define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +#elif !defined(TARGET_OS_MAC) || !TARGET_OS_MAC +# define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +#elif TARGET_OS_IOS && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_6_0 +# define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +#elif TARGET_OS_MAC && !TARGET_OS_IPHONE +# if __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_8 +# define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +# elif defined(__i386__) && __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12 +# define OS_OBJECT_HAVE_OBJC_SUPPORT 0 +# else +# define OS_OBJECT_HAVE_OBJC_SUPPORT 1 +# endif #else -#define OS_OBJECT_HAVE_OBJC_SUPPORT 0 -#endif +# define OS_OBJECT_HAVE_OBJC_SUPPORT 1 #endif +#endif // OS_OBJECT_HAVE_OBJC_SUPPORT #if OS_OBJECT_HAVE_OBJC_SUPPORT #ifndef OS_OBJECT_USE_OBJC @@ -71,18 +86,49 @@ #define OS_OBJECT_USE_OBJC 0 #endif 
+#ifndef OS_OBJECT_SWIFT3 +#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ + SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#define OS_OBJECT_SWIFT3 1 +#else +#define OS_OBJECT_SWIFT3 0 +#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#endif // OS_OBJECT_SWIFT3 + #if OS_OBJECT_USE_OBJC #import +#if __has_attribute(objc_independent_class) +#define OS_OBJC_INDEPENDENT_CLASS __attribute__((objc_independent_class)) +#endif // __has_attribute(objc_independent_class) +#ifndef OS_OBJC_INDEPENDENT_CLASS +#define OS_OBJC_INDEPENDENT_CLASS +#endif #define OS_OBJECT_CLASS(name) OS_##name -#define OS_OBJECT_DECL_IMPL(name, ...) \ +#define OS_OBJECT_DECL_PROTOCOL(name, ...) \ @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ - @end \ - typedef NSObject *name##_t + @end +#define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL(name, proto) \ + @interface name () \ + @end +#define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, proto) \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL( \ + OS_OBJECT_CLASS(name), OS_OBJECT_CLASS(proto)) +#define OS_OBJECT_DECL_IMPL(name, ...) \ + OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \ + typedef NSObject \ + * OS_OBJC_INDEPENDENT_CLASS name##_t +#define OS_OBJECT_DECL_BASE(name, ...) \ + @interface OS_OBJECT_CLASS(name) : __VA_ARGS__ \ + - (instancetype)init OS_SWIFT_UNAVAILABLE("Unavailable in Swift"); \ + @end +#define OS_OBJECT_DECL_IMPL_CLASS(name, ...) \ + OS_OBJECT_DECL_BASE(name, ## __VA_ARGS__) \ + typedef OS_OBJECT_CLASS(name) \ + * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL(name, ...) 
\ - OS_OBJECT_DECL_IMPL(name, __VA_ARGS__) + OS_OBJECT_DECL_IMPL(name, ) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ OS_OBJECT_DECL_IMPL(name, ) -#if defined(__has_attribute) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else @@ -93,11 +139,6 @@ #else #define OS_OBJECT_CONSUMED #endif -#else -#define OS_OBJECT_RETURNS_RETAINED -#define OS_OBJECT_CONSUMED -#endif -#if defined(__has_feature) #if __has_feature(objc_arc) #define OS_OBJECT_BRIDGE __bridge #define OS_WARN_RESULT_NEEDS_RELEASE @@ -105,23 +146,47 @@ #define OS_OBJECT_BRIDGE #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif +#if __has_attribute(objc_runtime_visible) && \ + ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ + __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) || \ + (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \ + !defined(__TV_OS_VERSION_MIN_REQUIRED) && \ + !defined(__WATCH_OS_VERSION_MIN_REQUIRED) && \ + __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0) || \ + (defined(__TV_OS_VERSION_MIN_REQUIRED) && \ + __TV_OS_VERSION_MIN_REQUIRED < __TVOS_10_0) || \ + (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && \ + __WATCH_OS_VERSION_MIN_REQUIRED < __WATCHOS_3_0)) +/* + * To provide backward deployment of ObjC objects in Swift on pre-10.12 + * SDKs, OS_object classes can be marked as OS_OBJECT_OBJC_RUNTIME_VISIBLE. + * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, + * tvOS 10.0, watchOS 3.0) the Swift compiler will only refer to this type at + * runtime (using the ObjC runtime). 
+ */ +#define OS_OBJECT_OBJC_RUNTIME_VISIBLE __attribute__((objc_runtime_visible)) #else -#define OS_OBJECT_BRIDGE -#define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT +#define OS_OBJECT_OBJC_RUNTIME_VISIBLE #endif #ifndef OS_OBJECT_USE_OBJC_RETAIN_RELEASE #if defined(__clang_analyzer__) #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 -#elif defined(__has_feature) -#if __has_feature(objc_arc) +#elif __has_feature(objc_arc) && !OS_OBJECT_SWIFT3 #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 1 #else #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif -#else -#define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 -#endif #endif +#if OS_OBJECT_SWIFT3 +#define OS_OBJECT_DECL_SWIFT(name) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, NSObject) +#define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) +OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE +OS_OBJECT_DECL_BASE(object, NSObject); +#endif // OS_OBJECT_SWIFT3 #else /*! @parseOnly */ #define OS_OBJECT_RETURNS_RETAINED @@ -131,9 +196,22 @@ #define OS_OBJECT_BRIDGE /*! @parseOnly */ #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT +/*! @parseOnly */ +#define OS_OBJECT_OBJC_RUNTIME_VISIBLE #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif +#if OS_OBJECT_SWIFT3 +#define OS_OBJECT_DECL_CLASS(name) \ + OS_OBJECT_DECL_SUBCLASS_SWIFT(name, object) +#elif OS_OBJECT_USE_OBJC +#define OS_OBJECT_DECL_CLASS(name) \ + OS_OBJECT_DECL(name) +#else +#define OS_OBJECT_DECL_CLASS(name) \ + typedef struct name##_s *name##_t +#endif + #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) __BEGIN_DECLS @@ -155,7 +233,7 @@ __BEGIN_DECLS * The retained object. 
*/ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_EXPORT +OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC") void* os_retain(void *object); #if OS_OBJECT_USE_OBJC @@ -178,7 +256,7 @@ os_retain(void *object); */ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_EXPORT -void +void OS_SWIFT_UNAVAILABLE("Can't be used with ARC") os_release(void *object); #if OS_OBJECT_USE_OBJC #undef os_release diff --git a/os/object_private.h b/os/object_private.h index 0f2f01dff..dc2af8345 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -53,15 +53,11 @@ #define OS_OBJECT_EXPORT extern #endif -#if OS_OBJECT_USE_OBJC && defined(__has_feature) -#if __has_feature(objc_arc) +#if OS_OBJECT_USE_OBJC && __has_feature(objc_arc) #define _OS_OBJECT_OBJC_ARC 1 #else #define _OS_OBJECT_OBJC_ARC 0 #endif -#else -#define _OS_OBJECT_OBJC_ARC 0 -#endif #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX @@ -71,10 +67,28 @@ int volatile xref_cnt #if OS_OBJECT_HAVE_OBJC_SUPPORT +#define OS_OBJECT_CLASS_SYMBOL(name) OS_##name##_class +#if TARGET_OS_MAC && !TARGET_OS_SIMULATOR && defined(__i386__) +#define OS_OBJECT_HAVE_OBJC1 1 +#define OS_OBJECT_HAVE_OBJC2 0 +#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) \ + ".objc_class_name_" OS_STRINGIFY(name) +#define _OS_OBJECT_CLASS_HEADER() \ + const void *_os_obj_objc_isa +#else +#define OS_OBJECT_HAVE_OBJC1 0 +#define OS_OBJECT_HAVE_OBJC2 1 +#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) "_OBJC_CLASS_$_" OS_STRINGIFY(name) // Must match size of compiler-generated OBJC_CLASS structure rdar://10640168 #define _OS_OBJECT_CLASS_HEADER() \ void *_os_obj_objc_class_t[5] +#endif +#define OS_OBJECT_OBJC_CLASS_DECL(name) \ + extern void *OS_OBJECT_CLASS_SYMBOL(name) \ + asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) #else +#define OS_OBJECT_HAVE_OBJC1 0 +#define OS_OBJECT_HAVE_OBJC2 0 #define _OS_OBJECT_CLASS_HEADER() \ void (*_os_obj_xref_dispose)(_os_object_t); \ void (*_os_obj_dispose)(_os_object_t) @@ -82,7 +96,22 @@ #define 
OS_OBJECT_CLASS(name) OS_##name -#if OS_OBJECT_USE_OBJC +#if OS_OBJECT_USE_OBJC && OS_OBJECT_SWIFT3 +@interface OS_OBJECT_CLASS(object) (OSObjectPrivate) +- (void)_xref_dispose; +- (void)_dispose; +@end +OS_OBJECT_DECL_PROTOCOL(object, ); +typedef OS_OBJECT_CLASS(object) *_os_object_t; +#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + @interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \ + \ + @end +#define _OS_OBJECT_DECL_PROTOCOL(name, super) \ + OS_OBJECT_DECL_PROTOCOL(name, ) +#define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) +#elif OS_OBJECT_USE_OBJC __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT @interface OS_OBJECT_CLASS(object) : NSObject @@ -95,49 +124,62 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; \ @end #else +#define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) +#define _OS_OBJECT_DECL_PROTOCOL(name, super) +#define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) typedef struct _os_object_s *_os_object_t; #endif +OS_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS #if !_OS_OBJECT_OBJC_ARC __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc(const void *cls, size_t size); __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc_realized(const void *cls, size_t size); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_dealloc(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain(_os_object_t object); 
__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_with_resurrect(_os_object_t obj); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_internal(_os_object_t object); __OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release_internal(_os_object_t object); @@ -145,4 +187,6 @@ _os_object_release_internal(_os_object_t object); __END_DECLS +OS_ASSUME_NONNULL_END + #endif diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h new file mode 100644 index 000000000..8f233b33c --- /dev/null +++ b/os/voucher_activity_private.h @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2013-2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__ +#define __OS_VOUCHER_ACTIVITY_PRIVATE__ + +#if OS_VOUCHER_ACTIVITY_SPI +#if __has_include() +#include +#include +#endif +#ifndef __linux__ +#include +#endif +#include +#include "voucher_private.h" + +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329 + +#if OS_VOUCHER_WEAK_IMPORT +#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT +#else +#define OS_VOUCHER_EXPORT OS_EXPORT +#endif + +#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \ + _Pragma("clang diagnostic pop") + +__BEGIN_DECLS + +/*! + * @const VOUCHER_CURRENT + * Shorthand for the currently adopted voucher + * + * This value can only be used as an argument to functions, and is never + * actually returned. It looks enough like a tagged pointer object that ARC + * won't crash if this is assigned to a temporary variable. + */ +#define VOUCHER_CURRENT ((OS_OBJECT_BRIDGE voucher_t)(void *)~2ul) + +/*! + * @function voucher_get_activity_id + * + * @abstract + * Returns the activity_id associated with the specified voucher at the time + * of the call. + * + * @discussion + * When the passed voucher is VOUCHER_CURRENT this returns the current + * activity ID. + * + * @param voucher + * The specified voucher. + * + * @param parent_id + * An out parameter to return the parent ID of the returned activity ID. + * + * @result + * The current activity identifier, if any. When 0 is returned, parent_id will + * also always be 0. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); + +/*! 
+ * @function voucher_get_activity_id_and_creator + * + * @abstract + * Returns the activity_id associated with the specified voucher at the time + * of the call. + * + * @discussion + * When the passed voucher is VOUCHER_CURRENT this returns the current + * activity ID. + * + * @param voucher + * The specified voucher. + * + * @param creator_pid + * The unique pid of the process that created the returned activity ID if any. + * + * @param parent_id + * An out parameter to return the parent ID of the returned activity ID. + * + * @result + * The current activity identifier, if any. When 0 is returned, parent_id will + * also always be 0. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, + firehose_activity_id_t *parent_id); + +/*! + * @function voucher_activity_create + * + * @abstract + * Creates a voucher object with a new activity identifier. + * + * @discussion + * As part of voucher transport, activities are automatically propagated by the + * system to other threads and processes (across IPC). + * + * When a voucher with an activity identifier is applied to a thread, work + * on that thread is done on behalf of this activity. + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id(), intended for + * identification of the automatic tracepoint generated as part of creating the + * new activity. + * + * @param base + * The base voucher used to create the activity. If the base voucher has an + * activity identifier, then the created activity will be parented to that one. + * If the passed in base has no activity identifier, the activity identifier + * will be a top-level one, on behalf of the process that created the base + * voucher. + * + * If base is VOUCHER_NONE, the activity is a top-level one, on behalf of the + * current process. 
+ *
+ * If base is VOUCHER_CURRENT, then the activity is naturally based on the
+ * one currently applied to the current thread (the one voucher_copy() would
+ * return).
+ *
+ * @param flags
+ * See voucher_activity_flag_t documentation for effect.
+ *
+ * @param location
+ * Location identifier for the automatic tracepoint generated as part of
+ * creating the new activity.
+ *
+ * @result
+ * A new voucher with an activity identifier.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_activity_create(firehose_tracepoint_id_t trace_id,
+		voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW
+voucher_t
+voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id,
+		voucher_t base, firehose_activity_flags_t flags, uint64_t location);
+
+/*!
+ * @group Voucher Activity Trace SPI
+ * SPI intended for libtrace only
+ */
+
+/*!
+ * @function voucher_activity_flush
+ *
+ * @abstract
+ * Force flushing the specified stream.
+ *
+ * @discussion
+ * This marks all the buffers currently being written to as full, so that
+ * their current content is pushed in a timely fashion.
+ *
+ * When this call returns, the actual flush may or may not yet have happened.
+ *
+ * @param stream
+ * The stream to flush.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+OS_VOUCHER_EXPORT OS_NOTHROW
+void
+voucher_activity_flush(firehose_stream_t stream);
+
+/*!
+ * @function voucher_activity_trace
+ *
+ * @abstract
+ * Add a tracepoint to the specified stream.
+ *
+ * @param stream
+ * The stream to trace this entry into.
+ * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id() + * + * @param timestamp + * The mach_approximate_time()/mach_absolute_time() value for this tracepoint. + * + * @param pubdata + * Pointer to packed buffer of tracepoint data. + * + * @param publen + * Length of data at 'pubdata'. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 +firehose_tracepoint_id_t +voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen); + +/*! + * @function voucher_activity_trace_with_private_strings + * + * @abstract + * Add a tracepoint to the specified stream, with private data. + * + * @param stream + * The stream to trace this entry into. + * + * @param trace_id + * Tracepoint identifier returned by voucher_activity_trace_id() + * + * @param timestamp + * The mach_approximate_time()/mach_absolute_time() value for this tracepoint. + * + * @param pubdata + * Pointer to packed buffer of tracepoint data. + * + * @param publen + * Length of data at 'pubdata'. + * + * @param privdata + * Pointer to packed buffer of private tracepoint data. + * + * @param privlen + * Length of data at 'privdata'. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6 +firehose_tracepoint_id_t +voucher_activity_trace_with_private_strings(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen); + +typedef struct voucher_activity_hooks_s { +#define VOUCHER_ACTIVITY_HOOKS_VERSION 3 + long vah_version; + // version 1 + mach_port_t (*vah_get_logd_port)(void); + // version 2 + dispatch_mach_handler_function_t vah_debug_channel_handler; + // version 3 + kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *); +} *voucher_activity_hooks_t; + +/*! + * @function voucher_activity_initialize_4libtrace + * + * @abstract + * Configure upcall hooks for libtrace. + * + * @param hooks + * A pointer to a voucher_activity_hooks_s structure. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL +void +voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); + +/*! + * @function voucher_activity_get_metadata_buffer + * + * @abstract + * Return address and length of buffer in the process trace memory area + * reserved for libtrace metadata. + * + * @param length + * Pointer to size_t variable, filled with length of metadata buffer. + * + * @result + * Address of metadata buffer. + */ +__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL +void* +voucher_activity_get_metadata_buffer(size_t *length); + +/*! + * @function voucher_get_activity_id_4dyld + * + * @abstract + * Return the current voucher activity ID. Available for the dyld client stub + * only. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +firehose_activity_id_t +voucher_get_activity_id_4dyld(void); + +__END_DECLS + +#endif // OS_VOUCHER_ACTIVITY_SPI + +#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__ diff --git a/private/voucher_private.h b/os/voucher_private.h similarity index 80% rename from private/voucher_private.h rename to os/voucher_private.h index e4c31a696..562a70415 100644 --- a/private/voucher_private.h +++ b/os/voucher_private.h @@ -21,10 +21,25 @@ #ifndef __OS_VOUCHER_PRIVATE__ #define __OS_VOUCHER_PRIVATE__ +#ifndef __linux__ #include +#endif +#if __has_include() #include +#include +#endif +#if __has_include() +#include +#endif +#if __has_include() +#include +#endif -#define OS_VOUCHER_SPI_VERSION 20141203 +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#include +#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ + +#define OS_VOUCHER_SPI_VERSION 20150630 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -32,6 +47,8 @@ #define OS_VOUCHER_EXPORT OS_EXPORT #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -50,11 +67,18 @@ __BEGIN_DECLS * Voucher objects are os_objects (c.f. ). They are memory-managed * with the os_retain()/os_release() functions or -[retain]/-[release] methods. */ -#if OS_OBJECT_USE_OBJC -OS_OBJECT_DECL(voucher); -#else -typedef struct voucher_s *voucher_t; -#endif +OS_OBJECT_DECL_CLASS(voucher); + +/*! + * @const VOUCHER_NULL + * Represents the empty base voucher with no attributes. + */ +#define VOUCHER_NULL ((voucher_t)0) +/*! + * @const VOUCHER_INVALID + * Represents an invalid voucher + */ +#define VOUCHER_INVALID ((voucher_t)-1) /*! 
* @function voucher_adopt @@ -79,8 +103,8 @@ typedef struct voucher_s *voucher_t; __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW -voucher_t -voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED); +voucher_t _Nullable +voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); /*! * @function voucher_copy @@ -94,7 +118,7 @@ voucher_adopt(voucher_t voucher OS_OBJECT_CONSUMED); */ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW -voucher_t +voucher_t _Nullable voucher_copy(void); /*! @@ -113,7 +137,7 @@ voucher_copy(void); */ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW -voucher_t +voucher_t _Nullable voucher_copy_without_importance(void); /*! @@ -151,23 +175,19 @@ voucher_replace_default_voucher(void); * * @discussion * This is only intended for use by CoreFoundation to explicitly manage the - * App Nap state of an application following receiption of a de-nap IPC message. + * App Nap state of an application following reception of a de-nap IPC message. * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_VOUCHER_EXPORT OS_NOTHROW void -voucher_decrement_importance_count4CF(voucher_t voucher); +voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); /*! * @group Voucher dispatch block SPI */ -#ifndef __DISPATCH_BUILDING_DISPATCH__ -#include -#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ - /*! * @typedef dispatch_block_flags_t * SPI Flags to pass to the dispatch_block_create* functions. 
@@ -248,7 +268,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t dispatch_block_create_with_voucher(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_block_t block); + voucher_t _Nullable voucher, dispatch_block_t block); /*! * @function dispatch_block_create_with_voucher_and_qos_class @@ -331,7 +351,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_qos_class_t qos_class, + voucher_t _Nullable voucher, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block); /*! @@ -391,17 +411,18 @@ __OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_queue_create_with_accounting_override_voucher(const char *label, - dispatch_queue_attr_t attr, voucher_t voucher); +dispatch_queue_create_with_accounting_override_voucher( + const char *_Nullable label, + dispatch_queue_attr_t _Nullable attr, + voucher_t _Nullable voucher); +#if __has_include() /*! * @group Voucher Mach SPI * SPI intended for clients that need to interact with mach messages or mach * voucher ports directly. */ -#include - /*! * @function voucher_create_with_mach_msg * @@ -421,15 +442,100 @@ dispatch_queue_create_with_accounting_override_voucher(const char *label, */ __OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW -voucher_t +voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); +/*! + * @group Voucher Persona SPI + * SPI intended for clients that need to interact with personas. + */ + +struct proc_persona_info; + +/*! 
+ * @function voucher_get_current_persona + * + * @abstract + * Retrieve the persona identifier of the 'originator' process for the current + * voucher. + * + * @discussion + * Retrieve the persona identifier of the ’originator’ process possibly stored + * in the PERSONA_TOKEN attribute of the currently adopted voucher. + * + * If the thread has not adopted a voucher, or the current voucher does not + * contain a PERSONA_TOKEN attribute, this function returns the persona + * identifier of the current process. + * + * If the process is not running under a persona, then this returns + * PERSONA_ID_NONE. + * + * @result + * The persona identifier of the 'originator' process for the current voucher, + * or the persona identifier of the current process + * or PERSONA_ID_NONE + */ +__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +uid_t +voucher_get_current_persona(void); + +/*! + * @function voucher_get_current_persona_originator_info + * + * @abstract + * Retrieve the ’originator’ process persona info for the currently adopted + * voucher. + * + * @discussion + * If there is no currently adopted voucher, or no PERSONA_TOKEN attribute + * in that voucher, this function fails. + * + * @param persona_info + * The proc_persona_info structure to fill in case of success + * + * @result + * 0 on success: currently adopted voucher has a PERSONA_TOKEN + * -1 on failure: persona_info is untouched/uninitialized + */ +__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 +int +voucher_get_current_persona_originator_info( + struct proc_persona_info *persona_info); + +/*! + * @function voucher_get_current_persona_proximate_info + * + * @abstract + * Retrieve the ’proximate’ process persona info for the currently adopted + * voucher. + * + * @discussion + * If there is no currently adopted voucher, or no PERSONA_TOKEN attribute + * in that voucher, this function fails. 
+ * + * @param persona_info + * The proc_persona_info structure to fill in case of success + * + * @result + * 0 on success: currently adopted voucher has a PERSONA_TOKEN + * -1 on failure: persona_info is untouched/uninitialized + */ +__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 +int +voucher_get_current_persona_proximate_info( + struct proc_persona_info *persona_info); + +#endif // __has_include() + __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __OS_VOUCHER_PRIVATE__ -#if (OS_VOUCHER_ACTIVITY_SPI || OS_VOUCHER_ACTIVITY_BUFFER_SPI) && \ - !defined(__DISPATCH_BUILDING_DISPATCH__) && \ - !defined(__OS_VOUCHER_ACTIVITY_PRIVATE__) -#include +#if OS_VOUCHER_ACTIVITY_SPI +#include "voucher_activity_private.h" #endif diff --git a/private/Makefile.am b/private/Makefile.am index de1239168..98840d570 100644 --- a/private/Makefile.am +++ b/private/Makefile.am @@ -7,6 +7,7 @@ noinst_HEADERS= \ data_private.h \ introspection_private.h \ io_private.h \ + layout_private.h \ mach_private.h \ private.h \ queue_private.h \ diff --git a/private/benchmark.h b/private/benchmark.h index c6edfe632..ef3cdbd2f 100644 --- a/private/benchmark.h +++ b/private/benchmark.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! 
@@ -71,14 +73,17 @@ __BEGIN_DECLS __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW uint64_t -dispatch_benchmark(size_t count, void (^block)(void)); +dispatch_benchmark(size_t count, dispatch_block_t block); #endif __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW uint64_t -dispatch_benchmark_f(size_t count, void *ctxt, void (*func)(void *)); +dispatch_benchmark_f(size_t count, void *_Nullable ctxt, + dispatch_function_t func); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/private/data_private.h b/private/data_private.h index 751b7ce9c..7485525a5 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -51,7 +53,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); */ #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ (_dispatch_data_destructor_vm_deallocate) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); /*! @@ -80,8 +82,8 @@ DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_f(const void *buffer, size_t size, - dispatch_queue_t queue, - dispatch_function_t destructor); + dispatch_queue_t _Nullable queue, + dispatch_function_t _Nullable destructor); /*! * @function dispatch_data_create_alloc @@ -102,7 +104,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t -dispatch_data_create_alloc(size_t size, void** buffer_ptr); +dispatch_data_create_alloc(size_t size, void *_Nullable *_Nullable buffer_ptr); /*! 
* @typedef dispatch_data_applier_function_t @@ -116,7 +118,7 @@ dispatch_data_create_alloc(size_t size, void** buffer_ptr); * @param size The size of the memory for the current region. * @result A Boolean indicating whether traversal should continue. */ -typedef bool (*dispatch_data_applier_function_t)(void *context, +typedef bool (*dispatch_data_applier_function_t)(void *_Nullable context, dispatch_data_t region, size_t offset, const void *buffer, size_t size); /*! @@ -143,7 +145,7 @@ typedef bool (*dispatch_data_applier_function_t)(void *context, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool -dispatch_data_apply_f(dispatch_data_t data, void *context, +dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, dispatch_data_applier_function_t applier); #if TARGET_OS_MAC @@ -159,7 +161,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *context, * * @param data The data object to make a memory entry for. * @result A mach port for the newly made memory entry, or - * MACH_PORT_NULL if an error ocurred. + * MACH_PORT_NULL if an error occurred. */ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW @@ -286,7 +288,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); * Flags specifying the input format of the source dispatch_data_t * * @param output_type - * Flags specifying the expected output format of the resulting transfomation. + * Flags specifying the expected output format of the resulting transformation. 
* * @result * A newly created dispatch data object, dispatch_data_empty if no has been @@ -303,4 +305,6 @@ dispatch_data_create_with_transform(dispatch_data_t data, __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __DISPATCH_DATA_PRIVATE__ diff --git a/private/introspection_private.h b/private/introspection_private.h index 7ac0e7e92..fa8e49aeb 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -535,7 +535,7 @@ typedef void (*dispatch_introspection_hook_queue_item_complete_t)( * @typedef dispatch_introspection_hooks_s * * @abstract - * A structure of function pointer hoooks into libdispatch. + * A structure of function pointer hooks into libdispatch. */ typedef struct dispatch_introspection_hooks_s { diff --git a/private/io_private.h b/private/io_private.h index 4a00ee004..0bb1e3b25 100644 --- a/private/io_private.h +++ b/private/io_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -83,8 +85,8 @@ void dispatch_read_f(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, - void *context, - void (*handler)(void *context, dispatch_data_t data, int error)); + void *_Nullable context, + void (*handler)(void *_Nullable context, dispatch_data_t data, int error)); /*! * @function dispatch_write_f @@ -126,8 +128,9 @@ void dispatch_write_f(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, - void *context, - void (*handler)(void *context, dispatch_data_t data, int error)); + void *_Nullable context, + void (*handler)(void *_Nullable context, dispatch_data_t _Nullable data, + int error)); /*! * @function dispatch_io_create_f @@ -164,8 +167,8 @@ dispatch_io_t dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! 
* @function dispatch_io_create_with_path_f @@ -204,8 +207,8 @@ dispatch_io_t dispatch_io_create_with_path_f(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! * @function dispatch_io_create_with_io_f @@ -248,8 +251,8 @@ dispatch_io_t dispatch_io_create_with_io_f(dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t queue, - void *context, - void (*cleanup_handler)(void *context, int error)); + void *_Nullable context, + void (*cleanup_handler)(void *_Nullable context, int error)); /*! * @typedef dispatch_io_handler_function_t @@ -260,8 +263,8 @@ dispatch_io_create_with_io_f(dispatch_io_type_t type, * @param data The data object to be handled. * @param error An errno condition for the operation. */ -typedef void (*dispatch_io_handler_function_t)(void *context, bool done, - dispatch_data_t data, int error); +typedef void (*dispatch_io_handler_function_t)(void *_Nullable context, + bool done, dispatch_data_t _Nullable data, int error); /*! * @function dispatch_io_read_f @@ -316,7 +319,7 @@ dispatch_io_read_f(dispatch_io_t channel, off_t offset, size_t length, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_io_handler_function_t io_handler); /*! @@ -373,7 +376,7 @@ dispatch_io_write_f(dispatch_io_t channel, off_t offset, dispatch_data_t data, dispatch_queue_t queue, - void *context, + void *_Nullable context, dispatch_io_handler_function_t io_handler); /*! 
@@ -403,9 +406,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_io_barrier_f(dispatch_io_t channel, - void *context, + void *_Nullable context, dispatch_function_t barrier); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif /* __DISPATCH_IO_PRIVATE__ */ diff --git a/private/layout_private.h b/private/layout_private.h index 17e8ed836..bf93ee999 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -69,16 +69,6 @@ DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { const uint16_t dti_qos_class_index; } dispatch_tsd_indexes; -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -DISPATCH_EXPORT const struct voucher_offsets_s { - // always add new fields at the end - const uint16_t vo_version; - const uint16_t vo_activity_ids_count; - const uint16_t vo_activity_ids_count_size; - const uint16_t vo_activity_ids_array; - const uint16_t vo_activity_ids_array_entry_size; -} voucher_offsets; - #endif // DISPATCH_LAYOUT_SPI __END_DECLS diff --git a/private/mach_private.h b/private/mach_private.h index 93c1e811c..2228436a7 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -36,8 +36,12 @@ __BEGIN_DECLS #if DISPATCH_MACH_SPI +#define DISPATCH_MACH_SPI_VERSION 20160505 + #include +DISPATCH_ASSUME_NONNULL_BEGIN + /*! * @functiongroup Dispatch Mach Channel SPI * @@ -53,7 +57,8 @@ DISPATCH_DECL(dispatch_mach); /*! * @typedef dispatch_mach_reason_t - * Reasons for a mach channel handler to be invoked. + * Reasons for a mach channel handler to be invoked, or the result of an + * immediate send attempt. * * @const DISPATCH_MACH_CONNECTED * The channel has been connected. The first handler invocation on a channel @@ -91,6 +96,19 @@ DISPATCH_DECL(dispatch_mach); * * @const DISPATCH_MACH_CANCELED * The channel has been canceled. 
+ * + * @const DISPATCH_MACH_REPLY_RECEIVED + * A synchronous reply to a call to dispatch_mach_send_and_wait_for_reply() has + * been received on another thread, an empty message is passed in the message + * parameter (so that associated port rights can be disposed of). + * The message header will contain a local port with a receive right associated + * with the reply to the message that was synchronously sent to the channel. + * + * @const DISPATCH_MACH_NEEDS_DEFERRED_SEND + * The message could not be sent synchronously. Only returned from a send with + * result operation and never passed to a channel handler. Indicates that the + * message passed to the send operation must not be disposed of until it is + * returned via the channel handler. */ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CONNECTED = 1, @@ -101,9 +119,19 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_BARRIER_COMPLETED, DISPATCH_MACH_DISCONNECTED, DISPATCH_MACH_CANCELED, + DISPATCH_MACH_REPLY_RECEIVED, + DISPATCH_MACH_NEEDS_DEFERRED_SEND, DISPATCH_MACH_REASON_LAST, /* unused */ ); +/*! + * @typedef dispatch_mach_send_flags_t + * Flags that can be passed to the *with_flags send functions. + */ +DISPATCH_ENUM(dispatch_mach_send_flags, unsigned long, + DISPATCH_MACH_SEND_DEFAULT = 0, +); + /*! * @typedef dispatch_mach_trailer_t * Trailer type of mach message received by dispatch mach channels @@ -178,8 +206,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_mach_msg_t -dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, - dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr); +dispatch_mach_msg_create(mach_msg_header_t *_Nullable msg, size_t size, + dispatch_mach_msg_destructor_t destructor, + mach_msg_header_t *_Nonnull *_Nullable msg_ptr); /*! 
* @function dispatch_mach_msg_get_msg @@ -193,7 +222,8 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW mach_msg_header_t* -dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); +dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, + size_t *_Nullable size_ptr); #ifdef __BLOCKS__ /*! @@ -205,7 +235,7 @@ dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, size_t *size_ptr); * @param error Mach error code for the send operation. */ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, - dispatch_mach_msg_t message, mach_error_t error); + dispatch_mach_msg_t _Nullable message, mach_error_t error); /*! * @function dispatch_mach_create @@ -241,8 +271,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL3 DISPATCH_NOTHROW dispatch_mach_t -dispatch_mach_create(const char *label, dispatch_queue_t queue, - dispatch_mach_handler_t handler); +dispatch_mach_create(const char *_Nullable label, + dispatch_queue_t _Nullable queue, dispatch_mach_handler_t handler); #endif /*! @@ -254,8 +284,8 @@ dispatch_mach_create(const char *label, dispatch_queue_t queue, * @param message Message object that was sent or received. * @param error Mach error code for the send operation. */ -typedef void (*dispatch_mach_handler_function_t)(void *context, - dispatch_mach_reason_t reason, dispatch_mach_msg_t message, +typedef void (*dispatch_mach_handler_function_t)(void *_Nullable context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t _Nullable message, mach_error_t error); /*! 
@@ -295,7 +325,8 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL4 DISPATCH_NOTHROW dispatch_mach_t -dispatch_mach_create_f(const char *label, dispatch_queue_t queue, void *context, +dispatch_mach_create_f(const char *_Nullable label, + dispatch_queue_t _Nullable queue, void *_Nullable context, dispatch_mach_handler_function_t handler); /*! @@ -327,7 +358,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive, - mach_port_t send, dispatch_mach_msg_t checkin); + mach_port_t send, dispatch_mach_msg_t _Nullable checkin); /*! * @function dispatch_mach_reconnect @@ -358,7 +389,7 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send, - dispatch_mach_msg_t checkin); + dispatch_mach_msg_t _Nullable checkin); /*! * @function dispatch_mach_cancel @@ -426,6 +457,222 @@ void dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message, mach_msg_option_t options); +/*! + * @function dispatch_mach_send_with_result + * Asynchronously send a message encapsulated in a dispatch mach message object + * to the specified mach channel. If an immediate send can be performed, return + * its result via out parameters. + * + * Unless the message is being sent to a send-once right (as determined by the + * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits), + * the message header remote port is set to the channel send right before the + * send operation is performed. 
+ *
+ * If the message expects a direct reply (as determined by the presence of
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) the receive
+ * right specified in the message header local port will be monitored until a
+ * reply message (or a send-once notification) is received, or the channel is
+ * canceled. Hence the application must wait for the channel handler to be
+ * invoked with a DISPATCH_MACH_DISCONNECTED message before releasing that
+ * receive right.
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. If the message expected a direct reply, the
+ * receive right originally specified in the message header local port will be
+ * returned in a DISPATCH_MACH_DISCONNECTED message.
+ *
+ * If an immediate send could be performed, returns the resulting reason
+ * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the
+ * send_result and send_error out parameters (instead of via the channel
+ * handler), in which case the passed-in message and associated resources
+ * can be disposed of synchronously.
+ *
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND
+ * in the send_result out parameter to indicate that the passed-in message has
+ * been retained and associated resources must not be disposed of until the
+ * message is returned asynchronously via the channel handler.
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. Unless an immediate
+ * send could be performed, the object will be retained until the asynchronous
+ * send operation is complete and the channel handler has returned. The storage
+ * underlying the message object may be modified by the send operation.
+ * + * @param options + * Additional send options to pass to mach_msg() when performing the send + * operation. + * + * @param send_flags + * Flags to configure the send operation. Must be 0 for now. + * + * @param send_result + * Out parameter to return the result of the immediate send attempt. + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND. + * Must not be NULL. + * + * @param send_error + * Out parameter to return the error from the immediate send attempt. + * If a deferred send is required, returns 0. Must not be NULL. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 +DISPATCH_NONNULL6 DISPATCH_NOTHROW +void +dispatch_mach_send_with_result(dispatch_mach_t channel, + dispatch_mach_msg_t message, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error); + +/*! + * @function dispatch_mach_send_and_wait_for_reply + * Synchronously send a message encapsulated in a dispatch mach message object + * to the specified mach channel and wait for a reply. + * + * Unless the message is being sent to a send-once right (as determined by the + * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits), + * the message header remote port is set to the channel send right before the + * send operation is performed. + * + * The message is required to expect a direct reply (as determined by the + * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) + * and this function will not complete until the receive right specified in the + * message header local port receives a reply message (or a send-once + * notification) which will be returned, or until that receive right is + * destroyed in response to the channel being canceled, in which case NULL will + * be returned. 
+ * In all these cases the application must wait for the channel handler to
+ * be invoked with a DISPATCH_MACH_REPLY_RECEIVED or DISPATCH_MACH_DISCONNECTED
+ * message before releasing that receive right.
+ *
+ * Alternatively, the application may specify MACH_PORT_NULL in the header local
+ * port to indicate that the channel should create and manage the reply receive
+ * right internally, including destroying it upon channel cancellation.
+ * This is a more efficient mode of operation as no asynchronous operations are
+ * required to return the receive right (i.e. the channel handler will not be
+ * called as described above).
+ *
+ * If the message send operation is attempted but the channel is canceled
+ * before the send operation successfully completes, the message returned to the
+ * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a
+ * pseudo-receive operation. The receive right originally specified in the
+ * message header local port will be returned in a DISPATCH_MACH_DISCONNECTED
+ * message (unless it was MACH_PORT_NULL).
+ *
+ * @param channel
+ * The mach channel to which to send the message.
+ *
+ * @param message
+ * The message object encapsulating the message to send. The object will be
+ * retained until the send operation is complete and the channel handler has
+ * returned. The storage underlying the message object may be modified by the
+ * send operation.
+ *
+ * @param options
+ * Additional send options to pass to mach_msg() when performing the send
+ * operation.
+ *
+ * @result
+ * The received reply message object, or NULL if the channel was canceled.
+ */
+__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0)
+DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT
+DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW
+dispatch_mach_msg_t _Nullable
+dispatch_mach_send_and_wait_for_reply(dispatch_mach_t channel,
+		dispatch_mach_msg_t message, mach_msg_option_t options);
+
+/*!
+ * @function dispatch_mach_send_with_result_and_wait_for_reply + * Synchronously send a message encapsulated in a dispatch mach message object + * to the specified mach channel and wait for a reply. If an immediate send can + * be performed, return its result via out parameters. + * + * Unless the message is being sent to a send-once right (as determined by the + * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits), + * the message header remote port is set to the channel send right before the + * send operation is performed. + * + * The message is required to expect a direct reply (as determined by the + * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits) + * and this function will not complete until the receive right specified in the + * message header local port receives a reply message (or a send-once + * notification) which will be returned, or until that receive right is + * destroyed in response to the channel being canceled, in which case NULL will + * be returned. + * In all these cases the application must wait for the channel handler to + * be invoked with a DISPATCH_MACH_REPLY_RECEIVED or DISPATCH_MACH_DISCONNECTED + * message before releasing that receive right. + * + * Alternatively, the application may specify MACH_PORT_NULL in the header local + * port to indicate that the channel should create and manage the reply receive + * right internally, including destroying it upon channel cancellation. + * This is a more efficient mode of operation as no asynchronous operations are + * required to return the receive right (i.e. the channel handler will not be + * called as described above). + * + * If the message send operation is attempted but the channel is canceled + * before the send operation successfully completes, the message returned to the + * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a + * pseudo-receive operation. 
The receive right originally specified in the + * message header local port will be returned in a DISPATCH_MACH_DISCONNECTED + * message (unless it was MACH_PORT_NULL). + * + * If an immediate send could be performed, returns the resulting reason + * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the + * send_result and send_error out parameters (instead of via the channel + * handler), in which case the passed-in message and associated resources + * can be disposed of synchronously. + * + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND + * in the send_result out parameter to indicate that the passed-in message has + * been retained and associated resources must not be disposed of until the + * message is returned asynchronously via the channel handler. + * + * @param channel + * The mach channel to which to send the message. + * + * @param message + * The message object encapsulating the message to send. Unless an immediate + * send could be performed, the object will be retained until the asynchronous + * send operation is complete and the channel handler has returned. The storage + * underlying the message object may be modified by the send operation. + * + * @param options + * Additional send options to pass to mach_msg() when performing the send + * operation. + * + * @param send_flags + * Flags to configure the send operation. Must be 0 for now. + * + * @param send_result + * Out parameter to return the result of the immediate send attempt. + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND. + * Must not be NULL. + * + * @param send_error + * Out parameter to return the error from the immediate send attempt. + * If a deferred send is required, returns 0. Must not be NULL. + * + * @result + * The received reply message object, or NULL if the channel was canceled. 
+ */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 +DISPATCH_NOTHROW +dispatch_mach_msg_t _Nullable +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t channel, + dispatch_mach_msg_t message, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error); + #ifdef __BLOCKS__ /*! * @function dispatch_mach_send_barrier @@ -467,7 +714,7 @@ dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *context, +dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, dispatch_function_t barrier); #ifdef __BLOCKS__ @@ -510,7 +757,7 @@ dispatch_mach_receive_barrier(dispatch_mach_t channel, __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *context, +dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context, dispatch_function_t barrier); /*! 
@@ -539,6 +786,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t channel); +DISPATCH_ASSUME_NONNULL_END + #endif // DISPATCH_MACH_SPI __END_DECLS diff --git a/private/module.modulemap b/private/module.modulemap new file mode 100644 index 000000000..62975a59b --- /dev/null +++ b/private/module.modulemap @@ -0,0 +1,11 @@ +module DispatchPrivate [system] [extern_c] { + umbrella header "private.h" + exclude header "mach_private.h" + module * { export * } + export * +} + +module DispatchIntrospectionPrivate [system] [extern_c] { + header "introspection_private.h" + export * +} diff --git a/private/private.h b/private/private.h index 46d0e5d48..3c37bed0d 100644 --- a/private/private.h +++ b/private/private.h @@ -54,7 +54,9 @@ #include #include #include +#if DISPATCH_MACH_SPI #include +#endif // DISPATCH_MACH_SPI #include #include #include @@ -64,10 +66,12 @@ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20141121 // Keep in sync with +#if DISPATCH_API_VERSION != 20160712 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -117,6 +121,34 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_is_fork_of_multithreaded_parent(void); +/*! + * @function _dispatch_prohibit_transition_to_multithreaded + * + * @abstract + * Sets a mode that aborts if a program tries to use dispatch. + * + * @discussion + * This SPI is intended for use by programs that know they will use fork() and + * want their children to be able to use dispatch before exec(). Such programs + * should call _dispatch_prohibit_transition_to_multithreaded(true) as early as + * possible, which will cause any use of dispatch API that would make the + * process multithreaded to abort immediately. 
+ * + * Once the program no longer intends to call fork() it can call + * _dispatch_prohibit_transition_to_multithreaded(false). + * + * This status is not inherited by the child process, so if the behavior + * is required after fork, _dispatch_prohibit_transition_to_multithreaded(true) + * should be called manually in the child after fork. + * + * If the program already used dispatch before the guard is enabled, then + * this function will abort immediately. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); + /* * dispatch_time convenience macros */ @@ -131,40 +163,60 @@ bool _dispatch_is_fork_of_multithreaded_parent(void); dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) /* - * SPI for CoreFoundation/Foundation/libauto ONLY + * SPI for CoreFoundation/Foundation ONLY */ -#define DISPATCH_COCOA_COMPAT (TARGET_OS_MAC || TARGET_OS_WIN32) +#if TARGET_OS_MAC +#define DISPATCH_COCOA_COMPAT 1 +#elif defined(__linux__) +#define DISPATCH_COCOA_COMPAT 1 +#else +#define DISPATCH_COCOA_COMPAT 0 +#endif #if DISPATCH_COCOA_COMPAT +#define DISPATCH_CF_SPI_VERSION 20160712 + +#if TARGET_OS_MAC +typedef mach_port_t dispatch_runloop_handle_t; +#elif defined(__linux__) +typedef int dispatch_runloop_handle_t; +#else +#error "runloop support not implemented on this platform" +#endif + #if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW -mach_port_t +dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); +#endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) DISPATCH_EXPORT DISPATCH_NOTHROW -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg); -#elif TARGET_OS_WIN32 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) 
-DISPATCH_EXPORT DISPATCH_NOTHROW -HANDLE +dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void); +#if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT DISPATCH_NOTHROW void -_dispatch_main_queue_callback_4CF(void); -#endif // TARGET_OS_WIN32 +_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg); +#else +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); +#endif __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -_dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags); +_dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, + unsigned long flags); #if TARGET_OS_MAC __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) @@ -191,21 +243,16 @@ _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT -void (*dispatch_begin_thread_4GC)(void); - -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -void (*dispatch_end_thread_4GC)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT -void *(*_dispatch_begin_NSAutoReleasePool)(void); +void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void); __OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_EXPORT -void (*_dispatch_end_NSAutoReleasePool)(void *); +void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); #endif /* DISPATCH_COCOA_COMPAT */ __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif // __DISPATCH_PRIVATE__ diff --git a/private/queue_private.h b/private/queue_private.h index f2bb69132..33de371c8 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! 
@@ -54,6 +56,18 @@ enum { * Returns a dispatch queue attribute value with the overcommit flag set to the * specified value. * + * This attribute only makes sense when the specified queue is targeted at + * a root queue. Passing this attribute to dispatch_queue_create_with_target() + * with a target queue that is not a root queue will result in an assertion and + * the process being terminated. + * + * It is recommended to not specify a target queue at all when using this + * attribute and to use dispatch_queue_attr_make_with_qos_class() to select the + * appropriate QoS class instead. + * + * Queues created with this attribute cannot change target after having been + * activated. See dispatch_set_target_queue() and dispatch_activate(). + * * @param attr * A queue attribute value to be combined with the overcommit flag, or NULL. * @@ -68,7 +82,7 @@ enum { __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t -dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t attr, +dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, bool overcommit); /*! @@ -120,50 +134,6 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_queue_set_width(dispatch_queue_t dq, long width); -/*! - * @function dispatch_queue_create_with_target - * - * @abstract - * Creates a new dispatch queue with a specified target queue. - * - * @discussion - * Dispatch queues created with the DISPATCH_QUEUE_SERIAL or a NULL attribute - * invoke blocks serially in FIFO order. - * - * Dispatch queues created with the DISPATCH_QUEUE_CONCURRENT attribute may - * invoke blocks concurrently (similarly to the global concurrent queues, but - * potentially with more overhead), and support barrier blocks submitted with - * the dispatch barrier API, which e.g. enables the implementation of efficient - * reader-writer schemes. 
- * - * When a dispatch queue is no longer needed, it should be released with - * dispatch_release(). Note that any pending blocks submitted to a queue will - * hold a reference to that queue. Therefore a queue will not be deallocated - * until all pending blocks have finished. - * - * @param label - * A string label to attach to the queue. - * This parameter is optional and may be NULL. - * - * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). - * - * @param target - * The target queue for the newly created queue. The target queue is retained. - * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the queue's target - * queue to the default target queue for the given queue type. - * - * @result - * The newly created dispatch queue. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_queue_t -dispatch_queue_create_with_target(const char *label, - dispatch_queue_attr_t attr, dispatch_queue_t target); - #ifdef __BLOCKS__ /*! * @function dispatch_pthread_root_queue_create @@ -223,8 +193,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure); +dispatch_pthread_root_queue_create(const char *_Nullable label, + unsigned long flags, const pthread_attr_t *_Nullable attr, + dispatch_block_t _Nullable configure); /*! * @function dispatch_pthread_root_queue_flags_pool_size @@ -257,80 +228,31 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) #endif /* __BLOCKS__ */ /*! 
- * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE - * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() - * functions to indicate that the root queue for the current thread should be - * used (i.e. one of the global concurrent queues or a queue created with - * dispatch_pthread_root_queue_create()). If there is no such queue, the - * default priority global concurrent queue will be used. - */ -#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE NULL - -/*! - * @function dispatch_assert_queue + * @function dispatch_pthread_root_queue_copy_current * * @abstract - * Verifies that the current block is executing on a certain dispatch queue. - * - * @discussion - * Some code expects to be run on a specific dispatch queue. This function - * verifies that expectation for debugging. - * - * This function will only return if the currently executing block was submitted - * to the specified queue or to any queue targeting it (see - * dispatch_set_target_queue()). Otherwise, it logs an explanation to the system - * log, then terminates the application. - * - * When dispatch_assert_queue() is called outside of the context of a - * submitted block, its behavior is undefined. - * - * Passing the result of dispatch_get_main_queue() to this function verifies - * that the current block was submitted to the main queue or to a queue - * targeting it. - * IMPORTANT: this is NOT the same as verifying that the current block is - * executing on the main thread. + * Returns a reference to the pthread root queue object that has created the + * currently executing thread, or NULL if the current thread is not associated + * to a pthread root queue. * - * The variant dispatch_assert_queue_debug() is compiled out when the - * preprocessor macro NDEBUG is defined. (See also assert(3)). - * - * @param queue - * The dispatch queue that the current block is expected to run on. - * The result of passing NULL in this parameter is undefined. 
+ * @result + * A new reference to a pthread root queue object or NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 -void -dispatch_assert_queue(dispatch_queue_t queue); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_queue_t _Nullable +dispatch_pthread_root_queue_copy_current(void); /*! - * @function dispatch_assert_queue_not - * - * @abstract - * Verifies that the current block is not executing on a certain dispatch queue. - * - * @discussion - * This function is the equivalent of dispatch_queue_assert() with the test for - * equality inverted. See discussion there. - * - * The variant dispatch_assert_queue_not_debug() is compiled out when the - * preprocessor macro NDEBUG is defined. (See also assert(3)). - * - * @param queue - * The dispatch queue that the current block is expected not to run on. - * The result of passing NULL in this parameter is undefined. + * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE + * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() + * functions to indicate that the root queue for the current thread should be + * used (i.e. one of the global concurrent queues or a queue created with + * dispatch_pthread_root_queue_create()). If there is no such queue, the + * default priority global concurrent queue will be used. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NONNULL1 -void -dispatch_assert_queue_not(dispatch_queue_t queue); - -#ifdef NDEBUG -#define dispatch_assert_queue_debug(q) ((void)0) -#define dispatch_assert_queue_not_debug(q) ((void)0) -#else -#define dispatch_assert_queue_debug(q) dispatch_assert_queue(q) -#define dispatch_assert_queue_not_debug(q) dispatch_assert_queue_not(q) -#endif +#define DISPATCH_APPLY_CURRENT_ROOT_QUEUE ((dispatch_queue_t _Nonnull)0) /*! 
* @function dispatch_async_enforce_qos_class_f @@ -366,10 +288,11 @@ __OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, - void *context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/private/source_private.h b/private/source_private.h index e8373ba26..bb1370238 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +DISPATCH_ASSUME_NONNULL_BEGIN + __BEGIN_DECLS /*! @@ -77,18 +79,19 @@ DISPATCH_SOURCE_TYPE_DECL(interval); * The handle is a process identifier (pid_t). */ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; /*! * @const DISPATCH_SOURCE_TYPE_VM * @discussion A dispatch source that monitors virtual memory * The mask is a mask of desired events from dispatch_source_vm_flags_t. - * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead. + * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead. */ #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) __OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3, - __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYSTATUS instead") + __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; /*! @@ -98,7 +101,11 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; * dispatch_source_memorystatus_flags_t. 
*/ #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_memorystatus; @@ -107,7 +114,7 @@ DISPATCH_EXPORT const struct dispatch_source_type_s * @discussion A dispatch source that monitors events on socket state changes. */ #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; __END_DECLS @@ -150,6 +157,9 @@ __END_DECLS * * @constant DISPATCH_SOCK_CONNINFO_UPDATED * Connection info was updated + * + * @constant DISPATCH_SOCK_NOTIFY_ACK + * Notify acknowledgement */ enum { DISPATCH_SOCK_CONNRESET = 0x00000001, @@ -166,6 +176,7 @@ enum { DISPATCH_SOCK_CONNECTED = 0x00000800, DISPATCH_SOCK_DISCONNECTED = 0x00001000, DISPATCH_SOCK_CONNINFO_UPDATED = 0x00002000, + DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, }; /*! @@ -200,6 +211,9 @@ enum { * * @constant DISPATCH_VFS_VERYLOWDISK * File system has *very* little disk space left. + * + * @constant DISPATCH_VFS_QUOTA + * We hit a user quota (quotactl) for this filesystem. */ enum { DISPATCH_VFS_NOTRESP = 0x0001, @@ -212,6 +226,7 @@ enum { DISPATCH_VFS_NOTRESPLOCK = 0x0080, DISPATCH_VFS_UPDATE = 0x0100, DISPATCH_VFS_VERYLOWDISK = 0x0200, + DISPATCH_VFS_QUOTA = 0x1000, }; /*! 
@@ -270,36 +285,127 @@ enum { enum { DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG( __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0, - "Use DISPATCH_MEMORYSTATUS_PRESSURE_WARN instead") = 0x80000000, + "Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000, }; /*! - * @enum dispatch_source_memorystatus_flags_t + * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags * - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - * The system's memory pressure state has returned to normal. - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_WARN - * The system's memory pressure state has changed to warning. - * @constant DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - * The system's memory pressure state has changed to critical. - * @constant DISPATCH_MEMORYSTATUS_LOW_SWAP + * @constant DISPATCH_MEMORYPRESSURE_LOW_SWAP * The system's memory pressure state has entered the "low swap" condition. * Restricted to the root user. */ +enum { + DISPATCH_MEMORYPRESSURE_LOW_SWAP + __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, +}; +/*! 
+ * @enum dispatch_source_memorystatus_flags_t + * @warning Deprecated, see DISPATCH_MEMORYPRESSURE_* + */ enum { DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x01, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") + = 0x01, DISPATCH_MEMORYSTATUS_PRESSURE_WARN - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0) = 0x02, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") + = 0x02, DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_8_0) = 0x04, + __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") + = 0x04, DISPATCH_MEMORYSTATUS_LOW_SWAP - __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, + __OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") + = 0x08, }; +/*! 
+ * @typedef dispatch_source_memorypressure_flags_t + * Type of dispatch_source_memorypressure flags + * + * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN + * The memory of the process has crossed 80% of its high watermark limit. + * + * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL + * The memory of the process has reached 100% of its high watermark limit. + */ +enum { + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN + __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) + __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10, + + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL + __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) + __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20, +}; + + __BEGIN_DECLS +/*! + * @function dispatch_source_cancel_and_wait + * + * @abstract + * Synchronously cancel the dispatch source, preventing any further invocation + * of its event handler block. + * + * @discussion + * Cancellation prevents any further invocation of handler blocks for the + * specified dispatch source, but does not interrupt a handler block that is + * already in progress. + * + * When this function returns, any handler block that may have been in progress + * has returned, the specified source has been unregistered and it is safe to + * reclaim any system resource (such as file descriptors or mach ports) that + * the specified source was monitoring. + * + * If the specified dispatch source is inactive, it will be activated as a side + * effect of calling this function. + * + * It is possible to call this function from several threads concurrently, + * and it is the responsibility of the callers to synchronize reclaiming the + * associated system resources. + * + * This function is not subject to priority inversion when it is waiting on + * a handler block still in progress, unlike patterns based on waiting on + * a dispatch semaphore or a dispatch group signaled (or left) from the source + * cancel handler. 
+ * + * This function must not be called if the specified source has a cancel + * handler set, or from the context of its handler blocks. + * + * This function must not be called from the context of the target queue of + * the specified source or from any queue that synchronizes with it. Note that + * calling dispatch_source_cancel() from such a context already guarantees + * that no handler is in progress, and that no new event will be delivered. + * + * This function must not be called on sources suspended with an explicit + * call to dispatch_suspend(), or being concurrently activated on another + * thread. + * + * @param source + * The dispatch source to be canceled. + * The result of passing NULL in this parameter is undefined. + */ +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) +__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_source_cancel_and_wait(dispatch_source_t source); + /*! * @typedef dispatch_timer_aggregate_t * @@ -350,9 +456,9 @@ __OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) DISPATCH_EXPORT DISPATCH_NOTHROW uint64_t dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, - uint64_t *leeway_ptr); + uint64_t *_Nullable leeway_ptr); -#if TARGET_OS_MAC +#if __has_include() /*! * @typedef dispatch_mig_callback_t * @@ -362,7 +468,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, @@ -374,13 +480,15 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, * @abstract * Extract the context pointer from a mach message trailer. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void * +void *_Nullable dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif __END_DECLS +DISPATCH_ASSUME_NONNULL_END + #endif diff --git a/private/voucher_activity_private.h b/private/voucher_activity_private.h deleted file mode 100644 index 8a13e769f..000000000 --- a/private/voucher_activity_private.h +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Copyright (c) 2013-2014 Apple Inc. All rights reserved. - * - * @APPLE_APACHE_LICENSE_HEADER_START@ - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * @APPLE_APACHE_LICENSE_HEADER_END@ - */ - -#ifndef __OS_VOUCHER_ACTIVITY_PRIVATE__ -#define __OS_VOUCHER_ACTIVITY_PRIVATE__ - -#include -#include -#if !defined(__DISPATCH_BUILDING_DISPATCH__) -#include -#endif - -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20150318 - -#if OS_VOUCHER_WEAK_IMPORT -#define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT -#else -#define OS_VOUCHER_EXPORT OS_EXPORT -#endif - -__BEGIN_DECLS - -#if OS_VOUCHER_ACTIVITY_SPI - -/*! - * @group Voucher Activity SPI - * SPI intended for libtrace only - */ - -/*! - * @typedef voucher_activity_id_t - * - * @abstract - * Opaque activity identifier. - * - * @discussion - * Scalar value type, not reference counted. - */ -typedef uint64_t voucher_activity_id_t; - -/*! 
- * @enum voucher_activity_tracepoint_type_t - * - * @abstract - * Types of tracepoints. - */ -OS_ENUM(voucher_activity_tracepoint_type, uint8_t, - voucher_activity_tracepoint_type_release = (1u << 0), - voucher_activity_tracepoint_type_debug = (1u << 1), - voucher_activity_tracepoint_type_error = (1u << 6) | (1u << 0), - voucher_activity_tracepoint_type_fault = (1u << 7) | (1u << 6) | (1u << 0), -); - -/*! - * @enum voucher_activity_flag_t - * - * @abstract - * Flags to pass to voucher_activity_start/voucher_activity_start_with_location - */ -OS_ENUM(voucher_activity_flag, unsigned long, - voucher_activity_flag_default = 0, - voucher_activity_flag_force = 0x1, - voucher_activity_flag_debug = 0x2, - voucher_activity_flag_persist = 0x4, - voucher_activity_flag_stream = 0x8, -); - -/*! - * @typedef voucher_activity_trace_id_t - * - * @abstract - * Opaque tracepoint identifier. - */ -typedef uint64_t voucher_activity_trace_id_t; -static const uint8_t _voucher_activity_trace_id_type_shift = 40; -static const uint8_t _voucher_activity_trace_id_code_namespace_shift = 32; - -/*! - * @function voucher_activity_trace_id - * - * @abstract - * Return tracepoint identifier for specified arguments. - * - * @param type - * Tracepoint type from voucher_activity_tracepoint_type_t. - * - * @param code_namespace - * Namespace of 'code' argument. - * - * @param code - * Tracepoint code. - * - * @result - * Tracepoint identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_INLINE OS_ALWAYS_INLINE -voucher_activity_trace_id_t -voucher_activity_trace_id(uint8_t type, uint8_t code_namespace, uint32_t code) -{ - return ((voucher_activity_trace_id_t)type << - _voucher_activity_trace_id_type_shift) | - ((voucher_activity_trace_id_t)code_namespace << - _voucher_activity_trace_id_code_namespace_shift) | - (voucher_activity_trace_id_t)code; -} - -/*! 
- * @function voucher_activity_start - * - * @abstract - * Creates a new activity identifier and marks the current thread as - * participating in the activity. - * - * @discussion - * As part of voucher transport, activities are automatically propagated by the - * system to other threads and processes (across IPC). - * - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start(). - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id(), intended for - * identification of the automatic tracepoint generated as part of creating the - * new activity. - * - * @param flags - * Pass voucher_activity_flag_force to indicate that existing activities - * on the current thread should not be inherited and that a new toplevel - * activity should be created. - * - * @result - * A new activity identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags); - -/*! - * @function voucher_activity_start_with_location - * - * @abstract - * Creates a new activity identifier and marks the current thread as - * participating in the activity. - * - * @discussion - * As part of voucher transport, activities are automatically propagated by the - * system to other threads and processes (across IPC). - * - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start_with_location(). - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id(), intended for - * identification of the automatic tracepoint generated as part of creating the - * new activity. 
- * - * @param location - * Location identifier for the automatic tracepoint generated as part of - * creating the new activity. - * - * @param flags - * Pass voucher_activity_flag_force to indicate that existing activities - * on the current thread should not be inherited and that a new toplevel - * activity should be created. - * - * @result - * A new activity identifier. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags); - -/*! - * @function voucher_activity_end - * - * @abstract - * Unmarks the current thread if it is marked as particpating in the activity - * with the specified identifier. - * - * @discussion - * Activities persist as long as any threads in any process are marked as - * participating. There may be many calls to voucher_activity_end() - * corresponding to one call to voucher_activity_start() or - * voucher_activity_start_with_location(). - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_end(voucher_activity_id_t activity_id); - -/*! - * @function voucher_get_activities - * - * @abstract - * Returns the list of activity identifiers that the current thread is marked - * with. - * - * @param entries - * Pointer to an array of activity identifiers to be filled in. - * - * @param count - * Pointer to the requested number of activity identifiers. - * On output will be filled with the number of activities that are available. - * - * @result - * Number of activity identifiers written to 'entries' - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -unsigned int -voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count); - -/*! - * @group Voucher Activity Trace SPI - * SPI intended for libtrace only - */ - -/*! 
- * @function voucher_activity_get_namespace - * - * @abstract - * Returns the namespace of the current activity. - * - * @result - * The namespace of the current activity (if any). - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint8_t -voucher_activity_get_namespace(void); - -/*! - * @function voucher_activity_trace - * - * @abstract - * Add a tracepoint to trace buffer of the current activity. - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param buffer - * Pointer to packed buffer of tracepoint data. - * - * @param length - * Length of data at 'buffer'. - * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length); - -/*! - * @function voucher_activity_trace_strings - * - * @abstract - * Add a tracepoint with strings data to trace buffer of the current activity. - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param buffer - * Pointer to packed buffer of tracepoint data. - * - * @param length - * Length of data at 'buffer'. - * - * @param strings - * NULL-terminated array of strings data. - * - * @param string_lengths - * Array of string lengths (required to have the same number of elements as the - * 'strings' array): string_lengths[i] is the maximum number of characters to - * copy from strings[i], excluding the NUL-terminator (may be smaller than the - * length of the string present in strings[i]). - * - * @param total_strings_size - * Total size of all strings data to be copied from strings array (including - * all NUL-terminators). 
- * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void *buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t total_strings_size); - -/*! - * @function voucher_activity_trace_args - * - * @abstract - * Add a tracepoint to trace buffer of the current activity, recording - * specified arguments passed in registers. - * - * @param trace_id - * Tracepoint identifier returned by voucher_activity_trace_id() - * - * @param location - * Tracepoint location. - * - * @param arg1 - * Argument to be recorded in tracepoint data. - * - * @param arg2 - * Argument to be recorded in tracepoint data. - * - * @param arg3 - * Argument to be recorded in tracepoint data. - * - * @param arg4 - * Argument to be recorded in tracepoint data. - * - * @result - * Timestamp recorded in tracepoint or 0 if no tracepoint was recorded. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4); - -/*! - * @group Voucher Activity Mode SPI - * SPI intended for libtrace only - */ - -/*! - * @enum voucher_activity_mode_t - * - * @abstract - * Voucher activity mode. - * - * @discussion - * Configure at process start by setting the OS_ACTIVITY_MODE environment - * variable. - */ -OS_ENUM(voucher_activity_mode, unsigned long, - voucher_activity_mode_disable = 0, - voucher_activity_mode_release = (1u << 0), - voucher_activity_mode_debug = (1u << 1), - voucher_activity_mode_stream = (1u << 2), -); - -/*! - * @function voucher_activity_get_mode - * - * @abstract - * Return current mode of voucher activity subsystem. 
- * - * @result - * Value from voucher_activity_mode_t enum. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW -voucher_activity_mode_t -voucher_activity_get_mode(void); - -/*! - * @function voucher_activity_set_mode_4libtrace - * - * @abstract - * Set the current mode of voucher activity subsystem. - * - * @param mode - * The new mode. - * - * Note that the new mode will take effect soon, but not immediately. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode); - -/*! - * @group Voucher Activity Metadata SPI - * SPI intended for libtrace only - */ - -/*! - * @function voucher_activity_get_metadata_buffer - * - * @abstract - * Return address and length of buffer in the process trace memory area - * reserved for libtrace metadata. - * - * @param length - * Pointer to size_t variable, filled with length of metadata buffer. - * - * @result - * Address of metadata buffer. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL -void* -voucher_activity_get_metadata_buffer(size_t *length); - -#endif // OS_VOUCHER_ACTIVITY_SPI - -#if OS_VOUCHER_ACTIVITY_BUFFER_SPI - -/*! 
- * @group Voucher Activity Tracepoint SPI - * SPI intended for diagnosticd only - */ - -OS_ENUM(_voucher_activity_tracepoint_flag, uint16_t, - _voucher_activity_trace_flag_buffer_empty = 0, - _voucher_activity_trace_flag_tracepoint = (1u << 0), - _voucher_activity_trace_flag_tracepoint_args = (1u << 1), - _voucher_activity_trace_flag_tracepoint_strings = (1u << 2), - _voucher_activity_trace_flag_wide_first = (1u << 6), - _voucher_activity_trace_flag_wide_second = (1u << 6) | (1u << 7), - _voucher_activity_trace_flag_start = (1u << 8), - _voucher_activity_trace_flag_end = (1u << 8) | (1u << 9), - _voucher_activity_trace_flag_libdispatch = (1u << 13), - _voucher_activity_trace_flag_activity = (1u << 14), - _voucher_activity_trace_flag_buffer_header = (1u << 15), -); - -// for tracepoints with _voucher_activity_trace_flag_libdispatch -OS_ENUM(_voucher_activity_tracepoint_namespace, uint8_t, - _voucher_activity_tracepoint_namespace_ipc = 0x1 -); -OS_ENUM(_voucher_activity_tracepoint_code, uint32_t, - _voucher_activity_tracepoint_namespace_ipc_send = 0x1, - _voucher_activity_tracepoint_namespace_ipc_receive = 0x2, -); - -typedef struct _voucher_activity_tracepoint_s { - uint16_t vat_flags; // voucher_activity_tracepoint_flag_t - uint8_t vat_type; // voucher_activity_tracepoint_type_t - uint8_t vat_namespace; // namespace for tracepoint code - uint32_t vat_code; // tracepoint code - uint64_t vat_thread; // pthread_t - uint64_t vat_timestamp; // absolute time - uint64_t vat_location; // tracepoint PC - union { - uint64_t vat_data[4]; // trace data - struct { - uint16_t vats_offset; // offset to string data (from buffer end) - uint8_t vats_data[30]; // trace data - } vat_stroff; // iff _vat_flag_tracepoint_strings present - }; -} *_voucher_activity_tracepoint_t; - -/*! 
- * @group Voucher Activity Buffer Internals - * SPI intended for diagnosticd only - * Layout of structs is subject to change without notice - */ - -#include -#include -#include - -static const size_t _voucher_activity_buffer_size = 4096; -static const size_t _voucher_activity_tracepoints_per_buffer = - _voucher_activity_buffer_size / - sizeof(struct _voucher_activity_tracepoint_s); -static const size_t _voucher_activity_buffer_header_size = - sizeof(struct _voucher_activity_tracepoint_s); -static const size_t _voucher_activity_strings_header_size = 0; // TODO - -typedef uint8_t _voucher_activity_buffer_t[_voucher_activity_buffer_size]; - -static const size_t _voucher_activity_buffers_per_heap = 512; -typedef unsigned long _voucher_activity_bitmap_base_t; -static const size_t _voucher_activity_bits_per_bitmap_base_t = - 8 * sizeof(_voucher_activity_bitmap_base_t); -static const size_t _voucher_activity_bitmaps_per_heap = - _voucher_activity_buffers_per_heap / - _voucher_activity_bits_per_bitmap_base_t; -typedef _voucher_activity_bitmap_base_t - _voucher_activity_bitmap_t[_voucher_activity_bitmaps_per_heap] - __attribute__((__aligned__(64))); - -struct _voucher_activity_self_metadata_s { - struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; - _voucher_activity_bitmap_t volatile vam_buffer_bitmap; -}; - -typedef struct _voucher_activity_metadata_opaque_s { - _voucher_activity_buffer_t vam_client_metadata; - union { - struct _voucher_activity_self_metadata_s vam_self_metadata; - _voucher_activity_buffer_t vam_self_metadata_opaque; - }; -} *_voucher_activity_metadata_opaque_t; - -typedef os_lock_handoff_s _voucher_activity_lock_s; - -OS_ENUM(_voucher_activity_buffer_atomic_flags, uint8_t, - _voucher_activity_buffer_full = (1u << 0), - _voucher_activity_buffer_pushing = (1u << 1), -); - -typedef union { - uint64_t vabp_atomic_pos; - struct { - uint16_t vabp_refcnt; - uint8_t vabp_flags; - uint8_t vabp_unused; - uint16_t vabp_next_tracepoint_idx; - uint16_t 
vabp_string_offset; // offset from the _end_ of the buffer - } vabp_pos; -} _voucher_activity_buffer_position_u; - -// must match layout of _voucher_activity_tracepoint_s -typedef struct _voucher_activity_buffer_header_s { - uint16_t vabh_flags; // _voucher_activity_trace_flag_buffer_header - uint8_t vat_type; - uint8_t vat_namespace; - uint32_t vat_code; - uint64_t vat_thread; - uint64_t vat_timestamp; - uint64_t vat_location; - voucher_activity_id_t vabh_activity_id; - _voucher_activity_buffer_position_u volatile vabh_pos; - TAILQ_ENTRY(_voucher_activity_buffer_header_s) vabh_list; -} *_voucher_activity_buffer_header_t; - -/*! - * @enum _voucher_activity_buffer_hook_reason - * - * @constant _voucher_activity_buffer_hook_reason_full - * Specified activity buffer is full. - * Will be reported reused or freed later. - * - * @constant _voucher_activity_buffer_hook_reason_reuse - * Specified activity buffer is about to be reused. - * Was previously reported as full. - * - * @constant _voucher_activity_buffer_hook_reason_free - * Specified activity buffer is about to be freed. - * May have been previously reported as full or may be only partially filled. - */ -typedef enum _voucher_activity_buffer_hook_reason { - _voucher_activity_buffer_hook_reason_full = 0x1, - _voucher_activity_buffer_hook_reason_reuse = 0x2, - _voucher_activity_buffer_hook_reason_free = 0x4, -} _voucher_activity_buffer_hook_reason; - -/*! - * @typedef _voucher_activity_buffer_hook_t - * - * @abstract - * A function pointer called when an activity buffer is full or being freed. - * NOTE: callbacks occur under an activity-wide handoff lock and work done - * inside the callback function must not block or otherwise cause that lock to - * be held for a extended period of time. - * - * @param reason - * Reason for callback. - * - * @param buffer - * Pointer to activity buffer. 
- */ -typedef void (*_voucher_activity_buffer_hook_t)( - _voucher_activity_buffer_hook_reason reason, - _voucher_activity_buffer_header_t buffer); - -/*! - * @function voucher_activity_buffer_hook_install_4libtrace - * - * @abstract - * Install activity buffer hook callback function. - * Must be called from the libtrace initializer, and at most once. - * - * @param hook - * Hook function to install. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) -OS_VOUCHER_EXPORT OS_NOTHROW -void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook); - -#endif // OS_VOUCHER_ACTIVITY_BUFFER_SPI - -__END_DECLS - -#endif // __OS_VOUCHER_ACTIVITY_PRIVATE__ diff --git a/src/Makefile.am b/src/Makefile.am index 630a4806d..c417aec97 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -2,12 +2,19 @@ # # +if HAVE_SWIFT +swiftlibdir=${prefix}/lib/swift/linux +swiftlib_LTLIBRARIES=libdispatch.la +else lib_LTLIBRARIES=libdispatch.la +endif libdispatch_la_SOURCES= \ + allocator.c \ apply.c \ benchmark.c \ data.c \ + introspection.c \ init.c \ io.c \ object.c \ @@ -17,10 +24,14 @@ libdispatch_la_SOURCES= \ source.c \ time.c \ transform.c \ + voucher.c \ protocol.defs \ provider.d \ + allocator_internal.h \ data_internal.h \ + inline_internal.h \ internal.h \ + introspection_internal.h \ io_internal.h \ object_internal.h \ queue_internal.h \ @@ -28,49 +39,140 @@ libdispatch_la_SOURCES= \ shims.h \ source_internal.h \ trace.h \ + voucher_internal.h \ + firehose/firehose_internal.h \ shims/atomic.h \ + shims/atomic_sfb.h \ shims/getprogname.h \ shims/hw_config.h \ + shims/linux_stubs.c \ + shims/linux_stubs.h \ + shims/lock.c \ + shims/lock.h \ shims/perfmon.h \ shims/time.h \ - shims/tsd.h + shims/tsd.h \ + shims/yield.h + +EXTRA_libdispatch_la_SOURCES= +EXTRA_libdispatch_la_DEPENDENCIES= -AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) \ - -I$(top_srcdir)/private -I$(top_srcdir)/os +AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private 
-AM_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ - $(MARCH_FLAGS) $(CBLOCKS_FLAGS) $(KQUEUE_CFLAGS) +DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ + $(MARCH_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS) +AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) +AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) +AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) +AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) + +if BUILD_OWN_PTHREAD_WORKQUEUES + PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la + PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include +else +if HAVE_PTHREAD_WORKQUEUES + PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue +endif +endif libdispatch_la_LDFLAGS=-avoid-version +libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) if HAVE_DARWIN_LD libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ - -Wl,-current_version,$(VERSION) -Wl,-dead_strip + -Wl,-current_version,$(VERSION) -Wl,-dead_strip \ + -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases +endif + +if USE_GOLD_LINKER +libdispatch_la_LDFLAGS+=-Xcompiler -fuse-ld=gold endif if USE_OBJC -libdispatch_la_SOURCES+=object.m -libdispatch_la_OBJCFLAGS=$(AM_CFLAGS) -fobjc-gc +libdispatch_la_SOURCES+=block.cpp data.m object.m +libdispatch_la_OBJCFLAGS=$(AM_OBJCFLAGS) -Wno-switch -fobjc-gc +libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions libdispatch_la_LDFLAGS+=-Wl,-upward-lobjc -Wl,-upward-lauto \ - -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order \ - -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases \ - -Wl,-unexported_symbols_list,$(top_srcdir)/xcodeconfig/libdispatch.unexport + -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order +else +libdispatch_la_SOURCES+=block.cpp +libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions endif -CLEANFILES= -DISTCLEANFILES=System objc - if USE_MIG -BUILT_SOURCES= \ 
+MIG_SOURCES= \ protocolUser.c \ protocol.h \ protocolServer.c \ protocolServer.h -nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) -CLEANFILES+=$(BUILT_SOURCES) - %User.c %.h %Server.c %Server.h: $(abs_srcdir)/%.defs $(MIG) -user $*User.c -header $*.h \ -server $*Server.c -sheader $*Server.h $< endif + +if USE_DTRACE +DTRACE_SOURCES=provider.h + +%.h: $(abs_srcdir)/%.d + $(DTRACE) -h -s $< -o $@ +endif + +if HAVE_SWIFT +SWIFT_SRC_FILES=\ + swift/Block.swift \ + swift/Data.swift \ + swift/Dispatch.swift \ + swift/IO.swift \ + swift/Private.swift \ + swift/Queue.swift \ + swift/Source.swift \ + swift/Time.swift \ + swift/Wrapper.swift + +SWIFT_ABS_SRC_FILES = $(SWIFT_SRC_FILES:%=$(abs_srcdir)/%) +SWIFT_OBJ_FILES = $(SWIFT_SRC_FILES:%.swift=$(abs_builddir)/%.o) + +libdispatch_la_SOURCES+=swift/DispatchStubs.cc +EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES) + +EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule +libdispatch_la_LIBADD+=$(SWIFT_OBJ_FILES) + +SWIFT_GEN_FILES= \ + $(abs_builddir)/swift/Dispatch.swiftmodule \ + $(abs_builddir)/swift/Dispatch.swiftdoc \ + $(SWIFT_OBJ_FILES) \ + $(SWIFT_OBJ_FILES:%=%.d) \ + $(SWIFT_OBJ_FILES:%=%.swiftdeps) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \ + $(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps) + +SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.map -I$(abs_top_srcdir) -Xcc -fblocks + +$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift + $(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \ + $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ + -o $@ -emit-module-path $@.~partial.swiftmodule \ + -emit-module-doc-path $@.~partial.swiftdoc -emit-dependencies-path $@.d \ + -emit-reference-dependencies-path $@.swiftdeps \ + -module-cache-path $(top_builddir) + +$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) + $(SWIFTC) -frontend -emit-module 
$(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ + $(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \ + -o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc) + +swiftmoddir=${prefix}/lib/swift/linux/${build_cpu} +swiftmod_HEADERS=\ + $(abs_builddir)/swift/Dispatch.swiftmodule \ + $(abs_builddir)/swift/Dispatch.swiftdoc + +endif + +BUILT_SOURCES=$(MIG_SOURCES) $(DTRACE_SOURCES) +nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) +CLEANFILES=$(BUILT_SOURCES) $(SWIFT_GEN_FILES) +DISTCLEANFILES=pthread_machdep.h pthread System mach objc + diff --git a/src/allocator.c b/src/allocator.c index d6db272cb..a3a8c650a 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -35,7 +35,7 @@ // once to non-zero. They are not marked volatile. There is a small risk that // some thread may see a stale 0 value and enter try_create_heap. It will // waste some time in an allocate syscall, but eventually it will try to -// cmpxchg, expecting to overwite 0 with an address. This will fail +// cmpxchg, expecting to overwrite 0 with an address. This will fail // (because another thread already did this), the thread will deallocate the // unused allocated memory, and continue with the new value. 
// @@ -178,11 +178,11 @@ madvisable_page_base_for_continuation(dispatch_continuation_t c) #if DISPATCH_DEBUG struct dispatch_magazine_s *m = magazine_for_continuation(c); if (slowpath(page_base < (void *)&m->conts)) { - DISPATCH_CRASH("madvisable continuation too low"); + DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low"); } if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) { - DISPATCH_CRASH("madvisable continuation too high"); + DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high"); } #endif return page_base; @@ -228,7 +228,7 @@ bitmap_set_first_unset_bit_upto_index(volatile bitmap_t *bitmap, // load from it before storing, so we don't need to guard // against reordering those loads. dispatch_assert(sizeof(*bitmap) == sizeof(unsigned long)); - return dispatch_atomic_set_first_bit(bitmap,max_index); + return os_atomic_set_first_bit(bitmap, max_index); } DISPATCH_ALWAYS_INLINE @@ -255,12 +255,13 @@ bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, if (exclusively == CLEAR_EXCLUSIVELY) { if (slowpath((*bitmap & mask) == 0)) { - DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + DISPATCH_CLIENT_CRASH(*bitmap, + "Corruption: failed to clear bit exclusively"); } } // and-and-fetch - b = dispatch_atomic_and(bitmap, ~mask, release); + b = os_atomic_and(bitmap, ~mask, release); return b == 0; } @@ -284,7 +285,7 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, // don't protect access to other memory. 
s = s_new; s_masked = s | mask; - if (dispatch_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || + if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || !bitmap_is_full(*bitmap)) { return; } @@ -358,8 +359,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT))) { if (kr != KERN_NO_SPACE) { - (void)dispatch_assume_zero(kr); - DISPATCH_CLIENT_CRASH("Could not allocate heap"); + DISPATCH_CLIENT_CRASH(kr, "Could not allocate heap"); } _dispatch_temporary_resource_shortage(); vm_addr = vm_page_size; @@ -422,7 +422,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) #endif // DISPATCH_DEBUG #endif // HAVE_MACH - if (!dispatch_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, + if (!os_atomic_cmpxchg(heap_ptr, NULL, (void *)aligned_region, relaxed)) { // If we lost the race to link in the new region, unmap the whole thing. #if DISPATCH_DEBUG @@ -550,7 +550,7 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) // take ownership of them all. int last_locked = 0; do { - if (!dispatch_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), + if (!os_atomic_cmpxchg(&page_bitmaps[last_locked], BITMAP_C(0), BITMAP_ALL_ONES, relaxed)) { // We didn't get one; since there is a cont allocated in // the page, we can't madvise. Give up and unlock all. 
@@ -573,7 +573,7 @@ _dispatch_alloc_maybe_madvise_page(dispatch_continuation_t c) page_bitmaps[--last_locked] = BITMAP_C(0); } if (last_locked) { - dispatch_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); + os_atomic_store(&page_bitmaps[0], BITMAP_C(0), relaxed); } return; } @@ -676,7 +676,7 @@ _dispatch_malloc_init(void) malloc_set_zone_name(_dispatch_ccache_zone, "DispatchContinuations"); } #else -static inline void _dispatch_malloc_init(void) {} +#define _dispatch_malloc_init() ((void)0) #endif // DISPATCH_USE_MALLOCZONE static dispatch_continuation_t @@ -769,4 +769,3 @@ _dispatch_continuation_free_to_heap(dispatch_continuation_t c) return _dispatch_malloc_continuation_free(c); #endif } - diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 893ba8283..abe4a1d43 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -33,14 +33,8 @@ #endif #endif -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_NANOZONE -#define DISPATCH_USE_NANOZONE 0 -#endif #ifndef DISPATCH_USE_NANOZONE -#if TARGET_OS_MAC && defined(__LP64__) && \ - (__MAC_OS_X_VERSION_MIN_REQUIRED >= 1090 || \ - __IPHONE_OS_VERSION_MIN_REQUIRED >= 70000) +#if TARGET_OS_MAC && defined(__LP64__) #define DISPATCH_USE_NANOZONE 1 #endif #endif @@ -218,7 +212,7 @@ struct dispatch_magazine_header_s { // Link to the next heap in the chain. Only used in magazine 0's header dispatch_heap_t dh_next; - // Points to the first bitmap in the page where this CPU succesfully + // Points to the first bitmap in the page where this CPU successfully // allocated a continuation last time. Only used in the first heap. 
bitmap_t *last_found_page; }; diff --git a/src/apply.c b/src/apply.c index e0ab2c383..e051a1630 100644 --- a/src/apply.c +++ b/src/apply.c @@ -21,16 +21,20 @@ #include "internal.h" typedef void (*dispatch_apply_function_t)(void *, size_t); +static char const * const _dispatch_apply_key = "apply"; + +#define DISPATCH_APPLY_INVOKE_REDIRECT 0x1 +#define DISPATCH_APPLY_INVOKE_WAIT 0x2 DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke2(void *ctxt, bool redirect) +_dispatch_apply_invoke2(void *ctxt, long invoke_flags) { dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; size_t idx, done = 0; - idx = dispatch_atomic_inc_orig2o(da, da_index, acquire); + idx = os_atomic_inc_orig2o(da, da_index, acquire); if (!fastpath(idx < iter)) goto out; // da_dc is only safe to access once the 'index lock' has been acquired @@ -41,38 +45,51 @@ _dispatch_apply_invoke2(void *ctxt, bool redirect) _dispatch_perfmon_workitem_dec(); // this unit executes many items // Handle nested dispatch_apply rdar://problem/9294578 - size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); - _dispatch_thread_setspecific(dispatch_apply_key, (void*)da->da_nested); + dispatch_thread_context_s apply_ctxt = { + .dtc_key = _dispatch_apply_key, + .dtc_apply_nesting = da->da_nested, + }; + _dispatch_thread_context_push(&apply_ctxt); - dispatch_queue_t old_dq; + dispatch_thread_frame_s dtf; pthread_priority_t old_dp; - if (redirect) { - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - old_dp = _dispatch_set_defaultpriority(dq->dq_priority); + if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { + _dispatch_thread_frame_push(&dtf, dq); + old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL); } + dispatch_invoke_flags_t flags = da->da_flags; // Striding is the responsibility of the caller. 
do { - _dispatch_client_callout2(da_ctxt, idx, func); - _dispatch_perfmon_workitem_inc(); - done++; - idx = dispatch_atomic_inc_orig2o(da, da_index, relaxed); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_client_callout2(da_ctxt, idx, func); + _dispatch_perfmon_workitem_inc(); + done++; + idx = os_atomic_inc_orig2o(da, da_index, relaxed); + }); } while (fastpath(idx < iter)); - if (redirect) { + if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); } - _dispatch_thread_setspecific(dispatch_apply_key, (void*)nested); + + _dispatch_thread_context_pop(&apply_ctxt); // The thread that finished the last workitem wakes up the possibly waiting // thread that called dispatch_apply. They could be one and the same. - if (!dispatch_atomic_sub2o(da, da_todo, done, release)) { - _dispatch_thread_semaphore_signal(da->da_sema); + if (!os_atomic_sub2o(da, da_todo, done, release)) { + _dispatch_thread_event_signal(&da->da_event); } out: - if (dispatch_atomic_dec2o(da, da_thr_cnt, release) == 0) { + if (invoke_flags & DISPATCH_APPLY_INVOKE_WAIT) { + _dispatch_thread_event_wait(&da->da_event); + _dispatch_thread_event_destroy(&da->da_event); + } + if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) { +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif _dispatch_continuation_free((dispatch_continuation_t)da); } } @@ -81,30 +98,59 @@ DISPATCH_NOINLINE void _dispatch_apply_invoke(void *ctxt) { - _dispatch_apply_invoke2(ctxt, false); + _dispatch_apply_invoke2(ctxt, 0); +} + +DISPATCH_NOINLINE +static void +_dispatch_apply_invoke_and_wait(void *ctxt) +{ + _dispatch_apply_invoke2(ctxt, DISPATCH_APPLY_INVOKE_WAIT); + _dispatch_perfmon_workitem_inc(); } DISPATCH_NOINLINE void _dispatch_apply_redirect_invoke(void *ctxt) { - _dispatch_apply_invoke2(ctxt, true); + _dispatch_apply_invoke2(ctxt, 
DISPATCH_APPLY_INVOKE_REDIRECT); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_apply_autorelease_frequency(dispatch_queue_t dq) +{ + dispatch_invoke_flags_t qaf = 0; + + while (dq && !qaf) { + qaf = _dispatch_queue_autorelease_frequency(dq); + dq = slowpath(dq->do_targetq); + } + return qaf; +} + +DISPATCH_NOINLINE static void _dispatch_apply_serial(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; dispatch_continuation_t dc = da->da_dc; size_t const iter = da->da_iterations; + dispatch_invoke_flags_t flags; size_t idx = 0; _dispatch_perfmon_workitem_dec(); // this unit executes many items + flags = _dispatch_apply_autorelease_frequency(dc->dc_data); do { - _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); - _dispatch_perfmon_workitem_inc(); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + _dispatch_perfmon_workitem_inc(); + }); } while (++idx < iter); +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif _dispatch_continuation_free((dispatch_continuation_t)da); } @@ -123,12 +169,9 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, for (i = 0; i < continuation_cnt; i++) { dispatch_continuation_t next = _dispatch_continuation_alloc(); - next->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - next->dc_func = func; - next->dc_ctxt = da; - _dispatch_continuation_voucher_set(next, 0); - _dispatch_continuation_priority_set(next, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + _dispatch_continuation_init_f(next, dq, da, func, 0, 0, dc_flags); next->do_next = head; head = next; @@ -137,49 +180,47 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, } } - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - da->da_sema = sema; + _dispatch_thread_event_init(&da->da_event); _dispatch_queue_push_list(dq, head, tail, head->dc_priority, continuation_cnt); // Call the first element 
directly - _dispatch_apply_invoke(da); - _dispatch_perfmon_workitem_inc(); - - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); - + _dispatch_apply_invoke_and_wait(da); } +DISPATCH_NOINLINE static void _dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; - uint32_t da_width = 2 * (da->da_thr_cnt - 1); + uint32_t da_width = da->da_thr_cnt - 1; dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; do { - uint32_t running, width = rq->dq_width; - running = dispatch_atomic_add2o(rq, dq_running, da_width, relaxed); - if (slowpath(running > width)) { - uint32_t excess = width > 1 ? running - width : da_width; - for (tq = dq; 1; tq = tq->do_targetq) { - (void)dispatch_atomic_sub2o(tq, dq_running, excess, relaxed); - if (tq == rq) { - break; - } + uint32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width); + + if (slowpath(da_width > width)) { + uint32_t excess = da_width - width; + for (tq = dq; tq != rq; tq = tq->do_targetq) { + _dispatch_queue_relinquish_width(tq, excess); } da_width -= excess; if (slowpath(!da_width)) { return _dispatch_apply_serial(da); } - da->da_thr_cnt -= excess / 2; + da->da_thr_cnt -= excess; + } + if (!da->da_flags) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. 
+ da->da_flags = _dispatch_queue_autorelease_frequency(dq); } rq = rq->do_targetq; } while (slowpath(rq->do_targetq)); _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); do { - (void)dispatch_atomic_sub2o(dq, dq_running, da_width, relaxed); + _dispatch_queue_relinquish_width(dq, da_width); dq = dq->do_targetq; } while (slowpath(dq->do_targetq)); } @@ -195,7 +236,10 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, return; } uint32_t thr_cnt = dispatch_hw_config(active_cpus); - size_t nested = (size_t)_dispatch_thread_getspecific(dispatch_apply_key); + dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; + dispatch_queue_t old_dq = _dispatch_queue_get_current(); + if (!slowpath(nested)) { nested = iterations; } else { @@ -206,9 +250,17 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, if (iterations < thr_cnt) { thr_cnt = (uint32_t)iterations; } + if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { + dq = old_dq ? old_dq : _dispatch_get_root_queue( + _DISPATCH_QOS_CLASS_DEFAULT, false); + while (slowpath(dq->do_targetq)) { + dq = dq->do_targetq; + } + } struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, + .dc_data = dq, }; dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); da->da_index = 0; @@ -216,57 +268,35 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, da->da_iterations = iterations; da->da_nested = nested; da->da_thr_cnt = thr_cnt; +#if DISPATCH_INTROSPECTION + da->da_dc = _dispatch_continuation_alloc(); + *da->da_dc = dc; +#else da->da_dc = &dc; +#endif + da->da_flags = 0; - dispatch_queue_t old_dq; - old_dq = (dispatch_queue_t)_dispatch_thread_getspecific(dispatch_queue_key); - if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { - dq = old_dq ? 
old_dq : _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false); - while (slowpath(dq->do_targetq)) { - dq = dq->do_targetq; - } - } - if (slowpath(dq->dq_width <= 2) || slowpath(thr_cnt <= 1)) { + if (slowpath(dq->dq_width == 1) || slowpath(thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } if (slowpath(dq->do_targetq)) { if (slowpath(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { - dc.dc_data = dq; return dispatch_sync_f(dq, da, _dispatch_apply_redirect); } } - _dispatch_thread_setspecific(dispatch_queue_key, dq); + + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); _dispatch_apply_f2(dq, da, _dispatch_apply_invoke); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); } #ifdef __BLOCKS__ -#if DISPATCH_COCOA_COMPAT -DISPATCH_NOINLINE -static void -_dispatch_apply_slow(size_t iterations, dispatch_queue_t dq, - void (^work)(size_t)) -{ - dispatch_block_t bb = _dispatch_Block_copy((void *)work); - dispatch_apply_f(iterations, dq, bb, - (dispatch_apply_function_t)_dispatch_Block_invoke(bb)); - Block_release(bb); -} -#endif - void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { -#if DISPATCH_COCOA_COMPAT - // Under GC, blocks transferred to other threads must be Block_copy()ed - // rdar://problem/7455071 - if (dispatch_begin_thread_4GC) { - return _dispatch_apply_slow(iterations, dq, work); - } -#endif dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } diff --git a/src/block.cpp b/src/block.cpp index 83fff54ed..3060a2a4d 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -82,6 +82,9 @@ struct dispatch_block_private_data_s { if (!dbpd_performed) dispatch_group_leave(dbpd_group); ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group); } + if (dbpd_queue) { + ((void (*)(os_mpsc_queue_t))_os_object_release_internal)(dbpd_queue); + } if (dbpd_block) 
Block_release(dbpd_block); if (dbpd_voucher) voucher_release(dbpd_voucher); } @@ -95,7 +98,7 @@ _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, return _dispatch_Block_copy(^{ // Capture stack object: invokes copy constructor (17094902) (void)dbpds; - _dispatch_block_invoke(&dbpds); + _dispatch_block_invoke_direct(&dbpds); }); } @@ -103,7 +106,11 @@ extern "C" { // The compiler hides the name of the function it generates, and changes it if // we try to reference it directly, but the linker still sees it. extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) +#ifdef __linux__ + asm("___dispatch_block_create_block_invoke"); +#else asm("____dispatch_block_create_block_invoke"); +#endif void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; } diff --git a/src/data.c b/src/data.c index e65399fdc..644328911 100644 --- a/src/data.c +++ b/src/data.c @@ -92,7 +92,7 @@ ******************************************************************************* */ -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_data_release(x) _dispatch_objc_release(x) #else @@ -101,26 +101,33 @@ #endif const dispatch_block_t _dispatch_data_destructor_free = ^{ - DISPATCH_CRASH("free destructor called"); + DISPATCH_INTERNAL_CRASH(0, "free destructor called"); }; const dispatch_block_t _dispatch_data_destructor_none = ^{ - DISPATCH_CRASH("none destructor called"); + DISPATCH_INTERNAL_CRASH(0, "none destructor called"); }; +#if !HAVE_MACH +const dispatch_block_t _dispatch_data_destructor_munmap = ^{ + DISPATCH_INTERNAL_CRASH(0, "munmap destructor called"); +}; +#else +// _dispatch_data_destructor_munmap is a linker alias to the following const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ - DISPATCH_CRASH("vmdeallocate destructor called"); + DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called"); }; +#endif const dispatch_block_t _dispatch_data_destructor_inline 
= ^{ - DISPATCH_CRASH("inline destructor called"); + DISPATCH_INTERNAL_CRASH(0, "inline destructor called"); }; struct dispatch_data_s _dispatch_data_empty = { +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA .do_vtable = DISPATCH_DATA_EMPTY_CLASS, -#if !USE_OBJC - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, +#else + DISPATCH_GLOBAL_OBJECT_HEADER(data), .do_next = DISPATCH_OBJECT_LISTLESS, #endif }; @@ -129,11 +136,17 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_data_t _dispatch_data_alloc(size_t n, size_t extra) { - dispatch_data_t data = _dispatch_alloc(DISPATCH_DATA_CLASS, - sizeof(struct dispatch_data_s) + extra + - n * sizeof(range_record)); + dispatch_data_t data; + size_t size; + + if (os_mul_and_add_overflow(n, sizeof(range_record), + sizeof(struct dispatch_data_s) + extra, &size)) { + return DISPATCH_OUT_OF_MEMORY; + } + + data = _dispatch_alloc(DISPATCH_DATA_CLASS, size); data->num_records = n; -#if !USE_OBJC +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA data->do_targetq = dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); data->do_next = DISPATCH_OBJECT_LISTLESS; @@ -149,10 +162,12 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, free((void*)buffer); } else if (destructor == DISPATCH_DATA_DESTRUCTOR_NONE) { // do nothing +#if HAVE_MACH } else if (destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE) { mach_vm_size_t vm_size = size; mach_vm_address_t vm_addr = (uintptr_t)buffer; mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); +#endif } else { if (!queue) { queue = dispatch_get_global_queue( @@ -213,7 +228,7 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, // copied. 
data_buf = malloc(size); if (slowpath(!data_buf)) { - return NULL; + return DISPATCH_OUT_OF_MEMORY; } buffer = memcpy(data_buf, buffer, size); data = _dispatch_data_alloc(0, 0); @@ -239,7 +254,9 @@ dispatch_data_create_f(const void *buffer, size_t size, dispatch_queue_t queue, if (destructor != DISPATCH_DATA_DESTRUCTOR_DEFAULT && destructor != DISPATCH_DATA_DESTRUCTOR_FREE && destructor != DISPATCH_DATA_DESTRUCTOR_NONE && +#if HAVE_MACH destructor != DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE && +#endif destructor != DISPATCH_DATA_DESTRUCTOR_INLINE) { destructor = ^{ destructor_function((void*)buffer); }; } @@ -319,6 +336,8 @@ dispatch_data_t dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) { dispatch_data_t data; + size_t n; + if (!dd1->size) { _dispatch_data_retain(dd2); return dd2; @@ -328,8 +347,11 @@ dispatch_data_create_concat(dispatch_data_t dd1, dispatch_data_t dd2) return dd1; } - data = _dispatch_data_alloc(_dispatch_data_num_records(dd1) + - _dispatch_data_num_records(dd2), 0); + if (os_add_overflow(_dispatch_data_num_records(dd1), + _dispatch_data_num_records(dd2), &n)) { + return DISPATCH_OUT_OF_MEMORY; + } + data = _dispatch_data_alloc(n, 0); data->size = dd1->size + dd2->size; // Copy the constituent records into the newly created data object // Reference leaf objects as sub-objects @@ -361,9 +383,10 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, size_t length) { dispatch_data_t data; + if (offset >= dd->size || !length) { return dispatch_data_empty; - } else if ((offset + length) > dd->size) { + } else if (length > dd->size - offset) { length = dd->size - offset; } else if (length == dd->size) { _dispatch_data_retain(dd); @@ -398,8 +421,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // Crashing here indicates memory corruption of passed in data object if (slowpath(i >= dd_num_records)) { - DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); - return NULL; + 
DISPATCH_INTERNAL_CRASH(i, + "dispatch_data_create_subrange out of bounds"); } // if everything is from a single dispatch data object, avoid boxing it @@ -428,8 +451,8 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // Crashing here indicates memory corruption of passed in data object if (slowpath(i + count >= dd_num_records)) { - DISPATCH_CRASH("dispatch_data_create_subrange out of bounds"); - return NULL; + DISPATCH_INTERNAL_CRASH(i + count, + "dispatch_data_create_subrange out of bounds"); } } } @@ -529,7 +552,7 @@ _dispatch_data_get_flattened_bytes(dispatch_data_t dd) void *flatbuf = _dispatch_data_flatten(dd); if (fastpath(flatbuf)) { // we need a release so that readers see the content of the buffer - if (slowpath(!dispatch_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, + if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, &buffer, release))) { free(flatbuf); } else { @@ -651,7 +674,8 @@ _dispatch_data_copy_region(dispatch_data_t dd, size_t from, size_t size, return _dispatch_data_copy_region(dd, from, length, location, offset_ptr); } - DISPATCH_CRASH("dispatch_data_copy_region out of bounds"); + DISPATCH_INTERNAL_CRASH(*offset_ptr+offset, + "dispatch_data_copy_region out of bounds"); } // Returs either a leaf object or an object composed of a single leaf object diff --git a/src/data.m b/src/data.m index 92bc1e28c..190b1edd1 100644 --- a/src/data.m +++ b/src/data.m @@ -20,18 +20,15 @@ #include "internal.h" -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA -#if !__OBJC2__ -#error "Cannot build with legacy ObjC runtime" -#endif #if _OS_OBJECT_OBJC_ARC #error "Cannot build with ARC" #endif #include -@interface DISPATCH_CLASS(data) () +@interface DISPATCH_CLASS(data) () @property (readonly) NSUInteger length; @property (readonly) const void *bytes NS_RETURNS_INNER_POINTER; @@ -94,10 +91,6 @@ - (void)dealloc { _dispatch_data_objc_dispose(dealloc); } -- (void)finalize { - _dispatch_data_objc_dispose(finalize); -} - - (BOOL)_bytesAreVM { 
struct dispatch_data_s *dd = (void*)self; return dd->destructor == DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE; @@ -122,7 +115,7 @@ - (void)_setTargetQueue:(dispatch_queue_t)queue { struct dispatch_data_s *dd = (void*)self; _os_object_retain_internal((_os_object_t)queue); dispatch_queue_t prev; - prev = dispatch_atomic_xchg2o(dd, do_targetq, queue, release); + prev = os_atomic_xchg2o(dd, do_targetq, queue, release); if (prev) _os_object_release_internal((_os_object_t)prev); } @@ -151,6 +144,15 @@ - (BOOL)_isCompact { return !dd->size || _dispatch_data_map_direct(dd, 0, NULL, NULL) != NULL; } +- (void)_suspend { +} + +- (void)_resume { +} + +- (void)_activate { +} + @end @implementation DISPATCH_CLASS(data_empty) @@ -191,6 +193,15 @@ - (void)_setFinalizer:(dispatch_function_t) DISPATCH_UNUSED finalizer { - (void)_setTargetQueue:(dispatch_queue_t) DISPATCH_UNUSED queue { } +- (void)_suspend { +} + +- (void)_resume { +} + +- (void)_activate { +} + @end #endif // USE_OBJC diff --git a/src/data_internal.h b/src/data_internal.h index 40a780ce9..bbef21e41 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -38,30 +38,32 @@ typedef struct range_record_s { size_t length; } range_record; -#if USE_OBJC -#if OS_OBJECT_USE_OBJC -@interface DISPATCH_CLASS(data) : NSObject -@end +#if OS_OBJECT_HAVE_OBJC2 +#define DISPATCH_DATA_IS_BRIDGED_TO_NSDATA 1 +#else +#define DISPATCH_DATA_IS_BRIDGED_TO_NSDATA 0 #endif + +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_OBJC_CLASS_DECL(data); DISPATCH_OBJC_CLASS_DECL(data_empty); -#define DISPATCH_DATA_CLASS DISPATCH_OBJC_CLASS(data) -#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_OBJC_CLASS(data_empty) -#else // USE_OBJC +_OS_OBJECT_DECL_PROTOCOL(dispatch_data, dispatch_object); +#define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) +#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data_empty) +#else DISPATCH_CLASS_DECL(data); #define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) -#define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data) -#endif // 
USE_OBJC +#endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA struct dispatch_data_s { -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA const void *do_vtable; dispatch_queue_t do_targetq; void *ctxt; void *finalizer; -#else // USE_OBJC - DISPATCH_STRUCT_HEADER(data); -#endif // USE_OBJC +#else + DISPATCH_OBJECT_HEADER(data); +#endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA const void *buf; dispatch_block_t destructor; size_t size, num_records; @@ -79,7 +81,7 @@ _dispatch_data_leaf(struct dispatch_data_s *dd) * This is about the number of records required to hold that dispatch data * if it's not a leaf. Callers either want that value, or have to special * case the case when the dispatch data *is* a leaf before (and that the actual - * embeded record count of that dispatch data is 0) + * embedded record count of that dispatch data is 0) */ DISPATCH_ALWAYS_INLINE static inline size_t @@ -106,10 +108,8 @@ const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); #if !defined(__cplusplus) -#if !__OBJC2__ -const dispatch_block_t _dispatch_data_destructor_inline; +extern const dispatch_block_t _dispatch_data_destructor_inline; #define DISPATCH_DATA_DESTRUCTOR_INLINE (_dispatch_data_destructor_inline) -#endif // !__OBJC2__ /* * the out parameters are about seeing "through" trivial subranges @@ -135,7 +135,7 @@ _dispatch_data_map_direct(struct dispatch_data_s *dd, size_t offset, if (fastpath(_dispatch_data_leaf(dd))) { buffer = dd->buf + offset; } else { - buffer = dispatch_atomic_load((void **)&dd->buf, relaxed); + buffer = os_atomic_load((void **)&dd->buf, relaxed); if (buffer) { buffer += offset; } diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs new file mode 100644 index 000000000..986533cc1 --- /dev/null +++ b/src/firehose/firehose.defs @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include "firehose_types.defs" + +subsystem firehose 11600; +serverprefix firehose_server_; +userprefix firehose_send_; + +simpleroutine +register( + server_port : mach_port_t; + mem_port : mach_port_move_send_t; + mem_size : mach_vm_size_t; + comm_recvp : mach_port_move_receive_t; + comm_sendp : mach_port_make_send_t; + extra_info_port : mach_port_move_send_t; + extra_info_size : mach_vm_size_t +); + +routine +push( +RequestPort comm_port : mach_port_t; +SReplyPort reply_port : mach_port_make_send_once_t; + qos_class : qos_class_t; + for_io : boolean_t; +out push_reply : firehose_push_reply_t +); + +simpleroutine +push_async( + comm_port : mach_port_t; + qos_class : qos_class_t; + for_io : boolean_t; + expects_notify : boolean_t +); diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c new file mode 100644 index 000000000..1305bdea6 --- /dev/null +++ b/src/firehose/firehose_buffer.c @@ -0,0 +1,1147 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include // VM_MEMORY_GENEALOGY +#ifdef KERNEL + +#define OS_VOUCHER_ACTIVITY_SPI_TYPES 1 +#define OS_FIREHOSE_SPI 1 +#define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 + +#define DISPATCH_PURE_C 1 +#define _safe_cast_to_long(x) \ + ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ + "__builtin_expect doesn't support types wider than long"); \ + (long)(x); }) +#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) +#define os_likely(x) __builtin_expect(!!(x), 1) +#define os_unlikely(x) __builtin_expect(!!(x), 0) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) + +#define DISPATCH_INTERNAL_CRASH(ac, msg) ({ panic(msg); __builtin_trap(); }) + +#if defined(__x86_64__) || defined(__i386__) +#define dispatch_hardware_pause() __asm__("pause") +#elif (defined(__arm__) && defined(_ARM_ARCH_7) && defined(__thumb__)) || \ + defined(__arm64__) +#define dispatch_hardware_pause() __asm__("yield") +#define dispatch_hardware_wfe() __asm__("wfe") +#else +#define dispatch_hardware_pause() __asm__("") +#endif + +#define _dispatch_wait_until(c) do { \ + while (!fastpath(c)) { \ + dispatch_hardware_pause(); \ + } } while (0) +#define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") + +typedef uint32_t dispatch_lock; +typedef struct dispatch_gate_s { + dispatch_lock dgl_lock; +} dispatch_gate_s, *dispatch_gate_t; +#define DLOCK_LOCK_DATA_CONTENTION 0 +static void 
_dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // +#include // +#include // os/internal/atomic.h +#include "os/firehose_buffer_private.h" +#include "firehose_buffer_internal.h" +#include "firehose_inline_internal.h" +#else +#include "internal.h" +#include "firehose.h" // MiG +#include "firehose_replyServer.h" // MiG +#endif + +#if OS_FIREHOSE_SPI + +#if __has_feature(c_static_assert) +_Static_assert(sizeof(((firehose_stream_state_u *)NULL)->fss_gate) == + sizeof(((firehose_stream_state_u *)NULL)->fss_allocator), + "fss_gate and fss_allocator alias"); +_Static_assert(offsetof(firehose_stream_state_u, fss_gate) == + offsetof(firehose_stream_state_u, fss_allocator), + "fss_gate and fss_allocator alias"); +_Static_assert(sizeof(struct firehose_buffer_header_s) == + FIREHOSE_BUFFER_CHUNK_SIZE, + "firehose buffer header must be 4k"); +_Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <= + FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, + "we must have enough space for the libtrace header"); +_Static_assert(sizeof(struct firehose_buffer_chunk_s) == + FIREHOSE_BUFFER_CHUNK_SIZE, + "firehose buffer chunks must be 4k"); +_Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT), + "CHUNK_COUNT Must be a power of two"); +_Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, + "CHUNK_COUNT must be less than 64 (bitmap in uint64_t)"); +#ifdef FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT +_Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT), + "madvise chunk count must be a power of two"); +#endif +_Static_assert(howmany(sizeof(struct firehose_tracepoint_s), + sizeof(struct firehose_buffer_chunk_s)) < 255, + "refcount assumes that you cannot have more than 255 tracepoints"); +// FIXME: we should have an event-count instead here +_Static_assert(sizeof(struct firehose_buffer_stream_s) == 128, + "firehose buffer stream must be small (single 
cacheline if possible)"); +_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0, + "Page header is 8 byte aligned"); +_Static_assert(sizeof(struct firehose_tracepoint_s) == 24, + "tracepoint header should be exactly 24 bytes"); +#endif + +#ifdef KERNEL +static firehose_buffer_t kernel_firehose_buffer = NULL; +#endif + +#pragma mark - +#pragma mark Client IPC to the log daemon +#ifndef KERNEL + +static mach_port_t +firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) +{ + mach_port_t sendp = MACH_PORT_NULL; + mach_port_t mem_port = MACH_PORT_NULL, extra_info_port = MACH_PORT_NULL; + mach_vm_size_t extra_info_size = 0; + kern_return_t kr; + + dispatch_assert(fb->fb_header.fbh_logd_port); + dispatch_assert(fb->fb_header.fbh_recvp); + dispatch_assert(fb->fb_header.fbh_uniquepid != 0); + + _dispatch_unfair_lock_lock(&fb->fb_header.fbh_logd_lock); + sendp = fb->fb_header.fbh_sendp; + if (sendp != oldsendp || sendp == MACH_PORT_DEAD) { + // someone beat us to reconnecting or logd was unloaded, just go away + goto unlock; + } + + if (oldsendp) { + // same trick as _xpc_pipe_dispose: keeping a send right + // maintains the name, so that we can destroy the receive right + // in case we still have it. 
+ (void)firehose_mach_port_recv_dispose(oldsendp, fb); + firehose_mach_port_send_release(oldsendp); + fb->fb_header.fbh_sendp = MACH_PORT_NULL; + } + + /* Create a memory port for the buffer VM region */ + vm_prot_t flags = VM_PROT_READ | MAP_MEM_VM_SHARE; + memory_object_size_t size = sizeof(union firehose_buffer_u); + mach_vm_address_t addr = (vm_address_t)fb; + + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &mem_port, MACH_PORT_NULL); + if (size < sizeof(union firehose_buffer_u)) { + DISPATCH_CLIENT_CRASH(size, "Invalid size for the firehose buffer"); + } + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); + } + + /* Create a communication port to the logging daemon */ + uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; + sendp = firehose_mach_port_allocate(opts, fb); + + if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) { + if (_voucher_libtrace_hooks->vah_get_reconnect_info) { + kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); + if (likely(kr == KERN_SUCCESS) && addr && size) { + extra_info_size = size; + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &extra_info_port, MACH_PORT_NULL); + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); + } + kr = mach_vm_deallocate(mach_task_self(), addr, size); + (void)dispatch_assume_zero(kr); + } + } + } + + /* Call the firehose_register() MIG routine */ + kr = firehose_send_register(fb->fb_header.fbh_logd_port, mem_port, + sizeof(union firehose_buffer_u), sendp, fb->fb_header.fbh_recvp, + extra_info_port, extra_info_size); + if (likely(kr == KERN_SUCCESS)) { + fb->fb_header.fbh_sendp = sendp; + } else if (unlikely(kr == MACH_SEND_INVALID_DEST)) { + // MACH_SEND_INVALID_DEST here means that logd's 
boostrap port + // turned into a dead name, which in turn means that logd has been + // unloaded. The only option here, is to give up permanently. + // + // same trick as _xpc_pipe_dispose: keeping a send right + // maintains the name, so that we can destroy the receive right + // in case we still have it. + (void)firehose_mach_port_recv_dispose(sendp, fb); + firehose_mach_port_send_release(sendp); + firehose_mach_port_send_release(mem_port); + if (extra_info_port) firehose_mach_port_send_release(extra_info_port); + sendp = fb->fb_header.fbh_sendp = MACH_PORT_DEAD; + } else { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to register with logd"); + } + +unlock: + _dispatch_unfair_lock_unlock(&fb->fb_header.fbh_logd_lock); + return sendp; +} + +static void +firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) +{ + firehose_bank_state_u old, new; + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long fbb_flags = fbb->fbb_flags; + uint16_t io_streams = 0, mem_streams = 0; + uint16_t total = 0; + + for (size_t i = 0; i < countof(fb->fb_header.fbh_stream); i++) { + firehose_buffer_stream_t fbs = fb->fb_header.fbh_stream + i; + + if (fbs->fbs_state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE) { + continue; + } + if ((1UL << i) & firehose_stream_uses_io_bank) { + io_streams++; + } else { + mem_streams++; + } + } + + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY) { + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE) { + total = 1 + 4 * mem_streams + io_streams; // usually 10 + } else { + total = 1 + 2 + mem_streams + io_streams; // usually 6 + } + } else { + if (fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE) { + total = 1 + 6 * mem_streams + 3 * io_streams; // usually 16 + } else { + total = 1 + 2 * (mem_streams + io_streams); // usually 7 + } + } + + uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE); + if (ratio > 1) { + total = roundup(total, 
ratio); + } + total = MAX(total, FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT); + if (!(fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY)) { + total = MAX(total, TARGET_OS_EMBEDDED ? 8 : 12); + } + + new.fbs_max_ref = total; + new.fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - 1); + new.fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - + MAX(3 * total / 8, 2 * io_streams); + new.fbs_unused = 0; + + old = fbb->fbb_limits; + fbb->fbb_limits = new; + if (old.fbs_atomic_state == new.fbs_atomic_state) { + return; + } + os_atomic_add2o(&fb->fb_header, fbh_bank.fbb_state.fbs_atomic_state, + new.fbs_atomic_state - old.fbs_atomic_state, relaxed); +} +#endif // !KERNEL + +firehose_buffer_t +firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, + unsigned long bank_flags) +{ + firehose_buffer_header_t fbh; + firehose_buffer_t fb; + +#ifndef KERNEL + mach_vm_address_t vm_addr = 0; + kern_return_t kr; + + vm_addr = vm_page_size; + const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * + FIREHOSE_BUFFER_CHUNK_SIZE; + if (slowpath(madvise_bytes % PAGE_SIZE)) { + DISPATCH_INTERNAL_CRASH(madvise_bytes, + "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); + } + + kr = mach_vm_map(mach_task_self(), &vm_addr, sizeof(*fb), 0, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | + VM_MAKE_TAG(VM_MEMORY_GENEALOGY), MEMORY_OBJECT_NULL, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_NONE); + if (slowpath(kr)) { + if (kr != KERN_NO_SPACE) dispatch_assume_zero(kr); + firehose_mach_port_send_release(logd_port); + return NULL; + } + + uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT; +#else + vm_offset_t vm_addr = 0; + vm_size_t size; + + size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + __firehose_allocate(&vm_addr, size); + + (void)logd_port; (void)unique_pid; +#endif // KERNEL + + fb = (firehose_buffer_t)vm_addr; + fbh = &fb->fb_header; +#ifndef KERNEL + fbh->fbh_logd_port = logd_port; + fbh->fbh_pid = getpid(); + 
fbh->fbh_uniquepid = unique_pid; + fbh->fbh_recvp = firehose_mach_port_allocate(opts, fb); +#endif // !KERNEL + fbh->fbh_spi_version = OS_FIREHOSE_SPI_VERSION; + fbh->fbh_bank.fbb_flags = bank_flags; + +#ifndef KERNEL + for (size_t i = 0; i < countof(fbh->fbh_stream); i++) { + firehose_buffer_stream_t fbs = fbh->fbh_stream + i; + if (i != firehose_stream_metadata) { + fbs->fbs_state.fss_current = FIREHOSE_STREAM_STATE_PRISTINE; + } + } + firehose_buffer_update_limits_unlocked(fb); +#else + uint16_t total = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT + 1; + const uint16_t num_kernel_io_pages = 8; + uint16_t io_pages = num_kernel_io_pages; + fbh->fbh_bank.fbb_state = (firehose_bank_state_u){ + .fbs_max_ref = total, + .fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - io_pages, + .fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - io_pages - 1), + }; + fbh->fbh_bank.fbb_limits = fbh->fbh_bank.fbb_state; +#endif // KERNEL + + // now pre-allocate some chunks in the ring directly +#ifdef KERNEL + const uint16_t pre_allocated = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT - 1; +#else + const uint16_t pre_allocated = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; +#endif + + fbh->fbh_bank.fbb_bitmap = (1U << (1 + pre_allocated)) - 1; + + for (uint16_t i = 0; i < pre_allocated; i++) { + fbh->fbh_mem_ring[i] = i + 1; + } + fbh->fbh_bank.fbb_mem_flushed = pre_allocated; + fbh->fbh_ring_mem_head = pre_allocated; + + +#ifdef KERNEL + // install the early boot page as the current one for persist + fbh->fbh_stream[firehose_stream_persist].fbs_state.fss_current = + FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; + fbh->fbh_bank.fbb_state.fbs_io_bank += 1; +#endif + + fbh->fbh_ring_tail = (firehose_ring_tail_u){ + .frp_mem_flushed = pre_allocated, + }; + return fb; +} + +#ifndef KERNEL +static void +firehose_notify_source_invoke(mach_msg_header_t *hdr) +{ + const size_t reply_size = + sizeof(union __ReplyUnion__firehose_client_firehoseReply_subsystem); + + firehose_mig_server(firehoseReply_server, 
			reply_size, hdr);
}

// One-time setup of the dispatch source that receives logd's notification
// messages on fbh_recvp; messages are demuxed by
// firehose_notify_source_invoke above. The buffer itself is stashed as the
// source's context so handlers can find it (see
// firehose_client_push_notify_async).
static void
firehose_client_register_for_notifications(firehose_buffer_t fb)
{
	static const struct dispatch_continuation_s dc = {
		.dc_func = (void *)firehose_notify_source_invoke,
	};
	firehose_buffer_header_t fbh = &fb->fb_header;

	dispatch_once(&fbh->fbh_notifs_pred, ^{
		dispatch_source_t ds = _dispatch_source_create_mach_msg_direct_recv(
				fbh->fbh_recvp, &dc);
		dispatch_set_context(ds, fb);
		dispatch_activate(ds);
		fbh->fbh_notifs_source = ds;
	});
}

// Fire-and-forget push request to the daemon over fbh_sendp.
// Retries exactly once through firehose_client_reconnect() when the send
// right went stale (MACH_SEND_INVALID_DEST); any other send failure is
// asserted on. A MACH_PORT_DEAD send port means the daemon is gone for good
// and the push is silently dropped.
static void
firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos,
		bool for_io)
{
	bool ask_for_notifs = fb->fb_header.fbh_notifs_source != NULL;
	mach_port_t sendp = fb->fb_header.fbh_sendp;
	kern_return_t kr = KERN_FAILURE;

	// only multithreaded processes register for async notifications;
	// single-threaded ones would never drain the notification source
	if (!ask_for_notifs && _dispatch_is_multithreaded_inline()) {
		firehose_client_register_for_notifications(fb);
		ask_for_notifs = true;
	}

	if (slowpath(sendp == MACH_PORT_DEAD)) {
		return;
	}

	if (fastpath(sendp)) {
		kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs);
		if (likely(kr == KERN_SUCCESS)) {
			return;
		}
		if (kr != MACH_SEND_INVALID_DEST) {
			DISPATCH_VERIFY_MIG(kr);
			dispatch_assume_zero(kr);
		}
	}

	// stale send right: reconnect and retry once
	sendp = firehose_client_reconnect(fb, sendp);
	if (fastpath(MACH_PORT_VALID(sendp))) {
		kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs);
		if (likely(kr == KERN_SUCCESS)) {
			return;
		}
		if (kr != MACH_SEND_INVALID_DEST) {
			DISPATCH_VERIFY_MIG(kr);
			dispatch_assume_zero(kr);
		}
	}
}
#endif // !KERNEL

// Folds a push reply from the daemon into the local accounting:
// - monotonically advances fbh_bank.fbb_{mem,io}_flushed to the reply's
//   flushed positions and derives how many chunks were newly flushed,
// - returns those chunks to the bank (fbb_state is decremented by the
//   deltas packed per FIREHOSE_BANK_SHIFT),
// - advances the ring tail markers so the flushed slots become recyclable.
// When async_notif is set, also bumps the notification counters.
// If state_out is non-NULL it receives the resulting bank state.
static void
firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif,
		firehose_push_reply_t reply, firehose_bank_state_u *state_out)
{
	firehose_bank_state_u state;
	firehose_ring_tail_u otail, ntail;
	uint64_t old_flushed_pos, bank_updates;
	uint16_t io_delta = 0;
	uint16_t mem_delta = 0;

	// max keeps the flushed positions monotonic even if replies race
	if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_mem_flushed,
			reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) {
		mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos);
	}
	if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_io_flushed,
			reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) {
		io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos);
	}
#ifndef KERNEL
	_dispatch_debug("client side: mem: +%d->%llx, io: +%d->%llx",
			mem_delta, reply.fpr_mem_flushed_pos,
			io_delta, reply.fpr_io_flushed_pos);
#endif

	if (!mem_delta && !io_delta) {
		if (state_out) {
			state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header,
					fbh_bank.fbb_state.fbs_atomic_state, relaxed);
		}
		return;
	}

	bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) |
			((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1));
	state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header,
			fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed);
	if (state_out) *state_out = state;

	os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail,
			otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, {
		ntail = otail;
		// overflow handles the generation wraps
		ntail.frp_io_flushed += io_delta;
		ntail.frp_mem_flushed += mem_delta;
	});
	if (async_notif) {
		if (io_delta) {
			os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed);
		}
		if (mem_delta) {
			os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_notifs, relaxed);
		}
	}
}

#ifndef KERNEL
// Synchronous push: sends to the daemon and waits for a reply carrying the
// new flushed positions, then merges them via
// firehose_client_merge_updates(). Same single-reconnect retry policy as
// firehose_client_send_push_async(). If the daemon cannot be reached,
// state_out (when non-NULL) is refreshed from the current bank state so
// callers can re-evaluate availability.
static void
firehose_client_send_push(firehose_buffer_t fb, bool for_io,
		firehose_bank_state_u *state_out)
{
	mach_port_t sendp = fb->fb_header.fbh_sendp;
	firehose_push_reply_t push_reply = { };
	qos_class_t qos = qos_class_self();
	kern_return_t kr;

	if (slowpath(sendp == MACH_PORT_DEAD)) {
		return;
	}
	if (fastpath(sendp)) {
		kr = firehose_send_push(sendp, qos, for_io, &push_reply);
		if (likely(kr == KERN_SUCCESS)) {
			goto success;
		}
		if (kr != MACH_SEND_INVALID_DEST) {
			DISPATCH_VERIFY_MIG(kr);
			dispatch_assume_zero(kr);
		}
	}

	// stale send right: reconnect and retry once
	sendp = firehose_client_reconnect(fb, sendp);
	if (fastpath(MACH_PORT_VALID(sendp))) {
		kr = firehose_send_push(sendp, qos, for_io, &push_reply);
		if (likely(kr == KERN_SUCCESS)) {
			goto success;
		}
		if (kr != MACH_SEND_INVALID_DEST) {
			DISPATCH_VERIFY_MIG(kr);
			dispatch_assume_zero(kr);
		}
	}

	if (state_out) {
		state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header,
				fbh_bank.fbb_state.fbs_atomic_state, relaxed);
	}
	return;

success:
	if (memcmp(&push_reply, &FIREHOSE_PUSH_REPLY_CORRUPTED,
			sizeof(push_reply)) == 0) {
		// TODO: find out the actual cause and log it
		DISPATCH_CLIENT_CRASH(0, "Memory corruption in the logging buffers");
	}

	if (for_io) {
		os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_sync_pushes, relaxed);
	} else {
		os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_sync_pushes, relaxed);
	}
	// TODO
	//
	// use fbb_*_flushes and fbb_*_sync_pushes to decide to dynamically
	// allow using more buffers, if not under memory pressure.
	//
	// There only is a point for multithreaded clients if:
	// - enough samples (total_flushes above some limits)
	// - the ratio is really bad (a push per cycle is definitely a problem)
	return firehose_client_merge_updates(fb, false, push_reply, state_out);
}

// MIG handler: synchronous push replies are delivered inline by
// firehose_send_push(), never to the buffer's receive port — getting one
// here is a protocol violation.
kern_return_t
firehose_client_push_reply(mach_port_t req_port OS_UNUSED,
		kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED)
{
	DISPATCH_INTERNAL_CRASH(rtc, "firehose_push_reply should never be sent "
			"to the buffer receive port");
}

// MIG handler for asynchronous daemon notifications: recovers the buffer
// from the current queue's context (set in
// firehose_client_register_for_notifications) and merges the new flushed
// positions.
kern_return_t
firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED,
		firehose_push_reply_t push_reply)
{
	// see _dispatch_source_merge_mach_msg_direct
	dispatch_queue_t dq = _dispatch_queue_get_current();
	firehose_buffer_t fb = dispatch_get_context(dq);
	firehose_client_merge_updates(fb, true, push_reply, NULL);
	return KERN_SUCCESS;
}

#endif // !KERNEL
#pragma mark -
#pragma mark Buffer handling

#ifndef KERNEL
void
firehose_buffer_update_limits(firehose_buffer_t
fb) +{ + dispatch_unfair_lock_t fbb_lock = &fb->fb_header.fbh_bank.fbb_lock; + _dispatch_unfair_lock_lock(fbb_lock); + firehose_buffer_update_limits_unlocked(fb); + _dispatch_unfair_lock_unlock(fbb_lock); +} +#endif // !KERNEL + +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, + firehose_tracepoint_query_t ask, uint8_t **privptr) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + + uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data); + uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE; + + pub_offs += roundup(ft_size + ask->pubsize, 8); + priv_offs -= ask->privsize; + + if (fbc->fbc_pos.fbc_atomic_pos) { + // Needed for process death handling (recycle-reuse): + // No atomic fences required, we merely want to make sure the observers + // will see memory effects in program (asm) order. + // 1. the payload part of the chunk is cleared completely + // 2. the chunk is marked as reused + // This ensures that if we don't see a reference to a chunk in the ring + // and it is dirty, when crawling the chunk, we don't see remnants of + // other tracepoints + // + // We only do that when the fbc_pos is non zero, because zero means + // we just faulted the chunk, and the kernel already bzero-ed it. 
+ bzero(fbc->fbc_data, sizeof(fbc->fbc_data)); + } + dispatch_compiler_barrier(); + // boot starts mach absolute time at 0, and + // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP + // breaks firehose_buffer_stream_flush() assumptions + if (ask->stamp > FIREHOSE_STAMP_SLOP) { + fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + } else { + fbc->fbc_timestamp = 0; + } + fbc->fbc_pos = (firehose_buffer_pos_u){ + .fbc_next_entry_offs = pub_offs, + .fbc_private_offs = priv_offs, + .fbc_refcnt = 1, + .fbc_qos_bits = firehose_buffer_qos_bits_propagate(), + .fbc_stream = ask->stream, + .fbc_flag_io = ask->for_io, + }; + + if (privptr) { + *privptr = fbc->fbc_start + priv_offs; + } + return (firehose_tracepoint_t)fbc->fbc_data; +} + +OS_NOINLINE +static firehose_tracepoint_t +firehose_buffer_stream_chunk_install(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) +{ + firehose_stream_state_u state, new_state; + firehose_tracepoint_t ft; + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[ask->stream]; + uint64_t stamp_and_len; + + if (fastpath(ref)) { + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + ft = firehose_buffer_chunk_init(fbc, ask, privptr); + // Needed for process death handling (tracepoint-begin): + // write the length before making the chunk visible + stamp_and_len = ask->stamp - fbc->fbc_timestamp; + stamp_and_len |= (uint64_t)ask->pubsize << 48; + os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + + if (ask->stream == firehose_stream_metadata) { + os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + 1ULL << ref, relaxed); + } + // release barrier to make the chunk init visible + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, release, { + // We use a generation counter to prevent a theoretical ABA problem: + // a thread could try to acquire a tracepoint in a chunk, fail to + // do so 
mark it as to be pushed, enqueue it, and then be preempted + // + // It sleeps for a long time, and then tries to acquire the + // allocator bit and uninstalling the chunk. Succeeds in doing so, + // but because the chunk actually happened to have cycled all the + // way back to being installed. That thread would effectively hide + // that unflushed chunk and leak it. + // + // Having a generation counter prevents the uninstallation of the + // chunk to spuriously succeed when it was a re-incarnation of it. + new_state = (firehose_stream_state_u){ + .fss_current = ref, + .fss_generation = state.fss_generation + 1, + }; + }); + } else { + // the allocator gave up just clear the allocator + waiter bits + firehose_stream_state_u mask = { .fss_allocator = ~0u, }; + state.fss_atomic_state = os_atomic_and_orig2o(fbs, + fbs_state.fss_atomic_state, ~mask.fss_atomic_state, relaxed); + ft = NULL; + } + +#ifndef KERNEL + if (unlikely(state.fss_gate.dgl_lock != _dispatch_tid_self())) { + _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate, + state.fss_gate.dgl_lock); + } + + if (unlikely(state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE)) { + firehose_buffer_update_limits(fb); + } +#endif // KERNEL + + // pairs with the one in firehose_buffer_tracepoint_reserve() + __firehose_critical_region_leave(); + return ft; +} + +#ifndef KERNEL +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_try_grow(firehose_buffer_bank_t fbb, uint16_t limit) +{ + uint16_t ref = 0; + uint64_t bitmap; + + _dispatch_unfair_lock_lock(&fbb->fbb_lock); + bitmap = ~(fbb->fbb_bitmap | (~0ULL << limit)); + if (bitmap) { + ref = firehose_bitmap_first_set(bitmap); + fbb->fbb_bitmap |= 1U << ref; + } + _dispatch_unfair_lock_unlock(&fbb->fbb_lock); + return ref; +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) +{ + const size_t madv_size = + FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; + const size_t madv_mask = 
+ (1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1; + + dispatch_unfair_lock_t fbb_lock = &fb->fb_header.fbh_bank.fbb_lock; + uint64_t bitmap; + + _dispatch_unfair_lock_lock(fbb_lock); + if (ref < fb->fb_header.fbh_bank.fbb_limits.fbs_max_ref) { + goto done; + } + + bitmap = (fb->fb_header.fbh_bank.fbb_bitmap &= ~(1UL << ref)); + ref &= ~madv_mask; + if ((bitmap & (madv_mask << ref)) == 0) { + // if MADVISE_WIDTH consecutive chunks are free, madvise them free + madvise(firehose_buffer_ref_to_chunk(fb, ref), madv_size, MADV_FREE); + } + ref = 0; +done: + _dispatch_unfair_lock_unlock(fbb_lock); + return ref; +} +#endif // !KERNEL + +OS_NOINLINE +void +firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) +{ + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t volatile *fbh_ring; + uint16_t volatile *fbh_ring_head; + uint16_t head, gen, dummy, idx; + firehose_buffer_pos_u fbc_pos = fbc->fbc_pos; + bool for_io = fbc_pos.fbc_flag_io; + + if (for_io) { + fbh_ring = fb->fb_header.fbh_io_ring; + fbh_ring_head = &fb->fb_header.fbh_ring_io_head; + } else { + fbh_ring = fb->fb_header.fbh_mem_ring; + fbh_ring_head = &fb->fb_header.fbh_ring_mem_head; + } + +#ifdef KERNEL + // The algorithm in the kernel is simpler: + // 1. reserve a write position for the head + // 2. store the new reference at that position + // Enqueuers can't starve each other that way. + // + // However, the dequeuers now have to sometimes wait for the value written + // in the ring to appear and have to spin, which is okay since the kernel + // disables preemption around these two consecutive atomic operations. + // See firehose_client_drain. 
+ __firehose_critical_region_enter(); + head = os_atomic_inc_orig(fbh_ring_head, relaxed); + gen = head & FIREHOSE_RING_POS_GEN_MASK; + idx = head & FIREHOSE_RING_POS_IDX_MASK; + + while (unlikely(!os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + relaxed))) { + // can only ever happen if a recycler is slow, this requires having + // enough cores (>5 for I/O e.g.) + _dispatch_wait_until(fbh_ring[idx] == gen); + } + __firehose_critical_region_leave(); + __firehose_buffer_push_to_logd(fb, for_io); +#else + // The algorithm is: + // 1. read the head position + // 2. cmpxchg head.gen with the (head.gen | ref) at head.idx + // 3. if it fails wait until either the head cursor moves, + // or the cell becomes free + // + // The most likely stall at (3) is because another enqueuer raced us + // and made the cell non empty. + // + // The alternative is to reserve the enqueue slot with an atomic inc. + // Then write the ref into the ring. This would be much simpler as the + // generation packing wouldn't be required (though setting the ring cell + // would still need a cmpxchg loop to avoid clobbering values of slow + // dequeuers) + // + // But then that means that flushers (logd) could be starved until that + // finishes, and logd cannot be held forever (that could even be a logd + // DoS from malicious programs). Meaning that logd would stop draining + // buffer queues when encountering that issue, leading the program to be + // stuck in firehose_client_push() apparently waiting on logd, while + // really it's waiting on itself. It's better for the scheduler if we + // make it clear that we're waiting on ourselves! 
+ + head = os_atomic_load(fbh_ring_head, relaxed); + for (;;) { + gen = head & FIREHOSE_RING_POS_GEN_MASK; + idx = head & FIREHOSE_RING_POS_IDX_MASK; + + // a thread being preempted here for GEN_MASK worth of ring rotations, + // it could lead to the cmpxchg succeed, and have a bogus enqueue + // (confused enqueuer) + if (fastpath(os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + relaxed))) { + if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, + &head, release))) { + __firehose_critical_region_leave(); + break; + } + // this thread is a confused enqueuer, need to undo enqueue + os_atomic_store(&fbh_ring[idx], gen, relaxed); + continue; + } + + _dispatch_wait_until(({ + // wait until either the head moves (another enqueuer is done) + // or (not very likely) a recycler is very slow + // or (very unlikely) the confused thread undoes its enqueue + uint16_t old_head = head; + head = *fbh_ring_head; + head != old_head || fbh_ring[idx] == gen; + })); + } + + pthread_priority_t pp = fbc_pos.fbc_qos_bits; + pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), + for_io); +#endif +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_buffer_ring_try_recycle(firehose_buffer_t fb) +{ + firehose_ring_tail_u pos, old; + uint16_t volatile *fbh_ring; + uint16_t gen, ref, entry, tail; + firehose_buffer_chunk_t fbc; + bool for_io; + + os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + old.frp_atomic_tail, pos.frp_atomic_tail, relaxed, { + pos = old; + if (fastpath(old.frp_mem_tail != old.frp_mem_flushed)) { + pos.frp_mem_tail++; + } else if (fastpath(old.frp_io_tail != old.frp_io_flushed)) { + pos.frp_io_tail++; + } else { + os_atomic_rmw_loop_give_up(return 0); + } + }); + + // there's virtually no chance that the lack of acquire barrier above + // lets us read a value from the ring so stale that it's still an Empty + // marker. 
For correctness purposes have a cheap loop that should never + // really loop, instead of an acquire barrier in the cmpxchg above. + for_io = (pos.frp_io_tail != old.frp_io_tail); + if (for_io) { + fbh_ring = fb->fb_header.fbh_io_ring; + tail = old.frp_io_tail & FIREHOSE_RING_POS_IDX_MASK; + } else { + fbh_ring = fb->fb_header.fbh_mem_ring; + tail = old.frp_mem_tail & FIREHOSE_RING_POS_IDX_MASK; + } + _dispatch_wait_until((entry = fbh_ring[tail]) & FIREHOSE_RING_POS_IDX_MASK); + + // Needed for process death handling (recycle-dequeue): + // No atomic fences required, we merely want to make sure the observers + // will see memory effects in program (asm) order. + // 1. the chunk is marked as "void&full" (clobbering the pos with FULL_BIT) + // 2. then we remove any reference to the chunk from the ring + // This ensures that if we don't see a reference to a chunk in the ring + // and it is dirty, it is a chunk being written to that needs a flush + gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC; + ref = entry & FIREHOSE_RING_POS_IDX_MASK; + fbc = firehose_buffer_ref_to_chunk(fb, ref); + + if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) { + os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + ~(1ULL << ref), relaxed); + } + os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_FULL_BIT, relaxed); + dispatch_compiler_barrier(); + os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); + return ref; +} + +#ifndef KERNEL +OS_NOINLINE +static firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) +{ + const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); + firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; + firehose_bank_state_u state; + uint16_t fbs_max_ref; + + // first wait for our bank to have space, if needed + if (!fastpath(ask->is_bank_ok)) { + state.fbs_atomic_state = + 
os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + while (state.fbs_atomic_state & bank_unavail_mask) { + firehose_client_send_push(fb, ask->for_io, &state); + if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + // logd was unloaded, give up + return NULL; + } + } + ask->is_bank_ok = true; + fbs_max_ref = state.fbs_max_ref; + } else { + fbs_max_ref = fbb->fbb_state.fbs_max_ref; + } + + // second, if we were passed a chunk, we may need to shrink + if (slowpath(ref)) { + goto try_shrink; + } + + // third, wait for a chunk to come up, and if not, wait on the daemon + for (;;) { + if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { + try_shrink: + if (slowpath(ref >= fbs_max_ref)) { + ref = firehose_buffer_ring_shrink(fb, ref); + if (!ref) { + continue; + } + } + break; + } + if (fastpath(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) { + break; + } + firehose_client_send_push(fb, ask->for_io, NULL); + if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + // logd was unloaded, give up + break; + } + } + + return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); +} +#else +static inline dispatch_lock +_dispatch_gate_lock_load_seq_cst(dispatch_gate_t l) +{ + return os_atomic_load(&l->dgl_lock, seq_cst); +} +OS_NOINLINE +static void +_dispatch_gate_wait(dispatch_gate_t l, uint32_t flags) +{ + (void)flags; + _dispatch_wait_until(_dispatch_gate_lock_load_seq_cst(l) == 0); +} +#endif // KERNEL + +firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr) +{ + const unsigned for_io = ask->for_io; + const firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + firehose_bank_state_u state; + uint16_t ref = 0; + + uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); +#ifndef KERNEL + state.fbs_atomic_state = os_atomic_add_orig2o(fbb, + fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); + if (fastpath(!(state.fbs_atomic_state 
& unavail_mask))) { + ask->is_bank_ok = true; + if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { + if (fastpath(ref < state.fbs_max_ref)) { + return firehose_buffer_stream_chunk_install(fb, ask, + privptr, ref); + } + } + } + return firehose_buffer_tracepoint_reserve_slow2(fb, ask, privptr, ref); +#else + firehose_bank_state_u value; + ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, + state.fbs_atomic_state, value.fbs_atomic_state, relaxed, { + value = state; + if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { + os_atomic_rmw_loop_give_up(break); + } + value.fbs_atomic_state += FIREHOSE_BANK_INC(for_io); + }); + if (ask->is_bank_ok) { + ref = firehose_buffer_ring_try_recycle(fb); + if (slowpath(ref == 0)) { + // the kernel has no overlap between I/O and memory chunks, + // having an available bank slot means we should be able to recycle + DISPATCH_INTERNAL_CRASH(0, "Unable to recycle a chunk"); + } + } + // rdar://25137005 installing `0` unlocks the allocator + return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); +#endif // KERNEL +} + +#ifdef KERNEL +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr) +{ + firehose_buffer_t fb = kernel_firehose_buffer; + if (!fastpath(fb)) { + return NULL; + } + return firehose_buffer_tracepoint_reserve(fb, stamp, stream, pubsize, + privsize, privptr); +} + +firehose_tracepoint_t +__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, + uint64_t stamp, firehose_stream_t stream, + uint16_t pubsize, uint16_t privsize, uint8_t **privptr) +{ + + firehose_tracepoint_t ft; + long result; + + result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + pubsize, privsize, privptr); + if (fastpath(result > 0)) { + ft = (firehose_tracepoint_t)(fbc->fbc_start + result); + stamp -= fbc->fbc_timestamp; + stamp |= (uint64_t)pubsize << 48; + // Needed 
for process death handling (tracepoint-begin) + // see firehose_buffer_stream_chunk_install + os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); + dispatch_compiler_barrier(); + return ft; + } + else { + return NULL; + } +} + +firehose_buffer_t +__firehose_buffer_create(size_t *size) +{ + if (!kernel_firehose_buffer) { + kernel_firehose_buffer = firehose_buffer_create(MACH_PORT_NULL, 0, 0); + } + + if (size) { + *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + } + return kernel_firehose_buffer; +} + +void +__firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, + firehose_tracepoint_id_u ftid) +{ + return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); +} + +void +__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, + firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) +{ + firehose_buffer_pos_u pos; + + // Needed for process death handling (tracepoint-flush): + // We want to make sure the observers + // will see memory effects in program (asm) order. + // 1. write all the data to the tracepoint + // 2. write the tracepoint ID, so that seeing it means the tracepoint + // is valid + ft->ft_thread = thread_tid(current_thread()); + + // release barrier makes the log writes visible + os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); + pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); + return; +} + +void +__firehose_merge_updates(firehose_push_reply_t update) +{ + firehose_buffer_t fb = kernel_firehose_buffer; + if (fastpath(fb)) { + firehose_client_merge_updates(fb, true, update, NULL); + } +} +#endif // KERNEL + +#endif // OS_FIREHOSE_SPI diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h new file mode 100644 index 000000000..db8e02629 --- /dev/null +++ b/src/firehose/firehose_buffer_internal.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2015 Apple Inc. 
All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_BUFFER_INTERNAL__ +#define __FIREHOSE_BUFFER_INTERNAL__ + +#if BYTE_ORDER != LITTLE_ENDIAN +#error unsupported byte order +#endif + +#ifndef KERNEL +#include +#endif + +// firehose buffer is CHUNK_COUNT * CHUNK_SIZE big == 256k +#define FIREHOSE_BUFFER_CHUNK_COUNT 64ul +#ifdef KERNEL +#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 15 +#else +#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 4 +#define FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT 4 +#endif + +static const unsigned long firehose_stream_uses_io_bank = + (1UL << firehose_stream_persist) | + (1UL << firehose_stream_special); + +typedef union { +#define FIREHOSE_BANK_SHIFT(bank) (16 * (bank)) +#define FIREHOSE_BANK_INC(bank) (1ULL << FIREHOSE_BANK_SHIFT(bank)) +#define FIREHOSE_BANK_UNAVAIL_BIT ((uint16_t)0x8000) +#define FIREHOSE_BANK_UNAVAIL_MASK(bank) (FIREHOSE_BANK_INC(bank) << 15) + uint64_t fbs_atomic_state; + struct { + uint16_t fbs_mem_bank; + uint16_t fbs_io_bank; + uint16_t fbs_max_ref; + uint16_t fbs_unused; + }; +} firehose_bank_state_u; + +#if __has_feature(c_static_assert) +_Static_assert(8 * offsetof(firehose_bank_state_u, fbs_mem_bank) + == FIREHOSE_BANK_SHIFT(0), "mem bank shift"); +_Static_assert(8 * offsetof(firehose_bank_state_u, fbs_io_bank) + == FIREHOSE_BANK_SHIFT(1), "mem bank shift"); +#endif + 
+typedef struct firehose_buffer_bank_s { + firehose_bank_state_u volatile fbb_state; + uint64_t volatile fbb_metadata_bitmap; + uint64_t volatile fbb_mem_flushed; + uint64_t volatile fbb_mem_notifs; + uint64_t volatile fbb_mem_sync_pushes; + uint64_t volatile fbb_io_flushed; + uint64_t volatile fbb_io_notifs; + uint64_t volatile fbb_io_sync_pushes; +#define FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY (1UL << 0) +#define FIREHOSE_BUFFER_BANK_FLAG_HIGH_RATE (1UL << 1) + unsigned long volatile fbb_flags; + + uint64_t fbb_bitmap; // protected by fbb_lock + firehose_bank_state_u fbb_limits; // protected by fbb_lock +#ifdef KERNEL + uint32_t _fbb_unused; +#else + dispatch_unfair_lock_s fbb_lock; +#endif +} OS_ALIGNED(64) *firehose_buffer_bank_t; + +typedef union { + uint64_t fss_atomic_state; + dispatch_gate_s fss_gate; + struct { + uint32_t fss_allocator; +#define FIREHOSE_STREAM_STATE_PRISTINE 0xffff + uint16_t fss_current; + uint16_t fss_generation; + }; +} firehose_stream_state_u; + +typedef struct firehose_buffer_stream_s { + firehose_stream_state_u fbs_state; +} OS_ALIGNED(128) *firehose_buffer_stream_t; + +typedef union { + uint64_t frp_atomic_tail; + struct { + uint16_t frp_mem_tail; + uint16_t frp_mem_flushed; + uint16_t frp_io_tail; + uint16_t frp_io_flushed; + }; +} firehose_ring_tail_u; + +#define FIREHOSE_RING_POS_GEN_INC ((uint16_t)(FIREHOSE_BUFFER_CHUNK_COUNT)) +#define FIREHOSE_RING_POS_IDX_MASK ((uint16_t)(FIREHOSE_RING_POS_GEN_INC - 1)) +#define FIREHOSE_RING_POS_GEN_MASK ((uint16_t)~FIREHOSE_RING_POS_IDX_MASK) + +/* + * Rings are circular buffers with CHUNK_COUNT entries, with 3 important markers + * + * +--------+-------------------------+------------+---------------------------+ + * |xxxxxxxx| |............|xxxxxxxxxxxxxxxxxxxxxxxxxxx| + * +--------+-------------------------+------------+---------------------------+ + * ^ ^ ^ + * head tail flushed + * + * A ring position is a uint16_t made of a generation (see GEN_MASK) and an + * index (see IDX_MASK). 
Slots of that ring hold tagged page references. These + * are made from a generation (see GEN_MASK) and a page reference. + * + * A generation is how many times the head wrapped around. + * + * These conditions hold: + * (uint16_t)(flushed - tail) < FIREHOSE_BUFFER_CHUNK_COUNT + * (uint16_t)(head - flushed) < FIREHOSE_BUFFER_CHUNK_COUNT + * which really means, on the circular buffer, tail <= flushed <= head. + * + * Page references span from 1 to (CHUNK_COUNT - 1). 0 is an invalid page + * (corresponds to the buffer header) and means "unused". + * + * + * - Entries situated between tail and flushed hold references to pages that + * the firehose consumer (logd) has flushed, and can be reused. + * + * - Entries situated between flushed and head are references to pages waiting + * to be flushed. + * + * - Entries not situated between tail and head are either slots being modified + * or that should be set to Empty. Empty is the 0 page reference associated + * with the generation count the head will have the next time it will go over + * that slot. 
+ */ +typedef struct firehose_buffer_header_s { + uint16_t volatile fbh_mem_ring[FIREHOSE_BUFFER_CHUNK_COUNT]; + uint16_t volatile fbh_io_ring[FIREHOSE_BUFFER_CHUNK_COUNT]; + + firehose_ring_tail_u volatile fbh_ring_tail OS_ALIGNED(64); + uint32_t fbh_spi_version; + uint16_t volatile fbh_ring_mem_head OS_ALIGNED(64); + uint16_t volatile fbh_ring_io_head OS_ALIGNED(64); + struct firehose_buffer_bank_s fbh_bank; + struct firehose_buffer_stream_s fbh_stream[_firehose_stream_max]; + + uint64_t fbh_uniquepid; + pid_t fbh_pid; + mach_port_t fbh_logd_port; + mach_port_t volatile fbh_sendp; + mach_port_t fbh_recvp; + + // past that point fields may be aligned differently between 32 and 64bits +#ifndef KERNEL + dispatch_once_t fbh_notifs_pred OS_ALIGNED(64); + dispatch_source_t fbh_notifs_source; + dispatch_unfair_lock_s fbh_logd_lock; +#endif + uint64_t fbh_unused[0]; +} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t; + +union firehose_buffer_u { + struct firehose_buffer_header_s fb_header; + struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; +}; + +// used to let the compiler pack these values in 1 or 2 registers +typedef struct firehose_tracepoint_query_s { + uint16_t pubsize; + uint16_t privsize; + firehose_stream_t stream; + bool is_bank_ok; + bool for_io; + uint64_t stamp; +} *firehose_tracepoint_query_t; + +#ifndef FIREHOSE_SERVER + +firehose_buffer_t +firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, + unsigned long bank_flags); + +firehose_tracepoint_t +firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, + firehose_tracepoint_query_t ask, uint8_t **privptr); + +void +firehose_buffer_update_limits(firehose_buffer_t fb); + +void +firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); + +#endif + +#endif // __FIREHOSE_BUFFER_INTERNAL__ diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h new file mode 100644 index 000000000..95768825f --- /dev/null +++ 
b/src/firehose/firehose_inline_internal.h @@ -0,0 +1,502 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_INLINE_INTERNAL__ +#define __FIREHOSE_INLINE_INTERNAL__ + +#define firehose_atomic_maxv2o(p, f, v, o, m) \ + os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ + if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ + }) + +#define firehose_atomic_max2o(p, f, v, m) ({ \ + typeof((p)->f) _old; \ + firehose_atomic_maxv2o(p, f, v, &_old, m); \ + }) + +#ifndef KERNEL +// caller must test for non zero first +OS_ALWAYS_INLINE +static inline uint16_t +firehose_bitmap_first_set(uint64_t bitmap) +{ + dispatch_assert(bitmap != 0); + // this builtin returns 0 if bitmap is 0, or (first bit set + 1) + return (uint16_t)__builtin_ffsll((long long)bitmap) - 1; +} +#endif + +#pragma mark - +#pragma mark Mach Misc. 
#ifndef KERNEL

// Constructs a receive right with the given construction flags, using `ctx`
// as the port's guard/context value. Retries forever through
// _dispatch_temporary_resource_shortage() on failure, so this never returns
// an invalid port.
OS_ALWAYS_INLINE
static inline mach_port_t
firehose_mach_port_allocate(uint32_t flags, void *ctx)
{
	mach_port_t port = MACH_PORT_NULL;
	mach_port_options_t opts = {
		.flags = flags,
	};
	kern_return_t kr;

	for (;;) {
		kr = mach_port_construct(mach_task_self(), &opts,
				(mach_port_context_t)ctx, &port);
		if (fastpath(kr == KERN_SUCCESS)) {
			break;
		}
		DISPATCH_VERIFY_MIG(kr);
		dispatch_assume_zero(kr);
		_dispatch_temporary_resource_shortage();
	}
	return port;
}

// Destroys a receive right created with firehose_mach_port_allocate();
// `ctx` must match the context the port was constructed with.
OS_ALWAYS_INLINE
static inline kern_return_t
firehose_mach_port_recv_dispose(mach_port_t port, void *ctx)
{
	kern_return_t kr;
	kr = mach_port_destruct(mach_task_self(), port, 0,
			(mach_port_context_t)ctx);
	DISPATCH_VERIFY_MIG(kr);
	return kr;
}

// Releases one send-right reference on `port`; asserts on failure.
OS_ALWAYS_INLINE
static inline void
firehose_mach_port_send_release(mach_port_t port)
{
	kern_return_t kr = mach_port_deallocate(mach_task_self(), port);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

// Guards `port` with `ctx` (strictly when `strict`); asserts on failure.
OS_ALWAYS_INLINE
static inline void
firehose_mach_port_guard(mach_port_t port, bool strict, void *ctx)
{
	kern_return_t kr = mach_port_guard(mach_task_self(), port,
			(mach_port_context_t)ctx, strict);
	DISPATCH_VERIFY_MIG(kr);
	dispatch_assume_zero(kr);
}

// Dispatches one received MIG message `hdr` through the `demux` routine,
// with a stack-allocated reply buffer of `maxmsgsz` bytes. Handlers are
// expected to reply inline or return an error: this helper crashes if a
// handler returns KERN_SUCCESS while the sender still expects a reply
// (firehose handlers never send replies from here), and destroys the
// request on real errors so its resources are not leaked.
OS_ALWAYS_INLINE
static inline void
firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz,
		mach_msg_header_t *hdr)
{
	mig_reply_error_t *msg_reply = (mig_reply_error_t *)alloca(maxmsgsz);
	kern_return_t rc = KERN_SUCCESS;
	bool expects_reply = false;

	if (MACH_MSGH_BITS_REMOTE(hdr->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		expects_reply = true;
	}

	if (!fastpath(demux(hdr, &msg_reply->Head))) {
		rc = MIG_BAD_ID;
	} else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		rc = KERN_SUCCESS;
	} else {
		// if MACH_MSGH_BITS_COMPLEX is _not_ set, then msg_reply->RetCode
		// is present
		rc = msg_reply->RetCode;
	}

	if (slowpath(rc == KERN_SUCCESS && expects_reply)) {
		// if crashing here, some handler returned KERN_SUCCESS
		// hoping for firehose_mig_server to perform the mach_msg()
		// call to reply, and it doesn't know how to do that
		DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id,
				"firehose_mig_server doesn't handle replies");
	}
	if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) {
		// destroy the request - but not the reply port
		hdr->msgh_remote_port = 0;
		mach_msg_destroy(hdr);
	}
}

#endif // !KERNEL
#pragma mark -
#pragma mark firehose buffer

// Rounds any address inside a chunk down to that chunk's base
// (chunks are FIREHOSE_BUFFER_CHUNK_SIZE-aligned).
OS_ALWAYS_INLINE
static inline firehose_buffer_chunk_t
firehose_buffer_chunk_for_address(void *addr)
{
	uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1);
	return (firehose_buffer_chunk_t)chunk_addr;
}

// Chunk pointer -> small integer ref (index into fb_chunks).
OS_ALWAYS_INLINE
static inline uint16_t
firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc)
{
	return (uint16_t)(fbc - fb->fb_chunks);
}

// Small integer ref -> chunk pointer; inverse of
// firehose_buffer_chunk_to_ref(). Ref 0 is the buffer header, not a
// usable chunk.
OS_ALWAYS_INLINE
static inline firehose_buffer_chunk_t
firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref)
{
	return fb->fb_chunks + ref;
}

#ifndef FIREHOSE_SERVER

// True when `size` more payload bytes fit between the public write cursor
// and the start of the private area of a chunk.
OS_ALWAYS_INLINE
static inline bool
firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size)
{
	return pos.fbc_next_entry_offs + size <= pos.fbc_private_offs;
}

#if DISPATCH_PURE_C

// Returns the propagated pthread QoS class compressed into the chunk's
// qos-bits field (0 in the kernel, which has no pthread priorities).
OS_ALWAYS_INLINE
static inline uint8_t
firehose_buffer_qos_bits_propagate(void)
{
#ifndef KERNEL
	pthread_priority_t pp = _dispatch_priority_propagate();

	pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK;
	return (uint8_t)(pp >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT);
#else
	return 0;
#endif
}

// Attempts to reserve pubsize + privsize bytes of tracepoint space in `fbc`
// with a single atomic update of the chunk's position word. Returns the
// offset of the reserved tracepoint within the chunk on success, 0 when the
// chunk cannot be used (full, stale, or belonging to another stream) —
// see the loop body below for the individual give-up conditions.
OS_ALWAYS_INLINE
static inline long
firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp,
		firehose_stream_t stream, uint16_t pubsize,
		uint16_t privsize, uint8_t **privptr)
{
	const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data);
	firehose_buffer_pos_u orig, pos;
	uint8_t qos_bits = firehose_buffer_qos_bits_propagate();
	bool
reservation_failed, stamp_delta_fits; + + stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0; + + // no acquire barrier because the returned space is written to only + os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos, + orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, { + if (unlikely(orig.fbc_atomic_pos == 0)) { + // we acquired a really really old reference, and we probably + // just faulted in a new page + // FIXME: if/when we hit this we should try to madvise it back FREE + os_atomic_rmw_loop_give_up(return 0); + } + if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) { + // nothing to do if the chunk is full, or the stream doesn't match, + // in which case the thread probably: + // - loaded the chunk ref + // - been suspended a long while + // - read the chunk to find a very old thing + os_atomic_rmw_loop_give_up(return 0); + } + pos = orig; + pos.fbc_qos_bits |= qos_bits; + if (unlikely(!firehose_buffer_pos_fits(orig, + ft_size + pubsize + privsize) || !stamp_delta_fits)) { + pos.fbc_flag_full = true; + reservation_failed = true; + } else { + // using these *_INC macros is so that the compiler generates better + // assembly: using the struct individual fields forces the compiler + // to handle carry propagations, and we know it won't happen + pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) * + FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC; + pos.fbc_atomic_pos -= privsize * + FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC; + pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC; + const uint16_t minimum_payload_size = 16; + if (!firehose_buffer_pos_fits(pos, + roundup(ft_size + minimum_payload_size , 8))) { + // if we can't even have minimum_payload_size bytes of payload + // for the next tracepoint, just flush right away + pos.fbc_flag_full = true; + } + reservation_failed = false; + } + }); + + if (reservation_failed) { + if (pos.fbc_refcnt) { + // nothing to do, there is a thread writing that will pick up + // the "FULL" flag on flush and push as 
a consequence + return 0; + } + // caller must enqueue chunk + return -1; + } + if (privptr) { + *privptr = fbc->fbc_start + pos.fbc_private_offs; + } + return orig.fbc_next_entry_offs; +} + +OS_ALWAYS_INLINE +static inline void +firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) +{ + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; + firehose_stream_state_u old_state, new_state; + firehose_buffer_chunk_t fbc; + uint64_t stamp = UINT64_MAX; // will cause the reservation to fail + uint16_t ref; + long result; + + old_state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + ref = old_state.fss_current; + if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { + // there is no installed page, nothing to flush, go away + return; + } + + fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); + result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL); + if (likely(result < 0)) { + firehose_buffer_ring_enqueue(fb, old_state.fss_current); + } + if (unlikely(result > 0)) { + // because we pass a silly stamp that requires a flush + DISPATCH_INTERNAL_CRASH(result, "Allocation should always fail"); + } + + // as a best effort try to uninstall the page we just flushed + // but failing is okay, let's not contend stupidly for something + // allocators know how to handle in the first place + new_state = old_state; + new_state.fss_current = 0; + (void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state, + old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); +} + +/** + * @function firehose_buffer_tracepoint_reserve + * + * @abstract + * Reserves space in the firehose buffer for the tracepoint with specified + * characteristics. + * + * @discussion + * This returns a slot, with the length of the tracepoint already set, so + * that in case of a crash, we maximize our chance to be able to skip the + * tracepoint in case of a partial write. 
+ * + * Once the tracepoint has been written, firehose_buffer_tracepoint_flush() + * must be called. + * + * @param fb + * The buffer to allocate from. + * + * @param stream + * The buffer stream to use. + * + * @param pubsize + * The size of the public data for this tracepoint, cannot be 0, doesn't + * take the size of the tracepoint header into account. + * + * @param privsize + * The size of the private data for this tracepoint, can be 0. + * + * @param privptr + * The pointer to the private buffer, can be NULL + * + * @result + * The pointer to the tracepoint. + */ +OS_ALWAYS_INLINE +static inline firehose_tracepoint_t +firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, + firehose_stream_t stream, uint16_t pubsize, + uint16_t privsize, uint8_t **privptr) +{ + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; + firehose_stream_state_u old_state, new_state; + firehose_tracepoint_t ft; + firehose_buffer_chunk_t fbc; +#if KERNEL + bool failable = false; +#endif + bool success; + long result; + uint16_t ref; + + // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store + old_state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + for (;;) { + new_state = old_state; + + ref = old_state.fss_current; + if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { + fbc = firehose_buffer_ref_to_chunk(fb, ref); + result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + pubsize, privsize, privptr); + if (likely(result > 0)) { + ft = (firehose_tracepoint_t)(fbc->fbc_start + result); + stamp -= fbc->fbc_timestamp; + stamp |= (uint64_t)pubsize << 48; + // Needed for process death handling (tracepoint-begin) + // see firehose_buffer_stream_chunk_install + os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); + dispatch_compiler_barrier(); + return ft; + } + if (likely(result < 0)) { + firehose_buffer_ring_enqueue(fb, old_state.fss_current); + } + new_state.fss_current = 0; + } +#if 
KERNEL + if (failable) { + return NULL; + } +#endif + + if (unlikely(old_state.fss_allocator)) { + _dispatch_gate_wait(&fbs->fbs_state.fss_gate, + DLOCK_LOCK_DATA_CONTENTION); + old_state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); +#if KERNEL + failable = true; +#endif + continue; + } + + // if the thread doing the allocation is a low priority one + // we may starve high priority ones. + // so disable preemption before we become an allocator + // the reenabling of the preemption is in + // firehose_buffer_stream_chunk_install + __firehose_critical_region_enter(); +#if KERNEL + new_state.fss_allocator = (uint32_t)cpu_number(); +#else + new_state.fss_allocator = _dispatch_tid_self(); +#endif + success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state, + old_state.fss_atomic_state, new_state.fss_atomic_state, + &old_state.fss_atomic_state, relaxed); + if (likely(success)) { + break; + } + __firehose_critical_region_leave(); + } + + struct firehose_tracepoint_query_s ask = { + .pubsize = pubsize, + .privsize = privsize, + .stream = stream, + .for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0, + .stamp = stamp, + }; + return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); +} + +/** + * @function firehose_buffer_tracepoint_flush + * + * @abstract + * Flushes a firehose tracepoint, and sends the chunk to the daemon when full + * and this was the last tracepoint writer for this chunk. + * + * @param fb + * The buffer the tracepoint belongs to. + * + * @param ft + * The tracepoint to flush. + * + * @param ftid + * The firehose tracepoint ID for that tracepoint. + * It is written last, preventing compiler reordering, so that its absence + * on crash recovery means the tracepoint is partial. 
+ */ +OS_ALWAYS_INLINE +static inline void +firehose_buffer_tracepoint_flush(firehose_buffer_t fb, + firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) +{ + firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft); + firehose_buffer_pos_u pos; + + // Needed for process death handling (tracepoint-flush): + // We want to make sure the observers + // will see memory effects in program (asm) order. + // 1. write all the data to the tracepoint + // 2. write the tracepoint ID, so that seeing it means the tracepoint + // is valid +#ifdef KERNEL + ft->ft_thread = thread_tid(current_thread()); +#else + ft->ft_thread = _pthread_threadid_self_np_direct(); +#endif + // release barrier makes the log writes visible + os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); + pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, + FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); + if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) { + firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc)); + } +} + +#ifndef KERNEL +OS_ALWAYS_INLINE +static inline void +firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) +{ + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long orig_flags; + + orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed); + if (orig_flags != (orig_flags & ~bits)) { + firehose_buffer_update_limits(fb); + } +} + +OS_ALWAYS_INLINE +static inline void +firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) +{ + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + unsigned long orig_flags; + + orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed); + if (orig_flags != (orig_flags | bits)) { + firehose_buffer_update_limits(fb); + } +} +#endif // !KERNEL + +#endif // !defined(FIREHOSE_SERVER) + +#endif // DISPATCH_PURE_C + +#endif // __FIREHOSE_INLINE_INTERNAL__ diff --git a/src/firehose/firehose_internal.h b/src/firehose/firehose_internal.h new file mode 
100644 index 000000000..29d1ad240 --- /dev/null +++ b/src/firehose/firehose_internal.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_INTERNAL__ +#define __FIREHOSE_INTERNAL__ + +#if OS_FIREHOSE_SPI + +// make sure this is defined so that we get MIG_SERVER_DIED when a send once +// notification is sent back because of a crashed server +#ifndef __MigTypeCheck +#define __MigTypeCheck 1 +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "os/firehose_server_private.h" +#include "firehose_buffer_internal.h" +#ifdef FIREHOSE_SERVER +#include "firehose_server_internal.h" +#endif +#include "firehose_inline_internal.h" + +#endif // OS_FIREHOSE_SPI + +#endif // __FIREHOSE_INTERNAL__ diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs new file mode 100644 index 000000000..124defa59 --- /dev/null +++ b/src/firehose/firehose_reply.defs @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include "firehose_types.defs" + +subsystem firehoseReply 11700; + +serverprefix firehose_client_; +userprefix firehose_send_; + +skip; // firehose_register + +simpleroutine push_reply( +RequestPort req_port : mach_port_move_send_once_t; +in rtc : kern_return_t; +in push_reply : firehose_push_reply_t +); + +simpleroutine push_notify_async( +RequestPort comm_port : mach_port_t; +in push_reply : firehose_push_reply_t; +WaitTime timeout : natural_t +); diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c new file mode 100644 index 000000000..a6be2fab7 --- /dev/null +++ b/src/firehose/firehose_server.c @@ -0,0 +1,1137 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include "internal.h" +#include "firehoseServer.h" // MiG +#include "firehose_reply.h" // MiG + +#if __has_feature(c_static_assert) +_Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos) + % 8 == 0, "Make sure atomic fields are properly aligned"); +#endif + +static struct firehose_server_s { + mach_port_t fs_bootstrap_port; + dispatch_mach_t fs_mach_channel; + dispatch_queue_t fs_ipc_queue; + dispatch_queue_t fs_snapshot_gate_queue; + dispatch_queue_t fs_io_drain_queue; + dispatch_queue_t fs_mem_drain_queue; + firehose_handler_t fs_handler; + + firehose_snapshot_t fs_snapshot; + + int fs_kernel_fd; + firehose_client_t fs_kernel_client; + + TAILQ_HEAD(, firehose_client_s) fs_clients; +} server_config = { + .fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients), + .fs_kernel_fd = -1, +}; + +#pragma mark - +#pragma mark firehose client state machine + +static void firehose_server_demux(firehose_client_t fc, + mach_msg_header_t *msg_hdr); +static void firehose_client_cancel(firehose_client_t fc); +static void firehose_client_snapshot_finish(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io); + +static void +firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) +{ + firehose_push_reply_t push_reply = { + .fpr_mem_flushed_pos = os_atomic_load2o(fc, fc_mem_flushed_pos,relaxed), + .fpr_io_flushed_pos = os_atomic_load2o(fc, fc_io_flushed_pos, relaxed), + }; + kern_return_t kr; + + firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos, + push_reply.fpr_mem_flushed_pos, relaxed); + firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, + push_reply.fpr_io_flushed_pos, relaxed); + + if (fc->fc_is_kernel) { + if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) { + dispatch_assume_zero(errno); + } + } else { + if (reply_port == fc->fc_sendp) { + kr = firehose_send_push_notify_async(reply_port, push_reply, 0); + } else { + kr 
= firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply); + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } +} + +OS_ALWAYS_INLINE +static inline uint16_t +firehose_client_acquire_head(firehose_buffer_t fb, bool for_io) +{ + uint16_t head; + if (for_io) { + head = os_atomic_load2o(&fb->fb_header, fbh_ring_io_head, acquire); + } else { + head = os_atomic_load2o(&fb->fb_header, fbh_ring_mem_head, acquire); + } + return head; +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp, + bool for_io) +{ + if (for_io) { + _dispatch_source_merge_data(fc->fc_io_source, pp, 1); + } else { + _dispatch_source_merge_data(fc->fc_mem_source, pp, 1); + } +} + +OS_NOINLINE OS_COLD +static void +firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port) +{ + // this client is really confused, do *not* answer to asyncs anymore + fc->fc_memory_corrupted = true; + fc->fc_use_notifs = false; + + // XXX: do not cancel the data sources or a corrupted client could + // prevent snapshots from being taken if unlucky with ordering + + if (reply_port) { + kern_return_t kr = firehose_send_push_reply(reply_port, 0, + FIREHOSE_PUSH_REPLY_CORRUPTED); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_snapshot_mark_done(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io) +{ + if (for_io) { + fc->fc_needs_io_snapshot = false; + } else { + fc->fc_needs_mem_snapshot = false; + } + dispatch_group_leave(snapshot->fs_group); +} + +#define DRAIN_BATCH_SIZE 4 +#define FIREHOSE_DRAIN_FOR_IO 0x1 +#define FIREHOSE_DRAIN_POLL 0x2 + +OS_NOINLINE +static void +firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) +{ + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_chunk_t fbc; + firehose_event_t evt; + uint16_t volatile *fbh_ring; + uint16_t flushed, ref, 
count = 0; + uint16_t client_head, client_flushed, sent_flushed; + firehose_snapshot_t snapshot = NULL; + bool for_io = (flags & FIREHOSE_DRAIN_FOR_IO); + + if (for_io) { + evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED; + _Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED == + FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, ""); + fbh_ring = fb->fb_header.fbh_io_ring; + sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos; + flushed = (uint16_t)fc->fc_io_flushed_pos; + if (fc->fc_needs_io_snapshot) { + snapshot = server_config.fs_snapshot; + } + } else { + evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED; + _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED == + FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, ""); + fbh_ring = fb->fb_header.fbh_mem_ring; + sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos; + flushed = (uint16_t)fc->fc_mem_flushed_pos; + if (fc->fc_needs_mem_snapshot) { + snapshot = server_config.fs_snapshot; + } + } + + if (slowpath(fc->fc_memory_corrupted)) { + goto corrupt; + } + + client_head = flushed; + do { + if ((uint16_t)(flushed + count) == client_head) { + client_head = firehose_client_acquire_head(fb, for_io); + if ((uint16_t)(flushed + count) == client_head) { + break; + } + if ((uint16_t)(client_head - sent_flushed) >= + FIREHOSE_BUFFER_CHUNK_COUNT) { + goto corrupt; + } + } + + // see firehose_buffer_ring_enqueue + do { + ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK; + ref = os_atomic_load(&fbh_ring[ref], relaxed); + ref &= FIREHOSE_RING_POS_IDX_MASK; + } while (fc->fc_is_kernel && !ref); + count++; + if (!ref) { + _dispatch_debug("Ignoring invalid page reference in ring: %d", ref); + continue; + } + + fbc = firehose_buffer_ref_to_chunk(fb, ref); + server_config.fs_handler(fc, evt, fbc); + if (slowpath(snapshot)) { + snapshot->handler(fc, evt, fbc); + } + // clients not using notifications (single threaded) always drain fully + // because they use all their limit, always + } while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot); + + if (count) { + // we 
don't load the full fbh_ring_tail because that is a 64bit quantity + // and we only need 16bits from it. and on 32bit arm, there's no way to + // perform an atomic load of a 64bit quantity on read-only memory. + if (for_io) { + os_atomic_add2o(fc, fc_io_flushed_pos, count, relaxed); + client_flushed = os_atomic_load2o(&fb->fb_header, + fbh_ring_tail.frp_io_flushed, relaxed); + } else { + os_atomic_add2o(fc, fc_mem_flushed_pos, count, relaxed); + client_flushed = os_atomic_load2o(&fb->fb_header, + fbh_ring_tail.frp_mem_flushed, relaxed); + } + if (fc->fc_is_kernel) { + // will fire firehose_client_notify() because port is MACH_PORT_DEAD + port = fc->fc_sendp; + } else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) { + port = fc->fc_sendp; + } + } + + if (slowpath(snapshot)) { + firehose_client_snapshot_finish(fc, snapshot, for_io); + firehose_client_snapshot_mark_done(fc, snapshot, for_io); + } + if (port) { + firehose_client_notify(fc, port); + } + if (fc->fc_is_kernel) { + if (!(flags & FIREHOSE_DRAIN_POLL)) { + // see firehose_client_kernel_source_handle_event + dispatch_resume(fc->fc_kernel_source); + } + } else { + if (fc->fc_use_notifs && count >= DRAIN_BATCH_SIZE) { + // if we hit the drain batch size, the client probably logs a lot + // and there's more to drain, so optimistically schedule draining + // again this is cheap since the queue is hot, and is fair for other + // clients + firehose_client_push_async_merge(fc, 0, for_io); + } + if (count && server_config.fs_kernel_client) { + // the kernel is special because it can drop messages, so if we're + // draining, poll the kernel each time while we're bound to a thread + firehose_client_drain(server_config.fs_kernel_client, + MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL); + } + } + return; + +corrupt: + if (snapshot) { + firehose_client_snapshot_mark_done(fc, snapshot, for_io); + } + firehose_client_mark_corrupted(fc, port); + // from now on all IO/mem drains depending on `for_io` will be 
no-op + // (needs__snapshot: false, memory_corrupted: true). we can safely + // silence the corresponding source of drain wake-ups. + if (!fc->fc_is_kernel) { + dispatch_source_cancel(for_io ? fc->fc_io_source : fc->fc_mem_source); + } +} + +static void +firehose_client_drain_io_async(void *ctx) +{ + firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO); +} + +static void +firehose_client_drain_mem_async(void *ctx) +{ + firehose_client_drain(ctx, MACH_PORT_NULL, 0); +} + +OS_NOINLINE +static void +firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) +{ + firehose_snapshot_t snapshot = server_config.fs_snapshot; + firehose_buffer_t fb = fc->fc_buffer; + + dispatch_assert_queue(server_config.fs_io_drain_queue); + + // if a client dies between phase 1 and 2 of starting the snapshot + // (see firehose_snapshot_start)) there's no handler to call, but the + // dispatch group has to be adjusted for this client going away. + if (fc->fc_needs_io_snapshot) { + dispatch_group_leave(snapshot->fs_group); + fc->fc_needs_io_snapshot = false; + } + if (fc->fc_needs_mem_snapshot) { + dispatch_group_leave(snapshot->fs_group); + fc->fc_needs_mem_snapshot = false; + } + if (fc->fc_memory_corrupted) { + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CORRUPTED, + &fb->fb_chunks[0]); + } + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); + + TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); + fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; + fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; + _os_object_release(&fc->fc_as_os_object); +} + +OS_NOINLINE +static void +firehose_client_handle_death(void *ctxt) +{ + firehose_client_t fc = ctxt; + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t mem_bitmap = 0, bitmap; + + if (fc->fc_memory_corrupted) { + return firehose_client_finalize(fc); + } + + dispatch_assert_queue(server_config.fs_io_drain_queue); + + // acquire to match release barriers 
from threads that died + os_atomic_thread_fence(acquire); + + bitmap = fbh->fbh_bank.fbb_bitmap & ~1ULL; + for (int for_io = 0; for_io < 2; for_io++) { + uint16_t volatile *fbh_ring; + uint16_t tail, flushed; + + if (for_io) { + fbh_ring = fbh->fbh_io_ring; + tail = fbh->fbh_ring_tail.frp_io_tail; + flushed = (uint16_t)fc->fc_io_flushed_pos; + } else { + fbh_ring = fbh->fbh_mem_ring; + tail = fbh->fbh_ring_tail.frp_mem_tail; + flushed = (uint16_t)fc->fc_mem_flushed_pos; + } + if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) { + fc->fc_memory_corrupted = true; + return firehose_client_finalize(fc); + } + + // remove the pages that we flushed already from the bitmap + for (; tail != flushed; tail++) { + uint16_t ring_pos = tail & FIREHOSE_RING_POS_IDX_MASK; + uint16_t ref = fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK; + + bitmap &= ~(1ULL << ref); + } + } + + firehose_snapshot_t snapshot = server_config.fs_snapshot; + + // Then look at all the allocated pages not seen in the ring + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (!fbc->fbc_pos.fbc_flag_io) { + mem_bitmap |= 1ULL << ref; + continue; + } + server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); + if (fc->fc_needs_io_snapshot && snapshot) { + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); + } + } + + if (!mem_bitmap) { + return firehose_client_finalize(fc); + } + + dispatch_async(server_config.fs_mem_drain_queue, ^{ + 
uint64_t mem_bitmap_copy = mem_bitmap; + + while (mem_bitmap_copy) { + uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + + mem_bitmap_copy &= ~(1ULL << ref); + server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); + if (fc->fc_needs_mem_snapshot && snapshot) { + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); + } + } + + dispatch_async_f(server_config.fs_io_drain_queue, fc, + (dispatch_function_t)firehose_client_finalize); + }); +} + +static void +firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED) +{ + mach_msg_header_t *msg_hdr; + firehose_client_t fc = ctx; + mach_port_t oldsendp, oldrecvp; + + if (dmsg) { + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); + oldsendp = msg_hdr->msgh_remote_port; + oldrecvp = msg_hdr->msgh_local_port; + } + + switch (reason) { + case DISPATCH_MACH_MESSAGE_RECEIVED: + if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { + _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", + firehose_client_get_unique_pid(fc, NULL)); + dispatch_mach_cancel(fc->fc_mach_channel); + } else { + firehose_server_demux(fc, msg_hdr); + } + break; + + case DISPATCH_MACH_DISCONNECTED: + if (oldsendp) { + if (slowpath(oldsendp != fc->fc_sendp)) { + DISPATCH_INTERNAL_CRASH(oldsendp, + "disconnect event about unknown send-right"); + } + firehose_mach_port_send_release(fc->fc_sendp); + fc->fc_sendp = MACH_PORT_NULL; + } + if (oldrecvp) { + if (slowpath(oldrecvp != fc->fc_recvp)) { + DISPATCH_INTERNAL_CRASH(oldrecvp, + "disconnect event about unknown receive-right"); + } + firehose_mach_port_recv_dispose(fc->fc_recvp, fc); + fc->fc_recvp = MACH_PORT_NULL; + } + if (fc->fc_recvp == MACH_PORT_NULL && fc->fc_sendp == MACH_PORT_NULL) { + firehose_client_cancel(fc); + } + break; + } +} + +#if !TARGET_OS_SIMULATOR +static void 
+firehose_client_kernel_source_handle_event(void *ctxt) +{ + firehose_client_t fc = ctxt; + + // resumed in firehose_client_drain for both memory and I/O + dispatch_suspend(fc->fc_kernel_source); + dispatch_suspend(fc->fc_kernel_source); + dispatch_async_f(server_config.fs_mem_drain_queue, + fc, firehose_client_drain_mem_async); + dispatch_async_f(server_config.fs_io_drain_queue, + fc, firehose_client_drain_io_async); +} +#endif + +static inline void +firehose_client_resume(firehose_client_t fc, + const struct firehose_client_connected_info_s *fcci) +{ + dispatch_assert_queue(server_config.fs_io_drain_queue); + TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci); + if (fc->fc_is_kernel) { + dispatch_activate(fc->fc_kernel_source); + } else { + dispatch_mach_connect(fc->fc_mach_channel, + fc->fc_recvp, fc->fc_sendp, NULL); + dispatch_activate(fc->fc_io_source); + dispatch_activate(fc->fc_mem_source); + } +} + +static void +firehose_client_cancel(firehose_client_t fc) +{ + dispatch_mach_t dm; + dispatch_block_t block; + + _dispatch_debug("client died (unique_pid: 0x%llx", + firehose_client_get_unique_pid(fc, NULL)); + + dm = fc->fc_mach_channel; + fc->fc_mach_channel = NULL; + dispatch_release(dm); + + fc->fc_use_notifs = false; + dispatch_source_cancel(fc->fc_io_source); + dispatch_source_cancel(fc->fc_mem_source); + + block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{ + dispatch_async_f(server_config.fs_io_drain_queue, fc, + firehose_client_handle_death); + }); + dispatch_async(server_config.fs_mem_drain_queue, block); + _Block_release(block); +} + +static firehose_client_t +_firehose_client_create(firehose_buffer_t fb) +{ + firehose_client_t fc; + + fc = (firehose_client_t)_os_object_alloc_realized(FIREHOSE_CLIENT_CLASS, + sizeof(struct firehose_client_s)); + fc->fc_buffer = fb; + fc->fc_mem_flushed_pos = fb->fb_header.fbh_bank.fbb_mem_flushed; + 
fc->fc_mem_sent_flushed_pos = fc->fc_mem_flushed_pos; + fc->fc_io_flushed_pos = fb->fb_header.fbh_bank.fbb_io_flushed; + fc->fc_io_sent_flushed_pos = fc->fc_io_flushed_pos; + return fc; +} + +static firehose_client_t +firehose_client_create(firehose_buffer_t fb, + mach_port_t comm_recvp, mach_port_t comm_sendp) +{ + uint64_t unique_pid = fb->fb_header.fbh_uniquepid; + firehose_client_t fc = _firehose_client_create(fb); + dispatch_mach_t dm; + dispatch_source_t ds; + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, + server_config.fs_mem_drain_queue); + _os_object_retain_internal_inline(&fc->fc_as_os_object); + dispatch_set_context(ds, fc); + dispatch_set_finalizer_f(ds, + (dispatch_function_t)_os_object_release_internal); + dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async); + fc->fc_mem_source = ds; + + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, + server_config.fs_io_drain_queue); + _os_object_retain_internal_inline(&fc->fc_as_os_object); + dispatch_set_context(ds, fc); + dispatch_set_finalizer_f(ds, + (dispatch_function_t)_os_object_release_internal); + dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async); + fc->fc_io_source = ds; + + _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid); + fc->fc_recvp = comm_recvp; + fc->fc_sendp = comm_sendp; + firehose_mach_port_guard(comm_recvp, true, fc); + dm = dispatch_mach_create_f("com.apple.firehose.peer", + server_config.fs_ipc_queue, + fc, firehose_client_handle_mach_event); + fc->fc_mach_channel = dm; + return fc; +} + +static void +firehose_kernel_client_create(void) +{ +#if !TARGET_OS_SIMULATOR + struct firehose_server_s *fs = &server_config; + firehose_buffer_map_info_t fb_map; + firehose_client_t fc; + dispatch_source_t ds; + int fd; + + while ((fd = open("/dev/oslog", O_RDWR)) < 0) { + if (errno == EINTR) { + continue; + } + if (errno == ENOENT) { + return; + } + DISPATCH_INTERNAL_CRASH(errno, "Unable to open 
/dev/oslog"); + } + + while (ioctl(fd, LOGBUFFERMAP, &fb_map) < 0) { + if (errno == EINTR) { + continue; + } + DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); + } + if (fb_map.fbmi_size != + FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) { + DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); + } + + fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); + fc->fc_is_kernel = true; + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, + fs->fs_ipc_queue); + dispatch_set_context(ds, fc); + dispatch_source_set_event_handler_f(ds, + firehose_client_kernel_source_handle_event); + fc->fc_kernel_source = ds; + fc->fc_use_notifs = true; + fc->fc_sendp = MACH_PORT_DEAD; // causes drain() to call notify + + fs->fs_kernel_fd = fd; + fs->fs_kernel_client = fc; +#endif +} + +void +_firehose_client_dispose(firehose_client_t fc) +{ + vm_deallocate(mach_task_self(), (vm_address_t)fc->fc_buffer, + sizeof(*fc->fc_buffer)); + fc->fc_buffer = NULL; + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL); +} + +void +_firehose_client_xref_dispose(firehose_client_t fc) +{ + _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", + firehose_client_get_unique_pid(fc, NULL)); + + dispatch_release(fc->fc_io_source); + fc->fc_io_source = NULL; + + dispatch_release(fc->fc_mem_source); + fc->fc_mem_source = NULL; +} + +uint64_t +firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) +{ + firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header; + if (fc->fc_is_kernel) { + if (pid_out) *pid_out = 0; + return 0; + } + if (pid_out) *pid_out = fbh->fbh_pid ?: ~(pid_t)0; + return fbh->fbh_uniquepid ?: ~0ull; +} + +void * +firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size) +{ + firehose_buffer_header_t fbh = &client->fc_buffer->fb_header; + + *size = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + return (void *)((uintptr_t)(fbh + 1) - *size); +} + 
+void * +firehose_client_get_context(firehose_client_t fc) +{ + return os_atomic_load2o(fc, fc_ctxt, relaxed); +} + +void * +firehose_client_set_context(firehose_client_t fc, void *ctxt) +{ + return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed); +} + +#pragma mark - +#pragma mark firehose server + +/* + * The current_message context stores the client info for the current message + * being handled. The only reason this works is because currently the message + * processing is serial. If that changes, this would not work. + */ +static firehose_client_t cur_client_info; + +static void +firehose_server_handle_mach_event(void *ctx OS_UNUSED, + dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, + mach_error_t error OS_UNUSED) +{ + mach_msg_header_t *msg_hdr = NULL; + + if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) { + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); + /* TODO: Assert this should be a register message */ + firehose_server_demux(NULL, msg_hdr); + } +} + +void +firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) +{ + struct firehose_server_s *fs = &server_config; + dispatch_queue_attr_t attr; + dispatch_mach_t dm; + + // just reference the string so that it's captured + (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed); + + attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, + QOS_CLASS_USER_INITIATED, 0); + fs->fs_ipc_queue = dispatch_queue_create_with_target( + "com.apple.firehose.ipc", attr, NULL); + fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( + "com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL); + fs->fs_io_drain_queue = dispatch_queue_create_with_target( + "com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL); + fs->fs_mem_drain_queue = dispatch_queue_create_with_target( + "com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL); + + dm = dispatch_mach_create_f("com.apple.firehose.listener", + fs->fs_ipc_queue, NULL, 
firehose_server_handle_mach_event); + fs->fs_bootstrap_port = comm_port; + fs->fs_mach_channel = dm; + fs->fs_handler = _Block_copy(handler); + firehose_kernel_client_create(); +} + +void +firehose_server_assert_spi_version(uint32_t spi_version) +{ + if (spi_version != OS_FIREHOSE_SPI_VERSION) { + DISPATCH_CLIENT_CRASH(spi_version, "firehose server version mismatch (" + OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")"); + } + if (_firehose_spi_version != OS_FIREHOSE_SPI_VERSION) { + DISPATCH_CLIENT_CRASH(_firehose_spi_version, + "firehose libdispatch version mismatch (" + OS_STRINGIFY(OS_FIREHOSE_SPI_VERSION) ")"); + + } +} + +void +firehose_server_resume(void) +{ + struct firehose_server_s *fs = &server_config; + + if (fs->fs_kernel_client) { + dispatch_async(fs->fs_io_drain_queue, ^{ + struct firehose_client_connected_info_s fcci = { + .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, + }; + firehose_client_resume(fs->fs_kernel_client, &fcci); + }); + } + dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port, + MACH_PORT_NULL, NULL); +} + +#pragma mark - +#pragma mark firehose snapshot and peeking + +void +firehose_client_metadata_stream_peek(firehose_client_t fc, + firehose_event_t context, bool (^peek_should_start)(void), + bool (^peek)(firehose_buffer_chunk_t fbc)) +{ + if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) { + return dispatch_sync(server_config.fs_mem_drain_queue, ^{ + firehose_client_metadata_stream_peek(fc, + FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek); + }); + } + + if (peek_should_start && !peek_should_start()) { + return; + } + + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; + + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if 
(fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fbc_pos.fbc_stream != firehose_stream_metadata) { + continue; + } + if (!peek(fbc)) { + break; + } + } +} + +OS_NOINLINE OS_COLD +static void +firehose_client_snapshot_finish(firehose_client_t fc, + firehose_snapshot_t snapshot, bool for_io) +{ + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + firehose_snapshot_event_t evt; + uint16_t volatile *fbh_ring; + uint16_t tail, flushed; + uint64_t bitmap; + + bitmap = ~1ULL; + + if (for_io) { + fbh_ring = fbh->fbh_io_ring; + tail = fbh->fbh_ring_tail.frp_io_tail; + flushed = (uint16_t)fc->fc_io_flushed_pos; + evt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER; + } else { + fbh_ring = fbh->fbh_mem_ring; + tail = fbh->fbh_ring_tail.frp_mem_tail; + flushed = (uint16_t)fc->fc_mem_flushed_pos; + evt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER; + } + if ((uint16_t)(flushed - tail) >= FIREHOSE_BUFFER_CHUNK_COUNT) { + fc->fc_memory_corrupted = true; + return; + } + + // remove the pages that we flushed already from the bitmap + for (; tail != flushed; tail++) { + uint16_t idx = tail & FIREHOSE_RING_POS_IDX_MASK; + uint16_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK; + + bitmap &= ~(1ULL << ref); + } + + // Remove pages that are free by AND-ing with the allocating bitmap. + // The load of fbb_bitmap may not be atomic, but it's ok because bits + // being flipped are pages we don't care about snapshotting. 
The worst thing + // that can happen is that we go peek at an unmapped page and we fault it in + bitmap &= fbh->fbh_bank.fbb_bitmap; + + // Then look at all the allocated pages not seen in the ring + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fbc_pos.fbc_flag_io != for_io) { + continue; + } + snapshot->handler(fc, evt, fbc); + } +} + +static void +firehose_snapshot_start(void *ctxt) +{ + firehose_snapshot_t snapshot = ctxt; + firehose_client_t fci; + long n = 0; + + // 0. we need to be on the IO queue so that client connection and/or death + // cannot happen concurrently + dispatch_assert_queue(server_config.fs_io_drain_queue); + + // 1. mark all the clients participating in the current snapshot + // and enter the group for each bit set + TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) { + if (fci->fc_is_kernel) { +#if TARGET_OS_SIMULATOR + continue; +#endif + } + if (slowpath(fci->fc_memory_corrupted)) { + continue; + } + fci->fc_needs_io_snapshot = true; + fci->fc_needs_mem_snapshot = true; + n += 2; + } + if (n) { + // cheating: equivalent to dispatch_group_enter() n times + // without the acquire barriers that we don't need + os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed); + } + + dispatch_async(server_config.fs_mem_drain_queue, ^{ + // 2. make fs_snapshot visible, this is what triggers the snapshot + // logic from _drain() or handle_death(). 
until fs_snapshot is + // published, the bits set above are mostly ignored + server_config.fs_snapshot = snapshot; + + snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); + + dispatch_async(server_config.fs_io_drain_queue, ^{ + firehose_client_t fcj; + + snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); + + // match group_enter from firehose_snapshot() after MEM+IO_START + dispatch_group_leave(snapshot->fs_group); + + // 3. tickle all the clients. the list of clients may have changed + // since step 1, but worry not - new clients don't have + // fc_needs_*_snapshot set so drain is harmless; clients that + // were removed from the list have already left the group + // (see firehose_client_finalize()) + TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) { + if (fcj->fc_is_kernel) { +#if !TARGET_OS_SIMULATOR + firehose_client_kernel_source_handle_event(fcj); +#endif + } else { + dispatch_source_merge_data(fcj->fc_io_source, 1); + dispatch_source_merge_data(fcj->fc_mem_source, 1); + } + } + }); + }); +} + +static void +firehose_snapshot_finish(void *ctxt) +{ + firehose_snapshot_t fs = ctxt; + + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL); + server_config.fs_snapshot = NULL; + + dispatch_release(fs->fs_group); + Block_release(fs->handler); + free(fs); + + // resume the snapshot gate queue to maybe handle the next snapshot + dispatch_resume(server_config.fs_snapshot_gate_queue); +} + +static void +firehose_snapshot_gate(void *ctxt) +{ + // prevent other snapshots from running until done + dispatch_suspend(server_config.fs_snapshot_gate_queue); + dispatch_async_f(server_config.fs_io_drain_queue, ctxt, + firehose_snapshot_start); +} + +void +firehose_snapshot(firehose_snapshot_handler_t handler) +{ + firehose_snapshot_t snapshot = malloc(sizeof(struct firehose_snapshot_s)); + + snapshot->handler = Block_copy(handler); + snapshot->fs_group = dispatch_group_create(); + + // keep the group entered until IO_START and MEM_START 
have been sent + // See firehose_snapshot_start() + dispatch_group_enter(snapshot->fs_group); + dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue, + snapshot, firehose_snapshot_finish); + + dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot, + firehose_snapshot_gate); +} + +#pragma mark - +#pragma mark MiG handler routines + +kern_return_t +firehose_server_register(mach_port_t server_port OS_UNUSED, + mach_port_t mem_port, mach_vm_size_t mem_size, + mach_port_t comm_recvp, mach_port_t comm_sendp, + mach_port_t extra_info_port, mach_vm_size_t extra_info_size) +{ + mach_vm_address_t base_addr = 0; + firehose_client_t fc = NULL; + kern_return_t kr; + struct firehose_client_connected_info_s fcci = { + .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, + }; + + if (mem_size != sizeof(union firehose_buffer_u)) { + return KERN_INVALID_VALUE; + } + + /* + * Request a MACH_NOTIFY_NO_SENDERS notification for recvp. That should + * indicate the client going away. 
+ */ + mach_port_t previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), comm_recvp, + MACH_NOTIFY_NO_SENDERS, 0, comm_recvp, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + return KERN_FAILURE; + } + dispatch_assert(previous == MACH_PORT_NULL); + + /* Map the memory handle into the server address space */ + kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0, + VM_FLAGS_ANYWHERE, mem_port, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + return KERN_NO_SPACE; + } + + if (extra_info_port && extra_info_size) { + mach_vm_address_t addr = 0; + kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0, + VM_FLAGS_ANYWHERE, extra_info_port, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + if (dispatch_assume_zero(kr)) { + mach_vm_deallocate(mach_task_self(), base_addr, mem_size); + return KERN_NO_SPACE; + } + fcci.fcci_data = (void *)(uintptr_t)addr; + fcci.fcci_size = (size_t)extra_info_size; + } + + fc = firehose_client_create((firehose_buffer_t)base_addr, + comm_recvp, comm_sendp); + dispatch_async(server_config.fs_io_drain_queue, ^{ + firehose_client_resume(fc, &fcci); + if (fcci.fcci_size) { + vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data, + fcci.fcci_size); + } + }); + + if (extra_info_port) firehose_mach_port_send_release(extra_info_port); + firehose_mach_port_send_release(mem_port); + return KERN_SUCCESS; +} + +kern_return_t +firehose_server_push_async(mach_port_t server_port OS_UNUSED, + qos_class_t qos, boolean_t for_io, boolean_t expects_notifs) +{ + firehose_client_t fc = cur_client_info; + pthread_priority_t pp = _pthread_qos_class_encode(qos, 0, + _PTHREAD_PRIORITY_ENFORCE_FLAG); + + _dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)", + firehose_client_get_unique_pid(fc, NULL)); + if (!slowpath(fc->fc_memory_corrupted)) { + if (expects_notifs && 
!fc->fc_use_notifs) { + fc->fc_use_notifs = true; + } + firehose_client_push_async_merge(fc, pp, for_io); + } + return KERN_SUCCESS; +} + +kern_return_t +firehose_server_push(mach_port_t server_port OS_UNUSED, + mach_port_t reply_port, qos_class_t qos, boolean_t for_io, + firehose_push_reply_t *push_reply OS_UNUSED) +{ + firehose_client_t fc = cur_client_info; + dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS; + dispatch_block_t block; + dispatch_queue_t q; + + _dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)", + firehose_client_get_unique_pid(fc, NULL)); + + if (slowpath(fc->fc_memory_corrupted)) { + firehose_client_mark_corrupted(fc, reply_port); + return MIG_NO_REPLY; + } + + if (for_io) { + q = server_config.fs_io_drain_queue; + } else { + q = server_config.fs_mem_drain_queue; + } + + block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{ + firehose_client_drain(fc, reply_port, + for_io ? FIREHOSE_DRAIN_FOR_IO : 0); + }); + dispatch_async(q, block); + _Block_release(block); + return MIG_NO_REPLY; +} + +static void +firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr) +{ + const size_t reply_size = + sizeof(union __ReplyUnion__firehose_server_firehose_subsystem); + + cur_client_info = fc; + firehose_mig_server(firehose_server, reply_size, msg_hdr); +} diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h new file mode 100644 index 000000000..799172175 --- /dev/null +++ b/src/firehose/firehose_server_internal.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __FIREHOSE_SERVER_INTERNAL__ +#define __FIREHOSE_SERVER_INTERNAL__ + +OS_OBJECT_CLASS_DECL(firehose_client, object); +#define FIREHOSE_CLIENT_CLASS OS_OBJECT_VTABLE(firehose_client) + +typedef struct firehose_snapshot_s *firehose_snapshot_t; +struct firehose_snapshot_s { + firehose_snapshot_handler_t handler; + dispatch_group_t fs_group; +}; + +struct firehose_client_s { + union { + _OS_OBJECT_HEADER(void *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); + struct _os_object_s fc_as_os_object; + }; + TAILQ_ENTRY(firehose_client_s) fc_entry; + + firehose_buffer_t fc_buffer; + uint64_t volatile fc_mem_sent_flushed_pos; + uint64_t volatile fc_mem_flushed_pos; + uint64_t volatile fc_io_sent_flushed_pos; + uint64_t volatile fc_io_flushed_pos; + + void *volatile fc_ctxt; + + union { + dispatch_mach_t fc_mach_channel; + dispatch_source_t fc_kernel_source; + }; + dispatch_source_t fc_io_source; + dispatch_source_t fc_mem_source; + mach_port_t fc_recvp; + mach_port_t fc_sendp; + bool fc_use_notifs; + bool fc_memory_corrupted; + bool fc_needs_io_snapshot; + bool fc_needs_mem_snapshot; + bool fc_is_kernel; +}; + +void +_firehose_client_xref_dispose(struct firehose_client_s *fc); +void +_firehose_client_dispose(struct firehose_client_s *fc); + +extern unsigned char __libfirehose_serverVersionString[]; +extern double __libfirehose_serverVersionNumber; + +#endif // __FIREHOSE_SERVER_INTERNAL__ diff --git a/src/firehose/firehose_server_object.m b/src/firehose/firehose_server_object.m new file mode 100644 index 
000000000..6965ca0f5 --- /dev/null +++ b/src/firehose/firehose_server_object.m @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if !USE_OBJC || _OS_OBJECT_OBJC_ARC +#error the firehose server requires the objc-runtime, no ARC +#endif + +@implementation OS_OBJECT_CLASS(firehose_client) +DISPATCH_UNAVAILABLE_INIT() ++ (void)load { } + +- (void)_xref_dispose +{ + _firehose_client_xref_dispose((struct firehose_client_s *)self); + [super _xref_dispose]; +} + +- (void)_dispose +{ + _firehose_client_dispose((struct firehose_client_s *)self); + [super _dispose]; +} + +- (NSString *)debugDescription +{ + return nil; +} +@end diff --git a/src/firehose/firehose_types.defs b/src/firehose/firehose_types.defs new file mode 100644 index 000000000..9462fd808 --- /dev/null +++ b/src/firehose/firehose_types.defs @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2015 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include +#include + +import ; +import ; + +type firehose_push_reply_t = struct [2] of uint64_t; +type qos_class_t = unsigned; diff --git a/src/init.c b/src/init.c index 0aff191f0..45cbff3bf 100644 --- a/src/init.c +++ b/src/init.c @@ -58,70 +58,110 @@ dispatch_atfork_parent(void) #pragma mark - #pragma mark dispatch_globals +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue, 10.12, 10.0, 10.0, 3.0); +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue_not, 10.12, 10.0, 10.0, 3.0); +DISPATCH_HIDE_SYMBOL(dispatch_queue_create_with_target, 10.12, 10.0, 10.0, 3.0); + #if DISPATCH_COCOA_COMPAT -void (*dispatch_begin_thread_4GC)(void); -void (*dispatch_end_thread_4GC)(void); void *(*_dispatch_begin_NSAutoReleasePool)(void); void (*_dispatch_end_NSAutoReleasePool)(void *); #endif -#if !DISPATCH_USE_DIRECT_TSD +#if DISPATCH_USE_THREAD_LOCAL_STORAGE +__thread struct dispatch_tsd __dispatch_tsd; +pthread_key_t __dispatch_tsd_key; +#elif !DISPATCH_USE_DIRECT_TSD pthread_key_t dispatch_queue_key; -pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_frame_key; pthread_key_t dispatch_cache_key; -pthread_key_t dispatch_io_key; -pthread_key_t dispatch_apply_key; +pthread_key_t dispatch_context_key; +pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif -#endif // !DISPATCH_USE_DIRECT_TSD +pthread_key_t dispatch_sema4_key; +pthread_key_t 
dispatch_voucher_key; +pthread_key_t dispatch_deferred_items_key; +#endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; -_voucher_atm_t _voucher_task_atm; -_voucher_activity_t _voucher_activity_default; +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +mach_voucher_t _voucher_default_task_mach_voucher; #endif -voucher_activity_mode_t _voucher_activity_mode; +dispatch_once_t _firehose_task_buffer_pred; +firehose_buffer_t _firehose_task_buffer; +const uint32_t _firehose_spi_version = OS_FIREHOSE_SPI_VERSION; +uint64_t _voucher_unique_pid; +voucher_activity_hooks_t _voucher_libtrace_hooks; +dispatch_mach_t _voucher_activity_debug_channel; +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG int _dispatch_set_qos_class_enabled; +#endif +#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD +int _dispatch_kevent_workqueue_enabled; +#endif +#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT && \ + DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +int _dispatch_evfilt_machport_direct_enabled; +#endif +DISPATCH_HW_CONFIG(); +uint8_t _dispatch_unsafe_fork; +bool _dispatch_child_of_unsafe_fork; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +bool _dispatch_memory_warn; +int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; +#endif DISPATCH_NOINLINE -voucher_activity_mode_t -voucher_activity_get_mode(void) +bool +_dispatch_is_multithreaded(void) { - return _voucher_activity_mode; + return _dispatch_is_multithreaded_inline(); } -void -voucher_activity_set_mode_4libtrace(voucher_activity_mode_t mode) +DISPATCH_NOINLINE +bool +_dispatch_is_fork_of_multithreaded_parent(void) { - if (_voucher_activity_disabled()) return; - _voucher_activity_mode = mode; + return _dispatch_child_of_unsafe_fork; } -DISPATCH_HW_CONFIG(); -bool _dispatch_safe_fork = true, _dispatch_child_of_unsafe_fork; - DISPATCH_NOINLINE -bool 
-_dispatch_is_multithreaded(void) +void +_dispatch_fork_becomes_unsafe_slow(void) { - return !_dispatch_safe_fork; + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { + DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + } } DISPATCH_NOINLINE -bool -_dispatch_is_fork_of_multithreaded_parent(void) +void +_dispatch_prohibit_transition_to_multithreaded(bool prohibit) { - return _dispatch_child_of_unsafe_fork; + if (prohibit) { + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { + DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); + } + } else { + os_atomic_and(&_dispatch_unsafe_fork, + (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + } } const struct dispatch_queue_offsets_s dispatch_queue_offsets = { - .dqo_version = 5, + .dqo_version = 6, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), .dqo_label_size = sizeof(((dispatch_queue_t)NULL)->dq_label), .dqo_flags = 0, @@ -130,30 +170,16 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_serialnum_size = sizeof(((dispatch_queue_t)NULL)->dq_serialnum), .dqo_width = offsetof(struct dispatch_queue_s, dq_width), .dqo_width_size = sizeof(((dispatch_queue_t)NULL)->dq_width), - .dqo_running = offsetof(struct dispatch_queue_s, dq_running), - .dqo_running_size = sizeof(((dispatch_queue_t)NULL)->dq_running), - .dqo_suspend_cnt = offsetof(struct dispatch_queue_s, do_suspend_cnt), - .dqo_suspend_cnt_size = sizeof(((dispatch_queue_t)NULL)->do_suspend_cnt), + .dqo_running = 0, + .dqo_running_size = 0, + .dqo_suspend_cnt = 0, + .dqo_suspend_cnt_size = 0, .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq), .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq), .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority), 
.dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority), }; -#if VOUCHER_USE_MACH_VOUCHER -const struct voucher_offsets_s voucher_offsets = { - .vo_version = 1, - .vo_activity_ids_count = offsetof(struct voucher_s, v_activities), - .vo_activity_ids_count_size = sizeof(((voucher_t)NULL)->v_activities), - .vo_activity_ids_array = (uint16_t)_voucher_activity_ids((voucher_t)(NULL)), - .vo_activity_ids_array_entry_size = sizeof(voucher_activity_id_t), -}; -#else // VOUCHER_USE_MACH_VOUCHER -const struct voucher_offsets_s voucher_offsets = { - .vo_version = 0, -}; -#endif // VOUCHER_USE_MACH_VOUCHER - #if DISPATCH_USE_DIRECT_TSD const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { .dti_version = 2, @@ -161,26 +187,21 @@ const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { .dti_voucher_index = dispatch_voucher_key, .dti_qos_class_index = dispatch_priority_key, }; -#else // DISPATCH_USE_DIRECT_TSD -#error Not implemented on this platform #endif // DISPATCH_USE_DIRECT_TSD // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_main_q = { - .do_vtable = DISPATCH_VTABLE(queue), + DISPATCH_GLOBAL_OBJECT_HEADER(queue_main), #if !DISPATCH_USE_RESOLVERS .do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .dq_label = "com.apple.main-thread", - .dq_running = 1, .dq_width = 1, - .dq_is_thread_bound = 1, + .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 1, }; @@ -188,36 +209,50 @@ struct dispatch_queue_s _dispatch_main_q = { #pragma mark - #pragma mark dispatch_queue_attr_t -#define DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, overcommit, concurrent) \ +#define 
DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \ { \ - .do_vtable = DISPATCH_VTABLE(queue_attr), \ - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ - .do_next = DISPATCH_OBJECT_LISTLESS, \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ .dqa_qos_class = (qos), \ .dqa_relative_priority = (qos) ? (prio) : 0, \ .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ + .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ .dqa_concurrent = (concurrent), \ + .dqa_inactive = (inactive), \ + } + +#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \ + { \ + [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ + qos, prio, overcommit, freq, concurrent, false), \ + [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ + qos, prio, overcommit, freq, concurrent, true), \ } -#define DISPATCH_QUEUE_ATTR_KIND_INIT(qos, prio) \ +#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \ { \ - [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 1), \ - [DQA_INDEX_NON_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, disabled, 0), \ - [DQA_INDEX_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 1), \ - [DQA_INDEX_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, enabled, 0), \ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 1),\ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_INITIALIZER(qos, prio, unspecified, 0),\ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \ + 
[DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \ } #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ - [prio] = DISPATCH_QUEUE_ATTR_KIND_INIT(qos, -(prio)) + [prio] = { \ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \ + [DQA_INDEX_NON_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ + [DQA_INDEX_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ { \ @@ -244,11 +279,13 @@ struct dispatch_queue_s _dispatch_main_q = { DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased -// to array member [0][0][0][0] and their properties must match! +// to array member [0][0][0][0][0][0] and their properties must match! 
const struct dispatch_queue_attr_s _dispatch_queue_attrs[] [DISPATCH_QUEUE_ATTR_PRIO_COUNT] [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] = { + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] + [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT] = { DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE), DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND), @@ -258,6 +295,20 @@ const struct dispatch_queue_attr_s _dispatch_queue_attrs[] DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INTERACTIVE), }; +#if DISPATCH_VARIANT_STATIC +// +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = + DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + unspecified, INHERIT, 1, false); +#endif // DISPATCH_VARIANT_STATIC + +// _dispatch_queue_attr_concurrent is aliased using libdispatch.aliases +// and the -alias_list linker option on Darwin but needs to be done manually +// for other platforms. 
+#ifndef __APPLE__ +extern struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent + __attribute__((__alias__("_dispatch_queue_attrs"))); +#endif #pragma mark - #pragma mark dispatch_vtables @@ -272,33 +323,72 @@ DISPATCH_VTABLE_INSTANCE(semaphore, DISPATCH_VTABLE_INSTANCE(group, .do_type = DISPATCH_GROUP_TYPE, .do_kind = "group", - .do_dispose = _dispatch_semaphore_dispose, - .do_debug = _dispatch_semaphore_debug, + .do_dispose = _dispatch_group_dispose, + .do_debug = _dispatch_group_debug, ); DISPATCH_VTABLE_INSTANCE(queue, - .do_type = DISPATCH_QUEUE_TYPE, + .do_type = DISPATCH_QUEUE_LEGACY_TYPE, .do_kind = "queue", .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, .do_invoke = _dispatch_queue_invoke, - .do_probe = _dispatch_queue_probe, + .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_kind = "serial-queue", + .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, + .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_queue_wakeup, + .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue, + .do_type = DISPATCH_QUEUE_CONCURRENT_TYPE, + .do_kind = "concurrent-queue", + .do_dispose = _dispatch_queue_dispose, + .do_suspend = _dispatch_queue_suspend, + .do_resume = _dispatch_queue_resume, + .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_queue_wakeup, + .do_debug = dispatch_queue_debug, + .do_set_targetq = _dispatch_queue_set_target_queue, +); + + DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, - 
.do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, .do_kind = "global-queue", .do_dispose = _dispatch_pthread_root_queue_dispose, - .do_probe = _dispatch_root_queue_probe, + .do_wakeup = _dispatch_root_queue_wakeup, + .do_debug = dispatch_queue_debug, +); + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_kind = "main-queue", + .do_dispose = _dispatch_queue_dispose, + .do_invoke = _dispatch_queue_invoke, + .do_wakeup = _dispatch_main_queue_wakeup, .do_debug = dispatch_queue_debug, ); DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, - .do_type = DISPATCH_QUEUE_ROOT_TYPE, + .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE, .do_kind = "runloop-queue", .do_dispose = _dispatch_runloop_queue_dispose, .do_invoke = _dispatch_queue_invoke, - .do_probe = _dispatch_runloop_queue_probe, + .do_wakeup = _dispatch_runloop_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -306,7 +396,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", .do_invoke = _dispatch_mgr_thread, - .do_probe = _dispatch_mgr_queue_probe, + .do_wakeup = _dispatch_mgr_queue_wakeup, .do_debug = dispatch_queue_debug, ); @@ -314,8 +404,8 @@ DISPATCH_VTABLE_INSTANCE(queue_specific_queue, .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, .do_kind = "queue-context", .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_invoke = (void*)_dispatch_queue_invoke, - .do_probe = (void *)_dispatch_queue_probe, + .do_invoke = (void *)_dispatch_queue_invoke, + .do_wakeup = (void *)_dispatch_queue_wakeup, .do_debug = (void *)dispatch_queue_debug, ); @@ -328,18 +418,27 @@ DISPATCH_VTABLE_INSTANCE(source, .do_type = DISPATCH_SOURCE_KEVENT_TYPE, .do_kind = "kevent-source", .do_dispose = _dispatch_source_dispose, + .do_suspend = (void *)_dispatch_queue_suspend, + .do_resume = (void *)_dispatch_queue_resume, + .do_finalize_activation = _dispatch_source_finalize_activation, .do_invoke = 
_dispatch_source_invoke, - .do_probe = _dispatch_source_probe, + .do_wakeup = _dispatch_source_wakeup, .do_debug = _dispatch_source_debug, + .do_set_targetq = (void *)_dispatch_queue_set_target_queue, ); +#if HAVE_MACH DISPATCH_VTABLE_INSTANCE(mach, .do_type = DISPATCH_MACH_CHANNEL_TYPE, .do_kind = "mach-channel", .do_dispose = _dispatch_mach_dispose, + .do_suspend = (void *)_dispatch_queue_suspend, + .do_resume = (void *)_dispatch_queue_resume, + .do_finalize_activation = _dispatch_mach_finalize_activation, .do_invoke = _dispatch_mach_invoke, - .do_probe = _dispatch_mach_probe, + .do_wakeup = _dispatch_mach_wakeup, .do_debug = _dispatch_mach_debug, + .do_set_targetq = (void *)_dispatch_queue_set_target_queue, ); DISPATCH_VTABLE_INSTANCE(mach_msg, @@ -349,8 +448,9 @@ DISPATCH_VTABLE_INSTANCE(mach_msg, .do_invoke = _dispatch_mach_msg_invoke, .do_debug = _dispatch_mach_msg_debug, ); +#endif // HAVE_MACH -#if !USE_OBJC +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_VTABLE_INSTANCE(data, .do_type = DISPATCH_DATA_TYPE, .do_kind = "data", @@ -364,6 +464,7 @@ DISPATCH_VTABLE_INSTANCE(io, .do_kind = "channel", .do_dispose = _dispatch_io_dispose, .do_debug = _dispatch_io_debug, + .do_set_targetq = _dispatch_io_set_target_queue, ); DISPATCH_VTABLE_INSTANCE(operation, @@ -379,18 +480,41 @@ DISPATCH_VTABLE_INSTANCE(disk, .do_dispose = _dispatch_disk_dispose, ); + +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_kind = "dc-redirect", + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_kind = "dc-mach-send-drain", + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_kind = "dc-mach-send-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_kind = "dc-mach-recv-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), +#endif +#if 
HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_kind = "dc-override-stealing", + .do_invoke = _dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_kind = "dc-override-owning", + .do_invoke = _dispatch_queue_override_invoke), +#endif +}; + void _dispatch_vtable_init(void) { -#if USE_OBJC +#if OS_OBJECT_HAVE_OBJC2 // ObjC classes and dispatch vtables are co-located via linker order and // alias files, verify correct layout during initialization rdar://10640168 - DISPATCH_OBJC_CLASS_DECL(semaphore); - dispatch_assert((char*)DISPATCH_VTABLE(semaphore) - - (char*)DISPATCH_OBJC_CLASS(semaphore) == 0); dispatch_assert((char*)&DISPATCH_CONCAT(_,DISPATCH_CLASS(semaphore_vtable)) - - (char*)DISPATCH_OBJC_CLASS(semaphore) == - sizeof(_os_object_class_s)); + - (char*)DISPATCH_VTABLE(semaphore) == + offsetof(struct dispatch_semaphore_vtable_s, _os_obj_vtable)); #endif // USE_OBJC } @@ -407,12 +531,20 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) size_t bufsz = sizeof(_dispatch_build); sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0); +#if TARGET_IPHONE_SIMULATOR + char *sim_version = getenv("SIMULATOR_RUNTIME_BUILD_VERSION"); + if (sim_version) { + (void)strlcat(_dispatch_build, " ", sizeof(_dispatch_build)); + (void)strlcat(_dispatch_build, sim_version, sizeof(_dispatch_build)); + } +#endif // TARGET_IPHONE_SIMULATOR + #else /* * XXXRW: What to do here for !Mac OS X? 
*/ memset(_dispatch_build, 0, sizeof(_dispatch_build)); -#endif +#endif // __APPLE__ } static dispatch_once_t _dispatch_build_pred; @@ -447,19 +579,35 @@ _dispatch_bug_client(const char* msg) _dispatch_bug_log("BUG in libdispatch client: %s", msg); } +#if HAVE_MACH void _dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) { _dispatch_bug_log("BUG in libdispatch client: %s %s - 0x%x", msg, mach_error_string(kr), kr); } +#endif void _dispatch_bug_kevent_client(const char* msg, const char* filter, const char *operation, int err) { - _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", - msg, filter, operation, strerror(err), err); + if (operation && err) { + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", + msg, filter, operation, strerror(err), err); + } else if (operation) { + _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s", + msg, filter, operation); + } else { + _dispatch_bug_log("BUG in libdispatch: %s[%s]: \"%s\" - 0x%x", + msg, filter, strerror(err), err); + } +} + +void +_dispatch_bug_deprecated(const char *msg) +{ + _dispatch_bug_log("DEPRECATED USE in libdispatch client: %s", msg); } void @@ -516,11 +664,11 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) struct timeval tv; gettimeofday(&tv, NULL); #if DISPATCH_DEBUG - dispatch_log_basetime = mach_absolute_time(); + dispatch_log_basetime = _dispatch_absolute_time(); #endif dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), - tv.tv_sec, tv.tv_usec); + tv.tv_sec, (int)tv.tv_usec); } } } @@ -548,7 +696,7 @@ _dispatch_logv_file(const char *msg, va_list ap) #if DISPATCH_DEBUG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t", - mach_absolute_time() - dispatch_log_basetime); + _dispatch_absolute_time() - dispatch_log_basetime); #endif r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); if (r < 0) return; @@ -586,7 +734,7 @@ _dispatch_syslog(const char *msg) static inline 
void _dispatch_vsyslog(const char *msg, va_list ap) { - vsyslog(LOG_NOTICE, msg, *ap_ptr); + vsyslog(LOG_NOTICE, msg, ap); } #endif // DISPATCH_USE_SIMPLE_ASL @@ -630,7 +778,7 @@ static size_t _dispatch_object_debug2(dispatch_object_t dou, char* buf, size_t bufsiz) { DISPATCH_OBJECT_TFB(_dispatch_objc_debug, dou, buf, bufsiz); - if (dou._do->do_vtable->do_debug) { + if (dx_vtable(dou._do)->do_debug) { return dx_debug(dou._do, buf, bufsiz); } return strlcpy(buf, "NULL vtable slot: ", bufsiz); @@ -645,7 +793,7 @@ _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) int r; #if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t", - mach_absolute_time() - dispatch_log_basetime, + _dispatch_absolute_time() - dispatch_log_basetime, (void *)_dispatch_thread_self()); #endif if (dou._do) { @@ -720,14 +868,35 @@ _dispatch_calloc(size_t num_items, size_t size) return buf; } +/** + * If the source string is mutable, allocates memory and copies the contents. + * Otherwise returns the source string. 
+ */ +const char * +_dispatch_strdup_if_mutable(const char *str) +{ +#if HAVE_DYLD_IS_MEMORY_IMMUTABLE + size_t size = strlen(str) + 1; + if (slowpath(!_dyld_is_memory_immutable(str, size))) { + char *clone = (char *) malloc(size); + if (dispatch_assume(clone)) { + memcpy(clone, str, size); + } + return clone; + } + return str; +#else + return strdup(str); +#endif +} + #pragma mark - #pragma mark dispatch_block_t #ifdef __BLOCKS__ -#undef _dispatch_Block_copy -dispatch_block_t -_dispatch_Block_copy(dispatch_block_t db) +void * +(_dispatch_Block_copy)(void *db) { dispatch_block_t rval; @@ -737,7 +906,7 @@ _dispatch_Block_copy(dispatch_block_t db) } return rval; } - DISPATCH_CLIENT_CRASH("NULL was passed where a block should have been"); + DISPATCH_CLIENT_CRASH(0, "NULL was passed where a block should have been"); } void @@ -754,7 +923,8 @@ _dispatch_call_block_and_release(void *block) #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC) +#if DISPATCH_USE_CLIENT_CALLOUT && (__USING_SJLJ_EXCEPTIONS__ || !USE_OBJC || \ + OS_OBJECT_HAVE_OBJC1) // On platforms with SjLj exceptions, avoid the SjLj overhead on every callout // by clearing the unwinder's TSD pointer to the handler stack around callouts @@ -791,6 +961,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_set_unwind_tsd(u); } +#if HAVE_MACH #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -805,6 +976,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, _dispatch_free_unwind_tsd(); _dispatch_set_unwind_tsd(u); } +#endif // HAVE_MACH #endif // DISPATCH_USE_CLIENT_CALLOUT @@ -813,7 +985,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, #if !USE_OBJC -static const _os_object_class_s _os_object_class; +static const _os_object_vtable_s 
_os_object_vtable; void _os_object_init(void) @@ -836,7 +1008,7 @@ _os_object_alloc_realized(const void *cls, size_t size) _os_object_t _os_object_alloc(const void *cls, size_t size) { - if (!cls) cls = &_os_object_class; + if (!cls) cls = &_os_object_vtable; return _os_object_alloc_realized(cls, size); } @@ -890,7 +1062,9 @@ os_release(void *obj) #if DISPATCH_COCOA_COMPAT -void *_dispatch_autorelease_pool_push(void) { +void* +_dispatch_autorelease_pool_push(void) +{ void *pool = NULL; if (_dispatch_begin_NSAutoReleasePool) { pool = _dispatch_begin_NSAutoReleasePool(); @@ -898,12 +1072,26 @@ void *_dispatch_autorelease_pool_push(void) { return pool; } -void _dispatch_autorelease_pool_pop(void *pool) { +void +_dispatch_autorelease_pool_pop(void *pool) +{ if (_dispatch_end_NSAutoReleasePool) { _dispatch_end_NSAutoReleasePool(pool); } } +void* +_dispatch_last_resort_autorelease_pool_push(void) +{ + return _dispatch_autorelease_pool_push(); +} + +void +_dispatch_last_resort_autorelease_pool_pop(void *pool) +{ + _dispatch_autorelease_pool_pop(pool); +} + #endif // DISPATCH_COCOA_COMPAT #endif // !USE_OBJC @@ -940,6 +1128,23 @@ const struct dispatch_source_type_s _dispatch_source_type_timer = { .init = dispatch_source_type_timer_init, }; +static void +dispatch_source_type_after_init(dispatch_source_t ds, + dispatch_source_type_t type, uintptr_t handle, unsigned long mask, + dispatch_queue_t q) +{ + dispatch_source_type_timer_init(ds, type, handle, mask, q); + ds->ds_needs_rearm = false; + ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER; +} + +const struct dispatch_source_type_s _dispatch_source_type_after = { + .ke = { + .filter = DISPATCH_EVFILT_TIMER, + }, + .init = dispatch_source_type_after_init, +}; + static void dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, dispatch_source_type_t type, uintptr_t handle, unsigned long mask, @@ -984,7 +1189,6 @@ const struct dispatch_source_type_s _dispatch_source_type_interval = { .init = 
dispatch_source_type_interval_init, }; -#if !DISPATCH_USE_SELECT_FALLBACK || DISPATCH_DYNAMIC_SELECT_FALLBACK static void dispatch_source_type_readwrite_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -992,20 +1196,18 @@ dispatch_source_type_readwrite_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { - ds->ds_dkev->dk_kevent.flags |= EV_UDATA_SPECIFIC; - ds->ds_is_direct_kevent = true; + ds->ds_is_level = true; +#ifdef HAVE_DECL_NOTE_LOWAT // bypass kernel check for device kqueue support rdar://19004921 ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; +#endif ds->ds_dkev->dk_kevent.data = 1; } -#else -#define dispatch_source_type_readwrite_init NULL -#endif const struct dispatch_source_type_s _dispatch_source_type_read = { .ke = { .filter = EVFILT_READ, - .flags = EV_DISPATCH, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_readwrite_init, }; @@ -1013,7 +1215,7 @@ const struct dispatch_source_type_s _dispatch_source_type_read = { const struct dispatch_source_type_s _dispatch_source_type_write = { .ke = { .filter = EVFILT_WRITE, - .flags = EV_DISPATCH, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_readwrite_init, }; @@ -1034,14 +1236,14 @@ _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) } #endif +#if TARGET_IPHONE_SIMULATOR static void -dispatch_source_type_memorystatus_init(dispatch_source_t ds, +dispatch_source_type_memorypressure_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, uintptr_t handle DISPATCH_UNUSED, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { -#if TARGET_IPHONE_SIMULATOR static dispatch_once_t pred; dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; @@ -1052,38 +1254,42 @@ dispatch_source_type_memorystatus_init(dispatch_source_t ds, 
ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; ds->ds_ident_hack = handle; ds->ds_pending_data_mask = mask; - ds->ds_memorystatus_override = 1; -#endif - ds->ds_is_level = false; + ds->ds_memorypressure_override = 1; } +#else +#define dispatch_source_type_memorypressure_init NULL +#endif #ifndef NOTE_MEMORYSTATUS_LOW_SWAP #define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 #endif -const struct dispatch_source_type_s _dispatch_source_type_memorystatus = { +const struct dispatch_source_type_s _dispatch_source_type_memorypressure = { .ke = { .filter = EVFILT_MEMORYSTATUS, .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN - |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP, - .init = dispatch_source_type_memorystatus_init, + |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP + |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL, + .init = dispatch_source_type_memorypressure_init, }; static void dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type, - uintptr_t handle, - unsigned long mask, - dispatch_queue_t q) + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) { - // Map legacy vm pressure to memorystatus warning rdar://problem/15907505 + // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; ds->ds_pending_data_mask = mask; ds->ds_vmpressure_override = 1; - dispatch_source_type_memorystatus_init(ds, type, handle, mask, q); +#if TARGET_IPHONE_SIMULATOR + dispatch_source_type_memorypressure_init(ds, type, handle, mask, q); +#endif } const struct dispatch_source_type_s _dispatch_source_type_vm = { @@ -1097,27 +1303,24 @@ const struct dispatch_source_type_s _dispatch_source_type_vm = { #elif DISPATCH_USE_VM_PRESSURE -static void 
-dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_level = false; -} - const struct dispatch_source_type_s _dispatch_source_type_vm = { .ke = { .filter = EVFILT_VM, .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, }, .mask = NOTE_VM_PRESSURE, - .init = dispatch_source_type_vm_init, }; #endif // DISPATCH_USE_VM_PRESSURE +const struct dispatch_source_type_s _dispatch_source_type_signal = { + .ke = { + .filter = EVFILT_SIGNAL, + .flags = EV_UDATA_SPECIFIC, + }, +}; + +#if !defined(__linux__) static void dispatch_source_type_proc_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -1144,20 +1347,16 @@ const struct dispatch_source_type_s _dispatch_source_type_proc = { .init = dispatch_source_type_proc_init, }; -const struct dispatch_source_type_s _dispatch_source_type_signal = { - .ke = { - .filter = EVFILT_SIGNAL, - .flags = EV_UDATA_SPECIFIC, - }, -}; - const struct dispatch_source_type_s _dispatch_source_type_vnode = { .ke = { .filter = EVFILT_VNODE, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, + .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, }, .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| - NOTE_RENAME|NOTE_REVOKE + NOTE_RENAME|NOTE_FUNLOCK +#if HAVE_DECL_NOTE_REVOKE + |NOTE_REVOKE +#endif #if HAVE_DECL_NOTE_NONE |NOTE_NONE #endif @@ -1176,6 +1375,9 @@ const struct dispatch_source_type_s _dispatch_source_type_vfs = { #endif #if HAVE_DECL_VQ_VERYLOWDISK |VQ_VERYLOWDISK +#endif +#if HAVE_DECL_VQ_QUOTA + |VQ_QUOTA #endif , }; @@ -1184,7 +1386,7 @@ const struct dispatch_source_type_s _dispatch_source_type_sock = { #ifdef EVFILT_SOCK .ke = { .filter = EVFILT_SOCK, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, + .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, }, .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | 
NOTE_SUSPEND | @@ -1194,12 +1396,15 @@ const struct dispatch_source_type_s _dispatch_source_type_sock = { #endif #ifdef NOTE_CONNECTED | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED +#endif +#ifdef NOTE_NOTIFY_ACK + | NOTE_NOTIFY_ACK #endif , #endif // EVFILT_SOCK }; +#endif // !defined(__linux__) -#if DISPATCH_USE_EV_UDATA_SPECIFIC static void dispatch_source_type_data_init(dispatch_source_t ds, dispatch_source_type_t type DISPATCH_UNUSED, @@ -1207,11 +1412,12 @@ dispatch_source_type_data_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { + ds->ds_is_installed = true; + ds->ds_is_custom_source = true; + ds->ds_is_direct_kevent = true; + ds->ds_pending_data_mask = ~0ul; ds->ds_needs_rearm = false; // not registered with kevent } -#else -#define dispatch_source_type_data_init NULL -#endif const struct dispatch_source_type_s _dispatch_source_type_data_add = { .ke = { @@ -1261,14 +1467,19 @@ dispatch_source_type_mach_recv_init(dispatch_source_t ds, unsigned long mask DISPATCH_UNUSED, dispatch_queue_t q DISPATCH_UNUSED) { - ds->ds_is_level = false; + ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (_dispatch_evfilt_machport_direct_enabled) return; + ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE; + ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + ds->ds_is_direct_kevent = false; +#endif } const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { .ke = { .filter = EVFILT_MACHPORT, - .flags = EV_DISPATCH, - .fflags = DISPATCH_MACH_RECV_MESSAGE, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, }, .init = dispatch_source_type_mach_recv_init, }; diff --git a/src/inline_internal.h b/src/inline_internal.h index 5cc4cd884..d1c73dd4e 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -38,10 +38,12 @@ DISPATCH_NOTHROW void _dispatch_client_callout(void *ctxt, dispatch_function_t f); 
DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); +#if HAVE_MACH DISPATCH_NOTHROW void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error, dispatch_mach_handler_function_t f); +#endif // HAVE_MACH #else // !DISPATCH_USE_CLIENT_CALLOUT @@ -59,6 +61,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) return f(ctxt, i); } +#if HAVE_MACH DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -67,39 +70,143 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, { return f(ctxt, reason, dmsg, error); } +#endif // HAVE_MACH #endif // !DISPATCH_USE_CLIENT_CALLOUT -#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) - #pragma mark - #pragma mark _os_object_t & dispatch_object_t +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_has_vtable(dispatch_object_t dou) +{ + uintptr_t dc_flags = dou._dc->dc_flags; + + // vtables are pointers far away from the low page in memory + return dc_flags > 0xffful; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_continuation(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return dx_metatype(dou._do) == _DISPATCH_CONTINUATION_TYPE; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_has_type(dispatch_object_t dou, unsigned long type) +{ + return _dispatch_object_has_vtable(dou) && dx_type(dou._do) == type; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_redirection(dispatch_object_t dou) +{ + return _dispatch_object_has_type(dou, + DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT)); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_barrier(dispatch_object_t dou) +{ + dispatch_queue_flags_t dq_flags; + + if (!_dispatch_object_has_vtable(dou)) { + return (dou._dc->dc_flags & 
DISPATCH_OBJ_BARRIER_BIT); + } + switch (dx_metatype(dou._do)) { + case _DISPATCH_QUEUE_TYPE: + case _DISPATCH_SOURCE_TYPE: + dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed); + return dq_flags & DQF_BARRIER_BIT; + default: + return false; + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_item(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_non_barrier(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return ((dou._dc->dc_flags & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == + (DISPATCH_OBJ_SYNC_SLOW_BIT)); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_slow_barrier(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return ((dou._dc->dc_flags & + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)); +} DISPATCH_ALWAYS_INLINE static inline _os_object_t _os_object_retain_internal_inline(_os_object_t obj) { int ref_cnt = _os_object_refcnt_inc(obj); - if (slowpath(ref_cnt <= 0)) { - DISPATCH_CRASH("Resurrection of an object"); + if (unlikely(ref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; } +DISPATCH_ALWAYS_INLINE +static inline void +_os_object_release_internal_inline_no_dispose(_os_object_t obj) +{ + int ref_cnt = _os_object_refcnt_dec(obj); + if (likely(ref_cnt >= 0)) { + return; + } + if (ref_cnt == 0) { + _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object"); + } + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); +} + DISPATCH_ALWAYS_INLINE static inline void _os_object_release_internal_inline(_os_object_t obj) { int ref_cnt = _os_object_refcnt_dec(obj); - if (fastpath(ref_cnt >= 0)) { + if (likely(ref_cnt >= 0)) { return; 
} - if (slowpath(ref_cnt < -1)) { - DISPATCH_CRASH("Over-release of an object"); + if (unlikely(ref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } #if DISPATCH_DEBUG - if (slowpath(obj->os_obj_xref_cnt >= 0)) { - DISPATCH_CRASH("Release while external references exist"); + int xref_cnt = obj->os_obj_xref_cnt; + if (unlikely(xref_cnt >= 0)) { + DISPATCH_INTERNAL_CRASH(xref_cnt, + "Release while external references exist"); } #endif // _os_object_refcnt_dispose_barrier() is in _os_object_dispose() @@ -120,74 +227,1198 @@ _dispatch_release(dispatch_object_t dou) _os_object_release_internal_inline(dou._os_obj); } +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_tailcall(dispatch_object_t dou) +{ + _os_object_release_internal(dou._os_obj); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL +static inline void +_dispatch_object_set_target_queue_inline(dispatch_object_t dou, + dispatch_queue_t tq) +{ + _dispatch_retain(tq); + tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); + if (tq) _dispatch_release(tq); + _dispatch_object_debug(dou._do, "%s", __func__); +} + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark dispatch_thread +#if DISPATCH_PURE_C + +#define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */ +#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8 +#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN +_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); +#endif + +typedef struct dispatch_deferred_items_s { + uint32_t ddi_magic; + dispatch_queue_t ddi_stashed_dq; + struct dispatch_object_s *ddi_stashed_dou; + dispatch_priority_t ddi_stashed_pp; + int ddi_nevents; + int ddi_maxevents; + _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +} dispatch_deferred_items_s, *dispatch_deferred_items_t; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_deferred_items_set(dispatch_deferred_items_t ddi) +{ + 
_dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_deferred_items_t +_dispatch_deferred_items_get(void) +{ + dispatch_deferred_items_t ddi = (dispatch_deferred_items_t) + _dispatch_thread_getspecific(dispatch_deferred_items_key); + if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) { + return ddi; + } + return NULL; +} + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark dispatch_thread +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline dispatch_thread_context_t +_dispatch_thread_context_find(const void *key) +{ + dispatch_thread_context_t dtc = + _dispatch_thread_getspecific(dispatch_context_key); + while (dtc) { + if (dtc->dtc_key == key) { + return dtc; + } + dtc = dtc->dtc_prev; + } + return NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_context_push(dispatch_thread_context_t ctxt) +{ + ctxt->dtc_prev = _dispatch_thread_getspecific(dispatch_context_key); + _dispatch_thread_setspecific(dispatch_context_key, ctxt); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_context_pop(dispatch_thread_context_t ctxt) +{ + dispatch_assert(_dispatch_thread_getspecific(dispatch_context_key) == ctxt); + _dispatch_thread_setspecific(dispatch_context_key, ctxt->dtc_prev); +} + +typedef struct dispatch_thread_frame_iterator_s { + dispatch_queue_t dtfi_queue; + dispatch_thread_frame_t dtfi_frame; +} *dispatch_thread_frame_iterator_t; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_iterate_start(dispatch_thread_frame_iterator_t it) +{ + _dispatch_thread_getspecific_pair( + dispatch_queue_key, (void **)&it->dtfi_queue, + dispatch_frame_key, (void **)&it->dtfi_frame); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it) +{ + dispatch_thread_frame_t dtf = it->dtfi_frame; + dispatch_queue_t dq = it->dtfi_queue; + + if (dtf) { + if (dq->do_targetq) { + // 
redirections and trysync_f may skip some frames, + // so we need to simulate seeing the missing links + // however the bottom root queue is always present + it->dtfi_queue = dq->do_targetq; + if (it->dtfi_queue == dtf->dtf_queue) { + it->dtfi_frame = dtf->dtf_prev; + } + } else { + it->dtfi_queue = dtf->dtf_queue; + it->dtfi_frame = dtf->dtf_prev; + } + } else if (dq) { + it->dtfi_queue = dq->do_targetq; + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_thread_frame_find_queue(dispatch_queue_t dq) +{ + struct dispatch_thread_frame_iterator_s it; + + _dispatch_thread_frame_iterate_start(&it); + while (it.dtfi_queue) { + if (it.dtfi_queue == dq) { + return true; + } + _dispatch_thread_frame_iterate_next(&it); + } + return false; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_thread_frame_t +_dispatch_thread_frame_get_current(void) +{ + return _dispatch_thread_getspecific(dispatch_frame_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_setspecific(dispatch_frame_key, dtf); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_getspecific_packed_pair( + dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq) +{ + _dispatch_thread_frame_save_state(dtf); + _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + dispatch_frame_key, dtf); + dtf->dtf_deferred = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf, + dispatch_queue_t dq, dispatch_thread_frame_t new_base) +{ + _dispatch_thread_frame_save_state(dtf); + _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + dispatch_frame_key, new_base); + dtf->dtf_deferred = NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline 
void +_dispatch_thread_frame_pop(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_setspecific_packed_pair( + dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_thread_frame_stash(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_getspecific_pair( + dispatch_queue_key, (void **)&dtf->dtf_queue, + dispatch_frame_key, (void **)&dtf->dtf_prev); + _dispatch_thread_frame_pop(dtf->dtf_prev); + return dtf->dtf_queue; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf) +{ + _dispatch_thread_frame_pop(dtf); +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_wqthread_override_start_check_owner(mach_port_t thread, + pthread_priority_t pp, mach_port_t *ulock_addr) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return 0; + return _pthread_workqueue_override_start_direct_check_owner(thread, + pp, ulock_addr); +#else + (void)thread; (void)pp; (void)ulock_addr; + return 0; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_start(mach_port_t thread, + pthread_priority_t pp) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_workqueue_override_start_direct(thread, pp); +#else + (void)thread; (void)pp; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wqthread_override_reset(void) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_workqueue_override_reset(); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t pp, + void *resource) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_qos_override_start_direct(thread, pp, resource); +#else + (void)thread; (void)pp; (void)resource; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline 
void +_dispatch_thread_override_end(mach_port_t thread, void *resource) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (!_dispatch_set_qos_class_enabled) return; + (void)_pthread_qos_override_end_direct(thread, resource); +#else + (void)thread; (void)resource; +#endif +} + +#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_class_is_valid(pthread_priority_t pp) +{ + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT + + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) { + return false; + } + return true; +} +#define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \ + if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \ + DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \ + } \ + }) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_override_is_valid(pthread_priority_t pp) +{ + if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) { + return false; + } + return _dispatch_qos_class_is_valid(pp); +} +#define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \ + if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \ + DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \ + } \ + }) +#else +#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp) +#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp) +#endif + +#endif // DISPATCH_PURE_C +#pragma mark - +#pragma mark dispatch_queue_t state accessors +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags(dispatch_queue_t dq) +{ + return os_atomic_load2o(dq, dq_atomic_flags, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t 
+_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq, + dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) +{ + dispatch_queue_flags_t oflags, nflags; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + nflags = (oflags | add_bits) & ~clr_bits; + }); + return oflags; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq, + dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) +{ + dispatch_queue_flags_t oflags, nflags; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + nflags = (oflags | add_bits) & ~clr_bits; + }); + return nflags; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_flags_t +_dispatch_queue_atomic_flags_clear(dispatch_queue_t dq, + dispatch_queue_flags_t bits) +{ + return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_is_thread_bound(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_cannot_trysync(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_label_needs_free(dispatch_queue_t dq) +{ + return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_queue_autorelease_frequency(dispatch_queue_t dq) +{ + const unsigned long factor = + DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS; + dispatch_static_assert(factor > 0); + + dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq); + + qaf 
&= _DQF_AUTORELEASE_MASK; + return (dispatch_invoke_flags_t)qaf * factor; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_invoke_flags_t +_dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, + dispatch_invoke_flags_t flags) +{ + dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq); + + if (qaf) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= qaf; + } + return flags; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_has_immutable_target(dispatch_queue_t dq) +{ + if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { + return false; + } + return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE; +} + +#endif // DISPATCH_PURE_C +#ifndef __cplusplus + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_suspend_cnt(uint64_t dq_state) +{ + return (uint32_t)(dq_state / DISPATCH_QUEUE_SUSPEND_INTERVAL); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_side_suspend_cnt(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_extract_width_bits(uint64_t dq_state) +{ + dq_state &= DISPATCH_QUEUE_WIDTH_MASK; + return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT); +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_available_width(uint64_t dq_state) +{ + uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; + if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { + return full - _dq_state_extract_width_bits(dq_state); + } + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dq_state_used_width(uint64_t dq_state, uint16_t dq_width) +{ + uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; + uint32_t width = _dq_state_extract_width_bits(dq_state); + + if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) { + // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width + // is pre-reservation that we want to ignore + return width - (full - dq_width) - (dq_width - 1); + } + return width - (full - dq_width); 
+} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_suspended(uint64_t dq_state) +{ + return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; +} +#define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_inactive(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_INACTIVE; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_needs_activation(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_in_barrier(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_IN_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_available_width(uint64_t dq_state) +{ + return !(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_pending_barrier(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_PENDING_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_dirty(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_DIRTY; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_enqueued(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_ENQUEUED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_override(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_owner +_dq_state_drain_owner(uint64_t dq_state) +{ + return _dispatch_lock_owner((dispatch_lock)dq_state); +} +#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \ + _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed)) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_pended(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner) +{ + if (_dq_state_drain_pended(dq_state)) { + return false; + } + return 
_dq_state_drain_owner(dq_state) == owner; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_locked(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_has_waiters(uint64_t dq_state) +{ + return _dispatch_lock_has_waiters((dispatch_lock)dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_sync_runnable(uint64_t dq_state) +{ + return dq_state < DISPATCH_QUEUE_IN_BARRIER; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_runnable(uint64_t dq_state) +{ + return dq_state < DISPATCH_QUEUE_WIDTH_FULL_BIT; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_should_wakeup(uint64_t dq_state) +{ + return _dq_state_is_runnable(dq_state) && + !_dq_state_is_enqueued(dq_state) && + !_dq_state_drain_locked(dq_state); +} + +#endif // __cplusplus +#pragma mark - +#pragma mark dispatch_queue_t state machine +#ifndef __cplusplus + +static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu, + pthread_priority_t pp); +static inline bool _dispatch_queue_need_override_retain( + dispatch_queue_class_t dqu, pthread_priority_t pp); +static inline dispatch_priority_t _dispatch_queue_reset_override_priority( + dispatch_queue_class_t dqu, bool qp_is_floor); +static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, + dispatch_priority_t new_op); +static inline pthread_priority_t _dispatch_get_defaultpriority(void); +static inline void _dispatch_set_defaultpriority_override(void); +static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp); +static inline pthread_priority_t _dispatch_get_priority(void); +static inline pthread_priority_t _dispatch_set_defaultpriority( + pthread_priority_t pp, pthread_priority_t *new_pp); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_xref_dispose(struct dispatch_queue_s *dq) +{ + if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { + // 
Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object"); + } + os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); +} + +#endif +#if DISPATCH_PURE_C + +// Note to later developers: ensure that any initialization changes are +// made for statically allocated queues (i.e. _dispatch_main_q). +static inline void +_dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf, + uint16_t width, bool inactive) +{ + uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width); + + if (inactive) { + dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; + dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume + } + dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT; + os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); + dq->dq_state = dq_state; + dq->dq_override_voucher = DISPATCH_NO_VOUCHER; + dq->dq_serialnum = + os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); +} + +/* Used by: + * - _dispatch_queue_set_target_queue + * - changing dispatch source handlers + * + * Tries to prevent concurrent wakeup of an inactive queue by suspending it. + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) +{ + uint64_t dq_state, value; + + (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if (!fastpath(_dq_state_is_inactive(dq_state))) { + os_atomic_rmw_loop_give_up(return false); + } + value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; + }); + if (slowpath(!_dq_state_is_suspended(dq_state)) || + slowpath(_dq_state_has_side_suspend_cnt(dq_state))) { + // Crashing here means that 128+ dispatch_suspend() calls have been + // made on an inactive object and then dispatch_set_target_queue() or + // dispatch_set_*_handler() has been called. 
+ // + // We don't want to handle the side suspend count in a codepath that + // needs to be fast. + DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() " + "prior to calling dispatch_set_target_queue() " + "or dispatch_set_*_handler()"); + } + return true; +} + +/* Must be used by any caller meaning to do a speculative wakeup when the caller + * was preventing other wakeups (for example dispatch_resume() or a drainer not + * doing a drain_try_unlock() and not observing DIRTY) + * + * In that case this call loads DIRTY with an acquire barrier so that when + * other threads have made changes (such as dispatch_source_cancel()) the + * caller can take these state machine changes into account in its decision to + * wake up the object. + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state, + dispatch_wakeup_flags_t flags) +{ + if (_dq_state_should_wakeup(dq_state)) { + if (slowpath(_dq_state_is_dirty(dq_state))) { + // + // seq_cst wrt state changes that were flushed and not acted upon + os_atomic_thread_fence(acquire); + } + return dx_wakeup(dq, 0, flags); + } + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +} + +/* Used by: + * - _dispatch_queue_class_invoke (normal path) + * - _dispatch_queue_override_invoke (stealer) + * + * Initial state must be { sc:0, ib:0, qf:0, dl:0 } + * Final state forces { dl:self, qf:1, d: 0 } + * ib:1 is forced when the width acquired is equivalent to the barrier width + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline uint64_t +_dispatch_queue_drain_try_lock(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *dq_state) +{ + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t xor_owner_and_set_full_width = + _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT; + uint64_t clear_enqueued_bit, old_state, new_state; + + if (flags & DISPATCH_INVOKE_STEALING) { + 
clear_enqueued_bit = 0; + } else { + clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + new_state ^= clear_enqueued_bit; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + // + // Only keep the HAS_WAITER bit (and ENQUEUED if stealing). + // In particular acquiring the drain lock clears the DIRTY bit + // + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + // + // For the NOWAITERS_BIT case, the thread identity + // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above, + // so the xor below flips the NOWAITERS_BIT to 0 as expected. + // + // For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in + // the thread identity, and the xor leaves the bit alone. + // + new_state ^= xor_owner_and_set_full_width; + if (_dq_state_has_pending_barrier(old_state) || + old_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state |= DISPATCH_QUEUE_IN_BARRIER; + } + } else if (!clear_enqueued_bit) { + os_atomic_rmw_loop_give_up(break); + } + }); + + if (dq_state) *dq_state = new_state; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT; + old_state &= DISPATCH_QUEUE_WIDTH_MASK; + return new_state - old_state; + } + return 0; +} + +/* Used by _dispatch_barrier_{try,}sync + * + * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a + * simple cmpxchg which is significantly faster on Intel, and makes a + * significant difference on the uncontended codepath. 
+ * + * See discussion for DISPATCH_QUEUE_DIRTY in queue_internal.h + * + * Initial state must be `completely idle` + * Final state forces { ib:1, qf:1, w:0 } + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq) +{ + uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + value |= _dispatch_tid_self(); + + return os_atomic_cmpxchg2o(dq, dq_state, + DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire); +} + +/* Used by _dispatch_sync for root queues and some drain codepaths + * + * Root queues have no strict orderning and dispatch_sync() always goes through. + * Drain is the sole setter of `dl` hence can use this non failing version of + * _dispatch_queue_try_acquire_sync(). + * + * Final state: { w += 1 } + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_reserve_sync_width(dispatch_queue_t dq) +{ + (void)os_atomic_add2o(dq, dq_state, + DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); +} + +/* Used by _dispatch_sync on non-serial queues + * + * Initial state must be { sc:0, ib:0, pb:0, d:0 } + * Final state: { w += 1 } + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) +{ + uint64_t dq_state, value; + + return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if (!fastpath(_dq_state_is_sync_runnable(dq_state)) || + slowpath(_dq_state_is_dirty(dq_state)) || + slowpath(_dq_state_has_pending_barrier(dq_state))) { + os_atomic_rmw_loop_give_up(return false); + } + value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + }); +} + +/* Used by _dispatch_apply_redirect + * + * Try to acquire at most da_width and returns what could be acquired, + * possibly 0 + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline uint32_t +_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width) +{ + uint64_t dq_state, value; + uint32_t width; + + 
(void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + width = _dq_state_available_width(dq_state); + if (!fastpath(width)) { + os_atomic_rmw_loop_give_up(return 0); + } + if (width > da_width) { + width = da_width; + } + value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL; + }); + return width; +} + +/* Used by _dispatch_apply_redirect + * + * Release width acquired by _dispatch_queue_try_acquire_width + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width) +{ + (void)os_atomic_sub2o(dq, dq_state, + da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); +} + +/* Used by target-queue recursing code + * + * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 } + * Final state: { w += 1 } + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_acquire_async(dispatch_queue_t dq) +{ + uint64_t dq_state, value; + + return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, { + if (!fastpath(_dq_state_is_runnable(dq_state)) || + slowpath(_dq_state_is_dirty(dq_state)) || + slowpath(_dq_state_has_pending_barrier(dq_state))) { + os_atomic_rmw_loop_give_up(return false); + } + value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + }); +} + +/* Used at the end of Drainers + * + * This adjusts the `owned` width when the next continuation is already known + * to account for its barrierness. + */ +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, + struct dispatch_object_s *next_dc) +{ + uint64_t reservation; + + if (slowpath(dq->dq_width > 1)) { + if (next_dc && _dispatch_object_is_barrier(next_dc)) { + reservation = DISPATCH_QUEUE_PENDING_BARRIER; + reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + owned -= reservation; + } + } + return owned; +} + +/* Used at the end of Drainers + * + * Unlocking fails if the DIRTY bit is seen (and the queue is not suspended). 
+ * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used + * as a signal to renew the drain lock instead of releasing it. + * + * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned` + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned) +{ + uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t new_state; + dispatch_priority_t pp = 0, op; + + do { + if (unlikely(_dq_state_is_dirty(old_state) && + !_dq_state_is_suspended(old_state))) { + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire); + _dispatch_queue_reinstate_override_priority(dq, pp); + return false; + } + new_state = old_state - owned; + if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) || + _dq_state_is_suspended(old_state)) { + // the test for the WIDTH_FULL_BIT is about narrow concurrent queues + // releasing the drain lock while being at the width limit + // + // _non_barrier_complete() will set the DIRTY bit when going back + // under the limit which will cause the try_unlock to fail + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + } else { + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + // This current owner is the only one that can clear HAS_OVERRIDE, + // so accumulating reset overrides here is valid. + if (unlikely(_dq_state_has_override(new_state))) { + new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; + dispatch_assert(!_dispatch_queue_is_thread_bound(dq)); + op = _dispatch_queue_reset_override_priority(dq, false); + if (op > pp) pp = op; + } + } + } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state, + old_state, new_state, &old_state, release))); + + if (_dq_state_has_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + } + return true; +} + +/* Used at the end of Drainers when the next work item is known + * and that the dirty-head check isn't needed. + * + * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen. + */ +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq, + uint64_t owned, mach_port_t next_owner, uint64_t *orig_state) +{ + uint64_t dq_state, value; + +#ifdef DLOCK_NOWAITERS_BIT + // The NOWAITERS_BIT state must not change through the transfer. It means + // that if next_owner is 0 the bit must be flipped in the rmw_loop below, + // and if next_owner is set, then the bit must be left unchanged. + // + // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner, + // which causes the second xor to flip the bit as expected. + // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to + // clear it so that the second xor leaves the NOWAITERS_BIT alone. + next_owner ^= DLOCK_NOWAITERS_BIT; +#endif + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { + value = dq_state - owned; + // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT + // but we want to be more efficient wrt the WAITERS_BIT + value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + value &= ~DISPATCH_QUEUE_DRAIN_PENDED; + value &= ~DISPATCH_QUEUE_DIRTY; + value ^= next_owner; + }); + + if (_dq_state_has_override(dq_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + } + if (orig_state) *orig_state = dq_state; + return value; +} +#define _dispatch_queue_drain_unlock(dq, owned, orig) \ + _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, + uint64_t to_unlock, dispatch_object_t dou) +{ + mach_port_t th_next = 0; + if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { + th_next = (mach_port_t)dou._dc->dc_data; + } + _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL); +} + + #pragma mark - -#pragma mark dispatch_thread +#pragma mark os_mpsc_queue + +// type_t * {volatile,const,_Atomic,...} -> type_t * +// type_t[] -> type_t * +#define os_unqualified_pointer_type(expr) \ + typeof(typeof(*(expr)) *) + +#define os_mpsc_node_type(q, _ns) \ + os_unqualified_pointer_type((q)->_ns##_head) + +// +// Multi Producer calls, can be used safely concurrently +// + +// Returns true when the queue was empty and the head must be set +#define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \ + os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \ + _tail->_o_next = NULL; \ + _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \ + if (fastpath(_prev)) { \ + os_atomic_store2o(_prev, _o_next, _head, relaxed); \ + } \ + (_prev == NULL); \ + }) + +// Returns true when the queue was empty and the head must be set +#define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \ + os_mpsc_node_type(q, _ns) _o = (o); \ + os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \ + }) + +#define os_mpsc_push_update_head(q, _ns, o) ({ \ + os_atomic_store2o((q), _ns##_head, o, relaxed); \ + }) + +// +// Single Consumer calls, can NOT be used safely concurrently +// + +#define os_mpsc_get_head(q, _ns) ({ \ + os_mpsc_node_type(q, _ns) _head; \ + _dispatch_wait_until(_head = (q)->_ns##_head); \ + _head; \ + }) + +#define os_mpsc_pop_head(q, _ns, head, 
_o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \ + os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + /* 22708742: set tail to NULL with release, so that NULL write */ \ + /* to head above doesn't clobber head from concurrent enqueuer */ \ + if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \ + _dispatch_wait_until(_n = fastpath(_head->_o_next)); \ + os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + } \ + _n; \ + }) + +#define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \ + if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \ + _dispatch_wait_until(_n = _q->_ns##_head); \ + _head->_o_next = _n; \ + } \ + os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + }) + +#define os_mpsc_capture_snapshot(q, _ns, tail) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head; \ + _dispatch_wait_until(_head = _q->_ns##_head); \ + os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \ + /* 22708742: set tail to NULL with release, so that NULL write */ \ + /* to head above doesn't clobber head from concurrent enqueuer */ \ + *(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \ + _head; \ + }) + +#define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ + os_unqualified_pointer_type(head) _head = (head), _n = NULL; \ + if (_head != (tail)) { \ + _dispatch_wait_until(_n = _head->_o_next); \ + }; \ + _n; }) + +#define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \ + typeof(q) _q = (q); \ + os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \ + _tail->_o_next = NULL; \ + if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \ + _dispatch_wait_until(_n = _q->_ns##_head); \ + _tail->_o_next = _n; \ + } \ + os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + }) + +#pragma mark - +#pragma mark dispatch_queue_t tq lock 
DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_wqthread_override_start(mach_port_t thread, - pthread_priority_t priority) +static inline bool +_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_start_direct(thread, priority); -#else - (void)thread; (void)priority; -#endif + dispatch_lock_owner owner; + if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) { + return true; + } + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dq->dq_sidelock.dul_lock); + return false; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_wqthread_override_reset(void) +_dispatch_queue_sidelock_lock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_reset(); -#endif + return _dispatch_unfair_lock_lock(&dq->dq_sidelock); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_thread_override_start(mach_port_t thread, pthread_priority_t priority) +static inline bool +_dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_override_qos_class_start_direct(thread, priority); -#else - (void)thread; (void)priority; -#endif + if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) { + return true; + } + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + return false; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_thread_override_end(mach_port_t thread) +_dispatch_queue_sidelock_unlock(dispatch_queue_t dq) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_override_qos_class_end_direct(thread); -#else - (void)thread; -#endif + if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } } #pragma mark - -#pragma mark dispatch_queue_t - -static inline bool _dispatch_queue_need_override(dispatch_queue_t dq, - pthread_priority_t pp); -static inline bool _dispatch_queue_need_override_retain(dispatch_queue_t dq, - pthread_priority_t pp); -static inline bool _dispatch_queue_retain_if_override(dispatch_queue_t dq, - pthread_priority_t pp); -static inline pthread_priority_t _dispatch_queue_get_override_priority( - dispatch_queue_t dq); -static inline pthread_priority_t _dispatch_queue_reset_override_priority( - dispatch_queue_t dq); -static inline pthread_priority_t _dispatch_get_defaultpriority(void); -static inline void _dispatch_set_defaultpriority_override(void); -static inline void _dispatch_reset_defaultpriority(pthread_priority_t priority); -static inline pthread_priority_t _dispatch_get_priority(void); -static inline void _dispatch_set_priority(pthread_priority_t priority); +#pragma mark dispatch_queue_t misc DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t @@ -198,238 +1429,325 @@ _dispatch_queue_get_current(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_set_thread(dispatch_queue_t dq) +_dispatch_queue_set_current(dispatch_queue_t dq) { - // The manager queue uses dispatch_queue_drain but is thread bound - if (!dq->dq_is_thread_bound) { - dq->dq_thread = _dispatch_thread_port(); - } + _dispatch_thread_setspecific(dispatch_queue_key, dq); } DISPATCH_ALWAYS_INLINE 
-static inline void -_dispatch_queue_clear_thread(dispatch_queue_t dq) +static inline struct dispatch_object_s* +_dispatch_queue_head(dispatch_queue_t dq) { - if (!dq->dq_is_thread_bound) { - dq->dq_thread = MACH_PORT_NULL; - } + return os_mpsc_get_head(dq, dq_items); +} + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s* +_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +{ + return os_mpsc_pop_head(dq, dq_items, dc, do_next); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_push_list2(dispatch_queue_t dq, struct dispatch_object_s *head, +_dispatch_queue_push_update_tail(dispatch_queue_t dq, struct dispatch_object_s *tail) { - struct dispatch_object_s *prev; - tail->do_next = NULL; - prev = dispatch_atomic_xchg2o(dq, dq_items_tail, tail, release); - if (fastpath(prev)) { - // if we crash here with a value less than 0x1000, then we are at a - // known bug in client code for example, see _dispatch_queue_dispose - // or _dispatch_atfork_child - prev->do_next = head; - } - return (prev != NULL); + // if we crash here with a value less than 0x1000, then we are + // at a known bug in client code. 
for example, see + // _dispatch_queue_dispose or _dispatch_atfork_child + return os_mpsc_push_update_tail(dq, dq_items, tail, do_next); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +static inline bool +_dispatch_queue_push_update_tail_list(dispatch_queue_t dq, + struct dispatch_object_s *head, struct dispatch_object_s *tail) { - struct dispatch_object_s *head = _head._do, *tail = _tail._do; - bool override = _dispatch_queue_need_override_retain(dq, pp); - if (!fastpath(_dispatch_queue_push_list2(dq, head, tail))) { - _dispatch_queue_push_list_slow(dq, pp, head, n, override); - } else if (override) { - _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + // if we crash here with a value less than 0x1000, then we are + // at a known bug in client code. for example, see + // _dispatch_queue_dispose or _dispatch_atfork_child + return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_push_update_head(dispatch_queue_t dq, + struct dispatch_object_s *head, bool retained) +{ + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + dispatch_assert(!retained); + // Lie about "retained" here, it generates better assembly in this + // hotpath, and _dispatch_root_queue_wakeup knows to ignore this + // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH. + // + // We need to bypass the retain below because pthread root queues + // are not global and retaining them would be wrong. + // + // We should eventually have a typeflag for "POOL" kind of root queues. + retained = true; } + // The queue must be retained before dq_items_head is written in order + // to ensure that the reference is still valid when _dispatch_queue_wakeup + // is called. 
Otherwise, if preempted between the assignment to + // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the + // queue may release the last reference to the queue when invoked by + // _dispatch_queue_drain. + if (!retained) _dispatch_retain(dq); + os_mpsc_push_update_head(dq, dq_items, head); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp) +_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) { - struct dispatch_object_s *tail = _tail._do; + struct dispatch_object_s *head = _head._do, *tail = _tail._do; bool override = _dispatch_queue_need_override_retain(dq, pp); - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, pp, tail, override); + dispatch_queue_flags_t flags; + if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) { + _dispatch_queue_push_update_head(dq, head, override); + if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) { + return _dispatch_queue_push_list_slow(dq, n); + } + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; } else if (override) { - _dispatch_queue_wakeup_with_qos_and_release(dq, pp); + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; + } else { + return; } + dx_wakeup(dq, pp, flags); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, bool wakeup) +_dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { - // caller assumed to have a reference on dq struct dispatch_object_s *tail = _tail._do; - if (!fastpath(_dispatch_queue_push_list2(dq, tail, tail))) { - _dispatch_queue_push_slow(dq, pp, tail, false); - } else if (_dispatch_queue_need_override(dq, pp)) { - _dispatch_queue_wakeup_with_qos(dq, pp); 
- } else if (slowpath(wakeup)) { - _dispatch_queue_wakeup(dq); + bool override = _dispatch_queue_need_override(dq, pp); + if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { + // when SLOW_WAITER is set, we borrow the reference of the caller + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + _dispatch_queue_push_update_head(dq, tail, true); + flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH; + } else if (override) { + flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING; + } else { + flags = DISPATCH_WAKEUP_SLOW_WAITER; + } + } else { + if (override) _dispatch_retain(dq); + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + _dispatch_queue_push_update_head(dq, tail, override); + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; + } else if (override) { + flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; + } else { + return; + } } + return dx_wakeup(dq, pp, flags); } struct _dispatch_identity_s { - pthread_priority_t old_pri; pthread_priority_t old_pp; - dispatch_queue_t old_dq; }; DISPATCH_ALWAYS_INLINE static inline void _dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, - dispatch_queue_t assumed_rq) + pthread_priority_t pp) { - di->old_dq = _dispatch_queue_get_current(); - di->old_pri = _dispatch_get_priority(); - di->old_pp = _dispatch_get_defaultpriority(); + // assumed_rq was set by the caller, we need to fake the priorities + dispatch_queue_t assumed_rq = _dispatch_queue_get_current(); + + dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - dispatch_assert(dx_type(di->old_dq) == DISPATCH_QUEUE_ROOT_TYPE); - dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_ROOT_TYPE); + di->old_pp = _dispatch_get_defaultpriority(); - _dispatch_wqthread_override_start(_dispatch_thread_port(), di->old_pri); - _dispatch_set_priority(assumed_rq->dq_priority); + if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { + if (!pp) { + pp = _dispatch_get_priority(); 
+ // _dispatch_root_queue_drain_deferred_item() may turn a manager + // thread into a regular root queue, and we must never try to + // restore the manager flag once we became a regular work queue + // thread. + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) > + (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), pp); + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + } _dispatch_reset_defaultpriority(assumed_rq->dq_priority); - _dispatch_thread_setspecific(dispatch_queue_key, assumed_rq); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di) { - _dispatch_thread_setspecific(dispatch_queue_key, di->old_dq); - _dispatch_set_priority(di->old_pri); _dispatch_reset_defaultpriority(di->old_pp); - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); } typedef dispatch_queue_t _dispatch_queue_class_invoke_handler_t(dispatch_object_t, - _dispatch_thread_semaphore_t*); + dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **); DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_continuation_t dc, dispatch_invoke_flags_t flags, + dispatch_invoke_flags_t flags, _dispatch_queue_class_invoke_handler_t invoke) { - pthread_priority_t p = 0; dispatch_queue_t dq = dou._dq; + struct dispatch_object_s *dc = NULL; + dispatch_queue_t tq = NULL; + uint64_t dq_state, to_unlock = 0; bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING); bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING); - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) && - fastpath(dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))){ - _dispatch_queue_set_thread(dq); + // When called from a plain _dispatch_queue_drain: + // overriding = false + // owning = true + // + // When called from an override continuation: + // overriding = true + // owning depends on whether the override embedded the queue or steals + DISPATCH_COMPILER_CAN_ASSUME(owning || overriding); - dispatch_queue_t tq = NULL; - _dispatch_thread_semaphore_t sema = 0; + if (owning) { + dq->do_next = DISPATCH_OBJECT_LISTLESS; + } + to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state); + if (likely(to_unlock)) { struct _dispatch_identity_s di; + pthread_priority_t old_dp; +drain_pending_barrier: if (overriding) { _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - dq->dq_thread, _dispatch_get_defaultpriority()); - _dispatch_root_queue_identity_assume(&di, dc->dc_other); + _dispatch_tid_self(), _dispatch_get_defaultpriority()); + _dispatch_root_queue_identity_assume(&di, 0); } - tq = invoke(dq, &sema); - _dispatch_queue_clear_thread(dq); - - if (!owning && !sema && tq && tq != dq->do_targetq) { - /* - * When (tq && tq != dq->do_targetq) this is a 
source or mach - * channel asking to get to their manager queue. - * - * Since stealers cannot call _dispatch_queue_push_queue and - * retarget those, they need ot destroy the override so that - * when waking those sources or mach channels on their target queue - * we don't risk a stealer taking them over and not be able to - * retarget again, effectively live-locking them. - * - * Also, we're in the `overriding` case so the thread will be marked - * dirty by _dispatch_root_queue_identity_restore anyway - * so forgetting about p is fine. - */ - (void)_dispatch_queue_reset_override_priority(dq); - p = 0; - } else if (sema || tq || DISPATCH_OBJECT_SUSPENDED(dq)) { - p = _dispatch_queue_get_override_priority(dq); - } else { - p = _dispatch_queue_reset_override_priority(dq); - } - if (overriding) { - _dispatch_root_queue_identity_restore(&di); - } else { - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { + pthread_priority_t op, dp; + + old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); + op = dq->dq_override; + if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), op); // Ensure that the root queue sees that this thread was overridden. _dispatch_set_defaultpriority_override(); } } - uint32_t running = dispatch_atomic_dec2o(dq, dq_running, release); - if (sema) { - _dispatch_thread_semaphore_signal(sema); - } else if (owning && tq) { - _dispatch_introspection_queue_item_complete(dq); - return _dispatch_queue_push_queue(tq, dq, p); + flags = _dispatch_queue_merge_autorelease_frequency(dq, flags); +attempt_running_slow_head: + tq = invoke(dq, flags, &to_unlock, &dc); + if (slowpath(tq)) { + // Either dc is set, which is a deferred invoke case + // + // or only tq is and it means a reenqueue is required, because of: + // a retarget, a suspension, or a width change. + // + // In both cases, we want to bypass the check for DIRTY. 
+ // That may cause us to leave DIRTY in place but all drain lock + // acquirers clear it + } else { + if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) { + goto attempt_running_slow_head; + } + to_unlock = 0; + } + if (overriding) { + _dispatch_root_queue_identity_restore(&di); } - if (!owning && running == 0) { - _dispatch_introspection_queue_item_complete(dq); - return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { + _dispatch_reset_defaultpriority(old_dp); } } else if (overriding) { - mach_port_t th = dq->dq_thread; - if (th) { - p = _dispatch_queue_get_override_priority(dq); + uint32_t owner = _dq_state_drain_owner(dq_state); + pthread_priority_t p = dq->dq_override; + if (owner && p) { _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", - th, p); - _dispatch_wqthread_override_start(th, p); + owner, p); + _dispatch_wqthread_override_start_check_owner(owner, p, + &dq->dq_state_lock); } } - _dispatch_introspection_queue_item_complete(dq); if (owning) { - dq->do_next = DISPATCH_OBJECT_LISTLESS; - if (!dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, seq_cst)) { - // seq_cst with atomic store to suspend_cnt - if (dispatch_atomic_load2o(dq, dq_running, seq_cst) == 0) { - // verify that the queue is idle - return _dispatch_queue_wakeup_with_qos_and_release(dq, p); + _dispatch_introspection_queue_item_complete(dq); + } + + if (tq && dc) { + return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc); + } + + if (tq) { + bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current()); + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - to_unlock; + if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) && + _dq_state_has_pending_barrier(new_state)) { + new_state += DISPATCH_QUEUE_IN_BARRIER; + new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state -= 
DISPATCH_QUEUE_PENDING_BARRIER; + new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + } else { + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + if (_dq_state_should_wakeup(new_state)) { + // drain was not interupted for suspension + // we will reenqueue right away, just put ENQUEUED back + new_state |= DISPATCH_QUEUE_ENQUEUED; + new_state |= DISPATCH_QUEUE_DIRTY; + } } + }); + if (_dq_state_is_in_barrier(new_state)) { + // we did a "full width upgrade" and just added IN_BARRIER + // so adjust what we own and drain again + to_unlock &= DISPATCH_QUEUE_ENQUEUED; + to_unlock += DISPATCH_QUEUE_IN_BARRIER; + to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + goto drain_pending_barrier; + } + if (_dq_state_has_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + return _dispatch_queue_push(tq, dq, 0); } } - _dispatch_release(dq); // added when the queue is put on the list + + return _dispatch_release_tailcall(dq); } DISPATCH_ALWAYS_INLINE -static inline unsigned long -_dispatch_queue_class_probe(dispatch_object_t dou) +static inline bool +_dispatch_queue_class_probe(dispatch_queue_class_t dqu) { - dispatch_queue_t dq = dou._dq; struct dispatch_object_s *tail; - // seq_cst with atomic store to suspend_cnt - tail = dispatch_atomic_load2o(dq, dq_items_tail, seq_cst); - return (unsigned long)slowpath(tail != NULL); + // seq_cst wrt atomic store to dq_state + // seq_cst wrt atomic store to dq_flags + tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered); + return slowpath(tail != NULL); } -DISPATCH_ALWAYS_INLINE +DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool -_dispatch_object_suspended(dispatch_object_t dou) +_dispatch_is_in_root_queues_array(dispatch_queue_t dq) { - struct dispatch_object_s *obj = dou._do; - unsigned int suspend_cnt; - // seq_cst with atomic 
store to tail - suspend_cnt = dispatch_atomic_load2o(obj, do_suspend_cnt, seq_cst); - return slowpath(suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL); + return (dq >= _dispatch_root_queues) && + (dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -474,43 +1792,72 @@ _dispatch_get_root_queue(qos_class_t priority, bool overcommit) return NULL; } -// Note to later developers: ensure that any initialization changes are -// made for statically allocated queues (i.e. _dispatch_main_q). -static inline void -_dispatch_queue_init(dispatch_queue_t dq) +#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit) { - dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + uint32_t idx; - dq->dq_running = 0; - dq->dq_width = 1; - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; - dq->dq_serialnum = dispatch_atomic_inc_orig(&_dispatch_queue_serial_numbers, - relaxed); + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + idx = (uint32_t)__builtin_ffs((int)pp); + if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] + .dq_priority)) { + // If kernel doesn't support maintenance, bottom bit is background. + // Shift to our idea of where background bit is. 
+ idx++; + } + // ffs starts at 1, and account for the QOS_CLASS_SHIFT + // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than + // DISPATCH_QOS_COUNT + idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1); + if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) { + DISPATCH_CLIENT_CRASH(pp, "Corrupted priority"); + } + return &_dispatch_root_queues[2 * idx + overcommit]; +} +#endif + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline dispatch_queue_t +_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit) +{ + bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // root queues in _dispatch_root_queues are not overcommit for even indices + // and overcommit for odd ones, so fixing overcommit is either returning + // the same queue, or picking its neighbour in _dispatch_root_queues + if (overcommit && !rq_overcommit) { + return rq + 1; + } + if (!overcommit && rq_overcommit) { + return rq - 1; + } + return rq; } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_set_bound_thread(dispatch_queue_t dq) { - //Tag thread-bound queues with the owning thread - dispatch_assert(dq->dq_is_thread_bound); - dq->dq_thread = _dispatch_thread_port(); + // Tag thread-bound queues with the owning thread + dispatch_assert(_dispatch_queue_is_thread_bound(dq)); + mach_port_t old_owner, self = _dispatch_tid_self(); + uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed); + if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) { + DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice"); + } } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_clear_bound_thread(dispatch_queue_t dq) { - dispatch_assert(dq->dq_is_thread_bound); - dq->dq_thread = MACH_PORT_NULL; -} + uint64_t dq_state, value; -DISPATCH_ALWAYS_INLINE -static inline mach_port_t -_dispatch_queue_get_bound_thread(dispatch_queue_t dq) -{ - dispatch_assert(dq->dq_is_thread_bound); - return dq->dq_thread; + 
dispatch_assert(_dispatch_queue_is_thread_bound(dq)); + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state); + }); } DISPATCH_ALWAYS_INLINE @@ -538,9 +1885,9 @@ static inline pthread_priority_t _dispatch_get_defaultpriority(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( + pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific( dispatch_defaultpriority_key); - return priority; + return pp; #else return 0; #endif @@ -548,20 +1895,16 @@ _dispatch_get_defaultpriority(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_defaultpriority(pthread_priority_t priority) +_dispatch_reset_defaultpriority(pthread_priority_t pp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - // if an inner-loop or'd in the override flag to the per-thread priority, - // it needs to be propogated up the chain - priority |= old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG; - - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - } + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + // If an inner-loop or'd in the override flag to the per-thread priority, + // it needs to be propagated up the chain. 
+ pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG; + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); #else - (void)priority; + (void)pp; #endif } @@ -570,14 +1913,10 @@ static inline void _dispatch_set_defaultpriority_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - pthread_priority_t priority = old_priority | - _PTHREAD_PRIORITY_OVERRIDE_FLAG; + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG; - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - } + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); #endif } @@ -586,15 +1925,12 @@ static inline bool _dispatch_reset_defaultpriority_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - pthread_priority_t priority = old_priority & + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + pthread_priority_t pp = old_pp & ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG); - if (slowpath(priority != old_priority)) { - _dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); - return true; - } + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + return unlikely(pp != old_pp); #endif return false; } @@ -605,12 +1941,18 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, dispatch_queue_t tq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const pthread_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - const pthread_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; - pthread_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; + const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; + const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; + const dispatch_priority_t 
defaultqueue_flag = + _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) && (tqp & rootqueue_flag)) { - dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + if (tqp & defaultqueue_flag) { + dq->dq_priority = 0; + } else { + dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + } } #else (void)dq; (void)tq; @@ -619,275 +1961,290 @@ _dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_set_defaultpriority(pthread_priority_t priority) +_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t *new_pp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_priority = _dispatch_get_defaultpriority(); - if (old_priority) { + const pthread_priority_t default_priority_preserved_flags = + _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + if (old_pp) { pthread_priority_t flags, defaultqueue, basepri; - flags = (priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultqueue = (old_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - basepri = (old_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); - priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!priority) { + flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); + basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!pp) { flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue; - priority = basepri; - } else if (priority < basepri && !defaultqueue) { // rdar://16349734 - priority = basepri; + pp = basepri; + } else if (pp < basepri && !defaultqueue) { // rdar://16349734 + pp = basepri; } - priority |= flags | (old_priority & _PTHREAD_PRIORITY_OVERRIDE_FLAG); - } - if (slowpath(priority != old_priority)) { - 
_dispatch_thread_setspecific(dispatch_defaultpriority_key, - (void*)priority); + pp |= flags | (old_pp & default_priority_preserved_flags); } - return old_priority; + _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + if (new_pp) *new_pp = pp; + return old_pp; #else - (void)priority; + (void)pp; (void)new_pp; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_priority_adopt(pthread_priority_t priority, unsigned long flags) +_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t defaultpri = _dispatch_get_defaultpriority(); bool enforce, inherited, defaultqueue; enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || - (priority & _PTHREAD_PRIORITY_ENFORCE_FLAG); + (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG); defaultqueue = (defaultpri & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - priority &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!priority) { - enforce = false; - } else if (!enforce) { - if (priority < defaultpri) { - if (defaultqueue) enforce = true; // rdar://16349734 - } else if (inherited || defaultqueue) { - enforce = true; - } - } else if (priority < defaultpri && !defaultqueue) { // rdar://16349734 - enforce = false; + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + + if (!pp) { + return defaultpri; + } else if (defaultqueue) { // rdar://16349734 + return pp; + } else if (pp < defaultpri) { + return defaultpri; + } else if (enforce || inherited) { + return pp; + } else { + return defaultpri; } - return enforce ? 
priority : defaultpri; #else - (void)priority; (void)flags; + (void)pp; (void)flags; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_get_priority(void) +_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp, + dispatch_queue_t rq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = (uintptr_t)_dispatch_thread_getspecific( - dispatch_priority_key); - return (priority & ~_PTHREAD_PRIORITY_FLAGS_MASK); + pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t defaultqueue = + rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (!p || (!defaultqueue && p < rqp)) { + p = rqp | defaultqueue; + } + return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); #else + (void)rq; (void)pp; return 0; #endif } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_set_priority_and_mach_voucher(pthread_priority_t priority, - mach_voucher_t kv) +static inline pthread_priority_t +_dispatch_get_priority(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - _pthread_set_flags_t flags = 0; - if (priority && _dispatch_set_qos_class_enabled) { - pthread_priority_t old_priority = _dispatch_get_priority(); - if (priority != old_priority && old_priority) { - flags |= _PTHREAD_SET_SELF_QOS_FLAG; - } - } - if (kv != VOUCHER_NO_MACH_VOUCHER) { -#if VOUCHER_USE_MACH_VOUCHER - flags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; -#endif - } - if (!flags) return; - int r = _pthread_set_properties_self(flags, priority, kv); - (void)dispatch_assume_zero(r); -#elif VOUCHER_USE_MACH_VOUCHER -#error Invalid build configuration + pthread_priority_t pp = (uintptr_t) + _dispatch_thread_getspecific(dispatch_priority_key); + return pp; #else - (void)priority; (void)kv; + return 0; #endif } -DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT -static inline voucher_t -_dispatch_set_priority_and_adopt_voucher(pthread_priority_t priority, - voucher_t voucher) -{ - 
pthread_priority_t p = (priority != DISPATCH_NO_PRIORITY) ? priority : 0; - voucher_t ov = DISPATCH_NO_VOUCHER; - mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; - if (voucher != DISPATCH_NO_VOUCHER) { - ov = _voucher_get(); - kv = _voucher_swap_and_get_mach_voucher(ov, voucher); +#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_compute_update(pthread_priority_t pp) +{ + dispatch_assert(pp != DISPATCH_NO_PRIORITY); + if (!_dispatch_set_qos_class_enabled) return 0; + // the priority in _dispatch_get_priority() only tracks manager-ness + // and overcommit, which is inherited from the current value for each update + // however if the priority had the NEEDS_UNBIND flag set we need to clear it + // the first chance we get + // + // the manager bit is invalid input, but we keep it to get meaningful + // assertions in _dispatch_set_priority_and_voucher_slow() + pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pthread_priority_t cur_priority = _dispatch_get_priority(); + pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + if (unlikely(cur_priority & unbind)) { + // else we always need an update if the NEEDS_UNBIND flag is set + // the slowpath in _dispatch_set_priority_and_voucher_slow() will + // adjust the priority further with the proper overcommitness + return pp ? 
pp : (cur_priority & ~unbind); + } else { + cur_priority &= ~overcommit; } - _dispatch_set_priority_and_mach_voucher(p, kv); - return ov; + if (unlikely(pp != cur_priority)) return pp; + return 0; } +#endif DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t -_dispatch_adopt_priority_and_voucher(pthread_priority_t priority, - voucher_t v, unsigned long flags) +_dispatch_set_priority_and_voucher(pthread_priority_t pp, + voucher_t v, _dispatch_thread_set_self_t flags) { - pthread_priority_t p = 0; - if (priority != DISPATCH_NO_PRIORITY) { - p = _dispatch_priority_adopt(priority, flags); - } - if (!(flags & DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE)) { - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dq && dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - if (v != DISPATCH_NO_VOUCHER && v) _voucher_release(v); - v = dq->dq_override_voucher; - if (v) _voucher_retain(v); +#if HAVE_PTHREAD_WORKQUEUE_QOS + pp = _dispatch_priority_compute_update(pp); + if (likely(!pp)) { + if (v == DISPATCH_NO_VOUCHER) { + return DISPATCH_NO_VOUCHER; + } + if (likely(v == _voucher_get())) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + if (flags & DISPATCH_VOUCHER_REPLACE) { + if (retained && v) _voucher_release_no_dispose(v); + v = DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + } + return v; } } - return _dispatch_set_priority_and_adopt_voucher(p, v); + return _dispatch_set_priority_and_voucher_slow(pp, v, flags); +#else + (void)pp; (void)v; (void)flags; + return DISPATCH_NO_VOUCHER; +#endif } DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t -_dispatch_adopt_queue_override_voucher(dispatch_queue_t dq) -{ - voucher_t v = dq->dq_override_voucher; - if (v == DISPATCH_NO_VOUCHER) return DISPATCH_NO_VOUCHER; - if (v) _voucher_retain(v); - return _dispatch_set_priority_and_adopt_voucher(DISPATCH_NO_PRIORITY, v); -} - -DISPATCH_ALWAYS_INLINE -static inline void 
-_dispatch_adopt_priority_and_replace_voucher(pthread_priority_t priority, - voucher_t voucher, unsigned long flags) +_dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp, + voucher_t v, _dispatch_thread_set_self_t flags) { - voucher_t ov; - ov = _dispatch_adopt_priority_and_voucher(priority, voucher, flags); - if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_reset_priority_and_voucher(pthread_priority_t priority, - voucher_t voucher) -{ - voucher_t ov; - ov = _dispatch_set_priority_and_adopt_voucher(priority, voucher); - if (voucher != DISPATCH_NO_VOUCHER && ov) _voucher_release(ov); + pthread_priority_t p = 0; + if (pp != DISPATCH_NO_PRIORITY) { + p = _dispatch_priority_adopt(pp, flags); + } + return _dispatch_set_priority_and_voucher(p, v, flags); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_voucher(voucher_t voucher) +_dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v) { - return _dispatch_reset_priority_and_voucher(DISPATCH_NO_PRIORITY, voucher); + if (pp == DISPATCH_NO_PRIORITY) pp = 0; + (void)_dispatch_set_priority_and_voucher(pp, v, + DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_set_priority(pthread_priority_t priority) +_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) { - _dispatch_set_priority_and_mach_voucher(priority, VOUCHER_NO_MACH_VOUCHER); + flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE; + (void)_dispatch_set_priority_and_voucher(0, v, flags); } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_priority_normalize(pthread_priority_t pp) +static inline bool +_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp) { - dispatch_assert_zero(pp & ~(pthread_priority_t) - _PTHREAD_PRIORITY_QOS_CLASS_MASK); - unsigned int qosbits = (unsigned int)pp, idx; - if (!qosbits) return 0; - idx = 
(unsigned int)(sizeof(qosbits)*8) - - (unsigned int)__builtin_clz(qosbits) - 1; - return (1 << idx); + // global queues have their override set to DISPATCH_SATURATED_OVERRIDE + // which makes this test always return false for them. + return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_received_override(dispatch_queue_class_t dqu, + pthread_priority_t pp) { - if (!pp || dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE) return false; - uint32_t p = (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - uint32_t o = dq->dq_override; - return (o < p); + dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE); + return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override_retain(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, + pthread_priority_t pp) { - bool override = _dispatch_queue_need_override(dq, pp); - if (override) _dispatch_retain(dq); - return override; + if (_dispatch_queue_need_override(dqu, pp)) { + _os_object_retain_internal_inline(dqu._oq->_as_os_obj); + return true; + } + return false; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_override_priority(dispatch_queue_t dq, pthread_priority_t *pp, - bool *was_overridden) -{ - uint32_t o = dq->dq_override; - uint32_t p = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - if (o < p) { - o = dispatch_atomic_or_orig2o(dq, dq_override, p, relaxed); - if (was_overridden) { - o = (uint32_t)_dispatch_priority_normalize(o); +_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, + dispatch_priority_t new_op) +{ + dispatch_priority_t old_op; + new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (!new_op) return false; + os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, { + if (new_op <= 
old_op) { + os_atomic_rmw_loop_give_up(return false); } - *pp = _dispatch_priority_normalize(o | p); - } else { - o = (uint32_t)_dispatch_priority_normalize(o); - *pp = o; - } - if (was_overridden) { - *was_overridden = - (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) < o; - } - return (o < p); -} - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_queue_get_override_priority(dispatch_queue_t dq) -{ - uint32_t p = (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - uint32_t o = dq->dq_override; - if (o == p) return o; - return _dispatch_priority_normalize(o); + }); + return true; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_set_override_priority(dispatch_queue_t dq) -{ - uint32_t p = 0; - if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; +_dispatch_queue_override_priority(dispatch_queue_class_t dqu, + pthread_priority_t *pp, dispatch_wakeup_flags_t *flags) +{ + os_mpsc_queue_t oq = dqu._oq; + dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + dispatch_priority_t o; + + _dispatch_assert_is_valid_qos_override(np); + if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) { + qp = 0; + } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) { + // when a queue is used as a lock its priority doesn't count + } else if (np < qp) { + // for asynchronous workitems, queue priority is the floor for overrides + np = qp; + } + *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS; + + // this optimizes for the case when no update of the override is required + // os_atomic_rmw_loop2o optimizes for the case when the update happens, + // and can't be used. 
+ o = os_atomic_load2o(oq, oq_override, relaxed); + do { + if (likely(np <= o)) break; + } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed))); + + if (np <= o) { + *pp = o; + } else { + *flags |= DISPATCH_WAKEUP_OVERRIDING; + *pp = np; + } + if (o > qp) { + *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN; } - dispatch_atomic_store2o(dq, dq_override, p, relaxed); } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_queue_reset_override_priority(dispatch_queue_t dq) -{ - uint32_t p = 0; - if (!(dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - p = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; +static inline dispatch_priority_t +_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu, + bool qp_is_floor) +{ + os_mpsc_queue_t oq = dqu._oq; + dispatch_priority_t p = 0; + if (qp_is_floor) { + // thread bound queues floor their dq_override to their + // priority to avoid receiving useless overrides + p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; } - uint32_t o = dispatch_atomic_xchg2o(dq, dq_override, p, relaxed); - if (o == p) return o; - return _dispatch_priority_normalize(o); + dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed); + dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE); + return (o > p) ? 
o : 0; } DISPATCH_ALWAYS_INLINE @@ -895,12 +2252,13 @@ static inline pthread_priority_t _dispatch_priority_propagate(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority = _dispatch_get_priority(); - if (priority > _dispatch_user_initiated_priority) { + pthread_priority_t pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp > _dispatch_user_initiated_priority) { // Cap QOS for propagation at user-initiated - priority = _dispatch_user_initiated_priority; + pp = _dispatch_user_initiated_priority; } - return priority; + return pp; #else return 0; #endif @@ -912,9 +2270,9 @@ static inline bool _dispatch_is_background_thread(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t priority; - priority = _dispatch_get_priority(); - return priority && (priority <= _dispatch_background_priority); + pthread_priority_t pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + return pp && (pp <= _dispatch_background_priority); #else return false; #endif @@ -933,6 +2291,18 @@ _dispatch_block_has_private_data(const dispatch_block_t block) return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags) +{ + /* + * Generates better assembly than the actual readable test: + * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS) + */ + flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS; + return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_block_private_data_t _dispatch_block_get_data(const dispatch_block_t db) @@ -947,7 +2317,8 @@ _dispatch_block_get_data(const dispatch_block_t db) // x points to base of captured dispatch_block_private_data_s object dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)x; if (dbpd->dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) { - DISPATCH_CRASH("Corruption of 
dispatch block object"); + DISPATCH_CLIENT_CRASH(dbpd->dbpd_magic, + "Corruption of dispatch block object"); } return dbpd; } @@ -968,11 +2339,6 @@ _dispatch_block_get_flags(const dispatch_block_t db) return dbpd ? dbpd->dbpd_flags : 0; } -#define DISPATCH_BLOCK_HAS(flag, db) \ - ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_HAS_ ## flag) != 0) -#define DISPATCH_BLOCK_IS(flag, db) \ - ((_dispatch_block_get_flags((db)) & DISPATCH_BLOCK_ ## flag) != 0) - #endif #pragma mark - @@ -983,8 +2349,8 @@ static inline dispatch_continuation_t _dispatch_continuation_alloc_cacheonly(void) { dispatch_continuation_t dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); - if (dc) { + _dispatch_thread_getspecific(dispatch_cache_key); + if (likely(dc)) { _dispatch_thread_setspecific(dispatch_cache_key, dc->do_next); } return dc; @@ -995,8 +2361,8 @@ static inline dispatch_continuation_t _dispatch_continuation_alloc(void) { dispatch_continuation_t dc = - fastpath(_dispatch_continuation_alloc_cacheonly()); - if(!dc) { + _dispatch_continuation_alloc_cacheonly(); + if (unlikely(!dc)) { return _dispatch_continuation_alloc_from_heap(); } return dc; @@ -1007,10 +2373,10 @@ static inline dispatch_continuation_t _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) { dispatch_continuation_t prev_dc = (dispatch_continuation_t) - fastpath(_dispatch_thread_getspecific(dispatch_cache_key)); + _dispatch_thread_getspecific(dispatch_cache_key); int cnt = prev_dc ? 
prev_dc->dc_cache_cnt + 1 : 1; // Cap continuation cache - if (slowpath(cnt > _dispatch_continuation_cache_limit)) { + if (unlikely(cnt > _dispatch_continuation_cache_limit)) { return dc; } dc->do_next = prev_dc; @@ -1024,7 +2390,7 @@ static inline void _dispatch_continuation_free(dispatch_continuation_t dc) { dc = _dispatch_continuation_free_cacheonly(dc); - if (slowpath(dc)) { + if (unlikely(dc)) { _dispatch_continuation_free_to_cache_limit(dc); } } @@ -1033,72 +2399,102 @@ _dispatch_continuation_free(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_invoke(dispatch_object_t dou, dispatch_queue_t dq) -{ - dispatch_continuation_t dc = dou._dc, dc1; - dispatch_group_t dg; - - _dispatch_trace_continuation_pop(dq, dou); - if (DISPATCH_OBJ_IS_VTABLE(dou._do)) { - return dx_invoke(dou._do, NULL, DISPATCH_INVOKE_NONE); - } - - // Add the item back to the cache before calling the function. This - // allows the 'hot' continuation to be used for a quick callback. - // - // The ccache version is per-thread. - // Therefore, the object has not been reused yet. - // This generates better assembly. 
- if ((long)dc->do_vtable & DISPATCH_OBJ_ASYNC_BIT) { - _dispatch_continuation_voucher_adopt(dc); - dc1 = _dispatch_continuation_free_cacheonly(dc); - } else { - dc1 = NULL; - } - if ((long)dc->do_vtable & DISPATCH_OBJ_GROUP_BIT) { - dg = dc->dc_data; +_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc) +{ + struct dispatch_object_s *dou = dc->dc_data; + unsigned long type = dx_type(dou); + if (type == DISPATCH_GROUP_TYPE) { + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_introspection_queue_item_complete(dou); + dispatch_group_leave((dispatch_group_t)dou); } else { - dg = NULL; - } - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - if (dg) { - dispatch_group_leave(dg); - _dispatch_release(dg); - } - _dispatch_introspection_queue_item_complete(dou); - if (slowpath(dc1)) { - _dispatch_continuation_free_to_cache_limit(dc1); + DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type"); } } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, + dispatch_invoke_flags_t flags) +{ + dispatch_continuation_t dc = dou._dc, dc1; + dispatch_invoke_with_autoreleasepool(flags, { + uintptr_t dc_flags = dc->dc_flags; + // Add the item back to the cache before calling the function. This + // allows the 'hot' continuation to be used for a quick callback. + // + // The ccache version is per-thread. + // Therefore, the object has not been reused yet. + // This generates better assembly. 
+ _dispatch_continuation_voucher_adopt(dc, ov, dc_flags); + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc1 = _dispatch_continuation_free_cacheonly(dc); + } else { + dc1 = NULL; + } + if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) { + _dispatch_continuation_with_group_invoke(dc); + } else { + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_introspection_queue_item_complete(dou); + } + if (unlikely(dc1)) { + _dispatch_continuation_free_to_cache_limit(dc1); + } + }); +} + DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_pop(dispatch_object_t dou) +_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t flags) { - dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_pthread_root_queue_observer_hooks_t observer_hooks = _dispatch_get_pthread_root_queue_observer_hooks(); if (observer_hooks) observer_hooks->queue_will_execute(dq); - _dispatch_continuation_invoke(dou, dq); + _dispatch_trace_continuation_pop(dq, dou); + flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; + if (_dispatch_object_has_vtable(dou)) { + dx_invoke(dou._do, flags); + } else { + voucher_t ov = dq->dq_override_voucher; + _dispatch_continuation_invoke_inline(dou, ov, flags); + } if (observer_hooks) observer_hooks->queue_did_execute(dq); } +// used to forward the do_invoke of a continuation with a vtable to its real +// implementation. +#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) 
\ + ({ \ + dispatch_continuation_t _dc = (dc), _dc1; \ + uintptr_t _dc_flags = (dc_flags); \ + _dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \ + if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \ + _dc1 = _dispatch_continuation_free_cacheonly(_dc); \ + } else { \ + _dc1 = NULL; \ + } \ + __VA_ARGS__; \ + _dispatch_introspection_queue_item_complete(_dc); \ + if (unlikely(_dc1)) { \ + _dispatch_continuation_free_to_cache_limit(_dc1); \ + } \ + }) + DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_priority_set(dispatch_continuation_t dc, pthread_priority_t pp, dispatch_block_flags_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t prio = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - prio = pp; - } else if (!(flags & DISPATCH_BLOCK_NO_QOS_CLASS)) { - prio = _dispatch_priority_propagate(); + if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) { + pp = _dispatch_priority_propagate(); } if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { - prio |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; } - dc->dc_priority = prio; + dc->dc_priority = pp; #else (void)dc; (void)pp; (void)flags; #endif @@ -1114,21 +2510,55 @@ _dispatch_continuation_get_override_priority(dispatch_queue_t dq, bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - if (!p) { - enforce = false; - } else if (!enforce && (!dqp || defaultqueue)) { - enforce = true; - } - if (!enforce) { - p = dqp; + + dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY); + if (p && (enforce || !dqp || defaultqueue)) { + return p; } - return p; + return dqp; #else (void)dq; (void)dc; return 0; #endif } -#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_init_f(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, void 
*ctxt, dispatch_function_t func, + pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + _dispatch_continuation_voucher_set(dc, dqu, flags); + _dispatch_continuation_priority_set(dc, pp, flags); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_init(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_t work, + pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT; + dc->dc_ctxt = _dispatch_Block_copy(work); + _dispatch_continuation_priority_set(dc, pp, flags); + + if (unlikely(_dispatch_block_has_private_data(work))) { + // always sets dc_func & dc_voucher + // may update dc_priority & do_vtable + return _dispatch_continuation_init_slow(dc, dqu, flags); + } + + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc->dc_func = _dispatch_call_block_and_release; + } else { + dc->dc_func = _dispatch_Block_invoke(work); + } + _dispatch_continuation_voucher_set(dc, dqu, flags); +} + +#endif // DISPATCH_PURE_C #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index 98626c643..a9aee1123 100644 --- a/src/internal.h +++ b/src/internal.h @@ -27,7 +27,11 @@ #ifndef __DISPATCH_INTERNAL__ #define __DISPATCH_INTERNAL__ +#if __has_include() +#include +#else #include +#endif #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ @@ -35,8 +39,35 @@ #ifdef __APPLE__ #include #include + +#ifndef TARGET_OS_MAC_DESKTOP +#define TARGET_OS_MAC_DESKTOP (TARGET_OS_MAC && \ + !TARGET_OS_SIMULATOR && !TARGET_OS_IPHONE && !TARGET_OS_EMBEDDED) +#endif + +#if TARGET_OS_MAC_DESKTOP +# define DISPATCH_HOST_SUPPORTS_OSX(x) \ + (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) +# if !DISPATCH_HOST_SUPPORTS_OSX(101000) +# error "OS X hosts older than OS X 10.10 aren't supported anymore" +# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +#elif 
TARGET_OS_SIMULATOR +# define DISPATCH_HOST_SUPPORTS_OSX(x) \ + (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) +# if !DISPATCH_HOST_SUPPORTS_OSX(101000) +# error "Simulator hosts older than OS X 10.10 aren't supported anymore" +# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +#else +# define DISPATCH_HOST_SUPPORTS_OSX(x) 1 +# if __IPHONE_OS_VERSION_MIN_REQUIRED < 70000 +# error "iOS hosts older than iOS 7.0 aren't supported anymore" +# endif #endif +#else // !__APPLE__ +#define DISPATCH_HOST_SUPPORTS_OSX(x) 0 +#endif // !__APPLE__ + #if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC #define DISPATCH_MACH_SPI 1 @@ -47,29 +78,35 @@ #if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC #define OS_VOUCHER_ACTIVITY_SPI 1 #endif -#if !defined(OS_VOUCHER_ACTIVITY_BUFFER_SPI) && TARGET_OS_MAC && \ - __has_include() -#define OS_VOUCHER_ACTIVITY_BUFFER_SPI 1 +#if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC +#define OS_FIREHOSE_SPI 1 #endif #if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC #define DISPATCH_LAYOUT_SPI 1 #endif -#if !defined(USE_OBJC) && HAVE_OBJC -#define USE_OBJC 1 +#if __has_include() +#include +#if !defined(HAVE_DYLD_IS_MEMORY_IMMUTABLE) +#if defined(DYLD_MACOSX_VERSION_10_12) || defined(DYLD_IOS_VERSION_10_0) +#define HAVE_DYLD_IS_MEMORY_IMMUTABLE 1 +#else +#define HAVE_DYLD_IS_MEMORY_IMMUTABLE 0 #endif +#endif // !defined(HAVE_DYLD_IS_MEMORY_IMMUTABLE) +#endif // __has_include() -#if USE_OBJC && ((!TARGET_IPHONE_SIMULATOR && defined(__i386__)) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080)) -// Disable Objective-C support on platforms with legacy objc runtime -#undef USE_OBJC -#define USE_OBJC 0 +#if !defined(USE_OBJC) && HAVE_OBJC +#define USE_OBJC 1 #endif #if USE_OBJC #define OS_OBJECT_HAVE_OBJC_SUPPORT 1 -#if __OBJC__ +#if defined(__OBJC__) #define OS_OBJECT_USE_OBJC 1 +// Force internal Objective-C sources to use class-visible headers +// even when not compiling in Swift. 
+#define OS_OBJECT_SWIFT3 1 #else #define OS_OBJECT_USE_OBJC 0 #endif // __OBJC__ @@ -80,6 +117,22 @@ #include #include +#define __DISPATCH_HIDE_SYMBOL(sym, version) \ + __asm__(".section __TEXT,__const\n\t" \ + ".globl $ld$hide$os" #version "$_" #sym "\n\t" \ + "$ld$hide$os" #version "$_" #sym ":\n\t" \ + " .byte 0\n\t" \ + ".previous") + + +#ifndef DISPATCH_HIDE_SYMBOL +#if TARGET_OS_MAC && !TARGET_OS_IPHONE +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) \ + __DISPATCH_HIDE_SYMBOL(sym, osx) +#else +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) +#endif +#endif #include #include @@ -95,47 +148,23 @@ #include #endif -#define DISPATCH_STRUCT_DECL(type, name, ...) \ - struct type __VA_ARGS__ name - -// Visual Studio C++ does not support C99 designated initializers. -// This means that static declarations should be zero initialized and cannot -// be const since we must fill in the values during DLL initialization. -#if !TARGET_OS_WIN32 -#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ -struct type name = { \ -__VA_ARGS__ \ -} -#else -#define DISPATCH_STRUCT_INSTANCE(type, name, ...) \ -struct type name = { 0 } -#endif - -#if !TARGET_OS_WIN32 -#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ - const DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) - -#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ - const DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#if defined(__OBJC__) || defined(__cplusplus) +#define DISPATCH_PURE_C 0 #else -#define DISPATCH_CONST_STRUCT_DECL(type, name, ...) \ - DISPATCH_STRUCT_DECL(type, name, __VA_ARGS__) - -#define DISPATCH_CONST_STRUCT_INSTANCE(type, name, ...) \ - DISPATCH_STRUCT_INSTANCE(type, name, __VA_ARGS__) +#define DISPATCH_PURE_C 1 #endif /* private.h must be included last to avoid picking up installed headers. 
*/ -#include "object_private.h" +#include "os/object_private.h" #include "queue_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" +#include "os/voucher_private.h" +#include "os/voucher_activity_private.h" #if !TARGET_OS_WIN32 #include "io_private.h" #endif -#include "voucher_private.h" -#include "voucher_activity_private.h" #include "layout_private.h" #include "benchmark.h" #include "private.h" @@ -174,6 +203,10 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_USE_DTRACE_INTROSPECTION 1 #endif +#ifndef DISPATCH_DEBUG_QOS +#define DISPATCH_DEBUG_QOS DISPATCH_DEBUG +#endif + #if HAVE_LIBKERN_OSCROSSENDIAN_H #include #endif @@ -202,10 +235,13 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if HAVE_MALLOC_MALLOC_H #include #endif +#if __has_include() +#include +#endif // __has_include( -#if !TARGET_OS_WIN32 +#if !TARGET_OS_WIN32 #include #include #include @@ -214,8 +250,9 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #include -#else -#include "sys_queue.h" +#endif +#if defined(__linux__) +#include #endif #ifdef __BLOCKS__ @@ -244,25 +281,13 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #endif -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif -#ifndef __has_include -#define __has_include(x) 0 -#endif -#ifndef __has_feature -#define __has_feature(x) 0 -#endif -#ifndef __has_attribute -#define __has_attribute(x) 0 -#endif - #if __GNUC__ #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) #define DISPATCH_UNUSED __attribute__((__unused__)) #define DISPATCH_WEAK __attribute__((__weak__)) #define DISPATCH_OVERLOADABLE __attribute__((__overloadable__)) +#define DISPATCH_PACKED __attribute__((__packed__)) #if DISPATCH_DEBUG #define DISPATCH_ALWAYS_INLINE_NDEBUG #else @@ -295,13 +320,36 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void 
dispatch_atfork_child(void); /* I wish we had __builtin_expect_range() */ #if __GNUC__ -#define fastpath(x) ((typeof(x))__builtin_expect((long)(x), ~0l)) -#define slowpath(x) ((typeof(x))__builtin_expect((long)(x), 0l)) +#define _safe_cast_to_long(x) \ + ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ + "__builtin_expect doesn't support types wider than long"); \ + (long)(x); }) +#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) +#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) +#define likely(x) __builtin_expect(!!(x), 1) +#define unlikely(x) __builtin_expect(!!(x), 0) #else #define fastpath(x) (x) #define slowpath(x) (x) +#define likely(x) (!!(x)) +#define unlikely(x) (!!(x)) #endif // __GNUC__ +#if BYTE_ORDER == LITTLE_ENDIAN +#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { a; b; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { a; b; c; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { a; b; c; d; } +#else +#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { b; a; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { c; b; a; } +#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { d; c; b; a; } +#endif + +#define _TAILQ_IS_ENQUEUED(elm, field) \ + ((elm)->field.tqe_prev != NULL) +#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ + do { (elm)->field.tqe_prev = NULL; } while (0) + #if DISPATCH_DEBUG // sys/queue.h debugging #undef TRASHIT @@ -316,7 +364,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); TRASHIT((head)->tqh_last); \ } while (0) -DISPATCH_NOINLINE +DISPATCH_EXPORT DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); #if HAVE_MACH @@ -324,10 +372,13 @@ DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); +#endif // HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_kevent_client(const char* msg, const char* filter, const 
char *operation, int err); -#endif + +DISPATCH_NOINLINE +void _dispatch_bug_deprecated(const char *msg); DISPATCH_NOINLINE DISPATCH_NORETURN void _dispatch_abort(size_t line, long val); @@ -353,12 +404,13 @@ void _dispatch_abort(size_t line, long val); #if DISPATCH_USE_OS_DEBUG_LOG #define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__) #else -DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) +DISPATCH_EXPORT DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); #endif // DISPATCH_USE_OS_DEBUG_LOG -#define dsnprintf(...) \ - ({ int _r = snprintf(__VA_ARGS__); _r < 0 ? 0u : (size_t)_r; }) +#define dsnprintf(buf, siz, ...) \ + ({ size_t _siz = siz; int _r = snprintf(buf, _siz, __VA_ARGS__); \ + _r < 0 ? 0u : ((size_t)_r > _siz ? _siz : (size_t)_r); }) #if __GNUC__ #define dispatch_static_assert(e) ({ \ @@ -368,6 +420,9 @@ void _dispatch_log(const char *msg, ...); #define dispatch_static_assert(e) #endif +#define DISPATCH_BAD_INPUT ((void *_Nonnull)0) +#define DISPATCH_OUT_OF_MEMORY ((void *_Nonnull)0) + /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. 
@@ -409,7 +464,7 @@ static inline void _dispatch_assert(long e, long line) { static inline void _dispatch_assert_zero(long e, long line) { if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); } -#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) #endif /* __GNUC__ */ /* @@ -513,31 +568,40 @@ _dispatch_object_debug(dispatch_object_t object, const char *message, ...); #ifdef __BLOCKS__ #define _dispatch_Block_invoke(bb) \ ((dispatch_function_t)((struct Block_layout *)bb)->invoke) +void *_dispatch_Block_copy(void *block); #if __GNUC__ -dispatch_block_t _dispatch_Block_copy(dispatch_block_t block); #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) -#else -dispatch_block_t _dispatch_Block_copy(const void *block); #endif void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ void _dispatch_temporary_resource_shortage(void); void *_dispatch_calloc(size_t num_items, size_t size); +const char *_dispatch_strdup_if_mutable(const char *str); void _dispatch_vtable_init(void); char *_dispatch_get_build(void); uint64_t _dispatch_timeout(dispatch_time_t when); - -extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; - -#if !defined(DISPATCH_USE_OS_SEMAPHORE_CACHE) && !(TARGET_IPHONE_SIMULATOR) -// rdar://problem/15492045 -#if __has_include() -#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 -#include -#endif -#endif +uint64_t _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when); + +#define _DISPATCH_UNSAFE_FORK_MULTITHREADED ((uint8_t)1) +#define _DISPATCH_UNSAFE_FORK_PROHIBIT ((uint8_t)2) +extern uint8_t _dispatch_unsafe_fork; +extern bool _dispatch_child_of_unsafe_fork; +void _dispatch_fork_becomes_unsafe_slow(void); + +#define _dispatch_is_multithreaded_inline() \ + ((_dispatch_unsafe_fork & _DISPATCH_UNSAFE_FORK_MULTITHREADED) != 0) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_fork_becomes_unsafe(void) +{ + if 
(!fastpath(_dispatch_is_multithreaded_inline())) { + _dispatch_fork_becomes_unsafe_slow(); + DISPATCH_COMPILER_CAN_ASSUME(_dispatch_is_multithreaded_inline()); + } +} /* #includes dependent on internal.h */ #include "shims.h" @@ -548,34 +612,32 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT #define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 #endif -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1080 -#ifndef DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 -#endif -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1080 -#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 -#endif -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 -#ifndef DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK -#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 -#endif -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101000 -#undef HAVE__PTHREAD_WORKQUEUE_INIT -#define HAVE__PTHREAD_WORKQUEUE_INIT 0 -#endif #endif // HAVE_PTHREAD_WORKQUEUES #if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \ && !defined(HAVE_PTHREAD_WORKQUEUE_QOS) #define HAVE_PTHREAD_WORKQUEUE_QOS 1 #endif +#if HAVE__PTHREAD_WORKQUEUE_INIT && (PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \ + || (PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 && \ + defined(WORKQ_FEATURE_KEVENT))) \ + && !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) +#if PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 +// rdar://problem/20609877 +typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; +#endif +#define HAVE_PTHREAD_WORKQUEUE_KEVENT 1 +#endif + +#ifndef PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK +#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_HOST_SUPPORTS_OSX(101200) +#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 1 +#else +#define 
PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 0 +#endif +#endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK #if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070) +#if !defined(MACH_NOTIFY_SEND_POSSIBLE) #undef MACH_NOTIFY_SEND_POSSIBLE #define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME #endif @@ -593,20 +655,22 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #endif #endif // EVFILT_VM -#if TARGET_IPHONE_SIMULATOR -#undef DISPATCH_USE_MEMORYSTATUS_SOURCE -#define DISPATCH_USE_MEMORYSTATUS_SOURCE 0 +#if TARGET_OS_SIMULATOR +#undef DISPATCH_USE_MEMORYPRESSURE_SOURCE +#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0 #undef DISPATCH_USE_VM_PRESSURE_SOURCE #define DISPATCH_USE_VM_PRESSURE_SOURCE 0 -#endif // TARGET_IPHONE_SIMULATOR -#if !defined(DISPATCH_USE_MEMORYSTATUS_SOURCE) && DISPATCH_USE_MEMORYSTATUS -#define DISPATCH_USE_MEMORYSTATUS_SOURCE 1 +#endif // TARGET_OS_SIMULATOR +#if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS +#define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 #elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE #define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +extern bool _dispatch_memory_warn; +#endif -#if !defined(NOTE_LEEWAY) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if !defined(NOTE_LEEWAY) #undef NOTE_LEEWAY #define NOTE_LEEWAY 0 #undef NOTE_CRITICAL @@ -615,6 +679,22 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #define NOTE_BACKGROUND 0 #endif // NOTE_LEEWAY +#if !defined(NOTE_FUNLOCK) +#define NOTE_FUNLOCK 0x00000100 +#endif + +#if !defined(NOTE_MACH_CONTINUOUS_TIME) +#define NOTE_MACH_CONTINUOUS_TIME 0 +#endif // NOTE_MACH_CONTINUOUS_TIME + +#if !defined(HOST_NOTIFY_CALENDAR_SET) +#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE +#endif // 
HOST_NOTIFY_CALENDAR_SET + +#if !defined(HOST_CALENDAR_SET_REPLYID) +#define HOST_CALENDAR_SET_REPLYID 951 +#endif // HOST_CALENDAR_SET_REPLYID + #if HAVE_DECL_NOTE_REAP #if defined(NOTE_REAP) && defined(__APPLE__) #undef NOTE_REAP @@ -622,9 +702,23 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #endif #endif // HAVE_DECL_NOTE_REAP -#if !defined(EV_UDATA_SPECIFIC) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#ifndef VQ_QUOTA +#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 +#endif // VQ_QUOTA + +#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN +#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0 +#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN + +#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL +#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0 +#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL + +#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100) #undef DISPATCH_USE_EV_UDATA_SPECIFIC #define DISPATCH_USE_EV_UDATA_SPECIFIC 0 #elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC) @@ -634,23 +728,64 @@ extern bool _dispatch_safe_fork, _dispatch_child_of_unsafe_fork; #if !DISPATCH_USE_EV_UDATA_SPECIFIC #undef EV_UDATA_SPECIFIC #define EV_UDATA_SPECIFIC 0 -#undef DISPATCH_DYNAMIC_SELECT_FALLBACK -#define DISPATCH_DYNAMIC_SELECT_FALLBACK 0 -#undef DISPATCH_USE_SELECT_FALLBACK -#define DISPATCH_USE_SELECT_FALLBACK 1 +#undef EV_VANISHED +#define EV_VANISHED 0 #endif // !DISPATCH_USE_EV_UDATA_SPECIFIC -#if !defined(EV_SET_QOS) || (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 101100) +#ifndef EV_VANISHED +#define EV_VANISHED 0x0200 +#endif + +#ifndef 
DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS +#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200) +// deferred delete can return bogus ENOENTs on older kernels +#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1 +#else +#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0 +#endif +#endif + +#if !defined(EV_SET_QOS) || !DISPATCH_HOST_SUPPORTS_OSX(101100) #undef DISPATCH_USE_KEVENT_QOS #define DISPATCH_USE_KEVENT_QOS 0 #elif !defined(DISPATCH_USE_KEVENT_QOS) #define DISPATCH_USE_KEVENT_QOS 1 #endif // EV_SET_QOS +#if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(KEVENT_FLAG_WORKQ) && \ + DISPATCH_USE_EV_UDATA_SPECIFIC && DISPATCH_USE_KEVENT_QOS && \ + DISPATCH_HOST_SUPPORTS_OSX(101200) && \ + !defined(DISPATCH_USE_KEVENT_WORKQUEUE) +#define DISPATCH_USE_KEVENT_WORKQUEUE 1 +#endif + + +#if (!DISPATCH_USE_KEVENT_WORKQUEUE || DISPATCH_DEBUG) && \ + !defined(DISPATCH_USE_MGR_THREAD) +#define DISPATCH_USE_MGR_THREAD 1 +#endif + +#if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_EV_UDATA_SPECIFIC && \ + DISPATCH_HOST_SUPPORTS_OSX(101200) && \ + !defined(DISPATCH_USE_EVFILT_MACHPORT_DIRECT) +#define DISPATCH_USE_EVFILT_MACHPORT_DIRECT 1 +#endif + +#ifndef MACH_SEND_OVERRIDE +#define MACH_SEND_OVERRIDE 0x00000020 +typedef unsigned int mach_msg_priority_t; +#define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0) +#endif // MACH_SEND_OVERRIDE + + +#if (!DISPATCH_USE_EVFILT_MACHPORT_DIRECT || DISPATCH_DEBUG) && \ + !defined(DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK) +#define DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK 1 +#endif + #if DISPATCH_USE_KEVENT_QOS typedef struct kevent_qos_s _dispatch_kevent_qos_s; +typedef typeof(((struct kevent_qos_s*)NULL)->qos) _dispatch_kevent_priority_t; #else // DISPATCH_USE_KEVENT_QOS #ifndef KEVENT_FLAG_IMMEDIATE #define KEVENT_FLAG_NONE 0x00 @@ -673,10 +808,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif // DISPATCH_USE_KEVENT_QOS #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) -#if TARGET_IPHONE_SIMULATOR && 
IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1070 -#undef DISPATCH_USE_SETNOSIGPIPE -#define DISPATCH_USE_SETNOSIGPIPE 0 -#endif #ifndef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 1 #endif @@ -695,14 +826,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #if HAVE_LIBPROC_INTERNAL_H #include #include -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#endif -#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#endif #ifndef DISPATCH_USE_IMPORTANCE_ASSERTION #define DISPATCH_USE_IMPORTANCE_ASSERTION 1 #endif @@ -710,10 +833,6 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #if HAVE_SYS_GUARDED_H #include -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 -#undef DISPATCH_USE_GUARDED_FD -#define DISPATCH_USE_GUARDED_FD 0 -#endif #ifndef DISPATCH_USE_GUARDED_FD #define DISPATCH_USE_GUARDED_FD 1 #endif @@ -724,6 +843,68 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif // HAVE_SYS_GUARDED_H +#if __has_include() +#include +#ifndef DBG_DISPATCH +#define DBG_DISPATCH 46 +#endif +#ifndef KDBG_CODE +#define KDBG_CODE(...) 
0 +#endif +#define DISPATCH_CODE(subclass, code) \ + KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code) +#ifdef ARIADNEDBG_CODE +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2) +#else +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 +#endif +#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && DISPATCH_INTROSPECTION +#define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 1 +#endif + +#define DISPATCH_TRACE_SUBCLASS_DEFAULT 0 +#define DISPATCH_TRACE_SUBCLASS_VOUCHER 1 +#define DISPATCH_TRACE_SUBCLASS_PERF 2 +#define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3 + +#define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1) +#define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2) +#define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3) +#define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4) +#define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5) + +#define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, + uint64_t c, uint64_t d) +{ + if (!code) return; +#ifdef _COMM_PAGE_KDEBUG_ENABLE + if (likely(*(volatile uint32_t *)_COMM_PAGE_KDEBUG_ENABLE == 0)) return; +#endif + kdebug_trace(code, a, b, c, d); +} +#define _dispatch_cast_to_uint64(e) \ + __builtin_choose_expr(sizeof(e) > 4, \ + ((uint64_t)(e)), ((uint64_t)(uintptr_t)(e))) +#define _dispatch_ktrace(code, a, b, c, d) _dispatch_ktrace_impl(code, \ + _dispatch_cast_to_uint64(a), _dispatch_cast_to_uint64(b), \ + _dispatch_cast_to_uint64(c), _dispatch_cast_to_uint64(d)) + +#else // __has_include() +#define DISPATCH_CODE(subclass, code) 0 +#define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 +#define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 0 +#define _dispatch_ktrace(code, a, b, c, d) +#endif // !__has_include() +#define _dispatch_ktrace4(code, a, b, c, d) _dispatch_ktrace(code, a, b, c, d) +#define _dispatch_ktrace3(code, a, b, c) _dispatch_ktrace(code, a, b, c, 0) +#define 
_dispatch_ktrace2(code, a, b) _dispatch_ktrace(code, a, b, 0, 0) +#define _dispatch_ktrace1(code, a) _dispatch_ktrace(code, a, 0, 0, 0) +#define _dispatch_ktrace0(code) _dispatch_ktrace(code, 0, 0, 0, 0) + #ifndef MACH_MSGH_BITS_VOUCHER_MASK #define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 #define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ @@ -740,9 +921,66 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #define MACH_SEND_INVALID_VOUCHER 0x10000005 #endif +#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 +#undef VOUCHER_USE_MACH_VOUCHER +#define VOUCHER_USE_MACH_VOUCHER 0 +#endif +#ifndef VOUCHER_USE_MACH_VOUCHER +#if __has_include() +#define VOUCHER_USE_MACH_VOUCHER 1 +#endif +#endif + +#if RDAR_24272659 // FIXME: +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 +#elif !defined(VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER) +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 1 +#endif +#else // RDAR_24272659 +#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 +#endif // RDAR_24272659 + +#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_BANK_AUTOREDEEM +#define VOUCHER_USE_BANK_AUTOREDEEM 0 +#elif !defined(VOUCHER_USE_BANK_AUTOREDEEM) +#define VOUCHER_USE_BANK_AUTOREDEEM 1 +#endif + +#if !VOUCHER_USE_MACH_VOUCHER || \ + !__has_include() || \ + !DISPATCH_HOST_SUPPORTS_OSX(101200) +#undef VOUCHER_USE_MACH_VOUCHER_PRIORITY +#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 0 +#elif !defined(VOUCHER_USE_MACH_VOUCHER_PRIORITY) +#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 1 +#endif + +#ifndef VOUCHER_USE_PERSONA +#if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) && \ + TARGET_OS_IOS && !TARGET_OS_SIMULATOR +#define VOUCHER_USE_PERSONA 1 +#else +#define VOUCHER_USE_PERSONA 0 +#endif +#endif // VOUCHER_USE_PERSONA + +#if VOUCHER_USE_MACH_VOUCHER +#undef 
DISPATCH_USE_IMPORTANCE_ASSERTION +#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 +#else +#undef MACH_RCV_VOUCHER +#define MACH_RCV_VOUCHER 0 +#define VOUCHER_USE_PERSONA 0 +#endif // VOUCHER_USE_MACH_VOUCHER + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // +#define _dispatch_set_crash_log_cause_and_message(ac, msg) #define _dispatch_set_crash_log_message(msg) #define _dispatch_set_crash_log_message_dynamic(msg) @@ -753,19 +991,22 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; // 2) A hand crafted call to mach_msg*() screwed up. Use MIG. #define DISPATCH_VERIFY_MIG(x) do { \ if ((x) == MIG_REPLY_MISMATCH) { \ - _dispatch_set_crash_log_message("MIG_REPLY_MISMATCH"); \ + _dispatch_set_crash_log_cause_and_message((x), \ + "MIG_REPLY_MISMATCH"); \ _dispatch_hardware_crash(); \ } \ } while (0) #endif -#define DISPATCH_CRASH(x) do { \ - _dispatch_set_crash_log_message("BUG IN LIBDISPATCH: " x); \ +#define DISPATCH_INTERNAL_CRASH(c, x) do { \ + _dispatch_set_crash_log_cause_and_message((c), \ + "BUG IN LIBDISPATCH: " x); \ _dispatch_hardware_crash(); \ } while (0) -#define DISPATCH_CLIENT_CRASH(x) do { \ - _dispatch_set_crash_log_message("BUG IN CLIENT OF LIBDISPATCH: " x); \ +#define DISPATCH_CLIENT_CRASH(c, x) do { \ + _dispatch_set_crash_log_cause_and_message((c), \ + "BUG IN CLIENT OF LIBDISPATCH: " x); \ _dispatch_hardware_crash(); \ } while (0) @@ -774,19 +1015,72 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; _dispatch_hardware_crash(); \ } while (0) -extern int _dispatch_set_qos_class_enabled; +#define DISPATCH_ASSERTION_FAILED_MESSAGE \ + "BUG IN CLIENT OF LIBDISPATCH: Assertion failed: " + +#define _dispatch_assert_crash(msg) do { \ + const char *__msg = (msg); \ + _dispatch_log("%s", __msg); \ + _dispatch_set_crash_log_message_dynamic(__msg); \ + _dispatch_hardware_crash(); \ + } while (0) + +#define _dispatch_client_assert_fail(fmt, ...) 
do { \ + char *_msg = NULL; \ + asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ + ##__VA_ARGS__); \ + _dispatch_assert_crash(_msg); \ + free(_msg); \ + } while (0) + #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -#define DISPATCH_PRIORITY_ENFORCE 0x1 -#define DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE 0x2 -static inline void _dispatch_adopt_priority_and_replace_voucher( - pthread_priority_t priority, voucher_t voucher, unsigned long flags); +DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, + DISPATCH_PRIORITY_ENFORCE = 0x1, + DISPATCH_VOUCHER_REPLACE = 0x2, + DISPATCH_VOUCHER_CONSUME = 0x4, + DISPATCH_THREAD_PARK = 0x8, +); +DISPATCH_WARN_RESULT +static inline voucher_t _dispatch_adopt_priority_and_set_voucher( + pthread_priority_t priority, voucher_t voucher, + _dispatch_thread_set_self_t flags); #if HAVE_MACH -static inline void _dispatch_set_priority_and_mach_voucher( - pthread_priority_t priority, mach_voucher_t kv); mach_port_t _dispatch_get_mach_host_port(void); #endif +#if HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_DEBUG +extern int _dispatch_set_qos_class_enabled; +#else +#define _dispatch_set_qos_class_enabled (1) +#endif +#endif // HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_USE_KEVENT_WORKQUEUE +#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \ + !DISPATCH_USE_EV_UDATA_SPECIFIC +#error Invalid build configuration +#endif +#if DISPATCH_USE_MGR_THREAD +extern int _dispatch_kevent_workqueue_enabled; +#else +#define _dispatch_kevent_workqueue_enabled (1) +#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + +#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT +#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC +#error Invalid build configuration +#endif +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +extern int _dispatch_evfilt_machport_direct_enabled; +#else +#define _dispatch_evfilt_machport_direct_enabled (1) +#endif +#else +#define 
_dispatch_evfilt_machport_direct_enabled (0) +#endif // DISPATCH_USE_EVFILT_MACHPORT_DIRECT + /* #includes dependent on internal.h */ #include "object_internal.h" @@ -800,5 +1094,6 @@ mach_port_t _dispatch_get_mach_host_port(void); #include "io_internal.h" #endif #include "inline_internal.h" +#include "firehose/firehose_internal.h" #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c index 35b0b573f..d847cb91a 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -23,6 +23,7 @@ #if DISPATCH_INTROSPECTION +#include #include "internal.h" #include "dispatch/introspection.h" #include "introspection_private.h" @@ -35,38 +36,50 @@ typedef struct dispatch_introspection_thread_s { } dispatch_introspection_thread_s; typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t; -static TAILQ_HEAD(, dispatch_introspection_thread_s) - _dispatch_introspection_threads = - TAILQ_HEAD_INITIALIZER(_dispatch_introspection_threads); -static volatile OSSpinLock _dispatch_introspection_threads_lock; +struct dispatch_introspection_state_s _dispatch_introspection = { + .threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads), + .queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues), +}; static void _dispatch_introspection_thread_remove(void *ctxt); -static TAILQ_HEAD(, dispatch_queue_s) _dispatch_introspection_queues = - TAILQ_HEAD_INITIALIZER(_dispatch_introspection_queues); -static volatile OSSpinLock _dispatch_introspection_queues_lock; - -static ptrdiff_t _dispatch_introspection_thread_queue_offset; +static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq); #pragma mark - #pragma mark dispatch_introspection_init +DISPATCH_NOINLINE +static bool +_dispatch_getenv_bool(const char *env, bool default_v) +{ + const char *v = getenv(env); + + if (v) { + return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 || + strcasecmp(v, "TRUE") == 0 || atoi(v); + } + return default_v; +} + void 
_dispatch_introspection_init(void) { - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_main_q, diq_list); - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_mgr_q, diq_list); #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, _dispatch_mgr_q.do_targetq, diq_list); #endif for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, &_dispatch_root_queues[i], diq_list); } + _dispatch_introspection.debug_queue_inversions = + _dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false); + // Hack to determine queue TSD offset from start of pthread structure uintptr_t thread = _dispatch_thread_self(); thread_identifier_info_data_t tiid; @@ -74,7 +87,7 @@ _dispatch_introspection_init(void) kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread), THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt); if (!dispatch_assume_zero(kr)) { - _dispatch_introspection_thread_queue_offset = + _dispatch_introspection.thread_queue_offset = (void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread; } _dispatch_thread_key_create(&dispatch_introspection_key, @@ -116,21 +129,21 @@ _dispatch_introspection_thread_add(void) dispatch_introspection_thread_t dit = (void*)_dispatch_continuation_alloc(); dit->dit_isa = (void*)0x41; dit->thread = (void*)thread; - dit->queue = !_dispatch_introspection_thread_queue_offset ? NULL : - (void*)thread + _dispatch_introspection_thread_queue_offset; + dit->queue = !_dispatch_introspection.thread_queue_offset ? 
NULL : + (void*)thread + _dispatch_introspection.thread_queue_offset; _dispatch_thread_setspecific(dispatch_introspection_key, dit); - OSSpinLockLock(&_dispatch_introspection_threads_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection_threads, dit, dit_list); - OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); } static void _dispatch_introspection_thread_remove(void *ctxt) { dispatch_introspection_thread_t dit = ctxt; - OSSpinLockLock(&_dispatch_introspection_threads_lock); - TAILQ_REMOVE(&_dispatch_introspection_threads, dit, dit_list); - OSSpinLockUnlock(&_dispatch_introspection_threads_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); + TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); _dispatch_continuation_free((void*)dit); _dispatch_thread_setspecific(dispatch_introspection_key, NULL); } @@ -138,71 +151,116 @@ _dispatch_introspection_thread_remove(void *ctxt) #pragma mark - #pragma mark dispatch_introspection_info -static inline -dispatch_introspection_queue_function_s +DISPATCH_USED inline +dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t dq) +{ + bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || + (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + dispatch_introspection_queue_s diq = { + .queue = dq, + .target_queue = dq->do_targetq, + .label = dq->dq_label, + .serialnum = dq->dq_serialnum, + .width = dq->dq_width, + .suspend_count = _dq_state_suspend_cnt(dq_state) + dq->dq_side_suspend_cnt, + .enqueued = _dq_state_is_enqueued(dq_state) && !global, + .barrier = _dq_state_is_in_barrier(dq_state) && !global, + 
.draining = (dq->dq_items_head == (void*)~0ul) || + (!dq->dq_items_head && dq->dq_items_tail), + .global = global, + .main = (dq == &_dispatch_main_q), + }; + return diq; +} + +static inline void _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, - dispatch_continuation_t dc, unsigned long *type) + dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi) { void *ctxt = dc->dc_ctxt; dispatch_function_t func = dc->dc_func; pthread_t waiter = NULL; bool apply = false; - long flags = (long)dc->do_vtable; - if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { - waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); - if (flags & DISPATCH_OBJ_BARRIER_BIT) { - dc = dc->dc_ctxt; - dq = dc->dc_data; + uintptr_t flags = dc->dc_flags; + + if (_dispatch_object_has_vtable(dc)) { + flags = 0; + switch (dc_type(dc)) { +#if HAVE_PTHREAD_WORKQUEUE_QOS + case DC_OVERRIDE_STEALING_TYPE: + case DC_OVERRIDE_OWNING_TYPE: + dc = dc->dc_data; + if (_dispatch_object_has_vtable(dc)) { + // these really wrap queues so we should hide the continuation type + dq = (dispatch_queue_t)dc; + diqi->type = dispatch_introspection_queue_item_type_queue; + diqi->queue = dispatch_introspection_queue_get_info(dq); + return; + } + return _dispatch_introspection_continuation_get_info(dq, dc, diqi); +#endif + case DC_ASYNC_REDIRECT_TYPE: + DISPATCH_INTERNAL_CRASH(0, "Handled by the caller"); + case DC_MACH_SEND_BARRRIER_DRAIN_TYPE: + break; + case DC_MACH_SEND_BARRIER_TYPE: + case DC_MACH_RECV_BARRIER_TYPE: + flags = (uintptr_t)dc->dc_data; + dq = dq->do_targetq; + break; } - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } - if (func == _dispatch_sync_recurse_invoke) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } else if (func == _dispatch_async_redirect_invoke) { - dq = dc->dc_data; - dc = dc->dc_other; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - flags = (long)dc->do_vtable; - } else if (func == _dispatch_mach_barrier_invoke) { - dq = 
dq->do_targetq; - ctxt = dc->dc_data; - func = dc->dc_other; - } else if (func == _dispatch_apply_invoke || - func == _dispatch_apply_redirect_invoke) { - dispatch_apply_t da = ctxt; - if (da->da_todo) { - dc = da->da_dc; - if (func == _dispatch_apply_redirect_invoke) { + } else { + if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { + waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); + if (flags & DISPATCH_OBJ_BARRIER_BIT) { + dc = dc->dc_ctxt; dq = dc->dc_data; } ctxt = dc->dc_ctxt; func = dc->dc_func; - apply = true; + } + if (func == _dispatch_sync_recurse_invoke) { + dc = dc->dc_ctxt; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + } else if (func == _dispatch_apply_invoke || + func == _dispatch_apply_redirect_invoke) { + dispatch_apply_t da = ctxt; + if (da->da_todo) { + dc = da->da_dc; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + apply = true; + } } } - if (func == _dispatch_call_block_and_release) { - *type = dispatch_introspection_queue_item_type_block; + if (flags & DISPATCH_OBJ_BLOCK_BIT) { + diqi->type = dispatch_introspection_queue_item_type_block; func = _dispatch_Block_invoke(ctxt); } else { - *type = dispatch_introspection_queue_item_type_function; + diqi->type = dispatch_introspection_queue_item_type_function; } - dispatch_introspection_queue_function_s diqf= { + diqi->function = (dispatch_introspection_queue_function_s){ .continuation = dc, .target_queue = dq, .context = ctxt, .function = func, - .group = flags & DISPATCH_OBJ_GROUP_BIT ? 
dc->dc_data : NULL, .waiter = waiter, - .barrier = flags & DISPATCH_OBJ_BARRIER_BIT, + .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1, .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, .apply = apply, }; - return diqf; + if (flags & DISPATCH_OBJ_GROUP_BIT) { + dispatch_group_t group = dc->dc_data; + if (dx_type(group) == DISPATCH_GROUP_TYPE) { + diqi->function.group = group; + } + } } static inline @@ -218,31 +276,6 @@ _dispatch_introspection_object_get_info(dispatch_object_t dou) return dio; } -DISPATCH_USED inline -dispatch_introspection_queue_s -dispatch_introspection_queue_get_info(dispatch_queue_t dq) -{ - bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); - uint16_t width = dq->dq_width; - if (width > 1 && width != DISPATCH_QUEUE_WIDTH_MAX) width /= 2; - dispatch_introspection_queue_s diq = { - .queue = dq, - .target_queue = dq->do_targetq, - .label = dq->dq_label, - .serialnum = dq->dq_serialnum, - .width = width, - .suspend_count = dq->do_suspend_cnt / 2, - .enqueued = (dq->do_suspend_cnt & 1) && !global, - .barrier = (dq->dq_running & 1) && !global, - .draining = (dq->dq_items_head == (void*)~0ul) || - (!dq->dq_items_head && dq->dq_items_tail), - .global = global, - .main = (dq == &_dispatch_main_q), - }; - return diq; -} - static inline dispatch_introspection_source_s _dispatch_introspection_source_get_info(dispatch_source_t ds) @@ -255,31 +288,28 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) if (dc) { ctxt = dc->dc_ctxt; handler = dc->dc_func; - hdlr_is_block = ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT); - } - bool after = (handler == _dispatch_after_timer_callback); - if (after && !(ds->ds_atomic_flags & DSF_CANCELED)) { - dc = ctxt; - ctxt = dc->dc_ctxt; - handler = dc->dc_func; - hdlr_is_block = (handler == _dispatch_call_block_and_release); - if (hdlr_is_block) { - handler = _dispatch_Block_invoke(ctxt); - } + hdlr_is_block = 
(dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT); } + + uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed); dispatch_introspection_source_s dis = { .source = ds, .target_queue = ds->do_targetq, - .type = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.filter : 0, - .handle = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.ident : 0, .context = ctxt, .handler = handler, - .suspend_count = ds->do_suspend_cnt / 2, - .enqueued = (ds->do_suspend_cnt & 1), + .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt, + .enqueued = _dq_state_is_enqueued(dq_state), .handler_is_block = hdlr_is_block, .timer = ds->ds_is_timer, - .after = after, + .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER), }; + dispatch_kevent_t dk = ds->ds_dkev; + if (ds->ds_is_custom_source) { + dis.type = (unsigned long)dk; + } else if (dk) { + dis.type = (unsigned long)dk->dk_kevent.filter; + dis.handle = (unsigned long)dk->dk_kevent.ident; + } return dis; } @@ -303,11 +333,21 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, dispatch_continuation_t dc) { dispatch_introspection_queue_item_s diqi; - if (DISPATCH_OBJ_IS_VTABLE(dc)) { - dispatch_object_t dou = (dispatch_object_t)dc; + dispatch_object_t dou; + +again: + dou._dc = dc; + if (_dispatch_object_has_vtable(dou._do)) { unsigned long type = dx_type(dou._do); unsigned long metatype = type & _DISPATCH_META_TYPE_MASK; - if (metatype == _DISPATCH_QUEUE_TYPE && + if (type == DC_ASYNC_REDIRECT_TYPE) { + dq = dc->dc_data; + dc = dc->dc_other; + goto again; + } + if (metatype == _DISPATCH_CONTINUATION_TYPE) { + _dispatch_introspection_continuation_get_info(dq, dc, &diqi); + } else if (metatype == _DISPATCH_QUEUE_TYPE && type != DISPATCH_QUEUE_SPECIFIC_TYPE) { diqi.type = dispatch_introspection_queue_item_type_queue; diqi.queue = dispatch_introspection_queue_get_info(dou._dq); @@ -320,8 +360,7 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, diqi.object = 
_dispatch_introspection_object_get_info(dou._do); } } else { - diqi.function = _dispatch_introspection_continuation_get_info(dq, dc, - &diqi.type); + _dispatch_introspection_continuation_get_info(dq, dc, &diqi); } return diqi; } @@ -335,7 +374,7 @@ dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_introspection_queue_t queues) { dispatch_queue_t next; - next = start ? start : TAILQ_FIRST(&_dispatch_introspection_queues); + next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues); while (count--) { if (!next) { queues->queue = NULL; @@ -353,7 +392,7 @@ dispatch_introspection_get_queue_threads(dispatch_continuation_t start, size_t count, dispatch_introspection_queue_thread_t threads) { dispatch_introspection_thread_t next = start ? (void*)start : - TAILQ_FIRST(&_dispatch_introspection_threads); + TAILQ_FIRST(&_dispatch_introspection.threads); while (count--) { if (!next) { threads->object = NULL; @@ -488,9 +527,11 @@ _dispatch_introspection_queue_create_hook(dispatch_queue_t dq) dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq) { - OSSpinLockLock(&_dispatch_introspection_queues_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection_queues, dq, diq_list); - OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + TAILQ_INIT(&dq->diq_order_top_head); + TAILQ_INIT(&dq->diq_order_bottom_head); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) { @@ -524,9 +565,10 @@ _dispatch_introspection_queue_dispose(dispatch_queue_t dq) _dispatch_introspection_queue_dispose_hook(dq); } - OSSpinLockLock(&_dispatch_introspection_queues_lock); - TAILQ_REMOVE(&_dispatch_introspection_queues, dq, diq_list); - 
OSSpinLockUnlock(&_dispatch_introspection_queues_lock); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list); + _dispatch_introspection_queue_order_dispose(dq); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } DISPATCH_NOINLINE @@ -612,17 +654,267 @@ _dispatch_introspection_queue_item_complete(dispatch_object_t dou) } void -_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) { +_dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) +{ dispatch_queue_t dq = _dispatch_queue_get_current(); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( queue_callout_begin, dq, ctxt, f); } void -_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) { +_dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) +{ dispatch_queue_t dq = _dispatch_queue_get_current(); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT( queue_callout_end, dq, ctxt, f); } +#pragma mark - +#pragma mark dispatch introspection deadlock detection + +typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t; +struct dispatch_queue_order_entry_s { + TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list; + TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list; + const char *dqoe_top_label; + const char *dqoe_bottom_label; + dispatch_queue_t dqoe_top_tq; + dispatch_queue_t dqoe_bottom_tq; + int dqoe_pcs_n; + void *dqoe_pcs[]; +}; + +static void +_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq) +{ + dispatch_queue_order_entry_t e, te; + dispatch_queue_t otherq; + TAILQ_HEAD(, dispatch_queue_order_entry_s) head; + + // this whole thing happens with _dispatch_introspection.queues_lock locked + + _dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock); + head.tqh_first = dq->diq_order_top_head.tqh_first; + head.tqh_last = dq->diq_order_top_head.tqh_last; + TAILQ_INIT(&dq->diq_order_top_head); + 
_dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock); + + TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { + otherq = e->dqoe_bottom_tq; + _dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock); + TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list); + _dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock); + free(e); + } + + _dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock); + head.tqh_first = dq->diq_order_bottom_head.tqh_first; + head.tqh_last = dq->diq_order_bottom_head.tqh_last; + TAILQ_INIT(&dq->diq_order_bottom_head); + _dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock); + + TAILQ_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { + otherq = e->dqoe_top_tq; + _dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock); + TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock); + free(e); + } +} + +// caller must make sure dq is not a root quueue +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_queue_bottom_target_queue(dispatch_queue_t dq) +{ + while (dq->do_targetq->do_targetq) { + dq = dq->do_targetq; + } + return dq; +} + +typedef struct dispatch_order_frame_s *dispatch_order_frame_t; +struct dispatch_order_frame_s { + dispatch_order_frame_t dof_prev; + dispatch_queue_order_entry_t dof_e; +}; + +DISPATCH_NOINLINE +static void +_dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof, + dispatch_queue_t top_q, dispatch_queue_t bottom_q) +{ + _SIMPLE_STRING buf = _simple_salloc(); + const char *leading_word = "with"; + + _simple_sprintf(buf, "%s Lock inversion detected\n" + "queue [%s] trying to sync onto queue [%s] conflicts\n", + DISPATCH_ASSERTION_FAILED_MESSAGE, + bottom_q->dq_label ?: "", top_q->dq_label ?: ""); + + while (dof) { + dispatch_queue_order_entry_t e = dof->dof_e; + char **symbols; + + _simple_sprintf(buf, + "%s queue [%s] syncing onto queue [%s] at:\n", 
leading_word, + dof->dof_e->dqoe_bottom_label, dof->dof_e->dqoe_top_label); + + symbols = backtrace_symbols(e->dqoe_pcs, e->dqoe_pcs_n); + if (symbols) { + for (int i = 0; i < e->dqoe_pcs_n; i++) { + _simple_sprintf(buf, "%s\n", symbols[i]); + } + free(symbols); + } else { + _simple_sappend(buf, "\n"); + } + + leading_word = "and"; + dof = dof->dof_prev; + } + + // turn off the feature for crash handlers + _dispatch_introspection.debug_queue_inversions = false; + _dispatch_assert_crash(_simple_string(buf)); + _simple_sfree(buf); +} + +static void +_dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, + dispatch_queue_t top_q, dispatch_queue_t top_tq, + dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) +{ + struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; + + // has anyone above bottom_tq ever sync()ed onto top_tq ? + _dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock); + TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) { + if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) { + _dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q); + } + _dispatch_introspection_order_check(&dof, top_q, top_tq, + bottom_q, dof.dof_e->dqoe_bottom_tq); + } + _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock); +} + +void +_dispatch_introspection_order_record(dispatch_queue_t top_q, + dispatch_queue_t bottom_q) +{ + dispatch_queue_order_entry_t e, it; + const int pcs_skip = 1, pcs_n_max = 128; + void *pcs[pcs_n_max]; + int pcs_n; + + if (!bottom_q || !bottom_q->do_targetq || !top_q->do_targetq) { + return; + } + + dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q); + dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); + + _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); + TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { + if (it->dqoe_bottom_tq == bottom_tq) { + // that dispatch_sync() is known and validated + // 
move on + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + return; + } + } + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + + _dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq); + pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0); + + bool copy_top_label = false, copy_bottom_label = false; + size_t size = sizeof(struct dispatch_queue_order_entry_s) + + (size_t)pcs_n * sizeof(void *); + + if (_dispatch_queue_label_needs_free(top_q)) { + size += strlen(top_q->dq_label) + 1; + copy_top_label = true; + } + if (_dispatch_queue_label_needs_free(bottom_q)) { + size += strlen(bottom_q->dq_label) + 1; + copy_bottom_label = true; + } + + e = _dispatch_calloc(1, size); + e->dqoe_top_tq = top_tq; + e->dqoe_bottom_tq = bottom_tq; + e->dqoe_pcs_n = pcs_n; + memcpy(e->dqoe_pcs, pcs + pcs_skip, (size_t)pcs_n * sizeof(void *)); + // and then lay out the names of the queues at the end + char *p = (char *)(e->dqoe_pcs + pcs_n); + if (copy_top_label) { + e->dqoe_top_label = strcpy(p, top_q->dq_label); + p += strlen(p) + 1; + } else { + e->dqoe_top_label = top_q->dq_label ?: ""; + } + if (copy_bottom_label) { + e->dqoe_bottom_label = strcpy(p, bottom_q->dq_label); + } else { + e->dqoe_bottom_label = bottom_q->dq_label ?: ""; + } + + _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); + TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { + if (slowpath(it->dqoe_bottom_tq == bottom_tq)) { + // someone else validated it at the same time + // go away quickly + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + free(e); + return; + } + } + TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + + _dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock); + TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list); + 
_dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock); +} + +void +_dispatch_introspection_target_queue_changed(dispatch_queue_t dq) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_log( + "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging " + "cannot be used with code that changes the target " + "of a queue already targeted by other dispatch objects\n" + "queue %p[%s] was already targeted by other dispatch objects", + dq, dq->dq_label ?: ""); + _dispatch_introspection.debug_queue_inversions = false; + return; + } + + static char const * const reasons[] = { + [1] = "an initiator", + [2] = "a recipient", + [3] = "both an initiator and a recipient" + }; + bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head); + bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_top_head); + + if (as_top || as_bottom) { + _dispatch_log( + "BUG IN CLIENT OF LIBDISPATCH: queue inversion debugging " + "expects queues to not participate in dispatch_sync() " + "before their setup is complete\n" + "forgetting that queue 0x%p[%s] participated as %s of " + "a dispatch_sync", dq, dq->dq_label ?: "", + reasons[(int)as_top + 2 * (int)as_bottom]); + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); + _dispatch_introspection_queue_order_dispose(dq); + _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); + } +} + #endif // DISPATCH_INTROSPECTION diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 4ed951e7d..06504a8ba 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -29,10 +29,28 @@ #if DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_LIST \ - TAILQ_ENTRY(dispatch_queue_s) diq_list -#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE \ - sizeof(TAILQ_ENTRY(dispatch_queue_s)) +#define DISPATCH_INTROSPECTION_QUEUE_HEADER \ + TAILQ_ENTRY(dispatch_queue_s) diq_list; \ + dispatch_unfair_lock_s 
diq_order_top_head_lock; \ + dispatch_unfair_lock_s diq_order_bottom_head_lock; \ + TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_top_head; \ + TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_bottom_head +#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE \ + sizeof(struct { DISPATCH_INTROSPECTION_QUEUE_HEADER; }) + +struct dispatch_introspection_state_s { + TAILQ_HEAD(, dispatch_introspection_thread_s) threads; + TAILQ_HEAD(, dispatch_queue_s) queues; + dispatch_unfair_lock_s threads_lock; + dispatch_unfair_lock_s queues_lock; + + ptrdiff_t thread_queue_offset; + + // dispatch introspection features + bool debug_queue_inversions; // DISPATCH_DEBUG_QUEUE_INVERSIONS +}; + +extern struct dispatch_introspection_state_s _dispatch_introspection; void _dispatch_introspection_init(void); void _dispatch_introspection_thread_add(void); @@ -46,7 +64,10 @@ void _dispatch_introspection_queue_item_complete(dispatch_object_t dou); void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); -#if !__OBJC2__ && !defined(__cplusplus) +#if DISPATCH_PURE_C + +void _dispatch_sync_recurse_invoke(void *ctxt); +static dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE static inline void @@ -70,12 +91,41 @@ _dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { _dispatch_introspection_queue_item_dequeue(dq, dou); }; -#endif // !__OBJC2__ && !defined(__cplusplus) +void +_dispatch_introspection_order_record(dispatch_queue_t top_q, + dispatch_queue_t bottom_q); + +void +_dispatch_introspection_target_queue_changed(dispatch_queue_t dq); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq, + dispatch_function_t func) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + if (func != _dispatch_sync_recurse_invoke) { + _dispatch_introspection_order_record(dq, 
_dispatch_queue_get_current()); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, + dispatch_function_t func) +{ + if (!_dispatch_introspection.debug_queue_inversions) return; + if (func != _dispatch_sync_recurse_invoke) { + _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); + } +} + +#endif // DISPATCH_PURE_C #else // DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_LIST -#define DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE 0 +#define DISPATCH_INTROSPECTION_QUEUE_HEADER +#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE 0 #define _dispatch_introspection_init() #define _dispatch_introspection_thread_add() @@ -120,6 +170,21 @@ static inline void _dispatch_introspection_callout_return(void *ctxt DISPATCH_UNUSED, dispatch_function_t f DISPATCH_UNUSED) {} +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_target_queue_changed( + dispatch_queue_t dq DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_function_t func DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_function_t func DISPATCH_UNUSED) {} + #endif // DISPATCH_INTROSPECTION #endif // __DISPATCH_INTROSPECTION_INTERNAL__ diff --git a/src/io.c b/src/io.c index 0ad5b5373..e4f05aec9 100644 --- a/src/io.c +++ b/src/io.c @@ -24,14 +24,7 @@ #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif -#if DISPATCH_IO_DEBUG -#define _dispatch_fd_debug(msg, fd, args...) \ - _dispatch_debug("fd[0x%x]: " msg, (fd), ##args) -#else -#define _dispatch_fd_debug(msg, fd, args...) 
-#endif - -#if USE_OBJC +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_io_data_release(x) _dispatch_objc_release(x) #else @@ -75,7 +68,7 @@ static void _dispatch_disk_enqueue_operation(dispatch_disk_t dsk, dispatch_operation_t operation, dispatch_data_t data); static void _dispatch_stream_cleanup_operations(dispatch_stream_t stream, dispatch_io_t channel); -static void _dispatch_disk_cleanup_operations(dispatch_disk_t disk, +static void _dispatch_disk_cleanup_inactive_operations(dispatch_disk_t disk, dispatch_io_t channel); static void _dispatch_stream_source_handler(void *ctx); static void _dispatch_stream_queue_handler(void *ctx); @@ -119,6 +112,38 @@ enum { #define _dispatch_io_Block_copy(x) \ ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) +#pragma mark - +#pragma mark dispatch_io_debug + +#if DISPATCH_IO_DEBUG +#if !DISPATCH_DEBUG +#define _dispatch_io_log(x, ...) do { \ + _dispatch_log("%llu\t%p\t" x, _dispatch_absolute_time(), \ + (void *)_dispatch_thread_self(), ##__VA_ARGS__); \ + } while (0) +#ifdef _dispatch_object_debug +#undef _dispatch_object_debug +#define _dispatch_object_debug dispatch_debug +#pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif +#else +#define _dispatch_io_log(x, ...) _dispatch_debug(x, ##__VA_ARGS__) +#endif // DISPATCH_DEBUG +#else +#define _dispatch_io_log(x, ...) +#endif // DISPATCH_IO_DEBUG + +#define _dispatch_fd_debug(msg, fd, ...) \ + _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) +#define _dispatch_op_debug(msg, op, ...) \ + _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) +#define _dispatch_channel_debug(msg, channel, ...) \ + _dispatch_io_log("channel[%p]: " msg, channel, ##__VA_ARGS__) +#define _dispatch_fd_entry_debug(msg, fd_entry, ...) \ + _dispatch_io_log("fd_entry[%p]: " msg, fd_entry, ##__VA_ARGS__) +#define _dispatch_disk_debug(msg, disk, ...) 
\ + _dispatch_io_log("disk[%p]: " msg, disk, ##__VA_ARGS__) + #pragma mark - #pragma mark dispatch_io_hashtables @@ -133,6 +158,8 @@ static dispatch_once_t _dispatch_io_devs_lockq_pred; static dispatch_queue_t _dispatch_io_devs_lockq; static dispatch_queue_t _dispatch_io_fds_lockq; +static char const * const _dispatch_io_key = "io"; + static void _dispatch_io_fds_lockq_init(void *context DISPATCH_UNUSED) { @@ -225,7 +252,8 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_fd_debug("cleanup handler invoke", -1); + _dispatch_channel_debug("cleanup handler invoke: err %d", + channel, err); cleanup_handler(err); }); _dispatch_release(queue); @@ -314,11 +342,11 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, dispatch_queue_t queue, void (^cleanup_handler)(int)) { if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { - return NULL; + return DISPATCH_BAD_INPUT; } - _dispatch_fd_debug("io create", fd); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; + _dispatch_channel_debug("create", channel); channel->fd_actual = fd; dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -365,16 +393,16 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, { if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || !(*path == '/')) { - return NULL; + return DISPATCH_BAD_INPUT; } size_t pathlen = strlen(path); dispatch_io_path_data_t path_data = malloc(sizeof(*path_data) + pathlen+1); if (!path_data) { - return NULL; + return DISPATCH_OUT_OF_MEMORY; } - _dispatch_fd_debug("io create with path %s", -1, path); dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; + _dispatch_channel_debug("create with path %s", channel, path); channel->fd_actual = -1; path_data->channel = channel; path_data->oflag = oflag; @@ -387,9 +415,11 @@ 
dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int err = 0; struct stat st; _dispatch_io_syscall_switch_noerr(err, - (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW || - (path_data->oflag & O_SYMLINK) == O_SYMLINK ? - lstat(path_data->path, &st) : stat(path_data->path, &st), + (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW +#ifndef __linux__ + || (path_data->oflag & O_SYMLINK) == O_SYMLINK +#endif + ? lstat(path_data->path, &st) : stat(path_data->path, &st), case 0: err = _dispatch_io_validate_type(channel, st.st_mode); break; @@ -455,10 +485,10 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, dispatch_queue_t queue, void (^cleanup_handler)(int error)) { if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { - return NULL; + return DISPATCH_BAD_INPUT; } - _dispatch_fd_debug("io create with io %p", -1, in_channel); dispatch_io_t channel = _dispatch_io_create(type); + _dispatch_channel_debug("create with channel %p", channel, in_channel); dispatch_suspend(channel->queue); _dispatch_retain(queue); _dispatch_retain(channel); @@ -565,7 +595,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set high water", channel->fd); + _dispatch_channel_debug("set high water: %zu", channel, high_water); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -579,7 +609,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set low water", channel->fd); + _dispatch_channel_debug("set low water: %zu", channel, low_water); if (channel->params.high < low_water) { channel->params.high = low_water ? 
low_water : 1; } @@ -594,7 +624,7 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_fd_debug("io set interval", channel->fd); + _dispatch_channel_debug("set interval: %llu", channel, interval); channel->params.interval = interval < INT64_MAX ? interval : INT64_MAX; channel->params.interval_flags = flags; _dispatch_release(channel); @@ -622,10 +652,12 @@ dispatch_io_get_descriptor(dispatch_io_t channel) return -1; } dispatch_fd_t fd = channel->fd_actual; - if (fd == -1 && _dispatch_thread_getspecific(dispatch_io_key) == channel && - !_dispatch_io_get_error(NULL, channel, false)) { - dispatch_fd_entry_t fd_entry = channel->fd_entry; - (void)_dispatch_fd_entry_open(fd_entry, channel); + if (fd == -1 && !_dispatch_io_get_error(NULL, channel, false)) { + dispatch_thread_context_t ctxt = + _dispatch_thread_context_find(_dispatch_io_key); + if (ctxt && ctxt->dtc_io_in_barrier == channel) { + (void)_dispatch_fd_entry_open(channel->fd_entry, channel); + } } return channel->fd_actual; } @@ -636,15 +668,15 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_fd_debug("io stop", channel->fd); - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); + _dispatch_channel_debug("stop", channel); + (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_fd_debug("io stop cleanup", channel->fd); + _dispatch_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { channel->fd_entry = NULL; @@ -655,8 +687,8 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_retain(channel); 
dispatch_async(_dispatch_io_fds_lockq, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_fd_debug("io stop after close cleanup", - channel->fd); + _dispatch_channel_debug("stop cleanup after close", + channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); TAILQ_FOREACH(fdi, &_dispatch_io_fds[hash], fd_list) { @@ -691,9 +723,9 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_fd_debug("io close", channel->fd); + _dispatch_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { - (void)dispatch_atomic_or2o(channel, atomic_flags, DIO_CLOSED, + (void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { @@ -719,10 +751,15 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier) dispatch_async(barrier_queue, ^{ dispatch_suspend(barrier_queue); dispatch_group_notify(barrier_group, io_q, ^{ + dispatch_thread_context_s io_ctxt = { + .dtc_key = _dispatch_io_key, + .dtc_io_in_barrier = channel, + }; + _dispatch_object_debug(channel, "%s", __func__); - _dispatch_thread_setspecific(dispatch_io_key, channel); + _dispatch_thread_context_push(&io_ctxt); barrier(); - _dispatch_thread_setspecific(dispatch_io_key, NULL); + _dispatch_thread_context_pop(&io_ctxt); dispatch_resume(barrier_queue); _dispatch_release(channel); }); @@ -956,10 +993,6 @@ _dispatch_operation_create(dispatch_op_direction_t direction, { // On channel queue dispatch_assert(direction < DOP_DIR_MAX); - _dispatch_fd_debug("operation create", channel->fd); -#if DISPATCH_IO_DEBUG - int fd = channel->fd; -#endif // Safe to call _dispatch_io_get_error() with channel->fd_entry since // that can only be NULL if atomic_flags are set rdar://problem/8362514 int err = _dispatch_io_get_error(NULL, channel, false); @@ 
-974,7 +1007,8 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_channel_debug("IO handler invoke: err %d", channel, + err); handler(true, d, err); _dispatch_io_data_release(data); }); @@ -984,6 +1018,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } dispatch_operation_t op = _dispatch_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); + _dispatch_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL); @@ -1012,6 +1047,7 @@ void _dispatch_operation_dispose(dispatch_operation_t op) { _dispatch_object_debug(op, "%s", __func__); + _dispatch_op_debug("dispose", op); // Deliver the data if there's any if (op->fd_entry) { _dispatch_operation_deliver_data(op, DOP_DONE); @@ -1038,6 +1074,7 @@ _dispatch_operation_dispose(dispatch_operation_t op) dispatch_release(op->op_q); } Block_release(op->handler); + _dispatch_op_debug("disposed", op); } static void @@ -1060,6 +1097,7 @@ _dispatch_operation_enqueue(dispatch_operation_t op, handler(true, d, err); _dispatch_io_data_release(data); }); + _dispatch_op_debug("release -> %d, err %d", op, op->do_ref_cnt, err); _dispatch_release(op); return; } @@ -1087,13 +1125,14 @@ _dispatch_operation_should_enqueue(dispatch_operation_t op, dispatch_queue_t tq, dispatch_data_t data) { // On stream queue or disk queue - _dispatch_fd_debug("enqueue operation", op->fd_entry->fd); + _dispatch_op_debug("enqueue", op); _dispatch_io_data_retain(data); op->data = data; int err = _dispatch_io_get_error(op, NULL, true); if (err) { op->err = err; // Final release + _dispatch_op_debug("release -> %d, err %d", op, op->do_ref_cnt, err); _dispatch_release(op); return false; } @@ -1230,7 +1269,6 @@ 
_dispatch_fd_entry_init_async(dispatch_fd_t fd, dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, _dispatch_io_fds_lockq_init); dispatch_async(_dispatch_io_fds_lockq, ^{ - _dispatch_fd_debug("fd entry init", fd); dispatch_fd_entry_t fd_entry = NULL; // Check to see if there is an existing entry for the given fd uintptr_t hash = DIO_HASH(fd); @@ -1246,8 +1284,9 @@ _dispatch_fd_entry_init_async(dispatch_fd_t fd, // If we did not find an existing entry, create one fd_entry = _dispatch_fd_entry_create_with_fd(fd, hash); } + _dispatch_fd_entry_debug("init", fd_entry); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_fd_debug("fd entry init completion", fd); + _dispatch_fd_entry_debug("init completion", fd_entry); completion_callback(fd_entry); // stat() is complete, release reference to fd_entry _dispatch_fd_entry_release(fd_entry); @@ -1275,16 +1314,16 @@ static dispatch_fd_entry_t _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) { // On fds lock queue - _dispatch_fd_debug("fd entry create", fd); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); + _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); fd_entry->fd = fd; TAILQ_INSERT_TAIL(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); dispatch_async(fd_entry->barrier_queue, ^{ - _dispatch_fd_debug("fd entry stat", fd); + _dispatch_fd_entry_debug("stat", fd_entry); int err, orig_flags, orig_nosigpipe = -1; struct stat st; _dispatch_io_syscall_switch(err, @@ -1356,7 +1395,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ if (!fd_entry->disk) { - _dispatch_fd_debug("close queue fd_entry cleanup", fd); + _dispatch_fd_entry_debug("close queue cleanup", fd_entry); dispatch_op_direction_t dir; for (dir = 
0; dir < DOP_DIR_MAX; dir++) { _dispatch_stream_dispose(fd_entry, dir); @@ -1374,11 +1413,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // source cancels it and suspends the close queue. Freeing the fd_entry // structure must happen after the source cancel handler has finished dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue release", fd); + _dispatch_fd_entry_debug("close queue release", fd_entry); dispatch_release(fd_entry->close_queue); - _dispatch_fd_debug("barrier queue release", fd); + _dispatch_fd_entry_debug("barrier queue release", fd_entry); dispatch_release(fd_entry->barrier_queue); - _dispatch_fd_debug("barrier group release", fd); + _dispatch_fd_entry_debug("barrier group release", fd_entry); dispatch_release(fd_entry->barrier_group); if (fd_entry->orig_flags != -1) { _dispatch_io_syscall( @@ -1407,9 +1446,9 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, dev_t dev, mode_t mode) { // On devs lock queue - _dispatch_fd_debug("fd entry create with path %s", -1, path_data->path); dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( path_data->channel->queue); + _dispatch_fd_entry_debug("create: path %s", fd_entry, path_data->path); if (S_ISREG(mode)) { _dispatch_disk_init(fd_entry, major(dev)); } else { @@ -1428,7 +1467,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, // that the channel associated with this entry has been closed and that // all operations associated with this entry have been freed dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue fd_entry cleanup", -1); + _dispatch_fd_entry_debug("close queue cleanup", fd_entry); if (!fd_entry->disk) { dispatch_op_direction_t dir; for (dir = 0; dir < DOP_DIR_MAX; dir++) { @@ -1447,7 +1486,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, } }); dispatch_async(fd_entry->close_queue, ^{ - _dispatch_fd_debug("close queue release", -1); + 
_dispatch_fd_entry_debug("close queue release", fd_entry); dispatch_release(fd_entry->close_queue); dispatch_release(fd_entry->barrier_queue); dispatch_release(fd_entry->barrier_group); @@ -1477,10 +1516,10 @@ _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) if (err == EINTR) { goto open; } - (void)dispatch_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); + (void)os_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); return err; } - if (!dispatch_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { + if (!os_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { // Lost the race with another open _dispatch_fd_entry_guarded_close(fd_entry, fd); } else { @@ -1500,7 +1539,7 @@ _dispatch_fd_entry_cleanup_operations(dispatch_fd_entry_t fd_entry, } _dispatch_fd_entry_retain(fd_entry); dispatch_async(fd_entry->disk->pick_queue, ^{ - _dispatch_disk_cleanup_operations(fd_entry->disk, channel); + _dispatch_disk_cleanup_inactive_operations(fd_entry->disk, channel); _dispatch_fd_entry_release(fd_entry); if (channel) { _dispatch_release(channel); @@ -1599,7 +1638,8 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); char label[45]; - snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", dev); + snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", + (int)dev); disk->pick_queue = dispatch_queue_create(label, NULL); TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); out: @@ -1671,7 +1711,7 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, { // On stream queue _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("complete operation", op->fd_entry->fd); + _dispatch_op_debug("complete: stream %p", op, stream); TAILQ_REMOVE(&stream->operations[op->params.type], op, operation_list); if (op == stream->op) { stream->op = NULL; @@ -1680,6 +1720,7 @@ _dispatch_stream_complete_operation(dispatch_stream_t stream, 
dispatch_source_cancel(op->timer); } // Final release will deliver any pending data + _dispatch_op_debug("release -> %d (stream complete)", op, op->do_ref_cnt); _dispatch_release(op); } @@ -1688,7 +1729,7 @@ _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) { // On pick queue _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("complete operation", op->fd_entry->fd); + _dispatch_op_debug("complete: disk %p", op, disk); // Current request is always the last op returned if (disk->cur_rq == op) { disk->cur_rq = TAILQ_PREV(op, dispatch_disk_operations_s, @@ -1707,6 +1748,7 @@ _dispatch_disk_complete_operation(dispatch_disk_t disk, dispatch_operation_t op) dispatch_source_cancel(op->timer); } // Final release will deliver any pending data + _dispatch_op_debug("release -> %d (disk complete)", op, op->do_ref_cnt); _dispatch_release(op); } @@ -1794,18 +1836,34 @@ _dispatch_stream_cleanup_operations(dispatch_stream_t stream, } } -static void -_dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel) +static inline void +_dispatch_disk_cleanup_specified_operations(dispatch_disk_t disk, + dispatch_io_t channel, bool inactive_only) { // On pick queue dispatch_operation_t op, tmp; TAILQ_FOREACH_SAFE(op, &disk->operations, operation_list, tmp) { + if (inactive_only && op->active) continue; if (!channel || op->channel == channel) { + _dispatch_op_debug("cleanup: disk %p", op, disk); _dispatch_disk_complete_operation(disk, op); } } } +static void +_dispatch_disk_cleanup_operations(dispatch_disk_t disk, dispatch_io_t channel) +{ + _dispatch_disk_cleanup_specified_operations(disk, channel, false); +} + +static void +_dispatch_disk_cleanup_inactive_operations(dispatch_disk_t disk, + dispatch_io_t channel) +{ + _dispatch_disk_cleanup_specified_operations(disk, channel, true); +} + #pragma mark - #pragma mark dispatch_stream_handler/dispatch_disk_handler @@ -1817,7 +1875,7 @@ _dispatch_stream_source(dispatch_stream_t 
stream, dispatch_operation_t op) return stream->source; } dispatch_fd_t fd = op->fd_entry->fd; - _dispatch_fd_debug("stream source create", fd); + _dispatch_op_debug("stream source create", op); dispatch_source_t source = NULL; if (op->direction == DOP_DIR_READ) { source = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, @@ -1836,7 +1894,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // unregistered dispatch_queue_t close_queue = op->fd_entry->close_queue; dispatch_source_set_cancel_handler(source, ^{ - _dispatch_fd_debug("stream source cancel", fd); + _dispatch_op_debug("stream source cancel", op); dispatch_resume(close_queue); }); stream->source = source; @@ -1884,13 +1942,13 @@ _dispatch_stream_handler(void *ctx) goto pick; } stream->op = op; - _dispatch_fd_debug("stream handler", op->fd_entry->fd); + _dispatch_op_debug("stream handler", op); dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_fd_debug("initial delivery", op->fd_entry->fd); + _dispatch_op_debug("initial delivery", op); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // TODO: perform on the operation target queue to get correct priority @@ -1948,7 +2006,7 @@ _dispatch_disk_handler(void *ctx) if (disk->io_active) { return; } - _dispatch_fd_debug("disk handler", -1); + _dispatch_disk_debug("disk handler", disk); dispatch_operation_t op; size_t i = disk->free_idx, j = disk->req_idx; if (j <= i) { @@ -1964,8 +2022,10 @@ _dispatch_disk_handler(void *ctx) continue; } _dispatch_retain(op); + _dispatch_op_debug("retain -> %d", op, op->do_ref_cnt + 1); disk->advise_list[i%disk->advise_list_depth] = op; op->active = true; + _dispatch_op_debug("activate: disk %p", op, disk); _dispatch_object_debug(op, "%s", __func__); } else { // No more operations to get @@ -1977,6 +2037,7 @@ 
_dispatch_disk_handler(void *ctx) op = disk->advise_list[disk->req_idx]; if (op) { disk->io_active = true; + _dispatch_op_debug("async perform: disk %p", op, disk); dispatch_async_f(op->do_targetq, disk, _dispatch_disk_perform); } } @@ -1985,8 +2046,8 @@ static void _dispatch_disk_perform(void *ctxt) { dispatch_disk_t disk = ctxt; + _dispatch_disk_debug("disk perform", disk); size_t chunk_size = dispatch_io_defaults.chunk_size; - _dispatch_fd_debug("disk perform", -1); dispatch_operation_t op; size_t i = disk->advise_idx, j = disk->free_idx; if (j <= i) { @@ -2010,7 +2071,7 @@ _dispatch_disk_perform(void *ctxt) // For performance analysis if (!op->total && dispatch_io_defaults.initial_delivery) { // Empty delivery to signal the start of the operation - _dispatch_fd_debug("initial delivery", op->fd_entry->fd); + _dispatch_op_debug("initial delivery", op); _dispatch_operation_deliver_data(op, DOP_DELIVER); } // Advise two chunks if the list only has one element and this is the @@ -2026,7 +2087,9 @@ _dispatch_disk_perform(void *ctxt) int result = _dispatch_operation_perform(op); disk->advise_list[disk->req_idx] = NULL; disk->req_idx = (++disk->req_idx)%disk->advise_list_depth; + _dispatch_op_debug("async perform completion: disk %p", op, disk); dispatch_async(disk->pick_queue, ^{ + _dispatch_op_debug("perform completion", op); switch (result) { case DISPATCH_OP_DELIVER: _dispatch_operation_deliver_data(op, DOP_DEFAULT); @@ -2048,12 +2111,15 @@ _dispatch_disk_perform(void *ctxt) dispatch_assert(result); break; } + _dispatch_op_debug("deactivate: disk %p", op, disk); op->active = false; disk->io_active = false; _dispatch_disk_handler(disk); // Balancing the retain in _dispatch_disk_handler. 
Note that op must be // released at the very end, since it might hold the last reference to // the disk + _dispatch_op_debug("release -> %d (disk perform complete)", op, + op->do_ref_cnt); _dispatch_release(op); }); } @@ -2064,6 +2130,16 @@ _dispatch_disk_perform(void *ctxt) static void _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) { + _dispatch_op_debug("advise", op); + if (_dispatch_io_get_error(op, NULL, true)) return; +#ifdef __linux__ + // linux does not support fcntl (F_RDAVISE) + // define necessary datastructure and use readahead + struct radvisory { + off_t ra_offset; + int ra_count; + }; +#endif int err; struct radvisory advise; // No point in issuing a read advise for the next chunk if we are already @@ -2083,6 +2159,13 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; +#ifdef __linux__ + _dispatch_io_syscall_switch(err, + readahead(op->fd_entry->fd, advise.ra_offset, advise.ra_count), + case EINVAL: break; // fd does refer to a non-supported filetype + default: (void)dispatch_assume_zero(err); break; + ); +#else _dispatch_io_syscall_switch(err, fcntl(op->fd_entry->fd, F_RDADVISE, &advise), case EFBIG: break; // advised past the end of the file rdar://10415691 @@ -2090,11 +2173,13 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) // TODO: set disk status on error default: (void)dispatch_assume_zero(err); break; ); +#endif } static int _dispatch_operation_perform(dispatch_operation_t op) { + _dispatch_op_debug("perform", op); int err = _dispatch_io_get_error(op, NULL, true); if (err) { goto error; @@ -2123,7 +2208,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_siz = max_buf_siz; } op->buf = valloc(op->buf_siz); - _dispatch_fd_debug("buffer allocated", op->fd_entry->fd); + _dispatch_op_debug("buffer allocated", op); } else if (op->direction == DOP_DIR_WRITE) { // Always write the first 
data piece, if that is smaller than a // chunk, accumulate further data pieces until chunk size is reached @@ -2149,7 +2234,7 @@ _dispatch_operation_perform(dispatch_operation_t op) op->buf_data = dispatch_data_create_map(d, (const void**)&op->buf, NULL); _dispatch_io_data_release(d); - _dispatch_fd_debug("buffer mapped", op->fd_entry->fd); + _dispatch_op_debug("buffer mapped", op); } } if (op->fd_entry->fd == -1) { @@ -2186,7 +2271,7 @@ _dispatch_operation_perform(dispatch_operation_t op) } // EOF is indicated by two handler invocations if (processed == 0) { - _dispatch_fd_debug("EOF", op->fd_entry->fd); + _dispatch_op_debug("performed: EOF", op); return DISPATCH_OP_DELIVER_AND_COMPLETE; } op->buf_len += (size_t)processed; @@ -2202,7 +2287,7 @@ _dispatch_operation_perform(dispatch_operation_t op) if (err == EAGAIN) { // For disk based files with blocking I/O we should never get EAGAIN dispatch_assert(!op->fd_entry->disk); - _dispatch_fd_debug("EAGAIN %d", op->fd_entry->fd, err); + _dispatch_op_debug("performed: EAGAIN", op); if (op->direction == DOP_DIR_READ && op->total && op->channel == op->fd_entry->convenience_channel) { // Convenience read with available data completes on EAGAIN @@ -2210,12 +2295,13 @@ _dispatch_operation_perform(dispatch_operation_t op) } return DISPATCH_OP_RESUME; } + _dispatch_op_debug("performed: err %d", op, err); op->err = err; switch (err) { case ECANCELED: return DISPATCH_OP_ERR; case EBADF: - (void)dispatch_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed); + (void)os_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed); return DISPATCH_OP_FD_ERR; default: return DISPATCH_OP_COMPLETE; @@ -2239,7 +2325,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, deliver = true; } else if (op->buf_len < op->buf_siz) { // Request buffer is not yet used up - _dispatch_fd_debug("buffer data", op->fd_entry->fd); + _dispatch_op_debug("buffer data: undelivered %zu", op, undelivered); return; } } else { @@ -2293,17 +2379,14 @@ 
_dispatch_operation_deliver_data(dispatch_operation_t op, } if (!deliver || ((flags & DOP_NO_EMPTY) && !dispatch_data_get_size(data))) { op->undelivered = undelivered; - _dispatch_fd_debug("buffer data", op->fd_entry->fd); + _dispatch_op_debug("buffer data: undelivered %zu", op, undelivered); return; } op->undelivered = 0; _dispatch_object_debug(op, "%s", __func__); - _dispatch_fd_debug("deliver data", op->fd_entry->fd); + _dispatch_op_debug("deliver data", op); dispatch_op_direction_t direction = op->direction; dispatch_io_handler_t handler = op->handler; -#if DISPATCH_IO_DEBUG - int fd = op->fd_entry->fd; -#endif dispatch_fd_entry_t fd_entry = op->fd_entry; _dispatch_fd_entry_retain(fd_entry); dispatch_io_t channel = op->channel; @@ -2315,7 +2398,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (done) { if (direction == DOP_DIR_READ && err) { if (dispatch_data_get_size(d)) { - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_op_debug("IO handler invoke", op); handler(false, d, 0); } d = NULL; @@ -2323,7 +2406,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, d = NULL; } } - _dispatch_fd_debug("IO handler invoke", fd); + _dispatch_op_debug("IO handler invoke: err %d", op, err); handler(done, d, err); _dispatch_release(channel); _dispatch_fd_entry_release(fd_entry); @@ -2349,7 +2432,7 @@ _dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) channel->barrier_group, channel->err, channel->params.low, channel->params.high, channel->params.interval_flags & DISPATCH_IO_STRICT_INTERVAL ? "(strict)" : "", - channel->params.interval); + (unsigned long long) channel->params.interval); } size_t @@ -2380,10 +2463,11 @@ _dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, op->channel, op->op_q, oqtarget && oqtarget->dq_label ? oqtarget->dq_label : "", oqtarget, target && target->dq_label ? 
- target->dq_label : "", target, op->offset, op->length, op->total, - op->undelivered + op->buf_len, op->flags, op->err, op->params.low, - op->params.high, op->params.interval_flags & - DISPATCH_IO_STRICT_INTERVAL ? "(strict)" : "", op->params.interval); + target->dq_label : "", target, (long long)op->offset, op->length, + op->total, op->undelivered + op->buf_len, op->flags, op->err, + op->params.low, op->params.high, op->params.interval_flags & + DISPATCH_IO_STRICT_INTERVAL ? "(strict)" : "", + (unsigned long long)op->params.interval); } size_t diff --git a/src/io_internal.h b/src/io_internal.h index ecdc77583..ad8259a1d 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -66,8 +66,8 @@ typedef unsigned int dispatch_op_flags_t; #define DIO_CLOSED 1u // channel has been closed #define DIO_STOPPED 2u // channel has been stopped (implies closed) -DISPATCH_DECL_INTERNAL(dispatch_operation); -DISPATCH_DECL_INTERNAL(dispatch_disk); +DISPATCH_INTERNAL_CLASS_DECL(operation); +DISPATCH_INTERNAL_CLASS_DECL(disk); struct dispatch_stream_s { dispatch_queue_t dq; @@ -94,9 +94,8 @@ struct dispatch_stat_s { mode_t mode; }; -DISPATCH_CLASS_DECL(disk); struct dispatch_disk_s { - DISPATCH_STRUCT_HEADER(disk); + DISPATCH_OBJECT_HEADER(disk); TAILQ_HEAD(dispatch_disk_operations_s, dispatch_operation_s) operations; dispatch_operation_t cur_rq; dispatch_queue_t pick_queue; @@ -141,9 +140,8 @@ typedef struct dispatch_io_param_s { unsigned long interval_flags; } dispatch_io_param_s; -DISPATCH_CLASS_DECL(operation); struct dispatch_operation_s { - DISPATCH_STRUCT_HEADER(operation); + DISPATCH_OBJECT_HEADER(operation); dispatch_queue_t op_q; dispatch_op_direction_t direction; // READ OR WRITE dispatch_io_param_s params; @@ -167,7 +165,7 @@ struct dispatch_operation_s { DISPATCH_CLASS_DECL(io); struct dispatch_io_s { - DISPATCH_STRUCT_HEADER(io); + DISPATCH_OBJECT_HEADER(io); dispatch_queue_t queue, barrier_queue; dispatch_group_t barrier_group; dispatch_io_param_s params; diff --git 
a/src/libdispatch.codes b/src/libdispatch.codes new file mode 100644 index 000000000..9aca7e16c --- /dev/null +++ b/src/libdispatch.codes @@ -0,0 +1,13 @@ +0x2bdc0008 DISPATCH_ARIADNE_dispatch_main + +0x2e010004 DISPATCH_VOUCHER_dc_push +0x2e010008 DISPATCH_VOUCHER_dc_pop +0x2e01000c DISPATCH_VOUCHER_dmsg_push +0x2e010010 DISPATCH_VOUCHER_dmsg_pop +0x2e010018 DISPATCH_VOUCHER_activity_adopt + +0x2e020004 DISPATCH_PERF_non_leaf_retarget +0x2e020008 DISPATCH_PERF_post_activate_mutation +0x2e02000c DISPATCH_PERF_post_activate_mutation +0x2e020010 DISPATCH_PERF_delayed_registration +0x2e020014 DISPATCH_PERF_mutable_target diff --git a/src/object.c b/src/object.c index 4089ba0c5..1928df53f 100644 --- a/src/object.c +++ b/src/object.c @@ -64,7 +64,7 @@ _os_object_retain_with_resurrect(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_inc(obj); if (slowpath(xref_cnt < 0)) { - _OS_OBJECT_CLIENT_CRASH("Resurrection of an overreleased object"); + _OS_OBJECT_CLIENT_CRASH("Resurrection of an over-released object"); } if (slowpath(xref_cnt == 0)) { _os_object_retain_internal(obj); @@ -100,7 +100,7 @@ _os_object_retain_weak(_os_object_t obj) if (slowpath(xref_cnt < -1)) { goto overrelease; } - if (slowpath(!dispatch_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, + if (slowpath(!os_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, xref_cnt + 1, &xref_cnt, relaxed))) { goto retry; } @@ -128,7 +128,15 @@ _os_object_allows_weak_reference(_os_object_t obj) void * _dispatch_alloc(const void *vtable, size_t size) { +#if OS_OBJECT_HAVE_OBJC1 + const struct dispatch_object_vtable_s *_vtable = vtable; + dispatch_object_t dou; + dou._os_obj = _os_object_alloc_realized(_vtable->_os_obj_objc_isa, size); + dou._do->do_vtable = vtable; + return dou._do; +#else return _os_object_alloc_realized(vtable, size); +#endif } void @@ -151,37 +159,40 @@ _dispatch_dealloc(dispatch_object_t dou) dispatch_queue_t tq = dou._do->do_targetq; dispatch_function_t func = dou._do->do_finalizer; void 
*ctxt = dou._do->do_ctxt; - +#if OS_OBJECT_HAVE_OBJC1 + // so that ddt doesn't pick up bad objects when malloc reuses this memory + dou._do->do_vtable = NULL; +#endif _os_object_dealloc(dou._os_obj); if (func && ctxt) { dispatch_async_f(tq, ctxt, func); } - _dispatch_release(tq); + _dispatch_release_tailcall(tq); } +#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) { - if (slowpath(DISPATCH_OBJECT_SUSPENDED(dou._do))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH("Release of a suspended object"); + unsigned long metatype = dx_metatype(dou._do); + if (metatype == _DISPATCH_QUEUE_TYPE || metatype == _DISPATCH_SOURCE_TYPE) { + _dispatch_queue_xref_dispose(dou._dq); } -#if !USE_OBJC if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); - } else if (dou._dq->do_vtable == DISPATCH_VTABLE(queue_runloop)) { + } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) { _dispatch_runloop_queue_xref_dispose(dou._dq); } - return _dispatch_release(dou._os_obj); -#endif + return _dispatch_release_tailcall(dou._os_obj); } +#endif void _dispatch_dispose(dispatch_object_t dou) { if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CRASH("Release while enqueued"); + DISPATCH_INTERNAL_CRASH(dou._do->do_next, "Release while enqueued"); } dx_dispose(dou._do); return _dispatch_dealloc(dou); @@ -192,7 +203,7 @@ dispatch_get_context(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return NULL; } return dou._do->do_ctxt; @@ -203,7 +214,7 @@ dispatch_set_context(dispatch_object_t dou, void *context) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == 
DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return; } dou._do->do_ctxt = context; @@ -214,69 +225,57 @@ dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { return; } dou._do->do_finalizer = finalizer; } void -dispatch_suspend(dispatch_object_t dou) +dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) { - DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; + DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq); + if (dx_vtable(dou._do)->do_set_targetq) { + dx_vtable(dou._do)->do_set_targetq(dou._do, tq); + } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (slowpath(!tq)) { + tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); + } + _dispatch_object_set_target_queue_inline(dou._do, tq); } - // rdar://8181908 explains why we need to do an internal retain at every - // suspension. 
- (void)dispatch_atomic_add2o(dou._do, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, acquire); - _dispatch_retain(dou._do); } -DISPATCH_NOINLINE -static void -_dispatch_resume_slow(dispatch_object_t dou) +void +dispatch_activate(dispatch_object_t dou) { - _dispatch_wakeup(dou._do); - // Balancing the retain() done in suspend() for rdar://8181908 - _dispatch_release(dou._do); + DISPATCH_OBJECT_TFB(_dispatch_objc_activate, dou); + if (dx_vtable(dou._do)->do_resume) { + dx_vtable(dou._do)->do_resume(dou._do, true); + } +} + +void +dispatch_suspend(dispatch_object_t dou) +{ + DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); + if (dx_vtable(dou._do)->do_suspend) { + dx_vtable(dou._do)->do_suspend(dou._do); + } } void dispatch_resume(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); - // Global objects cannot be suspended or resumed. This also has the - // side effect of saturating the suspend count of an object and - // guarding against resuming due to overflow. - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; - } - // Check the previous value of the suspend count. If the previous - // value was a single suspend interval, the object should be resumed. - // If the previous value was less than the suspend interval, the object - // has been over-resumed. 
- unsigned int suspend_cnt = dispatch_atomic_sub_orig2o(dou._do, - do_suspend_cnt, DISPATCH_OBJECT_SUSPEND_INTERVAL, release); - if (fastpath(suspend_cnt > DISPATCH_OBJECT_SUSPEND_INTERVAL)) { - // Balancing the retain() done in suspend() for rdar://8181908 - return _dispatch_release(dou._do); - } - if (fastpath(suspend_cnt == DISPATCH_OBJECT_SUSPEND_INTERVAL)) { - return _dispatch_resume_slow(dou); + if (dx_vtable(dou._do)->do_resume) { + dx_vtable(dou._do)->do_resume(dou._do, false); } - DISPATCH_CLIENT_CRASH("Over-resume of an object"); } size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, " - "suspend_cnt = 0x%x, locked = %d, ", dou._do->do_xref_cnt + 1, - dou._do->do_ref_cnt + 1, - dou._do->do_suspend_cnt / DISPATCH_OBJECT_SUSPEND_INTERVAL, - dou._do->do_suspend_cnt & 1); + return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, ", + dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1); } diff --git a/src/object.m b/src/object.m index 1a98d7e0e..323c98b47 100644 --- a/src/object.m +++ b/src/object.m @@ -22,104 +22,16 @@ #if USE_OBJC -#if !__OBJC2__ -#error "Cannot build with legacy ObjC runtime" -#endif #if _OS_OBJECT_OBJC_ARC #error "Cannot build with ARC" #endif +#if defined(__OBJC_GC__) +#error Objective C GC isn't supported anymore +#endif #include #include - -#pragma mark - -#pragma mark _os_object_gc - -#if __OBJC_GC__ -#include -#include - -static bool _os_object_have_gc; -static malloc_zone_t *_os_object_gc_zone; - -static void -_os_object_gc_init(void) -{ - _os_object_have_gc = objc_collectingEnabled(); - if (slowpath(_os_object_have_gc)) { - _os_object_gc_zone = objc_collectableZone(); - (void)[OS_OBJECT_CLASS(object) class]; // OS_object class realization - } -} - -static _os_object_t -_os_object_make_uncollectable(_os_object_t obj) -{ - if (slowpath(_os_object_have_gc)) { - auto_zone_retain(_os_object_gc_zone, obj); - } - return obj; -} - -static 
_os_object_t -_os_object_make_collectable(_os_object_t obj) -{ - if (slowpath(_os_object_have_gc)) { - auto_zone_release(_os_object_gc_zone, obj); - } - return obj; -} - -DISPATCH_NOINLINE -static id -_os_objc_gc_retain(id obj) -{ - if (fastpath(obj)) { - auto_zone_retain(_os_object_gc_zone, obj); - } - return obj; -} - -DISPATCH_NOINLINE -static void -_os_objc_gc_release(id obj) -{ - if (fastpath(obj)) { - (void)auto_zone_release(_os_object_gc_zone, obj); - } - asm(""); // prevent tailcall -} - -DISPATCH_NOINLINE -static id -_os_object_gc_retain(id obj) -{ - if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { - return _os_object_retain(obj); - } else { - return _os_objc_gc_retain(obj); - } -} - -DISPATCH_NOINLINE -static void -_os_object_gc_release(id obj) -{ - if ([obj isKindOfClass:OS_OBJECT_OBJC_CLASS(object)]) { - return _os_object_release(obj); - } else { - return _os_objc_gc_release(obj); - } -} - -#else // __OBJC_GC__ -#define _os_object_gc_init() -#define _os_object_make_uncollectable(obj) (obj) -#define _os_object_make_collectable(obj) (obj) -#define _os_object_have_gc 0 -#define _os_object_gc_retain(obj) (obj) -#define _os_object_gc_release(obj) -#endif // __OBJC_GC__ +#include #pragma mark - #pragma mark _os_object_t @@ -150,8 +62,6 @@ _os_object_init(void) { _objc_init(); - _os_object_gc_init(); - if (slowpath(_os_object_have_gc)) return; Block_callbacks_RR callbacks = { sizeof(Block_callbacks_RR), (void (*)(const void *))&objc_retain, @@ -169,7 +79,7 @@ _os_object_alloc_realized(const void *cls, size_t size) { dispatch_assert(size >= sizeof(struct _os_object_s)); - return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); + return _os_objc_alloc(cls, size); } _os_object_t @@ -177,13 +87,13 @@ { dispatch_assert(size >= sizeof(struct _os_object_s)); Class cls = _cls ? 
[(id)_cls class] : [OS_OBJECT_CLASS(object) class]; - return _os_object_make_uncollectable(_os_objc_alloc(cls, size)); + return _os_objc_alloc(cls, size); } void _os_object_dealloc(_os_object_t obj) { - [_os_object_make_collectable(obj) dealloc]; + [obj dealloc]; } void @@ -206,7 +116,6 @@ void* os_retain(void *obj) { - if (slowpath(_os_object_have_gc)) return _os_object_gc_retain(obj); return objc_retain(obj); } @@ -214,7 +123,6 @@ void os_release(void *obj) { - if (slowpath(_os_object_have_gc)) return _os_object_gc_release(obj); return objc_release(obj); } @@ -222,6 +130,7 @@ #pragma mark _os_object @implementation OS_OBJECT_CLASS(object) +DISPATCH_UNAVAILABLE_INIT() -(id)retain { return _os_object_retain(self); @@ -255,8 +164,7 @@ - (void)_dispose { #pragma mark - #pragma mark _dispatch_objc - -#include +#if OS_OBJECT_HAVE_OBJC2 id _dispatch_objc_alloc(Class cls, size_t size) @@ -313,6 +221,12 @@ - (void)_dispose { return [dou _resume]; } +void +_dispatch_objc_activate(dispatch_object_t dou) +{ + return [dou _activate]; +} + size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz) { @@ -325,6 +239,7 @@ - (void)_dispose { return offset; } +#endif #pragma mark - #pragma mark _dispatch_object @@ -332,18 +247,7 @@ - (void)_dispose { #define DISPATCH_OBJC_LOAD() + (void)load {} @implementation DISPATCH_CLASS(object) - -- (id)init { - self = [super init]; - [self release]; - self = nil; - return self; -} - -- (void)_xref_dispose { - _dispatch_xref_dispose(self); - [super _xref_dispose]; -} +DISPATCH_UNAVAILABLE_INIT() - (void)_dispose { return _dispatch_dispose(self); // calls _os_object_dealloc() @@ -354,7 +258,7 @@ - (NSString *)debugDescription { if (!nsstring) return nil; char buf[2048]; struct dispatch_object_s *obj = (struct dispatch_object_s *)self; - if (obj->do_vtable->do_debug) { + if (dx_vtable(obj)->do_debug) { dx_debug(obj, buf, sizeof(buf)); } else { strlcpy(buf, dx_kind(obj), sizeof(buf)); @@ -368,6 +272,7 @@ - (NSString 
*)debugDescription { @implementation DISPATCH_CLASS(queue) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); @@ -377,22 +282,42 @@ - (NSString *)description { class_getName([self class]), dispatch_queue_get_label(self), self]; } +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + [super _xref_dispose]; +} + @end @implementation DISPATCH_CLASS(source) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); _dispatch_source_xref_dispose(self); [super _xref_dispose]; } @end +@implementation DISPATCH_CLASS(mach) +DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() + +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + [super _xref_dispose]; +} + +@end + @implementation DISPATCH_CLASS(queue_runloop) DISPATCH_OBJC_LOAD() +DISPATCH_UNAVAILABLE_INIT() - (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); _dispatch_runloop_queue_xref_dispose(self); [super _xref_dispose]; } @@ -402,30 +327,30 @@ - (void)_xref_dispose { #define DISPATCH_CLASS_IMPL(name) \ @implementation DISPATCH_CLASS(name) \ DISPATCH_OBJC_LOAD() \ + DISPATCH_UNAVAILABLE_INIT() \ @end +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA +DISPATCH_CLASS_IMPL(data) +#endif DISPATCH_CLASS_IMPL(semaphore) DISPATCH_CLASS_IMPL(group) +DISPATCH_CLASS_IMPL(queue_serial) +DISPATCH_CLASS_IMPL(queue_concurrent) +DISPATCH_CLASS_IMPL(queue_main) DISPATCH_CLASS_IMPL(queue_root) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_specific_queue) DISPATCH_CLASS_IMPL(queue_attr) -DISPATCH_CLASS_IMPL(mach) DISPATCH_CLASS_IMPL(mach_msg) DISPATCH_CLASS_IMPL(io) DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) @implementation OS_OBJECT_CLASS(voucher) +DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() -- (id)init { - self = [super init]; - [self release]; - self = 
nil; - return self; -} - - (void)_xref_dispose { return _voucher_xref_dispose(self); // calls _os_object_release_internal() } @@ -448,15 +373,9 @@ - (NSString *)debugDescription { #if VOUCHER_ENABLE_RECIPE_OBJECTS @implementation OS_OBJECT_CLASS(voucher_recipe) +DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() -- (id)init { - self = [super init]; - [self release]; - self = nil; - return self; -} - - (void)_dispose { } @@ -468,23 +387,26 @@ - (NSString *)debugDescription { @end #endif + #pragma mark - -#pragma mark dispatch_autorelease_pool +#pragma mark dispatch_last_resort_autorelease_pool #if DISPATCH_COCOA_COMPAT void * -_dispatch_autorelease_pool_push(void) { +_dispatch_last_resort_autorelease_pool_push(void) +{ if (!slowpath(_os_object_debug_missing_pools)) { - return objc_autoreleasePoolPush(); + return _dispatch_autorelease_pool_push(); } return NULL; } void -_dispatch_autorelease_pool_pop(void *context) { +_dispatch_last_resort_autorelease_pool_pop(void *context) +{ if (!slowpath(_os_object_debug_missing_pools)) { - return objc_autoreleasePoolPop(context); + return _dispatch_autorelease_pool_pop(context); } } @@ -494,7 +416,8 @@ - (NSString *)debugDescription { #pragma mark dispatch_client_callout // Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__ +#if DISPATCH_USE_CLIENT_CALLOUT && !__USING_SJLJ_EXCEPTIONS__ && \ + OS_OBJECT_HAVE_OBJC2 // On platforms with zero-cost exceptions, use a compiler-generated catch-all // exception handler. 
@@ -524,6 +447,7 @@ - (NSString *)debugDescription { } } +#if HAVE_MACH #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -537,6 +461,7 @@ - (NSString *)debugDescription { objc_terminate(); } } +#endif // HAVE_MACH #endif // DISPATCH_USE_CLIENT_CALLOUT diff --git a/src/object_internal.h b/src/object_internal.h index 4778f4c1f..80bb10251 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -27,116 +27,200 @@ #ifndef __DISPATCH_OBJECT_INTERNAL__ #define __DISPATCH_OBJECT_INTERNAL__ -#if OS_OBJECT_USE_OBJC -#define DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_SUBCLASS(name, super) -#define DISPATCH_DECL_INTERNAL(name) \ - DISPATCH_DECL_INTERNAL_SUBCLASS(name, dispatch_object) -#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) \ - _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) +#if !OS_OBJECT_USE_OBJC +#define OS_OBJECT_DECL(name) DISPATCH_DECL(name) +#define OS_OBJECT_DECL_SUBCLASS(name, super) DISPATCH_DECL(name) +#endif + +#if USE_OBJC +#define OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) _OS_##name##_vtable +#define DISPATCH_CLASS_SYMBOL(name) OS_dispatch_##name##_class +#define DISPATCH_CLASS_RAW_SYMBOL_NAME(name) \ + OS_OBJC_CLASS_RAW_SYMBOL_NAME(DISPATCH_CLASS(name)) #else -#define DISPATCH_DECL_INTERNAL_SUBCLASS(name, super) DISPATCH_DECL(name) -#define DISPATCH_DECL_INTERNAL(name) DISPATCH_DECL(name) -#define DISPATCH_DECL_SUBCLASS_INTERFACE(name, super) -#endif // OS_OBJECT_USE_OBJC - -DISPATCH_ENUM(dispatch_invoke_flags, unsigned long, - DISPATCH_INVOKE_NONE = 0x00, - /* This invoke is a stealer, meaning that it doesn't own the - * enqueue lock, and is not allowed to requeue elsewhere - */ - DISPATCH_INVOKE_STEALING = 0x01, - /* The `dc` argument is a dispatch continuation wrapper - * created by _dispatch_queue_push_override - */ - DISPATCH_INVOKE_OVERRIDING = 0x02, -); +#define OS_OBJECT_CLASS_SYMBOL(name) _##name##_vtable +#define OS_OBJC_CLASS_RAW_SYMBOL_NAME(name) \ + 
"__" OS_STRINGIFY(name) "_vtable" +#define DISPATCH_CLASS_SYMBOL(name) _dispatch_##name##_vtable +#define DISPATCH_CLASS_RAW_SYMBOL_NAME(name) \ + "__dispatch_" OS_STRINGIFY(name) "_vtable" +#endif + +#define DISPATCH_CLASS(name) OS_dispatch_##name +#if USE_OBJC +#define DISPATCH_OBJC_CLASS_DECL(name) \ + extern void *DISPATCH_CLASS_SYMBOL(name) \ + asm(DISPATCH_CLASS_RAW_SYMBOL_NAME(name)) +#endif + +// define a new proper class +#define OS_OBJECT_CLASS_DECL(name, super, ...) \ + struct name##_s; \ + struct name##_extra_vtable_s { \ + __VA_ARGS__; \ + }; \ + struct name##_vtable_s { \ + _OS_OBJECT_CLASS_HEADER(); \ + struct name##_extra_vtable_s _os_obj_vtable; \ + }; \ + OS_OBJECT_EXTRA_VTABLE_DECL(name, name) \ + extern const struct name##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ + asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + +#if OS_OBJECT_SWIFT3 +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ + OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)); \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#elif OS_OBJECT_USE_OBJC +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ + OS_OBJECT_DECL(name); \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#else +#define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) 
\ + typedef struct name##_s *name##_t; \ + OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) +#endif + +#define DISPATCH_CLASS_DECL_BARE(name) \ + OS_OBJECT_CLASS_DECL(dispatch_##name, dispatch_object, \ + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_##name)) +#define DISPATCH_CLASS_DECL(name) \ + _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_object) \ + _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_##name, dispatch_##name) \ + DISPATCH_CLASS_DECL_BARE(name) + +#define DISPATCH_INTERNAL_CLASS_DECL(name) \ + DISPATCH_DECL(dispatch_##name); \ + DISPATCH_CLASS_DECL(name) + +// define a new subclass used in a cluster +#define OS_OBJECT_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + struct name##_s; \ + OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ + extern const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ + asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + +#define DISPATCH_SUBCLASS_DECL(name, super) \ + OS_OBJECT_SUBCLASS_DECL(dispatch_##name, super) + +#if OS_OBJECT_SWIFT3 +// define a new internal subclass used in a class cluster +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_PROTOCOL(name, super); \ + OS_OBJECT_SUBCLASS_DECL(name, super) + +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ + _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_##super) \ + DISPATCH_SUBCLASS_DECL(name, dispatch_##super) +#else +// define a new internal subclass used in a class cluster +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ + OS_OBJECT_DECL_SUBCLASS(name, super); \ + OS_OBJECT_SUBCLASS_DECL(name, super) + +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ + OS_OBJECT_DECL_SUBCLASS(dispatch_##name, dispatch_##super); \ + DISPATCH_SUBCLASS_DECL(name, dispatch_##super) +#endif + +// vtable symbols +#define OS_OBJECT_VTABLE(name) (&OS_OBJECT_CLASS_SYMBOL(name)) +#define DISPATCH_OBJC_CLASS(name) (&DISPATCH_CLASS_SYMBOL(name)) + +// vtables for subclasses used in a class cluster #if 
USE_OBJC -#define DISPATCH_CLASS(name) OS_OBJECT_CLASS(dispatch_##name) // ObjC classes and dispatch vtables are co-located via linker order and alias // files rdar://10640168 -#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ +#if OS_OBJECT_HAVE_OBJC2 +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ __attribute__((section("__DATA,__objc_data"), used)) \ - static const struct { \ - DISPATCH_VTABLE_HEADER(super); \ - } DISPATCH_CONCAT(_,DISPATCH_CLASS(name##_vtable)) = { \ - __VA_ARGS__ \ + const struct super##_extra_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { __VA_ARGS__ } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) +#else +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ + const struct super##_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { \ + ._os_obj_objc_isa = &OS_OBJECT_CLASS_SYMBOL(name), \ + ._os_obj_vtable = { __VA_ARGS__ }, \ } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ + extern const struct super##_vtable_s \ + OS_OBJECT_EXTRA_VTABLE_SYMBOL(name); +#define DISPATCH_VTABLE(name) &OS_OBJECT_EXTRA_VTABLE_SYMBOL(dispatch_##name) +#endif #else -#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ - DISPATCH_CONST_STRUCT_INSTANCE(dispatch_##super##_vtable_s, \ - _dispatch_##name##_vtable, \ - ._os_obj_xref_dispose = _dispatch_xref_dispose, \ - ._os_obj_dispose = _dispatch_dispose, \ - __VA_ARGS__) +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) 
\ + const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) = { \ + ._os_obj_xref_dispose = xdispose, \ + ._os_obj_dispose = dispose, \ + ._os_obj_vtable = { __VA_ARGS__ }, \ + } +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) #endif // USE_OBJC -#define DISPATCH_SUBCLASS_DECL(name, super) \ - DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, super) \ - struct dispatch_##name##_s; \ - extern DISPATCH_CONST_STRUCT_DECL(dispatch_##name##_vtable_s, \ - _dispatch_##name##_vtable, \ - { \ - _OS_OBJECT_CLASS_HEADER(); \ - DISPATCH_VTABLE_HEADER(name); \ - }) -#define DISPATCH_CLASS_DECL(name) DISPATCH_SUBCLASS_DECL(name, dispatch_object) -#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ - DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_##name, dispatch_##super); \ - DISPATCH_DECL_SUBCLASS_INTERFACE(dispatch_##name, dispatch_##super) \ - extern DISPATCH_CONST_STRUCT_DECL(dispatch_##super##_vtable_s, \ - _dispatch_##name##_vtable) +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##super, \ + _dispatch_xref_dispose, _dispatch_dispose, __VA_ARGS__) + +// vtables for proper classes +#define OS_OBJECT_VTABLE_INSTANCE(name, xdispose, dispose, ...) \ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, name, \ + xdispose, dispose, __VA_ARGS__) + #define DISPATCH_VTABLE_INSTANCE(name, ...) 
\ DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) -#define DISPATCH_VTABLE(name) &_dispatch_##name##_vtable -#if !TARGET_OS_WIN32 -#define DISPATCH_VTABLE_HEADER(x) \ +#define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - size_t (*const do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*const do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ - dispatch_invoke_flags_t); \ - unsigned long (*const do_probe)(struct dispatch_##x##_s *); \ - void (*const do_dispose)(struct dispatch_##x##_s *); + void (*const do_invoke)(struct x##_s *, dispatch_invoke_flags_t) + +#define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \ + DISPATCH_INVOKABLE_VTABLE_HEADER(x); \ + void (*const do_wakeup)(struct x##_s *, \ + pthread_priority_t, dispatch_wakeup_flags_t); \ + void (*const do_dispose)(struct x##_s *) + +#define DISPATCH_OBJECT_VTABLE_HEADER(x) \ + DISPATCH_QUEUEABLE_VTABLE_HEADER(x); \ + void (*const do_set_targetq)(struct x##_s *, dispatch_queue_t); \ + void (*const do_suspend)(struct x##_s *); \ + void (*const do_resume)(struct x##_s *, bool activate); \ + void (*const do_finalize_activation)(struct x##_s *); \ + size_t (*const do_debug)(struct x##_s *, char *, size_t) + +#define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) +#define dx_type(x) dx_vtable(x)->do_type +#define dx_subtype(x) (dx_vtable(x)->do_type & _DISPATCH_SUB_TYPE_MASK) +#define dx_metatype(x) (dx_vtable(x)->do_type & _DISPATCH_META_TYPE_MASK) +#define dx_hastypeflag(x, f) (dx_vtable(x)->do_type & _DISPATCH_##f##_TYPEFLAG) +#define dx_kind(x) dx_vtable(x)->do_kind +#define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z)) +#define dx_dispose(x) dx_vtable(x)->do_dispose(x) +#define dx_invoke(x, z) dx_vtable(x)->do_invoke(x, z) +#define dx_wakeup(x, y, z) dx_vtable(x)->do_wakeup(x, y, z) + +#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT + +#if OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_GLOBAL_OBJECT_HEADER(name) \ 
+ .do_vtable = DISPATCH_VTABLE(name), \ + ._objc_isa = DISPATCH_OBJC_CLASS(name), \ + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT #else -// Cannot be const on Win32 because we initialize at runtime. -#define DISPATCH_VTABLE_HEADER(x) \ - unsigned long do_type; \ - const char *do_kind; \ - size_t (*do_debug)(struct dispatch_##x##_s *, char *, size_t); \ - void (*do_invoke)(struct dispatch_##x##_s *, dispatch_object_t dc, \ - dispatch_invoke_flags_t); \ - unsigned long (*do_probe)(struct dispatch_##x##_s *); \ - void (*do_dispose)(struct dispatch_##x##_s *); +#define DISPATCH_GLOBAL_OBJECT_HEADER(name) \ + .do_vtable = DISPATCH_VTABLE(name), \ + .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, \ + .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT #endif -#define dx_type(x) (x)->do_vtable->do_type -#define dx_metatype(x) ((x)->do_vtable->do_type & _DISPATCH_META_TYPE_MASK) -#define dx_kind(x) (x)->do_vtable->do_kind -#define dx_debug(x, y, z) (x)->do_vtable->do_debug((x), (y), (z)) -#define dx_dispose(x) (x)->do_vtable->do_dispose(x) -#define dx_invoke(x, y, z) (x)->do_vtable->do_invoke(x, y, z) -#define dx_probe(x) (x)->do_vtable->do_probe(x) - -#define DISPATCH_STRUCT_HEADER(x) \ - _OS_OBJECT_HEADER( \ - const struct dispatch_##x##_vtable_s *do_vtable, \ - do_ref_cnt, \ - do_xref_cnt); \ - struct dispatch_##x##_s *volatile do_next; \ - struct dispatch_queue_s *do_targetq; \ - void *do_ctxt; \ - void *do_finalizer; \ - unsigned int volatile do_suspend_cnt; - -#define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT -// "word and bit" must be a power of two to be safely subtracted -#define DISPATCH_OBJECT_SUSPEND_LOCK 1u -#define DISPATCH_OBJECT_SUSPEND_INTERVAL 2u -#define DISPATCH_OBJECT_SUSPENDED(x) \ - ((x)->do_suspend_cnt >= DISPATCH_OBJECT_SUSPEND_INTERVAL) #ifdef __LP64__ // the bottom nibble must not be zero, the rest of the bits should be random // we sign extend the 64-bit version so that a better instruction 
encoding is @@ -146,84 +230,285 @@ DISPATCH_ENUM(dispatch_invoke_flags, unsigned long, #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif +DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, + // The caller of dx_wakeup owns an internal refcount on the object being + // woken up + DISPATCH_WAKEUP_CONSUME = 0x00000001, + + // Some change to the object needs to be published to drainers. + // If the drainer isn't the same thread, some scheme such as the dispatch + // queue DIRTY bit must be used and a release barrier likely has to be + // involved before dx_wakeup returns + DISPATCH_WAKEUP_FLUSH = 0x00000002, + + // A slow waiter was just enqueued + DISPATCH_WAKEUP_SLOW_WAITER = 0x00000004, + + // The caller desires to apply an override on the object being woken up + // and has already adjusted the `oq_override` field. When this flag is + // passed, the priority passed to dx_wakeup() should not be 0 + DISPATCH_WAKEUP_OVERRIDING = 0x00000008, + + // At the time this queue was woken up it had an override that must be + // preserved (used to solve a race with _dispatch_queue_drain_try_unlock()) + DISPATCH_WAKEUP_WAS_OVERRIDDEN = 0x00000010, + +#define _DISPATCH_WAKEUP_OVERRIDE_BITS \ + ((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \ + DISPATCH_WAKEUP_WAS_OVERRIDDEN)) +); + +DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, + DISPATCH_INVOKE_NONE = 0x00000000, + + // Invoke modes + // + // @const DISPATCH_INVOKE_STEALING + // This invoke is a stealer, meaning that it doesn't own the + // enqueue lock at drain lock time. + // + // @const DISPATCH_INVOKE_OVERRIDING + // This invoke is draining the hierarchy on another root queue and needs + // to fake the identity of the original one. + // + DISPATCH_INVOKE_STEALING = 0x00000001, + DISPATCH_INVOKE_OVERRIDING = 0x00000002, + + // Below this point flags are propagated to recursive calls to drain(), + // continuation pop() or dx_invoke(). 
+#define _DISPATCH_INVOKE_PROPAGATE_MASK 0xffff0000u + + // Drain modes + // + // @const DISPATCH_INVOKE_WORKER_DRAIN + // Invoke has been issued by a worker thread (work queue thread, or + // pthread root queue) drain. This flag is NOT set when the main queue, + // manager queue or runloop queues are drained + // + // @const DISPATCH_INVOKE_REDIRECTING_DRAIN + // Has only been draining concurrent queues so far + // Implies DISPATCH_INVOKE_WORKER_DRAIN + // + // @const DISPATCH_INVOKE_MANAGER_DRAIN + // We're draining from a manager context + // + DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, + DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, + DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, +#define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x000f0000u + + // Autoreleasing modes + // + // @const DISPATCH_INVOKE_AUTORELEASE_ALWAYS + // Always use autoreleasepools around callouts + // + // @const DISPATCH_INVOKE_AUTORELEASE_NEVER + // Never use autoreleasepools around callouts + // + DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x00100000, + DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x00200000, +#define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x00300000u +); + enum { - _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations + _DISPATCH_META_TYPE_MASK = 0xffff0000, // mask for object meta-types + _DISPATCH_TYPEFLAGS_MASK = 0x0000ff00, // mask for object typeflags + _DISPATCH_SUB_TYPE_MASK = 0x000000ff, // mask for object sub-types + + _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node + _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - 
_DISPATCH_META_TYPE_MASK = 0xfff0000, // mask for object meta-types - _DISPATCH_ATTR_TYPE = 0x10000000, // meta-type for attributes + _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - DISPATCH_CONTINUATION_TYPE = _DISPATCH_CONTINUATION_TYPE, + _DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x0100, // bit set for any root queues +#define DISPATCH_CONTINUATION_TYPE(name) \ + (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE) DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, - DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, + DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, + DISPATCH_QUEUE_ATTR_TYPE = 3 | _DISPATCH_NODE_TYPE, - DISPATCH_IO_TYPE = _DISPATCH_IO_TYPE, - DISPATCH_OPERATION_TYPE = _DISPATCH_OPERATION_TYPE, - DISPATCH_DISK_TYPE = _DISPATCH_DISK_TYPE, + DISPATCH_IO_TYPE = 0 | _DISPATCH_IO_TYPE, + DISPATCH_OPERATION_TYPE = 0 | _DISPATCH_OPERATION_TYPE, + DISPATCH_DISK_TYPE = 0 | _DISPATCH_DISK_TYPE, - DISPATCH_QUEUE_ATTR_TYPE = _DISPATCH_QUEUE_TYPE |_DISPATCH_ATTR_TYPE, + DISPATCH_QUEUE_LEGACY_TYPE = 1 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SERIAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_CONCURRENT_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_ROOT_TYPEFLAG, + DISPATCH_QUEUE_RUNLOOP_TYPE = 5 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_ROOT_TYPEFLAG, + DISPATCH_QUEUE_MGR_TYPE = 6 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SPECIFIC_TYPE = 7 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_ROOT_TYPE = 2 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_MGR_TYPE = 3 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SPECIFIC_TYPE = 4 | _DISPATCH_QUEUE_TYPE, + DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, + DISPATCH_MACH_CHANNEL_TYPE = 2 | 
_DISPATCH_SOURCE_TYPE, - DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, - DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, }; -DISPATCH_SUBCLASS_DECL(object, object); +typedef struct _os_object_vtable_s { + _OS_OBJECT_CLASS_HEADER(); +} _os_object_vtable_s; + +typedef struct _os_object_s { + _OS_OBJECT_HEADER( + const _os_object_vtable_s *os_obj_isa, + os_obj_ref_cnt, + os_obj_xref_cnt); +} _os_object_s; + +#if OS_OBJECT_HAVE_OBJC1 +#define OS_OBJECT_STRUCT_HEADER(x) \ + _OS_OBJECT_HEADER(\ + const void *_objc_isa, \ + do_ref_cnt, \ + do_xref_cnt); \ + const struct x##_vtable_s *do_vtable +#else +#define OS_OBJECT_STRUCT_HEADER(x) \ + _OS_OBJECT_HEADER(\ + const struct x##_vtable_s *do_vtable, \ + do_ref_cnt, \ + do_xref_cnt) +#endif + +#define _DISPATCH_OBJECT_HEADER(x) \ + struct _os_object_s _as_os_obj[0]; \ + OS_OBJECT_STRUCT_HEADER(dispatch_##x); \ + struct dispatch_##x##_s *volatile do_next; \ + struct dispatch_queue_s *do_targetq; \ + void *do_ctxt; \ + void *do_finalizer + +#define DISPATCH_OBJECT_HEADER(x) \ + struct dispatch_object_s _as_do[0]; \ + _DISPATCH_OBJECT_HEADER(x) + +// Swift-unavailable -init requires method in each class. 
+#define DISPATCH_UNAVAILABLE_INIT() \ + - (instancetype)init { \ + DISPATCH_CLIENT_CRASH(0, "-init called directly"); \ + return [super init]; \ + } + +_OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); + +OS_OBJECT_CLASS_DECL(dispatch_object, object, + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_object)); + struct dispatch_object_s { - DISPATCH_STRUCT_HEADER(object); + _DISPATCH_OBJECT_HEADER(object); +}; + +#if OS_OBJECT_HAVE_OBJC1 +#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ + struct dispatch_object_s *volatile ns##_items_head; \ + unsigned long ns##_serialnum; \ + union { \ + uint64_t volatile __state_field__; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + dispatch_lock __state_field__##_lock, \ + uint32_t __state_field__##_bits \ + ); \ + }; /* needs to be 64-bit aligned */ \ + /* LP64 global queue cacheline boundary */ \ + const char *ns##_label; \ + voucher_t ns##_override_voucher; \ + dispatch_priority_t ns##_priority; \ + dispatch_priority_t volatile ns##_override; \ + struct dispatch_object_s *volatile ns##_items_tail +#else +#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ + struct dispatch_object_s *volatile ns##_items_head; \ + union { \ + uint64_t volatile __state_field__; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + dispatch_lock __state_field__##_lock, \ + uint32_t __state_field__##_bits \ + ); \ + }; /* needs to be 64-bit aligned */ \ + /* LP64 global queue cacheline boundary */ \ + unsigned long ns##_serialnum; \ + const char *ns##_label; \ + voucher_t ns##_override_voucher; \ + dispatch_priority_t ns##_priority; \ + dispatch_priority_t volatile ns##_override; \ + struct dispatch_object_s *volatile ns##_items_tail +#endif + +OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object, + DISPATCH_QUEUEABLE_VTABLE_HEADER(os_mpsc_queue)); + +struct os_mpsc_queue_s { + struct _os_object_s _as_os_obj[0]; + OS_OBJECT_STRUCT_HEADER(os_mpsc_queue); + struct dispatch_object_s *volatile oq_next; + void *oq_opaque1; // do_targetq + void *oq_opaque2; // do_ctxt + void 
*oq_opaque3; // do_finalizer + _OS_MPSC_QUEUE_FIELDS(oq, __oq_state_do_not_use); }; size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); void *_dispatch_alloc(const void *vtable, size_t size); +#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); +#endif void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT +#if USE_OBJC +#include +#include +#define _dispatch_autorelease_pool_push() \ + objc_autoreleasePoolPush() +#define _dispatch_autorelease_pool_pop(context) \ + objc_autoreleasePoolPop(context) +#else void *_dispatch_autorelease_pool_push(void); void _dispatch_autorelease_pool_pop(void *context); #endif +void *_dispatch_last_resort_autorelease_pool_push(void); +void _dispatch_last_resort_autorelease_pool_pop(void *context); + +#define dispatch_invoke_with_autoreleasepool(flags, ...) ({ \ + void *pool = NULL; \ + if ((flags) & DISPATCH_INVOKE_AUTORELEASE_ALWAYS) { \ + pool = _dispatch_autorelease_pool_push(); \ + DISPATCH_COMPILER_CAN_ASSUME(pool); \ + }; \ + __VA_ARGS__; \ + if (pool) _dispatch_autorelease_pool_pop(pool); \ + }) +#else +#define dispatch_invoke_with_autoreleasepool(flags, ...) 
\ + do { (void)flags; __VA_ARGS__; } while (0) +#endif -#if USE_OBJC -#include - -#define OS_OBJC_CLASS_SYMBOL(name) \ - DISPATCH_CONCAT(OBJC_CLASS_$_,name) -#define OS_OBJC_CLASS_DECL(name) \ - extern void *OS_OBJC_CLASS_SYMBOL(name) -#define OS_OBJC_CLASS(name) \ - ((Class)&OS_OBJC_CLASS_SYMBOL(name)) -#define OS_OBJECT_OBJC_CLASS_DECL(name) \ - OS_OBJC_CLASS_DECL(OS_OBJECT_CLASS(name)) -#define OS_OBJECT_OBJC_CLASS(name) \ - OS_OBJC_CLASS(OS_OBJECT_CLASS(name)) -#define DISPATCH_OBJC_CLASS_DECL(name) \ - OS_OBJC_CLASS_DECL(DISPATCH_CLASS(name)) -#define DISPATCH_OBJC_CLASS(name) \ - OS_OBJC_CLASS(DISPATCH_CLASS(name)) +#if USE_OBJC OS_OBJECT_OBJC_CLASS_DECL(object); -DISPATCH_OBJC_CLASS_DECL(object); +#endif +#if OS_OBJECT_HAVE_OBJC2 // ObjC toll-free bridging, keep in sync with libdispatch.order file +// +// This is required by the dispatch_data_t/NSData bridging, which is not +// supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) \ if (slowpath((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ slowpath((Class)((o)._os_obj->os_obj_isa) < \ - DISPATCH_OBJC_CLASS(object)) || \ + (Class)OS_OBJECT_VTABLE(dispatch_object)) || \ slowpath((Class)((o)._os_obj->os_obj_isa) >= \ - OS_OBJECT_OBJC_CLASS(object))) { \ + (Class)OS_OBJECT_VTABLE(object))) { \ return f((o), ##__VA_ARGS__); \ } @@ -238,6 +523,7 @@ void _dispatch_objc_set_target_queue(dispatch_object_t dou, dispatch_queue_t queue); void _dispatch_objc_suspend(dispatch_object_t dou); void _dispatch_objc_resume(dispatch_object_t dou); +void _dispatch_objc_activate(dispatch_object_t dou); size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #if __OBJC2__ @@ -248,11 +534,12 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); - (void)_setTargetQueue:(dispatch_queue_t)queue; - (void)_suspend; - (void)_resume; +- (void)_activate; @end #endif // __OBJC2__ -#else // USE_OBJC +#else #define DISPATCH_OBJECT_TFB(f, o, ...) 
-#endif // USE_OBJC +#endif // OS_OBJECT_HAVE_OBJC2 #pragma mark - #pragma mark _os_object_s @@ -274,7 +561,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); typeof(o) _o = (o); \ int _ref_cnt = _o->f; \ if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ - _ref_cnt = dispatch_atomic_##op##2o(_o, f, m); \ + _ref_cnt = os_atomic_##op##2o(_o, f, m); \ } \ _ref_cnt; \ }) @@ -286,7 +573,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); _os_atomic_refcnt_perform2o(o, m, dec, release) #define _os_atomic_refcnt_dispose_barrier2o(o, m) \ - (void)dispatch_atomic_load2o(o, m, acquire) + (void)os_atomic_load2o(o, m, acquire) /* @@ -323,17 +610,6 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_object_refcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) -typedef struct _os_object_class_s { - _OS_OBJECT_CLASS_HEADER(); -} _os_object_class_s; - -typedef struct _os_object_s { - _OS_OBJECT_HEADER( - const _os_object_class_s *os_obj_isa, - os_obj_ref_cnt, - os_obj_xref_cnt); -} _os_object_s; - void _os_object_init(void); unsigned long _os_object_retain_count(_os_object_t obj); bool _os_object_retain_weak(_os_object_t obj); diff --git a/src/once.c b/src/once.c index 86b1a032c..d7d6a8e64 100644 --- a/src/once.c +++ b/src/once.c @@ -26,7 +26,7 @@ typedef struct _dispatch_once_waiter_s { volatile struct _dispatch_once_waiter_s *volatile dow_next; - _dispatch_thread_semaphore_t dow_sema; + dispatch_thread_event_s dow_event; mach_port_t dow_thread; } *_dispatch_once_waiter_t; @@ -44,13 +44,23 @@ DISPATCH_NOINLINE void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { +#if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE + dispatch_once_gate_t l = (dispatch_once_gate_t)val; + + if (_dispatch_once_gate_tryenter(l)) { + _dispatch_client_callout(ctxt, func); + _dispatch_once_gate_broadcast(l); + } else { + _dispatch_once_gate_wait(l); 
+ } +#else _dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val; - struct _dispatch_once_waiter_s dow = { NULL, 0, MACH_PORT_NULL }; + struct _dispatch_once_waiter_s dow = { }; _dispatch_once_waiter_t tail = &dow, next, tmp; - _dispatch_thread_semaphore_t sema; + dispatch_thread_event_t event; - if (dispatch_atomic_cmpxchg(vval, NULL, tail, acquire)) { - dow.dow_thread = _dispatch_thread_port(); + if (os_atomic_cmpxchg(vval, NULL, tail, acquire)) { + dow.dow_thread = _dispatch_tid_self(); _dispatch_client_callout(ctxt, func); // The next barrier must be long and strong. @@ -103,36 +113,37 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) // On some CPUs, the most fully synchronizing instruction might // need to be issued. - dispatch_atomic_maximally_synchronizing_barrier(); + os_atomic_maximally_synchronizing_barrier(); // above assumed to contain release barrier - next = dispatch_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); + next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); while (next != tail) { _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next); - sema = next->dow_sema; + event = &next->dow_event; next = tmp; - _dispatch_thread_semaphore_signal(sema); + _dispatch_thread_event_signal(event); } } else { - dow.dow_sema = _dispatch_get_thread_semaphore(); + _dispatch_thread_event_init(&dow.dow_event); next = *vval; for (;;) { if (next == DISPATCH_ONCE_DONE) { break; } - if (dispatch_atomic_cmpxchgvw(vval, next, tail, &next, release)) { + if (os_atomic_cmpxchgvw(vval, next, tail, &next, release)) { dow.dow_thread = next->dow_thread; dow.dow_next = next; if (dow.dow_thread) { pthread_priority_t pp = _dispatch_get_priority(); - _dispatch_thread_override_start(dow.dow_thread, pp); + _dispatch_thread_override_start(dow.dow_thread, pp, val); } - _dispatch_thread_semaphore_wait(dow.dow_sema); + _dispatch_thread_event_wait(&dow.dow_event); if (dow.dow_thread) { - 
_dispatch_thread_override_end(dow.dow_thread); + _dispatch_thread_override_end(dow.dow_thread, val); } break; } } - _dispatch_put_thread_semaphore(dow.dow_sema); + _dispatch_thread_event_destroy(&dow.dow_event); } +#endif } diff --git a/src/queue.c b/src/queue.c index 5868e8799..58c545b17 100644 --- a/src/queue.c +++ b/src/queue.c @@ -48,19 +48,17 @@ #define pthread_workqueue_t void* #endif +static void _dispatch_sig_thread(void *ctxt); static void _dispatch_cache_cleanup(void *value); -static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t dc, pthread_priority_t pp); +static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp); +static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc); static void _dispatch_queue_cleanup(void *ctxt); -static inline void _dispatch_queue_wakeup_global2(dispatch_queue_t dq, - unsigned int n); -static inline void _dispatch_queue_wakeup_global(dispatch_queue_t dq); -static inline _dispatch_thread_semaphore_t - _dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq); -static inline bool _dispatch_queue_prepare_override(dispatch_queue_t dq, - dispatch_queue_t tq, pthread_priority_t p); -static inline void _dispatch_queue_push_override(dispatch_queue_t dq, - dispatch_queue_t tq, pthread_priority_t p, bool owning); +static void _dispatch_deferred_items_cleanup(void *ctxt); +static void _dispatch_frame_cleanup(void *ctxt); +static void _dispatch_context_cleanup(void *ctxt); +static void _dispatch_non_barrier_complete(dispatch_queue_t dq); +static inline void _dispatch_global_queue_poke(dispatch_queue_t dq); #if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -76,14 +74,14 @@ static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif #if DISPATCH_COCOA_COMPAT -static dispatch_once_t _dispatch_main_q_port_pred; -static dispatch_queue_t 
_dispatch_main_queue_wakeup(void); -unsigned long _dispatch_runloop_queue_wakeup(dispatch_queue_t dq); -static void _dispatch_runloop_queue_port_init(void *ctxt); -static void _dispatch_runloop_queue_port_dispose(dispatch_queue_t dq); +static dispatch_once_t _dispatch_main_q_handle_pred; +static void _dispatch_runloop_queue_poke(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags); +static void _dispatch_runloop_queue_handle_init(void *ctxt); +static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq); #endif -static void _dispatch_root_queues_init(void *context); +static void _dispatch_root_queues_init_once(void *context); static dispatch_once_t _dispatch_root_queues_pred; #pragma mark - @@ -103,75 +101,51 @@ static struct dispatch_pthread_root_queue_context_s _dispatch_pthread_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { .dpq_thread_mediator = { - .do_vtable = 
DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { .dpq_thread_mediator = { - .do_vtable = DISPATCH_VTABLE(semaphore), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, + 
DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), }}, }; #endif @@ -199,6 +173,14 @@ struct dispatch_root_queue_context_s { }; typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; +#define WORKQ_PRIO_INVALID (-1) +#ifndef WORKQ_BG_PRIOQUEUE_CONDITIONAL +#define WORKQ_BG_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID +#endif +#ifndef WORKQ_HIGH_PRIOQUEUE_CONDITIONAL +#define WORKQ_HIGH_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID +#endif + DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ @@ -226,7 +208,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -237,7 +219,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, + .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -314,7 +296,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = 0, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -325,7 +307,7 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ #if HAVE_PTHREAD_WORKQUEUES .dgq_qos = 
_DISPATCH_QOS_CLASS_USER_INTERACTIVE, - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, + .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif #if DISPATCH_ENABLE_THREAD_POOL @@ -337,165 +319,67 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol -// dq_running is set to 2 so that barrier operations go through the slow path DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], +#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) \ + [DISPATCH_ROOT_QUEUE_IDX_##n] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = &_dispatch_root_queue_contexts[ \ + DISPATCH_ROOT_QUEUE_IDX_##n], \ + .dq_width = DISPATCH_QUEUE_WIDTH_POOL, \ + .dq_override_voucher = DISPATCH_NO_VOUCHER, \ + .dq_override = DISPATCH_SATURATED_OVERRIDE, \ + __VA_ARGS__ \ + } + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS, .dq_label = "com.apple.root.maintenance-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 4, - }, - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT, .dq_label = 
"com.apple.root.maintenance-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 5, - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS, .dq_label = "com.apple.root.background-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 6, - }, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT, .dq_label = "com.apple.root.background-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 7, - }, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS, .dq_label = "com.apple.root.utility-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 8, - }, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - 
.do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT, .dq_label = "com.apple.root.utility-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 9, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS, .dq_label = "com.apple.root.default-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 10, - }, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 11, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS, .dq_label = "com.apple.root.user-initiated-qos", - .dq_running 
= 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 12, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 13, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS, .dq_label = "com.apple.root.user-interactive-qos", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 14, - }, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, - .do_ctxt = &_dispatch_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, - .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 15, - }, + ), }; #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP @@ -553,26 +437,24 @@ static const 
int _dispatch_priority2wq[] = { }; #endif -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; #else -#define _dispatch_mgr_root_queue \ - _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_HIGH_OVERCOMMIT_PRIORITY] +#define _dispatch_mgr_root_queue _dispatch_root_queues[\ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] #endif // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_mgr_q = { - .do_vtable = DISPATCH_VTABLE(queue_mgr), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr), + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", .dq_width = 1, - .dq_is_thread_bound = 1, .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_override = DISPATCH_SATURATED_OVERRIDE, .dq_serialnum = 2, }; @@ -580,10 +462,10 @@ dispatch_queue_t dispatch_get_global_queue(long priority, unsigned long flags) { if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return NULL; + return DISPATCH_BAD_INPUT; } dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); + _dispatch_root_queues_init_once); qos_class_t qos; switch (priority) { #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK @@ -642,61 +524,94 @@ dispatch_get_current_queue(void) return _dispatch_get_current_queue(); } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_targets_queue(dispatch_queue_t dq1, dispatch_queue_t dq2) +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) { - while (dq1) { - if (dq1 == dq2) { - return true; - } - dq1 = dq1->do_targetq; - } - return false; + 
_dispatch_client_assert_fail( + "Block was %sexpected to execute on queue [%s]", + expected ? "" : "not ", dq->dq_label ?: ""); } -#define DISPATCH_ASSERT_QUEUE_MESSAGE "BUG in client of libdispatch: " \ - "Assertion failed: Block was run on an unexpected queue" - -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_NORETURN static void -_dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) +_dispatch_assert_queue_barrier_fail(dispatch_queue_t dq) { - char *msg; - asprintf(&msg, "%s\n%s queue: 0x%p[%s]", DISPATCH_ASSERT_QUEUE_MESSAGE, - expected ? "Expected" : "Unexpected", dq, dq->dq_label ? - dq->dq_label : ""); - _dispatch_log("%s", msg); - _dispatch_set_crash_log_message_dynamic(msg); - _dispatch_hardware_crash(); - free(msg); + _dispatch_client_assert_fail( + "Block was expected to act as a barrier on queue [%s]", + dq->dq_label ?: ""); } void dispatch_assert_queue(dispatch_queue_t dq) { - if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { - DISPATCH_CLIENT_CRASH("invalid queue passed to " + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue()"); } - dispatch_queue_t cq = _dispatch_queue_get_current(); - if (fastpath(cq) && fastpath(_dispatch_queue_targets_queue(cq, dq))) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_pended(dq_state))) { + goto fail; + } + if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) { return; } + if (likely(dq->dq_width > 1)) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
+ if (fastpath(_dispatch_thread_frame_find_queue(dq))) { + return; + } + } +fail: _dispatch_assert_queue_fail(dq, true); } void dispatch_assert_queue_not(dispatch_queue_t dq) { - if (slowpath(!(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE))) { - DISPATCH_CLIENT_CRASH("invalid queue passed to " + unsigned long metatype = dx_metatype(dq); + if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue_not()"); } - dispatch_queue_t cq = _dispatch_queue_get_current(); - if (slowpath(cq) && slowpath(_dispatch_queue_targets_queue(cq, dq))) { - _dispatch_assert_queue_fail(dq, false); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (_dq_state_drain_pended(dq_state)) { + return; + } + if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) { + if (likely(dq->dq_width == 1)) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
+ return; + } + if (likely(!_dispatch_thread_frame_find_queue(dq))) { + return; + } + } + _dispatch_assert_queue_fail(dq, false); +} + +void +dispatch_assert_queue_barrier(dispatch_queue_t dq) +{ + dispatch_assert_queue(dq); + + if (likely(dq->dq_width == 1)) { + return; + } + + if (likely(dq->do_targetq)) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (likely(_dq_state_is_in_barrier(dq_state))) { + return; + } } + + _dispatch_assert_queue_barrier_fail(dq); } #if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG @@ -711,7 +626,6 @@ dispatch_assert_queue_not(dispatch_queue_t dq) #pragma mark dispatch_init #if HAVE_PTHREAD_WORKQUEUE_QOS -int _dispatch_set_qos_class_enabled; pthread_priority_t _dispatch_background_priority; pthread_priority_t _dispatch_user_initiated_priority; @@ -740,55 +654,64 @@ _dispatch_root_queues_init_qos(int supported) flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; } p = _pthread_qos_class_encode(qos, 0, flags); - _dispatch_root_queues[i].dq_priority = p; - } - p = _pthread_qos_class_encode(qos_class_main(), 0, 0); - _dispatch_main_q.dq_priority = p; - _dispatch_queue_set_override_priority(&_dispatch_main_q); - _dispatch_background_priority = _dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS].dq_priority & - ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_user_initiated_priority = _dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS].dq_priority & - ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { - _dispatch_set_qos_class_enabled = 1; + _dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p; } } -#endif +#endif // HAVE_PTHREAD_WORKQUEUE_QOS static inline bool -_dispatch_root_queues_init_workq(void) +_dispatch_root_queues_init_workq(int *wq_supported) { + int r; bool result = false; + *wq_supported = 0; #if HAVE_PTHREAD_WORKQUEUES bool disable_wq = false; #if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = 
slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif - int r; -#if HAVE_PTHREAD_WORKQUEUE_QOS +#if DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS bool disable_qos = false; #if DISPATCH_DEBUG disable_qos = slowpath(getenv("LIBDISPATCH_DISABLE_QOS")); #endif - if (!disable_qos && !disable_wq) { - r = _pthread_workqueue_supported(); - int supported = r; - if (r & WORKQ_FEATURE_FINEPRIO) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + bool disable_kevent_wq = false; +#if DISPATCH_DEBUG + disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")); +#endif +#endif + if (!disable_wq && !disable_qos) { + *wq_supported = _pthread_workqueue_supported(); +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!disable_kevent_wq && (*wq_supported & WORKQ_FEATURE_KEVENT)) { + r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread3, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#if DISPATCH_USE_MGR_THREAD + _dispatch_kevent_workqueue_enabled = !r; +#endif +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + _dispatch_evfilt_machport_direct_enabled = !r; +#endif + result = !r; + } else +#endif + if (*wq_supported & WORKQ_FEATURE_FINEPRIO) { +#if DISPATCH_USE_MGR_THREAD r = _pthread_workqueue_init(_dispatch_worker_thread3, offsetof(struct dispatch_queue_s, dq_serialnum), 0); result = !r; - if (result) _dispatch_root_queues_init_qos(supported); +#endif } + if (result) _dispatch_root_queues_init_qos(*wq_supported); } -#endif // HAVE_PTHREAD_WORKQUEUE_QOS +#endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS #if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!result && !disable_wq) { -#if PTHREAD_WORKQUEUE_SPI_VERSION >= 20121218 pthread_workqueue_setdispatchoffset_np( offsetof(struct dispatch_queue_s, dq_serialnum)); -#endif r = pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); #if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK (void)dispatch_assume_zero(r); @@ -811,7 +734,7 @@ 
_dispatch_root_queues_init_workq(void) dispatch_root_queue_context_t qc; qc = &_dispatch_root_queue_contexts[i]; #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (!disable_wq) { + if (!disable_wq && qc->dgq_wq_priority != WORKQ_PRIO_INVALID) { r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, qc->dgq_wq_priority); (void)dispatch_assume_zero(r); @@ -850,6 +773,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, thread_pool_size = pool_size; } qc->dgq_thread_pool_size = thread_pool_size; +#if HAVE_PTHREAD_WORKQUEUES if (qc->dgq_qos) { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); (void)dispatch_assume_zero(pthread_attr_setdetachstate( @@ -859,6 +783,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, &pqc->dpq_thread_attr, qc->dgq_qos, 0)); #endif } +#endif // HAVE_PTHREAD_WORKQUEUES #if USE_MACH_SEM // override the default FIFO behavior for the pool semaphores kern_return_t kr = semaphore_create(mach_task_self(), @@ -868,7 +793,7 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); #elif USE_POSIX_SEM /* XXXRW: POSIX semaphores don't support LIFO? 
*/ - int ret = sem_init(&pqc->dpq_thread_mediator.dsema_sem), 0, 0); + int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); (void)dispatch_assume_zero(ret); #endif } @@ -876,11 +801,19 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, static dispatch_once_t _dispatch_root_queues_pred; +void +_dispatch_root_queues_init(void) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + static void -_dispatch_root_queues_init(void *context DISPATCH_UNUSED) +_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { - _dispatch_safe_fork = false; - if (!_dispatch_root_queues_init_workq()) { + int wq_supported; + _dispatch_fork_becomes_unsafe(); + if (!_dispatch_root_queues_init_workq(&wq_supported)) { #if DISPATCH_ENABLE_THREAD_POOL int i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { @@ -897,13 +830,12 @@ _dispatch_root_queues_init(void *context DISPATCH_UNUSED) &_dispatch_root_queue_contexts[i], 0, overcommit); } #else - DISPATCH_CRASH("Root queue initialization failed"); + DISPATCH_INTERNAL_CRASH((errno << 16) | wq_supported, + "Root queue initialization failed"); #endif // DISPATCH_ENABLE_THREAD_POOL } } -#define countof(x) (sizeof(x) / sizeof(x[0])) - DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) @@ -935,27 +867,56 @@ libdispatch_init(void) dispatch_assert(offsetof(struct dispatch_continuation_s, do_next) == offsetof(struct dispatch_object_s, do_next)); + dispatch_assert(offsetof(struct dispatch_continuation_s, do_vtable) == + offsetof(struct dispatch_object_s, do_vtable)); dispatch_assert(sizeof(struct dispatch_apply_s) <= DISPATCH_CONTINUATION_SIZE); dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE == 0); + dispatch_assert(offsetof(struct dispatch_queue_s, dq_state) % _Alignof(uint64_t) == 0); dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % DISPATCH_CACHELINE_SIZE == 0); + +#if HAVE_PTHREAD_WORKQUEUE_QOS + // 26497968 
_dispatch_user_initiated_priority should be set for qos + // propagation to work properly + pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0); + _dispatch_main_q.dq_priority = (dispatch_priority_t)p; + _dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0); + _dispatch_user_initiated_priority = p; + p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0); + _dispatch_background_priority = p; +#if DISPATCH_DEBUG + if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { + _dispatch_set_qos_class_enabled = 1; + } +#endif +#endif + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE + _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); +#else _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); + _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); - _dispatch_thread_key_create(&dispatch_io_key, NULL); - _dispatch_thread_key_create(&dispatch_apply_key, NULL); + _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, NULL); #if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif -#if !DISPATCH_USE_OS_SEMAPHORE_CACHE - _dispatch_thread_key_create(&dispatch_sema4_key, - (void (*)(void *))_dispatch_thread_semaphore_dispose); +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_key_create(&dispatch_sema4_key, + _dispatch_thread_semaphore_dispose); + } +#endif #endif #if 
DISPATCH_USE_RESOLVERS // rdar://problem/8541707 @@ -963,14 +924,13 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; #endif - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_main_q); + _dispatch_queue_set_current(&_dispatch_main_q); _dispatch_queue_set_bound_thread(&_dispatch_main_q); #if DISPATCH_USE_PTHREAD_ATFORK (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, dispatch_atfork_parent, dispatch_atfork_child)); #endif - _dispatch_hw_config_init(); _dispatch_vtable_init(); _os_object_init(); @@ -989,17 +949,16 @@ _dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) mach_port_t mp, mhp = mach_host_self(); kr = host_get_host_port(mhp, &mp); DISPATCH_VERIFY_MIG(kr); - if (!kr) { + if (fastpath(!kr)) { // mach_host_self returned the HOST_PRIV port kr = mach_port_deallocate(mach_task_self(), mhp); DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); mhp = mp; } else if (kr != KERN_INVALID_ARGUMENT) { (void)dispatch_assume_zero(kr); } - if (!dispatch_assume(mhp)) { - DISPATCH_CRASH("Could not get unprivileged host port"); + if (!fastpath(mhp)) { + DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); } _dispatch_mach_host_port = mhp; } @@ -1013,6 +972,59 @@ _dispatch_get_mach_host_port(void) } #endif +#if DISPATCH_USE_THREAD_LOCAL_STORAGE +#include +#include + +#ifdef SYS_gettid +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t) syscall(SYS_gettid); +} +#else +#error "SYS_gettid unavailable on this system" +#endif + +#define _tsd_call_cleanup(k, f) do { \ + if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ + } while (0) + +void +_libdispatch_tsd_cleanup(void *ctx) +{ + struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); + _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); + _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); + _tsd_call_cleanup(dispatch_context_key, 
_dispatch_context_cleanup); + _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _tsd_call_cleanup(dispatch_defaultpriority_key, NULL); +#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_bcounter_key, NULL); +#endif +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + _tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose); +#endif + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); + _tsd_call_cleanup(dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); + tsd->tid = 0; +} + +DISPATCH_NOINLINE +void +libdispatch_tsd_init(void) +{ + pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); + __dispatch_tsd.tid = gettid(); +} +#endif + DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void) @@ -1025,9 +1037,12 @@ dispatch_atfork_child(void) _dispatch_mach_host_port = MACH_VOUCHER_NULL; #endif _voucher_atfork_child(); - if (_dispatch_safe_fork) { + if (!_dispatch_is_multithreaded_inline()) { + // clear the _PROHIBIT bit if set + _dispatch_unsafe_fork = 0; return; } + _dispatch_unsafe_fork = 0; _dispatch_child_of_unsafe_fork = true; _dispatch_main_q.dq_items_head = crash; @@ -1091,34 +1106,67 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { #define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \ ((concurrent) ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) +#define DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive) \ + ((inactive) ? 
DQA_INDEX_INACTIVE : DQA_INDEX_ACTIVE) + +#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency) \ + (frequency) + #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) #define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) static inline dispatch_queue_attr_t _dispatch_get_queue_attr(qos_class_t qos, int prio, - _dispatch_queue_attr_overcommit_t overcommit, bool concurrent) + _dispatch_queue_attr_overcommit_t overcommit, + dispatch_autorelease_frequency_t frequency, + bool concurrent, bool inactive) { return (dispatch_queue_attr_t)&_dispatch_queue_attrs [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)] [DISPATCH_QUEUE_ATTR_PRIO2IDX(prio)] [DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit)] - [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)]; + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency)] + [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)] + [DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive)]; +} + +dispatch_queue_attr_t +_dispatch_get_default_queue_attr(void) +{ + return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + _dispatch_queue_attr_overcommit_unspecified, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); } dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, dispatch_qos_class_t qos_class, int relative_priority) { - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } return _dispatch_get_queue_attr(qos_class, relative_priority, - dqa->dqa_overcommit, dqa->dqa_concurrent); + dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, 
+ dqa->dqa_concurrent, dqa->dqa_inactive); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) +{ + if (!slowpath(dqa)) { + dqa = _dispatch_get_default_queue_attr(); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + } + return _dispatch_get_queue_attr(dqa->dqa_qos_class, + dqa->dqa_relative_priority, dqa->dqa_overcommit, + dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true); } dispatch_queue_attr_t @@ -1126,15 +1174,38 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, bool overcommit) { if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } return _dispatch_get_queue_attr(dqa->dqa_qos_class, dqa->dqa_relative_priority, overcommit ? 
_dispatch_queue_attr_overcommit_enabled : - _dispatch_queue_attr_overcommit_disabled, dqa->dqa_concurrent); + _dispatch_queue_attr_overcommit_disabled, + dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, + dqa->dqa_inactive); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, + dispatch_autorelease_frequency_t frequency) +{ + switch (frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_INHERIT: + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + break; + default: + return DISPATCH_BAD_INPUT; + } + if (!slowpath(dqa)) { + dqa = _dispatch_get_default_queue_attr(); + } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + } + return _dispatch_get_queue_attr(dqa->dqa_qos_class, + dqa->dqa_relative_priority, dqa->dqa_overcommit, + frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } #pragma mark - @@ -1148,92 +1219,173 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, // we use 'xadd' on Intel, so the initial value == next assigned unsigned long volatile _dispatch_queue_serial_numbers = 16; -dispatch_queue_t -dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, - dispatch_queue_t tq) +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq, bool legacy) { #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK // Be sure the root queue priorities are set dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); + _dispatch_root_queues_init_once); #endif - bool disallow_tq = (slowpath(dqa) && dqa != DISPATCH_QUEUE_CONCURRENT); if (!slowpath(dqa)) { - dqa = _dispatch_get_queue_attr(0, 0, - _dispatch_queue_attr_overcommit_unspecified, false); + dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - 
DISPATCH_CLIENT_CRASH("Invalid queue attribute"); - } - dispatch_queue_t dq = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); - _dispatch_queue_init(dq); - if (label) { - dq->dq_label = strdup(label); + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } + + // + // Step 1: Normalize arguments (qos, overcommit, tq) + // + qos_class_t qos = dqa->dqa_qos_class; - _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; - if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - // Serial queues default to overcommit! - overcommit = dqa->dqa_concurrent ? - _dispatch_queue_attr_overcommit_disabled : - _dispatch_queue_attr_overcommit_enabled; +#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && + !_dispatch_root_queues[ + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_USER_INITIATED; } -#if HAVE_PTHREAD_WORKQUEUE_QOS - dq->dq_priority = _pthread_qos_class_encode(qos, dqa->dqa_relative_priority, - overcommit == _dispatch_queue_attr_overcommit_enabled ? 
- _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); #endif - if (dqa->dqa_concurrent) { - dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; - } - if (!tq) { - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { - qos = _DISPATCH_QOS_CLASS_DEFAULT; - } + bool maintenance_fallback = false; #if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && + maintenance_fallback = true; +#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK + if (maintenance_fallback) { + if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { + qos = _DISPATCH_QOS_CLASS_BACKGROUND; } -#endif - bool maintenance_fallback = false; -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - maintenance_fallback = true; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (maintenance_fallback) { - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_BACKGROUND; - } + } + + _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; + if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) { + if (tq->do_targetq) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and " + "a non-global target queue"); } + } - tq = _dispatch_get_root_queue(qos, overcommit == + if (tq && !tq->do_targetq && + tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + // Handle discrepancies between attr and target queue, attributes win + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + overcommit = _dispatch_queue_attr_overcommit_enabled; + } else { + overcommit = _dispatch_queue_attr_overcommit_disabled; + } + } + if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + tq = _dispatch_get_root_queue_with_overcommit(tq, + overcommit == 
_dispatch_queue_attr_overcommit_enabled); + } else { + tq = NULL; + } + } else if (tq && !tq->do_targetq) { + // target is a pthread or runloop root queue, setting QoS or overcommit + // is disallowed + if (overcommit != _dispatch_queue_attr_overcommit_unspecified) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " + "and use this kind of target queue"); + } + if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute " + "and use this kind of target queue"); + } + } else { + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + // Serial queues default to overcommit! + overcommit = dqa->dqa_concurrent ? + _dispatch_queue_attr_overcommit_disabled : + _dispatch_queue_attr_overcommit_enabled; + } + } + if (!tq) { + qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ? + _DISPATCH_QOS_CLASS_DEFAULT : qos; + tq = _dispatch_get_root_queue(tq_qos, overcommit == _dispatch_queue_attr_overcommit_enabled); if (slowpath(!tq)) { - DISPATCH_CLIENT_CRASH("Invalid queue attribute"); + DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } + } + + // + // Step 2: Initialize the queue + // + + if (legacy) { + // if any of these attributes is specified, use non legacy classes + if (dqa->dqa_inactive || dqa->dqa_autorelease_frequency) { + legacy = false; + } + } + + const void *vtable; + dispatch_queue_flags_t dqf = 0; + if (legacy) { + vtable = DISPATCH_VTABLE(queue); + } else if (dqa->dqa_concurrent) { + vtable = DISPATCH_VTABLE(queue_concurrent); } else { - _dispatch_retain(tq); - if (disallow_tq) { - // TODO: override target queue's qos/overcommit ? 
- DISPATCH_CLIENT_CRASH("Invalid combination of target queue & " - "queue attribute"); + vtable = DISPATCH_VTABLE(queue_serial); + } + switch (dqa->dqa_autorelease_frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + dqf |= DQF_AUTORELEASE_NEVER; + break; + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + dqf |= DQF_AUTORELEASE_ALWAYS; + break; + } + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; } + } + + dispatch_queue_t dq = _dispatch_alloc(vtable, + sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); + _dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ? + DISPATCH_QUEUE_WIDTH_MAX : 1, dqa->dqa_inactive); + + dq->dq_label = label; + +#if HAVE_PTHREAD_WORKQUEUE_QOS + dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos, + dqa->dqa_relative_priority, + overcommit == _dispatch_queue_attr_overcommit_enabled ? + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); +#endif + _dispatch_retain(tq); + if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + // legacy way of inherithing the QoS from the target _dispatch_queue_priority_inherit_from_target(dq, tq); } - _dispatch_queue_set_override_priority(dq); + if (!dqa->dqa_inactive) { + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + } dq->do_targetq = tq; _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); } +dispatch_queue_t +dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq) +{ + return _dispatch_queue_create_with_target(label, dqa, tq, false); +} + dispatch_queue_t dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) { - return dispatch_queue_create_with_target(label, attr, - DISPATCH_TARGET_QUEUE_DEFAULT); + return _dispatch_queue_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT, true); } dispatch_queue_t @@ -1247,20 +1399,45 @@ 
dispatch_queue_create_with_accounting_override_voucher(const char *label, } void -_dispatch_queue_destroy(dispatch_object_t dou) +_dispatch_queue_destroy(dispatch_queue_t dq) { - dispatch_queue_t dq = dou._dq; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; + } + if (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE) { + // dispatch_cancel_and_wait may apply overrides in a racy way with + // the source cancellation finishing. This race is expensive and not + // really worthwhile to resolve since the source becomes dead anyway. + dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; + } + if (slowpath(dq_state != initial_state)) { + if (_dq_state_drain_locked(dq_state)) { + DISPATCH_CLIENT_CRASH(dq, "Release of a locked queue"); + } +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a queue with corrupt state"); + } if (slowpath(dq == _dispatch_queue_get_current())) { - DISPATCH_CRASH("Release of a queue by itself"); + DISPATCH_CLIENT_CRASH(dq, "Release of a queue by itself"); } if (slowpath(dq->dq_items_tail)) { - DISPATCH_CRASH("Release of a queue while items are enqueued"); + DISPATCH_CLIENT_CRASH(dq->dq_items_tail, + "Release of a queue while items are enqueued"); } - // trash the tail queue so that use after free will crash + // trash the queue so that use after free will crash + dq->dq_items_head = (void *)0x200; dq->dq_items_tail = (void *)0x200; + // poison the state with something that is suspended and is easy to spot + dq->dq_state = 0xdead000000000000; - dispatch_queue_t dqsq = dispatch_atomic_xchg2o(dq, dq_specific_q, + dispatch_queue_t dqsq = os_atomic_xchg2o(dq, dq_specific_q, (void *)0x200, relaxed); if (dqsq) { _dispatch_release(dqsq); @@ -1278,30 +1455,276 @@ _dispatch_queue_dispose(dispatch_queue_t dq) { 
_dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); - if (dq->dq_label) { + if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); } _dispatch_queue_destroy(dq); } -const char * -dispatch_queue_get_label(dispatch_queue_t dq) +DISPATCH_NOINLINE +static void +_dispatch_queue_suspend_slow(dispatch_queue_t dq) { - if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { - dq = _dispatch_get_current_queue(); + uint64_t dq_state, value, delta; + + _dispatch_queue_sidelock_lock(dq); + + // what we want to transfer (remove from dq_state) + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a suspend so add a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (dq->dq_side_suspend_cnt == 0) { + // we substract delta from dq_state, and we want to set this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; } - return dq->dq_label ? dq->dq_label : ""; + + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + // unsigned underflow of the substraction can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. 
+ if (slowpath(os_sub_overflow(dq_state, delta, &value))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + if (slowpath(os_add_overflow(dq->dq_side_suspend_cnt, + DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { + DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); + } + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return dx_vtable(dq)->do_suspend(dq); } -qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +void +_dispatch_queue_suspend(dispatch_queue_t dq) { - qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; - int relative_priority = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t dqp = dq->dq_priority; - if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; - qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + + uint64_t dq_state, value; + + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + value = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (slowpath(os_add_overflow(dq_state, value, &value))) { + os_atomic_rmw_loop_give_up({ + return _dispatch_queue_suspend_slow(dq); + }); + } + }); + + if (!_dq_state_is_suspended(dq_state)) { + // rdar://8181908 we need to extend the queue life for the duration + // of the call to wakeup at _dispatch_queue_resume() time. 
+ _dispatch_retain(dq); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_resume_slow(dispatch_queue_t dq) +{ + uint64_t dq_state, value, delta; + + _dispatch_queue_sidelock_lock(dq); + + // what we want to transfer + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a resume so consume a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + switch (dq->dq_side_suspend_cnt) { + case 0: + goto retry; + case DISPATCH_QUEUE_SUSPEND_HALF: + // we will transition the side count to 0, so we want to clear this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; + break; + } + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + // unsigned overflow of the addition can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. + if (slowpath(os_add_overflow(dq_state, delta, &value))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; + return _dispatch_queue_sidelock_unlock(dq); + +retry: + _dispatch_queue_sidelock_unlock(dq); + return dx_vtable(dq)->do_resume(dq, false); +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_resume_finalize_activation(dispatch_queue_t dq) +{ + // Step 2: run the activation finalizer + if (dx_vtable(dq)->do_finalize_activation) { + dx_vtable(dq)->do_finalize_activation(dq); + } + // Step 3: consume the suspend count + return dx_vtable(dq)->do_resume(dq, false); +} + +void +_dispatch_queue_resume(dispatch_queue_t dq, bool activate) +{ + // covers all suspend and inactive bits, including side suspend bit + const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + // backward compatibility: only dispatch sources can abuse + // dispatch_resume() to really mean dispatch_activate() + bool resume_can_activate = (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE); + 
uint64_t dq_state, value; + + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + + // Activation is a bit tricky as it needs to finalize before the wakeup. + // + // If after doing its updates to the suspend count and/or inactive bit, + // the last suspension related bit that would remain is the + // NEEDS_ACTIVATION one, then this function: + // + // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into + // a suspend count) + // 2. runs the activation finalizer + // 3. consumes the suspend count set in (1), and finishes the resume flow + // + // Concurrently, some property setters such as setting dispatch source + // handlers or _dispatch_queue_set_target_queue try to do in-place changes + // before activation. These protect their action by taking a suspend count. + // Step (1) above cannot happen if such a setter has locked the object. + if (activate) { + // relaxed atomic because this doesn't publish anything, this is only + // about picking the thread that gets to finalize the activation + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if ((dq_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else if (_dq_state_is_inactive(dq_state)) { + // { sc:>0 i:1 na:1 } -> { i:0 na:1 } + // simple activation because sc is not 0 + // resume will deal with na:1 later + value = dq_state - DISPATCH_QUEUE_INACTIVE; + } else { + // object already active, this is a no-op, just exit + os_atomic_rmw_loop_give_up(return); + } + }); + } else { + // release barrier needed to publish the effect of + // - dispatch_set_target_queue() + // - dispatch_set_*_handler() + // - do_finalize_activation() + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { + if ((dq_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL + + 
DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; + } else if (resume_can_activate && (dq_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + value = dq_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else { + value = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (slowpath(os_sub_overflow(dq_state, value, &value))) { + // underflow means over-resume or a suspend count transfer + // to the side count is needed + os_atomic_rmw_loop_give_up({ + if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { + goto over_resume; + } + return _dispatch_queue_resume_slow(dq); + }); + } + if (_dq_state_is_runnable(value) && + !_dq_state_drain_locked(value)) { + uint64_t full_width = value; + if (_dq_state_has_pending_barrier(value)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + value = full_width; + value &= ~DISPATCH_QUEUE_DIRTY; + value |= _dispatch_tid_self(); + } + } + } + }); + } + + if ((dq_state ^ value) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + return _dispatch_queue_resume_finalize_activation(dq); + } + + if (activate) { + // if we're still in an activate codepath here we should have + // { sc:>0 na:1 }, if not we've got a corrupt state + if (!fastpath(_dq_state_is_suspended(value))) { + DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); + } + return; + } + + if (_dq_state_is_suspended(value)) { + return; + } + + if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { + 
_dispatch_release(dq); + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + + if (_dq_state_should_wakeup(value)) { + // + // seq_cst wrt state changes that were flushed and not acted upon + os_atomic_thread_fence(acquire); + pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, + _dispatch_queue_is_thread_bound(dq)); + return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME); + } + return _dispatch_release_tailcall(dq); + +over_resume: + if (slowpath(_dq_state_is_inactive(dq_state))) { + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); + } + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); +} + +const char * +dispatch_queue_get_label(dispatch_queue_t dq) +{ + if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_get_current_queue(); + } + return dq->dq_label ? dq->dq_label : ""; +} + +qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +{ + qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; + int relative_priority = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pthread_priority_t dqp = dq->dq_priority; + if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; + qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); #else (void)dq; #endif @@ -1316,14 +1739,12 @@ _dispatch_queue_set_width2(void *ctxt) uint32_t tmp; dispatch_queue_t dq = _dispatch_queue_get_current(); - if (w == 1 || w == 0) { - dq->dq_width = 1; - _dispatch_object_debug(dq, "%s", __func__); - return; - } if (w > 0) { tmp = (unsigned int)w; } else switch (w) { + case 0: + tmp = 1; + break; case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: tmp = dispatch_hw_config(physical_cpus); break; @@ -1336,12 +1757,15 @@ _dispatch_queue_set_width2(void *ctxt) tmp = dispatch_hw_config(logical_cpus); break; } - if (tmp > DISPATCH_QUEUE_WIDTH_MAX / 2) { - tmp = DISPATCH_QUEUE_WIDTH_MAX / 2; + if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { + tmp = DISPATCH_QUEUE_WIDTH_MAX; } - // multiply by two since the running count is inc/dec by two - // (the 
low bit == barrier) - dq->dq_width = (typeof(dq->dq_width))(tmp * 2); + + dispatch_queue_flags_t old_dqf, new_dqf; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = old_dqf & ~DQF_WIDTH_MASK; + new_dqf |= (tmp << DQF_WIDTH_SHIFT); + }); _dispatch_object_debug(dq, "%s", __func__); } @@ -1349,77 +1773,110 @@ void dispatch_queue_set_width(dispatch_queue_t dq, long width) { if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE)) { + slowpath(dx_hastypeflag(dq, QUEUE_ROOT))) { return; } - _dispatch_barrier_trysync_f(dq, (void*)(intptr_t)width, + + unsigned long type = dx_type(dq); + switch (type) { + case DISPATCH_QUEUE_LEGACY_TYPE: + case DISPATCH_QUEUE_CONCURRENT_TYPE: + break; + case DISPATCH_QUEUE_SERIAL_TYPE: + DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); + default: + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + } + + _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, _dispatch_queue_set_width2); } -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol static void -_dispatch_set_target_queue2(void *ctxt) +_dispatch_queue_legacy_set_target_queue(void *ctxt) { - dispatch_queue_t prev_dq, dq = _dispatch_queue_get_current(), tq = ctxt; -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_wakeup_with_qos_slow - mach_msg_timeout_t timeout = 1; - mach_port_t th; + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_queue_t tq = ctxt; + dispatch_queue_t otq = dq->do_targetq; - while (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, MACH_PORT_NULL, - _dispatch_thread_port(), &th, acquire)) { - _dispatch_thread_switch(th, DISPATCH_YIELD_THREAD_SWITCH_OPTION, - timeout++); + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); + _dispatch_bug_deprecated("Changing the target of a queue " + "already 
targeted by other dispatch objects"); } -#endif + _dispatch_queue_priority_inherit_from_target(dq, tq); - prev_dq = dq->do_targetq; + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_class_wakeup() + _dispatch_queue_sidelock_lock(dq); +#endif dq->do_targetq = tq; - _dispatch_release(prev_dq); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_class_wakeup() + _dispatch_queue_sidelock_unlock(dq); +#endif + _dispatch_object_debug(dq, "%s", __func__); - dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, release); + _dispatch_introspection_target_queue_changed(dq); + _dispatch_release_tailcall(otq); } void -dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t dq) +_dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) { - DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, dq); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_type(dou._do) == DISPATCH_QUEUE_ROOT_TYPE)) { - return; - } - unsigned long type = dx_metatype(dou._do); - if (slowpath(!dq)) { - bool is_concurrent_q = (type == _DISPATCH_QUEUE_TYPE && - slowpath(dou._dq->dq_width > 1)); - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, + dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + dq->do_targetq); + + if (slowpath(!tq)) { + bool is_concurrent_q = (dq->dq_width > 1); + tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, !is_concurrent_q); } - // TODO: put into the vtable - switch(type) { - case _DISPATCH_QUEUE_TYPE: - case _DISPATCH_SOURCE_TYPE: - _dispatch_retain(dq); - return _dispatch_barrier_trysync_f(dou._dq, dq, - _dispatch_set_target_queue2); - case _DISPATCH_IO_TYPE: - return _dispatch_io_set_target_queue(dou._dchannel, dq); - default: { - dispatch_queue_t prev_dq; - _dispatch_retain(dq); - prev_dq = dispatch_atomic_xchg2o(dou._do, do_targetq, dq, release); - if (prev_dq) _dispatch_release(prev_dq); - 
_dispatch_object_debug(dou._do, "%s", __func__); - return; + + if (_dispatch_queue_try_inactive_suspend(dq)) { + _dispatch_object_set_target_queue_inline(dq, tq); + return dx_vtable(dq)->do_resume(dq, false); + } + + if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue or " + "source with an accounting override voucher " + "after it has been activated"); + } + + unsigned long type = dx_type(dq); + switch (type) { + case DISPATCH_QUEUE_LEGACY_TYPE: + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); } + break; + case DISPATCH_SOURCE_KEVENT_TYPE: + case DISPATCH_MACH_CHANNEL_TYPE: + _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); + _dispatch_bug_deprecated("Changing the target of a source " + "after it has been activated"); + break; + + case DISPATCH_QUEUE_SERIAL_TYPE: + case DISPATCH_QUEUE_CONCURRENT_TYPE: + DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue " + "after it has been activated"); + default: + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } + + _dispatch_retain(tq); + return _dispatch_barrier_trysync_or_async_f(dq, tq, + _dispatch_queue_legacy_set_target_queue); } #pragma mark - -#pragma mark dispatch_pthread_root_queue +#pragma mark dispatch_mgr_queue -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_pthread_root_queue_context_s _dispatch_mgr_root_queue_pthread_context; static struct dispatch_root_queue_context_s @@ -1432,18 +1889,18 @@ static struct dispatch_root_queue_context_s }}}; static struct dispatch_queue_s _dispatch_mgr_root_queue = { - .do_vtable = DISPATCH_VTABLE(queue_root), - .do_ref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT, - .do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK, + 
DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, .do_ctxt = &_dispatch_mgr_root_queue_context, .dq_label = "com.apple.root.libdispatch-manager", - .dq_running = 2, - .dq_width = DISPATCH_QUEUE_WIDTH_MAX, + .dq_width = DISPATCH_QUEUE_WIDTH_POOL, + .dq_override = DISPATCH_SATURATED_OVERRIDE, .dq_override_voucher = DISPATCH_NO_VOUCHER, .dq_serialnum = 3, }; +#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE static struct { volatile int prio; volatile qos_class_t qos; @@ -1456,6 +1913,7 @@ static dispatch_once_t _dispatch_mgr_sched_pred; // TODO: switch to "event-reflector thread" property +#if HAVE_PTHREAD_WORKQUEUE_QOS // Must be kept in sync with list of qos classes in sys/qos.h static const int _dispatch_mgr_sched_qos2prio[] = { [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4, @@ -1465,13 +1923,18 @@ static const int _dispatch_mgr_sched_qos2prio[] = { [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37, [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47, }; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) { struct sched_param param; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES pthread_attr_t *attr; attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; +#else + pthread_attr_t a, *attr = &a; +#endif (void)dispatch_assume_zero(pthread_attr_init(attr)); (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, &_dispatch_mgr_sched.policy)); @@ -1489,7 +1952,9 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) _dispatch_mgr_sched.default_prio = param.sched_priority; _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; } +#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES DISPATCH_NOINLINE static pthread_t * _dispatch_mgr_root_queue_init(void) @@ -1510,8 
+1975,8 @@ _dispatch_mgr_root_queue_init(void) (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); } - _dispatch_mgr_q.dq_priority = _pthread_qos_class_encode(qos, 0, 0); - _dispatch_queue_set_override_priority(&_dispatch_mgr_q); + _dispatch_mgr_q.dq_priority = + (dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0); } #endif param.sched_priority = _dispatch_mgr_sched.prio; @@ -1558,7 +2023,9 @@ _dispatch_mgr_priority_init(void) return _dispatch_mgr_priority_apply(); } } +#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES DISPATCH_NOINLINE static void _dispatch_mgr_priority_raise(const pthread_attr_t *attr) @@ -1567,27 +2034,80 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) struct sched_param param; (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); #if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = 0; + qos_class_t q, qos = 0; (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL); if (qos) { param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; - qos_class_t q = _dispatch_mgr_sched.qos; - do if (q >= qos) { - break; - } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, - qos, q, qos, &q, relaxed))); + os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, { + if (q >= qos) os_atomic_rmw_loop_give_up(break); + }); } #endif - int p = _dispatch_mgr_sched.prio; - do if (p >= param.sched_priority) { + int p, prio = param.sched_priority; + os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, { + if (p >= prio) os_atomic_rmw_loop_give_up(return); + }); +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); + if (_dispatch_kevent_workqueue_enabled) { + pthread_priority_t pp = 0; + if (prio > _dispatch_mgr_sched.default_prio) { + // The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and + // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, 
but that is not + // problematic in this case, since it the second one is only ever + // used on dq_priority fields. + // We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a syscall, + // it is meaningful to libdispatch only. + pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } else if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } return; - } while (slowpath(!dispatch_atomic_cmpxchgvw2o(&_dispatch_mgr_sched, prio, - p, param.sched_priority, &p, relaxed))); + } +#endif +#if DISPATCH_USE_MGR_THREAD if (_dispatch_mgr_sched.tid) { return _dispatch_mgr_priority_apply(); } +#endif +} +#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + +#if DISPATCH_USE_KEVENT_WORKQUEUE +void +_dispatch_kevent_workqueue_init(void) +{ + // Initialize kevent workqueue support + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); + if (!_dispatch_kevent_workqueue_enabled) return; + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + qos_class_t qos = _dispatch_mgr_sched.qos; + int prio = _dispatch_mgr_sched.prio; + pthread_priority_t pp = 0; + if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + _dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp; + } + if (prio > _dispatch_mgr_sched.default_prio) { + pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } } +#endif + +#pragma mark - +#pragma mark dispatch_pthread_root_queue +#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static dispatch_queue_t _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, const pthread_attr_t *attr, dispatch_block_t configure, @@ -1596,27 +2116,34 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, dispatch_queue_t dq; 
dispatch_root_queue_context_t qc; dispatch_pthread_root_queue_context_t pqc; + dispatch_queue_flags_t dqf = 0; size_t dqs; uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? (uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; + dqs = roundup(dqs, _Alignof(struct dispatch_root_queue_context_s)); dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs + sizeof(struct dispatch_root_queue_context_s) + sizeof(struct dispatch_pthread_root_queue_context_s)); qc = (void*)dq + dqs; + dispatch_assert((uintptr_t)qc % _Alignof(typeof(*qc)) == 0); pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s); - - _dispatch_queue_init(dq); + dispatch_assert((uintptr_t)pqc % _Alignof(typeof(*pqc)) == 0); if (label) { - dq->dq_label = strdup(label); + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } } - dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; + _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false); + dq->dq_label = label; + dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + dq->dq_override = DISPATCH_SATURATED_OVERRIDE; dq->do_ctxt = qc; dq->do_targetq = NULL; - dq->dq_running = 2; - dq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); qc->dgq_ctxt = pqc; @@ -1651,6 +2178,36 @@ dispatch_pthread_root_queue_create(const char *label, unsigned long flags, NULL); } +#if DISPATCH_IOHID_SPI +dispatch_queue_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label, + unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure) +{ + if (!observer_hooks->queue_will_execute || + !observer_hooks->queue_did_execute) { + DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks"); + } + return 
_dispatch_pthread_root_queue_create(label, flags, attr, configure, + observer_hooks); +} +#endif + +dispatch_queue_t +dispatch_pthread_root_queue_copy_current(void) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (!dq) return NULL; + while (slowpath(dq->do_targetq)) { + dq = dq->do_targetq; + } + if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || + dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { + return NULL; + } + return (dispatch_queue_t)_os_object_retain_with_resurrect(dq->_as_os_obj); +} #endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES @@ -1658,7 +2215,7 @@ void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) { if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - DISPATCH_CRASH("Global root queue disposed"); + DISPATCH_INTERNAL_CRASH(dq, "Global root queue disposed"); } _dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); @@ -1674,7 +2231,7 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); #endif - if (dq->dq_label) { + if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); } _dispatch_queue_destroy(dq); @@ -1684,11 +2241,10 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) #pragma mark dispatch_queue_specific struct dispatch_queue_specific_queue_s { - DISPATCH_STRUCT_HEADER(queue_specific_queue); - DISPATCH_QUEUE_HEADER; + DISPATCH_QUEUE_HEADER(queue_specific_queue); TAILQ_HEAD(dispatch_queue_specific_head_s, dispatch_queue_specific_s) dqsq_contexts; -}; +} DISPATCH_QUEUE_ALIGN; struct dispatch_queue_specific_s { const void *dqs_key; @@ -1711,7 +2267,7 @@ _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) } free(dqs); } - _dispatch_queue_destroy((dispatch_queue_t)dqsq); + _dispatch_queue_destroy(dqsq->_as_dq); } static void @@ -1721,16 +2277,16 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) dqsq = 
_dispatch_alloc(DISPATCH_VTABLE(queue_specific_queue), sizeof(struct dispatch_queue_specific_queue_s)); - _dispatch_queue_init((dispatch_queue_t)dqsq); + _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, + DISPATCH_QUEUE_WIDTH_MAX, false); dqsq->do_xref_cnt = -1; dqsq->do_targetq = _dispatch_get_root_queue( _DISPATCH_QOS_CLASS_USER_INITIATED, true); - dqsq->dq_width = DISPATCH_QUEUE_WIDTH_MAX; dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_specific_q, NULL, - (dispatch_queue_t)dqsq, release))) { - _dispatch_release((dispatch_queue_t)dqsq); + if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL, + dqsq->_as_dq, release))) { + _dispatch_release(dqsq->_as_dq); } } @@ -1782,7 +2338,7 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, if (slowpath(!dq->dq_specific_q)) { _dispatch_queue_init_specific(dq); } - _dispatch_barrier_trysync_f(dq->dq_specific_q, dqs, + _dispatch_barrier_trysync_or_async_f(dq->dq_specific_q, dqs, _dispatch_queue_set_specific); } @@ -1842,6 +2398,18 @@ dispatch_get_specific(const void *key) return ctxt; } +#if DISPATCH_IOHID_SPI +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t dq) // rdar://problem/18033810 +{ + if (dq->dq_width != 1) { + DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dq_state_drain_locked_by(dq_state, _dispatch_tid_self()); +} +#endif #pragma mark - #pragma mark dispatch_queue_debug @@ -1851,13 +2419,47 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; dispatch_queue_t target = dq->do_targetq; - offset += dsnprintf(&buf[offset], bufsiz - offset, "target = %s[%p], " - "width = 0x%x, running = 0x%x, barrier = %d ", + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + offset += dsnprintf(&buf[offset], bufsiz - offset, + "target = %s[%p], width = 0x%x, state 
= 0x%016llx", target && target->dq_label ? target->dq_label : "", target, - dq->dq_width / 2, dq->dq_running / 2, dq->dq_running & 1); - if (dq->dq_is_thread_bound) { + dq->dq_width, (unsigned long long)dq_state); + if (_dq_state_is_suspended(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", + _dq_state_suspend_cnt(dq_state)); + } + if (_dq_state_is_inactive(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); + } else if (_dq_state_needs_activation(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); + } + if (_dq_state_is_enqueued(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); + } + if (_dq_state_is_dirty(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); + } + if (_dq_state_has_override(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override"); + } + mach_port_t owner = _dq_state_drain_owner(dq_state); + if (!_dispatch_queue_is_thread_bound(dq) && owner) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x", + owner); + } + if (_dq_state_is_in_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier"); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d", + _dq_state_used_width(dq_state, dq->dq_width)); + } + if (_dq_state_has_pending_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier"); + } + if (_dispatch_queue_is_thread_bound(dq)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", - _dispatch_queue_get_bound_thread(dq)); + owner); } return offset; } @@ -1915,6 +2517,90 @@ _dispatch_queue_merge_stats(uint64_t start) } #endif +#pragma mark - +#pragma mark _dispatch_set_priority_and_mach_voucher +#if HAVE_PTHREAD_WORKQUEUE_QOS + +DISPATCH_NOINLINE +void +_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, + 
mach_voucher_t kv) +{ + _pthread_set_flags_t pflags = 0; + if (pp && _dispatch_set_qos_class_enabled) { + pthread_priority_t old_pri = _dispatch_get_priority(); + if (pp != old_pri) { + if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) { + pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; + // when we unbind, overcomitness can flip, so we need to learn + // it from the defaultpri, see _dispatch_priority_compute_update + pp |= (_dispatch_get_defaultpriority() & + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + } else { + // else we need to keep the one that is set in the current pri + pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + } + if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + pflags |= _PTHREAD_SET_SELF_QOS_FLAG; + } + if (unlikely(DISPATCH_QUEUE_DRAIN_OWNER(&_dispatch_mgr_q) == + _dispatch_tid_self())) { + DISPATCH_INTERNAL_CRASH(pp, + "Changing the QoS while on the manager queue"); + } + if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager"); + } + if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { + DISPATCH_INTERNAL_CRASH(old_pri, + "Cannot turn a manager thread into a normal one"); + } + } + } + if (kv != VOUCHER_NO_MACH_VOUCHER) { +#if VOUCHER_USE_MACH_VOUCHER + pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; +#endif + } + if (!pflags) return; + int r = _pthread_set_properties_self(pflags, pp, kv); + if (r == EINVAL) { + DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed"); + } + (void)dispatch_assume_zero(r); +} + +DISPATCH_NOINLINE +voucher_t +_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, + voucher_t v, _dispatch_thread_set_self_t flags) +{ + voucher_t ov = DISPATCH_NO_VOUCHER; + mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; + if (v != DISPATCH_NO_VOUCHER) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + ov = _voucher_get(); + if (ov == v && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (retained && v) _voucher_release_no_dispose(v); + ov = 
DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + kv = _voucher_swap_and_get_mach_voucher(ov, v); + } + } +#if !PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK + flags &= ~(_dispatch_thread_set_self_t)DISPATCH_THREAD_PARK; +#endif + if (!(flags & DISPATCH_THREAD_PARK)) { + _dispatch_set_priority_and_mach_voucher_slow(priority, kv); + } + if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (ov) _voucher_release(ov); + ov = DISPATCH_NO_VOUCHER; + } + return ov; +} +#endif #pragma mark - #pragma mark dispatch_continuation_t @@ -1941,9 +2627,7 @@ _dispatch_cache_cleanup(void *value) } } -#if DISPATCH_USE_MEMORYSTATUS_SOURCE -int _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; - +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE DISPATCH_NOINLINE void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) @@ -1966,24 +2650,55 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_redirect(dispatch_queue_t dq, dispatch_object_t dou) +_dispatch_continuation_slow_item_signal(dispatch_queue_t dq, + dispatch_object_t dou) { dispatch_continuation_t dc = dou._dc; + pthread_priority_t pp = dq->dq_override; - (void)dispatch_atomic_add2o(dq, dq_running, 2, acquire); - if (!DISPATCH_OBJ_IS_VTABLE(dc) && - (long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { - _dispatch_trace_continuation_pop(dq, dou); - _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, - _dispatch_queue_get_override_priority(dq)); - _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->dc_other); - _dispatch_introspection_queue_item_complete(dou); - } else { - _dispatch_async_f_redirect(dq, dc, - _dispatch_queue_get_override_priority(dq)); + _dispatch_trace_continuation_pop(dq, dc); + if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp); 
} - _dispatch_perfmon_workitem_inc(); + _dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other); + _dispatch_introspection_queue_item_complete(dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + _dispatch_queue_push(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc)); +} + +DISPATCH_NOINLINE +static void +_dispatch_continuation_push_sync_slow(dispatch_queue_t dq, + dispatch_continuation_t dc) +{ + _dispatch_queue_push_inline(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc), + DISPATCH_WAKEUP_SLOW_WAITER); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_async2(dispatch_queue_t dq, dispatch_continuation_t dc, + bool barrier) +{ + if (fastpath(barrier || !DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { + return _dispatch_continuation_push(dq, dc); + } + return _dispatch_async_f2(dq, dc); +} + +DISPATCH_NOINLINE +void +_dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc) +{ + _dispatch_continuation_async2(dq, dc, + dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT); } #pragma mark - @@ -2016,18 +2731,20 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) { flags = _dispatch_block_normalize_flags(flags); - voucher_t cv = NULL; bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { - voucher = cv = voucher_copy(); + voucher = VOUCHER_CURRENT; flags |= DISPATCH_BLOCK_HAS_VOUCHER; } + if (voucher == VOUCHER_CURRENT) { + voucher = _voucher_get(); + } if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { pri = _dispatch_priority_propagate(); flags |= DISPATCH_BLOCK_HAS_PRIORITY; } dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); - if (cv) _voucher_release(cv); #if DISPATCH_DEBUG dispatch_assert(_dispatch_block_get_data(db)); #endif @@ -2037,7 
+2754,7 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, dispatch_block_t dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, block); } @@ -2047,8 +2764,10 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } flags |= DISPATCH_BLOCK_HAS_PRIORITY; pthread_priority_t pri = 0; #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -2062,7 +2781,7 @@ dispatch_block_t dispatch_block_create_with_voucher(dispatch_block_flags_t flags, voucher_t voucher, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; flags |= DISPATCH_BLOCK_HAS_VOUCHER; return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, block); @@ -2073,8 +2792,10 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, voucher_t voucher, dispatch_qos_class_t qos_class, int relative_priority, dispatch_block_t block) { - if (!_dispatch_block_flags_valid(flags)) return NULL; - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) return NULL; + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; + } flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); pthread_priority_t pri = 0; #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -2088,57 +2809,56 @@ void 
dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) { if (!_dispatch_block_flags_valid(flags)) { - DISPATCH_CLIENT_CRASH("Invalid flags passed to " + DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " "dispatch_block_perform()"); } flags = _dispatch_block_normalize_flags(flags); struct dispatch_block_private_data_s dbpds = DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block); - return _dispatch_block_invoke(&dbpds); + return _dispatch_block_invoke_direct(&dbpds); } #define _dbpd_group(dbpd) ((dbpd)->dbpd_group) void -_dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd) +_dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) { dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - unsigned long override = DISPATCH_VOUCHER_IGNORE_QUEUE_OVERRIDE; + _dispatch_thread_set_self_t adopt_flags = 0; if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { op = _dispatch_get_priority(); p = dbpd->dbpd_priority; - override |= (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || - !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? 
- DISPATCH_PRIORITY_ENFORCE : 0; + if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + adopt_flags |= DISPATCH_PRIORITY_ENFORCE; + } } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); } - ov = _dispatch_adopt_priority_and_voucher(p, v, override); - dbpd->dbpd_thread = _dispatch_thread_port(); + ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); + dbpd->dbpd_thread = _dispatch_tid_self(); _dispatch_client_callout(dbpd->dbpd_block, _dispatch_Block_invoke(dbpd->dbpd_block)); _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } } -static void +void _dispatch_block_sync_invoke(void *block) { dispatch_block_t b = block; @@ -2146,92 +2866,92 @@ _dispatch_block_sync_invoke(void *block) dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - unsigned long override = 0; + _dispatch_thread_set_self_t adopt_flags = 0; if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { op = _dispatch_get_priority(); p = dbpd->dbpd_priority; - override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || - !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS) ? 
- DISPATCH_PRIORITY_ENFORCE : 0; + if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + adopt_flags |= DISPATCH_PRIORITY_ENFORCE; + } } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); } - ov = _dispatch_adopt_priority_and_voucher(p, v, override); + ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); dbpd->dbpd_block(); _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + os_mpsc_queue_t oq; + oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (oq) { // balances dispatch_{,barrier_,}sync - _dispatch_release(dq); + _os_object_release_internal(oq->_as_os_obj); } } +DISPATCH_ALWAYS_INLINE static void -_dispatch_block_async_invoke_and_release(void *block) +_dispatch_block_async_invoke2(dispatch_block_t b, bool release) { - dispatch_block_t b = block; dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); - } - if (atomic_flags & DBF_CANCELED) goto out; - - pthread_priority_t p = DISPATCH_NO_PRIORITY; - unsigned long override = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - override = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) ? 
- DISPATCH_PRIORITY_ENFORCE : 0; - p = dbpd->dbpd_priority; + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } - voucher_t v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - if (v) _voucher_retain(v); + if (!slowpath(atomic_flags & DBF_CANCELED)) { + dbpd->dbpd_block(); } - _dispatch_adopt_priority_and_replace_voucher(p, v, override); - dbpd->dbpd_block(); -out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (dispatch_atomic_inc2o(dbpd, dbpd_performed, acquire) == 1) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { dispatch_group_leave(_dbpd_group(dbpd)); } } - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (dispatch_atomic_cmpxchg2o(dbpd, dbpd_queue, dq, NULL, acquire)) { + os_mpsc_queue_t oq; + oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (oq) { // balances dispatch_{,barrier_,group_}async - _dispatch_release(dq); + _os_object_release_internal_inline(oq->_as_os_obj); + } + if (release) { + Block_release(b); } - Block_release(b); } -void -dispatch_block_cancel(dispatch_block_t db) +static void +_dispatch_block_async_invoke(void *block) +{ + _dispatch_block_async_invoke2(block, false); +} + +static void +_dispatch_block_async_invoke_and_release(void *block) +{ + _dispatch_block_async_invoke2(block, true); +} + +void +dispatch_block_cancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_cancel()"); } - (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); } long @@ -2239,7 +2959,7 @@ dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - 
DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_testcancel()"); } return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); @@ -2250,14 +2970,14 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_wait()"); } - unsigned int flags = dispatch_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, DBF_WAITING, relaxed); if (slowpath(flags & (DBF_WAITED | DBF_WAITING))) { - DISPATCH_CLIENT_CRASH("A block object may not be waited for " + DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " "more than once"); } @@ -2267,41 +2987,42 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) pthread_priority_t pp = _dispatch_get_priority(); - dispatch_queue_t boost_dq; - boost_dq = dispatch_atomic_xchg2o(dbpd, dbpd_queue, NULL, acquire); - if (boost_dq) { + os_mpsc_queue_t boost_oq; + boost_oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_oq) { // release balances dispatch_{,barrier_,group_}async. // Can't put the queue back in the timeout case: the block might // finish after we fell out of group_wait and see our NULL, so // neither of us would ever release. Side effect: After a _wait // that times out, subsequent waits will not boost the qos of the // still-running block. 
- _dispatch_queue_wakeup_with_qos_and_release(boost_dq, pp); + dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING | + DISPATCH_WAKEUP_CONSUME); } mach_port_t boost_th = dbpd->dbpd_thread; if (boost_th) { - _dispatch_thread_override_start(boost_th, pp); + _dispatch_thread_override_start(boost_th, pp, dbpd); } - int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (slowpath(performed > 1 || (boost_th && boost_dq))) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and waited for"); + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (slowpath(performed > 1 || (boost_th && boost_oq))) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and waited for"); } long ret = dispatch_group_wait(_dbpd_group(dbpd), timeout); if (boost_th) { - _dispatch_thread_override_end(boost_th); + _dispatch_thread_override_end(boost_th, dbpd); } if (ret) { // timed out: reverse our changes - (void)dispatch_atomic_and2o(dbpd, dbpd_atomic_flags, + (void)os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); } else { - (void)dispatch_atomic_or2o(dbpd, dbpd_atomic_flags, + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); // don't need to re-test here: the second call would see // the first call's WAITING @@ -2316,18 +3037,81 @@ dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); if (!dbpd) { - DISPATCH_CLIENT_CRASH("Invalid block object passed to " + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_notify()"); } - int performed = dispatch_atomic_load2o(dbpd, dbpd_performed, relaxed); + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); if (slowpath(performed > 1)) { - DISPATCH_CLIENT_CRASH("A block object may not be both run more " - "than once and observed"); + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + 
"run more than once and observed"); } return dispatch_group_notify(_dbpd_group(dbpd), queue, notification_block); } +DISPATCH_NOINLINE +void +_dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) +{ + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); + dispatch_block_flags_t block_flags = dbpd->dbpd_flags; + uintptr_t dc_flags = dc->dc_flags; + os_mpsc_queue_t oq = dqu._oq; + + // balanced in d_block_async_invoke_and_release or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, oq, relaxed)) { + _os_object_retain_internal_inline(oq->_as_os_obj); + } + + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else { + dc->dc_func = _dispatch_block_async_invoke; + } + + flags |= block_flags; + if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { + _dispatch_continuation_priority_set(dc, dbpd->dbpd_priority, flags); + } else { + _dispatch_continuation_priority_set(dc, dc->dc_priority, flags); + } + if (block_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DISPATCH_OBJ_BARRIER_BIT; + } + if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + voucher_t v = dbpd->dbpd_voucher; + dc->dc_voucher = v ? 
_voucher_retain(v) : NULL; + dc_flags |= DISPATCH_OBJ_ENFORCE_VOUCHER; + _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); + _dispatch_voucher_ktrace_dc_push(dc); + } else { + _dispatch_continuation_voucher_set(dc, oq, flags); + } + dc_flags |= DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT; + dc->dc_flags = dc_flags; +} + +void +_dispatch_continuation_update_bits(dispatch_continuation_t dc, + uintptr_t dc_flags) +{ + dc->dc_flags = dc_flags; + if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + dc->dc_func = _dispatch_call_block_and_release; + } + } else { + if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { + dc->dc_func = _dispatch_block_async_invoke; + } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + dc->dc_func = _dispatch_Block_invoke(dc->dc_ctxt); + } + } +} + #endif // __BLOCKS__ #pragma mark - @@ -2335,21 +3119,13 @@ dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, DISPATCH_NOINLINE static void -_dispatch_barrier_async_f_slow(dispatch_queue_t dq, void *ctxt, +_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) + dispatch_block_flags_t flags, uintptr_t dc_flags) { dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_async(dq, dc); } DISPATCH_ALWAYS_INLINE @@ -2358,31 +3134,15 @@ _dispatch_barrier_async_f2(dispatch_queue_t dq, void *ctxt, 
dispatch_function_t func, pthread_priority_t pp, dispatch_block_flags_t flags) { - dispatch_continuation_t dc; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; - dc = fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - return _dispatch_barrier_async_f_slow(dq, ctxt, func, pp, flags); + if (!fastpath(dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); } - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - _dispatch_queue_push(dq, dc, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) -{ - return _dispatch_barrier_async_f2(dq, ctxt, func, pp, flags); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_push(dq, dc); } DISPATCH_NOINLINE @@ -2390,7 +3150,7 @@ void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0); + _dispatch_barrier_async_f2(dq, ctxt, func, 0, 0); } DISPATCH_NOINLINE @@ -2398,28 +3158,24 @@ void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_barrier_async_f2(dq, ctxt, func, 0, - DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_NO_VOUCHER); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_queue_push(dq, dc, 0); } #ifdef __BLOCKS__ 
void dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_function_t func = _dispatch_call_block_and_release; - pthread_priority_t pp = 0; - dispatch_block_flags_t flags = 0; - if (slowpath(_dispatch_block_has_private_data(work))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(work); - flags = _dispatch_block_get_flags(work); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - _dispatch_barrier_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; + + _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); + _dispatch_continuation_push(dq, dc); } #endif @@ -2427,130 +3183,144 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) #pragma mark dispatch_async void -_dispatch_async_redirect_invoke(void *ctxt) +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - struct dispatch_continuation_s *dc = ctxt; + dispatch_thread_frame_s dtf; struct dispatch_continuation_s *other_dc = dc->dc_other; - dispatch_queue_t old_dq, dq = dc->dc_data, rq; + dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; + // if we went through _dispatch_root_queue_push_override, + // the "right" root queue was stuffed into dc_func + dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func; + dispatch_queue_t dq = dc->dc_data, rq, old_dq; + struct _dispatch_identity_s di; + + pthread_priority_t op, dp, old_dp; + + if (ctxt_flags) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= ctxt_flags; + } + old_dq = _dispatch_get_current_queue(); + if (assumed_rq) { + _dispatch_queue_set_current(assumed_rq); + _dispatch_root_queue_identity_assume(&di, 0); + } + + 
old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); + op = dq->dq_override; + if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_wqthread_override_start(_dispatch_tid_self(), op); + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_defaultpriority_override(); + } - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); - _dispatch_continuation_pop(other_dc); + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, + DISPATCH_OBJ_CONSUME_BIT, { + _dispatch_continuation_pop(other_dc, dq, flags); + }); + _dispatch_thread_frame_pop(&dtf); + if (assumed_rq) { + _dispatch_root_queue_identity_restore(&di); + _dispatch_queue_set_current(old_dq); + } _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { - if (dispatch_atomic_sub2o(rq, dq_running, 2, relaxed) == 0) { - _dispatch_queue_wakeup(rq); - } + _dispatch_non_barrier_complete(rq); rq = rq->do_targetq; } - if (dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0) { - _dispatch_queue_wakeup(dq); - } - _dispatch_release(dq); -} - -static inline void -_dispatch_async_f_redirect2(dispatch_queue_t dq, dispatch_continuation_t dc, - pthread_priority_t pp) -{ - uint32_t running = 2; + _dispatch_non_barrier_complete(dq); - // Find the queue to redirect to - do { - if (slowpath(dq->dq_items_tail) || - slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) || - slowpath(dq->dq_width == 1)) { - break; - } - running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - if (slowpath(running & 1) || slowpath(running > dq->dq_width)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - break; - } - dq = dq->do_targetq; - } while (slowpath(dq->do_targetq)); + if 
(dtf.dtf_deferred) { + struct dispatch_object_s *dou = dtf.dtf_deferred; + return _dispatch_queue_drain_deferred_invoke(dq, flags, 0, dou); + } - _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); + _dispatch_release_tailcall(dq); } -DISPATCH_NOINLINE -static void -_dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_continuation_t other_dc, pthread_priority_t pp) +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = _dispatch_async_redirect_invoke; - dc->dc_ctxt = dc; + dou._do->do_next = NULL; + dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); + dc->dc_func = NULL; + dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); dc->dc_data = dq; - dc->dc_other = other_dc; - dc->dc_priority = 0; - dc->dc_voucher = NULL; - + dc->dc_other = dou._do; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; _dispatch_retain(dq); - dq = dq->do_targetq; - if (slowpath(dq->do_targetq)) { - return _dispatch_async_f_redirect2(dq, dc, pp); - } - - _dispatch_queue_push(dq, dc, pp); + return dc; } DISPATCH_NOINLINE static void -_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc, - pthread_priority_t pp) +_dispatch_async_f_redirect(dispatch_queue_t dq, + dispatch_object_t dou, pthread_priority_t pp) { - uint32_t running = 2; + if (!slowpath(_dispatch_object_is_redirection(dou))) { + dou._dc = _dispatch_async_redirect_wrap(dq, dou); + } + dq = dq->do_targetq; - do { - if (slowpath(dq->dq_items_tail) - || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { - break; - } - running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - if (slowpath(running > dq->dq_width)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); + // Find the queue to redirect to + while 
(slowpath(DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { + if (!fastpath(_dispatch_queue_try_acquire_async(dq))) { break; } - if (!slowpath(running & 1)) { - return _dispatch_async_f_redirect(dq, dc, pp); + if (!dou._dc->dc_ctxt) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. + dou._dc->dc_ctxt = (void *) + (uintptr_t)_dispatch_queue_autorelease_frequency(dq); } - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - // We might get lucky and find that the barrier has ended by now - } while (!(running & 1)); + dq = dq->do_targetq; + } + + _dispatch_queue_push(dq, dou, pp); +} - _dispatch_queue_push_wakeup(dq, dc, pp, running == 0); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_redirect(dispatch_queue_t dq, + struct dispatch_object_s *dc) +{ + _dispatch_trace_continuation_pop(dq, dc); + // This is a re-redirect, overrides have already been applied + // by _dispatch_async_f2. 
+ // However we want to end up on the root queue matching `dc` qos, so pick up + // the current override of `dq` which includes dc's overrde (and maybe more) + _dispatch_async_f_redirect(dq, dc, dq->dq_override); + _dispatch_introspection_queue_item_complete(dc); } DISPATCH_NOINLINE static void -_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) +_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) { - dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we can break ordering + if (slowpath(dq->dq_items_tail)) { + return _dispatch_continuation_push(dq, dc); + } - // No fastpath/slowpath hint because we simply don't know - if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); + if (slowpath(!_dispatch_queue_try_acquire_async(dq))) { + return _dispatch_continuation_push(dq, dc); } - _dispatch_queue_push(dq, dc, pp); + return _dispatch_async_f_redirect(dq, dc, + _dispatch_continuation_get_override_priority(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -2558,39 +3328,22 @@ static inline void _dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp, dispatch_block_flags_t flags) { - dispatch_continuation_t dc; - - // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width == 1 || flags & DISPATCH_BLOCK_BARRIER) { - return _dispatch_barrier_async_f(dq, ctxt, func, pp, flags); - } - - dc = 
fastpath(_dispatch_continuation_alloc_cacheonly()); - if (!dc) { - return _dispatch_async_f_slow(dq, ctxt, func, pp, flags); - } + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - dc->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - // No fastpath/slowpath hint because we simply don't know - if (dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); + if (!fastpath(dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); } - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); + _dispatch_continuation_async2(dq, dc, false); } DISPATCH_NOINLINE void dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_async_f(dq, ctxt, func, 0, 0); + _dispatch_async_f(dq, ctxt, func, 0, 0); } DISPATCH_NOINLINE @@ -2598,28 +3351,18 @@ void dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_async_f(dq, ctxt, func, 0, - DISPATCH_BLOCK_ENFORCE_QOS_CLASS); + _dispatch_async_f(dq, ctxt, func, 0, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); } #ifdef __BLOCKS__ void dispatch_async(dispatch_queue_t dq, void (^work)(void)) { - dispatch_function_t func = _dispatch_call_block_and_release; - dispatch_block_flags_t flags = 0; - pthread_priority_t pp = 0; - if (slowpath(_dispatch_block_has_private_data(work))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(work); - flags = _dispatch_block_get_flags(work); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - 
_dispatch_async_f(dq, _dispatch_Block_copy(work), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); + _dispatch_continuation_async(dq, dc); } #endif @@ -2628,35 +3371,12 @@ dispatch_async(dispatch_queue_t dq, void (^work)(void)) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) +_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_continuation_t dc) { - dispatch_continuation_t dc; - - _dispatch_retain(dg); dispatch_group_enter(dg); - - dc = _dispatch_continuation_alloc(); - - unsigned long barrier = (flags & DISPATCH_BLOCK_BARRIER) ? - DISPATCH_OBJ_BARRIER_BIT : 0; - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_GROUP_BIT | - barrier); - dc->dc_func = func; - dc->dc_ctxt = ctxt; dc->dc_data = dg; - _dispatch_continuation_voucher_set(dc, flags); - _dispatch_continuation_priority_set(dc, pp, flags); - - pp = _dispatch_continuation_get_override_priority(dq, dc); - - // No fastpath/slowpath hint because we simply don't know - if (dq->dq_width != 1 && !barrier && dq->do_targetq) { - return _dispatch_async_f2(dq, dc, pp); - } - - _dispatch_queue_push(dq, dc, pp); + _dispatch_continuation_async(dq, dc); } DISPATCH_NOINLINE @@ -2664,7 +3384,11 @@ void dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - return _dispatch_group_async_f(dg, dq, ctxt, func, 0, 0); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; + + _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc); } #ifdef __BLOCKS__ @@ -2672,68 +3396,65 @@ void 
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) { - dispatch_function_t func = _dispatch_call_block_and_release; - dispatch_block_flags_t flags = 0; - pthread_priority_t pp = 0; - if (slowpath(_dispatch_block_has_private_data(db))) { - func = _dispatch_block_async_invoke_and_release; - pp = _dispatch_block_get_priority(db); - flags = _dispatch_block_get_flags(db); - // balanced in d_block_async_invoke_and_release or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(db), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } - } - _dispatch_group_async_f(dg, dq, _dispatch_Block_copy(db), func, pp, flags); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; + + _dispatch_continuation_init(dc, dq, db, 0, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc); } #endif #pragma mark - -#pragma mark dispatch_function_invoke - -static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp); +#pragma mark dispatch_sync / dispatch_barrier_sync recurse and invoke DISPATCH_NOINLINE static void -_dispatch_function_invoke_slow(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_invoke_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); - voucher_t ov = _dispatch_adopt_queue_override_voucher(dq); + voucher_t ov; + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + ov = _dispatch_set_priority_and_voucher(0, dq->dq_override_voucher, 0); _dispatch_client_callout(ctxt, func); _dispatch_perfmon_workitem_inc(); - _dispatch_reset_voucher(ov); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_voucher(ov, 0); + _dispatch_thread_frame_pop(&dtf); } DISPATCH_ALWAYS_INLINE static inline 
void -_dispatch_function_invoke(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) { - return _dispatch_function_invoke_slow(dq, ctxt, func); + return _dispatch_sync_function_invoke_slow(dq, ctxt, func); } - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); _dispatch_client_callout(ctxt, func); _dispatch_perfmon_workitem_inc(); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_thread_frame_pop(&dtf); +} + +DISPATCH_NOINLINE +static void +_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); } void _dispatch_sync_recurse_invoke(void *ctxt) { dispatch_continuation_t dc = ctxt; - _dispatch_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); + _dispatch_sync_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, +_dispatch_sync_function_recurse(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { struct dispatch_continuation_s dc = { @@ -2744,70 +3465,118 @@ _dispatch_function_recurse(dispatch_queue_t dq, void *ctxt, _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp); } +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_non_barrier_complete(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_sync_function_recurse(dq, ctxt, 
func, pp); + _dispatch_non_barrier_complete(dq); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_non_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_introspection_non_barrier_sync_begin(dq, func); + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_non_barrier_sync_f_recurse(dq, ctxt, func, pp); + } + _dispatch_non_barrier_sync_f_invoke(dq, ctxt, func); +} + #pragma mark - #pragma mark dispatch_barrier_sync -static void _dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); - -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline _dispatch_thread_semaphore_t -_dispatch_barrier_sync_f_pop(dispatch_queue_t dq, dispatch_object_t dou, - bool lock) +DISPATCH_NOINLINE +static void +_dispatch_barrier_complete(dispatch_queue_t dq) { - _dispatch_thread_semaphore_t sema; - dispatch_continuation_t dc = dou._dc; - mach_port_t th; + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + + if (slowpath(dq->dq_items_tail)) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } - if (DISPATCH_OBJ_IS_VTABLE(dc) || ((long)dc->do_vtable & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) != - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) { - return 0; + if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { + // someone enqueued a slow item at the head + // looping may be its last chance + return _dispatch_try_lock_transfer_or_wakeup(dq); } - _dispatch_trace_continuation_pop(dq, dc); - _dispatch_perfmon_workitem_inc(); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_sync_function_recurse(dq, ctxt, func, pp); + _dispatch_barrier_complete(dq); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + 
_dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_barrier_complete(dq); +} - th = (mach_port_t)dc->dc_data; - dc = dc->dc_ctxt; - dq = dc->dc_data; - sema = (_dispatch_thread_semaphore_t)dc->dc_other; - if (lock) { - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://problem/9032024 running lock must be held until sync_f_slow - // returns - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); +DISPATCH_ALWAYS_INLINE +static void +_dispatch_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_introspection_barrier_sync_begin(dq, func); + if (slowpath(dq->do_targetq->do_targetq)) { + return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); } - _dispatch_introspection_queue_item_complete(dou); - _dispatch_wqthread_override_start(th, - _dispatch_queue_get_override_priority(dq)); - return sema ? sema : MACH_PORT_DEAD; + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } +typedef struct dispatch_barrier_sync_context_s { + struct dispatch_continuation_s dbsc_dc; + dispatch_thread_frame_s dbsc_dtf; +} *dispatch_barrier_sync_context_t; + static void _dispatch_barrier_sync_f_slow_invoke(void *ctxt) { - dispatch_continuation_t dc = ctxt; + dispatch_barrier_sync_context_t dbsc = ctxt; + dispatch_continuation_t dc = &dbsc->dbsc_dc; dispatch_queue_t dq = dc->dc_data; - _dispatch_thread_semaphore_t sema; - sema = (_dispatch_thread_semaphore_t)dc->dc_other; + dispatch_thread_event_t event = (dispatch_thread_event_t)dc->dc_other; dispatch_assert(dq == _dispatch_queue_get_current()); #if DISPATCH_COCOA_COMPAT - if (slowpath(dq->dq_is_thread_bound)) { + if (slowpath(_dispatch_queue_is_thread_bound(dq))) { + dispatch_assert(_dispatch_thread_frame_get_current() == NULL); + + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we mean to see the calling thread + // dispatch thread frames, so 
we fake the link, and then undo it + _dispatch_thread_frame_set_current(&dbsc->dbsc_dtf); // The queue is bound to a non-dispatch thread (e.g. main thread) - _dispatch_continuation_voucher_adopt(dc); + _dispatch_continuation_voucher_adopt(dc, DISPATCH_NO_VOUCHER, + DISPATCH_OBJ_CONSUME_BIT); _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - dispatch_atomic_store2o(dc, dc_func, NULL, release); - _dispatch_thread_semaphore_signal(sema); // release - return; + os_atomic_store2o(dc, dc_func, NULL, release); + _dispatch_thread_frame_set_current(NULL); } #endif - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://9032024 running lock must be held until sync_f_slow returns - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - _dispatch_thread_semaphore_signal(sema); // release + _dispatch_thread_event_signal(event); // release } DISPATCH_NOINLINE @@ -2816,337 +3585,283 @@ _dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); + // see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + return _dispatch_sync_function_invoke(dq, ctxt, func); } - if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_continuation_s dc = { - .dc_data = dq, + + if (!pp) { + pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + dispatch_thread_event_s event; + _dispatch_thread_event_init(&event); + struct dispatch_barrier_sync_context_s dbsc = { + .dbsc_dc = { + .dc_data = dq, #if DISPATCH_COCOA_COMPAT - .dc_func = func, - .dc_ctxt = ctxt, + .dc_func = func, + .dc_ctxt = ctxt, #endif - .dc_other = (void*)sema, + 
.dc_other = &event, + } }; #if DISPATCH_COCOA_COMPAT // It's preferred to execute synchronous blocks on the current thread - // due to thread-local side effects, garbage collection, etc. However, - // blocks submitted to the main thread MUST be run on the main thread - if (slowpath(dq->dq_is_thread_bound)) { - _dispatch_continuation_voucher_set(&dc, 0); + // due to thread-local side effects, etc. However, blocks submitted + // to the main thread MUST be run on the main thread + if (slowpath(_dispatch_queue_is_thread_bound(dq))) { + // consumed by _dispatch_barrier_sync_f_slow_invoke + // or in the DISPATCH_COCOA_COMPAT hunk below + _dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0); + // save frame linkage for _dispatch_barrier_sync_f_slow_invoke + _dispatch_thread_frame_save_state(&dbsc.dbsc_dtf); + // thread bound queues cannot mutate their target queue hierarchy + // so it's fine to look now + _dispatch_introspection_barrier_sync_begin(dq, func); } #endif + uint32_t th_self = _dispatch_tid_self(); struct dispatch_continuation_s dbss = { - .do_vtable = (void *)(DISPATCH_OBJ_BARRIER_BIT | - DISPATCH_OBJ_SYNC_SLOW_BIT), + .dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT, .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dc, - .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), + .dc_ctxt = &dbsc, + .dc_data = (void*)(uintptr_t)th_self, .dc_priority = pp, + .dc_other = &event, + .dc_voucher = DISPATCH_NO_VOUCHER, }; - _dispatch_queue_push(dq, &dbss, - _dispatch_continuation_get_override_priority(dq, &dbss)); - _dispatch_thread_semaphore_wait(sema); // acquire - _dispatch_put_thread_semaphore(sema); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { + DISPATCH_CLIENT_CRASH(dq, "dispatch_barrier_sync called on queue " + "already owned by current thread"); + } - pthread_priority_t p = _dispatch_queue_get_override_priority(dq); - if (p > (pp & 
_PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_continuation_push_sync_slow(dq, &dbss); + _dispatch_thread_event_wait(&event); // acquire + _dispatch_thread_event_destroy(&event); + if (_dispatch_queue_received_override(dq, pp)) { // Ensure that the root queue sees that this thread was overridden. + // pairs with the _dispatch_wqthread_override_start in + // _dispatch_continuation_slow_item_signal _dispatch_set_defaultpriority_override(); } #if DISPATCH_COCOA_COMPAT // Queue bound to a non-dispatch thread - if (dc.dc_func == NULL) { + if (dbsc.dbsc_dc.dc_func == NULL) { return; + } else if (dbsc.dbsc_dc.dc_voucher) { + // this almost never happens, unless a dispatch_sync() onto a thread + // bound queue went to the slow path at the same time dispatch_main() + // is called, or the queue is detached from the runloop. + _voucher_release(dbsc.dbsc_dc.dc_voucher); } #endif - _dispatch_queue_set_thread(dq); - if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func, pp); - } else { - _dispatch_function_invoke(dq, ctxt, func); - } - _dispatch_queue_clear_thread(dq); + _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); +} - if (fastpath(dq->do_suspend_cnt < 2 * DISPATCH_OBJECT_SUSPEND_INTERVAL) && - dq->dq_running == 2) { - // rdar://problem/8290662 "lock transfer" - sema = _dispatch_queue_drain_one_barrier_sync(dq); - if (sema) { - _dispatch_thread_semaphore_signal(sema); // release - return; - } - } - (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, release); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_barrier_sync_f2(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { + // global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case + return 
_dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); } + // + // TODO: the more correct thing to do would be to set dq_override to the qos + // of the thread that just acquired the barrier lock here. Unwinding that + // would slow down the uncontended fastpath however. + // + // The chosen tradeoff is that if an enqueue on a lower priority thread + // contends with this fastpath, this thread may receive a useless override. + // Improving this requires the override level to be part of the atomic + // dq_state + // + _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f2(dispatch_queue_t dq) -{ - if (!slowpath(DISPATCH_OBJECT_SUSPENDED(dq))) { - // rdar://problem/8290662 "lock transfer" - _dispatch_thread_semaphore_t sema; - sema = _dispatch_queue_drain_one_barrier_sync(dq); - if (sema) { - (void)dispatch_atomic_add2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_INTERVAL, relaxed); - // rdar://9032024 running lock must be held until sync_f_slow - // returns: increment by 2 and decrement by 1 - (void)dispatch_atomic_inc2o(dq, dq_running, relaxed); - _dispatch_thread_semaphore_signal(sema); - return; - } - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } +_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, pthread_priority_t pp) +{ + _dispatch_barrier_sync_f2(dq, ctxt, func, pp); } DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - _dispatch_queue_set_thread(dq); - _dispatch_function_invoke(dq, ctxt, func); - _dispatch_queue_clear_thread(dq); - if (slowpath(dq->dq_items_tail)) { - return _dispatch_barrier_sync_f2(dq); - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } + _dispatch_barrier_sync_f2(dq, ctxt, func, 0); } 
+#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_block_with_private_data(dispatch_queue_t dq, + void (^work)(void), dispatch_block_flags_t flags) { - _dispatch_queue_set_thread(dq); - _dispatch_function_recurse(dq, ctxt, func, pp); - _dispatch_queue_clear_thread(dq); - if (slowpath(dq->dq_items_tail)) { - return _dispatch_barrier_sync_f2(dq); - } - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } -} + pthread_priority_t pp = _dispatch_block_get_priority(work); -DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) -{ - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); - } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - // global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); - } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); - } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -DISPATCH_NOINLINE -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); - } - if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, acquire))) { - // global concurrent queues and queues bound to non-dispatch threads - // always 
fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, 0); + flags |= _dispatch_block_get_flags(work); + if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pthread_priority_t tp = _dispatch_get_priority(); + tp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp < tp) { + pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } else if (_dispatch_block_sync_should_enforce_qos_class(flags)) { + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, 0); + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work), + dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain(dq); } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -#ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_barrier_sync_slow(dispatch_queue_t dq, void (^work)(void)) -{ - bool has_pd = _dispatch_block_has_private_data(work); - dispatch_function_t func = _dispatch_Block_invoke(work); - pthread_priority_t pp = 0; - if (has_pd) { - func = _dispatch_block_sync_invoke; - pp = _dispatch_block_get_priority(work); - dispatch_block_flags_t flags = _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - } - // balanced in d_block_sync_invoke or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); - } -#if DISPATCH_COCOA_COMPAT - } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { - // Blocks submitted to the main queue MUST be run on the main thread, - // under GC we must Block_copy in order to notify the thread-local - // garbage collector that the objects are transferring to another thread - // 
rdar://problem/7176237&7181849&7458685 - work = _dispatch_Block_copy(work); - func = _dispatch_call_block_and_release; + if (flags & DISPATCH_BLOCK_BARRIER) { + _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, pp); + } else { + _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, pp); } -#endif - _dispatch_barrier_sync_f(dq, work, func, pp); } void dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) { - if (slowpath(dq->dq_is_thread_bound) || - slowpath(_dispatch_block_has_private_data(work))) { - return _dispatch_barrier_sync_slow(dq, work); + if (slowpath(_dispatch_block_has_private_data(work))) { + dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; + return _dispatch_sync_block_with_private_data(dq, work, flags); } dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } #endif -DISPATCH_NOINLINE -static void -_dispatch_barrier_trysync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_queue_set_thread(dq); - _dispatch_function_invoke(dq, ctxt, func); - _dispatch_queue_clear_thread(dq); - if (slowpath(dispatch_atomic_dec2o(dq, dq_running, release) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - DISPATCH_NOINLINE void -_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { // Use for mutation of queue-/source-internal state only, ignores target // queue hierarchy! 
- if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq)) - || slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_running, 0, 1, - acquire))) { + if (!fastpath(_dispatch_queue_try_acquire_barrier_sync(dq))) { return _dispatch_barrier_async_detached_f(dq, ctxt, func); } - _dispatch_barrier_trysync_f_invoke(dq, ctxt, func); + // skip the recursion because it's about the queue state only + _dispatch_barrier_sync_f_invoke(dq, ctxt, func); } #pragma mark - #pragma mark dispatch_sync +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_complete(dispatch_queue_t dq) +{ + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (_dq_state_is_runnable(new_state)) { + if (!_dq_state_is_runnable(old_state)) { + // we're making a FULL -> non FULL transition + new_state |= DISPATCH_QUEUE_DIRTY; + } + if (!_dq_state_drain_locked(new_state)) { + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(new_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= _dispatch_tid_self(); + } + } + } + }); + + if (_dq_state_is_in_barrier(new_state)) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + if (!_dq_state_is_runnable(old_state)) { + _dispatch_queue_try_wakeup(dq, new_state, 0); + } +} + DISPATCH_NOINLINE static void _dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp, bool wakeup) + pthread_priority_t pp) { - if (!pp) pp = (_dispatch_get_priority() | _PTHREAD_PRIORITY_ENFORCE_FLAG); - _dispatch_thread_semaphore_t sema 
= _dispatch_get_thread_semaphore(); + dispatch_assert(dq->do_targetq); + if (!pp) { + pp = _dispatch_get_priority(); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + dispatch_thread_event_s event; + _dispatch_thread_event_init(&event); + uint32_t th_self = _dispatch_tid_self(); struct dispatch_continuation_s dc = { - .do_vtable = (void*)DISPATCH_OBJ_SYNC_SLOW_BIT, + .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, #if DISPATCH_INTROSPECTION .dc_func = func, .dc_ctxt = ctxt, - .dc_data = (void*)(uintptr_t)_dispatch_thread_port(), #endif - .dc_other = (void*)sema, + .dc_data = (void*)(uintptr_t)th_self, + .dc_other = &event, .dc_priority = pp, + .dc_voucher = DISPATCH_NO_VOUCHER, }; - _dispatch_queue_push_wakeup(dq, &dc, - _dispatch_continuation_get_override_priority(dq, &dc), wakeup); - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { + DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue " + "already owned by current thread"); + } - pthread_priority_t p = _dispatch_queue_get_override_priority(dq); - if (p > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { + _dispatch_continuation_push_sync_slow(dq, &dc); + _dispatch_thread_event_wait(&event); // acquire + _dispatch_thread_event_destroy(&event); + if (_dispatch_queue_received_override(dq, pp)) { // Ensure that the root queue sees that this thread was overridden. 
+ // pairs with the _dispatch_wqthread_override_start in + // _dispatch_continuation_slow_item_signal _dispatch_set_defaultpriority_override(); } - - if (slowpath(dq->do_targetq->do_targetq)) { - _dispatch_function_recurse(dq, ctxt, func, pp); - } else { - _dispatch_function_invoke(dq, ctxt, func); - } - - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_function_invoke(dq, ctxt, func); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) -{ - _dispatch_function_recurse(dq, ctxt, func, pp); - if (slowpath(dispatch_atomic_sub2o(dq, dq_running, 2, relaxed) == 0)) { - _dispatch_queue_wakeup(dq); - } + _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } +DISPATCH_ALWAYS_INLINE static inline void _dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { - // 1) ensure that this thread hasn't enqueued anything ahead of this call - // 2) the queue is not suspended - if (slowpath(dq->dq_items_tail) || slowpath(DISPATCH_OBJECT_SUSPENDED(dq))){ - return _dispatch_sync_f_slow(dq, ctxt, func, pp, false); - } - uint32_t running = dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - // re-check suspension after barrier check - if (slowpath(running & 1) || _dispatch_object_suspended(dq)) { - running = dispatch_atomic_sub2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_slow(dq, ctxt, func, pp, running == 0); + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we 
can break ordering + if (slowpath(dq->dq_items_tail)) { + return _dispatch_sync_f_slow(dq, ctxt, func, pp); } - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_sync_f_recurse(dq, ctxt, func, pp); + // concurrent queues do not respect width on sync + if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { + return _dispatch_sync_f_slow(dq, ctxt, func, pp); } - _dispatch_sync_f_invoke(dq, ctxt, func); + _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); } DISPATCH_NOINLINE @@ -3154,148 +3869,182 @@ static void _dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, pthread_priority_t pp) { - if (fastpath(dq->dq_width == 1)) { - return _dispatch_barrier_sync_f(dq, ctxt, func, pp); + if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { + return _dispatch_sync_f2(dq, ctxt, func, pp); } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); - } - _dispatch_sync_f2(dq, ctxt, func, pp); + return _dispatch_barrier_sync_f(dq, ctxt, func, pp); } DISPATCH_NOINLINE void dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - if (fastpath(dq->dq_width == 1)) { - return dispatch_barrier_sync_f(dq, ctxt, func); - } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, ctxt, func); + if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { + return _dispatch_sync_f2(dq, ctxt, func, 0); } - _dispatch_sync_f2(dq, ctxt, func, 0); + return dispatch_barrier_sync_f(dq, ctxt, func); } #ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_sync_slow(dispatch_queue_t dq, void (^work)(void)) +void +dispatch_sync(dispatch_queue_t dq, void (^work)(void)) { - bool has_pd = _dispatch_block_has_private_data(work); - if (has_pd && 
(_dispatch_block_get_flags(work) & DISPATCH_BLOCK_BARRIER)) { - return _dispatch_barrier_sync_slow(dq, work); + if (slowpath(_dispatch_block_has_private_data(work))) { + return _dispatch_sync_block_with_private_data(dq, work, 0); } - dispatch_function_t func = _dispatch_Block_invoke(work); - pthread_priority_t pp = 0; - if (has_pd) { - func = _dispatch_block_sync_invoke; - pp = _dispatch_block_get_priority(work); - dispatch_block_flags_t flags = _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (!(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } + dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); +} +#endif + +#pragma mark - +#pragma mark dispatch_trysync + +struct trysync_context { + dispatch_queue_t tc_dq; + void *tc_ctxt; + dispatch_function_t tc_func; +}; + +DISPATCH_NOINLINE +static int +_dispatch_trysync_recurse(dispatch_queue_t dq, + struct trysync_context *tc, bool barrier) +{ + dispatch_queue_t tq = dq->do_targetq; + + if (barrier) { + if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { + return EWOULDBLOCK; } - // balanced in d_block_sync_invoke or d_block_wait - if (dispatch_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, release)) { - _dispatch_retain(dq); + } else { + // check nothing was queued by the current + // thread ahead of this call. _dispatch_queue_try_reserve_sync_width + // ignores the ENQUEUED bit which could cause it to miss a barrier_async + // made by the same thread just before. 
+ if (slowpath(dq->dq_items_tail)) { + return EWOULDBLOCK; + } + // concurrent queues do not respect width on sync + if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { + return EWOULDBLOCK; } -#if DISPATCH_COCOA_COMPAT - } else if (dq->dq_is_thread_bound && dispatch_begin_thread_4GC) { - // Blocks submitted to the main queue MUST be run on the main thread, - // under GC we must Block_copy in order to notify the thread-local - // garbage collector that the objects are transferring to another thread - // rdar://problem/7176237&7181849&7458685 - work = _dispatch_Block_copy(work); - func = _dispatch_call_block_and_release; -#endif } - if (slowpath(!dq->do_targetq)) { - // the global concurrent queues do not need strict ordering - (void)dispatch_atomic_add2o(dq, dq_running, 2, relaxed); - return _dispatch_sync_f_invoke(dq, work, func); + + int rc = 0; + if (_dispatch_queue_cannot_trysync(tq)) { + _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); + rc = ENOTSUP; + } else if (tq->do_targetq) { + rc = _dispatch_trysync_recurse(tq, tc, tq->dq_width == 1); + if (rc == ENOTSUP) { + _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); + } + } else { + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, tq); + _dispatch_sync_function_invoke(tc->tc_dq, tc->tc_ctxt, tc->tc_func); + _dispatch_thread_frame_pop(&dtf); + } + if (barrier) { + _dispatch_barrier_complete(dq); + } else { + _dispatch_non_barrier_complete(dq); } - _dispatch_sync_f2(dq, work, func, pp); + return rc; } -void -dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +DISPATCH_NOINLINE +bool +_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f) { - if (fastpath(dq->dq_width == 1)) { - return dispatch_barrier_sync(dq, work); + if (slowpath(!dq->do_targetq)) { + _dispatch_sync_function_invoke(dq, ctxt, f); + return true; } - if (slowpath(dq->dq_is_thread_bound) || - slowpath(_dispatch_block_has_private_data(work)) ) { - return 
_dispatch_sync_slow(dq, work); + if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + return false; } - dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); + struct trysync_context tc = { + .tc_dq = dq, + .tc_func = f, + .tc_ctxt = ctxt, + }; + return _dispatch_trysync_recurse(dq, &tc, true) == 0; } -#endif - -#pragma mark - -#pragma mark dispatch_after -void -_dispatch_after_timer_callback(void *ctxt) +DISPATCH_NOINLINE +bool +_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - dispatch_continuation_t dc = ctxt, dc1; - dispatch_source_t ds = dc->dc_data; - dc1 = _dispatch_continuation_free_cacheonly(dc); - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - dispatch_source_cancel(ds); - dispatch_release(ds); - if (slowpath(dc1)) { - _dispatch_continuation_free_to_cache_limit(dc1); + if (slowpath(!dq->do_targetq)) { + _dispatch_sync_function_invoke(dq, ctxt, f); + return true; } + if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + return false; + } + struct trysync_context tc = { + .tc_dq = dq, + .tc_func = f, + .tc_ctxt = ctxt, + }; + return _dispatch_trysync_recurse(dq, &tc, dq->dq_width == 1) == 0; } -DISPATCH_NOINLINE -void -dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, - dispatch_function_t func) +#pragma mark - +#pragma mark dispatch_after + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void *ctxt, void *handler, bool block) { - uint64_t delta, leeway; dispatch_source_t ds; + uint64_t leeway, delta; if (when == DISPATCH_TIME_FOREVER) { #if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH( - "dispatch_after_f() called with 'when' == infinity"); + DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity"); #endif return; } delta = _dispatch_timeout(when); if (delta == 0) { - return dispatch_async_f(queue, ctxt, func); + if (block) { + return dispatch_async(queue, handler); + } + return dispatch_async_f(queue, ctxt, handler); } 
leeway = delta / 10; // + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; // this function can and should be optimized to not use a dispatch source - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, queue); + ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); dispatch_assert(ds); - // TODO: don't use a separate continuation & voucher dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); - dc->dc_func = func; - dc->dc_ctxt = ctxt; + if (block) { + _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); + } else { + _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); + } + // reference `ds` so that it doesn't show up as a leak dc->dc_data = ds; - - dispatch_set_context(ds, dc); - dispatch_source_set_event_handler_f(ds, _dispatch_after_timer_callback); + _dispatch_source_set_event_handler_continuation(ds, dc); dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); - dispatch_resume(ds); + dispatch_activate(ds); +} + +DISPATCH_NOINLINE +void +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, + dispatch_function_t func) +{ + _dispatch_after(when, queue, ctxt, func, false); } #ifdef __BLOCKS__ @@ -3303,138 +4052,144 @@ void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t work) { - // test before the copy of the block - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH( - "dispatch_after() called with 'when' == infinity"); -#endif - return; - } - dispatch_after_f(when, queue, _dispatch_Block_copy(work), - _dispatch_call_block_and_release); + _dispatch_after(when, queue, NULL, work, true); } #endif #pragma mark - -#pragma mark dispatch_queue_push - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_list_slow2(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, bool 
retained) -{ - // The queue must be retained before dq_items_head is written in order - // to ensure that the reference is still valid when _dispatch_wakeup is - // called. Otherwise, if preempted between the assignment to - // dq_items_head and _dispatch_wakeup, the blocks submitted to the - // queue may release the last reference to the queue when invoked by - // _dispatch_queue_drain. - if (!retained) _dispatch_retain(dq); - dq->dq_items_head = obj; - return _dispatch_queue_wakeup_with_qos_and_release(dq, pp); -} +#pragma mark dispatch_queue_wakeup DISPATCH_NOINLINE void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, unsigned int n, bool retained) +_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { - dispatch_assert(!retained); - dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); - return _dispatch_queue_wakeup_global2(dq, n); + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + + if (_dispatch_queue_class_probe(dq)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + if (target) { + return _dispatch_queue_class_wakeup(dq, pp, flags, target); + } else if (pp) { + return _dispatch_queue_class_override_drainer(dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - _dispatch_queue_push_list_slow2(dq, pp, obj, retained); } -DISPATCH_NOINLINE -void -_dispatch_queue_push_slow(dispatch_queue_t dq, pthread_priority_t pp, - struct dispatch_object_s *obj, bool retained) +#if DISPATCH_COCOA_COMPAT +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) { - if (dx_type(dq) == DISPATCH_QUEUE_ROOT_TYPE && !dq->dq_is_thread_bound) { - dispatch_assert(!retained); - dispatch_atomic_store2o(dq, dq_items_head, obj, relaxed); - return 
_dispatch_queue_wakeup_global(dq); - } - _dispatch_queue_push_list_slow2(dq, pp, obj, retained); +#if TARGET_OS_MAC + return MACH_PORT_VALID(handle); +#elif defined(__linux__) + return handle >= 0; +#else +#error "runloop support not implemented on this platform" +#endif } -#pragma mark - -#pragma mark dispatch_queue_probe +DISPATCH_ALWAYS_INLINE +static inline dispatch_runloop_handle_t +_dispatch_runloop_queue_get_handle(dispatch_queue_t dq) +{ +#if TARGET_OS_MAC + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); +#elif defined(__linux__) + // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#else +#error "runloop support not implemented on this platform" +#endif +} -unsigned long -_dispatch_queue_probe(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_t handle) { - return _dispatch_queue_class_probe(dq); +#if TARGET_OS_MAC + dq->do_ctxt = (void *)(uintptr_t)handle; +#elif defined(__linux__) + // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL + dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#else +#error "runloop support not implemented on this platform" +#endif } +#endif // DISPATCH_COCOA_COMPAT -#if DISPATCH_COCOA_COMPAT -unsigned long -_dispatch_runloop_queue_probe(dispatch_queue_t dq) +void +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - if (_dispatch_queue_class_probe(dq)) { - if (dq->do_xref_cnt == -1) return true; // - return _dispatch_runloop_queue_wakeup(dq); +#if DISPATCH_COCOA_COMPAT + if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { + // + return _dispatch_queue_wakeup(dq, pp, flags); } - return false; -} -#endif -unsigned long -_dispatch_mgr_queue_probe(dispatch_queue_t dq) -{ if (_dispatch_queue_class_probe(dq)) { - return _dispatch_mgr_wakeup(dq); + return 
_dispatch_runloop_queue_poke(dq, pp, flags); } - return false; + + pp = _dispatch_queue_reset_override_priority(dq, true); + if (pp) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (_dispatch_queue_class_probe(dq)) { + _dispatch_runloop_queue_poke(dq, pp, flags); + } + _dispatch_thread_override_end(owner, dq); + return; + } + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +#else + return _dispatch_queue_wakeup(dq, pp, flags); +#endif } -unsigned long -_dispatch_root_queue_probe(dispatch_queue_t dq) +void +_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { - _dispatch_queue_wakeup_global(dq); - return false; +#if DISPATCH_COCOA_COMPAT + if (_dispatch_queue_is_thread_bound(dq)) { + return _dispatch_runloop_queue_wakeup(dq, pp, flags); + } +#endif + return _dispatch_queue_wakeup(dq, pp, flags); } -#pragma mark - -#pragma mark dispatch_wakeup - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -dispatch_queue_t -_dispatch_wakeup(dispatch_object_t dou) +void +_dispatch_root_queue_wakeup(dispatch_queue_t dq, + pthread_priority_t pp DISPATCH_UNUSED, + dispatch_wakeup_flags_t flags) { - unsigned long type = dx_metatype(dou._do); - if (type == _DISPATCH_QUEUE_TYPE || type == _DISPATCH_SOURCE_TYPE) { - return _dispatch_queue_wakeup(dou._dq); - } - if (_dispatch_object_suspended(dou)) { - return NULL; - } - if (!dx_probe(dou._do)) { - return NULL; - } - if (!dispatch_atomic_cmpxchg2o(dou._do, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { - return NULL; + if (flags & DISPATCH_WAKEUP_CONSUME) { + // see _dispatch_queue_push_set_head + dispatch_assert(flags & DISPATCH_WAKEUP_FLUSH); } - _dispatch_retain(dou._do); - dispatch_queue_t tq = dou._do->do_targetq; - _dispatch_queue_push(tq, dou._do, 0); - return tq; // libdispatch does not need this, but the Instrument DTrace - // probe does + 
_dispatch_global_queue_poke(dq); } +#pragma mark - +#pragma mark dispatch root queues poke + #if DISPATCH_COCOA_COMPAT static inline void -_dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) +_dispatch_runloop_queue_class_poke(dispatch_queue_t dq) { - mach_port_t mp = (mach_port_t)dq->do_ctxt; - if (!mp) { + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { return; } + +#if TARGET_OS_MAC + mach_port_t mp = handle; kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { case MACH_SEND_TIMEOUT: @@ -3445,43 +4200,56 @@ _dispatch_runloop_queue_wakeup_thread(dispatch_queue_t dq) (void)dispatch_assume_zero(kr); break; } -} - -DISPATCH_NOINLINE DISPATCH_WEAK -unsigned long -_dispatch_runloop_queue_wakeup(dispatch_queue_t dq) -{ - _dispatch_runloop_queue_wakeup_thread(dq); - return false; +#elif defined(__linux__) + int result; + do { + result = eventfd_write(handle, 1); + } while (result == -1 && errno == EINTR); + (void)dispatch_assume_zero(result); +#else +#error "runloop support not implemented on this platform" +#endif } DISPATCH_NOINLINE -static dispatch_queue_t -_dispatch_main_queue_wakeup(void) -{ - dispatch_queue_t dq = &_dispatch_main_q; - if (!dq->dq_is_thread_bound) { - return NULL; +static void +_dispatch_runloop_queue_poke(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) +{ + // it's not useful to handle WAKEUP_FLUSH because mach_msg() will have + // a release barrier and that when runloop queues stop being thread bound + // they have a non optional wake-up to start being a "normal" queue + // either in _dispatch_runloop_queue_xref_dispose, + // or in _dispatch_queue_cleanup2() for the main thread. 
+ + if (dq == &_dispatch_main_q) { + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + } + _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); + if (flags & DISPATCH_WAKEUP_OVERRIDING) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + _dispatch_thread_override_start(owner, pp, dq); + if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) { + _dispatch_thread_override_end(owner, dq); + } + } + _dispatch_runloop_queue_class_poke(dq); + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - _dispatch_runloop_queue_wakeup_thread(dq); - return NULL; } #endif DISPATCH_NOINLINE static void -_dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) +_dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) { dispatch_root_queue_context_t qc = dq->do_ctxt; uint32_t i = n; int r; _dispatch_debug_root_queue(dq, __func__); - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init); - #if HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) @@ -3527,7 +4295,7 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } uint32_t j, t_count; // seq_cst with atomic store to tail - t_count = dispatch_atomic_load2o(qc, dgq_thread_pool_size, seq_cst); + t_count = os_atomic_load2o(qc, dgq_thread_pool_size, ordered); do { if (!t_count) { _dispatch_root_queue_debug("pthread pool is full for root queue: " @@ -3535,12 +4303,12 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) return; } j = i > t_count ? 
t_count : i; - } while (!dispatch_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, + } while (!os_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, t_count - j, &t_count, acquire)); pthread_attr_t *attr = &pqc->dpq_thread_attr; pthread_t tid, *pthr = &tid; -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES if (slowpath(dq == &_dispatch_mgr_root_queue)) { pthr = _dispatch_mgr_root_queue_init(); } @@ -3558,7 +4326,7 @@ _dispatch_queue_wakeup_global_slow(dispatch_queue_t dq, unsigned int n) } static inline void -_dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) +_dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) { if (!_dispatch_queue_class_probe(dq)) { return; @@ -3569,148 +4337,209 @@ _dispatch_queue_wakeup_global2(dispatch_queue_t dq, unsigned int n) #if DISPATCH_USE_PTHREAD_POOL (qc->dgq_kworkqueue != (void*)(~0ul)) && #endif - !dispatch_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { + !os_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { _dispatch_root_queue_debug("worker thread request still pending for " "global queue: %p", dq); return; } #endif // HAVE_PTHREAD_WORKQUEUES - return _dispatch_queue_wakeup_global_slow(dq, n); + return _dispatch_global_queue_poke_slow(dq, n); } static inline void -_dispatch_queue_wakeup_global(dispatch_queue_t dq) +_dispatch_global_queue_poke(dispatch_queue_t dq) { - return _dispatch_queue_wakeup_global2(dq, 1); + return _dispatch_global_queue_poke_n(dq, 1); } -#pragma mark - -#pragma mark dispatch_queue_invoke - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -dispatch_queue_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr) -{ - dispatch_queue_t dq = dou._dq; - dispatch_queue_t otq = dq->do_targetq; - dispatch_queue_t cq = _dispatch_queue_get_current(); - - if (slowpath(cq != otq)) { - return otq; - } - - *sema_ptr = _dispatch_queue_drain(dq); - - if (slowpath(otq != dq->do_targetq)) { - // An 
item on the queue changed the target queue - return dq->do_targetq; - } - return NULL; -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n) { - _dispatch_queue_class_invoke(dq, dou._dc, flags, dispatch_queue_invoke2); + return _dispatch_global_queue_poke_n(dq, n); } #pragma mark - #pragma mark dispatch_queue_drain -DISPATCH_ALWAYS_INLINE -static inline struct dispatch_object_s* -_dispatch_queue_head(dispatch_queue_t dq) +void +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t flags) { - struct dispatch_object_s *dc; - _dispatch_wait_until(dc = fastpath(dq->dq_items_head)); - return dc; + _dispatch_continuation_pop_inline(dou, dq, flags); } -DISPATCH_ALWAYS_INLINE -static inline struct dispatch_object_s* -_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +void +_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, + dispatch_invoke_flags_t flags) { - struct dispatch_object_s *next_dc; - next_dc = fastpath(dc->do_next); - dq->dq_items_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dq, dq_items_tail, dc, NULL, - relaxed)) { - _dispatch_wait_until(next_dc = fastpath(dc->do_next)); - dq->dq_items_head = next_dc; - } - return next_dc; + _dispatch_continuation_invoke_inline(dou, override_voucher, flags); } -_dispatch_thread_semaphore_t -_dispatch_queue_drain(dispatch_object_t dou) +/* + * Drain comes in 2 flavours (serial/concurrent) and 2 modes + * (redirecting or not). + * + * Serial + * ~~~~~~ + * Serial drain is about serial queues (width == 1). It doesn't support + * the redirecting mode, which doesn't make sense, and treats all continuations + * as barriers. 
Bookkeeping is minimal in serial flavour, most of the loop + * is optimized away. + * + * Serial drain stops if the width of the queue grows to larger than 1. + * Going through a serial drain prevents any recursive drain from being + * redirecting. + * + * Concurrent + * ~~~~~~~~~~ + * When in non-redirecting mode (meaning one of the target queues is serial), + * non-barriers and barriers alike run in the context of the drain thread. + * Slow non-barrier items are still all signaled so that they can make progress + * toward the dispatch_sync() that will serialize them all . + * + * In redirecting mode, non-barrier work items are redirected downward. + * + * Concurrent drain stops if the width of the queue becomes 1, so that the + * queue drain moves to the more efficient serial mode. + */ +DISPATCH_ALWAYS_INLINE +static dispatch_queue_t +_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, + uint64_t *owned_ptr, struct dispatch_object_s **dc_out, + bool serial_drain) { - dispatch_queue_t dq = dou._dq, orig_tq, old_dq; - old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - struct dispatch_object_s *dc, *next_dc; - _dispatch_thread_semaphore_t sema = 0; + dispatch_queue_t orig_tq = dq->do_targetq; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + uint64_t owned = *owned_ptr; - // Continue draining sources after target queue change rdar://8928171 - bool check_tq = (dx_type(dq) != DISPATCH_SOURCE_KEVENT_TYPE); - - orig_tq = dq->do_targetq; - - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(dq->dq_priority); - - pthread_priority_t op = _dispatch_queue_get_override_priority(dq); - pthread_priority_t dp = _dispatch_get_defaultpriority(); - dp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (op > dp) { - _dispatch_wqthread_override_start(dq->dq_thread, op); + _dispatch_thread_frame_push(&dtf, dq); + if (_dq_state_is_in_barrier(owned)) { + // we really 
own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` + // but width can change while draining barrier work items, so we only + // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` + owned = DISPATCH_QUEUE_IN_BARRIER; } - //dispatch_debug_queue(dq, __func__); - while (dq->dq_items_tail) { dc = _dispatch_queue_head(dq); do { - if (DISPATCH_OBJECT_SUSPENDED(dq)) { + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { goto out; } - if (dq->dq_running > dq->dq_width) { + if (unlikely(orig_tq != dq->do_targetq)) { goto out; } - if (slowpath(orig_tq != dq->do_targetq) && check_tq) { + if (unlikely(serial_drain != (dq->dq_width == 1))) { goto out; } - bool redirect = false; - if (!fastpath(dq->dq_width == 1)) { - if (!DISPATCH_OBJ_IS_VTABLE(dc) && - (long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { - if (dq->dq_running > 1) { - goto out; + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + goto out; + } + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_slow_item(dc)) { + owned = 0; + goto out_with_deferred; + } + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_and2o(dq, dq_state, ~owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_slow_item(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; } - } else { - redirect = true; + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } + + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_slow_item(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_slow_item_signal(dq, dc); + continue; + } + 
+ if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_redirect(dq, dc); + continue; } } - next_dc = _dispatch_queue_next(dq, dc); - if (redirect) { - _dispatch_continuation_redirect(dq, dc); - continue; - } - if ((sema = _dispatch_barrier_sync_f_pop(dq, dc, true))) { - goto out; - } - _dispatch_continuation_pop(dc); + + _dispatch_continuation_pop_inline(dc, dq, flags); _dispatch_perfmon_workitem_inc(); + if (unlikely(dtf.dtf_deferred)) { + goto out_with_deferred_compute_owned; + } } while ((dc = next_dc)); } out: - _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); - return sema; + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); + } + *owned_ptr = owned; + _dispatch_thread_frame_pop(&dtf); + return dc ? 
dq->do_targetq : NULL; + +out_with_no_width: + *owned_ptr = 0; + _dispatch_thread_frame_pop(&dtf); + return NULL; + +out_with_deferred_compute_owned: + if (serial_drain) { + owned = DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (next_dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, next_dc); + } + } +out_with_deferred: + *owned_ptr = owned; + if (unlikely(!dc_out)) { + DISPATCH_INTERNAL_CRASH(dc, + "Deferred continuation on source, mach channel or mgr"); + } + *dc_out = dc; + _dispatch_thread_frame_pop(&dtf); + return dq->do_targetq; +} + +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_queue_concurrent_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr) +{ + return _dispatch_queue_drain(dq, flags, owned, dc_ptr, false); +} + +DISPATCH_NOINLINE +dispatch_queue_t +_dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr) +{ + flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; + return _dispatch_queue_drain(dq, flags, owned, dc_ptr, true); } #if DISPATCH_COCOA_COMPAT @@ -3718,48 +4547,49 @@ static void _dispatch_main_queue_drain(void) { dispatch_queue_t dq = &_dispatch_main_q; + dispatch_thread_frame_s dtf; + if (!dq->dq_items_tail) { return; } - struct dispatch_continuation_s marker = { - .do_vtable = NULL, - }; - struct dispatch_object_s *dmarker = (void*)▮ - _dispatch_queue_push_notrace(dq, dmarker, 0); + + if (!fastpath(_dispatch_queue_is_thread_bound(dq))) { + DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" + " after dispatch_main()"); + } + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (slowpath(owner != _dispatch_tid_self())) { + DISPATCH_CLIENT_CRASH(owner, 
"_dispatch_main_queue_callback_4CF called" + " from the wrong thread"); + } + + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); _dispatch_perfmon_start(); - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); + // hide the frame chaining when CFRunLoop + // drains the main runloop, as this should not be observable that way + _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); + pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); voucher_t voucher = _voucher_copy(); - struct dispatch_object_s *dc, *next_dc; - dc = _dispatch_queue_head(dq); + struct dispatch_object_s *dc, *next_dc, *tail; + dc = os_mpsc_capture_snapshot(dq, dq_items, &tail); do { - next_dc = _dispatch_queue_next(dq, dc); - if (dc == dmarker) { - goto out; - } - _dispatch_continuation_pop(dc); + next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); + _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); _dispatch_perfmon_workitem_inc(); } while ((dc = next_dc)); - DISPATCH_CRASH("Main queue corruption"); - -out: - if (next_dc) { - _dispatch_main_queue_wakeup(); - } else { - pthread_priority_t p = _dispatch_queue_reset_override_priority(dq); - if (p > (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_thread_override_end(dq->dq_thread); - } - } + // runloop based queues use their port for the queue PUBLISH pattern + // so this raw call to dx_wakeup(0) is valid + dx_wakeup(dq, 0, 0); _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_thread_frame_pop(&dtf); 
_dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); } @@ -3770,269 +4600,729 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) if (!dq->dq_items_tail) { return false; } + dispatch_thread_frame_s dtf; _dispatch_perfmon_start(); - dispatch_queue_t old_dq = _dispatch_thread_getspecific(dispatch_queue_key); - _dispatch_thread_setspecific(dispatch_queue_key, dq); + _dispatch_thread_frame_push(&dtf, dq); pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri); + pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); voucher_t voucher = _voucher_copy(); struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); next_dc = _dispatch_queue_next(dq, dc); - _dispatch_continuation_pop(dc); + _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); _dispatch_perfmon_workitem_inc(); + if (!next_dc) { + // runloop based queues use their port for the queue PUBLISH pattern + // so this raw call to dx_wakeup(0) is valid + dx_wakeup(dq, 0, 0); + } + _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_priority_and_voucher(old_pri, voucher); _dispatch_reset_defaultpriority(old_dp); - _dispatch_thread_setspecific(dispatch_queue_key, old_dq); + _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_thread_frame_pop(&dtf); _dispatch_perfmon_end(); _dispatch_force_cache_cleanup(); return next_dc; } #endif -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline _dispatch_thread_semaphore_t -_dispatch_queue_drain_one_barrier_sync(dispatch_queue_t dq) +DISPATCH_NOINLINE +void +_dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) { - // rdar://problem/8290662 "lock transfer" - struct dispatch_object_s *dc; - _dispatch_thread_semaphore_t sema; + dispatch_continuation_t dc_tmp, dc_start, dc_end; + struct dispatch_object_s *dc = NULL; + uint64_t dq_state, owned; + size_t count = 0; + + owned = DISPATCH_QUEUE_IN_BARRIER; + owned += dq->dq_width * 
DISPATCH_QUEUE_WIDTH_INTERVAL; +attempt_running_slow_head: + if (slowpath(dq->dq_items_tail) && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + dc = _dispatch_queue_head(dq); + if (!_dispatch_object_is_slow_item(dc)) { + // not a slow item, needs to wake up + } else if (fastpath(dq->dq_width == 1) || + _dispatch_object_is_barrier(dc)) { + // rdar://problem/8290662 "barrier/writer lock transfer" + dc_start = dc_end = (dispatch_continuation_t)dc; + owned = 0; + count = 1; + dc = _dispatch_queue_next(dq, dc); + } else { + // "reader lock transfer" + // we must not signal semaphores immediately because our right + // for dequeuing is granted through holding the full "barrier" width + // which a signaled work item could relinquish out from our feet + dc_start = (dispatch_continuation_t)dc; + do { + // no check on width here because concurrent queues + // do not respect width for blocked readers, the thread + // is already spent anyway + dc_end = (dispatch_continuation_t)dc; + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + count++; + dc = _dispatch_queue_next(dq, dc); + } while (dc && _dispatch_object_is_slow_non_barrier(dc)); + } - // queue is locked, or suspended and not being drained - dc = dq->dq_items_head; - if (slowpath(!dc) || !(sema = _dispatch_barrier_sync_f_pop(dq, dc, false))){ - return 0; + if (count) { + _dispatch_queue_drain_transfer_lock(dq, owned, dc_start); + do { + // signaled job will release the continuation + dc_tmp = dc_start; + dc_start = dc_start->do_next; + _dispatch_continuation_slow_item_signal(dq, dc_tmp); + } while (dc_tmp != dc_end); + return; + } + } + + if (dc || dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { + // the following wakeup is needed for sources + // or mach channels: when ds_pending_data is set at the same time + // as a trysync_f happens, lock transfer code above doesn't know about + // ds_pending_data or the wakeup logic, but lock transfer is useless + // for sources and mach channels in the first place. 
+ owned = _dispatch_queue_adjust_owned(dq, owned, dc); + dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL); + return _dispatch_queue_try_wakeup(dq, dq_state, 0); + } else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { + // someone enqueued a slow item at the head + // looping may be its last chance + goto attempt_running_slow_head; } - // dequeue dc, it is a barrier sync - (void)_dispatch_queue_next(dq, dc); - return sema; } void _dispatch_mgr_queue_drain(void) { + const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; dispatch_queue_t dq = &_dispatch_mgr_q; - if (!dq->dq_items_tail) { - return _dispatch_force_cache_cleanup(); + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + if (dq->dq_items_tail) { + _dispatch_perfmon_start(); + if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) { + DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); + } + _dispatch_voucher_debug("mgr queue clear", NULL); + _voucher_clear(); + _dispatch_reset_defaultpriority_override(); + _dispatch_perfmon_end(); } - _dispatch_perfmon_start(); - if (slowpath(_dispatch_queue_drain(dq))) { - DISPATCH_CRASH("Sync onto manager queue"); + +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) +#endif + { + _dispatch_force_cache_cleanup(); } - _dispatch_voucher_debug("mgr queue clear", NULL); - _voucher_clear(); - _dispatch_queue_reset_override_priority(dq); - _dispatch_reset_defaultpriority_override(); - _dispatch_perfmon_end(); - _dispatch_force_cache_cleanup(); } #pragma mark - -#pragma mark _dispatch_queue_wakeup_with_qos +#pragma mark dispatch_queue_invoke -DISPATCH_NOINLINE -static dispatch_queue_t -_dispatch_queue_wakeup_with_qos_slow(dispatch_queue_t dq, pthread_priority_t pp, - bool retained) -{ - if (!dx_probe(dq) && (dq->dq_is_thread_bound || !dq->dq_thread)) { - if (retained) _dispatch_release(dq); - return NULL; +void +_dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, + 
dispatch_invoke_flags_t flags, uint64_t to_unlock, + struct dispatch_object_s *dc) +{ + if (_dispatch_object_is_slow_item(dc)) { + dispatch_assert(to_unlock == 0); + _dispatch_queue_drain_transfer_lock(dq, to_unlock, dc); + _dispatch_continuation_slow_item_signal(dq, dc); + return _dispatch_release_tailcall(dq); + } + + bool should_defer_again = false, should_pend_queue = true; + uint64_t old_state, new_state; + + if (_dispatch_get_current_queue()->do_targetq) { + _dispatch_thread_frame_get_current()->dtf_deferred = dc; + should_defer_again = true; + should_pend_queue = false; + } + + if (dq->dq_width > 1) { + should_pend_queue = false; + } else if (should_pend_queue) { + dispatch_assert(to_unlock == + DISPATCH_QUEUE_WIDTH_INTERVAL + DISPATCH_QUEUE_IN_BARRIER); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ + new_state = old_state; + if (_dq_state_has_waiters(old_state) || + _dq_state_is_enqueued(old_state)) { + os_atomic_rmw_loop_give_up(break); + } + new_state += DISPATCH_QUEUE_DRAIN_PENDED; + new_state -= DISPATCH_QUEUE_IN_BARRIER; + new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; + }); + should_pend_queue = (new_state & DISPATCH_QUEUE_DRAIN_PENDED); + } + + if (!should_pend_queue) { + if (to_unlock & DISPATCH_QUEUE_IN_BARRIER) { + _dispatch_try_lock_transfer_or_wakeup(dq); + _dispatch_release(dq); + } else if (to_unlock) { + uint64_t dq_state = _dispatch_queue_drain_unlock(dq, to_unlock, NULL); + _dispatch_queue_try_wakeup(dq, dq_state, DISPATCH_WAKEUP_CONSUME); + } else { + _dispatch_release(dq); + } + dq = NULL; } - if (!dispatch_atomic_cmpxchg2o(dq, do_suspend_cnt, 0, - DISPATCH_OBJECT_SUSPEND_LOCK, acquire)) { - bool was_overridden, override; - override = _dispatch_queue_override_priority(dq, &pp, &was_overridden); - if (override && dq->dq_running > 1) { - override = false; - } + if (!should_defer_again) { + dx_invoke(dc, flags & _DISPATCH_INVOKE_PROPAGATE_MASK); + } -#if DISPATCH_COCOA_COMPAT - if (dq == &_dispatch_main_q && 
dq->dq_is_thread_bound) { - if (override) { - _dispatch_thread_override_start(dq->dq_thread, pp); - if (was_overridden) { - _dispatch_thread_override_end(dq->dq_thread); - } - } - return _dispatch_main_queue_wakeup(); - } -#endif - if (override) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - mach_port_t th; - // to traverse the tq chain safely we must - // lock it to ensure it cannot change, unless the queue is running - // and we can just override the thread itself - if (dq->dq_thread) { - _dispatch_wqthread_override_start(dq->dq_thread, pp); - } else if (!dispatch_atomic_cmpxchgv2o(dq, dq_tqthread, - MACH_PORT_NULL, _dispatch_thread_port(), &th, acquire)) { - // already locked, override the owner, trysync will do a queue - // wakeup when it returns, see _dispatch_set_target_queue2 - _dispatch_wqthread_override_start(th, pp); - } else { - dispatch_queue_t tq = dq->do_targetq; - if (_dispatch_queue_prepare_override(dq, tq, pp)) { - _dispatch_queue_push_override(dq, tq, pp, false); - } else { - _dispatch_queue_wakeup_with_qos(tq, pp); - } - dispatch_atomic_store2o(dq, dq_tqthread, MACH_PORT_NULL, - release); + if (dq) { + uint32_t self = _dispatch_tid_self(); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ + new_state = old_state; + if (!_dq_state_drain_pended(old_state) || + _dq_state_drain_owner(old_state) != self) { + os_atomic_rmw_loop_give_up({ + // We may have been overridden, so inform the root queue + _dispatch_set_defaultpriority_override(); + return _dispatch_release_tailcall(dq); + }); } -#endif + new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); + }); + if (_dq_state_has_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); } - if (retained) _dispatch_release(dq); - return NULL; + return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING); } +} +void +_dispatch_queue_finalize_activation(dispatch_queue_t dq) +{ dispatch_queue_t tq = dq->do_targetq; - if (!retained) _dispatch_retain(dq); - _dispatch_queue_push_queue(tq, dq, pp); - return tq; // libdispatch does not need this, but the Instrument DTrace - // probe does + _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + if (dq->dq_override_voucher == DISPATCH_NO_VOUCHER) { + voucher_t v = tq->dq_override_voucher; + if (v != DISPATCH_NO_VOUCHER) { + if (v) _voucher_retain(v); + dq->dq_override_voucher = v; + } + } } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_queue_wakeup_with_qos2(dispatch_queue_t dq, pthread_priority_t pp, - bool retained) +dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, + uint64_t *owned, struct dispatch_object_s **dc_ptr) { - if (_dispatch_object_suspended(dq)) { - _dispatch_queue_override_priority(dq, &pp, NULL); - if (retained) _dispatch_release(dq); - return NULL; + dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); + + if (slowpath(cq != otq)) { + return otq; } - return _dispatch_queue_wakeup_with_qos_slow(dq, pp, retained); + if (dq->dq_width == 1) { + return _dispatch_queue_serial_drain(dq, flags, owned, dc_ptr); + } + return _dispatch_queue_concurrent_drain(dq, flags, owned, dc_ptr); } +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, - pthread_priority_t pp) +_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) { - (void)_dispatch_queue_wakeup_with_qos2(dq, pp, true); + _dispatch_queue_class_invoke(dq, flags, dispatch_queue_invoke2); } -DISPATCH_NOINLINE +#pragma 
mark - +#pragma mark dispatch_queue_class_wakeup + +#if HAVE_PTHREAD_WORKQUEUE_QOS void -_dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - (void)_dispatch_queue_wakeup_with_qos2(dq, pp, false); + dispatch_queue_t old_rq = _dispatch_queue_get_current(); + dispatch_queue_t assumed_rq = dc->dc_other; + voucher_t ov = DISPATCH_NO_VOUCHER; + dispatch_object_t dou; + + dou._do = dc->dc_data; + _dispatch_queue_set_current(assumed_rq); + flags |= DISPATCH_INVOKE_OVERRIDING; + if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { + flags |= DISPATCH_INVOKE_STEALING; + } else { + // balance the fake continuation push in + // _dispatch_root_queue_push_override + _dispatch_trace_continuation_pop(assumed_rq, dou._do); + } + _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, { + if (_dispatch_object_has_vtable(dou._do)) { + dx_invoke(dou._do, flags); + } else { + _dispatch_continuation_invoke_inline(dou, ov, flags); + } + }); + _dispatch_queue_set_current(old_rq); } -DISPATCH_NOINLINE -void -_dispatch_queue_wakeup_and_release(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_need_global_root_queue_push_override(dispatch_queue_t rq, + pthread_priority_t pp) { - (void)_dispatch_queue_wakeup_with_qos2(dq, - _dispatch_queue_get_override_priority(dq), true); + pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (unlikely(!rqp)) return false; + + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return defaultqueue ? 
pp && pp != rqp : pp > rqp; } -DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_wakeup(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_need_global_root_queue_push_override_stealer(dispatch_queue_t rq, + pthread_priority_t pp) { - return _dispatch_queue_wakeup_with_qos2(dq, - _dispatch_queue_get_override_priority(dq), false); + pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + + if (unlikely(!rqp)) return false; + + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return defaultqueue || pp > rqp; } -#if HAVE_PTHREAD_WORKQUEUE_QOS DISPATCH_NOINLINE static void -_dispatch_queue_override_invoke_stealing(void *ctxt) +_dispatch_root_queue_push_override(dispatch_queue_t orig_rq, + dispatch_object_t dou, pthread_priority_t pp) { - dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; - dispatch_queue_t dq = dc->dc_data; + bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dispatch_continuation_t dc = dou._dc; + + if (_dispatch_object_is_redirection(dc)) { + // no double-wrap is needed, _dispatch_async_redirect_invoke will do + // the right thing + dc->dc_func = (void *)orig_rq; + } else { + dc = _dispatch_continuation_alloc(); + dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING); + // fake that we queued `dou` on `orig_rq` for introspection purposes + _dispatch_trace_continuation_push(orig_rq, dou); + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dou._do; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + } - dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING | DISPATCH_INVOKE_STEALING); + DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(rq, dc, 0, 0); } DISPATCH_NOINLINE static void -_dispatch_queue_override_invoke_owning(void *ctxt) 
+_dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, + dispatch_queue_t dq, pthread_priority_t pp) { - dispatch_continuation_t dc = (dispatch_continuation_t)ctxt; - dispatch_queue_t dq = dc->dc_data; + bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); + _dispatch_retain(dq); + dc->dc_func = NULL; + dc->dc_ctxt = dc; + dc->dc_other = orig_rq; + dc->dc_data = dq; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; - // balance the fake continuation push in _dispatch_queue_push_override - _dispatch_trace_continuation_pop(dc->dc_other, dc->dc_data); - dx_invoke(dq, dc, DISPATCH_INVOKE_OVERRIDING); + DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(rq, dc, 0, 0); } -#endif -static inline bool -_dispatch_queue_prepare_override(dispatch_queue_t dq, dispatch_queue_t tq, - pthread_priority_t p) +DISPATCH_NOINLINE +static void +_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state) +{ + mach_port_t owner = _dq_state_drain_owner(dq_state); + pthread_priority_t pp2; + dispatch_queue_t tq; + bool locked; + + if (owner) { + int rc = _dispatch_wqthread_override_start_check_owner(owner, pp, + &dq->dq_state_lock); + // EPERM means the target of the override is not a work queue thread + // and could be a thread bound queue such as the main queue. + // When that happens we must get to that queue and wake it up if we + // want the override to be appplied and take effect. 
+ if (rc != EPERM) { + goto out; + } + } + + if (_dq_state_is_suspended(dq_state)) { + goto out; + } + + tq = dq->do_targetq; + + if (_dispatch_queue_has_immutable_target(dq)) { + locked = false; + } else if (_dispatch_is_in_root_queues_array(tq)) { + // avoid locking when we recognize the target queue as a global root + // queue it is gross, but is a very common case. The locking isn't + // needed because these target queues cannot go away. + locked = false; + } else if (_dispatch_queue_sidelock_trylock(dq, pp)) { + // to traverse the tq chain safely we must + // lock it to ensure it cannot change + locked = true; + tq = dq->do_targetq; + _dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq); + } else { + // + // Leading to being there, the current thread has: + // 1. enqueued an object on `dq` + // 2. raised the dq_override value of `dq` + // 3. set the HAS_OVERRIDE bit and not seen an owner + // 4. tried and failed to acquire the side lock + // + // + // The side lock owner can only be one of three things: + // + // - The suspend/resume side count code. Besides being unlikely, + // it means that at this moment the queue is actually suspended, + // which transfers the responsibility of applying the override to + // the eventual dispatch_resume(). + // + // - A dispatch_set_target_queue() call. The fact that we saw no `owner` + // means that the trysync it does wasn't being drained when (3) + // happened which can only be explained by one of these interleavings: + // + // o `dq` became idle between when the object queued in (1) ran and + // the set_target_queue call and we were unlucky enough that our + // step (3) happened while this queue was idle. There is no reason + // to override anything anymore, the queue drained to completion + // while we were preempted, our job is done. + // + // o `dq` is queued but not draining during (1-3), then when we try + // to lock at (4) the queue is now draining a set_target_queue. 
+ // Since we set HAS_OVERRIDE with a release barrier, the effect of + // (2) was visible to the drainer when he acquired the drain lock, + // and that guy has applied our override. Our job is done. + // + // - Another instance of _dispatch_queue_class_wakeup_with_override(), + // which is fine because trylock leaves a hint that we failed our + // trylock, causing the tryunlock below to fail and reassess whether + // a better override needs to be applied. + // + _dispatch_ktrace1(DISPATCH_PERF_mutable_target, dq); + goto out; + } + +apply_again: + if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + if (_dispatch_need_global_root_queue_push_override_stealer(tq, pp)) { + _dispatch_root_queue_push_override_stealer(tq, dq, pp); + } + } else if (_dispatch_queue_need_override(tq, pp)) { + dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING); + } + while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) { + // rdar://problem/24081326 + // + // Another instance of _dispatch_queue_class_wakeup_with_override() + // tried to acquire the side lock while we were running, and could have + // had a better override than ours to apply. + // + pp2 = dq->dq_override; + if (pp2 > pp) { + pp = pp2; + // The other instance had a better priority than ours, override + // our thread, and apply the override that wasn't applied to `dq` + // because of us. + goto apply_again; + } + } + +out: + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); + } +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + +DISPATCH_NOINLINE +void +_dispatch_queue_class_override_drainer(dispatch_queue_t dq, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - if (dx_type(tq) != DISPATCH_QUEUE_ROOT_TYPE || !tq->dq_priority) { - return false; + uint64_t dq_state, value; + + // + // Someone is trying to override the last work item of the queue. 
+ // Do not remember this override on the queue because we know the precise + // duration the override is required for: until the current drain unlocks. + // + // That is why this function only tries to set HAS_OVERRIDE if we can + // still observe a drainer, and doesn't need to set the DIRTY bit + // because oq_override wasn't touched and there is no race to resolve + // + os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { + if (!_dq_state_drain_locked(dq_state)) { + os_atomic_rmw_loop_give_up(break); + } + value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE; + }); + if (_dq_state_drain_locked(dq_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, pp, + flags, dq_state); } - if (p <= (dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - return false; +#else + (void)pp; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } - if (p <= (tq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - return false; +} + +#if DISPATCH_USE_KEVENT_WORKQUEUE +DISPATCH_NOINLINE +static void +_dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp, dispatch_deferred_items_t ddi) +{ + dispatch_priority_t old_pp = ddi->ddi_stashed_pp; + dispatch_queue_t old_dq = ddi->ddi_stashed_dq; + struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou; + dispatch_priority_t rq_overcommit; + + rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + if (likely(!old_pp || rq_overcommit)) { + ddi->ddi_stashed_dq = dq; + ddi->ddi_stashed_dou = dou._do; + ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit | + _PTHREAD_PRIORITY_PRIORITY_MASK; + if (likely(!old_pp)) { + return; + } + // push the previously stashed item + pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dq = old_dq; + dou._do = old_dou; } - return true; -#else - (void)dq; (void)tq; (void)p; - return false; + if (_dispatch_need_global_root_queue_push_override(dq, pp)) { + 
return _dispatch_root_queue_push_override(dq, dou, pp); + } + // bit of cheating: we should really pass `pp` but we know that we are + // pushing onto a global queue at this point, and we just checked that + // `pp` doesn't matter. + DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); + _dispatch_queue_push_inline(dq, dou, 0, 0); +} #endif + +DISPATCH_NOINLINE +static void +_dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); + _dispatch_queue_push(dq, dou, pp); } -static inline void -_dispatch_queue_push_override(dispatch_queue_t dq, dispatch_queue_t tq, - pthread_priority_t p, bool owning) +DISPATCH_NOINLINE +void +_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + pthread_priority_t pp) { + _dispatch_assert_is_valid_qos_override(pp); + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (unlikely(ddi && !(ddi->ddi_stashed_pp & + (dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) { + dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE); + return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi); + } +#endif #if HAVE_PTHREAD_WORKQUEUE_QOS - unsigned int qosbit, idx, overcommit; - overcommit = (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 
1 : 0; - qosbit = (p & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; - idx = (unsigned int)__builtin_ffs((int)qosbit); - if (!idx || idx > DISPATCH_QUEUE_QOS_COUNT) { - DISPATCH_CRASH("Corrupted override priority"); + // can't use dispatch_once_f() as it would create a frame + if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) { + return _dispatch_queue_push_slow(dq, dou, pp); + } + if (_dispatch_need_global_root_queue_push_override(dq, pp)) { + return _dispatch_root_queue_push_override(dq, dou, pp); + } +#endif } - dispatch_queue_t rq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; + _dispatch_queue_push_inline(dq, dou, pp, 0); +} - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - if (owning) { - // fake that we queued `dq` on `tq` for introspection purposes - _dispatch_trace_continuation_push(tq, dq); - dc->dc_func = _dispatch_queue_override_invoke_owning; - } else { - dc->dc_func = _dispatch_queue_override_invoke_stealing; +DISPATCH_NOINLINE +static void +_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) +{ + dispatch_queue_t tq; + + if (flags & (DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_WAS_OVERRIDDEN)) { + // _dispatch_queue_drain_try_unlock may have reset the override while + // we were becoming the enqueuer + _dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp); + } + if (!(flags & DISPATCH_WAKEUP_CONSUME)) { _dispatch_retain(dq); } - dc->dc_ctxt = dc; - dc->dc_priority = 0; - dc->dc_other = tq; - dc->dc_voucher = NULL; - dc->dc_data = dq; - - _dispatch_queue_push(rq, dc, 0); -#else - (void)dq; (void)tq; (void)p; -#endif + if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { + // try_become_enqueuer has no acquire barrier, as the last block + // of a queue asyncing to that queue is not an uncommon pattern 
+ // and in that case the acquire is completely useless + // + // so instead use a thread fence here when we will read the targetq + // pointer because that is the only thing that really requires + // that barrier. + os_atomic_thread_fence(acquire); + tq = dq->do_targetq; + } else { + dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR); + tq = &_dispatch_mgr_q; + } + return _dispatch_queue_push(tq, dq, pp); } +DISPATCH_NOINLINE void -_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, - pthread_priority_t pp) +_dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { - _dispatch_queue_override_priority(dq, &pp, NULL); - if (_dispatch_queue_prepare_override(dq, tq, pp)) { - _dispatch_queue_push_override(dq, tq, pp, true); + uint64_t old_state, new_state, bits = 0; + +#if HAVE_PTHREAD_WORKQUEUE_QOS + _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); +#endif + + if (flags & DISPATCH_WAKEUP_FLUSH) { + bits = DISPATCH_QUEUE_DIRTY; + } + if (flags & DISPATCH_WAKEUP_OVERRIDING) { + // + // Setting the dirty bit here is about forcing callers of + // _dispatch_queue_drain_try_unlock() to loop again when an override + // has just been set to close the following race: + // + // Drainer (in drain_try_unlokc(): + // override_reset(); + // preempted.... + // + // Enqueuer: + // atomic_or(oq_override, override, relaxed); + // atomic_or(dq_state, HAS_OVERRIDE, release); + // + // Drainer: + // ... 
resumes + // successful drain_unlock() and leaks `oq_override` + // + bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE; + } + + if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t xor_owner_and_set_full_width_and_in_barrier = + _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | + DISPATCH_QUEUE_IN_BARRIER; + +#ifdef DLOCK_NOWAITERS_BIT + bits |= DLOCK_NOWAITERS_BIT; +#else + bits |= DLOCK_WAITERS_BIT; +#endif + flags ^= DISPATCH_WAKEUP_SLOW_WAITER; + dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME)); + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state | bits; + if (_dq_state_drain_pended(old_state)) { + // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT + // but we want to be more efficient wrt the WAITERS_BIT + new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; + } + if (unlikely(_dq_state_drain_locked(new_state))) { +#ifdef DLOCK_NOWAITERS_BIT + new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT; +#endif + } else if (unlikely(!_dq_state_is_runnable(new_state) || + !(flags & DISPATCH_WAKEUP_FLUSH))) { + // either not runnable, or was not for the first item (26700358) + // so we should not try to lock and handle overrides instead + } else if (_dq_state_has_pending_barrier(old_state) || + new_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + // see _dispatch_queue_drain_try_lock + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state ^= xor_owner_and_set_full_width_and_in_barrier; + } else { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + }); + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_try_lock_transfer_or_wakeup(dq); + } + } else if (bits) { + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ + new_state = old_state | bits; + if (likely(_dq_state_should_wakeup(old_state))) { + new_state |= 
DISPATCH_QUEUE_ENQUEUED; + } + }); } else { - _dispatch_queue_push(tq, dq, pp); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{ + new_state = old_state; + if (likely(_dq_state_should_wakeup(old_state))) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } else { + os_atomic_rmw_loop_give_up(break); + } + }); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + return _dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target); + } + +#if HAVE_PTHREAD_WORKQUEUE_QOS + if ((flags & DISPATCH_WAKEUP_OVERRIDING) + && target == DISPATCH_QUEUE_WAKEUP_TARGET) { + return _dispatch_queue_class_wakeup_with_override(dq, pp, + flags, new_state); + } +#endif + + if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dq); } } @@ -4041,7 +5331,7 @@ _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, DISPATCH_NOINLINE static bool -_dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) +_dispatch_root_queue_drain_one_slow(dispatch_queue_t dq) { dispatch_root_queue_context_t qc = dq->do_ctxt; struct dispatch_object_s *const mediator = (void *)~0ul; @@ -4058,7 +5348,7 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) // Since we have serious contention, we need to back off. 
if (!pending) { // Mark this queue as pending to avoid requests for further threads - (void)dispatch_atomic_inc2o(qc, dgq_pending, relaxed); + (void)os_atomic_inc2o(qc, dgq_pending, relaxed); pending = true; } _dispatch_contention_usleep(sleep_time); @@ -4074,17 +5364,17 @@ _dispatch_queue_concurrent_drain_one_slow(dispatch_queue_t dq) available = false; out: if (pending) { - (void)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + (void)os_atomic_dec2o(qc, dgq_pending, relaxed); } if (!available) { - _dispatch_queue_wakeup_global(dq); + _dispatch_global_queue_poke(dq); } return available; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq) +_dispatch_root_queue_drain_one2(dispatch_queue_t dq) { // Wait for queue head and tail to be both non-empty or both empty bool available; // @@ -4095,24 +5385,24 @@ _dispatch_queue_concurrent_drain_one2(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE_NDEBUG static inline struct dispatch_object_s * -_dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) +_dispatch_root_queue_drain_one(dispatch_queue_t dq) { struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; start: // The mediator value acts both as a "lock" and a signal - head = dispatch_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); + head = os_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); if (slowpath(head == NULL)) { // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. 
- if (slowpath(!dispatch_atomic_cmpxchg2o(dq, dq_items_head, mediator, + if (slowpath(!os_atomic_cmpxchg2o(dq, dq_items_head, mediator, NULL, relaxed))) { goto start; } if (slowpath(dq->dq_items_tail) && // - _dispatch_queue_concurrent_drain_one2(dq)) { + _dispatch_root_queue_drain_one2(dq)) { goto start; } _dispatch_root_queue_debug("no work on global queue: %p", dq); @@ -4121,7 +5411,7 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) if (slowpath(head == mediator)) { // This thread lost the race for ownership of the queue. - if (fastpath(_dispatch_queue_concurrent_drain_one_slow(dq))) { + if (fastpath(_dispatch_root_queue_drain_one_slow(dq))) { goto start; } return NULL; @@ -4132,9 +5422,10 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) next = fastpath(head->do_next); if (slowpath(!next)) { - dispatch_atomic_store2o(dq, dq_items_head, NULL, relaxed); - - if (dispatch_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, relaxed)) { + os_atomic_store2o(dq, dq_items_head, NULL, relaxed); + // 22708742: set tail to NULL with release, so that NULL write to head + // above doesn't clobber head from concurrent enqueuer + if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { // both head and tail are NULL now goto out; } @@ -4142,54 +5433,78 @@ _dispatch_queue_concurrent_drain_one(dispatch_queue_t dq) _dispatch_wait_until(next = head->do_next); } - dispatch_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_queue_wakeup_global(dq); + os_atomic_store2o(dq, dq_items_head, next, relaxed); + _dispatch_global_queue_poke(dq); out: return head; } +void +_dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, + struct dispatch_object_s *dou, pthread_priority_t pp) +{ + struct _dispatch_identity_s di; + + // fake that we queued `dou` on `dq` for introspection purposes + _dispatch_trace_continuation_push(dq, dou); + + pp = _dispatch_priority_inherit_from_root_queue(pp, dq); + _dispatch_queue_set_current(dq); + 
_dispatch_root_queue_identity_assume(&di, pp); +#if DISPATCH_COCOA_COMPAT + void *pool = _dispatch_last_resort_autorelease_pool_push(); +#endif // DISPATCH_COCOA_COMPAT + + _dispatch_perfmon_start(); + _dispatch_continuation_pop_inline(dou, dq, + DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_perfmon_workitem_inc(); + _dispatch_perfmon_end(); + +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(pool); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_reset_defaultpriority(di.old_pp); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} + +DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) static void -_dispatch_root_queue_drain(dispatch_queue_t dq) +_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) { #if DISPATCH_DEBUG - if (_dispatch_thread_getspecific(dispatch_queue_key)) { - DISPATCH_CRASH("Premature thread recycling"); + dispatch_queue_t cq; + if (slowpath(cq = _dispatch_queue_get_current())) { + DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); } #endif - _dispatch_thread_setspecific(dispatch_queue_key, dq); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t pri = dq->dq_priority ? 
dq->dq_priority : old_pri; - pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri); - + _dispatch_queue_set_current(dq); + if (dq->dq_priority) pri = dq->dq_priority; + pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL); #if DISPATCH_COCOA_COMPAT - // ensure that high-level memory management techniques do not leak/crash - if (dispatch_begin_thread_4GC) { - dispatch_begin_thread_4GC(); - } - void *pool = _dispatch_autorelease_pool_push(); + void *pool = _dispatch_last_resort_autorelease_pool_push(); #endif // DISPATCH_COCOA_COMPAT _dispatch_perfmon_start(); struct dispatch_object_s *item; bool reset = false; - while ((item = fastpath(_dispatch_queue_concurrent_drain_one(dq)))) { + while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) { if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop(item); + _dispatch_continuation_pop_inline(item, dq, + DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_perfmon_workitem_inc(); reset = _dispatch_reset_defaultpriority_override(); } - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_priority_and_voucher(old_pri, NULL); - _dispatch_reset_defaultpriority(old_dp); _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT - _dispatch_autorelease_pool_pop(pool); - if (dispatch_end_thread_4GC) { - dispatch_end_thread_4GC(); - } + _dispatch_last_resort_autorelease_pool_pop(pool); #endif // DISPATCH_COCOA_COMPAT - - _dispatch_thread_setspecific(dispatch_queue_key, NULL); + _dispatch_reset_defaultpriority(old_dp); + _dispatch_queue_set_current(NULL); } #pragma mark - @@ -4203,32 +5518,22 @@ _dispatch_worker_thread4(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; _dispatch_introspection_thread_add(); - int pending = (int)dispatch_atomic_dec2o(qc, dgq_pending, relaxed); + int pending = (int)os_atomic_dec2o(qc, dgq_pending, relaxed); dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq); - __asm__(""); // prevent tailcall 
(for Instrument DTrace probe) + _dispatch_root_queue_drain(dq, _dispatch_get_priority()); + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); } #if HAVE_PTHREAD_WORKQUEUE_QOS static void -_dispatch_worker_thread3(pthread_priority_t priority) -{ - // Reset priority TSD to workaround - _dispatch_thread_setspecific(dispatch_priority_key, - (void*)(uintptr_t)(priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)); - unsigned int overcommit, qosbit, idx; - overcommit = (priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) ? 1 : 0; - qosbit = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; - if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]. - dq_priority) { - // If kernel doesn't support maintenance, bottom bit is background. - // Shift to our idea of where background bit is. - qosbit <<= 1; - } - idx = (unsigned int)__builtin_ffs((int)qosbit); - dispatch_assert(idx > 0 && idx < DISPATCH_QUEUE_QOS_COUNT+1); - dispatch_queue_t dq = &_dispatch_root_queues[((idx-1) << 1) | overcommit]; +_dispatch_worker_thread3(pthread_priority_t pp) +{ + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_t dq; + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + dq = _dispatch_get_root_queue_for_priority(pp, overcommit); return _dispatch_worker_thread4(dq); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -4277,13 +5582,15 @@ _dispatch_worker_thread(void *context) _dispatch_introspection_thread_add(); const int64_t timeout = 5ull * NSEC_PER_SEC; + pthread_priority_t old_pri = _dispatch_get_priority(); do { - _dispatch_root_queue_drain(dq); + _dispatch_root_queue_drain(dq, old_pri); + _dispatch_reset_priority_and_voucher(old_pri, NULL); } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, dispatch_time(0, timeout)) == 0); - (void)dispatch_atomic_inc2o(qc, 
dgq_thread_pool_size, release); - _dispatch_queue_wakeup_global(dq); + (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); + _dispatch_global_queue_poke(dq); _dispatch_release(dq); return NULL; @@ -4333,17 +5640,14 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) size_t dqs; if (slowpath(flags)) { - return NULL; + return DISPATCH_BAD_INPUT; } dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dq = _dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); - _dispatch_queue_init(dq); + _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false); dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true); dq->dq_label = label ? label : "runloop-queue"; // no-copy contract - dq->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_LOCK; - dq->dq_running = 1; - dq->dq_is_thread_bound = 1; - _dispatch_runloop_queue_port_init(dq); + _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); _dispatch_object_debug(dq, "%s", __func__); return _dispatch_introspection_queue_create(dq); @@ -4353,13 +5657,11 @@ void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); - (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); - unsigned int suspend_cnt = dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, release); + + pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true); _dispatch_queue_clear_bound_thread(dq); - if (suspend_cnt == 0) { - _dispatch_queue_wakeup(dq); - } + dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH); + if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); } void @@ -4367,7 +5669,7 @@ _dispatch_runloop_queue_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); - _dispatch_runloop_queue_port_dispose(dq); + _dispatch_runloop_queue_handle_dispose(dq); _dispatch_queue_destroy(dq); } @@ -4375,7 +5677,7 @@ 
bool _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } dispatch_retain(dq); bool r = _dispatch_runloop_queue_drain_one(dq); @@ -4387,28 +5689,31 @@ void _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } - _dispatch_runloop_queue_probe(dq); + _dispatch_runloop_queue_wakeup(dq, 0, false); } -mach_port_t +dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH("Not a runloop queue"); + DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); } - return (mach_port_t)dq->do_ctxt; + return _dispatch_runloop_queue_get_handle(dq); } static void -_dispatch_runloop_queue_port_init(void *ctxt) +_dispatch_runloop_queue_handle_init(void *ctxt) { dispatch_queue_t dq = (dispatch_queue_t)ctxt; + dispatch_runloop_handle_t handle; + + _dispatch_fork_becomes_unsafe(); + +#if TARGET_OS_MAC mach_port_t mp; kern_return_t kr; - - _dispatch_safe_fork = false; kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); @@ -4426,38 +5731,81 @@ _dispatch_runloop_queue_port_init(void *ctxt) DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); } - dq->do_ctxt = (void*)(uintptr_t)mp; + handle = mp; +#elif defined(__linux__) + int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "system is 
out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); + break; + } + } + handle = fd; +#else +#error "runloop support not implemented on this platform" +#endif + _dispatch_runloop_queue_set_handle(dq, handle); _dispatch_program_is_probably_callback_driven = true; } static void -_dispatch_runloop_queue_port_dispose(dispatch_queue_t dq) +_dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq) { - mach_port_t mp = (mach_port_t)dq->do_ctxt; - if (!mp) { + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { return; } dq->do_ctxt = NULL; +#if TARGET_OS_MAC + mach_port_t mp = handle; kern_return_t kr = mach_port_deallocate(mach_task_self(), mp); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); +#elif defined(__linux__) + int rc = close(handle); + (void)dispatch_assume_zero(rc); +#else +#error "runloop support not implemented on this platform" +#endif } #pragma mark - #pragma mark dispatch_main_queue -mach_port_t -_dispatch_get_main_queue_port_4CF(void) +dispatch_runloop_handle_t +_dispatch_get_main_queue_handle_4CF(void) { dispatch_queue_t dq = &_dispatch_main_q; - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - return (mach_port_t)dq->do_ctxt; + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + return _dispatch_runloop_queue_get_handle(dq); +} + +#if TARGET_OS_MAC +dispatch_runloop_handle_t +_dispatch_get_main_queue_port_4CF(void) +{ + return _dispatch_get_main_queue_handle_4CF(); } +#endif static bool main_q_is_draining; @@ -4471,7 +5819,13 @@ _dispatch_queue_set_mainq_drain_state(bool arg) } void 
-_dispatch_main_queue_callback_4CF(mach_msg_header_t *msg DISPATCH_UNUSED) +_dispatch_main_queue_callback_4CF( +#if TARGET_OS_MAC + mach_msg_header_t *_Null_unspecified msg +#else + void *ignored +#endif + DISPATCH_UNUSED) { if (main_q_is_draining) { return; @@ -4491,11 +5845,23 @@ dispatch_main(void) #endif _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); _dispatch_program_is_probably_callback_driven = true; + _dispatch_ktrace0(ARIADNE_ENTER_DISPATCH_MAIN_CODE); +#ifdef __linux__ + // On Linux, if the main thread calls pthread_exit, the process becomes a zombie. + // To avoid that, just before calling pthread_exit we register a TSD destructor + // that will call _dispatch_sig_thread -- thus capturing the main thread in sigsuspend. + // This relies on an implementation detail (currently true in glibc) that TSD destructors + // will be called in the order of creation to cause all the TSD cleanup functions to + // run before the thread becomes trapped in sigsuspend. + pthread_key_t dispatch_main_key; + pthread_key_create(&dispatch_main_key, _dispatch_sig_thread); + pthread_setspecific(dispatch_main_key, &dispatch_main_key); +#endif pthread_exit(NULL); - DISPATCH_CRASH("pthread_exit() returned"); + DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); #if HAVE_PTHREAD_MAIN_NP } - DISPATCH_CLIENT_CRASH("dispatch_main() must be called on the main thread"); + DISPATCH_CLIENT_CRASH(0, "dispatch_main() must be called on the main thread"); #endif } @@ -4524,28 +5890,69 @@ static void _dispatch_queue_cleanup2(void) { dispatch_queue_t dq = &_dispatch_main_q; - (void)dispatch_atomic_dec2o(dq, dq_running, relaxed); - (void)dispatch_atomic_sub2o(dq, do_suspend_cnt, - DISPATCH_OBJECT_SUSPEND_LOCK, release); _dispatch_queue_clear_bound_thread(dq); - dq->dq_is_thread_bound = 0; + + // + // Here is what happens when both this cleanup happens because of + // dispatch_main() being called, and a concurrent enqueuer makes the queue + // non empty. 
+ // + // _dispatch_queue_cleanup2: + // atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed); + // maximal_barrier(); + // if (load(dq_items_tail, seq_cst)) { + // // do the wake up the normal serial queue way + // } else { + // // do no wake up <---- + // } + // + // enqueuer: + // store(dq_items_tail, new_tail, release); + // if (load(dq_is_thread_bound, relaxed)) { + // // do the wake up the runloop way <---- + // } else { + // // do the wake up the normal serial way + // } + // + // what would be bad is to take both paths marked <---- because the queue + // wouldn't be woken up until the next time it's used (which may never + // happen) + // + // An enqueuer that speculates the load of the old value of thread_bound + // and then does the store may wake up the main queue the runloop way. + // But then, the cleanup thread will see that store because the load + // of dq_items_tail is sequentially consistent, and we have just thrown away + // our pipeline. + // + // By the time cleanup2() is out of the maximally synchronizing barrier, + // no other thread can speculate the wrong load anymore, and both cleanup2() + // and a concurrent enqueuer would treat the queue in the standard non + // thread bound way + + _dispatch_queue_atomic_flags_clear(dq, + DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC); + os_atomic_maximally_synchronizing_barrier(); // no need to drop the override, the thread will die anyway - _dispatch_queue_wakeup_with_qos(dq, - _dispatch_queue_reset_override_priority(dq)); + // the barrier above includes an acquire, so it's ok to do this raw + // call to dx_wakeup(0) + dx_wakeup(dq, 0, 0); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called // this has to run before the DISPATCH_COCOA_COMPAT below + // See dispatch_main for call to _dispatch_sig_thread on linux. 
+#ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } +#endif #if DISPATCH_COCOA_COMPAT - dispatch_once_f(&_dispatch_main_q_port_pred, dq, - _dispatch_runloop_queue_port_init); - _dispatch_runloop_queue_port_dispose(dq); + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); + _dispatch_runloop_queue_handle_dispose(dq); #endif } @@ -4556,5 +5963,30 @@ _dispatch_queue_cleanup(void *ctxt) return _dispatch_queue_cleanup2(); } // POSIX defines that destructors are only called if 'ctxt' is non-null - DISPATCH_CRASH("Premature thread exit while a dispatch queue is running"); + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch queue is running"); +} + +static void +_dispatch_deferred_items_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit with unhandled deferred items"); +} + +static void +_dispatch_frame_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch frame is active"); +} + +static void +_dispatch_context_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + DISPATCH_INTERNAL_CRASH(ctxt, + "Premature thread exit while a dispatch context is set"); } diff --git a/src/queue_internal.h b/src/queue_internal.h index 143ab1e2a..1bff7b014 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -48,95 +48,567 @@ #pragma mark - #pragma mark dispatch_queue_t -#define DISPATCH_QUEUE_HEADER \ - uint32_t volatile dq_running; \ - struct dispatch_object_s *volatile dq_items_head; \ - /* LP64 global queue cacheline boundary */ \ - struct dispatch_object_s *volatile 
dq_items_tail; \ +DISPATCH_ENUM(dispatch_queue_flags, uint32_t, + DQF_NONE = 0x0000, + DQF_AUTORELEASE_ALWAYS = 0x0001, + DQF_AUTORELEASE_NEVER = 0x0002, +#define _DQF_AUTORELEASE_MASK 0x0003 + DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread + DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target + DQF_TARGETED = 0x0010, // queue is targeted by another object + DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it + DQF_CANNOT_TRYSYNC = 0x0040, + DQF_RELEASED = 0x0080, // xref_cnt == -1 + + // only applies to sources + // + // Assuming DSF_ARMED (a), DSF_DEFERRED_DELETE (p), DSF_DELETED (d): + // + // --- + // a-- + // source states for regular operations + // (delivering event / waiting for event) + // + // ap- + // Either armed for deferred deletion delivery, waiting for an EV_DELETE, + // and the next state will be -pd (EV_DELETE delivered), + // Or, a cancellation raced with an event delivery and failed + // (EINPROGRESS), and when the event delivery happens, the next state + // will be -p-. + // + // -pd + // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is + // gone from the kernel, but ds_dkev lives. Next state will be --d. + // + // -p- + // Received an EV_ONESHOT event (from a--), or the delivery of an event + // causing the cancellation to fail with EINPROGRESS was delivered + // (from ap-). The knote still lives, next state will be --d. + // + // --d + // Final state of the source, the knote is gone from the kernel and + // ds_dkev is freed. The source can safely be released. + // + // a-d (INVALID) + // apd (INVALID) + // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If + // the knote is gone from the kernel, it makes no sense whatsoever to + // have it armed. And generally speaking, once `d` or `p` has been set, + // `a` cannot do a cleared -> set transition anymore + // (see _dispatch_source_try_set_armed). 
+ // + DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel + DSF_CANCELED = 0x1000, // cancellation has been requested + DSF_ARMED = 0x2000, // source is armed + DSF_DEFERRED_DELETE = 0x4000, // source is pending delete + DSF_DELETED = 0x8000, // source knote is deleted +#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED) + + DQF_WIDTH_MASK = 0xffff0000, +#define DQF_WIDTH_SHIFT 16 +); + +#define _DISPATCH_QUEUE_HEADER(x) \ + struct os_mpsc_queue_s _as_oq[0]; \ + DISPATCH_OBJECT_HEADER(x); \ + _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \ dispatch_queue_t dq_specific_q; \ - uint16_t dq_width; \ - uint16_t dq_is_thread_bound:1; \ - uint32_t volatile dq_override; \ - pthread_priority_t dq_priority; \ - mach_port_t dq_thread; \ - mach_port_t volatile dq_tqthread; \ - voucher_t dq_override_voucher; \ - unsigned long dq_serialnum; \ - const char *dq_label; \ - DISPATCH_INTROSPECTION_QUEUE_LIST; - -#define DISPATCH_QUEUE_WIDTH_MAX UINT16_MAX + union { \ + uint32_t volatile dq_atomic_flags; \ + DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + uint16_t dq_atomic_bits, \ + uint16_t dq_width \ + ); \ + }; \ + uint32_t dq_side_suspend_cnt; \ + DISPATCH_INTROSPECTION_QUEUE_HEADER; \ + dispatch_unfair_lock_s dq_sidelock + /* LP64: 32bit hole on LP64 */ + +#define DISPATCH_QUEUE_HEADER(x) \ + struct dispatch_queue_s _as_dq[0]; \ + _DISPATCH_QUEUE_HEADER(x) + +#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8))) + +#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff +#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe +#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ + ({ uint16_t _width = (width); \ + _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) #define DISPATCH_QUEUE_CACHELINE_PADDING \ char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] #ifdef __LP64__ #define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (0*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) +#elif 
OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_QUEUE_CACHELINE_PAD (( \ + (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #else #define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_LIST_SIZE) \ + (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) #endif +/* + * dispatch queues `dq_state` demystified + * + ******************************************************************************* + * + * Most Significant 32 bit Word + * ---------------------------- + * + * sc: suspend count (bits 63 - 57) + * The suspend count unsurprisingly holds the suspend count of the queue + * Only 7 bits are stored inline. Extra counts are transfered in a side + * suspend count and when that has happened, the ssc: bit is set. + */ +#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull +#define DISPATCH_QUEUE_SUSPEND_HALF 0x40u +/* + * ssc: side suspend count (bit 56) + * This bit means that the total suspend count didn't fit in the inline + * suspend count, and that there are additional suspend counts stored in the + * `dq_side_suspend_cnt` field. + */ +#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull +/* + * i: inactive bit (bit 55) + * This bit means that the object is inactive (see dispatch_activate) + */ +#define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull +/* + * na: needs activation (bit 54) + * This bit is set if the object is created inactive. It tells + * dispatch_queue_wakeup to perform various tasks at first wakeup. + * + * This bit is cleared as part of the first wakeup. Having that bit prevents + * the object from being woken up (because _dq_state_should_wakeup will say + * no), except in the dispatch_activate/dispatch_resume codepath. 
+ */ +#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull +/* + * This mask covers the suspend count (sc), side suspend count bit (ssc), + * inactive (i) and needs activation (na) bits + */ +#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull +/* + * ib: in barrier (bit 53) + * This bit is set when the queue is currently executing a barrier + */ +#define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull +/* + * qf: queue full (bit 52) + * This bit is a subtle hack that allows to check for any queue width whether + * the full width of the queue is used or reserved (depending on the context) + * In other words that the queue has reached or overflown its capacity. + */ +#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull +#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull +/* + * w: width (bits 51 - 37) + * This encodes how many work items are in flight. Barriers hold `dq_width` + * of them while they run. This is encoded as a signed offset with respect, + * to full use, where the negative values represent how many available slots + * are left, and the positive values how many work items are exceeding our + * capacity. + * + * When this value is positive, then `wo` is always set to 1. + */ +#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull +#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull +#define DISPATCH_QUEUE_WIDTH_SHIFT 37 +/* + * pb: pending barrier (bit 36) + * Drainers set this bit when they couldn't run the next work item and it is + * a barrier. When this bit is set, `dq_width - 1` work item slots are + * reserved so that no wakeup happens until the last work item in flight + * completes. + */ +#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull +/* + * d: dirty bit (bit 35) + * This bit is set when a queue transitions from empty to not empty. + * This bit is set before dq_items_head is set, with appropriate barriers. 
+ * Any thread looking at a queue head is responsible for unblocking any + * dispatch_*_sync that could be enqueued at the beginning. + * + * Drainer perspective + * =================== + * + * When done, any "Drainer", in particular for dispatch_*_sync() handoff + * paths, exits in 3 steps, and the point of the DIRTY bit is to make + * the Drainers take the slowpath at step 2 to take into account enqueuers + * that could have made the queue non idle concurrently. + * + * + * // drainer-exit step 1 + * if (slowpath(dq->dq_items_tail)) { // speculative test + * return handle_non_empty_queue_or_wakeup(dq); + * } + * // drainer-exit step 2 + * if (!_dispatch_queue_drain_try_unlock(dq, ${owned}, ...)) { + * return handle_non_empty_queue_or_wakeup(dq); + * } + * // drainer-exit step 3 + * // no need to wake up the queue, it's really empty for sure + * return; + * + * + * The crux is _dispatch_queue_drain_try_unlock(), it is a function whose + * contract is to release everything the current thread owns from the queue + * state, so that when it's successful, any other thread can acquire + * width from that queue. + * + * But, that function must fail if it sees the DIRTY bit set, leaving + * the state untouched. Leaving the state untouched is vital as it ensures + * that no other Slayer^WDrainer can rise at the same time, because the + * resource stays locked. + * + * + * Note that releasing the DRAIN_LOCK or ENQUEUE_LOCK (see below) currently + * doesn't use that pattern, and always tries to requeue. It isn't a problem + * because while holding either of these locks prevents *some* sync (the + * barrier one) codepaths to acquire the resource, the retry they perform + * at their step D (see just below) isn't affected by the state of these bits + * at all. 
+ * + * + * Sync items perspective + * ====================== + * + * On the dispatch_*_sync() acquire side, the code must look like this: + * + * + * // step A + * if (try_acquire_sync(dq)) { + * return sync_operation_fastpath(dq, item); + * } + * + * // step B + * if (queue_push_and_inline(dq, item)) { + * atomic_store(dq->dq_items_head, item, relaxed); + * // step C + * atomic_or(dq->dq_state, DIRTY, release); + * + * // step D + * if (try_acquire_sync(dq)) { + * try_lock_transfer_or_wakeup(dq); + * } + * } + * + * // step E + * wait_for_lock_transfer(dq); + * + * + * A. If this code can acquire the resource it needs at step A, we're good. + * + * B. If the item isn't the first at enqueue time, then there is no issue + * At least another thread went through C, this thread isn't interesting + * for the possible races, responsibility to make progress is transfered + * to the thread which went through C-D. + * + * C. The DIRTY bit is set with a release barrier, after the head/tail + * has been set, so that seeing the DIRTY bit means that head/tail + * will be visible to any drainer that has the matching acquire barrier. + * + * Drainers may see the head/tail and fail to see DIRTY, in which + * case, their _dispatch_queue_drain_try_unlock() will clear the DIRTY + * bit, and fail, causing the caller to retry exactly once. + * + * D. At this stage, there's two possible outcomes: + * + * - either the acquire works this time, in which case this thread + * successfuly becomes a drainer. That's obviously the happy path. + * It means all drainers are after Step 2 (or there is no Drainer) + * + * - or the acquire fails, which means that another drainer is before + * its Step 2. Since we set the DIRTY bit on the dq_state by now, + * and that drainers manipulate the state atomically, at least one + * drainer that is still before its step 2 will fail its step 2, and + * be responsible for making progress. 
+ * + * + * Async items perspective + * ====================== + * + * On the async codepath, when the queue becomes non empty, the queue + * is always woken up. There is no point in trying to avoid that wake up + * for the async case, because it's required for the async()ed item to make + * progress: a drain of the queue must happen. + * + * So on the async "acquire" side, there is no subtlety at all. + */ +#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull +/* + * qo: (bit 34) + * Set when a queue has a useful override set. + * This bit is only cleared when the final drain_try_unlock() succeeds. + * + * When the queue dq_override is touched (overrides or-ed in), usually with + * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set + * with a release barrier and one of these three things happen next: + * + * - the queue is enqueued, which will cause it to be drained, and the + * override to be handled by _dispatch_queue_drain_try_unlock(). + * In rare cases it could cause the queue to be queued while empty though. + * + * - the DIRTY bit is also set with a release barrier, which pairs with + * the handling of these bits by _dispatch_queue_drain_try_unlock(), + * so that dq_override is reset properly. + * + * - the queue was suspended, and _dispatch_queue_resume() will handle the + * override as part of its wakeup sequence. + */ +#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull +/* + * p: pended bit (bit 33) + * Set when a drain lock has been pended. When this bit is set, + * the drain lock is taken and ENQUEUED is never set. + * + * This bit marks a queue that needs further processing but was kept pended + * by an async drainer (not reenqueued) in the hope of being able to drain + * it further later. 
+ */ +#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull +/* + * e: enqueued bit (bit 32) + * Set when a queue is enqueued on its target queue + */ +#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull +/* + * dl: drain lock (bits 31-0) + * This is used by the normal drain to drain exlusively relative to other + * drain stealers (like the QoS Override codepath). It holds the identity + * (thread port) of the current drainer. + */ +#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull +#ifdef DLOCK_NOWAITERS_BIT +#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ + ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT)) +#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ + (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\ + ^ DLOCK_NOWAITERS_BIT) +#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + DLOCK_NOWAITERS_BIT) +#else +#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ + ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT)) +#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ + ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK)) +#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ + (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ + DLOCK_WAITERS_BIT) +#endif +/* + ******************************************************************************* + * + * `Drainers` + * + * Drainers are parts of the code that hold the drain lock by setting its value + * to their thread port. There are two kinds: + * 1. async drainers, + * 2. lock transfer handlers. + * + * Drainers from the first category are _dispatch_queue_class_invoke and its + * stealers. Those drainers always try to reserve width at the same time they + * acquire the drain lock, to make sure they can make progress, and else exit + * quickly. + * + * Drainers from the second category are `slow` work items. 
Those run on the + * calling thread, and when done, try to transfer the width they own to the + * possible next `slow` work item, and if there is no such item, they reliquish + * that right. To do so, prior to taking any decision, they also try to own + * the full "barrier" width on the given queue. + * + * see _dispatch_try_lock_transfer_or_wakeup + * + ******************************************************************************* + * + * Enqueuing and wakeup rules + * + * Nobody should enqueue any dispatch object if it has no chance to make any + * progress. That means that queues that: + * - are suspended + * - have reached or overflown their capacity + * - are currently draining + * - are already enqueued + * + * should not try to be enqueued. + * + ******************************************************************************* + * + * Lock transfer + * + * The point of the lock transfer code is to allow pure dispatch_*_sync() + * callers to make progress without requiring the bring up of a drainer. + * There are two reason for that: + * + * - performance, as draining has to give up for dispatch_*_sync() work items, + * so waking up a queue for this is wasteful. + * + * - liveness, as with dispatch_*_sync() you burn threads waiting, you're more + * likely to hit various thread limits and may not have any drain being + * brought up if the process hits a limit. + * + * + * Lock transfer happens at the end on the dispatch_*_sync() codepaths: + * + * - obviously once a dispatch_*_sync() work item finishes, it owns queue + * width and it should try to transfer that ownership to the possible next + * queued item if it is a dispatch_*_sync() item + * + * - just before such a work item blocks to make sure that that work item + * itself isn't its own last chance to be woken up. That can happen when + * a Drainer pops up everything from the queue, and that a dispatch_*_sync() + * work item has taken the slow path then was preempted for a long time. 
+ * + * That's why such work items, if first in the queue, must try a lock + * transfer procedure. + * + * + * For transfers where a partial width is owned, we give back that width. + * If the queue state is "idle" again, we attempt to acquire the full width. + * If that succeeds, this falls back to the full barrier lock + * transfer, else it wakes up the queue according to its state. + * + * For full barrier transfers, if items eligible for lock transfer are found, + * then they are woken up and the lock transfer is successful. + * + * If none are found, the full barrier width is released. If by doing so the + * DIRTY bit is found, releasing the full barrier width fails and transferring + * the lock is retried from scratch. + */ + +#define DISPATCH_QUEUE_STATE_INIT_VALUE(width) \ + ((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT) + +/* Magic dq_state values for global queues: they have QUEUE_FULL and IN_BARRIER + * set to force the slowpath in both dispatch_barrier_sync() and dispatch_sync() + */ +#define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \ + (DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER) + +#define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \ + (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL) + DISPATCH_CLASS_DECL(queue); #if !(defined(__cplusplus) && DISPATCH_INTROSPECTION) struct dispatch_queue_s { - DISPATCH_STRUCT_HEADER(queue); - DISPATCH_QUEUE_HEADER; + _DISPATCH_QUEUE_HEADER(queue); DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only -}; +} DISPATCH_QUEUE_ALIGN; #endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION) +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); -DISPATCH_DECL_INTERNAL_SUBCLASS(dispatch_queue_specific_queue, 
dispatch_queue); -DISPATCH_CLASS_DECL(queue_specific_queue); +OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue, + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue)); + +typedef union { + struct os_mpsc_queue_s *_oq; + struct dispatch_queue_s *_dq; + struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + struct dispatch_queue_specific_queue_s *_dqsq; + struct dispatch_timer_aggregate_s *_dta; +#if USE_OBJC + os_mpsc_queue_t _ojbc_oq; + dispatch_queue_t _objc_dq; + dispatch_source_t _objc_ds; + dispatch_mach_t _objc_dm; + dispatch_queue_specific_queue_t _objc_dqsq; + dispatch_timer_aggregate_t _objc_dta; +#endif +} dispatch_queue_class_t __attribute__((__transparent_union__)); -void _dispatch_queue_destroy(dispatch_object_t dou); +typedef struct dispatch_thread_context_s *dispatch_thread_context_t; +typedef struct dispatch_thread_context_s { + dispatch_thread_context_t dtc_prev; + const void *dtc_key; + union { + size_t dtc_apply_nesting; + dispatch_io_t dtc_io_in_barrier; + }; +} dispatch_thread_context_s; + +typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t; +typedef struct dispatch_thread_frame_s { + // must be in the same order as our TSD keys! 
+ dispatch_queue_t dtf_queue; + dispatch_thread_frame_t dtf_prev; + struct dispatch_object_s *dtf_deferred; +} dispatch_thread_frame_s; + +DISPATCH_ENUM(dispatch_queue_wakeup_target, long, + DISPATCH_QUEUE_WAKEUP_NONE = 0, + DISPATCH_QUEUE_WAKEUP_TARGET, + DISPATCH_QUEUE_WAKEUP_MGR, +); + +void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu, + pthread_priority_t pp, dispatch_wakeup_flags_t flags); +void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target); + +void _dispatch_queue_destroy(dispatch_queue_t dq); void _dispatch_queue_dispose(dispatch_queue_t dq); -void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -void _dispatch_queue_push_list_slow(dispatch_queue_t dq, - pthread_priority_t pp, struct dispatch_object_s *obj, unsigned int n, - bool retained); -void _dispatch_queue_push_slow(dispatch_queue_t dq, - pthread_priority_t pp, struct dispatch_object_s *obj, bool retained); -unsigned long _dispatch_queue_probe(dispatch_queue_t dq); -dispatch_queue_t _dispatch_wakeup(dispatch_object_t dou); -dispatch_queue_t _dispatch_queue_wakeup(dispatch_queue_t dq); -void _dispatch_queue_wakeup_and_release(dispatch_queue_t dq); -void _dispatch_queue_wakeup_with_qos(dispatch_queue_t dq, +void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq); +void _dispatch_queue_suspend(dispatch_queue_t dq); +void _dispatch_queue_resume(dispatch_queue_t dq, bool activate); +void _dispatch_queue_finalize_activation(dispatch_queue_t dq); +void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags); +void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n); +void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, pthread_priority_t pp); -void _dispatch_queue_wakeup_with_qos_and_release(dispatch_queue_t dq, - pthread_priority_t pp); -void 
_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, - pthread_priority_t pp); -_dispatch_thread_semaphore_t _dispatch_queue_drain(dispatch_object_t dou); +void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq); +void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t *owned, + struct dispatch_object_s **dc_ptr); +void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, + dispatch_invoke_flags_t flags, uint64_t to_unlock, + struct dispatch_object_s *dc); void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq); -unsigned long _dispatch_root_queue_probe(dispatch_queue_t dq); +void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, + struct dispatch_object_s *dou, pthread_priority_t pp); void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); -unsigned long _dispatch_runloop_queue_probe(dispatch_queue_t dq); +void _dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); void _dispatch_mgr_queue_drain(void); -unsigned long _dispatch_mgr_queue_probe(dispatch_queue_t dq); -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES void _dispatch_mgr_priority_init(void); #else static inline void _dispatch_mgr_priority_init(void) {} #endif -void _dispatch_after_timer_callback(void *ctxt); -void _dispatch_async_redirect_invoke(void *ctxt); +#if DISPATCH_USE_KEVENT_WORKQUEUE +void 
_dispatch_kevent_workqueue_init(void); +#else +static inline void _dispatch_kevent_workqueue_init(void) {} +#endif void _dispatch_sync_recurse_invoke(void *ctxt); void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); -void _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); #if DISPATCH_DEBUG @@ -168,17 +640,21 @@ enum { DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_IDX_COUNT, }; extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; extern struct dispatch_queue_s _dispatch_mgr_q; +void _dispatch_root_queues_init(void); #if HAVE_PTHREAD_WORKQUEUE_QOS extern pthread_priority_t _dispatch_background_priority; extern pthread_priority_t _dispatch_user_initiated_priority; #endif +typedef uint8_t _dispatch_qos_class_t; + #pragma mark - #pragma mark dispatch_queue_attr_t @@ -190,20 +666,34 @@ typedef enum { DISPATCH_CLASS_DECL(queue_attr); struct dispatch_queue_attr_s { - DISPATCH_STRUCT_HEADER(queue_attr); - qos_class_t dqa_qos_class; - int dqa_relative_priority; - unsigned int dqa_overcommit:2, dqa_concurrent:1; + OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); + _dispatch_qos_class_t dqa_qos_class; + int8_t dqa_relative_priority; + uint16_t dqa_overcommit:2; + uint16_t dqa_autorelease_frequency:2; + uint16_t dqa_concurrent:1; + uint16_t dqa_inactive:1; }; enum { - DQA_INDEX_NON_OVERCOMMIT = 0, + DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 0, + DQA_INDEX_NON_OVERCOMMIT, DQA_INDEX_OVERCOMMIT, - DQA_INDEX_UNSPECIFIED_OVERCOMMIT, }; #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3 +enum { + DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT = + 
DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, + DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM = + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM, + DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER = + DISPATCH_AUTORELEASE_FREQUENCY_NEVER, +}; + +#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3 + enum { DQA_INDEX_CONCURRENT = 0, DQA_INDEX_SERIAL, @@ -211,6 +701,13 @@ enum { #define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2 +enum { + DQA_INDEX_ACTIVE = 0, + DQA_INDEX_INACTIVE, +}; + +#define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2 + typedef enum { DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0, DQA_INDEX_QOS_CLASS_MAINTENANCE, @@ -226,17 +723,24 @@ typedef enum { extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] [DISPATCH_QUEUE_ATTR_PRIO_COUNT] [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT]; + [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] + [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] + [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT]; + +dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); #pragma mark - #pragma mark dispatch_continuation_t -// If dc_vtable is less than 127, then the object is a continuation. +// If dc_flags is less than 0x1000, then the object is a continuation. // Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. 
#if __LP64__ #define DISPATCH_CONTINUATION_HEADER(x) \ - const void *do_vtable; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ union { \ pthread_priority_t dc_priority; \ int dc_cache_cnt; \ @@ -247,11 +751,32 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] dispatch_function_t dc_func; \ void *dc_ctxt; \ void *dc_data; \ - void *dc_other; + void *dc_other #define _DISPATCH_SIZEOF_PTR 8 +#elif OS_OBJECT_HAVE_OBJC1 +#define DISPATCH_CONTINUATION_HEADER(x) \ + dispatch_function_t dc_func; \ + union { \ + pthread_priority_t dc_priority; \ + int dc_cache_cnt; \ + uintptr_t dc_pad; \ + }; \ + struct voucher_s *dc_voucher; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ + struct dispatch_##x##_s *volatile do_next; \ + void *dc_ctxt; \ + void *dc_data; \ + void *dc_other +#define _DISPATCH_SIZEOF_PTR 4 #else #define DISPATCH_CONTINUATION_HEADER(x) \ - const void *do_vtable; \ + union { \ + const void *do_vtable; \ + uintptr_t dc_flags; \ + }; \ union { \ pthread_priority_t dc_priority; \ int dc_cache_cnt; \ @@ -262,7 +787,7 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] dispatch_function_t dc_func; \ void *dc_ctxt; \ void *dc_data; \ - void *dc_other; + void *dc_other #define _DISPATCH_SIZEOF_PTR 4 #endif #define _DISPATCH_CONTINUATION_PTRS 8 @@ -279,35 +804,55 @@ extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ ~(DISPATCH_CONTINUATION_SIZE - 1u)) -#define DISPATCH_OBJ_ASYNC_BIT 0x1 -#define DISPATCH_OBJ_BARRIER_BIT 0x2 -#define DISPATCH_OBJ_GROUP_BIT 0x4 -#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x8 -#define DISPATCH_OBJ_BLOCK_RELEASE_BIT 0x10 -#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x20 -#define DISPATCH_OBJ_HAS_VOUCHER_BIT 0x80 -// vtables are pointers far away from the low page in memory -#define DISPATCH_OBJ_IS_VTABLE(x) ((unsigned long)(x)->do_vtable > 0xfful) +// continuation is a dispatch_sync or dispatch_barrier_sync 
+#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul +// continuation acts as a barrier +#define DISPATCH_OBJ_BARRIER_BIT 0x002ul +// continuation resources are freed on run +// this is set on async or for non event_handler source handlers +#define DISPATCH_OBJ_CONSUME_BIT 0x004ul +// continuation has a group in dc_data +#define DISPATCH_OBJ_GROUP_BIT 0x008ul +// continuation function is a block (copied in dc_ctxt) +#define DISPATCH_OBJ_BLOCK_BIT 0x010ul +// continuation function is a block with private data, implies BLOCK_BIT +#define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT 0x020ul +// source handler requires fetching context from source +#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul +// use the voucher from the continuation even if the queue has voucher set +#define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul struct dispatch_continuation_s { + struct dispatch_object_s _as_do[0]; DISPATCH_CONTINUATION_HEADER(continuation); }; typedef struct dispatch_continuation_s *dispatch_continuation_t; +typedef struct dispatch_continuation_vtable_s { + _OS_OBJECT_CLASS_HEADER(); + DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation); +} *dispatch_continuation_vtable_t; + #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT #if TARGET_OS_EMBEDDED #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads -#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 16 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16 #else #define DISPATCH_CONTINUATION_CACHE_LIMIT 1024 -#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN 128 +#define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 128 #endif #endif dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); +void _dispatch_continuation_async(dispatch_queue_t dq, + dispatch_continuation_t dc); +void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, + dispatch_invoke_flags_t 
flags); +void _dispatch_continuation_invoke(dispatch_object_t dou, + voucher_t override_voucher, dispatch_invoke_flags_t flags); -#if DISPATCH_USE_MEMORYSTATUS_SOURCE +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE extern int _dispatch_continuation_cache_limit; void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); #else @@ -316,6 +861,67 @@ void _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t c); _dispatch_continuation_free_to_heap(c) #endif +#pragma mark - +#pragma mark dispatch_continuation vtables + +enum { + _DC_USER_TYPE = 0, + DC_ASYNC_REDIRECT_TYPE, + DC_MACH_SEND_BARRRIER_DRAIN_TYPE, + DC_MACH_SEND_BARRIER_TYPE, + DC_MACH_RECV_BARRIER_TYPE, +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_OVERRIDE_STEALING_TYPE, + DC_OVERRIDE_OWNING_TYPE, +#endif + _DC_MAX_TYPE, +}; + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +dc_type(dispatch_continuation_t dc) +{ + return dx_type(dc->_as_do); +} + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +dc_subtype(dispatch_continuation_t dc) +{ + return dx_subtype(dc->_as_do); +} + +extern const struct dispatch_continuation_vtable_s + _dispatch_continuation_vtables[_DC_MAX_TYPE]; + +void +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); + +#if HAVE_PTHREAD_WORKQUEUE_QOS +void +_dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); +#endif + +#define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE]) + +#define DC_VTABLE_ENTRY(name, ...) 
\ + [DC_##name##_TYPE] = { \ + .do_type = DISPATCH_CONTINUATION_TYPE(name), \ + __VA_ARGS__ \ + } + +#pragma mark - +#pragma mark _dispatch_set_priority_and_voucher +#if HAVE_PTHREAD_WORKQUEUE_QOS + +void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, + mach_voucher_t kv); +voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri, + voucher_t voucher, _dispatch_thread_set_self_t flags); + +#endif #pragma mark - #pragma mark dispatch_apply_t @@ -323,7 +929,8 @@ struct dispatch_apply_s { size_t volatile da_index, da_todo; size_t da_iterations, da_nested; dispatch_continuation_t da_dc; - _dispatch_thread_semaphore_t da_sema; + dispatch_thread_event_s da_event; + dispatch_invoke_flags_t da_flags; uint32_t da_thr_cnt; }; typedef struct dispatch_apply_s *dispatch_apply_t; @@ -346,7 +953,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t; voucher_t dbpd_voucher; \ dispatch_block_t dbpd_block; \ dispatch_group_t dbpd_group; \ - dispatch_queue_t volatile dbpd_queue; \ + os_mpsc_queue_t volatile dbpd_queue; \ mach_port_t dbpd_thread; #if !defined(__cplusplus) @@ -375,8 +982,47 @@ typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t priority, dispatch_block_t block); -void _dispatch_block_invoke(const struct dispatch_block_private_data_s *dbcpd); +void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd); +void _dispatch_block_sync_invoke(void *block); + +void _dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, dispatch_block_flags_t flags); +void _dispatch_continuation_update_bits(dispatch_continuation_t dc, + uintptr_t dc_flags); + +bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func); + +/* exported for tests in dispatch_trysync.c */ +DISPATCH_EXPORT DISPATCH_NOTHROW +bool 
_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f); #endif /* __BLOCKS__ */ +typedef struct dispatch_pthread_root_queue_observer_hooks_s { + void (*queue_will_execute)(dispatch_queue_t queue); + void (*queue_did_execute)(dispatch_queue_t queue); +} dispatch_pthread_root_queue_observer_hooks_s; +typedef dispatch_pthread_root_queue_observer_hooks_s + *dispatch_pthread_root_queue_observer_hooks_t; + +#ifdef __APPLE__ +#define DISPATCH_IOHID_SPI 1 + +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW DISPATCH_NONNULL4 +dispatch_queue_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID( + const char *label, unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure); + +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t queue); + +#endif // __APPLE__ + #endif diff --git a/src/semaphore.c b/src/semaphore.c index b8c8971af..4d232b7eb 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -21,29 +21,6 @@ #include "internal.h" // semaphores are too fundamental to use the dispatch_assume*() macros -#if USE_MACH_SEM -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (slowpath((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_semaphore_t"); \ - } else if (slowpath(x)) { \ - DISPATCH_CRASH("mach semaphore API failure"); \ - } \ - } while (0) -#define DISPATCH_GROUP_VERIFY_KR(x) do { \ - if (slowpath((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH("Use-after-free of dispatch_group_t"); \ - } else if (slowpath(x)) { \ - DISPATCH_CRASH("mach semaphore API failure"); \ - } \ - } while (0) -#elif USE_POSIX_SEM -#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ - if (slowpath((x) == -1)) { \ - DISPATCH_CRASH("POSIX semaphore API failure"); \ - } \ - } while (0) -#endif - #if 
USE_WIN32_SEM // rdar://problem/8428132 static DWORD best_resolution = 1; // 1ms @@ -94,24 +71,49 @@ DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); #pragma mark - -#pragma mark dispatch_semaphore_t +#pragma mark dispatch_semaphore_class_t static void -_dispatch_semaphore_init(long value, dispatch_object_t dou) +_dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) { - dispatch_semaphore_t dsema = dou._dsema; + struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS; + dsema->do_next = DISPATCH_OBJECT_LISTLESS; dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); dsema->dsema_value = value; - dsema->dsema_orig = value; #if USE_POSIX_SEM int ret = sem_init(&dsema->dsema_sem, 0, 0); DISPATCH_SEMAPHORE_VERIFY_RET(ret); #endif } +static void +_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau) +{ + struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; + +#if USE_MACH_SEM + kern_return_t kr; + if (dsema->dsema_port) { + kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } + dsema->dsema_port = MACH_PORT_DEAD; +#elif USE_POSIX_SEM + int ret = sem_destroy(&dsema->dsema_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#elif USE_WIN32_SEM + if (dsema->dsema_handle) { + CloseHandle(dsema->dsema_handle); + } +#endif +} + +#pragma mark - +#pragma mark dispatch_semaphore_t + dispatch_semaphore_t dispatch_semaphore_create(long value) { @@ -121,14 +123,13 @@ dispatch_semaphore_create(long value) // equal to the number of waiting threads. Therefore it is bogus to // initialize the semaphore with a negative value. 
if (value < 0) { - return NULL; + return DISPATCH_BAD_INPUT; } dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore), - sizeof(struct dispatch_semaphore_s) - - sizeof(dsema->dsema_notify_head) - - sizeof(dsema->dsema_notify_tail)); - _dispatch_semaphore_init(value, dsema); + sizeof(struct dispatch_semaphore_s)); + _dispatch_semaphore_class_init(value, dsema); + dsema->dsema_orig = value; return dsema; } @@ -142,7 +143,7 @@ _dispatch_semaphore_create_port(semaphore_t *s4) if (*s4) { return; } - _dispatch_safe_fork = false; + _dispatch_fork_becomes_unsafe(); // lazily allocate the semaphore port @@ -157,7 +158,7 @@ _dispatch_semaphore_create_port(semaphore_t *s4) _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { kr = semaphore_destroy(mach_task_self(), tmp); DISPATCH_VERIFY_MIG(kr); DISPATCH_SEMAPHORE_VERIFY_KR(kr); @@ -179,7 +180,7 @@ _dispatch_semaphore_create_handle(HANDLE *s4) _dispatch_temporary_resource_shortage(); } - if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) { + if (!os_atomic_cmpxchg(s4, 0, tmp)) { CloseHandle(tmp); } } @@ -191,26 +192,11 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) dispatch_semaphore_t dsema = dou._dsema; if (dsema->dsema_value < dsema->dsema_orig) { - DISPATCH_CLIENT_CRASH( - "Semaphore/group object deallocated while in use"); + DISPATCH_CLIENT_CRASH(dsema->dsema_orig - dsema->dsema_value, + "Semaphore object deallocated while in use"); } -#if USE_MACH_SEM - kern_return_t kr; - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } - dsema->dsema_port = MACH_PORT_DEAD; -#elif USE_POSIX_SEM - int ret = sem_destroy(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - if (dsema->dsema_handle) { - CloseHandle(dsema->dsema_handle); - } -#endif + _dispatch_semaphore_class_dispose(dsema); } size_t 
@@ -235,17 +221,6 @@ DISPATCH_NOINLINE long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { - // Before dsema_sent_ksignals is incremented we can rely on the reference - // held by the waiter. However, once this value is incremented the waiter - // may return between the atomic increment and the semaphore_signal(), - // therefore an explicit reference must be held in order to safely access - // dsema after the atomic increment. - _dispatch_retain(dsema); - -#if USE_MACH_SEM || USE_POSIX_SEM - (void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals, relaxed); -#endif - #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); kern_return_t kr = semaphore_signal(dsema->dsema_port); @@ -258,20 +233,19 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); dispatch_assume(ret); #endif - - _dispatch_release(dsema); return 1; } long dispatch_semaphore_signal(dispatch_semaphore_t dsema) { - long value = dispatch_atomic_inc2o(dsema, dsema_value, release); + long value = os_atomic_inc2o(dsema, dsema_value, release); if (fastpath(value > 0)) { return 0; } if (slowpath(value == LONG_MIN)) { - DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()"); + DISPATCH_CLIENT_CRASH(value, + "Unbalanced call to dispatch_semaphore_signal()"); } return _dispatch_semaphore_signal_slow(dsema); } @@ -296,34 +270,12 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, DWORD wait_result; #endif -#if USE_MACH_SEM || USE_POSIX_SEM -again: - // Mach semaphores appear to sometimes spuriously wake up. Therefore, - // we keep a parallel count of the number of times a Mach semaphore is - // signaled (6880961). 
- orig = dsema->dsema_sent_ksignals; - while (orig) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_sent_ksignals, orig, - orig - 1, &orig, relaxed)) { - return 0; - } - } -#endif - #if USE_MACH_SEM _dispatch_semaphore_create_port(&dsema->dsema_port); #elif USE_WIN32_SEM _dispatch_semaphore_create_handle(&dsema->dsema_handle); #endif - // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ - // - // The code above does not match the documentation, and that fact is - // not surprising. The documented semantics are clumsy to use in any - // practical way. The above hack effectively tricks the rest of the - // Mach semaphore logic to behave like the libdispatch algorithm. - switch (timeout) { default: #if USE_MACH_SEM @@ -340,13 +292,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, } #elif USE_POSIX_SEM do { - uint64_t nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); } while (ret == -1 && errno == EINTR); - if (ret == -1 && errno != ETIMEDOUT) { + if (!(ret == -1 && errno == ETIMEDOUT)) { DISPATCH_SEMAPHORE_VERIFY_RET(ret); break; } @@ -365,7 +317,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { #if USE_MACH_SEM return KERN_OPERATION_TIMED_OUT; @@ -393,17 +345,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, #endif break; } -#if USE_MACH_SEM || USE_POSIX_SEM - goto again; -#else return 0; -#endif } long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long value = 
dispatch_atomic_dec2o(dsema, dsema_value, acquire); + long value = os_atomic_dec2o(dsema, dsema_value, acquire); if (fastpath(value >= 0)) { return 0; } @@ -418,11 +366,10 @@ static inline dispatch_group_t _dispatch_group_create_with_count(long count) { dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( - DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s)); - _dispatch_semaphore_init(LONG_MAX - count, dg); + DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s)); + _dispatch_semaphore_class_init(count, dg); if (count) { - dispatch_atomic_store2o((dispatch_semaphore_t)dg, do_ref_cnt, 1, - relaxed); // + os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // } return dg; } @@ -442,47 +389,48 @@ _dispatch_group_create_and_enter(void) void dispatch_group_enter(dispatch_group_t dg) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - long value = dispatch_atomic_dec_orig2o(dsema, dsema_value, acquire); - if (value == LONG_MAX) { - return _dispatch_retain(dg); // - } - if (slowpath(value <= 0)) { - DISPATCH_CLIENT_CRASH( + long value = os_atomic_inc_orig2o(dg, dg_value, acquire); + if (slowpath((unsigned long)value >= (unsigned long)LONG_MAX)) { + DISPATCH_CLIENT_CRASH(value, "Too many nested calls to dispatch_group_enter()"); } + if (value == 0) { + _dispatch_retain(dg); // + } } DISPATCH_NOINLINE static long -_dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) +_dispatch_group_wake(dispatch_group_t dg, bool needs_release) { - dispatch_continuation_t next, head, tail = NULL, dc; + dispatch_continuation_t next, head, tail = NULL; long rval; - head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL, relaxed); + // cannot use os_mpsc_capture_snapshot() because we can have concurrent + // _dispatch_group_wake() calls + head = os_atomic_xchg2o(dg, dg_notify_head, NULL, relaxed); if (head) { // snapshot before anything is notified/woken - tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL, relaxed); + tail = 
os_atomic_xchg2o(dg, dg_notify_tail, NULL, release); } - rval = (long)dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0, relaxed); + rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); if (rval) { // wake group waiters #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); + _dispatch_semaphore_create_port(&dg->dg_port); do { - kern_return_t kr = semaphore_signal(dsema->dsema_port); + kern_return_t kr = semaphore_signal(dg->dg_port); DISPATCH_GROUP_VERIFY_KR(kr); } while (--rval); #elif USE_POSIX_SEM do { - int ret = sem_post(&dsema->dsema_sem); + int ret = sem_post(&dg->dg_sem); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } while (--rval); #elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); + _dispatch_semaphore_create_handle(&dg->dg_handle); int ret; - ret = ReleaseSemaphore(dsema->dsema_handle, rval, NULL); + ret = ReleaseSemaphore(dg->dg_handle, rval, NULL); dispatch_assume(ret); #else #error "No supported semaphore type" @@ -491,22 +439,15 @@ _dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) if (head) { // async group notify blocks do { - next = fastpath(head->do_next); - if (!next && head != tail) { - _dispatch_wait_until(next = fastpath(head->do_next)); - } + next = os_mpsc_pop_snapshot_head(head, tail, do_next); dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; - dc = _dispatch_continuation_free_cacheonly(head); - dispatch_async_f(dsn_queue, head->dc_ctxt, head->dc_func); + _dispatch_continuation_async(dsn_queue, head); _dispatch_release(dsn_queue); - if (slowpath(dc)) { - _dispatch_continuation_free_to_cache_limit(dc); - } } while ((head = next)); - _dispatch_release(dsema); + _dispatch_release(dg); } if (needs_release) { - _dispatch_release(dsema); // + _dispatch_release(dg); // } return 0; } @@ -514,21 +455,53 @@ _dispatch_group_wake(dispatch_semaphore_t dsema, bool needs_release) void dispatch_group_leave(dispatch_group_t dg) { - dispatch_semaphore_t dsema = 
(dispatch_semaphore_t)dg; - long value = dispatch_atomic_inc2o(dsema, dsema_value, release); + long value = os_atomic_dec2o(dg, dg_value, release); + if (slowpath(value == 0)) { + return (void)_dispatch_group_wake(dg, true); + } if (slowpath(value < 0)) { - DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()"); + DISPATCH_CLIENT_CRASH(value, + "Unbalanced call to dispatch_group_leave()"); } - if (slowpath(value == LONG_MAX)) { - return (void)_dispatch_group_wake(dsema, true); +} + +void +_dispatch_group_dispose(dispatch_object_t dou) +{ + dispatch_group_t dg = dou._dg; + + if (dg->dg_value) { + DISPATCH_CLIENT_CRASH(dg->dg_value, + "Group object deallocated while in use"); } + + _dispatch_semaphore_class_dispose(dg); +} + +size_t +_dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) +{ + dispatch_group_t dg = dou._dg; + + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dg), dg); + offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); +#if USE_MACH_SEM + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + dg->dg_port); +#endif + offset += dsnprintf(&buf[offset], bufsiz - offset, + "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); + return offset; } DISPATCH_NOINLINE static long -_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) +_dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) { - long orig, value; + long value; + int orig_waiters; #if USE_MACH_SEM mach_timespec_t _timeout; @@ -543,37 +516,29 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) DWORD wait_result; #endif -again: // check before we cause another signal to be sent by incrementing - // dsema->dsema_group_waiters - value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 - if (value == LONG_MAX) { - return _dispatch_group_wake(dsema, false); - } - // Mach semaphores appear to 
sometimes spuriously wake up. Therefore, - // we keep a parallel count of the number of times a Mach semaphore is - // signaled (6880961). - (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed); + // dg->dg_waiters + value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 + if (value == 0) { + return _dispatch_group_wake(dg, false); + } + + (void)os_atomic_inc2o(dg, dg_waiters, relaxed); // check the values again in case we need to wake any threads - value = dispatch_atomic_load2o(dsema, dsema_value, seq_cst); // 19296565 - if (value == LONG_MAX) { - return _dispatch_group_wake(dsema, false); + value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 + if (value == 0) { + _dispatch_group_wake(dg, false); + // Fall through to consume the extra signal, forcing timeout to avoid + // useless setups as it won't block + timeout = DISPATCH_TIME_FOREVER; } #if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); + _dispatch_semaphore_create_port(&dg->dg_port); #elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); + _dispatch_semaphore_create_handle(&dg->dg_handle); #endif - // From xnu/osfmk/kern/sync_sema.c: - // wait_semaphore->count = -1; /* we don't keep an actual count */ - // - // The code above does not match the documentation, and that fact is - // not surprising. The documented semantics are clumsy to use in any - // practical way. The above hack effectively tricks the rest of the - // Mach semaphore logic to behave like the libdispatch algorithm. 
- switch (timeout) { default: #if USE_MACH_SEM @@ -581,7 +546,7 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); + kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout)); } while (kr == KERN_ABORTED); if (kr != KERN_OPERATION_TIMED_OUT) { @@ -590,10 +555,10 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) } #elif USE_POSIX_SEM do { - uint64_t nsec = _dispatch_timeout(timeout); + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); + ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout)); } while (ret == -1 && errno == EINTR); if (!(ret == -1 && errno == ETIMEDOUT)) { @@ -604,19 +569,19 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + wait_result = WaitForSingleObject(dg->dg_handle, msec); _pop_timer_resolution(resolution); if (wait_result != WAIT_TIMEOUT) { break; } #endif // Fall through and try to undo the earlier change to - // dsema->dsema_group_waiters + // dg->dg_waiters case DISPATCH_TIME_NOW: - orig = dsema->dsema_group_waiters; - while (orig) { - if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_group_waiters, orig, - orig - 1, &orig, relaxed)) { + orig_waiters = dg->dg_waiters; + while (orig_waiters) { + if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, + orig_waiters - 1, &orig_waiters, relaxed)) { #if USE_MACH_SEM return 
KERN_OPERATION_TIMED_OUT; #elif USE_POSIX_SEM || USE_WIN32_SEM @@ -630,28 +595,26 @@ _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) case DISPATCH_TIME_FOREVER: #if USE_MACH_SEM do { - kr = semaphore_wait(dsema->dsema_port); + kr = semaphore_wait(dg->dg_port); } while (kr == KERN_ABORTED); DISPATCH_GROUP_VERIFY_KR(kr); #elif USE_POSIX_SEM do { - ret = sem_wait(&dsema->dsema_sem); + ret = sem_wait(&dg->dg_sem); } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); #elif USE_WIN32_SEM - WaitForSingleObject(dsema->dsema_handle, INFINITE); + WaitForSingleObject(dg->dg_handle, INFINITE); #endif break; } - goto again; - } + return 0; +} long dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - - if (dsema->dsema_value == LONG_MAX) { + if (dg->dg_value == 0) { return 0; } if (timeout == 0) { @@ -662,152 +625,45 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return (-1); #endif } - return _dispatch_group_wait_slow(dsema, timeout); + return _dispatch_group_wait_slow(dg, timeout); } -DISPATCH_NOINLINE -void -dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - void (*func)(void *)) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_continuation_t dsn) { - dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg; - dispatch_continuation_t prev, dsn = _dispatch_continuation_alloc(); - dsn->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT; dsn->dc_data = dq; - dsn->dc_ctxt = ctxt; - dsn->dc_func = func; dsn->do_next = NULL; _dispatch_retain(dq); - prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn, release); - if (fastpath(prev)) { - prev->do_next = dsn; - } else { + if (os_mpsc_push_update_tail(dg, dg_notify, dsn, do_next)) { _dispatch_retain(dg); - dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst); + os_atomic_store2o(dg, 
dg_notify_head, dsn, ordered); // seq_cst with atomic store to notify_head - if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) { - _dispatch_group_wake(dsema, false); + if (os_atomic_load2o(dg, dg_value, ordered) == 0) { + _dispatch_group_wake(dg, false); } } } -#ifdef __BLOCKS__ -void -dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, - dispatch_block_t db) -{ - dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db), - _dispatch_call_block_and_release); -} -#endif - -#pragma mark - -#pragma mark _dispatch_thread_semaphore_t - -_dispatch_thread_semaphore_t -_dispatch_thread_semaphore_create(void) -{ - _dispatch_safe_fork = false; -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_create(); -#elif USE_MACH_SEM - semaphore_t s4; - kern_return_t kr; - while (slowpath(kr = semaphore_create(mach_task_self(), &s4, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - return s4; -#elif USE_POSIX_SEM - sem_t s4; - int ret = sem_init(&s4, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - return s4; -#elif USE_WIN32_SEM - HANDLE tmp; - while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { - _dispatch_temporary_resource_shortage(); - } - return (_dispatch_thread_semaphore_t)tmp; -#else -#error "No supported semaphore type" -#endif -} - +DISPATCH_NOINLINE void -_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema) +dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_dispose(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr = semaphore_destroy(mach_task_self(), s4); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret = sem_destroy(&s4); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - // XXX: signal the semaphore? 
- WINBOOL success; - success = CloseHandle((HANDLE)sema); - dispatch_assume(success); -#else -#error "No supported semaphore type" -#endif + dispatch_continuation_t dsn = _dispatch_continuation_alloc(); + _dispatch_continuation_init_f(dsn, dq, ctxt, func, 0, 0, + DISPATCH_OBJ_CONSUME_BIT); + _dispatch_group_notify(dg, dq, dsn); } +#ifdef __BLOCKS__ void -_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema) +dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_block_t db) { - // assumed to contain a release barrier -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_signal(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr = semaphore_signal(s4); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret = sem_post(&s4); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - int ret; - ret = ReleaseSemaphore((HANDLE)sema, 1, NULL); - dispatch_assume(ret); -#else -#error "No supported semaphore type" -#endif + dispatch_continuation_t dsn = _dispatch_continuation_alloc(); + _dispatch_continuation_init(dsn, dq, db, 0, 0, DISPATCH_OBJ_CONSUME_BIT); + _dispatch_group_notify(dg, dq, dsn); } - -void -_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema) -{ - // assumed to contain an acquire barrier -#if DISPATCH_USE_OS_SEMAPHORE_CACHE - return _os_semaphore_wait(sema); -#elif USE_MACH_SEM - semaphore_t s4 = (semaphore_t)sema; - kern_return_t kr; - do { - kr = semaphore_wait(s4); - } while (slowpath(kr == KERN_ABORTED)); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - sem_t s4 = (sem_t)sema; - int ret; - do { - ret = sem_wait(&s4); - } while (slowpath(ret != 0)); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - DWORD wait_result; - do { - wait_result = WaitForSingleObject((HANDLE)sema, INFINITE); - } while (wait_result != WAIT_OBJECT_0); -#else -#error "No supported semaphore type" #endif -} diff --git 
a/src/semaphore_internal.h b/src/semaphore_internal.h index 11261c3c9..dceda6d97 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,65 +29,56 @@ struct dispatch_queue_s; -DISPATCH_CLASS_DECL(semaphore); -struct dispatch_semaphore_s { - DISPATCH_STRUCT_HEADER(semaphore); #if USE_MACH_SEM - semaphore_t dsema_port; +#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port #elif USE_POSIX_SEM - sem_t dsema_sem; +#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem #elif USE_WIN32_SEM - HANDLE dsema_handle; +#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle #else #error "No supported semaphore type" #endif + +#define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ + DISPATCH_OBJECT_HEADER(cls); \ + long volatile ns##_value; \ + DISPATCH_OS_SEMA_FIELD(ns) + +struct dispatch_semaphore_header_s { + DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); +}; + +DISPATCH_CLASS_DECL(semaphore); +struct dispatch_semaphore_s { + DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); long dsema_orig; - long volatile dsema_value; - union { - long volatile dsema_sent_ksignals; - long volatile dsema_group_waiters; - }; - struct dispatch_continuation_s *volatile dsema_notify_head; - struct dispatch_continuation_s *volatile dsema_notify_tail; }; DISPATCH_CLASS_DECL(group); +struct dispatch_group_s { + DISPATCH_SEMAPHORE_HEADER(group, dg); + int volatile dg_waiters; + struct dispatch_continuation_s *volatile dg_notify_head; + struct dispatch_continuation_s *volatile dg_notify_tail; +}; + +typedef union { + struct dispatch_semaphore_header_s *_dsema_hdr; + struct dispatch_semaphore_s *_dsema; + struct dispatch_group_s *_dg; +#if USE_OBJC + dispatch_semaphore_t _objc_dsema; + dispatch_group_t _objc_dg; +#endif +} dispatch_semaphore_class_t __attribute__((__transparent_union__)); dispatch_group_t _dispatch_group_create_and_enter(void); +void _dispatch_group_dispose(dispatch_object_t dou); +size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, + size_t bufsiz); + void 
_dispatch_semaphore_dispose(dispatch_object_t dou); size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); -typedef uintptr_t _dispatch_thread_semaphore_t; - -_dispatch_thread_semaphore_t _dispatch_thread_semaphore_create(void); -void _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t); -void _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t); - -DISPATCH_ALWAYS_INLINE -static inline _dispatch_thread_semaphore_t -_dispatch_get_thread_semaphore(void) -{ - _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (slowpath(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema) -{ - _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema); - if (slowpath(old_sema)) { - return _dispatch_thread_semaphore_dispose(old_sema); - } -} - #endif diff --git a/src/shims.h b/src/shims.h index ae7f1c3d7..db288225e 100644 --- a/src/shims.h +++ b/src/shims.h @@ -34,11 +34,7 @@ #include #define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE #define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED -#ifndef QOS_CLASS_LEGACY -#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_LEGACY -#else #define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT -#endif #define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY #define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND #define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED @@ -51,9 +47,18 @@ typedef unsigned long pthread_priority_t; #else // sys/qos_private.h #define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05 #endif 
// sys/qos_private.h +#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#endif +#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 +#endif #ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG #define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 #endif +#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#endif #ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 #endif @@ -63,16 +68,38 @@ typedef unsigned long pthread_priority_t; #ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG #define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 #endif +#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#endif +#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#endif + #else // HAVE_PTHREAD_QOS_H typedef unsigned int qos_class_t; typedef unsigned long pthread_priority_t; #define QOS_MIN_RELATIVE_PRIORITY (-15) +#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) #define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 #define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 #define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 #define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #endif // HAVE_PTHREAD_QOS_H + +#ifdef __linux__ +#include "shims/linux_stubs.h" +#endif + +typedef uint32_t dispatch_priority_t; +#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX) + #ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE enum { _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21, @@ -99,6 
+126,10 @@ enum { #include #endif +#if __has_include() +#include +#endif + #if !HAVE_DECL_FD_COPY #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif @@ -128,9 +159,19 @@ _pthread_workqueue_override_start_direct(mach_port_t thread, } #endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140716 +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20150319 +static inline int +_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, + pthread_priority_t priority, mach_port_t *ulock_addr) +{ + (void)ulock_addr; + return _pthread_workqueue_override_start_direct(thread, priority); +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20150319 + #if PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 static inline int -_pthread_override_qos_class_start_direct(pthread_t thread, +_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority) { (void)thread; (void)priority; @@ -145,6 +186,27 @@ _pthread_override_qos_class_end_direct(mach_port_t thread) } #endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20140707 +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20150325 +static inline int +_pthread_qos_override_start_direct(mach_port_t thread, + pthread_priority_t priority, void *resource) +{ + (void)resource; + return _pthread_override_qos_class_start_direct(thread, priority); +} + +static inline int +_pthread_qos_override_end_direct(mach_port_t thread, void *resource) +{ + (void)resource; + return _pthread_override_qos_class_end_direct(thread); +} +#endif // PTHREAD_WORKQUEUE_SPI_VERSION < 20150325 + +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20160427 +#define _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND 0 +#endif + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -156,13 +218,19 @@ void __builtin_trap(void); #endif #if DISPATCH_HW_CONFIG_UP -#define DISPATCH_ATOMIC_UP 1 +#define OS_ATOMIC_UP 1 +#else +#define OS_ATOMIC_UP 0 #endif + +#ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" +#endif #include "shims/atomic_sfb.h" #include 
"shims/tsd.h" #include "shims/yield.h" +#include "shims/lock.h" #include "shims/hw_config.h" #include "shims/perfmon.h" @@ -170,6 +238,47 @@ void __builtin_trap(void); #include "shims/getprogname.h" #include "shims/time.h" +#if __has_include() +#include +#elif __has_builtin(__builtin_add_overflow) +#define os_add_overflow(a, b, c) __builtin_add_overflow(a, b, c) +#define os_sub_overflow(a, b, c) __builtin_sub_overflow(a, b, c) +#define os_mul_overflow(a, b, c) __builtin_mul_overflow(a, b, c) +#else +#error unsupported compiler +#endif + +#ifndef os_mul_and_add_overflow +#define os_mul_and_add_overflow(a, x, b, res) __extension__({ \ + __typeof(*(res)) _tmp; \ + bool _s, _t; \ + _s = os_mul_overflow((a), (x), &_tmp); \ + _t = os_add_overflow((b), _tmp, (res)); \ + _s | _t; \ +}) +#endif + + +#if __has_feature(c_static_assert) +#define __dispatch_is_array(x) \ + _Static_assert(!__builtin_types_compatible_p(typeof((x)[0]) *, typeof(x)), \ + #x " isn't an array") +#define countof(x) \ + ({ __dispatch_is_array(x); sizeof(x) / sizeof((x)[0]); }) +#else +#define countof(x) (sizeof(x) / sizeof(x[0])) +#endif + +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_mempcpy(void *ptr, const void *data, size_t len) +{ + memcpy(ptr, data, len); + return (char *)ptr + len; +} +#define _dispatch_memappend(ptr, e) \ + _dispatch_mempcpy(ptr, e, sizeof(*(e))) + #ifdef __APPLE__ // Clear the stack before calling long-running thread-handler functions that // never return (and don't take arguments), to facilitate leak detection and diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 24c113b97..519947790 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -27,54 +27,28 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -// generate error during codegen -#define _dispatch_atomic_unimplemented() \ - ({ __asm__(".err unimplemented"); }) - -#pragma mark - -#pragma mark memory_order - -typedef enum _dispatch_atomic_memory_order -{ - _dispatch_atomic_memory_order_relaxed, - _dispatch_atomic_memory_order_consume, - _dispatch_atomic_memory_order_acquire, - _dispatch_atomic_memory_order_release, - _dispatch_atomic_memory_order_acq_rel, - _dispatch_atomic_memory_order_seq_cst, -} _dispatch_atomic_memory_order; - -#if !DISPATCH_ATOMIC_UP - -#define dispatch_atomic_memory_order_relaxed \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acquire \ - _dispatch_atomic_memory_order_acquire -#define dispatch_atomic_memory_order_release \ - _dispatch_atomic_memory_order_release -#define dispatch_atomic_memory_order_acq_rel \ - _dispatch_atomic_memory_order_acq_rel -#define dispatch_atomic_memory_order_seq_cst \ - _dispatch_atomic_memory_order_seq_cst - -#else // DISPATCH_ATOMIC_UP +#if !__has_extension(c_atomic) || \ + !__has_extension(c_generic_selections) || \ + !__has_include() +#error libdispatch requires C11 with and generic selections +#endif -#define dispatch_atomic_memory_order_relaxed \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acquire \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_release \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_acq_rel \ - _dispatch_atomic_memory_order_relaxed -#define dispatch_atomic_memory_order_seq_cst \ - _dispatch_atomic_memory_order_relaxed +#include -#endif // DISPATCH_ATOMIC_UP +#define memory_order_ordered memory_order_seq_cst -#if __has_extension(c_generic_selections) -#define _dispatch_atomic_basetypeof(p) \ +#define _os_atomic_basetypeof(p) \ typeof(*_Generic((p), \ + char*: (char*)(p), \ + 
volatile char*: (char*)(p), \ + signed char*: (signed char*)(p), \ + volatile signed char*: (signed char*)(p), \ + unsigned char*: (unsigned char*)(p), \ + volatile unsigned char*: (unsigned char*)(p), \ + short*: (short*)(p), \ + volatile short*: (short*)(p), \ + unsigned short*: (unsigned short*)(p), \ + volatile unsigned short*: (unsigned short*)(p), \ int*: (int*)(p), \ volatile int*: (int*)(p), \ unsigned int*: (unsigned int*)(p), \ @@ -87,15 +61,22 @@ typedef enum _dispatch_atomic_memory_order volatile long long*: (long long*)(p), \ unsigned long long*: (unsigned long long*)(p), \ volatile unsigned long long*: (unsigned long long*)(p), \ + const void**: (const void**)(p), \ + const void*volatile*: (const void**)(p), \ default: (void**)(p))) -#endif - -#if __has_extension(c_atomic) && __has_extension(c_generic_selections) -#pragma mark - -#pragma mark c11 -#define _dispatch_atomic_c11_atomic(p) \ +#define _os_atomic_c11_atomic(p) \ _Generic((p), \ + char*: (_Atomic(char)*)(p), \ + volatile char*: (volatile _Atomic(char)*)(p), \ + signed char*: (_Atomic(signed char)*)(p), \ + volatile signed char*: (volatile _Atomic(signed char)*)(p), \ + unsigned char*: (_Atomic(unsigned char)*)(p), \ + volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \ + short*: (_Atomic(short)*)(p), \ + volatile short*: (volatile _Atomic(short)*)(p), \ + unsigned short*: (_Atomic(unsigned short)*)(p), \ + volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \ int*: (_Atomic(int)*)(p), \ volatile int*: (volatile _Atomic(int)*)(p), \ unsigned int*: (_Atomic(unsigned int)*)(p), \ @@ -109,263 +90,154 @@ typedef enum _dispatch_atomic_memory_order unsigned long long*: (_Atomic(unsigned long long)*)(p), \ volatile unsigned long long*: \ (volatile _Atomic(unsigned long long)*)(p), \ + const void**: (_Atomic(const void*)*)(p), \ + const void*volatile*: (volatile _Atomic(const void*)*)(p), \ default: (volatile _Atomic(void*)*)(p)) -#define _dispatch_atomic_barrier(m) \ - ({ 
__c11_atomic_thread_fence(dispatch_atomic_memory_order_##m); }) -#define dispatch_atomic_load(p, m) \ - ({ _dispatch_atomic_basetypeof(p) _r = \ - __c11_atomic_load(_dispatch_atomic_c11_atomic(p), \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) -#define dispatch_atomic_store(p, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v); \ - __c11_atomic_store(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); }) -#define dispatch_atomic_xchg(p, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_exchange(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))_r; }) -#define dispatch_atomic_cmpxchg(p, e, v, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); \ - __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); }) -#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ - __c11_atomic_compare_exchange_strong(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) -#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ - __c11_atomic_compare_exchange_weak(_dispatch_atomic_c11_atomic(p), \ - &_r, _v, dispatch_atomic_memory_order_##m, \ - dispatch_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) -#define _dispatch_atomic_c11_op(p, v, m, o, op) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); }) -#define _dispatch_atomic_c11_op_orig(p, v, m, o, op) \ - ({ _dispatch_atomic_basetypeof(p) _v = (v), _r = \ - __c11_atomic_fetch_##o(_dispatch_atomic_c11_atomic(p), _v, \ - dispatch_atomic_memory_order_##m); 
(typeof(*(p)))_r; }) - -#define dispatch_atomic_add(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, add, +) -#define dispatch_atomic_add_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, add, +) -#define dispatch_atomic_sub(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, sub, -) -#define dispatch_atomic_sub_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, sub, -) -#define dispatch_atomic_and(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, and, &) -#define dispatch_atomic_and_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, and, &) -#define dispatch_atomic_or(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, or, |) -#define dispatch_atomic_or_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, or, |) -#define dispatch_atomic_xor(p, v, m) \ - _dispatch_atomic_c11_op((p), (v), m, xor, ^) -#define dispatch_atomic_xor_orig(p, v, m) \ - _dispatch_atomic_c11_op_orig((p), (v), m, xor, ^) - -#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2) -#pragma mark - -#pragma mark gnu99 - -#define _dispatch_atomic_full_barrier() \ - __sync_synchronize() -#define _dispatch_atomic_barrier(m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_full_barrier(); break; \ - } }) -// seq_cst: only emulate explicit store(seq_cst) -> load(seq_cst) -#define dispatch_atomic_load(p, m) \ - ({ typeof(*(p)) _r = *(p); \ - switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); /* fallthrough */ \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } _r; }) -#define dispatch_atomic_store(p, v, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_release: \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); /* fallthrough */ \ - case 
_dispatch_atomic_memory_order_relaxed: \ - *(p) = (v); break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - _dispatch_atomic_barrier(m); break; \ - default: \ - break; \ - } }) -#if __has_builtin(__sync_swap) -#define dispatch_atomic_xchg(p, v, m) \ - ((typeof(*(p)))__sync_swap((p), (v))) -#else -#define dispatch_atomic_xchg(p, v, m) \ - ((typeof(*(p)))__sync_lock_test_and_set((p), (v))) -#endif -#define dispatch_atomic_cmpxchg(p, e, v, m) \ - __sync_bool_compare_and_swap((p), (e), (v)) -#define dispatch_atomic_cmpxchgv(p, e, v, g, m) \ - ({ typeof(*(g)) _e = (e), _r = \ - __sync_val_compare_and_swap((p), _e, (v)); \ - bool _b = (_e == _r); *(g) = _r; _b; }) -#define dispatch_atomic_cmpxchgvw(p, e, v, g, m) \ - dispatch_atomic_cmpxchgv((p), (e), (v), (g), m) - -#define dispatch_atomic_add(p, v, m) \ - __sync_add_and_fetch((p), (v)) -#define dispatch_atomic_add_orig(p, v, m) \ - __sync_fetch_and_add((p), (v)) -#define dispatch_atomic_sub(p, v, m) \ - __sync_sub_and_fetch((p), (v)) -#define dispatch_atomic_sub_orig(p, v, m) \ - __sync_fetch_and_sub((p), (v)) -#define dispatch_atomic_and(p, v, m) \ - __sync_and_and_fetch((p), (v)) -#define dispatch_atomic_and_orig(p, v, m) \ - __sync_fetch_and_and((p), (v)) -#define dispatch_atomic_or(p, v, m) \ - __sync_or_and_fetch((p), (v)) -#define dispatch_atomic_or_orig(p, v, m) \ - __sync_fetch_and_or((p), (v)) -#define dispatch_atomic_xor(p, v, m) \ - __sync_xor_and_fetch((p), (v)) -#define dispatch_atomic_xor_orig(p, v, m) \ - __sync_fetch_and_xor((p), (v)) - -#if defined(__x86_64__) || defined(__i386__) -// GCC emits nothing for __sync_synchronize() on x86_64 & i386 -#undef _dispatch_atomic_full_barrier -#define _dispatch_atomic_full_barrier() \ - ({ __asm__ __volatile__( \ - "mfence" \ - : : : "memory"); }) -#undef dispatch_atomic_load -#define dispatch_atomic_load(p, m) \ - ({ 
switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_seq_cst: \ - case _dispatch_atomic_memory_order_relaxed: \ - break; \ - default: \ - _dispatch_atomic_unimplemented(); break; \ - } *(p); }) -// xchg is faster than store + mfence -#undef dispatch_atomic_store -#define dispatch_atomic_store(p, v, m) \ - ({ switch(dispatch_atomic_memory_order_##m) { \ - case _dispatch_atomic_memory_order_relaxed: \ - case _dispatch_atomic_memory_order_release: \ - *(p) = (v); break; \ - case _dispatch_atomic_memory_order_seq_cst: \ - (void)dispatch_atomic_xchg((p), (v), m); break; \ - default:\ - _dispatch_atomic_unimplemented(); break; \ - } }) -#endif - -#else -#error "Please upgrade to GCC 4.2 or newer." -#endif - -#pragma mark - -#pragma mark generic - -// assume atomic builtins provide barriers -#define dispatch_atomic_barrier(m) +#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) // see comment in dispatch_once.c -#define dispatch_atomic_maximally_synchronizing_barrier() \ - _dispatch_atomic_barrier(seq_cst) - -#define dispatch_atomic_load2o(p, f, m) \ - dispatch_atomic_load(&(p)->f, m) -#define dispatch_atomic_store2o(p, f, v, m) \ - dispatch_atomic_store(&(p)->f, (v), m) -#define dispatch_atomic_xchg2o(p, f, v, m) \ - dispatch_atomic_xchg(&(p)->f, (v), m) -#define dispatch_atomic_cmpxchg2o(p, f, e, v, m) \ - dispatch_atomic_cmpxchg(&(p)->f, (e), (v), m) -#define dispatch_atomic_cmpxchgv2o(p, f, e, v, g, m) \ - dispatch_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define dispatch_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - dispatch_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) -#define dispatch_atomic_add2o(p, f, v, m) \ - dispatch_atomic_add(&(p)->f, (v), m) -#define dispatch_atomic_add_orig2o(p, f, v, m) \ - dispatch_atomic_add_orig(&(p)->f, (v), m) -#define dispatch_atomic_sub2o(p, f, v, m) \ - dispatch_atomic_sub(&(p)->f, (v), m) -#define dispatch_atomic_sub_orig2o(p, f, v, m) \ - dispatch_atomic_sub_orig(&(p)->f, (v), m) 
-#define dispatch_atomic_and2o(p, f, v, m) \ - dispatch_atomic_and(&(p)->f, (v), m) -#define dispatch_atomic_and_orig2o(p, f, v, m) \ - dispatch_atomic_and_orig(&(p)->f, (v), m) -#define dispatch_atomic_or2o(p, f, v, m) \ - dispatch_atomic_or(&(p)->f, (v), m) -#define dispatch_atomic_or_orig2o(p, f, v, m) \ - dispatch_atomic_or_orig(&(p)->f, (v), m) -#define dispatch_atomic_xor2o(p, f, v, m) \ - dispatch_atomic_xor(&(p)->f, (v), m) -#define dispatch_atomic_xor_orig2o(p, f, v, m) \ - dispatch_atomic_xor_orig(&(p)->f, (v), m) - -#define dispatch_atomic_inc(p, m) \ - dispatch_atomic_add((p), 1, m) -#define dispatch_atomic_inc_orig(p, m) \ - dispatch_atomic_add_orig((p), 1, m) -#define dispatch_atomic_inc2o(p, f, m) \ - dispatch_atomic_add2o(p, f, 1, m) -#define dispatch_atomic_inc_orig2o(p, f, m) \ - dispatch_atomic_add_orig2o(p, f, 1, m) -#define dispatch_atomic_dec(p, m) \ - dispatch_atomic_sub((p), 1, m) -#define dispatch_atomic_dec_orig(p, m) \ - dispatch_atomic_sub_orig((p), 1, m) -#define dispatch_atomic_dec2o(p, f, m) \ - dispatch_atomic_sub2o(p, f, 1, m) -#define dispatch_atomic_dec_orig2o(p, f, m) \ - dispatch_atomic_sub_orig2o(p, f, 1, m) - -#define dispatch_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \ - dispatch_atomic_cmpxchgv((p), (e), (v), (g), acquire) -#define dispatch_atomic_tsx_xrel_store(p, v) \ - dispatch_atomic_store(p, v, release) -#define dispatch_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \ - dispatch_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g)) -#define dispatch_atomic_tsx_xrel_store2o(p, f, v) \ - dispatch_atomic_tsx_xrel_store(&(p)->f, (v)) +#define os_atomic_maximally_synchronizing_barrier() \ + atomic_thread_fence(memory_order_seq_cst) + +#define os_atomic_load(p, m) \ + ({ _os_atomic_basetypeof(p) _r = \ + atomic_load_explicit(_os_atomic_c11_atomic(p), \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_store(p, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v); \ + atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ + 
memory_order_##m); }) +#define os_atomic_xchg(p, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_cmpxchg(p, e, v, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); }) +#define os_atomic_cmpxchgv(p, e, v, g, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) +#define os_atomic_cmpxchgvw(p, e, v, g, m) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ + &_r, _v, memory_order_##m, \ + memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + +#define _os_atomic_c11_op(p, v, m, o, op) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))(_r op _v); }) +#define _os_atomic_c11_op_orig(p, v, m, o, op) \ + ({ _os_atomic_basetypeof(p) _v = (v), _r = \ + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ + memory_order_##m); (typeof(*(p)))_r; }) +#define os_atomic_add(p, v, m) \ + _os_atomic_c11_op((p), (v), m, add, +) +#define os_atomic_add_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, add, +) +#define os_atomic_sub(p, v, m) \ + _os_atomic_c11_op((p), (v), m, sub, -) +#define os_atomic_sub_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, sub, -) +#define os_atomic_and(p, v, m) \ + _os_atomic_c11_op((p), (v), m, and, &) +#define os_atomic_and_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, and, &) +#define os_atomic_or(p, v, m) \ + _os_atomic_c11_op((p), (v), m, or, |) +#define os_atomic_or_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, or, 
|) +#define os_atomic_xor(p, v, m) \ + _os_atomic_c11_op((p), (v), m, xor, ^) +#define os_atomic_xor_orig(p, v, m) \ + _os_atomic_c11_op_orig((p), (v), m, xor, ^) + +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + bool _result = false; \ + typeof(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (os_unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ + os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) +#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ + ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + +#define os_atomic_load2o(p, f, m) \ + os_atomic_load(&(p)->f, m) +#define os_atomic_store2o(p, f, v, m) \ + os_atomic_store(&(p)->f, (v), m) +#define os_atomic_xchg2o(p, f, v, m) \ + os_atomic_xchg(&(p)->f, (v), m) +#define os_atomic_cmpxchg2o(p, f, e, v, m) \ + os_atomic_cmpxchg(&(p)->f, (e), (v), m) +#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ + os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) +#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ + os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) +#define os_atomic_add2o(p, f, v, m) \ + os_atomic_add(&(p)->f, (v), m) +#define os_atomic_add_orig2o(p, f, v, m) \ + os_atomic_add_orig(&(p)->f, (v), m) +#define os_atomic_sub2o(p, f, v, m) \ + os_atomic_sub(&(p)->f, (v), m) +#define os_atomic_sub_orig2o(p, f, v, m) \ + os_atomic_sub_orig(&(p)->f, (v), m) +#define os_atomic_and2o(p, f, v, m) \ + os_atomic_and(&(p)->f, (v), m) +#define os_atomic_and_orig2o(p, f, v, m) \ + os_atomic_and_orig(&(p)->f, (v), m) +#define os_atomic_or2o(p, f, v, m) \ + os_atomic_or(&(p)->f, (v), m) +#define os_atomic_or_orig2o(p, f, v, m) \ + os_atomic_or_orig(&(p)->f, (v), m) +#define os_atomic_xor2o(p, f, v, m) \ + os_atomic_xor(&(p)->f, (v), m) +#define os_atomic_xor_orig2o(p, f, v, 
m) \ + os_atomic_xor_orig(&(p)->f, (v), m) + +#define os_atomic_inc(p, m) \ + os_atomic_add((p), 1, m) +#define os_atomic_inc_orig(p, m) \ + os_atomic_add_orig((p), 1, m) +#define os_atomic_inc2o(p, f, m) \ + os_atomic_add2o(p, f, 1, m) +#define os_atomic_inc_orig2o(p, f, m) \ + os_atomic_add_orig2o(p, f, 1, m) +#define os_atomic_dec(p, m) \ + os_atomic_sub((p), 1, m) +#define os_atomic_dec_orig(p, m) \ + os_atomic_sub_orig((p), 1, m) +#define os_atomic_dec2o(p, f, m) \ + os_atomic_sub2o(p, f, 1, m) +#define os_atomic_dec_orig2o(p, f, m) \ + os_atomic_sub_orig2o(p, f, 1, m) #if defined(__x86_64__) || defined(__i386__) -#pragma mark - -#pragma mark x86 - -#undef dispatch_atomic_maximally_synchronizing_barrier +#undef os_atomic_maximally_synchronizing_barrier #ifdef __LP64__ -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "cpuid" \ : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) #else #ifdef __llvm__ -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "cpuid" \ : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) #else // gcc does not allow inline i386 asm to clobber ebx -#define dispatch_atomic_maximally_synchronizing_barrier() \ +#define os_atomic_maximally_synchronizing_barrier() \ ({ unsigned long _clbr; __asm__ __volatile__( \ "pushl %%ebx\n\t" \ "cpuid\n\t" \ @@ -373,9 +245,6 @@ typedef enum _dispatch_atomic_memory_order : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) #endif #endif - - -#endif - +#endif // defined(__x86_64__) || defined(__i386__) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index 087d98c80..5f972b4fe 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -32,11 +32,11 @@ #endif // Returns UINT_MAX if all the 
bits in p were already set. -#define dispatch_atomic_set_first_bit(p,m) _dispatch_atomic_set_first_bit(p,m) +#define os_atomic_set_first_bit(p,m) _os_atomic_set_first_bit(p,m) DISPATCH_ALWAYS_INLINE static inline unsigned int -_dispatch_atomic_set_first_bit(volatile unsigned long *p, +_os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index) { unsigned int index; @@ -63,10 +63,10 @@ _dispatch_atomic_set_first_bit(volatile unsigned long *p, #if defined(__x86_64__) || defined(__i386__) -#undef dispatch_atomic_set_first_bit +#undef os_atomic_set_first_bit DISPATCH_ALWAYS_INLINE static inline unsigned int -dispatch_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) +os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) { unsigned long val, bit; if (max > (sizeof(val) * 8)) { diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 2b85d4a60..cad211d21 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -81,6 +81,15 @@ static inline uint32_t _dispatch_hw_get_config(_dispatch_hw_config_t c) { uint32_t val = 1; +#if defined(__linux__) && HAVE_SYSCONF + switch (c) { + case _dispatch_hw_config_logical_cpus: + case _dispatch_hw_config_physical_cpus: + return sysconf(_SC_NPROCESSORS_CONF); + case _dispatch_hw_config_active_cpus: + return sysconf(_SC_NPROCESSORS_ONLN); + } +#else const char *name = NULL; int r; #if defined(__APPLE__) @@ -106,6 +115,7 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) if (r > 0) val = (uint32_t)r; #endif } +#endif return val; } diff --git a/src/shims/linux_stubs.c b/src/shims/linux_stubs.c new file mode 100644 index 000000000..07ee8bc06 --- /dev/null +++ b/src/shims/linux_stubs.c @@ -0,0 +1,53 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. 
and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +/* + * This file contains stubbed out functions we are using during + * the initial linux port. When the port is complete, this file + * should be empty (and thus removed). + */ + +#include +#include + +#if __has_include() +#include +#else +#include +#endif + +#include "pthread.h" +#include "os/linux_base.h" +#include "internal.h" + + +#undef LINUX_PORT_ERROR +#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); abort(); } while (0) + + +/* + * Stubbed out static data + */ + +pthread_key_t dispatch_voucher_key; +pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; + +unsigned short dispatch_timer__program_semaphore; +unsigned short dispatch_timer__wake_semaphore; +unsigned short dispatch_timer__fire_semaphore; +unsigned short dispatch_timer__configure_semaphore; +unsigned short dispatch_queue__pop_semaphore; +unsigned short dispatch_callout__entry_semaphore; +unsigned short dispatch_callout__return_semaphore; +unsigned short dispatch_queue__push_semaphore; +void (*_dispatch_block_special_invoke)(void*); +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h new file mode 100644 index 000000000..6a70c0b11 --- /dev/null +++ b/src/shims/linux_stubs.h @@ -0,0 +1,101 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. 
and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +// forward declarations for functions we are stubbing out +// in the intial linux port. + +#ifndef __DISPATCH__STUBS__INTERNAL +#define __DISPATCH__STUBS__INTERNAL + +// marker for hacks we have made to make progress +#define __LINUX_PORT_HDD__ 1 + +/* + * Stub out defines for some mach types and related macros + */ + +typedef uint32_t mach_port_t; + +#define MACH_PORT_NULL (0) +#define MACH_PORT_DEAD (-1) + +typedef uint32_t mach_error_t; + +typedef uint32_t mach_vm_size_t; + +typedef uint32_t mach_msg_return_t; + +typedef uint32_t mach_msg_bits_t; + +typedef uintptr_t mach_vm_address_t; + +typedef uint32_t dispatch_mach_msg_t; + +typedef uint32_t dispatch_mach_t; + +typedef uint32_t dispatch_mach_reason_t; + +typedef uint32_t voucher_activity_mode_t; + +typedef uint32_t voucher_activity_trace_id_t; + +typedef uint32_t voucher_activity_id_t; + +typedef uint32_t _voucher_activity_buffer_hook_t;; + +typedef uint32_t voucher_activity_flag_t; + +typedef struct { } mach_msg_header_t; + + +typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, + dispatch_mach_msg_t, mach_error_t); + +typedef void (*dispatch_mach_msg_destructor_t)(void*); + +// Print a warning when an unported code path executes. +#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) + +/* + * Stub out defines for other missing types + */ + +#if __linux__ +// we fall back to use kevent +#define kevent64_s kevent +#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) +#endif + +// SIZE_T_MAX should not be hardcoded like this here. +#define SIZE_T_MAX (0x7fffffff) + +// Define to 0 the NOTE_ values that are not present on Linux. 
+// Revisit this...would it be better to ifdef out the uses instead?? + +// The following values are passed as part of the EVFILT_TIMER requests + +#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ + +#define NOTE_SECONDS 0x01 +#define NOTE_USECONDS 0x02 +#define NOTE_NSECONDS 0x04 +#define NOTE_ABSOLUTE 0x08 +#define NOTE_CRITICAL 0x10 +#define NOTE_BACKGROUND 0x20 +#define NOTE_LEEWAY 0x40 + +// need to catch the following usage if it happens .. +// we simply return '0' as a value probably not correct + +#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) + +#endif diff --git a/src/shims/lock.c b/src/shims/lock.c new file mode 100644 index 000000000..2fab69107 --- /dev/null +++ b/src/shims/lock.c @@ -0,0 +1,421 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#define _dlock_syscall_switch(err, syscall, ...) \ + for (;;) { \ + int err; \ + switch ((err = ((syscall) < 0 ? 
errno : 0))) { \ + case EINTR: continue; \ + __VA_ARGS__ \ + } \ + break; \ + } + +#if TARGET_OS_MAC +_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION, + "values should be the same"); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, + uint32_t timeout) +{ + int option; + if (flags & DLOCK_LOCK_DATA_CONTENTION) { + option = SWITCH_OPTION_OSLOCK_DEPRESS; + } else { + option = SWITCH_OPTION_DEPRESS; + } + thread_switch(_dispatch_lock_owner(value), option, timeout); +} +#endif + +#pragma mark - ulock wrappers +#if HAVE_UL_COMPARE_AND_WAIT + +static int +_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, + uint32_t flags) +{ + dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); + int rc; + _dlock_syscall_switch(err, + rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout), + case 0: return rc > 0 ? ENOTEMPTY : 0; + case ETIMEDOUT: case EFAULT: return err; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); + ); +} + +static void +_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags) +{ + dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); + _dlock_syscall_switch(err, + __ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0), + case 0: case ENOENT: break; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); + ); +} + +#endif +#if HAVE_UL_UNFAIR_LOCK + +// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT +static int +_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, + dispatch_lock_options_t flags) +{ + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + // + timeout = timeout < 1000 ? 1 : timeout / 1000; + _dispatch_thread_switch(val, flags, timeout); + return 0; + } + int rc; + _dlock_syscall_switch(err, + rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout), + case 0: return rc > 0 ? 
ENOTEMPTY : 0; + case ETIMEDOUT: case EFAULT: return err; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); + ); +} + +static void +_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) +{ + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + // + return; + } + _dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0), + case 0: case ENOENT: break; + default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); + ); +} + +#endif +#pragma mark - futex wrappers +#if HAVE_FUTEX +#include +#include + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_futex(uint32_t *uaddr, int op, uint32_t val, + const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3, + int opflags) +{ + return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); +} + +static int +_dispatch_futex_wait(uint32_t *uaddr, uint32_t val, + const struct timespec *timeout, int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags), + case 0: case EWOULDBLOCK: case ETIMEDOUT: return err; + default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed"); + ); +} + +static void +_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) +{ + int rc; + _dlock_syscall_switch(err, + rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); + ); +} + +static void +_dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, + int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout, + NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); + ); +} + +static void +_dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) +{ + _dlock_syscall_switch(err, + _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags), + case 0: return; + default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); + ); 
+} + +#endif +#pragma mark - wait for address + +void +_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, + dispatch_lock_options_t flags) +{ +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wait((uint32_t *)address, value, 0, flags); +#elif HAVE_FUTEX + _dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG); +#else + mach_msg_timeout_t timeout = 1; + while (os_atomic_load(address, relaxed) == value) { + thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++); + } +#endif + (void)flags; +} + +void +_dispatch_wake_by_address(uint32_t volatile *address) +{ +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL); +#elif HAVE_FUTEX + _dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG); +#else + (void)address; +#endif +} + +#pragma mark - thread event + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t +_dispatch_thread_semaphore_create(void) +{ + semaphore_t s4; + kern_return_t kr; + while (unlikely(kr = semaphore_create(mach_task_self(), &s4, + SYNC_POLICY_FIFO, 0))) { + DISPATCH_VERIFY_MIG(kr); + _dispatch_temporary_resource_shortage(); + } + return s4; +} + +void +_dispatch_thread_semaphore_dispose(void *ctxt) +{ + semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; + kern_return_t kr = semaphore_destroy(mach_task_self(), s4); + DISPATCH_VERIFY_MIG(kr); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} +#endif + +void +_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + kern_return_t kr = semaphore_signal(dte->dte_semaphore); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT + _dispatch_ulock_wake(&dte->dte_value, 0); +#elif HAVE_FUTEX + _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); +#elif USE_POSIX_SEM + int rc = sem_post(&dte->dte_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#endif +} + +void 
+_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + kern_return_t kr; + do { + kr = semaphore_wait(dte->dte_semaphore); + } while (unlikely(kr == KERN_ABORTED)); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + for (;;) { + uint32_t value = os_atomic_load(&dte->dte_value, acquire); + if (likely(value == 0)) return; + if (unlikely(value != UINT32_MAX)) { + DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value"); + } +#if HAVE_UL_COMPARE_AND_WAIT + int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0); + dispatch_assert(rc == 0 || rc == EFAULT); +#elif HAVE_FUTEX + _dispatch_futex_wait(&dte->dte_value, UINT32_MAX, + NULL, FUTEX_PRIVATE_FLAG); +#endif + } +#elif USE_POSIX_SEM + int rc; + do { + rc = sem_wait(&dte->dte_sem); + } while (unlikely(rc != 0)); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +#pragma mark - unfair lock + +#if HAVE_UL_UNFAIR_LOCK +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self; + dispatch_lock tid_old, tid_new; + int rc; + + for (;;) { + os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, { + if (likely(!_dispatch_lock_is_locked(tid_old))) { + tid_new = next; + } else { + tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; + if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + } + }); + if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } + if (tid_new == next) { + return; + } + rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags); + if (rc == ENOTEMPTY) { + next = tid_self & ~DLOCK_NOWAITERS_BIT; + } else { + next = tid_self; + } + } +} +#elif HAVE_FUTEX +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + 
(void)flags; + _dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG); +} +#else +void +_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + uint32_t timeout = 1; + + while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock, + DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) { + if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } + _dispatch_thread_switch(tid_cur, flags, timeout++); + } +} +#endif + +void +_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, + dispatch_lock tid_cur) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + } + +#if HAVE_UL_UNFAIR_LOCK + if (!(tid_cur & DLOCK_NOWAITERS_BIT)) { + _dispatch_unfair_lock_wake(&dul->dul_lock, 0); + } +#elif HAVE_FUTEX + // futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS + _dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG); +#else + (void)dul; +#endif +} + +#pragma mark - gate lock + +void +_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, + dispatch_lock_options_t flags) +{ + dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new; + uint32_t timeout = 1; + + for (;;) { + os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, { + if (likely(tid_old == value)) { + os_atomic_rmw_loop_give_up_with_fence(acquire, return); + } +#ifdef DLOCK_NOWAITERS_BIT + tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; +#else + tid_new = tid_old | DLOCK_WAITERS_BIT; +#endif + if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + }); + if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); + } +#if HAVE_UL_UNFAIR_LOCK + _dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 
0, flags); +#elif HAVE_FUTEX + _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG); +#else + _dispatch_thread_switch(tid_new, flags, timeout++); +#endif + (void)timeout; + } +} + +void +_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + } + +#if HAVE_UL_UNFAIR_LOCK + _dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL); +#elif HAVE_FUTEX + _dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG); +#else + (void)dgl; +#endif +} diff --git a/src/shims/lock.h b/src/shims/lock.h new file mode 100644 index 000000000..246c80738 --- /dev/null +++ b/src/shims/lock.h @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_LOCK__ +#define __DISPATCH_SHIMS_LOCK__ + +#pragma mark - platform macros + +DISPATCH_ENUM(dispatch_lock_options, uint32_t, + DLOCK_LOCK_NONE = 0x00000000, + DLOCK_LOCK_DATA_CONTENTION = 0x00010000, +); + +#if TARGET_OS_MAC + +typedef mach_port_t dispatch_lock_owner; +typedef uint32_t dispatch_lock; + +#define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL) +#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc) +#define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001) +#define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) +#define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port()) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_owner +_dispatch_lock_owner(dispatch_lock lock_value) +{ + lock_value &= DLOCK_OWNER_MASK; + if (lock_value) { + lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT; + } + return lock_value; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +{ + // equivalent to _dispatch_lock_owner(lock_value) == tid + return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_waiters(dispatch_lock lock_value) +{ + bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT); + return _dispatch_lock_is_locked(lock_value) != nowaiters_bit; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_failed_trylock(dispatch_lock lock_value) +{ + return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT); +} + +#elif defined(__linux__) +#include +#include +#include /* For SYS_xxx definitions */ + +typedef uint32_t dispatch_lock; +typedef pid_t dispatch_lock_owner; + +#define DLOCK_OWNER_NULL ((dispatch_lock_owner)0) +#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) +#define 
DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) +#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) +#define _dispatch_tid_self() \ + ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid)) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_owner +_dispatch_lock_owner(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_OWNER_MASK); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +{ + return _dispatch_lock_owner(lock_value) == tid; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_waiters(dispatch_lock lock_value) +{ + return (lock_value & DLOCK_WAITERS_BIT); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lock_has_failed_trylock(dispatch_lock lock_value) +{ + return !(lock_value & DLOCK_FAILED_TRYLOCK_BIT); +} + +#else +# error define _dispatch_lock encoding scheme for your platform here +#endif + +#if __has_include() +#include +#endif + +#ifndef HAVE_UL_COMPARE_AND_WAIT +#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200) +# define HAVE_UL_COMPARE_AND_WAIT 1 +#else +# define HAVE_UL_COMPARE_AND_WAIT 0 +#endif +#endif // HAVE_UL_COMPARE_AND_WAIT + +#ifndef HAVE_UL_UNFAIR_LOCK +#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200) +# define HAVE_UL_UNFAIR_LOCK 1 +#else +# define HAVE_UL_UNFAIR_LOCK 0 +#endif +#endif // HAVE_UL_UNFAIR_LOCK + +#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) +#endif + +#ifndef HAVE_FUTEX +#ifdef __linux__ +#define HAVE_FUTEX 1 +#else +#define HAVE_FUTEX 0 +#endif +#endif // HAVE_FUTEX + +#if USE_MACH_SEM +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), 
"Use-after-free of dispatch_semaphore_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) +#define DISPATCH_GROUP_VERIFY_KR(x) do { \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (unlikely((x) == -1)) { \ + DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ + } \ + } while (0) +#endif + +#pragma mark - compare and wait + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, + dispatch_lock_options_t flags); +void _dispatch_wake_by_address(uint32_t volatile *address); + +#pragma mark - thread event +/** + * @typedef dispatch_thread_event_t + * + * @abstract + * Dispatch Thread Events are used for one-time synchronization between threads. + * + * @discussion + * Dispatch Thread Events are cheap synchronization points used when a thread + * needs to block until a certain event has happened. Dispatch Thread Event + * must be initialized and destroyed with _dispatch_thread_event_init() and + * _dispatch_thread_event_destroy(). + * + * A Dispatch Thread Event must be waited on and signaled exactly once between + * initialization and destruction. These objects are simpler than semaphores + * and do not support being signaled and waited on an arbitrary number of times. 
+ * + * This locking primitive has no notion of ownership + */ +typedef struct dispatch_thread_event_s { +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + union { + semaphore_t dte_semaphore; + uint32_t dte_value; + }; +#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + // 1 means signalled but not waited on yet + // UINT32_MAX means waited on, but not signalled yet + // 0 is the initial and final state + uint32_t dte_value; +#elif USE_POSIX_SEM + sem_t dte_sem; +#else +# error define dispatch_thread_event_s for your platform +#endif +} dispatch_thread_event_s, *dispatch_thread_event_t; + +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK +semaphore_t _dispatch_thread_semaphore_create(void); +void _dispatch_thread_semaphore_dispose(void *); + +DISPATCH_ALWAYS_INLINE +static inline semaphore_t +_dispatch_get_thread_semaphore(void) +{ + semaphore_t sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + if (unlikely(!sema)) { + return _dispatch_thread_semaphore_create(); + } + _dispatch_thread_setspecific(dispatch_sema4_key, NULL); + return sema; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_put_thread_semaphore(semaphore_t sema) +{ + semaphore_t old_sema = (semaphore_t)(uintptr_t) + _dispatch_thread_getspecific(dispatch_sema4_key); + _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); + if (unlikely(old_sema)) { + return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); + } +} +#endif + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_thread_event_wait_slow(dispatch_thread_event_t); +void _dispatch_thread_event_signal_slow(dispatch_thread_event_t); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_init(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + dte->dte_semaphore = _dispatch_get_thread_semaphore(); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + dte->dte_value = 0; +#elif USE_POSIX_SEM + int rc 
= sem_init(&dte->dte_sem, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_signal(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_event_signal_slow(dte); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { + // 0 -> 1 transition doesn't need a signal + // force a wake even when the value is corrupt, + // waiters do the validation + return; + } +#elif USE_POSIX_SEM + // fallthrough +#endif + _dispatch_thread_event_signal_slow(dte); +} + + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_wait(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_thread_event_wait_slow(dte); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + if (os_atomic_dec(&dte->dte_value, acquire) == 0) { + // 1 -> 0 is always a valid transition, so we can return + // for any other value, go to the slowpath which checks it's not corrupt + return; + } +#elif USE_POSIX_SEM + // fallthrough +#endif + _dispatch_thread_event_wait_slow(dte); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_event_destroy(dispatch_thread_event_t dte) +{ +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { + _dispatch_put_thread_semaphore(dte->dte_semaphore); + return; + } +#endif +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX + // nothing to do + dispatch_assert(dte->dte_value == 0); +#elif USE_POSIX_SEM + int rc = sem_destroy(&dte->dte_sem); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#endif +} + +#pragma mark - unfair lock + +typedef struct dispatch_unfair_lock_s { + dispatch_lock dul_lock; +} dispatch_unfair_lock_s, *dispatch_unfair_lock_t; + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t 
l, + dispatch_lock_options_t options); +void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l, + dispatch_lock tid_cur); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unfair_lock_lock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_self = _dispatch_tid_self(); + if (likely(os_atomic_cmpxchg(&l->dul_lock, + DLOCK_OWNER_NULL, tid_self, acquire))) { + return; + } + return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, + dispatch_lock_owner *owner) +{ + dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self(); + + os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, { + if (likely(!_dispatch_lock_is_locked(tid_old))) { + tid_new = tid_self; + } else { +#ifdef DLOCK_NOFAILED_TRYLOCK_BIT + tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT; +#else + tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT; +#endif + } + }); + if (owner) *owner = _dispatch_lock_owner(tid_new); + return !_dispatch_lock_is_locked(tid_old); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_old, tid_new; + + os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, { +#ifdef DLOCK_NOFAILED_TRYLOCK_BIT + if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) { + tid_new = DLOCK_OWNER_NULL; + } else { + tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT; + } +#else + if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) { + tid_new = DLOCK_OWNER_NULL; + } else { + tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT; + } +#endif + }); + if (unlikely(tid_new)) { + // unlock failed, renew the lock, which needs an acquire barrier + os_atomic_thread_fence(acquire); + return false; + } + if (unlikely(_dispatch_lock_has_waiters(tid_old))) { + _dispatch_unfair_lock_unlock_slow(l, tid_old); + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool 
+_dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); +#if HAVE_FUTEX + if (likely(os_atomic_cmpxchgv(&l->dul_lock, + tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) { + return false; + } +#else + tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release); + if (likely(tid_cur == tid_self)) return false; +#endif + _dispatch_unfair_lock_unlock_slow(l, tid_cur); + return _dispatch_lock_has_failed_trylock(tid_cur); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l) +{ + (void)_dispatch_unfair_lock_unlock_had_failed_trylock(l); +} + +#pragma mark - gate lock + +#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX +#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1 +#else +#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0 +#endif + +#define DLOCK_GATE_UNLOCKED ((dispatch_lock)0) + +#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0) +#define DLOCK_ONCE_DONE (~(dispatch_once_t)0) + +typedef struct dispatch_gate_s { + dispatch_lock dgl_lock; +} dispatch_gate_s, *dispatch_gate_t; + +typedef struct dispatch_once_gate_s { + union { + dispatch_gate_s dgo_gate; + dispatch_once_t dgo_once; + }; +} dispatch_once_gate_s, *dispatch_once_gate_t; + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value, + uint32_t flags); +void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur); + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_gate_tryenter(dispatch_gate_t l) +{ + dispatch_lock tid_self = _dispatch_tid_self(); + return likely(os_atomic_cmpxchg(&l->dgl_lock, + DLOCK_GATE_UNLOCKED, tid_self, acquire)); +} + +#define _dispatch_gate_wait(l, flags) \ + _dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_gate_broadcast(dispatch_gate_t l) +{ + dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + tid_cur = os_atomic_xchg(&l->dgl_lock, 
DLOCK_GATE_UNLOCKED, release); + if (likely(tid_cur == tid_self)) return; + _dispatch_gate_broadcast_slow(l, tid_cur); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_once_gate_tryenter(dispatch_once_gate_t l) +{ + dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self(); + return likely(os_atomic_cmpxchg(&l->dgo_once, + DLOCK_ONCE_UNLOCKED, tid_self, acquire)); +} + +#define _dispatch_once_gate_wait(l) \ + _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \ + DLOCK_LOCK_NONE) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_once_gate_broadcast(dispatch_once_gate_t l) +{ + dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self(); + // see once.c for explanation about this trick + os_atomic_maximally_synchronizing_barrier(); + // above assumed to contain release barrier + tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed); + if (likely(tid_cur == tid_self)) return; + _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur); +} + +#endif // __DISPATCH_SHIMS_LOCK__ diff --git a/src/shims/time.h b/src/shims/time.h index 7b34bc7af..7b297711c 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -132,5 +132,10 @@ _dispatch_absolute_time(void) #endif // HAVE_MACH_ABSOLUTE_TIME } +static inline uint64_t +_dispatch_approximate_time(void) +{ + return _dispatch_absolute_time(); +} #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index cf5238512..2e3ece8b0 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -39,32 +39,51 @@ #if __has_include() #include #endif + +#if !defined(OS_GS_RELATIVE) && (defined(__i386__) || defined(__x86_64__)) +#define OS_GS_RELATIVE __attribute__((address_space(256))) +#endif + +#ifdef _os_tsd_get_base +#ifdef OS_GS_RELATIVE +typedef long dispatch_tsd_pair_t \ + __attribute__((vector_size(sizeof(long) * 2), aligned(sizeof(long)))); +#define _os_tsd_get_pair_address(k) \ + (dispatch_tsd_pair_t OS_GS_RELATIVE *)((k) * 
sizeof(long)) +#else +typedef struct { void *a; void *b; } dispatch_tsd_pair_t; +#define _os_tsd_get_pair_address(k) \ + (dispatch_tsd_pair_t *)(_os_tsd_get_base() + (k)) +#endif +#endif // _os_tsd_get_base #endif #if DISPATCH_USE_DIRECT_TSD +// dispatch_queue_key & dispatch_frame_key need to be contiguous +// in that order, and queue_key to be an even number static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; -static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY1; +static const unsigned long dispatch_frame_key = __PTK_LIBDISPATCH_KEY1; static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; -static const unsigned long dispatch_io_key = __PTK_LIBDISPATCH_KEY3; -static const unsigned long dispatch_apply_key = __PTK_LIBDISPATCH_KEY4; +static const unsigned long dispatch_context_key = __PTK_LIBDISPATCH_KEY3; +static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = + __PTK_LIBDISPATCH_KEY4; static const unsigned long dispatch_defaultpriority_key =__PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; #elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif -#if DISPATCH_USE_OS_SEMAPHORE_CACHE -static const unsigned long dispatch_sema4_key = __TSD_SEMAPHORE_CACHE; -#else static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7; -#endif -static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = - __PTK_LIBDISPATCH_KEY8; #ifndef __TSD_THREAD_QOS_CLASS #define __TSD_THREAD_QOS_CLASS 4 #endif +#ifndef __TSD_THREAD_VOUCHER +#define __TSD_THREAD_VOUCHER 6 +#endif static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; +static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; DISPATCH_TSD_INLINE static inline void @@ -73,24 +92,89 @@ 
_dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) if (!*k || !d) return; dispatch_assert_zero(pthread_key_init_np((int)*k, d)); } +#elif DISPATCH_USE_THREAD_LOCAL_STORAGE + +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) +{ + dispatch_assert_zero(pthread_key_create(k, d)); +} + +struct dispatch_tsd { + pid_t tid; + void *dispatch_queue_key; + void *dispatch_frame_key; + void *dispatch_cache_key; + void *dispatch_context_key; + void *dispatch_pthread_root_queue_observer_hooks_key; + void *dispatch_defaultpriority_key; +#if DISPATCH_INTROSPECTION + void *dispatch_introspection_key; +#elif DISPATCH_PERF_MON + void *dispatch_bcounter_key; +#endif +#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK + void *dispatch_sema4_key; +#endif + void *dispatch_priority_key; + void *dispatch_voucher_key; + void *dispatch_deferred_items_key; +}; + +extern __thread struct dispatch_tsd __dispatch_tsd; +extern pthread_key_t __dispatch_tsd_key; +extern void libdispatch_tsd_init(void); +extern void _libdispatch_tsd_cleanup(void *ctx); + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_tsd * +_dispatch_get_tsd_base(void) +{ + if (unlikely(__dispatch_tsd.tid == 0)) { + libdispatch_tsd_init(); + } + OS_COMPILER_CAN_ASSUME(__dispatch_tsd.tid != 0); + return &__dispatch_tsd; +} + +#define _dispatch_thread_getspecific(key) \ + (_dispatch_get_tsd_base()->key) +#define _dispatch_thread_setspecific(key, value) \ + (void)(_dispatch_get_tsd_base()->key = (value)) + +#define _dispatch_thread_getspecific_pair(k1, p1, k2, p2) \ + ( *(p1) = _dispatch_thread_getspecific(k1), \ + *(p2) = _dispatch_thread_getspecific(k2) ) + +#define _dispatch_thread_getspecific_packed_pair(k1, k2, p) \ + ( (p)[0] = _dispatch_thread_getspecific(k1), \ + (p)[1] = _dispatch_thread_getspecific(k2) ) + +#define _dispatch_thread_setspecific_pair(k1, p1, k2, p2) \ + ( _dispatch_thread_setspecific(k1,p1), \ + _dispatch_thread_setspecific(k2,p2) ) + 
+#define _dispatch_thread_setspecific_packed_pair(k1, k2, p) \ + ( _dispatch_thread_setspecific(k1,(p)[0]), \ + _dispatch_thread_setspecific(k2,(p)[1]) ) + #else extern pthread_key_t dispatch_queue_key; -extern pthread_key_t dispatch_voucher_key; -#if DISPATCH_USE_OS_SEMAPHORE_CACHE -#error "Invalid DISPATCH_USE_OS_SEMAPHORE_CACHE configuration" -#else -extern pthread_key_t dispatch_sema4_key; -#endif +extern pthread_key_t dispatch_frame_key; extern pthread_key_t dispatch_cache_key; -extern pthread_key_t dispatch_io_key; -extern pthread_key_t dispatch_apply_key; +extern pthread_key_t dispatch_context_key; +extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; extern pthread_key_t dispatch_defaultpriority_key; #if DISPATCH_INTROSPECTION extern pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; #endif -exern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; +extern pthread_key_t dispatch_sema4_key; +extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_voucher_key; +extern pthread_key_t dispatch_deferred_items_key; DISPATCH_TSD_INLINE static inline void @@ -100,8 +184,7 @@ _dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) } #endif -#if DISPATCH_USE_TSD_BASE && !DISPATCH_DEBUG -#else // DISPATCH_USE_TSD_BASE +#ifndef DISPATCH_USE_THREAD_LOCAL_STORAGE DISPATCH_TSD_INLINE static inline void _dispatch_thread_setspecific(pthread_key_t k, void *v) @@ -109,8 +192,14 @@ _dispatch_thread_setspecific(pthread_key_t k, void *v) #if DISPATCH_USE_DIRECT_TSD if (_pthread_has_direct_tsd()) { (void)_pthread_setspecific_direct(k, v); - return; + } else { +#if TARGET_IPHONE_SIMULATOR + (void)_pthread_setspecific_static(k, v); // rdar://26058142 +#else + __builtin_trap(); // unreachable +#endif } + return; #endif dispatch_assert_zero(pthread_setspecific(k, v)); } @@ -126,7 +215,70 @@ _dispatch_thread_getspecific(pthread_key_t k) #endif return pthread_getspecific(k); 
} -#endif // DISPATCH_USE_TSD_BASE + +// this is used when loading a pair at once and the caller will want to +// look at each component individually. +// some platforms can load a pair of pointers efficiently that way (like arm) +// intel doesn't, hence this degrades to two loads on intel +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_getspecific_pair(pthread_key_t k1, void **p1, + pthread_key_t k2, void **p2) +{ + *p1 = _dispatch_thread_getspecific(k1); + *p2 = _dispatch_thread_getspecific(k2); +} + +// this is used for save/restore purposes +// and the caller doesn't need to look at a specific component +// this does SSE on intel, and SSE is bad at breaking/assembling components +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_getspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, + void **p) +{ +#if DISPATCH_USE_DIRECT_TSD && defined(_os_tsd_get_pair_address) + dispatch_assert(k2 == k1 + 1); + if (_pthread_has_direct_tsd()) { + *(dispatch_tsd_pair_t *)p = *_os_tsd_get_pair_address(k1); + return; + } +#endif + p[0] = _dispatch_thread_getspecific(k1); + p[1] = _dispatch_thread_getspecific(k2); +} + +// this is used when storing a pair at once from separated components +// some platforms can store a pair of pointers efficiently that way (like arm) +// intel doesn't, hence this degrades to two stores on intel +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_setspecific_pair(pthread_key_t k1, void *p1, + pthread_key_t k2, void *p2) +{ + _dispatch_thread_setspecific(k1, p1); + _dispatch_thread_setspecific(k2, p2); +} + +// this is used for save/restore purposes +// and the caller doesn't need to look at a specific component +// this does SSE on intel, and SSE is bad at breaking/assembling components +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, + void **p) +{ +#if DISPATCH_USE_DIRECT_TSD && defined(_os_tsd_get_pair_address) + dispatch_assert(k2 == k1 + 1); + 
if (_pthread_has_direct_tsd()) { + *_os_tsd_get_pair_address(k1) = *(dispatch_tsd_pair_t *)p; + return; + } +#endif + _dispatch_thread_setspecific(k1, p[0]); + _dispatch_thread_setspecific(k2, p[1]); +} +#endif #if TARGET_OS_WIN32 #define _dispatch_thread_self() ((uintptr_t)GetCurrentThreadId()) @@ -141,22 +293,28 @@ _dispatch_thread_getspecific(pthread_key_t k) #if TARGET_OS_WIN32 #define _dispatch_thread_port() ((mach_port_t)0) -#else +#elif !DISPATCH_USE_THREAD_LOCAL_STORAGE #if DISPATCH_USE_DIRECT_TSD -#define _dispatch_thread_port() ((mach_port_t)_dispatch_thread_getspecific(\ - _PTHREAD_TSD_SLOT_MACH_THREAD_SELF)) +#define _dispatch_thread_port() ((mach_port_t)(uintptr_t)\ + _dispatch_thread_getspecific(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF)) #else -#define _dispatch_thread_port() (pthread_mach_thread_np(_dispatch_thread_self())) +#define _dispatch_thread_port() pthread_mach_thread_np(_dispatch_thread_self()) #endif #endif +#if HAVE_MACH +#define _dispatch_get_thread_mig_reply_port() ((mach_port_t)(uintptr_t) \ + _dispatch_thread_getspecific(_PTHREAD_TSD_SLOT_MIG_REPLY)) +#define _dispatch_set_thread_mig_reply_port(p) ( \ + _dispatch_thread_setspecific(_PTHREAD_TSD_SLOT_MIG_REPLY, \ + (void*)(uintptr_t)(p))) +#endif + DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { -#if TARGET_IPHONE_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090 - return 0; -#elif __has_include() +#if __has_include() return _os_cpu_number(); #elif defined(__x86_64__) || defined(__i386__) struct { uintptr_t p1, p2; } p; diff --git a/src/shims/yield.h b/src/shims/yield.h index 2a884d6a9..1850aeeed 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -33,7 +33,7 @@ #if DISPATCH_HW_CONFIG_UP #define _dispatch_wait_until(c) do { \ int _spins = 0; \ - while (!(c)) { \ + while (!fastpath(c)) { \ _spins++; \ _dispatch_preemption_yield(_spins); \ } } while (0) @@ -44,7 +44,7 @@ #endif #define _dispatch_wait_until(c) do { \ int _spins = 
-(DISPATCH_WAIT_SPINS); \ - while (!(c)) { \ + while (!fastpath(c)) { \ if (slowpath(_spins++ >= 0)) { \ _dispatch_preemption_yield(_spins); \ } else { \ @@ -53,7 +53,7 @@ } } while (0) #else #define _dispatch_wait_until(c) do { \ - while (!(c)) { \ + while (!fastpath(c)) { \ dispatch_hardware_pause(); \ } } while (0) #endif @@ -109,16 +109,18 @@ #pragma mark _dispatch_preemption_yield #if HAVE_MACH -#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) +#if defined(SWITCH_OPTION_OSLOCK_DEPRESS) #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS #else #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS #endif -#define _dispatch_preemption_yield(n) _dispatch_thread_switch(MACH_PORT_NULL, \ +#define _dispatch_preemption_yield(n) thread_switch(MACH_PORT_NULL, \ + DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) +#define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #else #define _dispatch_preemption_yield(n) pthread_yield_np() +#define _dispatch_preemption_yield_to(th, n) pthread_yield_np() #endif // HAVE_MACH #pragma mark - @@ -132,25 +134,15 @@ #endif #if HAVE_MACH -#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) && !(TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) -#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ +#if defined(SWITCH_OPTION_DISPATCH_CONTENTION) +#define _dispatch_contention_usleep(u) thread_switch(MACH_PORT_NULL, \ SWITCH_OPTION_DISPATCH_CONTENTION, (u)) #else -#define _dispatch_contention_usleep(u) _dispatch_thread_switch(MACH_PORT_NULL, \ +#define _dispatch_contention_usleep(u) thread_switch(MACH_PORT_NULL, \ SWITCH_OPTION_WAIT, (((u)-1)/1000)+1) #endif #else #define _dispatch_contention_usleep(u) usleep((u)) #endif // HAVE_MACH -#pragma mark - -#pragma mark _dispatch_thread_switch - -#if 
HAVE_MACH -#define _dispatch_thread_switch(thread_name, option, option_time) \ - thread_switch((thread_name), (option), (option_time)) - -#endif // HAVE_MACH - #endif // __DISPATCH_SHIMS_YIELD__ diff --git a/src/source.c b/src/source.c index dde7db9af..a5a2c94a2 100644 --- a/src/source.c +++ b/src/source.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -26,13 +26,17 @@ #include #define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 -#define DKEV_DISPOSE_IGNORE_ENOENT 0x2 +#define DKEV_UNREGISTER_DISCONNECTED 0x2 +#define DKEV_UNREGISTER_REPLY_REMOVE 0x4 +#define DKEV_UNREGISTER_WAKEUP 0x8 +static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); static void _dispatch_source_merge_kevent(dispatch_source_t ds, const _dispatch_kevent_qos_s *ke); -static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp); +static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, + pthread_priority_t pp, uint32_t *flgp); static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - int options); + unsigned int options); static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke); @@ -49,33 +53,46 @@ static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx); static inline unsigned long _dispatch_source_timer_data( dispatch_source_refs_t dr, unsigned long prev); -static long _dispatch_kq_update(const _dispatch_kevent_qos_s *); -static void _dispatch_memorystatus_init(void); +static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke); +static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke); +static void _dispatch_memorypressure_init(void); #if HAVE_MACH static void _dispatch_mach_host_calendar_change_register(void); +#if 
DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static void _dispatch_mach_recv_msg_buf_init(void); static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); +#endif static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags); -static inline void _dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke); +static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke); +static mach_msg_size_t _dispatch_kevent_mach_msg_size( + _dispatch_kevent_qos_s *ke); #else static inline void _dispatch_mach_host_calendar_change_register(void) {} static inline void _dispatch_mach_recv_msg_buf_init(void) {} #endif static const char * _evfiltstr(short filt); #if DISPATCH_DEBUG -static void _dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, - const char* str); +static void dispatch_kevent_debug(const char *verb, + const _dispatch_kevent_qos_s *kev, int i, int n, + const char *function, unsigned int line); static void _dispatch_kevent_debugger(void *context); #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) #else static inline void -_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} +dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, + int i, int n, const char *function, unsigned int line) +{ + (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line; +} #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif +#define _dispatch_kevent_debug(verb, _kev) \ + dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__) +#define _dispatch_kevent_debug_n(verb, _kev, i, n) \ + dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__) #ifndef DISPATCH_MGR_QUEUE_DEBUG #define DISPATCH_MGR_QUEUE_DEBUG 0 #endif @@ -83,32 +100,47 @@ _dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev DISPATCH_UNUSED, #define 
_dispatch_kevent_mgr_debug _dispatch_kevent_debug #else static inline void -_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} +_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {} #endif #pragma mark - #pragma mark dispatch_source_t dispatch_source_t -dispatch_source_create(dispatch_source_type_t type, - uintptr_t handle, - unsigned long mask, - dispatch_queue_t dq) +dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, + unsigned long mask, dispatch_queue_t dq) { + // ensure _dispatch_evfilt_machport_direct_enabled is initialized + _dispatch_root_queues_init(); const _dispatch_kevent_qos_s *proto_kev = &type->ke; dispatch_source_t ds; dispatch_kevent_t dk; // input validation if (type == NULL || (mask & ~type->mask)) { - return NULL; + return DISPATCH_BAD_INPUT; + } + if (type->mask && !mask) { + // expect a non-zero mask when the type declares one ... except + switch (type->ke.filter) { + case DISPATCH_EVFILT_TIMER: + break; // timers don't need masks +#if DISPATCH_USE_VM_PRESSURE + case EVFILT_VM: + break; // type->init forces the only acceptable mask +#endif + case DISPATCH_EVFILT_MACH_NOTIFICATION: + break; // type->init handles zero mask as a legacy case + default: + // otherwise reject as invalid input + return DISPATCH_BAD_INPUT; + } } switch (type->ke.filter) { case EVFILT_SIGNAL: if (handle >= NSIG) { - return NULL; + return DISPATCH_BAD_INPUT; } break; case EVFILT_FS: @@ -121,12 +153,12 @@ dispatch_source_create(dispatch_source_type_t type, case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: if (handle) { - return NULL; + return DISPATCH_BAD_INPUT; } break; case DISPATCH_EVFILT_TIMER: - if (!!handle ^ !!type->ke.ident) { - return NULL; + if ((handle == 0) != (type->ke.ident == 0)) { + return DISPATCH_BAD_INPUT; } break; default: @@ -136,42 +168,54 @@ dispatch_source_create(dispatch_source_type_t type, ds = 
_dispatch_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. - _dispatch_queue_init((dispatch_queue_t)ds); + _dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true); ds->dq_label = "source"; - ds->do_ref_cnt++; // the reference the manager queue holds - ds->do_ref_cnt++; // since source is created suspended - ds->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); + switch (type->ke.filter) { + case DISPATCH_EVFILT_CUSTOM_OR: + dk = DISPATCH_KEV_CUSTOM_OR; + break; + case DISPATCH_EVFILT_CUSTOM_ADD: + dk = DISPATCH_KEV_CUSTOM_ADD; + break; + default: + dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); + dk->dk_kevent = *proto_kev; + dk->dk_kevent.ident = handle; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.fflags |= (uint32_t)mask; + dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk; + TAILQ_INIT(&dk->dk_sources); + ds->ds_pending_data_mask = dk->dk_kevent.fflags; + ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; + if (EV_UDATA_SPECIFIC & proto_kev->flags) { + dk->dk_kevent.flags |= EV_DISPATCH; + ds->ds_is_direct_kevent = true; + ds->ds_needs_rearm = true; + } + break; + } ds->ds_dkev = dk; - ds->ds_pending_data_mask = dk->dk_kevent.fflags; - ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; + if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { - ds->ds_is_level = true; ds->ds_needs_rearm = true; } else if (!(EV_CLEAR & proto_kev->flags)) { // we cheat and use EV_CLEAR to mean a "flag thingy" ds->ds_is_adder = true; } - if (EV_UDATA_SPECIFIC & proto_kev->flags) { - dispatch_assert(!(EV_ONESHOT & proto_kev->flags)); - dk->dk_kevent.flags |= EV_DISPATCH; - ds->ds_is_direct_kevent 
= true; - ds->ds_needs_rearm = true; - } // Some sources require special processing if (type->init != NULL) { type->init(ds, type, handle, mask, dq); } dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); + if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) { + // see _dispatch_source_merge_kevent + dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT)); + dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH); + dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC); + } if (fastpath(!ds->ds_refs)) { ds->ds_refs = _dispatch_calloc(1ul, @@ -179,69 +223,37 @@ dispatch_source_create(dispatch_source_type_t type, } ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); - if (!ds->ds_is_direct_kevent) { - // The initial target queue is the manager queue, in order to get - // the source installed. - ds->do_targetq = &_dispatch_mgr_q; - // First item on the queue sets the user-specified target queue - dispatch_set_target_queue(ds, dq); + if (slowpath(!dq)) { + dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); } else { - if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); - } else { - _dispatch_retain(dq); - } - ds->do_targetq = dq; - _dispatch_queue_priority_inherit_from_target((dispatch_queue_t)ds, dq); - _dispatch_queue_set_override_priority(dq); + _dispatch_retain(dq); } + ds->do_targetq = dq; _dispatch_object_debug(ds, "%s", __func__); return ds; } -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_source_get_kevent_queue(dispatch_source_t ds) -{ - if (ds->ds_is_direct_kevent) { - return ds->do_targetq; - } - return &_dispatch_mgr_q; -} - void _dispatch_source_dispose(dispatch_source_t ds) { _dispatch_object_debug(ds, "%s", __func__); + _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER); free(ds->ds_refs); - _dispatch_queue_destroy(ds); + 
_dispatch_queue_destroy(ds->_as_dq); } void _dispatch_source_xref_dispose(dispatch_source_t ds) { - _dispatch_wakeup(ds); -} - -void -dispatch_source_cancel(dispatch_source_t ds) -{ - _dispatch_object_debug(ds, "%s", __func__); - // Right after we set the cancel flag, someone else - // could potentially invoke the source, do the cancelation, - // unregister the source, and deallocate it. We would - // need to therefore retain/release before setting the bit - - _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_CANCELED, relaxed); - _dispatch_wakeup(ds); - _dispatch_release(ds); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); } long dispatch_source_testcancel(dispatch_source_t ds) { - return (bool)(ds->ds_atomic_flags & DSF_CANCELED); + return (bool)(ds->dq_atomic_flags & DSF_CANCELED); } unsigned long @@ -252,7 +264,7 @@ dispatch_source_get_mask(dispatch_source_t ds) mask = NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorystatus_override) { + else if (ds->ds_memorypressure_override) { mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif @@ -264,7 +276,7 @@ dispatch_source_get_handle(dispatch_source_t ds) { unsigned int handle = (unsigned int)ds->ds_ident_hack; #if TARGET_IPHONE_SIMULATOR - if (ds->ds_memorystatus_override) { + if (ds->ds_memorypressure_override) { handle = 0; } #endif @@ -279,118 +291,173 @@ dispatch_source_get_data(dispatch_source_t ds) data = NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorystatus_override) { + else if (ds->ds_memorypressure_override) { data = NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif return data; } -void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_merge_data2(dispatch_source_t ds, + pthread_priority_t pp, unsigned long val) { _dispatch_kevent_qos_s kev = { .fflags = (typeof(kev.fflags))val, .data = (typeof(kev.data))val, +#if DISPATCH_USE_KEVENT_QOS + .qos = 
(_dispatch_kevent_priority_t)pp, +#endif }; +#if !DISPATCH_USE_KEVENT_QOS + (void)pp; +#endif - dispatch_assert( - ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_ADD || - ds->ds_dkev->dk_kevent.filter == DISPATCH_EVFILT_CUSTOM_OR); - + dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR || + ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD); + _dispatch_kevent_debug("synthetic data", &kev); _dispatch_source_merge_kevent(ds, &kev); } +void +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +{ + _dispatch_source_merge_data2(ds, 0, val); +} + +void +_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val) +{ + _dispatch_source_merge_data2(ds, pp, val); +} + #pragma mark - #pragma mark dispatch_source_handler DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t -_dispatch_source_handler_alloc(dispatch_source_t ds, void *handler, long kind, +_dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) +{ + return os_atomic_load(&dr->ds_handler[kind], relaxed); +} +#define _dispatch_source_get_event_handler(dr) \ + _dispatch_source_get_handler(dr, DS_EVENT_HANDLER) +#define _dispatch_source_get_cancel_handler(dr) \ + _dispatch_source_get_handler(dr, DS_CANCEL_HANDLER) +#define _dispatch_source_get_registration_handler(dr) \ + _dispatch_source_get_handler(dr, DS_REGISTN_HANDLER) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_source_handler_alloc(dispatch_source_t ds, void *func, long kind, bool block) { + // sources don't propagate priority by default + const dispatch_block_flags_t flags = + DISPATCH_BLOCK_HAS_PRIORITY | DISPATCH_BLOCK_NO_VOUCHER; dispatch_continuation_t dc = _dispatch_continuation_alloc(); - if (handler) { - dc->do_vtable = (void *)((block ? DISPATCH_OBJ_BLOCK_RELEASE_BIT : - DISPATCH_OBJ_CTXT_FETCH_BIT) | (kind != DS_EVENT_HANDLER ? 
- DISPATCH_OBJ_ASYNC_BIT : 0l)); - dc->dc_priority = 0; - dc->dc_voucher = NULL; + if (func) { + uintptr_t dc_flags = 0; + + if (kind != DS_EVENT_HANDLER) { + dc_flags |= DISPATCH_OBJ_CONSUME_BIT; + } if (block) { #ifdef __BLOCKS__ - if (slowpath(_dispatch_block_has_private_data(handler))) { - // sources don't propagate priority by default - dispatch_block_flags_t flags = DISPATCH_BLOCK_NO_QOS_CLASS; - flags |= _dispatch_block_get_flags(handler); - _dispatch_continuation_priority_set(dc, - _dispatch_block_get_priority(handler), flags); - } - if (kind != DS_EVENT_HANDLER) { - dc->dc_func = _dispatch_call_block_and_release; - } else { - dc->dc_func = _dispatch_Block_invoke(handler); - } - dc->dc_ctxt = _dispatch_Block_copy(handler); + _dispatch_continuation_init(dc, ds, func, 0, flags, dc_flags); #endif /* __BLOCKS__ */ } else { - dc->dc_func = handler; - dc->dc_ctxt = ds->do_ctxt; + dc_flags |= DISPATCH_OBJ_CTXT_FETCH_BIT; + _dispatch_continuation_init_f(dc, ds, ds->do_ctxt, func, + 0, flags, dc_flags); } - _dispatch_trace_continuation_push((dispatch_queue_t)ds, dc); + _dispatch_trace_continuation_push(ds->_as_dq, dc); } else { + dc->dc_flags = 0; dc->dc_func = NULL; } - dc->dc_data = (void*)kind; return dc; } -static inline void -_dispatch_source_handler_replace(dispatch_source_refs_t dr, long kind, - dispatch_continuation_t dc_new) +DISPATCH_NOINLINE +static void +_dispatch_source_handler_dispose(dispatch_continuation_t dc) { - dispatch_continuation_t dc = dr->ds_handler[kind]; - if (dc) { #ifdef __BLOCKS__ - if ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT) { - Block_release(dc->dc_ctxt); - } + if (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + Block_release(dc->dc_ctxt); + } #endif /* __BLOCKS__ */ - if (dc->dc_voucher) { - _voucher_release(dc->dc_voucher); - dc->dc_voucher = NULL; - } - _dispatch_continuation_free(dc); + if (dc->dc_voucher) { + _voucher_release(dc->dc_voucher); + dc->dc_voucher = VOUCHER_INVALID; } - dr->ds_handler[kind] = dc_new; + 
_dispatch_continuation_free(dc); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_source_handler_take(dispatch_source_t ds, long kind) +{ + return os_atomic_xchg(&ds->ds_refs->ds_handler[kind], NULL, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_handler_free(dispatch_source_t ds, long kind) +{ + dispatch_continuation_t dc = _dispatch_source_handler_take(ds, kind); + if (dc) _dispatch_source_handler_dispose(dc); } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_handler_free(dispatch_source_refs_t dr, long kind) +_dispatch_source_handler_replace(dispatch_source_t ds, long kind, + dispatch_continuation_t dc) { - _dispatch_source_handler_replace(dr, kind, NULL); + if (!dc->dc_func) { + _dispatch_continuation_free(dc); + dc = NULL; + } else if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + dc->dc_ctxt = ds->do_ctxt; + } + dc = os_atomic_xchg(&ds->ds_refs->ds_handler[kind], dc, release); + if (dc) _dispatch_source_handler_dispose(dc); } +DISPATCH_NOINLINE static void -_dispatch_source_set_handler(void *context) +_dispatch_source_set_handler_slow(void *context) { dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); + dispatch_continuation_t dc = context; long kind = (long)dc->dc_data; - dc->dc_data = 0; - if (!dc->dc_func) { - _dispatch_continuation_free(dc); - dc = NULL; - } else if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { - dc->dc_ctxt = ds->do_ctxt; + dc->dc_data = NULL; + _dispatch_source_handler_replace(ds, kind, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_source_set_handler(dispatch_source_t ds, long kind, + dispatch_continuation_t dc) +{ + dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); + if (_dispatch_queue_try_inactive_suspend(ds->_as_dq)) { + _dispatch_source_handler_replace(ds, kind, dc); + return dx_vtable(ds)->do_resume(ds, false); } - 
_dispatch_source_handler_replace(ds->ds_refs, kind, dc); - if (kind == DS_EVENT_HANDLER && dc && dc->dc_priority) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_queue_set_override_priority((dispatch_queue_t)ds); -#endif + _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); + if (kind == DS_REGISTN_HANDLER) { + _dispatch_bug_deprecated("Setting registration handler after " + "the source has been activated"); } + dc->dc_data = (void *)kind; + _dispatch_barrier_trysync_or_async_f(ds->_as_dq, dc, + _dispatch_source_set_handler_slow); } #ifdef __BLOCKS__ @@ -400,8 +467,7 @@ dispatch_source_set_event_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -411,21 +477,15 @@ dispatch_source_set_event_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } void -_dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, - void *ctxt, dispatch_function_t handler) +_dispatch_source_set_event_handler_continuation(dispatch_source_t ds, + dispatch_continuation_t dc) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); - dc->do_vtable = (void *)((long)dc->do_vtable &~DISPATCH_OBJ_CTXT_FETCH_BIT); - dc->dc_other = dc->dc_ctxt; - dc->dc_ctxt = ctxt; - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_trace_continuation_push(ds->_as_dq, dc); + _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } #ifdef 
__BLOCKS__ @@ -435,8 +495,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -446,8 +505,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } #ifdef __BLOCKS__ @@ -457,8 +515,7 @@ dispatch_source_set_registration_handler(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, true); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); } #endif /* __BLOCKS__ */ @@ -468,67 +525,62 @@ dispatch_source_set_registration_handler_f(dispatch_source_t ds, { dispatch_continuation_t dc; dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, false); - _dispatch_barrier_trysync_f((dispatch_queue_t)ds, dc, - _dispatch_source_set_handler); + _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); } #pragma mark - #pragma mark dispatch_source_invoke static void -_dispatch_source_registration_callout(dispatch_source_t ds) +_dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { - dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_REGISTN_HANDLER]; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { + dispatch_continuation_t dc; + + dc = _dispatch_source_handler_take(ds, DS_REGISTN_HANDLER); + if (ds->dq_atomic_flags & (DSF_CANCELED | 
DQF_RELEASED)) { // no registration callout if source is canceled rdar://problem/8955246 - return _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + return _dispatch_source_handler_dispose(dc); } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc); - dr->ds_handler[DS_REGISTN_HANDLER] = NULL; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); } static void -_dispatch_source_cancel_callout(dispatch_source_t ds) +_dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { - dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_CANCEL_HANDLER]; + dispatch_continuation_t dc; + + dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER); ds->ds_pending_data_mask = 0; ds->ds_pending_data = 0; ds->ds_data = 0; - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); - _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); if (!dc) { return; } - if (!(ds->ds_atomic_flags & DSF_CANCELED)) { - return _dispatch_source_handler_free(dr, DS_CANCEL_HANDLER); + if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + return _dispatch_source_handler_dispose(dc); } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - if ((long)dc->do_vtable & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc); - dr->ds_handler[DS_CANCEL_HANDLER] = NULL; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); } static void -_dispatch_source_latch_and_call(dispatch_source_t ds) +_dispatch_source_latch_and_call(dispatch_source_t ds, 
dispatch_queue_t cq, + dispatch_invoke_flags_t flags) { unsigned long prev; - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { - return; - } dispatch_source_refs_t dr = ds->ds_refs; - dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; - prev = dispatch_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); + dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); + prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); if (ds->ds_is_level) { ds->ds_data = ~prev; } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { @@ -539,12 +591,11 @@ _dispatch_source_latch_and_call(dispatch_source_t ds) if (!dispatch_assume(prev) || !dc) { return; } - pthread_priority_t old_dp = _dispatch_set_defaultpriority(ds->dq_priority); - voucher_t voucher = dc->dc_voucher ? _voucher_retain(dc->dc_voucher) : NULL; - _dispatch_continuation_voucher_adopt(dc); // consumes voucher reference - _dispatch_continuation_pop(dc); - if (voucher) dc->dc_voucher = voucher; - _dispatch_reset_defaultpriority(old_dp); + _dispatch_continuation_pop(dc, cq, flags); + if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) { + _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); + dispatch_release(ds); // dispatch_after sources are one-shot + } } static void @@ -553,101 +604,237 @@ _dispatch_source_kevent_unregister(dispatch_source_t ds) _dispatch_object_debug(ds, "%s", __func__); uint32_t flags = (uint32_t)ds->ds_pending_data_mask; dispatch_kevent_t dk = ds->ds_dkev; - if (ds->ds_atomic_flags & DSF_DELETED) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (ds->ds_is_custom_source) { + ds->ds_dkev = NULL; + goto done; + } + + if (ds->ds_is_direct_kevent && + ((dqf & DSF_DELETED) || !(ds->ds_is_installed))) { dk->dk_kevent.flags |= EV_DELETE; // already deleted - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); } if (dk->dk_kevent.filter == 
DISPATCH_EVFILT_TIMER) { ds->ds_dkev = NULL; - _dispatch_timers_unregister(ds, dk); + if (ds->ds_is_installed) { + _dispatch_timers_unregister(ds, dk); + } } else if (!ds->ds_is_direct_kevent) { ds->ds_dkev = NULL; + dispatch_assert((bool)ds->ds_is_installed); TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); _dispatch_kevent_unregister(dk, flags, 0); } else { - int dkev_dispose_options = 0; - if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + unsigned int dkev_dispose_options = 0; + if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } - if (ds->ds_needs_mgr) { - dkev_dispose_options |= DKEV_DISPOSE_IGNORE_ENOENT; - ds->ds_needs_mgr = false; + } else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { + if (!ds->ds_is_direct_kevent) { + dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; + } } long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options); if (r == EINPROGRESS) { _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", ds, dk); - ds->ds_pending_delete = true; + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // deferred unregistration +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS } else if (r == ENOENT) { _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]", ds, dk); - ds->ds_needs_mgr = true; + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // potential concurrent EV_DELETE delivery rdar://22047283 +#endif + } else { + dispatch_assume_zero(r); } ds->ds_dkev = NULL; _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list); } - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev); +done: + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, + DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); + if (dqf & DSF_CANCEL_WAITER) { + _dispatch_wake_by_address(&ds->dq_atomic_flags); + } + 
ds->ds_is_installed = true; ds->ds_needs_rearm = false; // re-arm is pointless and bad now + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk); _dispatch_release(ds); // the retain is done at creation time } -static void +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_source_tryarm(dispatch_source_t ds) +{ + dispatch_queue_flags_t oqf, nqf; + return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + if (oqf & (DSF_DEFERRED_DELETE | DSF_DELETED)) { + // the test is inside the loop because it's convenient but the + // result should not change for the duration of the rmw_loop + os_atomic_rmw_loop_give_up(break); + } + nqf = oqf | DSF_ARMED; + }); +} + +static bool _dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) { switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: _dispatch_timers_update(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); - return; + return true; +#if HAVE_MACH case EVFILT_MACHPORT: - if (ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) { + if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) && + !ds->ds_is_direct_kevent) { new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH } break; +#endif } - if ((ds->ds_atomic_flags & DSF_DELETED) || - _dispatch_kevent_resume(ds->ds_dkev, new_flags, 0)) { - _dispatch_source_kevent_unregister(ds); + if (unlikely(!_dispatch_source_tryarm(ds))) { + return false; } + if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) { + _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, + DSF_ARMED); + return false; + } + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + return true; } static void -_dispatch_source_kevent_register(dispatch_source_t ds) +_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t 
pp) { - dispatch_assert_zero(ds->ds_is_installed); + dispatch_assert_zero((bool)ds->ds_is_installed); switch (ds->ds_dkev->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: _dispatch_timers_update(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); return; } uint32_t flags; - bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, &flags); + bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags); TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + ds->ds_is_installed = true; if (do_resume || ds->ds_needs_rearm) { - _dispatch_source_kevent_resume(ds, flags); + if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) { + _dispatch_source_kevent_unregister(ds); + } + } else { + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); } _dispatch_object_debug(ds, "%s", __func__); } +static void +_dispatch_source_set_event_handler_context(void *ctxt) +{ + dispatch_source_t ds = ctxt; + dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); + + if (dc && (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT)) { + dc->dc_ctxt = ds->do_ctxt; + } +} + +static pthread_priority_t +_dispatch_source_compute_kevent_priority(dispatch_source_t ds) +{ + pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + dispatch_queue_t tq = ds->do_targetq; + pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + + while (unlikely(tq->do_targetq)) { + if (unlikely(tq == &_dispatch_mgr_q)) { + return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + if (unlikely(_dispatch_queue_is_thread_bound(tq))) { + // thread bound hierarchies are weird, we need to install + // from the context of the thread this hierarchy is bound to 
+ return 0; + } + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); + return 0; + } + if (unlikely(!_dispatch_queue_has_immutable_target(tq))) { + if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { + // we're not allowed to dereference tq->do_targetq + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); + return 0; + } + } + if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) { + if (p < tqp) p = tqp; + } + tq = tq->do_targetq; + tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + } + + if (unlikely(!tqp)) { + // pthread root queues opt out of QoS + return 0; + } + return _dispatch_priority_inherit_from_root_queue(p, tq); +} + +void +_dispatch_source_finalize_activation(dispatch_source_t ds) +{ + dispatch_continuation_t dc; + + if (unlikely(ds->ds_is_direct_kevent && + (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) { + return _dispatch_source_kevent_unregister(ds); + } + + dc = _dispatch_source_get_event_handler(ds->ds_refs); + if (dc) { + if (_dispatch_object_is_barrier(dc)) { + _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT); + } + ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + _dispatch_barrier_async_detached_f(ds->_as_dq, ds, + _dispatch_source_set_event_handler_context); + } + } + + // call "super" + _dispatch_queue_finalize_activation(ds->_as_dq); + + if (ds->ds_is_direct_kevent && !ds->ds_is_installed) { + pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); + if (pp) _dispatch_source_kevent_register(ds, pp); + } +} + DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_source_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) +_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, + uint64_t 
*owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) { dispatch_source_t ds = dou._ds; + dispatch_queue_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (_dispatch_queue_class_probe(ds)) { - if (slowpath(_dispatch_queue_drain(ds))) { - DISPATCH_CLIENT_CRASH("Sync onto source"); - } + // Intentionally always drain even when on the manager queue + // and not the source's regular target queue: we need to be able + // to drain timer setting and the like there. + retq = _dispatch_queue_serial_drain(ds->_as_dq, flags, owned, NULL); } // This function performs all source actions. Each action is responsible @@ -655,164 +842,319 @@ _dispatch_source_invoke2(dispatch_object_t dou, // current queue is not the correct queue for this action, the correct queue // will be returned and the invoke will be re-driven on that queue. - // The order of tests here in invoke and in probe should be consistent. + // The order of tests here in invoke and in wakeup should be consistent. - dispatch_queue_t dq = _dispatch_queue_get_current(); - dispatch_queue_t dkq = _dispatch_source_get_kevent_queue(ds); dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_t dkq = &_dispatch_mgr_q; + + if (ds->ds_is_direct_kevent) { + dkq = ds->do_targetq; + } if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. if (dq != dkq) { return dkq; } - _dispatch_source_kevent_register(ds); - ds->ds_is_installed = true; - if (dr->ds_handler[DS_REGISTN_HANDLER]) { - return ds->do_targetq; - } - if (slowpath(ds->do_xref_cnt == -1)) { - return dkq; // rdar://problem/9558246 - } - } else if (slowpath(DISPATCH_OBJECT_SUSPENDED(ds))) { + _dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority()); + } + + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { // Source suspended by an item drained from the source queue. 
- return NULL; - } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { + return ds->do_targetq; + } + + if (_dispatch_source_get_registration_handler(dr)) { // The source has been registered and the registration handler needs // to be delivered on the target queue. if (dq != ds->do_targetq) { return ds->do_targetq; } // clears ds_registration_handler - _dispatch_source_registration_callout(ds); - if (slowpath(ds->do_xref_cnt == -1)) { - return dkq; // rdar://problem/9558246 - } - } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || - (ds->ds_atomic_flags & DSF_ONESHOT))) { - // Pending source kevent unregistration has been completed - if (ds->ds_needs_mgr) { - dkq = &_dispatch_mgr_q; - } + _dispatch_source_registration_callout(ds, dq, flags); + } + + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + bool prevent_starvation = false; + + if ((dqf & DSF_DEFERRED_DELETE) && + ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { +unregister_event: + // DSF_DELETE: Pending source kevent unregistration has been completed + // !DSF_ARMED: event was delivered and can safely be unregistered if (dq != dkq) { return dkq; } - ds->ds_pending_delete = false; - if (ds->ds_atomic_flags & DSF_ONESHOT) { - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ONESHOT, - relaxed); - } - if (ds->ds_dkev) { - _dispatch_source_kevent_unregister(ds); - if (ds->ds_needs_mgr) { - return &_dispatch_mgr_q; + _dispatch_source_kevent_unregister(ds); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + } + + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + // The source has pending data to deliver via the event handler callback + // on the target queue. Some sources need to be rearmed on the kevent + // queue after event delivery. 
+ if (dq == ds->do_targetq) { + _dispatch_source_latch_and_call(ds, dq, flags); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + + // starvation avoidance: if the source triggers itself then force a + // re-queue to give other things already queued on the target queue + // a chance to run. + // + // however, if the source is directly targetting an overcommit root + // queue, this would requeue the source and ask for a new overcommit + // thread right away. + prevent_starvation = dq->do_targetq || + !(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + if (prevent_starvation && ds->ds_pending_data) { + retq = ds->do_targetq; } - } - if (dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { + } else { + // there is no point trying to be eager, the next thing to do is + // to deliver the event return ds->do_targetq; } - } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) - && !ds->ds_pending_delete) { + } + + if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DEFERRED_DELETE)) { // The source has been cancelled and needs to be uninstalled from the // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. 
- if (ds->ds_dkev) { - if (ds->ds_needs_mgr) { - dkq = &_dispatch_mgr_q; - } + if (!(dqf & DSF_DELETED)) { if (dq != dkq) { return dkq; } _dispatch_source_kevent_unregister(ds); - if (ds->ds_needs_mgr) { - return &_dispatch_mgr_q; - } - if (ds->ds_pending_delete) { - // deferred unregistration - if (ds->ds_needs_rearm) { - return dkq; + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(dqf & DSF_DEFERRED_DELETE)) { + if (!(dqf & DSF_ARMED)) { + goto unregister_event; } - return NULL; + // we need to wait for the EV_DELETE + return retq; } } - if (dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { - if (dq != ds->do_targetq) { - return ds->do_targetq; - } - } - _dispatch_source_cancel_callout(ds); - } else if (ds->ds_pending_data && !ds->ds_pending_delete) { - // The source has pending data to deliver via the event handler callback - // on the target queue. Some sources need to be rearmed on the kevent - // queue after event delivery. - if (dq != ds->do_targetq) { - return ds->do_targetq; - } - _dispatch_source_latch_and_call(ds); - if (ds->ds_needs_rearm) { - return dkq; + if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) || + _dispatch_source_get_cancel_handler(dr) || + _dispatch_source_get_registration_handler(dr))) { + retq = ds->do_targetq; + } else { + _dispatch_source_cancel_callout(ds, dq, flags); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + prevent_starvation = false; + } + + if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { // The source needs to be rearmed on the kevent queue. 
if (dq != dkq) { return dkq; } - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); - _dispatch_source_kevent_resume(ds, 0); - } - - return NULL; + if (unlikely(dqf & DSF_DEFERRED_DELETE)) { + // no need for resume when we can directly unregister the kevent + goto unregister_event; + } + if (prevent_starvation) { + // keep the old behavior to force re-enqueue to our target queue + // for the rearm. It is inefficient though and we should + // improve this . + // + // if the handler didn't run, or this is a pending delete + // or our target queue is a global queue, then starvation is + // not a concern and we can rearm right away. + return ds->do_targetq; + } + if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + goto unregister_event; + } + } + + return retq; } DISPATCH_NOINLINE void -_dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(ds, dou._dc, flags, _dispatch_source_invoke2); + _dispatch_queue_class_invoke(ds->_as_dq, flags, _dispatch_source_invoke2); } -unsigned long -_dispatch_source_probe(dispatch_source_t ds) +void +_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { // This function determines whether the source needs to be invoked. - // The order of tests here in probe and in invoke should be consistent. + // The order of tests here in wakeup and in invoke should be consistent. 
dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + bool deferred_delete = (dqf & DSF_DEFERRED_DELETE); + + if (ds->ds_is_direct_kevent) { + dkq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. - return true; - } else if (dr->ds_handler[DS_REGISTN_HANDLER]) { + tq = dkq; + } else if (_dispatch_source_get_registration_handler(dr)) { // The registration handler needs to be delivered to the target queue. - return true; - } else if ((ds->ds_atomic_flags & DSF_DELETED) && (ds->ds_pending_delete || - (ds->ds_atomic_flags & DSF_ONESHOT))) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { // Pending source kevent unregistration has been completed - return true; - } else if (((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) - && !ds->ds_pending_delete) { + // or EV_ONESHOT event can be acknowledged + tq = dkq; + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + // The source has pending data to deliver to the target queue. + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) { // The source needs to be uninstalled from the kevent queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. 
- if (ds->ds_dkev || dr->ds_handler[DS_EVENT_HANDLER] || - dr->ds_handler[DS_CANCEL_HANDLER] || - dr->ds_handler[DS_REGISTN_HANDLER]) { - return true; + if (!(dqf & DSF_DELETED)) { + tq = dkq; + } else if (_dispatch_source_get_event_handler(dr) || + _dispatch_source_get_cancel_handler(dr) || + _dispatch_source_get_registration_handler(dr)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } - } else if (ds->ds_pending_data && !ds->ds_pending_delete) { - // The source has pending data to deliver to the target queue. - return true; - } else if (ds->ds_needs_rearm && !(ds->ds_atomic_flags & DSF_ARMED)) { + } else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { // The source needs to be rearmed on the kevent queue. - return true; + tq = dkq; + } + if (!tq && _dispatch_queue_class_probe(ds)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (tq) { + return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq); + } else if (pp) { + return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(ds); + } +} + +void +dispatch_source_cancel(dispatch_source_t ds) +{ + _dispatch_object_debug(ds, "%s", __func__); + // Right after we set the cancel flag, someone else + // could potentially invoke the source, do the cancellation, + // unregister the source, and deallocate it. 
We would + // need to therefore retain/release before setting the bit + _dispatch_retain(ds); + + dispatch_queue_t q = ds->_as_dq; + if (_dispatch_queue_atomic_flags_set_orig(q, DSF_CANCELED) & DSF_CANCELED) { + _dispatch_release_tailcall(ds); + } else { + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME); + } +} + +void +dispatch_source_cancel_and_wait(dispatch_source_t ds) +{ + dispatch_queue_flags_t old_dqf, dqf, new_dqf; + pthread_priority_t pp; + + if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) { + DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler"); + } + + _dispatch_object_debug(ds, "%s", __func__); + os_atomic_rmw_loop2o(ds, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = old_dqf | DSF_CANCELED; + if (old_dqf & DSF_CANCEL_WAITER) { + os_atomic_rmw_loop_give_up(break); + } + if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + // just add DSF_CANCELED + } else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){ + new_dqf |= DSF_CANCEL_WAITER; + } + }); + dqf = new_dqf; + + if (old_dqf & DQF_RELEASED) { + DISPATCH_CLIENT_CRASH(ds, "Dispatch source used after last release"); + } + if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + return; + } + if (dqf & DSF_CANCEL_WAITER) { + goto override; + } + + // simplified version of _dispatch_queue_drain_try_lock + // that also sets the DIRTY bit on failure to lock + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t xor_owner_and_set_full_width = tid_self | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(ds, dq_state, old_state, new_state, seq_cst, { + new_state = old_state; + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state ^= xor_owner_and_set_full_width; + } else if (old_dqf & DSF_CANCELED) { + os_atomic_rmw_loop_give_up(break); + } else { + // this case needs a 
release barrier, hence the seq_cst above + new_state |= DISPATCH_QUEUE_DIRTY; + } + }); + + if (unlikely(_dq_state_is_suspended(old_state))) { + if (unlikely(_dq_state_suspend_cnt(old_state))) { + DISPATCH_CLIENT_CRASH(ds, "Source is suspended"); + } + // inactive sources have never been registered and there is no need + // to wait here because activation will notice and mark the source + // as deleted without ever trying to use the fd or mach port. + return dispatch_activate(ds); + } + + if (likely(_dq_state_is_runnable(old_state) && + !_dq_state_drain_locked(old_state))) { + // same thing _dispatch_source_invoke2() does when handling cancellation + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) { + _dispatch_source_kevent_unregister(ds); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) { + _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); + } + } + _dispatch_try_lock_transfer_or_wakeup(ds->_as_dq); + } else if (unlikely(_dq_state_drain_locked_by(old_state, tid_self))) { + DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait " + "called from a source handler"); + } else { +override: + pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING); + dispatch_activate(ds); + } + + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { + if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { + if (!os_atomic_cmpxchgvw2o(ds, dq_atomic_flags, + dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { + continue; + } + dqf |= DSF_CANCEL_WAITER; + } + _dispatch_wait_on_address(&ds->dq_atomic_flags, dqf, DLOCK_LOCK_NONE); + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - return _dispatch_queue_class_probe(ds); } static void @@ -820,53 +1162,90 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, const _dispatch_kevent_qos_s *ke) { 
_dispatch_object_debug(ds, "%s", __func__); - bool retained = false; + dispatch_wakeup_flags_t flags = 0; + dispatch_queue_flags_t dqf; + pthread_priority_t pp = 0; + + if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) { + // once we modify the queue atomic flags below, it will allow concurrent + // threads running _dispatch_source_invoke2 to dispose of the source, + // so we can't safely borrow the reference we get from the knote udata + // anymore, and need our own + flags = DISPATCH_WAKEUP_CONSUME; + _dispatch_retain(ds); // rdar://20382435 + } + if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && !(ke->flags & EV_DELETE)) { - _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", - ds, (void*)ke->udata); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ONESHOT, relaxed); + dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, + DSF_DEFERRED_DELETE, DSF_ARMED); + if (ke->flags & EV_VANISHED) { + _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + "monitored resource vanished before the source " + "cancel handler was invoked", 0); + } + _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds, + (ke->flags & EV_VANISHED) ? 
"vanished" : + "deferred delete oneshot", (void*)ke->udata); } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) { + dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, + DSF_DELETED, DSF_ARMED); _dispatch_debug("kevent-source[%p]: delete kevent[%p]", ds, (void*)ke->udata); - retained = true; - _dispatch_retain(ds); - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_DELETED, relaxed); if (ke->flags & EV_DELETE) goto done; + } else if (ds->ds_needs_rearm) { + dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", + ds, (void*)ke->udata); + } else { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - if ((ds->ds_atomic_flags & DSF_CANCELED) || (ds->do_xref_cnt == -1)) { + + if (dqf & (DSF_CANCELED | DQF_RELEASED)) { goto done; // rdar://20204025 } - if (ds->ds_is_level) { +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT && + dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { + DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel"); + } +#endif + + unsigned long data; + if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && + (ke->flags & EV_VANISHED)) { + // if the resource behind the ident vanished, the event handler can't + // do anything useful anymore, so do not try to call it at all + // + // Note: if the kernel doesn't support EV_VANISHED we always get it + // back unchanged from the flags passed at EV_ADD (registration) time + // Since we never ask for both EV_ONESHOT and EV_VANISHED for sources, + // if we get both bits it was a real EV_VANISHED delivery + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); +#if HAVE_MACH + } else if (ke->filter == EVFILT_MACHPORT) { + data = DISPATCH_MACH_RECV_MESSAGE; + os_atomic_store2o(ds, ds_pending_data, data, relaxed); +#endif + } else if (ds->ds_is_level) { // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set - // 10A268 does not fail this 
assert with EVFILT_READ and a 10 GB file dispatch_assert(ke->data >= 0l); - dispatch_atomic_store2o(ds, ds_pending_data, ~(unsigned long)ke->data, - relaxed); + data = ~(unsigned long)ke->data; + os_atomic_store2o(ds, ds_pending_data, data, relaxed); } else if (ds->ds_is_adder) { - (void)dispatch_atomic_add2o(ds, ds_pending_data, - (unsigned long)ke->data, relaxed); + data = (unsigned long)ke->data; + os_atomic_add2o(ds, ds_pending_data, data, relaxed); } else if (ke->fflags & ds->ds_pending_data_mask) { - (void)dispatch_atomic_or2o(ds, ds_pending_data, - ke->fflags & ds->ds_pending_data_mask, relaxed); + data = ke->fflags & ds->ds_pending_data_mask; + os_atomic_or2o(ds, ds_pending_data, data, relaxed); } + done: - // EV_DISPATCH and EV_ONESHOT sources are no longer armed after delivery - if (ds->ds_needs_rearm) { - if (!retained) { - retained = true; - _dispatch_retain(ds); // rdar://20382435 - } - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", - ds, (void*)ke->udata); - } - if (retained) { - _dispatch_queue_wakeup_and_release((dispatch_queue_t)ds); - } else { - _dispatch_queue_wakeup((dispatch_queue_t)ds); - } +#if DISPATCH_USE_KEVENT_QOS + pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; +#endif + dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH); } #pragma mark - @@ -915,9 +1294,9 @@ _dispatch_kevent_init() TAILQ_INSERT_TAIL(&_dispatch_sources[0], &_dispatch_kevent_data_add, dk_list); _dispatch_kevent_data_or.dk_kevent.udata = - (uintptr_t)&_dispatch_kevent_data_or; + (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or; _dispatch_kevent_data_add.dk_kevent.udata = - (uintptr_t)&_dispatch_kevent_data_add; + (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add; #endif // !DISPATCH_USE_EV_UDATA_SPECIFIC } @@ -931,6 +1310,7 @@ _dispatch_kevent_hash(uint64_t ident, short filter) MACH_PORT_INDEX(ident) : ident); #else value = ident; + (void)filter; #endif 
return DSL_HASH((uintptr_t)value); } @@ -961,7 +1341,8 @@ _dispatch_kevent_insert(dispatch_kevent_t dk) // Find existing kevents, and merge any new flags if necessary static bool -_dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) +_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp, + uint32_t *flgp) { dispatch_kevent_t dk = NULL, ds_dkev = *dkp; uint32_t new_flags; @@ -981,6 +1362,21 @@ _dispatch_kevent_register(dispatch_kevent_t *dkp, uint32_t *flgp) do_resume = new_flags; } else { dk = ds_dkev; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) { + // do nothing + } else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } else { + pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + _dispatch_assert_is_valid_qos_class(pp); + dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp; + } +#else + (void)pp; +#endif _dispatch_kevent_insert(dk); new_flags = dk->dk_kevent.fflags; do_resume = true; @@ -999,6 +1395,10 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) { long r; + bool oneshot; + if (dk->dk_kevent.flags & EV_DELETE) { + return 0; + } switch (dk->dk_kevent.filter) { case DISPATCH_EVFILT_TIMER: case DISPATCH_EVFILT_CUSTOM_ADD: @@ -1006,29 +1406,39 @@ _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, // these types not registered with kevent return 0; #if HAVE_MACH - case EVFILT_MACHPORT: - return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); case DISPATCH_EVFILT_MACH_NOTIFICATION: return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + case EVFILT_MACHPORT: + if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); + } + // fall through #endif +#endif // HAVE_MACH 
default: - if (dk->dk_kevent.flags & EV_DELETE) { - return 0; - } - r = _dispatch_kq_update(&dk->dk_kevent); + // oneshot dk may be freed by the time we return from + // _dispatch_kq_immediate_update if the event was delivered (and then + // unregistered) concurrently. + oneshot = (dk->dk_kevent.flags & EV_ONESHOT); + r = _dispatch_kq_immediate_update(&dk->dk_kevent); if (r && (dk->dk_kevent.flags & EV_ADD) && (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); - } else if (dk->dk_kevent.flags & EV_DISPATCH) { - dk->dk_kevent.flags &= ~EV_ADD; + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); + } else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) { + // we can safely skip doing this for ONESHOT events because + // the next kq update we will do is _dispatch_kevent_dispose() + // which also clears EV_ADD. + dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED); } return r; } + (void)new_flags; (void)del_flags; } static long -_dispatch_kevent_dispose(dispatch_kevent_t dk, int options) +_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options) { long r = 0; switch (dk->dk_kevent.filter) { @@ -1041,56 +1451,63 @@ _dispatch_kevent_dispose(dispatch_kevent_t dk, int options) // these sources live on statically allocated lists } return r; + } + if (!(dk->dk_kevent.flags & EV_DELETE)) { + dk->dk_kevent.flags |= EV_DELETE; + dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags |= EV_ENABLE; + } + switch (dk->dk_kevent.filter) { #if HAVE_MACH - case EVFILT_MACHPORT: - _dispatch_kevent_machport_resume(dk, 0, dk->dk_kevent.fflags); - break; - case DISPATCH_EVFILT_MACH_NOTIFICATION: - _dispatch_kevent_mach_notify_resume(dk, 0, dk->dk_kevent.fflags); - break; -#endif - default: - if (~dk->dk_kevent.flags & EV_DELETE) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE); - if (options & 
DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags |= EV_ENABLE; - } - r = _dispatch_kq_update(&dk->dk_kevent); - if (r == ENOENT && (options & DKEV_DISPOSE_IGNORE_ENOENT)) { - r = 0; + case DISPATCH_EVFILT_MACH_NOTIFICATION: + r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags); + break; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + case EVFILT_MACHPORT: + if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { + r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags); + break; } + // fall through +#endif +#endif + default: if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags &= ~EV_ENABLE; + _dispatch_kq_deferred_update(&dk->dk_kevent); + } else { + r = _dispatch_kq_immediate_update(&dk->dk_kevent); } + break; + } + if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { + dk->dk_kevent.flags &= ~EV_ENABLE; } - break; } - if ((r == EINPROGRESS || r == ENOENT) && - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - // deferred EV_DELETE or concurrent concurrent EV_DELETE delivery - dk->dk_kevent.flags &= ~EV_DELETE; - dk->dk_kevent.flags |= EV_ENABLE; - } else { - if ((dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { -#if DISPATCH_DEBUG - // zero/trash dr linkage - dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); - TAILQ_REMOVE(&dk->dk_sources, dr, dr_list); + if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { + bool deferred_delete = (r == EINPROGRESS); +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + if (r == ENOENT) deferred_delete = true; #endif - } else { - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); + if (deferred_delete) { + // deferred EV_DELETE or concurrent concurrent EV_DELETE delivery + dk->dk_kevent.flags &= ~EV_DELETE; + dk->dk_kevent.flags |= EV_ENABLE; + return r; } - _dispatch_kevent_unguard(dk); - free(dk); + } else { + uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, + dk->dk_kevent.filter); + 
TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); } + _dispatch_kevent_unguard(dk); + free(dk); return r; } static long -_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, int options) +_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, + unsigned int options) { dispatch_source_refs_t dri; uint32_t del_flags, fflags = 0; @@ -1125,19 +1542,52 @@ _dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke) _dispatch_kevent_qos_s fake; fake = *ke; fake.flags &= ~EV_ERROR; + fake.flags |= EV_ONESHOT; fake.fflags = NOTE_EXIT; fake.data = 0; - _dispatch_kevent_drain(&fake); + _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); + _dispatch_kevent_merge(&fake); } DISPATCH_NOINLINE static void _dispatch_kevent_error(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); + _dispatch_kevent_qos_s *kev = NULL; + + if (ke->flags & EV_DELETE) { + if (ke->flags & EV_UDATA_SPECIFIC) { + if (ke->data == EINPROGRESS) { + // deferred EV_DELETE + return; + } +#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS + if (ke->data == ENOENT) { + // deferred EV_DELETE + return; + } +#endif + } + // for EV_DELETE if the update was deferred we may have reclaimed + // our dispatch_kevent_t, and it is unsafe to dereference it now. + } else if (ke->udata) { + kev = &((dispatch_kevent_t)ke->udata)->dk_kevent; + ke->flags |= kev->flags; + } + +#if HAVE_MACH + if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && + (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && + kev && (kev->fflags & MACH_RCV_MSG)) { + DISPATCH_INTERNAL_CRASH(ke->ident, + "Missing EVFILT_MACHPORT support for ports"); + } +#endif + if (ke->data) { // log the unexpected error _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + !ke->udata ? NULL : ke->flags & EV_DELETE ? "delete" : ke->flags & EV_ADD ? "add" : ke->flags & EV_ENABLE ? 
"enable" : "monitor", @@ -1153,29 +1603,29 @@ _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); #endif if (ke->filter == EVFILT_USER) { - _dispatch_kevent_mgr_debug(ke, __func__); + _dispatch_kevent_mgr_debug(ke); return; } if (slowpath(ke->flags & EV_ERROR)) { if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - ke->data = 0; // don't return error from caller - if (ke->flags & EV_DELETE) { - _dispatch_debug("kevent[0x%llx]: ignoring ESRCH from " - "EVFILT_PROC EV_DELETE", ke->udata); - return; - } _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: " - "generating fake NOTE_EXIT", ke->udata); + "generating fake NOTE_EXIT", (unsigned long long)ke->udata); return _dispatch_kevent_proc_exit(ke); } + _dispatch_debug("kevent[0x%llx]: handling error", + (unsigned long long)ke->udata); return _dispatch_kevent_error(ke); } if (ke->filter == EVFILT_TIMER) { + _dispatch_debug("kevent[0x%llx]: handling timer", + (unsigned long long)ke->udata); return _dispatch_timers_kevent(ke); } #if HAVE_MACH if (ke->filter == EVFILT_MACHPORT) { - return _dispatch_kevent_mach_portset(ke); + _dispatch_debug("kevent[0x%llx]: handling mach port", + (unsigned long long)ke->udata); + return _dispatch_mach_kevent_merge(ke); } #endif return _dispatch_kevent_merge(ke); @@ -1185,13 +1635,9 @@ DISPATCH_NOINLINE static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); - dispatch_kevent_t dk; + dispatch_kevent_t dk = (void*)ke->udata; dispatch_source_refs_t dri, dr_next; - dk = (void*)ke->udata; - dispatch_assert(dk); - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); } @@ -1302,7 +1748,7 @@ static inline uint64_t _dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) { unsigned int tk = DISPATCH_TIMER_KIND(tidx); - if (nows && fastpath(nows[tk])) { + if (nows && fastpath(nows[tk] != 0)) { return 
nows[tk]; } uint64_t now; @@ -1353,7 +1799,7 @@ _dispatch_source_set_timer3(void *context) // older timer params ds->ds_pending_data = 0; // Re-arm in case we got disarmed because of pending set_timer suspension - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, release); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); dispatch_resume(ds); // Must happen after resume to avoid getting disarmed due to suspension @@ -1435,7 +1881,7 @@ _dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, { if (slowpath(!ds->ds_is_timer) || slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { - DISPATCH_CLIENT_CRASH("Attempt to set timer on a non-timer source"); + DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); } struct dispatch_set_timer_params *params; @@ -1446,7 +1892,7 @@ _dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, // The use of suspend/resume requires the external retain/release dispatch_retain(ds); if (source_sync) { - return _dispatch_barrier_trysync_f((dispatch_queue_t)ds, params, + return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params, _dispatch_source_set_timer2); } else { return _dispatch_source_set_timer2(params); @@ -1525,8 +1971,13 @@ struct dispatch_timer_s _dispatch_timer[] = { #define DISPATCH_TIMER_COUNT \ ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) +#if __linux__ +#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ + (void*)&_dispatch_kevent_timer[tidx] +#else #define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ (uintptr_t)&_dispatch_kevent_timer[tidx] +#endif #ifdef __LP64__ #define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) @@ -1562,21 +2013,29 @@ struct dispatch_kevent_s _dispatch_kevent_timer[] = { ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) #define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) -#define 
DISPATCH_KEVENT_TIMEOUT_INITIALIZER(qos, note) \ - [qos] = { \ - .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(qos), \ +#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \ + [tidx] = { \ + .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \ .filter = EVFILT_TIMER, \ .flags = EV_ONESHOT, \ .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ } -#define DISPATCH_KEVENT_TIMEOUT_INIT(qos, note) \ - DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_QOS_##qos, note) +#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \ + DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note) _dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { - DISPATCH_KEVENT_TIMEOUT_INIT(NORMAL, 0), - DISPATCH_KEVENT_TIMEOUT_INIT(CRITICAL, NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(BACKGROUND, NOTE_BACKGROUND), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL), + DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), }; +#define DISPATCH_KEVENT_TIMEOUT_COUNT \ + ((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0]))) +static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1, + "should have a kevent for everything but disarm (ddt assumes this)"); #define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC @@ -1622,11 +2081,11 @@ static const uint64_t _dispatch_kevent_coalescing_window[] = { dr_list); }) #define _dispatch_timers_check(dra, dta) ({ \ - unsigned int qosm = _dispatch_timers_qos_mask; \ + unsigned int timerm = _dispatch_timers_mask; \ bool update = false; \ unsigned int tidx; \ for 
(tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ - if (!(qosm & 1 << DISPATCH_TIMER_QOS(tidx))){ \ + if (!(timerm & (1 << tidx))){ \ continue; \ } \ dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ @@ -1647,7 +2106,7 @@ static const uint64_t _dispatch_kevent_coalescing_window[] = { update; }) static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; -static unsigned int _dispatch_timers_qos_mask; +static unsigned int _dispatch_timers_mask; static bool _dispatch_timers_force_max_leeway; static void @@ -1656,7 +2115,7 @@ _dispatch_timers_init(void) #ifndef __LP64__ unsigned int tidx; for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - _dispatch_kevent_timer[tidx].dk_kevent.udata = \ + _dispatch_kevent_timer[tidx].dk_kevent.udata = DISPATCH_KEVENT_TIMER_UDATA(tidx); } #endif // __LP64__ @@ -1678,7 +2137,7 @@ _dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); if (tidx != DISPATCH_TIMER_INDEX_DISARM) { _dispatch_timers_reconfigure = true; - _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + _dispatch_timers_mask |= 1 << tidx; } } @@ -1699,10 +2158,10 @@ _dispatch_timers_update(dispatch_source_t ds) } // Move timers that are disabled, suspended or have missed intervals to the // disarmed list, rearm after resume resp. 
source invoke will reenable them - if (!ds_timer(dr).target || DISPATCH_OBJECT_SUSPENDED(ds) || + if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { tidx = DISPATCH_TIMER_INDEX_DISARM; - (void)dispatch_atomic_and2o(ds, ds_atomic_flags, ~DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, ds->ds_dkev); } else { @@ -1714,7 +2173,7 @@ _dispatch_timers_update(dispatch_source_t ds) if (slowpath(!ds->ds_is_installed)) { ds->ds_is_installed = true; if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - (void)dispatch_atomic_or2o(ds, ds_atomic_flags, DSF_ARMED, relaxed); + _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); } @@ -1726,7 +2185,7 @@ _dispatch_timers_update(dispatch_source_t ds) } if (tidx != DISPATCH_TIMER_INDEX_DISARM) { _dispatch_timers_reconfigure = true; - _dispatch_timers_qos_mask |= 1 << DISPATCH_TIMER_QOS(tidx); + _dispatch_timers_mask |= 1 << tidx; } if (dk != &_dispatch_kevent_timer[tidx]){ ds->ds_dkev = &_dispatch_kevent_timer[tidx]; @@ -1764,7 +2223,7 @@ _dispatch_timers_run2(uint64_t nows[], unsigned int tidx) } // Remove timers that are suspended or have missed intervals from the // list, rearm after resume resp. 
source invoke will reenable them - if (DISPATCH_OBJECT_SUSPENDED(ds) || ds->ds_pending_data) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { _dispatch_timers_update(ds); continue; } @@ -1784,10 +2243,13 @@ _dispatch_timers_run2(uint64_t nows[], unsigned int tidx) ds_timer(dr).last_fire = now; unsigned long data; - data = dispatch_atomic_add2o(ds, ds_pending_data, + data = os_atomic_add2o(ds, ds_pending_data, (unsigned long)missed, relaxed); _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); - _dispatch_wakeup(ds); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); + if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) { + _dispatch_source_kevent_unregister(ds); + } } } @@ -1805,7 +2267,7 @@ _dispatch_timers_run(uint64_t nows[]) static inline unsigned int _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], - uint64_t *delay, uint64_t *leeway, int qos) + uint64_t *delay, uint64_t *leeway, int qos, int kind) { unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; @@ -1814,6 +2276,9 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ continue; } + if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){ + continue; + } uint64_t target = timer[tidx].target; if (target == UINT64_MAX) { continue; @@ -1858,19 +2323,58 @@ _dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], return ridx; } + +#ifdef __linux__ +// in linux we map the _dispatch_kevent_qos_s to struct kevent instead +// of struct kevent64. We loose the kevent.ext[] members and the time +// out is based on relavite msec based time vs. absolute nsec based time. +// For now we make the adjustments right here until the solution +// to either extend libkqueue with a proper kevent64 API or removing kevent +// all together and move to a lower API (e.g. epoll or kernel_module. +// Also leeway is ignored. 
+ +static void +_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, + uint64_t leeway, uint64_t nows[]) +{ + // call to update nows[] + _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + // adjust nsec based delay to msec based and ignore leeway + delay /= 1000000L; + if ((int64_t)(delay) <= 0) { + delay = 1; // if value <= 0 the dispatch will stop + } + ke->data = (int64_t)delay; +} + +#else +static void +_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, + uint64_t leeway, uint64_t nows[]) +{ + delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); + if (slowpath(_dispatch_timers_force_max_leeway)) { + ke->data = (int64_t)(delay + leeway); + ke->ext[1] = 0; + } else { + ke->data = (int64_t)delay; + ke->ext[1] = leeway; + } +} +#endif // __linux__ + static bool _dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, - unsigned int qos) + unsigned int tidx) { - unsigned int tidx; bool poll; uint64_t delay, leeway; - tidx = _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, - (int)qos); + _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, + (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx)); poll = (delay == 0); if (poll || delay == UINT64_MAX) { - _dispatch_trace_next_timer_set(NULL, qos); + _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx)); if (!ke->data) { return poll; } @@ -1879,20 +2383,18 @@ _dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, ke->flags &= ~(EV_ADD|EV_ENABLE); } else { _dispatch_trace_next_timer_set( - TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), qos); - _dispatch_trace_next_timer_program(delay, qos); - delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); - if (slowpath(_dispatch_timers_force_max_leeway)) { - ke->data = (int64_t)(delay + leeway); - ke->ext[1] = 0; - } else { - ke->data = (int64_t)delay; - ke->ext[1] = leeway; - } + 
TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx)); + _dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx)); + _dispatch_kevent_timer_set_delay(ke, delay, leeway, nows); ke->flags |= EV_ADD|EV_ENABLE; ke->flags &= ~EV_DELETE; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } +#endif } - _dispatch_kq_update(ke); + _dispatch_kq_deferred_update(ke); return poll; } @@ -1901,13 +2403,13 @@ static bool _dispatch_timers_program(uint64_t nows[]) { bool poll = false; - unsigned int qos, qosm = _dispatch_timers_qos_mask; - for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - if (!(qosm & 1 << qos)){ + unsigned int tidx, timerm = _dispatch_timers_mask; + for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) { + if (!(timerm & 1 << tidx)){ continue; } - poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[qos], - qos); + poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx], + tidx); } return poll; } @@ -1921,28 +2423,34 @@ _dispatch_timers_configure(void) return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); } +#if HAVE_MACH static void _dispatch_timers_calendar_change(void) { + unsigned int qos; + // calendar change may have gone past the wallclock deadline _dispatch_timer_expired = true; - _dispatch_timers_qos_mask = ~0u; + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + _dispatch_timers_mask |= + 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos); + } } +#endif static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); dispatch_assert(ke->data > 0); dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); - unsigned int qos = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; - dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); - dispatch_assert(_dispatch_kevent_timeout[qos].data); - 
_dispatch_kevent_timeout[qos].data = 0; // kevent deleted via EV_ONESHOT + unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT); + dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0); + _dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT _dispatch_timer_expired = true; - _dispatch_timers_qos_mask |= 1 << qos; - _dispatch_trace_next_timer_wake(qos); + _dispatch_timers_mask |= 1 << tidx; + _dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx)); } static inline bool @@ -1963,7 +2471,7 @@ _dispatch_mgr_timers(void) expired = _dispatch_timer_expired = _dispatch_timers_program(nows); expired = expired || _dispatch_mgr_q.dq_items_tail; } - _dispatch_timers_qos_mask = 0; + _dispatch_timers_mask = 0; } return expired; } @@ -1976,8 +2484,7 @@ typedef struct { } dispatch_timer_aggregate_refs_s; typedef struct dispatch_timer_aggregate_s { - DISPATCH_STRUCT_HEADER(queue); - DISPATCH_QUEUE_HEADER; + DISPATCH_QUEUE_HEADER(queue); TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; dispatch_timer_aggregate_refs_s dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; @@ -1986,7 +2493,7 @@ typedef struct dispatch_timer_aggregate_s { } dta_timer[DISPATCH_TIMER_COUNT]; struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; unsigned int dta_refcount; -} dispatch_timer_aggregate_s; +} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s; typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; static dispatch_timer_aggregates_s _dispatch_timer_aggregates = @@ -1998,10 +2505,10 @@ dispatch_timer_aggregate_create(void) unsigned int tidx; dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), sizeof(struct dispatch_timer_aggregate_s)); - _dispatch_queue_init((dispatch_queue_t)dta); + _dispatch_queue_init(dta->_as_dq, DQF_NONE, + DISPATCH_QUEUE_WIDTH_MAX, false); dta->do_targetq = _dispatch_get_root_queue( _DISPATCH_QOS_CLASS_USER_INITIATED, true); - 
dta->dq_width = DISPATCH_QUEUE_WIDTH_MAX; //FIXME: aggregates need custom vtable //dta->dq_label = "timer-aggregate"; for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { @@ -2015,7 +2522,7 @@ dispatch_timer_aggregate_create(void) dta->dta_timer_data[tidx].deadline = UINT64_MAX; } return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( - (dispatch_queue_t)dta); + dta->_as_dq); } typedef struct dispatch_timer_delay_s { @@ -2029,7 +2536,7 @@ _dispatch_timer_aggregate_get_delay(void *ctxt) dispatch_timer_delay_t dtd = ctxt; struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, - -1); + -1, -1); } uint64_t @@ -2039,8 +2546,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, struct dispatch_timer_delay_s dtd = { .timer = dta->dta_timer_data, }; - dispatch_sync_f((dispatch_queue_t)dta, &dtd, - _dispatch_timer_aggregate_get_delay); + dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay); if (leeway_ptr) { *leeway_ptr = dtd.leeway; } @@ -2072,7 +2578,7 @@ _dispatch_timer_aggregates_configure(void) } dtau = _dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); - _dispatch_barrier_async_detached_f((dispatch_queue_t)dta, dtau, + _dispatch_barrier_async_detached_f(dta->_as_dq, dtau, _dispatch_timer_aggregate_update); } } @@ -2121,340 +2627,262 @@ _dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) } #pragma mark - -#pragma mark dispatch_select +#pragma mark dispatch_kqueue static int _dispatch_kq; -#if DISPATCH_USE_SELECT_FALLBACK +#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE +#define _dispatch_kevent_assert_valid_qos(ke) ({ \ + if (_dispatch_kevent_workqueue_enabled) { \ + const _dispatch_kevent_qos_s *_ke = (ke); \ + if (_ke->flags & (EV_ADD|EV_ENABLE)) { \ + _dispatch_assert_is_valid_qos_class(\ + (pthread_priority_t)_ke->qos); \ + 
dispatch_assert(_ke->qos); \ + } \ + } \ + }) +#else +#define _dispatch_kevent_assert_valid_qos(ke) ((void)ke) +#endif -static unsigned int _dispatch_select_workaround; -static fd_set _dispatch_rfds; -static fd_set _dispatch_wfds; -static uint64_t*_dispatch_rfd_ptrs; -static uint64_t*_dispatch_wfd_ptrs; -DISPATCH_NOINLINE -static bool -_dispatch_select_register(const _dispatch_kevent_qos_s *kev) +static void +_dispatch_kq_init(void *context DISPATCH_UNUSED) { - // Must execute on manager queue - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - - // If an EINVAL or ENOENT error occurred while adding/enabling a read or - // write kevent, assume it was due to a type of filedescriptor not - // supported by kqueue and fall back to select - switch (kev->filter) { - case EVFILT_READ: - if ((kev->data == EINVAL || kev->data == ENOENT) && - dispatch_assume(kev->ident < FD_SETSIZE)) { - FD_SET((int)kev->ident, &_dispatch_rfds); - if (slowpath(!_dispatch_rfd_ptrs)) { - _dispatch_rfd_ptrs = _dispatch_calloc(FD_SETSIZE, - sizeof(*_dispatch_rfd_ptrs)); - } - if (!_dispatch_rfd_ptrs[kev->ident]) { - _dispatch_rfd_ptrs[kev->ident] = kev->udata; - _dispatch_select_workaround++; - _dispatch_debug("select workaround used to read fd %d: 0x%lx", - (int)kev->ident, (long)kev->data); + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_KEVENT_WORKQUEUE + _dispatch_kevent_workqueue_init(); + if (_dispatch_kevent_workqueue_enabled) { + int r; + const _dispatch_kevent_qos_s kev[] = { + [0] = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + }, + [1] = { + .ident = 1, + .filter = EVFILT_USER, + .fflags = NOTE_TRIGGER, + }, + }; + _dispatch_kq = -1; +retry: + r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL, + KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + default: + DISPATCH_CLIENT_CRASH(err, + "Failed to initalize workqueue kevent"); + break; } - 
return true; } - break; - case EVFILT_WRITE: - if ((kev->data == EINVAL || kev->data == ENOENT) && - dispatch_assume(kev->ident < FD_SETSIZE)) { - FD_SET((int)kev->ident, &_dispatch_wfds); - if (slowpath(!_dispatch_wfd_ptrs)) { - _dispatch_wfd_ptrs = _dispatch_calloc(FD_SETSIZE, - sizeof(*_dispatch_wfd_ptrs)); - } - if (!_dispatch_wfd_ptrs[kev->ident]) { - _dispatch_wfd_ptrs[kev->ident] = kev->udata; - _dispatch_select_workaround++; - _dispatch_debug("select workaround used to write fd %d: 0x%lx", - (int)kev->ident, (long)kev->data); - } - return true; + return; + } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_MGR_THREAD + static const _dispatch_kevent_qos_s kev = { + .ident = 1, + .filter = EVFILT_USER, + .flags = EV_ADD|EV_CLEAR, + }; + + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_GUARDED_FD + guardid_t guard = (uintptr_t)&kev; + _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); +#else + _dispatch_kq = kqueue(); +#endif + if (_dispatch_kq == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "kqueue() failure"); + break; } - break; } - return false; + (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, + NULL, 0)); + _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif // DISPATCH_USE_MGR_THREAD } DISPATCH_NOINLINE -static bool -_dispatch_select_unregister(const _dispatch_kevent_qos_s *kev) +static long +_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n) { - // Must execute on manager queue - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + int i, r; + _dispatch_kevent_qos_s kev_error[n]; + static dispatch_once_t 
pred; + dispatch_once_f(&pred, NULL, _dispatch_kq_init); - switch (kev->filter) { - case EVFILT_READ: - if (_dispatch_rfd_ptrs && kev->ident < FD_SETSIZE && - _dispatch_rfd_ptrs[kev->ident]) { - FD_CLR((int)kev->ident, &_dispatch_rfds); - _dispatch_rfd_ptrs[kev->ident] = 0; - _dispatch_select_workaround--; - return true; - } - break; - case EVFILT_WRITE: - if (_dispatch_wfd_ptrs && kev->ident < FD_SETSIZE && - _dispatch_wfd_ptrs[kev->ident]) { - FD_CLR((int)kev->ident, &_dispatch_wfds); - _dispatch_wfd_ptrs[kev->ident] = 0; - _dispatch_select_workaround--; - return true; + for (i = 0; i < n; i++) { + if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug_n("updating", ke + i, i, n); } - break; } - return false; -} -DISPATCH_NOINLINE -static bool -_dispatch_mgr_select(bool poll) -{ - static const struct timeval timeout_immediately = { 0, 0 }; - fd_set tmp_rfds, tmp_wfds; - int err, i, r; - bool kevent_avail = false; - - FD_COPY(&_dispatch_rfds, &tmp_rfds); - FD_COPY(&_dispatch_wfds, &tmp_wfds); - - r = select(FD_SETSIZE, &tmp_rfds, &tmp_wfds, NULL, - poll ? 
(struct timeval*)&timeout_immediately : NULL); - if (slowpath(r == -1)) { - err = errno; - if (err != EBADF) { - if (err != EINTR) { - (void)dispatch_assume_zero(err); - } - return false; - } - for (i = 0; i < FD_SETSIZE; i++) { - if (i == _dispatch_kq) { - continue; - } - if (!FD_ISSET(i, &_dispatch_rfds) && !FD_ISSET(i, &_dispatch_wfds)){ - continue; - } - r = dup(i); - if (dispatch_assume(r != -1)) { - close(r); - } else { - if (_dispatch_rfd_ptrs && _dispatch_rfd_ptrs[i]) { - FD_CLR(i, &_dispatch_rfds); - _dispatch_rfd_ptrs[i] = 0; - _dispatch_select_workaround--; - } - if (_dispatch_wfd_ptrs && _dispatch_wfd_ptrs[i]) { - FD_CLR(i, &_dispatch_wfds); - _dispatch_wfd_ptrs[i] = 0; - _dispatch_select_workaround--; - } - } - } - return false; + unsigned int flags = KEVENT_FLAG_ERROR_EVENTS; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + flags |= KEVENT_FLAG_WORKQ; } - if (r > 0) { - for (i = 0; i < FD_SETSIZE; i++) { - if (FD_ISSET(i, &tmp_rfds)) { - if (i == _dispatch_kq) { - kevent_avail = true; - continue; - } - FD_CLR(i, &_dispatch_rfds); // emulate EV_DISPATCH - _dispatch_kevent_qos_s kev = { - .ident = (uint64_t)i, - .filter = EVFILT_READ, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .data = 1, - .udata = _dispatch_rfd_ptrs[i], - }; - _dispatch_kevent_drain(&kev); - } - if (FD_ISSET(i, &tmp_wfds)) { - FD_CLR(i, &_dispatch_wfds); // emulate EV_DISPATCH - _dispatch_kevent_qos_s kev = { - .ident = (uint64_t)i, - .filter = EVFILT_WRITE, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .data = 1, - .udata = _dispatch_wfd_ptrs[i], - }; - _dispatch_kevent_drain(&kev); - } - } - } - return kevent_avail; -} - -#endif // DISPATCH_USE_SELECT_FALLBACK - -#pragma mark - -#pragma mark dispatch_kqueue - -static void -_dispatch_kq_init(void *context DISPATCH_UNUSED) -{ - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - - _dispatch_safe_fork = false; -#if DISPATCH_USE_GUARDED_FD - 
guardid_t guard = (uintptr_t)&kev; - _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); -#else - _dispatch_kq = kqueue(); #endif - if (_dispatch_kq == -1) { + +retry: + r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags); + if (slowpath(r == -1)) { int err = errno; switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH("kqueue() failure: " - "kernel is out of memory"); + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); break; default: (void)dispatch_assume_zero(err); - DISPATCH_CRASH("kqueue() failure"); break; } + return err; } -#if DISPATCH_USE_SELECT_FALLBACK - else if (dispatch_assume(_dispatch_kq < FD_SETSIZE)) { - // in case we fall back to select() - FD_SET(_dispatch_kq, &_dispatch_rfds); + for (i = 0, n = r; i < n; i++) { + if (kev_error[i].flags & EV_ERROR) { + _dispatch_kevent_debug("returned error", &kev_error[i]); + _dispatch_kevent_drain(&kev_error[i]); + r = (int)kev_error[i].data; + } else { + _dispatch_kevent_mgr_debug(&kev_error[i]); + r = 0; + } } -#endif // DISPATCH_USE_SELECT_FALLBACK - - (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, - NULL, 0)); - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); + return r; } -static int -_dispatch_get_kq(void) +DISPATCH_ALWAYS_INLINE +static void +_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n) { - static dispatch_once_t pred; + (void)_dispatch_kq_update(kev, n); +} - dispatch_once_f(&pred, NULL, _dispatch_kq_init); +DISPATCH_ALWAYS_INLINE +static long +_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev) +{ + return _dispatch_kq_update(kev, 1); +} - return _dispatch_kq; +static inline bool 
+_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1, + const _dispatch_kevent_qos_s *e2) +{ + return e1->filter == e2->filter && + e1->ident == e2->ident && + e1->udata == e2->udata; } -DISPATCH_NOINLINE -static long -_dispatch_kq_update(const _dispatch_kevent_qos_s *kev) +static inline int +_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi, + const _dispatch_kevent_qos_s *ke) { - int r; - _dispatch_kevent_qos_s kev_error; + _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; + int i; -#if DISPATCH_USE_SELECT_FALLBACK - if (slowpath(_dispatch_select_workaround) && (kev->flags & EV_DELETE)) { - if (_dispatch_select_unregister(kev)) { - return 0; - } - } -#endif // DISPATCH_USE_SELECT_FALLBACK - if (kev->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug(kev, __func__); - } -retry: - r = kevent_qos(_dispatch_get_kq(), kev, 1, &kev_error, - 1, NULL, NULL, KEVENT_FLAG_ERROR_EVENTS); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); + for (i = 0; i < ddi->ddi_nevents; i++) { + if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) { break; } - return err; - } - if (r == 0) { - return 0; - } - if (kev_error.flags & EV_ERROR && kev_error.data) { - _dispatch_kevent_debug(&kev_error, __func__); } - r = (int)kev_error.data; - switch (r) { - case 0: - _dispatch_kevent_mgr_debug(&kev_error, __func__); - break; - case EINPROGRESS: - // deferred EV_DELETE - break; - case ENOENT: - if ((kev->flags & EV_DELETE) && (kev->flags & EV_UDATA_SPECIFIC)) { - // potential concurrent EV_DELETE delivery - break; + return i; +} + +static void +_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + int slot; + + _dispatch_kevent_assert_valid_qos(ke); + if (ddi) { + if 
(unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) { + _dispatch_deferred_items_set(NULL); + _dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents); + ddi->ddi_nevents = 0; + _dispatch_deferred_items_set(ddi); } - // fall through - case EINVAL: - if ((kev->flags & (EV_ADD|EV_ENABLE)) && !(kev->flags & EV_DELETE)) { -#if DISPATCH_USE_SELECT_FALLBACK - if (_dispatch_select_register(&kev_error)) { - r = 0; - break; + if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { + _dispatch_kevent_debug("deferred", ke); + } + bool needs_enable = false; + slot = _dispatch_deferred_event_find_slot(ddi, ke); + if (slot == ddi->ddi_nevents) { + ddi->ddi_nevents++; + } else if (ke->flags & EV_DELETE) { + // when deleting and an enable is pending, + // we must merge EV_ENABLE to do an immediate deletion + needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE); + } + ddi->ddi_eventlist[slot] = *ke; + if (needs_enable) { + ddi->ddi_eventlist[slot].flags |= EV_ENABLE; + } + } else { + _dispatch_kq_update_one(ke); + } +} + +static long +_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + int slot, last; + + _dispatch_kevent_assert_valid_qos(ke); + if (ddi) { + _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; + slot = _dispatch_deferred_event_find_slot(ddi, ke); + if (slot < ddi->ddi_nevents) { + // when deleting and an enable is pending, + // we must merge EV_ENABLE to do an immediate deletion + if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) { + ke->flags |= EV_ENABLE; } -#elif DISPATCH_DEBUG - if (kev->filter == EVFILT_READ || kev->filter == EVFILT_WRITE) { - DISPATCH_CRASH("Unsupported fd for EVFILT_READ or EVFILT_WRITE " - "kevent"); + last = --ddi->ddi_nevents; + if (slot != last) { + events[slot] = events[last]; } -#endif // DISPATCH_USE_SELECT_FALLBACK } - // fall through - case EBADF: - case EPERM: - default: - kev_error.flags |= kev->flags; - 
_dispatch_kevent_drain(&kev_error); - r = (int)kev_error.data; - break; } - return r; + return _dispatch_kq_update_one(ke); } #pragma mark - #pragma mark dispatch_mgr -static _dispatch_kevent_qos_s *_dispatch_kevent_enable; - -static void inline -_dispatch_mgr_kevent_reenable(_dispatch_kevent_qos_s *ke) -{ - dispatch_assert(!_dispatch_kevent_enable || _dispatch_kevent_enable == ke); - _dispatch_kevent_enable = ke; -} - -unsigned long -_dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) +DISPATCH_NOINLINE +static void +_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) { - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { - return false; - } - static const _dispatch_kevent_qos_s kev = { .ident = 1, .filter = EVFILT_USER, @@ -2464,126 +2892,345 @@ _dispatch_mgr_wakeup(dispatch_queue_t dq DISPATCH_UNUSED) #if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG _dispatch_debug("waking up the dispatch manager queue: %p", dq); #endif + _dispatch_kq_deferred_update(&kev); +} - _dispatch_kq_update(&kev); +void +_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) +{ + if (flags & DISPATCH_WAKEUP_FLUSH) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + } - return false; + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + return; + } + + if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) { + return; + } + + _dispatch_mgr_queue_poke(dq, pp); } DISPATCH_NOINLINE static void -_dispatch_mgr_init(void) +_dispatch_event_init(void) { - (void)dispatch_atomic_inc2o(&_dispatch_mgr_q, dq_running, relaxed); - _dispatch_thread_setspecific(dispatch_queue_key, &_dispatch_mgr_q); - _dispatch_queue_set_bound_thread(&_dispatch_mgr_q); - _dispatch_mgr_priority_init(); _dispatch_kevent_init(); _dispatch_timers_init(); +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK _dispatch_mach_recv_msg_buf_init(); - _dispatch_memorystatus_init(); +#endif + 
_dispatch_memorypressure_init(); + _voucher_activity_debug_channel_init(); +} + +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE +static void +_dispatch_mgr_init(void) +{ + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + _dispatch_queue_set_current(&_dispatch_mgr_q); + if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, + DISPATCH_INVOKE_STEALING, NULL) != owned) { + DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); + } + _dispatch_mgr_priority_init(); + _dispatch_event_init(); +} + +DISPATCH_NOINLINE +static bool +_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll) +{ + int r; + dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist)); + +retry: + r = kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents, + ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL, + poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); + if (slowpath(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + } + ddi->ddi_nevents = 0; + return r > 0; } DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_mgr_invoke(void) { - _dispatch_kevent_qos_s kev; + dispatch_deferred_items_s ddi; bool poll; - int r; + + ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; + ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + ddi.ddi_nevents = 0; + ddi.ddi_maxevents = 1; + + _dispatch_deferred_items_set(&ddi); for (;;) { _dispatch_mgr_queue_drain(); poll = _dispatch_mgr_timers(); -#if DISPATCH_USE_SELECT_FALLBACK - if (slowpath(_dispatch_select_workaround)) { - poll = _dispatch_mgr_select(poll); - if (!poll) continue; - } -#endif // DISPATCH_USE_SELECT_FALLBACK poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - r = kevent_qos(_dispatch_kq, _dispatch_kevent_enable, - _dispatch_kevent_enable ? 1 : 0, &kev, 1, NULL, NULL, - poll ? 
KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); - _dispatch_kevent_enable = NULL; - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - break; - case EBADF: - DISPATCH_CLIENT_CRASH("Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - } else if (r) { - _dispatch_kevent_drain(&kev); + if (_dispatch_mgr_wait_for_event(&ddi, poll)) { + _dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents; + _dispatch_kevent_debug("received", ke); + _dispatch_kevent_drain(ke); } } } +#endif // DISPATCH_USE_MGR_THREAD DISPATCH_NORETURN void _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_object_t dou DISPATCH_UNUSED, dispatch_invoke_flags_t flags DISPATCH_UNUSED) { +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " + "kevent workqueue enabled"); + } +#endif +#if DISPATCH_USE_MGR_THREAD _dispatch_mgr_init(); // never returns, so burn bridges behind us & clear stack 2k ahead _dispatch_clear_stack(2048); _dispatch_mgr_invoke(); +#endif } -#pragma mark - -#pragma mark dispatch_memorystatus +#if DISPATCH_USE_KEVENT_WORKQUEUE + +#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul)) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) +{ + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; + ddi->ddi_nevents = 0; + ddi->ddi_maxevents = countof(ddi->ddi_eventlist); + ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + + pthread_priority_t pp = _dispatch_get_priority(); + if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + // If this thread does not have the event manager flag set, don't setup + // as the dispatch manager and let the caller know to only process + // the delivered events. 
+ // + // Also add the NEEDS_UNBIND flag so that + // _dispatch_priority_compute_update knows it has to unbind + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + _dispatch_thread_setspecific(dispatch_priority_key, + (void *)(uintptr_t)pp); + ddi->ddi_stashed_pp = 0; + return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER; + } + + if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) || + !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + // When the phtread kext is delivering kevents to us, and pthread + // root queues are in use, then the pthread priority TSD is set + // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set. + // + // Given that this isn't a valid QoS we need to fixup the TSD, + // and the best option is to clear the qos/priority bits which tells + // us to not do any QoS related calls on this thread. + // + // However, in that case the manager thread is opted out of QoS, + // as far as pthread is concerned, and can't be turned into + // something else, so we can't stash. + pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK; + } + // Managers always park without mutating to a regular worker thread, and + // hence never need to unbind from userland, and when draining a manager, + // the NEEDS_UNBIND flag would cause the mutation to happen. 
+ // So we need to strip this flag + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + + // ensure kevents registered from this thread are registered at manager QoS + pthread_priority_t old_dp = _dispatch_set_defaultpriority( + (pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL); + _dispatch_queue_set_current(&_dispatch_mgr_q); + if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, + DISPATCH_INVOKE_STEALING, NULL) != owned) { + DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); + } + static int event_thread_init; + if (!event_thread_init) { + event_thread_init = 1; + _dispatch_event_init(); + } + return old_dp; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp) +{ + dispatch_queue_t dq = &_dispatch_mgr_q; + uint64_t orig_dq_state; + + _dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED, + &orig_dq_state); + _dispatch_reset_defaultpriority(old_dp); + _dispatch_queue_set_current(NULL); + return _dq_state_is_dirty(orig_dq_state); +} + +DISPATCH_NOINLINE +void +_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents) +{ + _dispatch_introspection_thread_add(); + + if (!events && !nevents) { + // events for worker thread request have already been delivered earlier + return; + } + + _dispatch_kevent_qos_s *ke = *events; + int n = *nevents; + if (!dispatch_assume(n) || !dispatch_assume(*events)) return; -#if DISPATCH_USE_MEMORYSTATUS_SOURCE -#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYSTATUS -#define DISPATCH_MEMORYSTATUS_SOURCE_MASK ( \ - DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL | \ - DISPATCH_MEMORYSTATUS_PRESSURE_WARN) + dispatch_deferred_items_s ddi; + pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi); + + _dispatch_deferred_items_set(&ddi); + for (int i = 0; i < n; i++) { + 
_dispatch_kevent_debug("received", ke); + _dispatch_kevent_drain(ke++); + } + + if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + _dispatch_mgr_queue_drain(); + bool poll = _dispatch_mgr_timers(); + if (_dispatch_kevent_worker_thread_reset(old_dp)) { + poll = true; + } + if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0); + } + _dispatch_deferred_items_set(NULL); + + if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) { + *nevents = 0; + if (ddi.ddi_nevents) { + _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); + } + ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq, + ddi.ddi_stashed_dou, ddi.ddi_stashed_pp); +#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN + } else if (ddi.ddi_nevents > *nevents) { + *nevents = 0; + _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); +#endif + } else { + *nevents = ddi.ddi_nevents; + dispatch_static_assert(__builtin_types_compatible_p(typeof(**events), + typeof(*ddi.ddi_eventlist))); + memcpy(*events, ddi.ddi_eventlist, + (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist)); + } +} +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + +#pragma mark - +#pragma mark dispatch_memorypressure + +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE +#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE +#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ + DISPATCH_MEMORYPRESSURE_NORMAL | \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) +#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ + DISPATCH_MEMORYPRESSURE_WARN | \ + DISPATCH_MEMORYPRESSURE_CRITICAL | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) #elif DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_MEMORYSTATUS_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM -#define DISPATCH_MEMORYSTATUS_SOURCE_MASK DISPATCH_VM_PRESSURE 
+#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM +#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE #endif -#if DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE -static dispatch_source_t _dispatch_memorystatus_source; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static dispatch_source_t _dispatch_memorypressure_source; static void -_dispatch_memorystatus_handler(void *context DISPATCH_UNUSED) +_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED) { -#if DISPATCH_USE_MEMORYSTATUS_SOURCE - unsigned long memorystatus; - memorystatus = dispatch_source_get_data(_dispatch_memorystatus_source); - if (memorystatus & DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL) { +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE + unsigned long memorypressure; + memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source); + + if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { + _dispatch_memory_warn = false; _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; - _voucher_activity_heap_pressure_normal(); - return; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_clear_bank_flags(_firehose_task_buffer, + FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } +#endif } - _dispatch_continuation_cache_limit = - DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYSTATUS_PRESSURE_WARN; - _voucher_activity_heap_pressure_warn(); + if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) { + _dispatch_memory_warn = true; + _dispatch_continuation_cache_limit = + DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN; +#if VOUCHER_USE_MACH_VOUCHER + if (_firehose_task_buffer) { + firehose_buffer_set_bank_flags(_firehose_task_buffer, + FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + } #endif + } + if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) { + malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK); + } +#elif DISPATCH_USE_VM_PRESSURE_SOURCE + // 
we must have gotten DISPATCH_VM_PRESSURE malloc_zone_pressure_relief(0,0); +#endif } static void -_dispatch_memorystatus_init(void) +_dispatch_memorypressure_init(void) { - _dispatch_memorystatus_source = dispatch_source_create( - DISPATCH_MEMORYSTATUS_SOURCE_TYPE, 0, - DISPATCH_MEMORYSTATUS_SOURCE_MASK, + _dispatch_memorypressure_source = dispatch_source_create( + DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0, + DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true)); - dispatch_source_set_event_handler_f(_dispatch_memorystatus_source, - _dispatch_memorystatus_handler); - dispatch_resume(_dispatch_memorystatus_source); + dispatch_source_set_event_handler_f(_dispatch_memorypressure_source, + _dispatch_memorypressure_handler); + dispatch_activate(_dispatch_memorypressure_source); } #else -static inline void _dispatch_memorystatus_init(void) {} -#endif // DISPATCH_USE_MEMORYSTATUS_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE +static inline void _dispatch_memorypressure_init(void) {} +#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE #pragma mark - #pragma mark dispatch_mach @@ -2616,9 +3263,6 @@ static inline void _dispatch_memorystatus_init(void) {} #define _DISPATCH_MACHPORT_HASH(x) \ _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) -#ifndef MACH_RCV_LARGE_IDENTITY -#define MACH_RCV_LARGE_IDENTITY 0x00000008 -#endif #ifndef MACH_RCV_VOUCHER #define MACH_RCV_VOUCHER 0x00000800 #endif @@ -2629,28 +3273,27 @@ static inline void _dispatch_memorystatus_init(void) {} MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \ MACH_RCV_VOUCHER -#define DISPATCH_MACH_KEVENT_ARMED(dk) ((dk)->dk_kevent.ext[0]) +#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0]) -static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr); -static void 
_dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr); +static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr); static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, dispatch_kevent_t dk, - mach_msg_header_t *hdr, mach_msg_size_t siz); + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, + mach_msg_size_t siz); static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, bool disconnected); -static void _dispatch_mach_kevent_unregister(dispatch_mach_t dm); -static inline void _dispatch_mach_msg_set_options(dispatch_object_t dou, - mach_msg_option_t options); + dispatch_mach_reply_refs_t dmr, unsigned int options); +static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_msg_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_msg_header_t *hdr, - mach_msg_size_t siz); -static void _dispatch_mach_merge_kevent(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr, mach_msg_size_t siz); +static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, const _dispatch_kevent_qos_s *ke); static inline mach_msg_option_t _dispatch_mach_checkin_options(void); @@ -2658,38 +3301,71 @@ static const size_t _dispatch_mach_recv_msg_size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; static const size_t dispatch_mach_trailer_size = sizeof(dispatch_mach_trailer_t); -static mach_msg_size_t _dispatch_mach_recv_msg_buf_size; -static mach_port_t _dispatch_mach_portset, 
_dispatch_mach_recv_portset; static mach_port_t _dispatch_mach_notify_port; -static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RCV_OPTIONS, -}; static dispatch_source_t _dispatch_mach_notify_source; + +static inline void* +_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke) +{ + return (void*)ke->ext[0]; +} + +static inline mach_msg_size_t +_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke) +{ + // buffer size in the successful receive case, but message size (like + // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. + return (mach_msg_size_t)ke->ext[1]; +} + +static void +_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds, + dispatch_source_type_t type DISPATCH_UNUSED, + uintptr_t handle DISPATCH_UNUSED, + unsigned long mask DISPATCH_UNUSED, + dispatch_queue_t q DISPATCH_UNUSED) +{ + ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (_dispatch_evfilt_machport_direct_enabled) return; + ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + ds->ds_is_direct_kevent = false; +#endif +} + static const struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { .ke = { .filter = EVFILT_MACHPORT, - .flags = EV_CLEAR, - .fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT, + .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, + .fflags = DISPATCH_MACH_RCV_OPTIONS, }, + .init = _dispatch_source_type_mach_recv_direct_init, +}; + +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK +static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; +static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { + .filter = EVFILT_MACHPORT, + .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, + .fflags = DISPATCH_MACH_RCV_OPTIONS, }; static void _dispatch_mach_recv_msg_buf_init(void) { + if 
(_dispatch_evfilt_machport_direct_enabled) return; mach_vm_size_t vm_size = mach_vm_round_page( _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); - _dispatch_mach_recv_msg_buf_size = (mach_msg_size_t)vm_size; mach_vm_address_t vm_addr = vm_page_size; kern_return_t kr; while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, VM_FLAGS_ANYWHERE))) { if (kr != KERN_NO_SPACE) { - (void)dispatch_assume_zero(kr); - DISPATCH_CLIENT_CRASH("Could not allocate mach msg receive buffer"); + DISPATCH_CLIENT_CRASH(kr, + "Could not allocate mach msg receive buffer"); } _dispatch_temporary_resource_shortage(); vm_addr = vm_page_size; @@ -2697,13 +3373,78 @@ _dispatch_mach_recv_msg_buf_init(void) _dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; _dispatch_mach_recv_kevent.ext[1] = vm_size; } +#endif -static inline void* -_dispatch_get_mach_recv_msg_buf(void) +DISPATCH_NOINLINE +static void +_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds, + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr) +{ + dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); + dispatch_queue_t cq = _dispatch_queue_get_current(); + + // see firehose_client_push_notify_async + _dispatch_queue_set_current(ds->_as_dq); + dc->dc_func(hdr); + _dispatch_queue_set_current(cq); + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { + free(hdr); + } +} + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc) +{ + dispatch_source_t ds; + ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, + recvp, 0, &_dispatch_mgr_q); + os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], + (dispatch_continuation_t)dc, relaxed); + return ds; +} + +static void +_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr; +#if HAVE_MACH_PORT_CONSTRUCT + mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; +#ifdef __LP64__ + const 
mach_port_context_t guard = 0xfeed09071f1ca7edull; +#else + const mach_port_context_t guard = 0xff1ca7edull; +#endif + kr = mach_port_construct(mach_task_self(), &opts, guard, + &_dispatch_mach_notify_port); +#else + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); +#endif + DISPATCH_VERIFY_MIG(kr); + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_construct() failed: cannot create receive right"); + } + + static const struct dispatch_continuation_s dc = { + .dc_func = (void*)_dispatch_mach_notify_source_invoke, + }; + _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( + _dispatch_mach_notify_port, &dc); + dispatch_assert(_dispatch_mach_notify_source); + dispatch_activate(_dispatch_mach_notify_source); +} + +static mach_port_t +_dispatch_get_mach_notify_port(void) { - return (void*)_dispatch_mach_recv_kevent.ext[0]; + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); + return _dispatch_mach_notify_port; } +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static void _dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) { @@ -2712,34 +3453,22 @@ _dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_recv_portset); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "mach_port_allocate() failed: cannot create port set"); } - dispatch_assert(_dispatch_get_mach_recv_msg_buf()); + _dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent; + dispatch_assert(_dispatch_kevent_mach_msg_buf(ke)); dispatch_assert(dispatch_mach_trailer_size == REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( DISPATCH_MACH_RCV_TRAILER))); - _dispatch_mach_recv_kevent.ident = _dispatch_mach_recv_portset; - _dispatch_kq_update(&_dispatch_mach_recv_kevent); - - kr = 
mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_mach_notify_port); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( - "mach_port_allocate() failed: cannot create receive right"); + ke->ident = _dispatch_mach_recv_portset; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; } - _dispatch_mach_notify_source = dispatch_source_create( - &_dispatch_source_type_mach_recv_direct, - _dispatch_mach_notify_port, 0, &_dispatch_mgr_q); - static const struct dispatch_continuation_s dc = { - .dc_func = (void*)_dispatch_mach_notify_source_invoke, - }; - _dispatch_mach_notify_source->ds_refs->ds_handler[DS_EVENT_HANDLER] = - (dispatch_continuation_t)&dc; - dispatch_assert(_dispatch_mach_notify_source); - dispatch_resume(_dispatch_mach_notify_source); +#endif + _dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent); } static mach_port_t @@ -2757,17 +3486,23 @@ _dispatch_mach_portset_init(void *context DISPATCH_UNUSED) .filter = EVFILT_MACHPORT, .flags = EV_ADD, }; +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } +#endif + kern_return_t kr; kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &_dispatch_mach_portset); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH( + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "mach_port_allocate() failed: cannot create port set"); } kev.ident = _dispatch_mach_portset; - _dispatch_kq_update(&kev); + _dispatch_kq_immediate_update(&kev); } static mach_port_t @@ -2810,22 +3545,6 @@ _dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) return mps ? 
kr : 0; } -static void -_dispatch_kevent_mach_recv_reenable(_dispatch_kevent_qos_s *ke DISPATCH_UNUSED) -{ -#if (TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 1090) || \ - (!TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090) - // delete and re-add kevent to workaround - if (ke->ext[1] != _dispatch_mach_recv_kevent.ext[1]) { - _dispatch_kevent_qos_s kev = _dispatch_mach_recv_kevent; - kev.flags = EV_DELETE; - _dispatch_kq_update(&kev); - } -#endif - _dispatch_mgr_kevent_reenable(&_dispatch_mach_recv_kevent); -} - static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, uint32_t del_flags) @@ -2848,6 +3567,7 @@ _dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, } return kr; } +#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, @@ -2871,23 +3591,11 @@ _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, return kr; } -static inline void -_dispatch_kevent_mach_portset(_dispatch_kevent_qos_s *ke) -{ - if (ke->ident == _dispatch_mach_recv_portset) { - return _dispatch_kevent_mach_msg_drain(ke); - } else if (ke->ident == _dispatch_mach_portset) { - return _dispatch_kevent_machport_drain(ke); - } else { - return _dispatch_kevent_error(ke); - } -} - +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK DISPATCH_NOINLINE static void _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) { - _dispatch_kevent_debug(ke, __func__); mach_port_t name = (mach_port_name_t)ke->data; dispatch_kevent_t dk; @@ -2905,92 +3613,57 @@ _dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) .fflags = DISPATCH_MACH_RECV_MESSAGE, .udata = (uintptr_t)dk, }; - _dispatch_kevent_debug(&kev, __func__); + _dispatch_kevent_debug("synthetic", &kev); _dispatch_kevent_merge(&kev); } +#endif DISPATCH_NOINLINE static void _dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) 
{ - _dispatch_kevent_debug(ke, __func__); - mach_msg_header_t *hdr = (mach_msg_header_t*)ke->ext[0]; - mach_msg_size_t siz, msgsiz; + mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - _dispatch_kevent_mach_recv_reenable(ke); - if (!dispatch_assume(hdr)) { - DISPATCH_CRASH("EVFILT_MACHPORT with no message"); + if (!fastpath(hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); } if (fastpath(!kr)) { - return _dispatch_kevent_mach_msg_recv(hdr); + _dispatch_kevent_mach_msg_recv(ke, hdr); + goto out; } else if (kr != MACH_RCV_TOO_LARGE) { goto out; + } else if (!ke->data) { + DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); } - if (!dispatch_assume(ke->ext[1] <= UINT_MAX - - dispatch_mach_trailer_size)) { - DISPATCH_CRASH("EVFILT_MACHPORT with overlarge message"); + if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) { + DISPATCH_INTERNAL_CRASH(ke->ext[1], + "EVFILT_MACHPORT with overlarge message"); } - siz = (mach_msg_size_t)ke->ext[1] + dispatch_mach_trailer_size; + siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size; hdr = malloc(siz); - if (ke->data) { - if (!dispatch_assume(hdr)) { - // Kernel will discard message too large to fit - hdr = _dispatch_get_mach_recv_msg_buf(); - siz = _dispatch_mach_recv_msg_buf_size; - } - mach_port_t name = (mach_port_name_t)ke->data; - const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (fastpath(!kr)) { - return _dispatch_kevent_mach_msg_recv(hdr); - } else if (kr == MACH_RCV_TOO_LARGE) { - _dispatch_log("BUG in libdispatch client: " - "_dispatch_kevent_mach_msg_drain: dropped message too " - "large to fit in memory: id = 0x%x, size = %lld", - hdr->msgh_id, ke->ext[1]); - kr = MACH_MSG_SUCCESS; - } - } else { - // We don't 
know which port in the portset contains the large message, - // so need to receive all messages pending on the portset to ensure the - // large message is drained. - bool received = false; - for (;;) { - if (!dispatch_assume(hdr)) { - DISPATCH_CLIENT_CRASH("Message too large to fit in memory"); - } - const mach_msg_option_t options = (DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT); - kr = mach_msg(hdr, options, 0, siz, _dispatch_mach_recv_portset, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); - if ((!kr || kr == MACH_RCV_TOO_LARGE) && !dispatch_assume( - hdr->msgh_size <= UINT_MAX - dispatch_mach_trailer_size)) { - DISPATCH_CRASH("Overlarge message"); - } - if (fastpath(!kr)) { - msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; - if (msgsiz < siz) { - void *shrink = realloc(hdr, msgsiz); - if (shrink) hdr = shrink; - } - _dispatch_kevent_mach_msg_recv(hdr); - hdr = NULL; - received = true; - } else if (kr == MACH_RCV_TOO_LARGE) { - siz = hdr->msgh_size + dispatch_mach_trailer_size; - } else { - if (kr == MACH_RCV_TIMED_OUT && received) { - kr = MACH_MSG_SUCCESS; - } - break; - } - hdr = reallocf(hdr, siz); - } + if (!dispatch_assume(hdr)) { + // Kernel will discard message too large to fit + hdr = NULL; + siz = 0; } - if (hdr != _dispatch_get_mach_recv_msg_buf()) { + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (fastpath(!kr)) { + _dispatch_kevent_mach_msg_recv(ke, hdr); + goto out; + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", + hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); + kr = MACH_MSG_SUCCESS; + } + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { free(hdr); } out: @@ -3000,8 +3673,57 @@ 
_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) } } +DISPATCH_NOINLINE static void -_dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) +_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke) +{ + if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) { +#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK + if (ke->ident == _dispatch_mach_recv_portset) { + _dispatch_kevent_mach_msg_drain(ke); + return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent); + } else if (ke->ident == _dispatch_mach_portset) { + return _dispatch_kevent_machport_drain(ke); + } +#endif + return _dispatch_kevent_error(ke); + } + + dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata; + dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); + bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT); + dispatch_source_t ds = _dispatch_source_from_refs(dr); + + if (_dispatch_kevent_mach_msg_size(ke)) { + _dispatch_kevent_mach_msg_drain(ke); + if (is_reply) { + // _dispatch_kevent_mach_msg_drain() should have deleted this event + dispatch_assert(ke->flags & EV_DELETE); + return; + } + + if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + // re-arm the mach channel + ke->fflags = DISPATCH_MACH_RCV_OPTIONS; + ke->data = 0; + ke->ext[0] = 0; + ke->ext[1] = 0; + return _dispatch_kq_deferred_update(ke); + } + } else if (is_reply) { + DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event"); + } + if (unlikely((ke->flags & EV_VANISHED) && + (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) { + DISPATCH_CLIENT_CRASH(ke->flags, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + return _dispatch_kevent_merge(ke); +} + +static void +_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr) { dispatch_source_refs_t dri; dispatch_kevent_t dk; @@ -3012,38 +3734,42 @@ _dispatch_kevent_mach_msg_recv(mach_msg_header_t *hdr) dispatch_mach_trailer_size)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received overlarge message"); - return 
_dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } if (!dispatch_assume(name)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with MACH_PORT_NULL port"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + if (ke->flags & EV_UDATA_SPECIFIC) { + dk = (void*)ke->udata; + } else { + dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); + } if (!dispatch_assume(dk)) { _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with unknown kevent"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } - _dispatch_kevent_debug(&dk->dk_kevent, __func__); TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - return _dispatch_source_merge_mach_msg(dsi, dri, dk, hdr, siz); + return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz); } } _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " "received message with no listeners"); - return _dispatch_kevent_mach_msg_destroy(hdr); + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } static void -_dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) +_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr) { if (hdr) { mach_msg_destroy(hdr); - if (hdr != _dispatch_get_mach_recv_msg_buf()) { + if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { free(hdr); } } @@ -3051,17 +3777,17 @@ _dispatch_kevent_mach_msg_destroy(mach_msg_header_t *hdr) static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, - dispatch_kevent_t dk, mach_msg_header_t *hdr, mach_msg_size_t siz) + dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke, + mach_msg_header_t *hdr, mach_msg_size_t siz) { - 
if (ds == _dispatch_mach_notify_source) { - _dispatch_mach_notify_source_invoke(hdr); - return _dispatch_kevent_mach_msg_destroy(hdr); + if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) { + return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr); } dispatch_mach_reply_refs_t dmr = NULL; - if (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) { + if (dk->dk_kevent.flags & EV_ONESHOT) { dmr = (dispatch_mach_reply_refs_t)dr; } - return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, hdr, siz); + return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz); } DISPATCH_NOINLINE @@ -3093,14 +3819,14 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) // Re-register for notification before delivery unreg = _dispatch_kevent_resume(dk, flag, 0); } - DISPATCH_MACH_KEVENT_ARMED(dk) = 0; + DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0; TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { dispatch_source_t dsi = _dispatch_source_from_refs(dri); if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { dispatch_mach_t dm = (dispatch_mach_t)dsi; - _dispatch_mach_merge_kevent(dm, &kev); + _dispatch_mach_merge_notification_kevent(dm, &kev); if (unreg && dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } } else { _dispatch_source_merge_kevent(dsi, &kev); @@ -3108,7 +3834,7 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) _dispatch_source_kevent_unregister(dsi); } } - if (!dr_next || DISPATCH_MACH_KEVENT_ARMED(dk)) { + if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) { // current merge is last in list (dk might have been freed) // or it re-armed the notification return; @@ -3131,24 +3857,22 @@ _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, _dispatch_debug_machport(port); if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - // initialize _dispatch_mach_notify_port: - (void)_dispatch_get_mach_recv_portset(); 
_dispatch_debug("machport[0x%08x]: registering for send-possible " "notification", port); previous = MACH_PORT_NULL; krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_mach_notify_port, + notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); DISPATCH_VERIFY_MIG(krr); switch(krr) { case KERN_INVALID_NAME: case KERN_INVALID_RIGHT: - // Supress errors & clear registration state + // Suppress errors & clear registration state dk->dk_kevent.data &= ~mask; break; default: - // Else, we dont expect any errors from mach. Log any errors + // Else, we don't expect any errors from mach. Log any errors if (dispatch_assume_zero(krr)) { // log the error & clear registration state dk->dk_kevent.data &= ~mask; @@ -3198,10 +3922,18 @@ _dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, static void _dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) { - (void)_dispatch_get_mach_recv_portset(); + static int notify_type = HOST_NOTIFY_CALENDAR_SET; + kern_return_t kr; _dispatch_debug("registering for calendar-change notification"); - kern_return_t kr = host_request_notification(_dispatch_get_mach_host_port(), - HOST_NOTIFY_CALENDAR_CHANGE, _dispatch_mach_notify_port); +retry: + kr = host_request_notification(_dispatch_get_mach_host_port(), + notify_type, _dispatch_get_mach_notify_port()); + // Fallback when missing support for newer _SET variant, fires strictly more. 
+ if (kr == KERN_INVALID_ARGUMENT && + notify_type != HOST_NOTIFY_CALENDAR_CHANGE){ + notify_type = HOST_NOTIFY_CALENDAR_CHANGE; + goto retry; + } DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); } @@ -3221,8 +3953,9 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); - if (!success && reply.RetCode == MIG_BAD_ID && hdr->msgh_id == 950) { - // host_notify_reply.defs: host_calendar_changed + if (!success && reply.RetCode == MIG_BAD_ID && + (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || + hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { _dispatch_debug("calendar-change notification"); _dispatch_timers_calendar_change(); _dispatch_mach_host_notify_update(NULL); @@ -3232,6 +3965,9 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { (void)dispatch_assume_zero(reply.RetCode); } + if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { + mach_msg_destroy(hdr); + } } kern_return_t @@ -3282,20 +4018,41 @@ _dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, #pragma mark - #pragma mark dispatch_mach_t -#define DISPATCH_MACH_NEVER_CONNECTED (UINT32_MAX/2) +#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 #define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 +#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 #define DISPATCH_MACH_OPTIONS_MASK 0xffff +#define DM_SEND_STATUS_SUCCESS 0x1 +#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 + +DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, + DM_SEND_INVOKE_NONE = 0x0, + DM_SEND_INVOKE_FLUSH = 0x1, + DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, + DM_SEND_INVOKE_CANCEL = 0x4, + DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, + DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, 
+); +#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ + ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) + +static inline pthread_priority_t _dispatch_mach_priority_propagate( + mach_msg_option_t options); static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, mach_port_t remote_port); +static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port); static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( dispatch_object_t dou, dispatch_mach_reply_refs_t dmr); static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou); static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( dispatch_mach_msg_t dmsg); -static void _dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, +static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, pthread_priority_t pp); static dispatch_mach_t @@ -3307,13 +4064,10 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm = _dispatch_alloc(DISPATCH_VTABLE(mach), sizeof(struct dispatch_mach_s)); - _dispatch_queue_init((dispatch_queue_t)dm); - dm->dq_label = label; + _dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true); + dm->dq_label = label; dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds - dm->do_ref_cnt++; // since channel is created suspended - dm->do_suspend_cnt = DISPATCH_OBJECT_SUSPEND_INTERVAL; - dm->do_targetq = &_dispatch_mgr_q; dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); dr->dr_source_wref = _dispatch_ptr2wref(dm); @@ -3328,8 +4082,12 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; TAILQ_INIT(&dm->dm_refs->dm_replies); - // 
First item on the channel sets the user-specified target queue - dispatch_set_target_queue(dm, q); + if (slowpath(!q)) { + q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + } else { + _dispatch_retain(q); + } + dm->do_targetq = q; _dispatch_object_debug(dm, "%s", __func__); return dm; } @@ -3360,7 +4118,7 @@ _dispatch_mach_dispose(dispatch_mach_t dm) } free(dr); free(dm->dm_refs); - _dispatch_queue_destroy(dm); + _dispatch_queue_destroy(dm->_as_dq); } void @@ -3369,98 +4127,273 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, { dispatch_mach_send_refs_t dr = dm->dm_refs; dispatch_kevent_t dk; + uint32_t disconnect_cnt; + dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; + dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled; if (MACH_PORT_VALID(receive)) { dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; + dk->dk_kevent = type->ke; dk->dk_kevent.ident = receive; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED; dk->dk_kevent.udata = (uintptr_t)dk; TAILQ_INIT(&dk->dk_sources); dm->ds_dkev = dk; - dm->ds_pending_data_mask = dk->dk_kevent.fflags; + dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + dm->ds_needs_rearm = dm->ds_is_direct_kevent; + if (!dm->ds_is_direct_kevent) { + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; + dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + } _dispatch_retain(dm); // the reference the manager queue holds } dr->dm_send = send; if (MACH_PORT_VALID(send)) { if (checkin) { dispatch_retain(checkin); - mach_msg_option_t options = _dispatch_mach_checkin_options(); - _dispatch_mach_msg_set_options(checkin, options); + checkin->dmsg_options = _dispatch_mach_checkin_options(); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } - dr->dm_checkin = checkin; + dr->dm_checkin = checkin; + } + // monitor 
message reply ports + dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == + DISPATCH_MACH_NEVER_INSTALLED); + disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release); + if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_activate(dm); +} + +// assumes low bit of mach port names is always set +#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u + +static inline void +_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; +} + +static inline bool +_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = dmr->dmr_reply; + return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; +} + +static inline mach_port_t +_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = dmr->dmr_reply; + return reply_port ? 
(reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; +} + +static inline bool +_dispatch_mach_reply_tryremove(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr) +{ + bool removed; + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + } + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + return removed; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, unsigned int options) +{ + dispatch_mach_msg_t dmsgr = NULL; + bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); + if (options & DKEV_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); } - // monitor message reply ports - dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - if (slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_disconnect_cnt, - DISPATCH_MACH_NEVER_CONNECTED, 0, release))) { - DISPATCH_CLIENT_CRASH("Channel already connected"); + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; } - _dispatch_object_debug(dm, "%s", __func__); - return dispatch_resume(dm); + _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", + _dispatch_mach_reply_get_reply_port(dmr), + disconnected ? 
" (disconnected)" : "", dmr->dmr_ctxt); + if (dmsgr) { + return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + } + dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP)); } DISPATCH_NOINLINE static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, bool disconnected) + dispatch_mach_reply_refs_t dmr, unsigned int options) { dispatch_mach_msg_t dmsgr = NULL; + bool replies_empty = false; + bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); + if (options & DKEV_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + } if (disconnected) { dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; } + uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; dispatch_kevent_t dk = dmr->dmr_dkev; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - _dispatch_kevent_unregister(dk, DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE, 0); - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - if (dmr->dmr_voucher) _voucher_release(dmr->dmr_voucher); + _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", + (mach_port_t)dk->dk_kevent.ident, + disconnected ? 
" (disconnected)" : "", dmr->dmr_ctxt); + if (!dm->ds_is_direct_kevent) { + dmr->dmr_dkev = NULL; + TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_kevent_unregister(dk, flags, 0); + } else { + long r = _dispatch_kevent_unregister(dk, flags, options); + if (r == EINPROGRESS) { + _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", + (mach_port_t)dk->dk_kevent.ident, dk); + dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED); + // dmr must be put back so that the event delivery finds it, the + // replies lock is held by the caller. + TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list); + if (dmsgr) { + dmr->dmr_voucher = dmsgr->dmsg_voucher; + dmsgr->dmsg_voucher = NULL; + dispatch_release(dmsgr); + } + return; // deferred unregistration + } + dispatch_assume_zero(r); + dmr->dmr_dkev = NULL; + _TAILQ_TRASH_ENTRY(dmr, dr_list); + } free(dmr); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); + if (dmsgr) { + return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + } + if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty && + (dm->dm_refs->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) +{ + dmr->dr_source_wref = _dispatch_ptr2wref(dm); + dmr->dmr_dkev = NULL; + dmr->dmr_reply = reply_port; + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_mach_reply_mark_reply_port_owned(dmr); + } else { + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + } + + _dispatch_debug("machport[0x%08x]: registering for sync reply, 
ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); } DISPATCH_NOINLINE static void -_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply, +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, dispatch_mach_msg_t dmsg) { dispatch_kevent_t dk; dispatch_mach_reply_refs_t dmr; + dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; + pthread_priority_t mp, pp; dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_recv_direct.ke; - dk->dk_kevent.ident = reply; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent = type->ke; + dk->dk_kevent.ident = reply_port; + dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT; dk->dk_kevent.udata = (uintptr_t)dk; TAILQ_INIT(&dk->dk_sources); + if (!dm->ds_is_direct_kevent) { + dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; + dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); + } dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); dmr->dr_source_wref = _dispatch_ptr2wref(dm); dmr->dmr_dkev = dk; + dmr->dmr_reply = reply_port; if (dmsg->dmsg_voucher) { - dmr->dmr_voucher =_voucher_retain(dmsg->dmsg_voucher); + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); } - dmr->dmr_priority = dmsg->dmsg_priority; + dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; // make reply context visible to leaks rdar://11777199 dmr->dmr_ctxt = dmsg->do_ctxt; - _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply, - dmsg->do_ctxt); + pp = dm->dq_priority & 
~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp && dm->ds_is_direct_kevent) { + mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (pp < mp) pp = mp; + pp |= dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + } else { + pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + } + + _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", + reply_port, dmsg->do_ctxt); uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, &flags); + bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags); TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr, dr_list); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); + } TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) { - _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + return _dispatch_mach_reply_kevent_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE); } } DISPATCH_NOINLINE static void -_dispatch_mach_kevent_unregister(dispatch_mach_t dm) +_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) { + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); dispatch_kevent_t dk = dm->dm_dkev; dm->dm_dkev = NULL; TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, @@ -3473,8 +4406,9 @@ _dispatch_mach_kevent_unregister(dispatch_mach_t dm) DISPATCH_NOINLINE static void -_dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) +_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) { + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); dispatch_kevent_t dk; dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); @@ -3488,49 +4422,103 @@ _dispatch_mach_kevent_register(dispatch_mach_t dm, mach_port_t send) 
dm->ds_pending_data_mask |= dk->dk_kevent.fflags; uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dk, &flags); + bool do_resume = _dispatch_kevent_register(&dk, + _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags); TAILQ_INSERT_TAIL(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, dr_list); dm->dm_dkev = dk; if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } } -static inline void -_dispatch_mach_push(dispatch_object_t dm, dispatch_object_t dou, - pthread_priority_t pp) +static mach_port_t +_dispatch_get_thread_reply_port(void) { - return _dispatch_queue_push(dm._dq, dou, pp); + mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + reply_port = mrp; + _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", + reply_port); + } else { + reply_port = mach_reply_port(); + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", + reply_port); + } + _dispatch_debug_machport(reply_port); + return reply_port; } -static inline void -_dispatch_mach_msg_set_options(dispatch_object_t dou, mach_msg_option_t options) +static void +_dispatch_clear_thread_reply_port(mach_port_t reply_port) +{ + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (reply_port != mrp) { + if (mrp) { + _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " + "port (found 0x%08x)", reply_port, mrp); + } + return; + } + _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", + reply_port); +} + +static void +_dispatch_set_thread_reply_port(mach_port_t reply_port) { - dou._do->do_suspend_cnt = (unsigned int)options; + _dispatch_debug_machport(reply_port); + mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); + if (mrp) { + kern_return_t kr = 
mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + _dispatch_debug("machport[0x%08x]: deallocated sync reply port " + "(found 0x%08x)", reply_port, mrp); + } else { + _dispatch_set_thread_mig_reply_port(reply_port); + _dispatch_debug("machport[0x%08x]: restored thread sync reply port", + reply_port); + } } -static inline mach_msg_option_t -_dispatch_mach_msg_get_options(dispatch_object_t dou) +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) { - mach_msg_option_t options = (mach_msg_option_t)dou._do->do_suspend_cnt; - return options; + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t local = hdr->msgh_local_port; + if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != + MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; + return local; } static inline void -_dispatch_mach_msg_set_reason(dispatch_object_t dou, mach_error_t err, +_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, unsigned long reason) { dispatch_assert_zero(reason & ~(unsigned long)code_emask); - dou._do->do_suspend_cnt = (unsigned int)((err || !reason) ? err : + dmsg->dmsg_error = ((err || !reason) ? 
err : err_local|err_sub(0x3e0)|(mach_error_t)reason); } static inline unsigned long -_dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) +_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) { - mach_error_t err = (mach_error_t)dou._do->do_suspend_cnt; - dou._do->do_suspend_cnt = 0; + mach_error_t err = dmsg->dmsg_error; + + dmsg->dmsg_error = 0; if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { *err_ptr = 0; return err_get_code(err); @@ -3541,13 +4529,16 @@ _dispatch_mach_msg_get_reason(dispatch_object_t dou, mach_error_t *err_ptr) static void _dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, - mach_msg_header_t *hdr, mach_msg_size_t siz) + _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz) { _dispatch_debug_machport(hdr->msgh_remote_port); _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); - if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { - return _dispatch_kevent_mach_msg_destroy(hdr); + bool canceled = (dm->dq_atomic_flags & DSF_CANCELED); + if (!dmr && canceled) { + // message received after cancellation, _dispatch_mach_kevent_merge is + // responsible for mach channel source state (e.g. 
deferred deletion) + return _dispatch_kevent_mach_msg_destroy(ke, hdr); } dispatch_mach_msg_t dmsg; voucher_t voucher; @@ -3559,31 +4550,166 @@ _dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, dmr->dmr_voucher = NULL; // transfer reference priority = dmr->dmr_priority; ctxt = dmr->dmr_ctxt; - _dispatch_mach_reply_kevent_unregister(dm, dmr, false); + unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE; + options |= DKEV_UNREGISTER_REPLY_REMOVE; + options |= DKEV_UNREGISTER_WAKEUP; + if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED; + _dispatch_mach_reply_kevent_unregister(dm, dmr, options); + ke->flags |= EV_DELETE; // remember that unregister deleted the event + if (canceled) return; } else { voucher = voucher_create_with_mach_msg(hdr); priority = _voucher_get_priority(voucher); } dispatch_mach_msg_destructor_t destructor; - destructor = (hdr == _dispatch_get_mach_recv_msg_buf()) ? + destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ? DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : DISPATCH_MACH_MSG_DESTRUCTOR_FREE; dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (hdr == _dispatch_kevent_mach_msg_buf(ke)) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); + } dmsg->dmsg_voucher = voucher; dmsg->dmsg_priority = priority; dmsg->do_ctxt = ctxt; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } -static inline mach_port_t -_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +DISPATCH_ALWAYS_INLINE +static inline dispatch_mach_msg_t +_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port) +{ + if (slowpath(!MACH_PORT_VALID(reply_port))) { + 
DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); + } + void *ctxt = dmr->dmr_ctxt; + mach_msg_header_t *hdr, *hdr2 = NULL; + void *hdr_copyout_addr; + mach_msg_size_t siz, msgsiz = 0; + mach_msg_return_t kr; + mach_msg_option_t options; + siz = mach_vm_round_page(_dispatch_mach_recv_msg_size + + dispatch_mach_trailer_size); + hdr = alloca(siz); + for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); + p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); +retry: + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, + (options & MACH_RCV_TIMEOUT) ? "poll" : "wait"); + kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + hdr_copyout_addr = hdr; + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " + "returned: %s - 0x%x", reply_port, siz, options, + mach_error_string(kr), kr); + switch (kr) { + case MACH_RCV_TOO_LARGE: + if (!fastpath(hdr->msgh_size <= UINT_MAX - + dispatch_mach_trailer_size)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); + } + if (options & MACH_RCV_LARGE) { + msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; + hdr2 = malloc(msgsiz); + if (dispatch_assume(hdr2)) { + hdr = hdr2; + siz = msgsiz; + } + options |= MACH_RCV_TIMEOUT; + options &= ~MACH_RCV_LARGE; + goto retry; + } + _dispatch_log("BUG in libdispatch client: " + "dispatch_mach_send_and_wait_for_reply: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, + hdr->msgh_size); + break; + case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 + case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 + case MACH_RCV_PORT_DIED: + // channel was disconnected/canceled and reply port destroyed + _dispatch_debug("machport[0x%08x]: sync reply 
port destroyed, ctxt %p: " + "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); + goto out; + case MACH_MSG_SUCCESS: + if (hdr->msgh_remote_port) { + _dispatch_debug_machport(hdr->msgh_remote_port); + } + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " + "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, + hdr->msgh_size, hdr->msgh_remote_port); + siz = hdr->msgh_size + dispatch_mach_trailer_size; + if (hdr2 && siz < msgsiz) { + void *shrink = realloc(hdr2, msgsiz); + if (shrink) hdr = hdr2 = shrink; + } + break; + default: + dispatch_assume_zero(kr); + break; + } + _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); + hdr->msgh_local_port = MACH_PORT_NULL; + if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { + if (!kr) mach_msg_destroy(hdr); + goto out; + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor = (!hdr2) ? + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!hdr2 || hdr != hdr_copyout_addr) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr_copyout_addr, (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); + } + dmsg->do_ctxt = ctxt; + return dmsg; +out: + free(hdr2); + return NULL; +} + +static inline void +_dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port) { - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t remote = hdr->msgh_remote_port; - return remote; + bool removed = _dispatch_mach_reply_tryremove(dm, dmr); + if (!MACH_PORT_VALID(local_port) || !removed) { + // port moved/destroyed during receive, or reply waiter was never + // registered or already removed (disconnected) + return; + } + mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr); + _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", + reply_port, dmr->dmr_ctxt); + if 
(_dispatch_mach_reply_is_reply_port_owned(dmr)) { + _dispatch_set_thread_reply_port(reply_port); + if (local_port != reply_port) { + DISPATCH_CLIENT_CRASH(local_port, + "Reply received on unexpected port"); + } + return; + } + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + hdr->msgh_local_port = local_port; + dmsg->dmsg_voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + dmsg->dmsg_priority = dmr->dmr_priority; + dmsg->do_ctxt = dmr->dmr_ctxt; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } static inline void @@ -3597,7 +4723,9 @@ _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, if (local_port) hdr->msgh_local_port = local_port; if (remote_port) hdr->msgh_remote_port = remote_port; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); - return _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); + _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? + local_port : remote_port, local_port ? "receive" : "send"); + return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); } static inline dispatch_mach_msg_t @@ -3605,25 +4733,56 @@ _dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, dispatch_mach_reply_refs_t dmr) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - if (dmsg && !dmsg->dmsg_reply) return NULL; + mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : + _dispatch_mach_reply_get_reply_port(dmr); + voucher_t v; + + if (!reply_port) { + if (!dmsg) { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + if (v) _voucher_release(v); + } + return NULL; + } + + if (dmsg) { + v = dmsg->dmsg_voucher; + if (v) _voucher_retain(v); + } else { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } + + if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && + (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || + (dmr && !dmr->dmr_dkev && + _dispatch_mach_reply_is_reply_port_owned(dmr))) { + if (v) _voucher_release(v); + // deallocate owned reply port to break _dispatch_mach_msg_reply_recv + // out of waiting in mach_msg(MACH_RCV_MSG) + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + return NULL; + } + mach_msg_header_t *hdr; dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + dmsgr->dmsg_voucher = v; + hdr->msgh_local_port = reply_port; if (dmsg) { - hdr->msgh_local_port = dmsg->dmsg_reply; - if (dmsg->dmsg_voucher) { - dmsgr->dmsg_voucher = _voucher_retain(dmsg->dmsg_voucher); - } dmsgr->dmsg_priority = dmsg->dmsg_priority; dmsgr->do_ctxt = dmsg->do_ctxt; } else { - hdr->msgh_local_port = (mach_port_t)dmr->dmr_dkev->dk_kevent.ident; - dmsgr->dmsg_voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference dmsgr->dmsg_priority = dmr->dmr_priority; dmsgr->do_ctxt = dmr->dmr_ctxt; } _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED); + _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", + hdr->msgh_local_port, dmsgr->do_ctxt); return dmsgr; } @@ -3632,52 +4791,69 @@ static void _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + mach_msg_header_t *msg = 
_dispatch_mach_msg_get_msg(dmsg); + mach_msg_option_t msg_opts = dmsg->dmsg_options; + _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " + "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", + msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); + unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? + 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_NOT_SENT); - _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); + _dispatch_mach_msg_set_reason(dmsg, 0, reason); + _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); + if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); } DISPATCH_NOINLINE -static dispatch_object_t -_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) +static uint32_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, pthread_priority_t pp, + dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dr = dm->dm_refs; dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; voucher_t voucher = dmsg->dmsg_voucher; mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; + uint32_t send_status = 0; bool clear_voucher = false, kvoucher_move_send = false; - dr->dm_needs_mgr = 0; - if (slowpath(dr->dm_checkin) && dmsg != dr->dm_checkin) { - // send initial checkin message - if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != - &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - dr->dm_needs_mgr = 1; - goto out; - } - dr->dm_checkin = _dispatch_mach_msg_send(dm, dr->dm_checkin)._dmsg; - if (slowpath(dr->dm_checkin)) { - goto out; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + 
MACH_MSG_TYPE_MOVE_SEND_ONCE); + mach_port_t reply_port = dmsg->dmsg_reply; + if (!is_reply) { + dr->dm_needs_mgr = 0; + if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) { + // send initial checkin message + if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != + &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dr->dm_needs_mgr = 1; + goto out; + } + if (unlikely(!_dispatch_mach_msg_send(dm, + dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) { + goto out; + } + dr->dm_checkin = NULL; } } - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); mach_msg_return_t kr = 0; - mach_port_t reply = dmsg->dmsg_reply; - mach_msg_option_t opts = 0, msg_opts = _dispatch_mach_msg_get_options(dmsg); - if (!slowpath(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; + if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); - if (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) != - MACH_MSG_TYPE_MOVE_SEND_ONCE) { + if (!is_reply) { if (dmsg != dr->dm_checkin) { msg->msgh_remote_port = dr->dm_send; } if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { if (slowpath(!dm->dm_dkev)) { - _dispatch_mach_kevent_register(dm, msg->msgh_remote_port); + _dispatch_mach_notification_kevent_register(dm, + msg->msgh_remote_port); } if (fastpath(dm->dm_dkev)) { - if (DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) { + if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) { goto out; } opts |= MACH_SEND_NOTIFY; @@ -3696,20 +4872,34 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } else { clear_voucher = _voucher_mach_msg_set(msg, voucher); } + if (pp && _dispatch_evfilt_machport_direct_enabled) { + opts |= MACH_SEND_OVERRIDE; + msg_priority = (mach_msg_priority_t)pp; + } } - _voucher_activity_trace_msg(voucher, msg, send); 
_dispatch_debug_machport(msg->msgh_remote_port); - if (reply) _dispatch_debug_machport(reply); + if (reply_port) _dispatch_debug_machport(reply_port); + if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, + msg_opts); + } kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, - MACH_PORT_NULL); + msg_priority); _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - opts, msg_opts, msg->msgh_voucher_port, reply, + opts, msg_opts, msg->msgh_voucher_port, reply_port, mach_error_string(kr), kr); + if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DKEV_UNREGISTER_REPLY_REMOVE); + } if (clear_voucher) { if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { - DISPATCH_CRASH("Voucher port corruption"); + DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); } mach_voucher_t kv; kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); @@ -3720,7 +4910,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) if (opts & MACH_SEND_NOTIFY) { _dispatch_debug("machport[0x%08x]: send-possible notification " "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); - DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) = 1; + DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1; } else { // send kevent must be installed on the manager queue dr->dm_needs_mgr = 1; @@ -3739,142 +4929,500 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou) } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { _voucher_dealloc_mach_voucher(ipc_kvoucher); } - if (fastpath(!kr) && reply && - !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply)) { - if (_dispatch_queue_get_current() != &_dispatch_mgr_q) { + 
if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && + !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) { + if (!dm->ds_is_direct_kevent && + _dispatch_queue_get_current() != &_dispatch_mgr_q) { // reply receive kevent must be installed on the manager queue dr->dm_needs_mgr = 1; - _dispatch_mach_msg_set_options(dmsg, msg_opts | - DISPATCH_MACH_REGISTER_FOR_REPLY); + dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY; goto out; } - _dispatch_mach_reply_kevent_register(dm, reply, dmsg); + _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); } - if (slowpath(dmsg == dr->dm_checkin) && dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) { + _dispatch_mach_notification_kevent_unregister(dm); } if (slowpath(kr)) { - // Send failed, so reply was never connected + // Send failed, so reply was never registered dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); } _dispatch_mach_msg_set_reason(dmsg, kr, 0); - _dispatch_mach_push(dm, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_mach_push(dm, dmsgr, dmsgr->dmsg_priority); - dmsg = NULL; + if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && + (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { + // Return sent message synchronously + send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; + } else { + _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); + } + if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); + send_status |= DM_SEND_STATUS_SUCCESS; +out: + return send_status; +} + +#pragma mark - +#pragma mark dispatch_mach_send_refs_t + +static void _dispatch_mach_cancel(dispatch_mach_t dm); +static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, + pthread_priority_t pp); + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dm_state_get_override(uint64_t dm_state) +{ + dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK; + 
return (pthread_priority_t)(dm_state >> 32); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dm_state_override_from_priority(pthread_priority_t pp) +{ + uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + return pp_state << 32; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state) +{ + return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK)); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state) +{ + if (_dm_state_needs_override(dm_state, pp_state)) { + dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK; + dm_state |= pp_state; + dm_state |= DISPATCH_MACH_STATE_DIRTY; + dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + } + return dm_state; +} + +#define _dispatch_mach_send_push_update_tail(dr, tail) \ + os_mpsc_push_update_tail(dr, dm, tail, do_next) +#define _dispatch_mach_send_push_update_head(dr, head) \ + os_mpsc_push_update_head(dr, dm, head) +#define _dispatch_mach_send_get_head(dr) \ + os_mpsc_get_head(dr, dm) +#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \ + os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next) +#define _dispatch_mach_send_pop_head(dr, head) \ + os_mpsc_pop_head(dr, dm, head, do_next) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr, + dispatch_object_t dou) +{ + if (_dispatch_mach_send_push_update_tail(dr, dou._do)) { + _dispatch_mach_send_push_update_head(dr, dou._do); + return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_mach_reply_refs_t dmr; + dispatch_mach_msg_t dmsg; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + pthread_priority_t pp = _dm_state_get_override(dr->dm_state); + uint64_t old_state, new_state; + 
uint32_t send_status; + bool needs_mgr, disconnecting, returning_send_result = false; + +again: + needs_mgr = false; disconnecting = false; + while (dr->dm_tail) { + dc = _dispatch_mach_send_get_head(dr); + do { + dispatch_mach_send_invoke_flags_t sf = send_flags; + // Only request immediate send result for the first message + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + next_dc = _dispatch_mach_send_pop_head(dr, dc); + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + goto partial_drain; + } + _dispatch_continuation_pop(dc, dm->_as_dq, flags); + continue; + } + if (_dispatch_object_is_slow_item(dc)) { + dmsg = ((dispatch_continuation_t)dc)->dc_data; + dmr = ((dispatch_continuation_t)dc)->dc_other; + } else if (_dispatch_object_has_vtable(dc)) { + dmsg = (dispatch_mach_msg_t)dc; + dmr = NULL; + } else { + if ((dm->dm_dkev || !dm->ds_is_direct_kevent) && + (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + needs_mgr = true; + goto partial_drain; + } + if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { + disconnecting = true; + goto partial_drain; + } + continue; + } + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + if (unlikely(dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + _dispatch_mach_msg_not_sent(dm, dmsg); + continue; + } + send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf); + if (unlikely(!send_status)) { + goto partial_drain; + } + if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { + returning_send_result = true; + } + } while ((dc = next_dc)); + } + + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + if (old_state & DISPATCH_MACH_STATE_DIRTY) { + new_state = old_state; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } 
else { + // unlock + new_state = 0; + } + }); + goto out; + +partial_drain: + // if this is not a complete drain, we must undo some things + _dispatch_mach_send_unpop_head(dr, dc, next_dc); + + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = old_state; + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + }); + } else { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = old_state; + if (old_state & (DISPATCH_MACH_STATE_DIRTY | + DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + } + }); + } + out: - return (dispatch_object_t)dmsg; + if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_defaultpriority_override(); + } + + if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { + os_atomic_thread_fence(acquire); + pp = _dm_state_get_override(new_state); + goto again; + } + + if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + pp = _dm_state_get_override(new_state); + _dispatch_mach_send_barrier_drain_push(dm, pp); + } else { + if (needs_mgr) { + pp = _dm_state_get_override(new_state); + } else { + pp = 0; + } + if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_invoke(dispatch_mach_t dm, + dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t old_state, new_state; + pthread_priority_t pp_floor; + + uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; + uint64_t canlock_state = 0; + + if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; + } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + + if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) { + pp_floor = 0; + } else { + // _dispatch_queue_class_invoke will have applied the queue override + // (if any) before we get here. Else use the default base priority + // as an estimation of the priority we already asked for. 
+ pp_floor = dm->_as_dq->dq_override; + if (!pp_floor) { + pp_floor = _dispatch_get_defaultpriority(); + pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + } + } + +retry: + os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely((old_state & canlock_mask) != canlock_state)) { + if (!(send_flags & DM_SEND_INVOKE_FLUSH)) { + os_atomic_rmw_loop_give_up(break); + } + new_state |= DISPATCH_MACH_STATE_DIRTY; + } else { + if (likely(pp_floor)) { + pthread_priority_t pp = _dm_state_get_override(old_state); + if (unlikely(pp > pp_floor)) { + os_atomic_rmw_loop_give_up({ + _dispatch_wqthread_override_start(tid_self, pp); + // Ensure that the root queue sees + // that this thread was overridden. + _dispatch_set_defaultpriority_override(); + pp_floor = pp; + goto retry; + }); + } + } + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + + if (unlikely((old_state & canlock_mask) != canlock_state)) { + return; + } + if (send_flags & DM_SEND_INVOKE_CANCEL) { + _dispatch_mach_cancel(dm); + } + _dispatch_mach_send_drain(dm, flags, send_flags); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_thread_frame_s dtf; + + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); + // hide the mach channel (see _dispatch_mach_barrier_invoke comment) + _dispatch_thread_frame_stash(&dtf); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ + _dispatch_mach_send_invoke(dm, flags, + DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); + }); + 
_dispatch_thread_frame_unstash(&dtf); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_mach_send_push_wakeup(dispatch_mach_t dm, dispatch_object_t dou, - bool wakeup) +DISPATCH_NOINLINE +static void +_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, + pthread_priority_t pp) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); + dc->dc_func = NULL; + dc->dc_ctxt = NULL; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + return _dispatch_queue_push(dm->_as_dq, dc, pp); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, + pthread_priority_t pp) { dispatch_mach_send_refs_t dr = dm->dm_refs; - struct dispatch_object_s *prev, *dc = dou._do; - dc->do_next = NULL; + uint64_t pp_state, old_state, new_state, state_flags = 0; + dispatch_lock_owner owner; + bool wakeup; + + // when pushing a send barrier that destroys + // the last reference to this channel, and the send queue is already + // draining on another thread, the send barrier may run as soon as + // _dispatch_mach_send_push_inline() returns. 
+ _dispatch_retain(dm); + pp_state = _dm_state_override_from_priority(pp); + + wakeup = _dispatch_mach_send_push_inline(dr, dc); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { + state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + } - prev = dispatch_atomic_xchg2o(dr, dm_tail, dc, release); - if (fastpath(prev)) { - prev->do_next = dc; + if (state_flags) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = _dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + }); } else { - dr->dm_head = dc; + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, { + new_state = _dm_state_merge_override(old_state, pp_state); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); } - if (wakeup || !prev) { - _dispatch_wakeup(dm); + + pp = _dm_state_get_override(new_state); + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dm_state_needs_override(old_state, pp_state)) { + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dr->dm_state_lock.dul_lock); + } + return _dispatch_release_tailcall(dm); } -} -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou) -{ - return _dispatch_mach_send_push_wakeup(dm, dou, false); + dispatch_wakeup_flags_t wflags = 0; + if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { + _dispatch_mach_send_barrier_drain_push(dm, pp); + } else if (wakeup || dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED)) { + wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME; + } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME; + } + if (wflags) { + return dx_wakeup(dm, pp, wflags); + } + return _dispatch_release_tailcall(dm); } DISPATCH_NOINLINE -static void -_dispatch_mach_send_drain(dispatch_mach_t dm) +static bool 
+_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, + dispatch_object_t dou, pthread_priority_t pp, + dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dr = dm->dm_refs; - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - while (dr->dm_tail) { - _dispatch_wait_until(dc = fastpath(dr->dm_head)); - do { - next_dc = fastpath(dc->do_next); - dr->dm_head = next_dc; - if (!next_dc && !dispatch_atomic_cmpxchg2o(dr, dm_tail, dc, NULL, - relaxed)) { - _dispatch_wait_until(next_dc = fastpath(dc->do_next)); - dr->dm_head = next_dc; - } - if (!DISPATCH_OBJ_IS_VTABLE(dc)) { - if ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT) { - // send barrier - // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc, - ((dispatch_continuation_t)dc)->dc_priority); - } -#if DISPATCH_MACH_SEND_SYNC - if (slowpath((long)dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT)){ - _dispatch_thread_semaphore_signal( - (_dispatch_thread_semaphore_t)dc->do_ctxt); - continue; - } -#endif // DISPATCH_MACH_SEND_SYNC - if (slowpath(!_dispatch_mach_reconnect_invoke(dm, dc))) { - goto out; - } - continue; + dispatch_lock_owner tid_self = _dispatch_tid_self(); + uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0; + dispatch_lock_owner owner; + + pp_state = _dm_state_override_from_priority(pp); + bool wakeup = _dispatch_mach_send_push_inline(dr, dou); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + } + + if (unlikely(dr->dm_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { + new_state = _dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + }); + dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); + return false; + } + + canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | + DISPATCH_MACH_STATE_PENDING_BARRIER; + if (state_flags) { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, { + new_state = 
_dm_state_merge_override(old_state, pp_state); + new_state |= state_flags; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; } - _dispatch_voucher_ktrace_dmsg_pop((dispatch_mach_msg_t)dc); - if (slowpath(dr->dm_disconnect_cnt) || - slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { - _dispatch_mach_msg_not_sent(dm, dc); - continue; + }); + } else { + os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, acquire, { + new_state = _dm_state_merge_override(old_state, pp_state); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(return false); } - if (slowpath(dc = _dispatch_mach_msg_send(dm, dc)._do)) { - goto out; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= tid_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; } - } while ((dc = next_dc)); + }); } -out: - // if this is not a complete drain, we must undo some things - if (slowpath(dc)) { - if (!next_dc && - !dispatch_atomic_cmpxchg2o(dr, dm_tail, NULL, dc, relaxed)) { - // wait for enqueue slow path to finish - _dispatch_wait_until(next_dc = fastpath(dr->dm_head)); - dc->do_next = next_dc; + + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dm_state_needs_override(old_state, pp_state)) { + _dispatch_wqthread_override_start_check_owner(owner, pp, + &dr->dm_state_lock.dul_lock); } - dr->dm_head = dc; + return false; } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_wakeup(dm); -} -static inline void -_dispatch_mach_send(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (!fastpath(dr->dm_tail) || !fastpath(dispatch_atomic_cmpxchg2o(dr, - dm_sending, 0, 1, acquire))) { - return; + if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + 
dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING); + return false; } - _dispatch_object_debug(dm, "%s", __func__); - _dispatch_mach_send_drain(dm); + + // Ensure our message is still at the head of the queue and has not already + // been dequeued by another thread that raced us to the send queue lock. + // A plain load of the head and comparison against our object pointer is + // sufficient. + if (unlikely(!(wakeup && dou._do == dr->dm_head))) { + // Don't request immediate send result for messages we don't own + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + } + return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); } static void -_dispatch_mach_merge_kevent(dispatch_mach_t dm, +_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, const _dispatch_kevent_qos_s *ke) { if (!(ke->fflags & dm->ds_pending_data_mask)) { return; } - _dispatch_mach_send(dm); + _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, + DM_SEND_INVOKE_FLUSH); } +#pragma mark - +#pragma mark dispatch_mach_t + static inline mach_msg_option_t _dispatch_mach_checkin_options(void) { @@ -3906,55 +5454,186 @@ _dispatch_mach_priority_propagate(mach_msg_option_t options) } DISPATCH_NOINLINE -void -dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options) +static bool +_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + dispatch_continuation_t dc_wait, mach_msg_option_t options) { dispatch_mach_send_refs_t dr = dm->dm_refs; if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CLIENT_CRASH("Message already enqueued"); + DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); } dispatch_retain(dmsg); - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); pthread_priority_t priority = _dispatch_mach_priority_propagate(options); options |= _dispatch_mach_send_options(); - _dispatch_mach_msg_set_options(dmsg, options & ~DISPATCH_MACH_OPTIONS_MASK); + dmsg->dmsg_options = options; 
mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - dmsg->dmsg_reply = (MACH_MSGH_BITS_LOCAL(msg->msgh_bits) == - MACH_MSG_TYPE_MAKE_SEND_ONCE && - MACH_PORT_VALID(msg->msgh_local_port) ? msg->msgh_local_port : - MACH_PORT_NULL); + dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); dmsg->dmsg_priority = priority; dmsg->dmsg_voucher = _voucher_copy(); _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); - if ((!is_reply && slowpath(dr->dm_tail)) || - slowpath(dr->dm_disconnect_cnt) || - slowpath(dm->ds_atomic_flags & DSF_CANCELED) || - slowpath(!dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, - acquire))) { + + uint32_t send_status; + bool returning_send_result = false; + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { + send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; + } + if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt && + !(dm->dq_atomic_flags & DSF_CANCELED)) { + // replies are sent to a send-once right and don't need the send queue + dispatch_assert(!dc_wait); + send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); + dispatch_assert(send_status); + returning_send_result = !!(send_status & + DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); + } else { _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_send_push(dm, dmsg); + priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_object_t dou = { ._dmsg = dmsg }; + if (dc_wait) dou._dc = dc_wait; + returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou, + priority, send_flags); + } + if (returning_send_result) { + _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); } - if 
(slowpath(dmsg = _dispatch_mach_msg_send(dm, dmsg)._dmsg)) { - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_mach_send_push_wakeup(dm, dmsg, true); + return returning_send_result; +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + dispatch_assert(!returned_send_result); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); } - if (!is_reply && slowpath(dr->dm_tail)) { - return _dispatch_mach_send_drain(dm); + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - _dispatch_wakeup(dm); + *send_result = reason; + *send_error = err; } -static void +static inline +dispatch_mach_msg_t +_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + bool *returned_send_result) +{ + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + // use per-thread mach reply port + reply_port = _dispatch_get_thread_reply_port(); + mach_msg_header_t *hdr = 
_dispatch_mach_msg_get_msg(dmsg); + dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE); + hdr->msgh_local_port = reply_port; + options |= DISPATCH_MACH_OWNED_REPLY_PORT; + } + + dispatch_mach_reply_refs_t dmr; +#if DISPATCH_DEBUG + dmr = _dispatch_calloc(1, sizeof(*dmr)); +#else + struct dispatch_mach_reply_refs_s dmr_buf = { }; + dmr = &dmr_buf; +#endif + struct dispatch_continuation_s dc_wait = { + .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, + .dc_data = dmsg, + .dc_other = dmr, + .dc_priority = DISPATCH_NO_PRIORITY, + .dc_voucher = DISPATCH_NO_VOUCHER, + }; + dmr->dmr_ctxt = dmsg->do_ctxt; + *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); + if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + } + dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port); +#if DISPATCH_DEBUG + free(dmr); +#endif + return dmsg; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options) +{ + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + dispatch_assert(!returned_send_result); + return reply; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= 
~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; + return reply; +} + +DISPATCH_NOINLINE +static bool _dispatch_mach_disconnect(dispatch_mach_t dm) { dispatch_mach_send_refs_t dr = dm->dm_refs; + bool disconnected; if (dm->dm_dkev) { - _dispatch_mach_kevent_unregister(dm); + _dispatch_mach_notification_kevent_unregister(dm); } if (MACH_PORT_VALID(dr->dm_send)) { _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send); @@ -3964,50 +5643,53 @@ _dispatch_mach_disconnect(dispatch_mach_t dm) _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); dr->dm_checkin = NULL; } - if (!TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { - dispatch_mach_reply_refs_t dmr, tmp; - TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp){ - _dispatch_mach_reply_kevent_unregister(dm, dmr, true); + _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp) { + TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + if (dmr->dmr_dkev) { + _dispatch_mach_reply_kevent_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED); + } else { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DKEV_UNREGISTER_DISCONNECTED); } } + disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies); + _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); + return disconnected; } -DISPATCH_NOINLINE -static bool +static void _dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (!fastpath(dispatch_atomic_cmpxchg2o(dr, dm_sending, 0, 1, 
acquire))) { - return false; - } _dispatch_object_debug(dm, "%s", __func__); - _dispatch_mach_disconnect(dm); + if (!_dispatch_mach_disconnect(dm)) return; if (dm->ds_dkev) { mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; - _dispatch_source_kevent_unregister((dispatch_source_t)dm); - _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + _dispatch_source_kevent_unregister(dm->_as_ds); + if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + } + } else { + _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, + DSF_ARMED | DSF_DEFERRED_DELETE); } - (void)dispatch_atomic_dec2o(dr, dm_sending, release); - return true; } DISPATCH_NOINLINE static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) { - if (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { - if (slowpath(_dispatch_queue_get_current() != &_dispatch_mgr_q)) { - // send/reply kevents must be uninstalled on the manager queue - return false; - } - } - _dispatch_mach_disconnect(dm); + if (!_dispatch_mach_disconnect(dm)) return false; dispatch_mach_send_refs_t dr = dm->dm_refs; dr->dm_checkin = dou._dc->dc_data; dr->dm_send = (mach_port_t)dou._dc->dc_other; _dispatch_continuation_free(dou._dc); - (void)dispatch_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); + (void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); _dispatch_object_debug(dm, "%s", __func__); + _dispatch_release(dm); // return true; } @@ -4017,47 +5699,34 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, dispatch_mach_msg_t checkin) { dispatch_mach_send_refs_t dr = dm->dm_refs; - (void)dispatch_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); + (void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); if (MACH_PORT_VALID(send) && checkin) { dispatch_retain(checkin); - mach_msg_option_t options = _dispatch_mach_checkin_options(); - _dispatch_mach_msg_set_options(checkin, options); + 
checkin->dmsg_options = _dispatch_mach_checkin_options(); dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); } else { checkin = NULL; dr->dm_checkin_port = MACH_PORT_NULL; } dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; + // actually called manually in _dispatch_mach_send_drain dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; dc->dc_ctxt = dc; dc->dc_data = checkin; dc->dc_other = (void*)(uintptr_t)send; - return _dispatch_mach_send_push(dm, dc); -} - -#if DISPATCH_MACH_SEND_SYNC -DISPATCH_NOINLINE -static void -_dispatch_mach_send_sync_slow(dispatch_mach_t dm) -{ - _dispatch_thread_semaphore_t sema = _dispatch_get_thread_semaphore(); - struct dispatch_object_s dc = { - .do_vtable = (void *)(DISPATCH_OBJ_SYNC_SLOW_BIT), - .do_ctxt = (void*)sema, - }; - _dispatch_mach_send_push(dm, &dc); - _dispatch_thread_semaphore_wait(sema); - _dispatch_put_thread_semaphore(sema); + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_retain(dm); // + return _dispatch_mach_send_push(dm, dc, 0); } -#endif // DISPATCH_MACH_SEND_SYNC DISPATCH_NOINLINE mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t dm) { dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dm->ds_atomic_flags & DSF_CANCELED)) { + if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { return MACH_PORT_DEAD; } return dr->dm_checkin_port; @@ -4076,121 +5745,149 @@ _dispatch_mach_connect_invoke(dispatch_mach_t dm) DISPATCH_NOINLINE void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, - dispatch_object_t dou DISPATCH_UNUSED, - dispatch_invoke_flags_t flags DISPATCH_UNUSED) + dispatch_invoke_flags_t flags) { - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - dispatch_mach_refs_t dr = dm->ds_refs; + dispatch_thread_frame_s dtf; + dispatch_mach_refs_t dr; + dispatch_mach_t dm; mach_error_t err; unsigned long reason = 
_dispatch_mach_msg_get_reason(dmsg, &err); + _dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| + DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + // hide mach channel + dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); + dr = dm->ds_refs; dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); _dispatch_voucher_ktrace_dmsg_pop(dmsg); _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); - _dispatch_adopt_priority_and_replace_voucher(dmsg->dmsg_priority, - dmsg->dmsg_voucher, DISPATCH_PRIORITY_ENFORCE); + (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, + dmsg->dmsg_voucher, adopt_flags); dmsg->dmsg_voucher = NULL; - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, - dr->dm_handler_func); - _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, + dr->dm_handler_func); + }); + _dispatch_thread_frame_unstash(&dtf); _dispatch_introspection_queue_item_complete(dmsg); dispatch_release(dmsg); } DISPATCH_NOINLINE void -_dispatch_mach_barrier_invoke(void *ctxt) +_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags) { - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - dispatch_mach_refs_t dr = dm->ds_refs; - struct dispatch_continuation_s *dc = ctxt; - void *context = dc->dc_data; - dispatch_function_t barrier = dc->dc_other; - bool send_barrier = ((long)dc->do_vtable & DISPATCH_OBJ_BARRIER_BIT); - - _dispatch_thread_setspecific(dispatch_queue_key, dm->do_targetq); - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - 
_dispatch_client_callout(context, barrier); - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, dr->dm_handler_func); - _dispatch_thread_setspecific(dispatch_queue_key, (dispatch_queue_t)dm); - if (send_barrier) { - (void)dispatch_atomic_dec2o(dm->dm_refs, dm_sending, release); + dispatch_thread_frame_s dtf; + dispatch_mach_t dm = dc->dc_other; + dispatch_mach_refs_t dr; + uintptr_t dc_flags = (uintptr_t)dc->dc_data; + unsigned long type = dc_type(dc); + + // hide mach channel from clients + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + // on the send queue, the mach channel isn't the current queue + // its target queue is the current one already + _dispatch_thread_frame_stash(&dtf); + } + dr = dm->ds_refs; + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{ + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, + dr->dm_handler_func); + }); + }); + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + _dispatch_thread_frame_unstash(&dtf); } } DISPATCH_NOINLINE void dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t barrier) + dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT | DISPATCH_OBJ_BARRIER_BIT); - dc->dc_func = _dispatch_mach_barrier_invoke; - dc->dc_ctxt = dc; - dc->dc_data = context; - dc->dc_other = barrier; - _dispatch_continuation_voucher_set(dc, 0); - _dispatch_continuation_priority_set(dc, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + pthread_priority_t pp; - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dr->dm_tail) 
|| slowpath(!dispatch_atomic_cmpxchg2o(dr, - dm_sending, 0, 1, acquire))) { - return _dispatch_mach_send_push(dm, dc); - } - // leave send queue locked until barrier has completed - return _dispatch_mach_push(dm, dc, dc->dc_priority); + _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, pp); } DISPATCH_NOINLINE void -dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t barrier) +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->do_vtable = (void *)(DISPATCH_OBJ_ASYNC_BIT); - dc->dc_func = _dispatch_mach_barrier_invoke; - dc->dc_ctxt = dc; - dc->dc_data = context; - dc->dc_other = barrier; - _dispatch_continuation_voucher_set(dc, 0); - _dispatch_continuation_priority_set(dc, 0, 0); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + pthread_priority_t pp; - return _dispatch_mach_push(dm, dc, dc->dc_priority); + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, pp); } DISPATCH_NOINLINE void -dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) { - dispatch_mach_send_barrier_f(dm, _dispatch_Block_copy(barrier), - _dispatch_call_block_and_release); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + 
_dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); + return _dispatch_continuation_async(dm->_as_dq, dc); } DISPATCH_NOINLINE void dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { - dispatch_mach_receive_barrier_f(dm, _dispatch_Block_copy(barrier), - _dispatch_call_block_and_release); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); + return _dispatch_continuation_async(dm->_as_dq, dc); } DISPATCH_NOINLINE static void -_dispatch_mach_cancel_invoke(dispatch_mach_t dm) +_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) { dispatch_mach_refs_t dr = dm->ds_refs; - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dr->dm_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); + }); dm->dm_cancel_handler_called = 1; _dispatch_release(dm); // the retain is done at creation time } @@ -4199,15 +5896,55 @@ DISPATCH_NOINLINE void dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_source_cancel((dispatch_source_t)dm); + dispatch_source_cancel(dm->_as_ds); +} + +static void +_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp) +{ + uint32_t disconnect_cnt; + + if (dm->ds_dkev) { + _dispatch_source_kevent_register(dm->_as_ds, pp); + } + if (dm->ds_is_direct_kevent) { + pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | + 
_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG | + _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the DEFAULTQUEUE flag + // is used so that the priority of that channel doesn't act as a floor + // QoS for incoming messages (26761457) + dm->dq_priority = (dispatch_priority_t)pp; + } + dm->ds_is_installed = true; + if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt, + DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { + DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); + } +} + +void +_dispatch_mach_finalize_activation(dispatch_mach_t dm) +{ + if (dm->ds_is_direct_kevent && !dm->ds_is_installed) { + dispatch_source_t ds = dm->_as_ds; + pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); + if (pp) _dispatch_mach_install(dm, pp); + } + + // call "super" + _dispatch_queue_finalize_activation(dm->_as_dq); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t -_dispatch_mach_invoke2(dispatch_object_t dou, - _dispatch_thread_semaphore_t *sema_ptr DISPATCH_UNUSED) +_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, + uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) { dispatch_mach_t dm = dou._dm; + dispatch_queue_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); // This function performs all mach channel actions. Each action is // responsible for verifying that it takes place on the appropriate queue. @@ -4215,122 +5952,158 @@ _dispatch_mach_invoke2(dispatch_object_t dou, // correct queue will be returned and the invoke will be re-driven on that // queue. - // The order of tests here in invoke and in probe should be consistent. + // The order of tests here in invoke and in wakeup should be consistent. 
- dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_queue_t dkq = &_dispatch_mgr_q; + + if (dm->ds_is_direct_kevent) { + dkq = dm->do_targetq; + } if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the manager queue. - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; - } - if (dm->ds_dkev) { - _dispatch_source_kevent_register((dispatch_source_t)dm); - } - dm->ds_is_installed = true; - _dispatch_mach_send(dm); - // Apply initial target queue change - _dispatch_queue_drain(dou); - if (dm->dq_items_tail) { - return dm->do_targetq; - } - } else if (dm->dq_items_tail) { - // The channel has pending messages to deliver to the target queue. - if (dq != dm->do_targetq) { - return dm->do_targetq; - } - dispatch_queue_t tq = dm->do_targetq; - if (slowpath(_dispatch_queue_drain(dou))) { - DISPATCH_CLIENT_CRASH("Sync onto mach channel"); - } - if (slowpath(tq != dm->do_targetq)) { - // An item on the channel changed the target queue - return dm->do_targetq; - } - } else if (dr->dm_sending) { - // Sending and uninstallation below require the send lock, the channel - // will be woken up when the lock is dropped - return NULL; - } else if (dr->dm_tail) { - if (slowpath(dr->dm_needs_mgr) || (slowpath(dr->dm_disconnect_cnt) && - (dm->dm_dkev || !TAILQ_EMPTY(&dm->dm_refs->dm_replies)))) { - // Send/reply kevents need to be installed or uninstalled - if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; - } + // The channel needs to be installed on the kevent queue. 
+ if (dq != dkq) { + return dkq; + } + _dispatch_mach_install(dm, _dispatch_get_defaultpriority()); + } + + if (_dispatch_queue_class_probe(dm)) { + if (dq == dm->do_targetq) { + retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL); + } else { + retq = dm->do_targetq; } - if (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || - (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt) { + } + + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + + if (dr->dm_tail) { + bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && + (dm->dm_dkev || !dm->ds_is_direct_kevent)); + if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || + (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { // The channel has pending messages to send. - _dispatch_mach_send(dm); + if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { + return retq ? retq : &_dispatch_mgr_q; + } + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (dq != &_dispatch_mgr_q) { + send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; + } + _dispatch_mach_send_invoke(dm, flags, send_flags); } - } else if (dm->ds_atomic_flags & DSF_CANCELED){ + } else if (dqf & DSF_CANCELED) { // The channel has been cancelled and needs to be uninstalled from the // manager queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. - if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || - !TAILQ_EMPTY(&dm->dm_refs->dm_replies)) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + return retq; + } + if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { if (dq != &_dispatch_mgr_q) { - return &_dispatch_mgr_q; + return retq ? 
retq : &_dispatch_mgr_q; } - if (!_dispatch_mach_cancel(dm)) { - return NULL; + _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { + // waiting for the delivery of a deferred delete event + // or deletion didn't happen because send_invoke couldn't + // acquire the send lock + return retq; } } if (!dm->dm_cancel_handler_called) { if (dq != dm->do_targetq) { - return dm->do_targetq; + return retq ? retq : dm->do_targetq; } - _dispatch_mach_cancel_invoke(dm); + _dispatch_mach_cancel_invoke(dm, flags); } } - return NULL; + + return retq; } DISPATCH_NOINLINE void -_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_invoke_flags_t flags) +_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dm, dou._dc, flags, _dispatch_mach_invoke2); + _dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2); } -unsigned long -_dispatch_mach_probe(dispatch_mach_t dm) +void +_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, + dispatch_wakeup_flags_t flags) { // This function determines whether the mach channel needs to be invoked. // The order of tests here in probe and in invoke should be consistent. dispatch_mach_send_refs_t dr = dm->dm_refs; + dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the manager queue. - return true; - } else if (_dispatch_queue_class_probe(dm)) { - // The source has pending messages to deliver to the target queue. 
- return true; - } else if (dr->dm_sending) { + if (dm->ds_is_direct_kevent) { + dkq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + + if (!dm->ds_is_installed) { + // The channel needs to be installed on the kevent queue. + tq = dkq; + goto done; + } + + if (_dispatch_queue_class_probe(dm)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + goto done; + } + + if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) { // Sending and uninstallation below require the send lock, the channel // will be woken up when the lock is dropped - return false; - } else if (dr->dm_tail && - (!(dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev)) || - (dm->ds_atomic_flags & DSF_CANCELED) || dr->dm_disconnect_cnt)) { - // The channel has pending messages to send. - return true; - } else if (dm->ds_atomic_flags & DSF_CANCELED) { - if (dm->ds_dkev || dm->dm_dkev || dr->dm_send || - !TAILQ_EMPTY(&dm->dm_refs->dm_replies) || - !dm->dm_cancel_handler_called) { - // The channel needs to be uninstalled from the manager queue, or + _dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp); + goto done; + } + + if (dr->dm_tail) { + bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && + (dm->dm_dkev || !dm->ds_is_direct_kevent)); + if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || + (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { + if (unlikely(requires_mgr)) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } else { + // can happen when we can't send because the port is full + // but we should not lose the override + _dispatch_queue_reinstate_override_priority(dm, + (dispatch_priority_t)pp); + } + } else if (dqf & DSF_CANCELED) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + } else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { + // The channel needs to be uninstalled from the manager queue + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else if 
(!dm->dm_cancel_handler_called) { // the cancellation handler needs to be delivered to the target // queue. - return true; + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } } - // Nothing to do. - return false; + +done: + if (tq) { + return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq); + } else if (pp) { + return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags); + } else if (flags & DISPATCH_WAKEUP_CONSUME) { + return _dispatch_release_tailcall(dm); + } } #pragma mark - @@ -4342,7 +6115,7 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, { if (slowpath(size < sizeof(mach_msg_header_t)) || slowpath(destructor && !msg)) { - DISPATCH_CLIENT_CRASH("Empty message"); + DISPATCH_CLIENT_CRASH(size, "Empty message"); } dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), sizeof(struct dispatch_mach_msg_s) + @@ -4410,7 +6183,7 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " - "msgh[%p] = { ", dmsg->do_suspend_cnt, dmsg->dmsg_buf); + "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); if (hdr->msgh_id) { offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", @@ -4465,11 +6238,19 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bool received = false; size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; - // XXX FIXME -- allocate these elsewhere bufRequest = alloca(rcv_size); + bufRequest->RetCode = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); + p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + bufReply = alloca(rcv_size); bufReply->Head.msgh_size = 0; - bufRequest->RetCode = 0; + for 
(mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); + p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } #if DISPATCH_DEBUG options |= MACH_RCV_LARGE; // rdar://problem/8422992 @@ -4477,7 +6258,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { - if (DISPATCH_OBJECT_SUSPENDED(ds) || (--cnt == 0)) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { options &= ~MACH_RCV_MSG; tmp_options &= ~MACH_RCV_MSG; @@ -4559,11 +6340,14 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bufReply = bufTemp; #if DISPATCH_USE_IMPORTANCE_ASSERTION +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, NULL, &assertion_token); if (r && slowpath(r != EIO)) { (void)dispatch_assume_zero(r); } +#pragma clang diagnostic pop #endif _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); demux_success = callback(&bufRequest->Head, &bufReply->Head); @@ -4666,10 +6450,15 @@ _evflagstr2(uint16_t *flagsp) _evflag2(EV_RECEIPT); _evflag2(EV_DISPATCH); _evflag2(EV_UDATA_SPECIFIC); +#ifdef EV_POLL _evflag2(EV_POLL); +#endif +#ifdef EV_OOBAND _evflag2(EV_OOBAND); +#endif _evflag2(EV_ERROR); _evflag2(EV_EOF); + _evflag2(EV_VANISHED); *flagsp = 0; return "EV_UNKNOWN "; } @@ -4694,24 +6483,25 @@ _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) dispatch_queue_t target = ds->do_targetq; return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " "mask = 0x%lx, pending_data = 0x%lx, registered = %d, " - "armed = %d, deleted = %d%s%s, canceled = %d, needs_mgr = %d, ", + "armed = %d, deleted = %d%s, canceled = %d, ", target && target->dq_label ? 
target->dq_label : "", target, ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data, - ds->ds_is_installed, (bool)(ds->ds_atomic_flags & DSF_ARMED), - (bool)(ds->ds_atomic_flags & DSF_DELETED), ds->ds_pending_delete ? - " (pending)" : "", (ds->ds_atomic_flags & DSF_ONESHOT) ? - " (oneshot)" : "", (bool)(ds->ds_atomic_flags & DSF_CANCELED), - ds->ds_needs_mgr); + ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), + (bool)(ds->dq_atomic_flags & DSF_DELETED), + (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "", + (bool)(ds->dq_atomic_flags & DSF_CANCELED)); } static size_t _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_source_refs_t dr = ds->ds_refs; - return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx," - " last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", - ds_timer(dr).target, ds_timer(dr).deadline, ds_timer(dr).last_fire, - ds_timer(dr).interval, ds_timer(dr).flags); + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" + ", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", + (unsigned long long)ds_timer(dr).target, + (unsigned long long)ds_timer(dr).deadline, + (unsigned long long)ds_timer(dr).last_fire, + (unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags); } size_t @@ -4725,30 +6515,39 @@ _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) if (ds->ds_is_timer) { offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } + const char *filter; + if (!ds->ds_dkev) { + filter = "????"; + } else if (ds->ds_is_custom_source) { + filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev); + } else { + filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter); + } offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)" - : "", ds->ds_dkev ? 
_evfiltstr(ds->ds_dkev->dk_kevent.filter) : - "????"); + : "", filter); return offset; } +#if HAVE_MACH static size_t _dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) { dispatch_queue_t target = dm->do_targetq; return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " - "sending = %d, disconnected = %d, canceled = %d ", + "send state = %016llx, disconnected = %d, canceled = %d ", target && target->dq_label ? target->dq_label : "", target, dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, dm->dm_refs->dm_send, dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, - dm->dm_dkev && DISPATCH_MACH_KEVENT_ARMED(dm->dm_dkev) ? + dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ? " (armed)" : "", dm->dm_refs->dm_checkin_port, dm->dm_refs->dm_checkin ? " (pending)" : "", - dm->dm_refs->dm_sending, dm->dm_refs->dm_disconnect_cnt, - (bool)(dm->ds_atomic_flags & DSF_CANCELED)); + dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt, + (bool)(dm->dq_atomic_flags & DSF_CANCELED)); } + size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) { @@ -4761,19 +6560,44 @@ _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); return offset; } +#endif // HAVE_MACH #if DISPATCH_DEBUG DISPATCH_NOINLINE static void -_dispatch_kevent_debug(const _dispatch_kevent_qos_s* kev, const char* str) +dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, + int i, int n, const char *function, unsigned int line) { char flagstr[256]; - _dispatch_debug("kevent[%p] = { ident = 0x%llx, filter = %s, " + char i_n[31]; + + if (n > 1) { + snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n); + } else { + i_n[0] = '\0'; + } +#if DISPATCH_USE_KEVENT_QOS + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "ext[0] = 
0x%llx, ext[1] = 0x%llx }: %s", kev, kev->ident, + "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " + "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, - kev->ext[0], kev->ext[1], str); + kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], + function, line); +#else + _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " + "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " + "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n, + kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, + sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, +#ifndef IGNORE_KEVENT64_EXT + kev->ext[0], kev->ext[1], +#else + 0ull, 0ull, +#endif + function, line); +#endif } static void @@ -4815,9 +6639,6 @@ _dispatch_kevent_debugger2(void *context) fprintf(debug_stream, "PID %u\n", getpid()); fprintf(debug_stream, "\n
      \n"); - //fprintf(debug_stream, "DKDKDKDK" - // "DKDKDK\n"); - for (i = 0; i < DSL_HASH_SIZE; i++) { if (TAILQ_EMPTY(&_dispatch_sources[i])) { continue; @@ -4832,16 +6653,16 @@ _dispatch_kevent_debugger2(void *context) fprintf(debug_stream, "\t\t
        \n"); TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { ds = _dispatch_source_from_refs(dr); - fprintf(debug_stream, "\t\t\t
      • DS %p refcnt 0x%x suspend " - "0x%x data 0x%lx mask 0x%lx flags 0x%x
      • \n", - ds, ds->do_ref_cnt + 1, ds->do_suspend_cnt, + fprintf(debug_stream, "\t\t\t
      • DS %p refcnt 0x%x state " + "0x%llx data 0x%lx mask 0x%lx flags 0x%x
      • \n", + ds, ds->do_ref_cnt + 1, ds->dq_state, ds->ds_pending_data, ds->ds_pending_data_mask, - ds->ds_atomic_flags); - if (ds->do_suspend_cnt == DISPATCH_OBJECT_SUSPEND_LOCK) { + ds->dq_atomic_flags); + if (_dq_state_is_enqueued(ds->dq_state)) { dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t
        DQ: %p refcnt 0x%x suspend " - "0x%x label: %s\n", dq, dq->do_ref_cnt + 1, - dq->do_suspend_cnt, dq->dq_label ? dq->dq_label:""); + fprintf(debug_stream, "\t\t
        DQ: %p refcnt 0x%x state " + "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1, + dq->dq_state, dq->dq_label ?: ""); } } fprintf(debug_stream, "\t\t
      \n"); @@ -4881,9 +6702,11 @@ _dispatch_kevent_debugger(void *context DISPATCH_UNUSED) int val, r, fd, sock_opt = 1; socklen_t slen = sizeof(sa_u); +#ifndef __linux__ if (issetugid()) { return; } +#endif valstr = getenv("LIBDISPATCH_DEBUGGER"); if (!valstr) { return; diff --git a/src/source_internal.h b/src/source_internal.h index 6e8f40f5a..41b6d11a0 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -38,6 +38,7 @@ #define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) #define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) +#if HAVE_MACH // NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t // bit values must not overlap as they share the same kevent fflags ! @@ -68,28 +69,32 @@ enum { DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, DISPATCH_MACH_RECV_NO_SENDERS = 0x40, }; +#endif // HAVE_MACH enum { + /* DISPATCH_TIMER_STRICT 0x1 */ + /* DISPATCH_TIMER_BACKGROUND = 0x2, */ DISPATCH_TIMER_WALL_CLOCK = 0x4, DISPATCH_TIMER_INTERVAL = 0x8, DISPATCH_TIMER_WITH_AGGREGATE = 0x10, + /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ + DISPATCH_TIMER_AFTER = 0x40, }; -// low bits are timer QoS class #define DISPATCH_TIMER_QOS_NORMAL 0u #define DISPATCH_TIMER_QOS_CRITICAL 1u #define DISPATCH_TIMER_QOS_BACKGROUND 2u #define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) -#define DISPATCH_TIMER_QOS(tidx) ((uintptr_t)(tidx) & 0x3ul) +#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul) #define DISPATCH_TIMER_KIND_WALL 0u #define DISPATCH_TIMER_KIND_MACH 1u #define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) -#define DISPATCH_TIMER_KIND(tidx) (((uintptr_t)(tidx) >> 2) & 0x1ul) +#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul) -#define DISPATCH_TIMER_INDEX(kind, qos) (((kind) << 2) | (qos)) +#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind)) #define DISPATCH_TIMER_INDEX_DISARM \ - DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_COUNT, 0) + DISPATCH_TIMER_INDEX(0, 
DISPATCH_TIMER_QOS_COUNT) #define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) #define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ @@ -106,6 +111,11 @@ struct dispatch_kevent_s { typedef struct dispatch_kevent_s *dispatch_kevent_t; +typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t; + +#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD) +#define DISPATCH_KEV_CUSTOM_OR ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR) + struct dispatch_source_type_s { _dispatch_kevent_qos_s ke; uint64_t mask; @@ -134,7 +144,7 @@ enum { typedef struct dispatch_source_refs_s { TAILQ_ENTRY(dispatch_source_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t - dispatch_continuation_t ds_handler[3]; + dispatch_continuation_t volatile ds_handler[3]; } *dispatch_source_refs_t; typedef struct dispatch_timer_source_refs_s { @@ -165,42 +175,43 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); } -// ds_atomic_flags bits -#define DSF_CANCELED 1u // cancellation has been requested -#define DSF_ARMED 2u // source is armed -#define DSF_DELETED 4u // source received EV_DELETE event -#define DSF_ONESHOT 8u // source received EV_ONESHOT event - -#define DISPATCH_SOURCE_HEADER(refs) \ - dispatch_kevent_t ds_dkev; \ - dispatch_##refs##_refs_t ds_refs; \ - unsigned int ds_atomic_flags; \ +#define _DISPATCH_SOURCE_HEADER(refs) \ + DISPATCH_QUEUE_HEADER(refs); \ + /* LP64: fills 32bit hole in QUEUE_HEADER */ \ unsigned int \ ds_is_level:1, \ ds_is_adder:1, \ ds_is_installed:1, \ ds_is_direct_kevent:1, \ + ds_is_custom_source:1, \ ds_needs_rearm:1, \ - ds_pending_delete:1, \ - ds_needs_mgr:1, \ ds_is_timer:1, \ ds_vmpressure_override:1, \ - ds_memorystatus_override:1, \ + ds_memorypressure_override:1, \ dm_handler_is_block:1, \ dm_connect_handler_called:1, \ 
dm_cancel_handler_called:1; \ + dispatch_kevent_t ds_dkev; \ + dispatch_##refs##_refs_t ds_refs; \ unsigned long ds_pending_data_mask; -DISPATCH_CLASS_DECL(source); +#define DISPATCH_SOURCE_HEADER(refs) \ + struct dispatch_source_s _as_ds[0]; \ + _DISPATCH_SOURCE_HEADER(refs) + +DISPATCH_CLASS_DECL_BARE(source); +_OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object); + +#if DISPATCH_PURE_C struct dispatch_source_s { - DISPATCH_STRUCT_HEADER(source); - DISPATCH_QUEUE_HEADER; - DISPATCH_SOURCE_HEADER(source); + _DISPATCH_SOURCE_HEADER(source); unsigned long ds_ident_hack; unsigned long ds_data; unsigned long ds_pending_data; -}; +} DISPATCH_QUEUE_ALIGN; +#endif +#if HAVE_MACH // Mach channel state which may contain references to the channel object // layout must match dispatch_source_refs_s struct dispatch_mach_refs_s { @@ -216,19 +227,39 @@ struct dispatch_mach_reply_refs_s { uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t dispatch_kevent_t dmr_dkev; void *dmr_ctxt; - pthread_priority_t dmr_priority; + mach_port_t dmr_reply; + dispatch_priority_t dmr_priority; voucher_t dmr_voucher; TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; }; typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; +#define _DISPATCH_MACH_STATE_UNUSED_MASK_2 0xff00000000000000ull +#define DISPATCH_MACH_STATE_OVERRIDE_MASK 0x00ffff0000000000ull +#define _DISPATCH_MACH_STATE_UNUSED_MASK_1 0x000000f000000000ull +#define DISPATCH_MACH_STATE_DIRTY 0x0000000800000000ull +#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000400000000ull +#define _DISPATCH_MACH_STATE_UNUSED_MASK_0 0x0000000200000000ull +#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000000100000000ull +#define DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + struct dispatch_mach_send_refs_s { TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t dispatch_mach_msg_t dm_checkin; TAILQ_HEAD(, dispatch_mach_reply_refs_s) 
dm_replies; + dispatch_unfair_lock_s dm_replies_lock; +#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) +#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) +#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) uint32_t volatile dm_disconnect_cnt; - uint32_t volatile dm_sending; + union { + uint64_t volatile dm_state; + DISPATCH_STRUCT_LITTLE_ENDIAN_2( + dispatch_unfair_lock_s dm_state_lock, + uint32_t dm_state_bits + ); + }; unsigned int dm_needs_mgr:1; struct dispatch_object_s *volatile dm_tail; struct dispatch_object_s *volatile dm_head; @@ -237,17 +268,21 @@ struct dispatch_mach_send_refs_s { typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; DISPATCH_CLASS_DECL(mach); +#if DISPATCH_PURE_C struct dispatch_mach_s { - DISPATCH_STRUCT_HEADER(mach); - DISPATCH_QUEUE_HEADER; DISPATCH_SOURCE_HEADER(mach); dispatch_kevent_t dm_dkev; dispatch_mach_send_refs_t dm_refs; -}; +} DISPATCH_QUEUE_ALIGN; +#endif DISPATCH_CLASS_DECL(mach_msg); struct dispatch_mach_msg_s { - DISPATCH_STRUCT_HEADER(mach_msg); + DISPATCH_OBJECT_HEADER(mach_msg); + union { + mach_msg_option_t dmsg_options; + mach_error_t dmsg_error; + }; mach_port_t dmsg_reply; pthread_priority_t dmsg_priority; voucher_t dmsg_voucher; @@ -258,6 +293,9 @@ struct dispatch_mach_msg_s { char dmsg_buf[0]; }; }; +#endif // HAVE_MACH + +extern const struct dispatch_source_type_s _dispatch_source_type_after; #if TARGET_OS_EMBEDDED #define DSL_HASH_SIZE 64u // must be a power of two @@ -265,31 +303,49 @@ struct dispatch_mach_msg_s { #define DSL_HASH_SIZE 256u // must be a power of two #endif +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc); void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds); -void _dispatch_source_invoke(dispatch_source_t ds, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -unsigned 
long _dispatch_source_probe(dispatch_source_t ds); +void _dispatch_source_finalize_activation(dispatch_source_t ds); +void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags); +void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); -void _dispatch_source_set_event_handler_with_context_f(dispatch_source_t ds, - void *ctxt, dispatch_function_t handler); +void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds, + dispatch_continuation_t dc); +DISPATCH_EXPORT // for firehose server +void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val); +#if HAVE_MACH void _dispatch_mach_dispose(dispatch_mach_t dm); -void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_invoke_flags_t flags); -unsigned long _dispatch_mach_probe(dispatch_mach_t dm); +void _dispatch_mach_finalize_activation(dispatch_mach_t dm); +void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags); +void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); -void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_object_t dou, +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_invoke_flags_t flags); -size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, + size_t bufsiz); -void _dispatch_mach_barrier_invoke(void *ctxt); - -unsigned long _dispatch_mgr_wakeup(dispatch_queue_t dq); -void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_object_t dou, +void 
_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_flags_t flags); +void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, dispatch_invoke_flags_t flags); +#endif // HAVE_MACH + +void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_wakeup_flags_t flags); +void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags); +#if DISPATCH_USE_KEVENT_WORKQUEUE +void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, + int *nevents); +#endif #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Block.swift b/src/swift/Block.swift new file mode 100644 index 000000000..c1266cea1 --- /dev/null +++ b/src/swift/Block.swift @@ -0,0 +1,114 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let barrier = DispatchWorkItemFlags(rawValue: 0x1) + + @available(OSX 10.10, iOS 8.0, *) + public static let detached = DispatchWorkItemFlags(rawValue: 0x2) + + @available(OSX 10.10, iOS 8.0, *) + public static let assignCurrentContext = DispatchWorkItemFlags(rawValue: 0x4) + + @available(OSX 10.10, iOS 8.0, *) + public static let noQoS = DispatchWorkItemFlags(rawValue: 0x8) + + @available(OSX 10.10, iOS 8.0, *) + public static let inheritQoS = DispatchWorkItemFlags(rawValue: 0x10) + + @available(OSX 10.10, iOS 8.0, 
*) + public static let enforceQoS = DispatchWorkItemFlags(rawValue: 0x20) +} + +@available(OSX 10.10, iOS 8.0, *) +public class DispatchWorkItem { + internal var _block: _DispatchBlock + internal var _group: DispatchGroup? + + public init(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @convention(block) () -> ()) { + _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue), + qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) + } + + // Used by DispatchQueue.synchronously to provide a @noescape path through + // dispatch_block_t, as we know the lifetime of the block in question. + internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: @noescape () -> ()) { + _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock) + } + + public func perform() { + if let g = _group { + g.enter() + defer { g.leave() } + } + _block() + } + + public func wait() { + _ = dispatch_block_wait(_block, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? 
.Success : .TimedOut + } + + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute: @convention(block) () -> Void) { + if qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: execute) + dispatch_block_notify(_block, queue.__wrapped, item._block) + } else { + dispatch_block_notify(_block, queue.__wrapped, execute) + } + } + + public func notify(queue: DispatchQueue, execute: DispatchWorkItem) { + dispatch_block_notify(_block, queue.__wrapped, execute._block) + } + + public func cancel() { + dispatch_block_cancel(_block) + } + + public var isCancelled: Bool { + return dispatch_block_testcancel(_block) != 0 + } +} + +@available(OSX 10.10, iOS 8.0, *) +public extension DispatchWorkItem { + @available(*, deprecated, renamed: "DispatchWorkItem.wait(self:wallTimeout:)") + public func wait(timeout: DispatchWallTime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} + +/// The dispatch_block_t typealias is different from usual closures in that it +/// uses @convention(block). This is to avoid unnecessary bridging between +/// C blocks and Swift closures, which interferes with dispatch APIs that depend +/// on the referential identity of a block. Particularly, dispatch_block_create. 
+internal typealias _DispatchBlock = @convention(block) () -> Void +internal typealias dispatch_block_t = @convention(block) () -> Void + +@_silgen_name("_swift_dispatch_block_create_noescape") +internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: @noescape () -> ()) -> _DispatchBlock diff --git a/src/swift/Data.swift b/src/swift/Data.swift new file mode 100644 index 000000000..0d21e27c0 --- /dev/null +++ b/src/swift/Data.swift @@ -0,0 +1,277 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public struct DispatchData : RandomAccessCollection { + public typealias Iterator = DispatchDataIterator + public typealias Index = Int + public typealias Indices = DefaultRandomAccessIndices + + public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty()) + +#if false /* FIXME: dragging in _TMBO (Objective-C) */ + public enum Deallocator { + /// Use `free` + case free + + /// Use `munmap` + case unmap + + /// A custom deallocator + case custom(DispatchQueue?, @convention(block) () -> Void) + + private var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { + switch self { + case .free: return (nil, _dispatch_data_destructor_free()) + case .unmap: return (nil, _dispatch_data_destructor_munmap()) + case .custom(let q, let b): return (q, b) + } + } + } +#endif + internal var __wrapped: dispatch_data_t + + /// Initialize a `Data` with copied memory content. 
+ /// + /// - parameter bytes: A pointer to the memory. It will be copied. + /// - parameter count: The number of bytes to copy. + public init(bytes buffer: UnsafeBufferPointer) { + __wrapped = dispatch_data_create( + buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default()) + } +#if false /* FIXME: dragging in _TMBO (Objective-C) */ + /// Initialize a `Data` without copying the bytes. + /// + /// - parameter bytes: A pointer to the bytes. + /// - parameter count: The size of the bytes. + /// - parameter deallocator: Specifies the mechanism to free the indicated buffer. + public init(bytesNoCopy bytes: UnsafeBufferPointer, deallocator: Deallocator = .free) { + let (q, b) = deallocator._deallocator + + __wrapped = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + } +#endif + internal init(data: dispatch_data_t) { + __wrapped = data + } + + public var count: Int { + return CDispatch.dispatch_data_get_size(__wrapped) + } + + public func withUnsafeBytes( + body: @noescape (UnsafePointer) throws -> Result) rethrows -> Result + { + var ptr: UnsafePointer? = nil + var size = 0; + let data = CDispatch.dispatch_data_create_map(__wrapped, &ptr, &size) + defer { _fixLifetime(data) } + return try body(UnsafePointer(ptr!)) + } + + public func enumerateBytes( + block: @noescape (buffer: UnsafeBufferPointer, byteIndex: Int, stop: inout Bool) -> Void) + { + _swift_dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in + let bp = UnsafeBufferPointer(start: UnsafePointer(ptr), count: size) + var stop = false + block(buffer: bp, byteIndex: offset, stop: &stop) + return !stop + } + } + + /// Append bytes to the data. + /// + /// - parameter bytes: A pointer to the bytes to copy in to the data. + /// - parameter count: The number of bytes to copy. 
+ public mutating func append(_ bytes: UnsafePointer, count: Int) { + let data = dispatch_data_create(bytes, count, nil, _dispatch_data_destructor_default()) + self.append(DispatchData(data: data)) + } + + /// Append data to the data. + /// + /// - parameter data: The data to append to this data. + public mutating func append(_ other: DispatchData) { + let data = CDispatch.dispatch_data_create_concat(__wrapped, other.__wrapped) + __wrapped = data + } + + /// Append a buffer of bytes to the data. + /// + /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`. + public mutating func append(_ buffer : UnsafeBufferPointer) { + self.append(UnsafePointer(buffer.baseAddress!), count: buffer.count * sizeof(SourceType.self)) + } + + private func _copyBytesHelper(to pointer: UnsafeMutablePointer, from range: CountableRange) { + var copiedCount = 0 + _ = CDispatch.dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in + let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size) + memcpy(pointer + copiedCount, ptr, limit) + copiedCount += limit + return copiedCount < (range.endIndex - range.startIndex) + } + } + + /// Copy the contents of the data to a pointer. + /// + /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. + /// - parameter count: The number of bytes to copy. + /// - warning: This method does not verify that the contents at pointer have enough space to hold `count` bytes. + public func copyBytes(to pointer: UnsafeMutablePointer, count: Int) { + _copyBytesHelper(to: pointer, from: 0.., from range: CountableRange) { + _copyBytesHelper(to: pointer, from: range) + } + + /// Copy the contents of the data into a buffer. + /// + /// This function copies the bytes in `range` from the data into the buffer. 
If the count of the `range` is greater than `sizeof(DestinationType) * buffer.count` then the first N bytes will be copied into the buffer. + /// - precondition: The range must be within the bounds of the data. Otherwise `fatalError` is called. + /// - parameter buffer: A buffer to copy the data into. + /// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied. + /// - returns: Number of bytes copied into the destination buffer. + public func copyBytes(to buffer: UnsafeMutableBufferPointer, from range: CountableRange? = nil) -> Int { + let cnt = count + guard cnt > 0 else { return 0 } + + let copyRange : CountableRange + if let r = range { + guard !r.isEmpty else { return 0 } + precondition(r.startIndex >= 0) + precondition(r.startIndex < cnt, "The range is outside the bounds of the data") + + precondition(r.endIndex >= 0) + precondition(r.endIndex <= cnt, "The range is outside the bounds of the data") + + copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * sizeof(DestinationType.self), r.count)) + } else { + copyRange = 0.. = UnsafeMutablePointer(buffer.baseAddress!) + _copyBytesHelper(to: pointer, from: copyRange) + return copyRange.count + } + + /// Sets or returns the byte at the specified index. + public subscript(index: Index) -> UInt8 { + var offset = 0 + let subdata = CDispatch.dispatch_data_copy_region(__wrapped, index, &offset) + + var ptr: UnsafePointer? = nil + var size = 0 + let map = CDispatch.dispatch_data_create_map(subdata, &ptr, &size) + defer { _fixLifetime(map) } + + let pptr = UnsafePointer(ptr!) + return pptr[index - offset] + } + + public subscript(bounds: Range) -> RandomAccessSlice { + return RandomAccessSlice(base: self, bounds: bounds) + } + + /// Return a new copy of the data in a specified range. + /// + /// - parameter range: The range to copy. 
+ public func subdata(in range: CountableRange) -> DispatchData { + let subrange = CDispatch.dispatch_data_create_subrange( + __wrapped, range.startIndex, range.endIndex - range.startIndex) + return DispatchData(data: subrange) + } + + public func region(location: Int) -> (data: DispatchData, offset: Int) { + var offset: Int = 0 + let data = CDispatch.dispatch_data_copy_region(__wrapped, location, &offset) + return (DispatchData(data: data), offset) + } + + public var startIndex: Index { + return 0 + } + + public var endIndex: Index { + return count + } + + public func index(before i: Index) -> Index { + return i - 1 + } + + public func index(after i: Index) -> Index { + return i + 1 + } + + /// An iterator over the contents of the data. + /// + /// The iterator will increment byte-by-byte. + public func makeIterator() -> DispatchData.Iterator { + return DispatchDataIterator(_data: self) + } +} + +public struct DispatchDataIterator : IteratorProtocol, Sequence { + + /// Create an iterator over the given DisaptchData + public init(_data: DispatchData) { + var ptr: UnsafePointer? + self._count = 0 + self._data = CDispatch.dispatch_data_create_map(_data.__wrapped, &ptr, &self._count) + self._ptr = UnsafePointer(ptr!) + self._position = _data.startIndex + } + + /// Advance to the next element and return it, or `nil` if no next + /// element exists. + /// + /// - Precondition: No preceding call to `self.next()` has returned `nil`. + public mutating func next() -> DispatchData._Element? 
{ + if _position == _count { return nil } + let element = _ptr[_position]; + _position = _position + 1 + return element + } + + internal let _data: dispatch_data_t + internal var _ptr: UnsafePointer + internal var _count: Int + internal var _position: DispatchData.Index +} + +typealias _swift_data_applier = @convention(block) @noescape (dispatch_data_t, Int, UnsafePointer, Int) -> Bool + +@_silgen_name("_swift_dispatch_data_apply") +internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier) + +@_silgen_name("_swift_dispatch_data_empty") +internal func _swift_dispatch_data_empty() -> dispatch_data_t + +@_silgen_name("_swift_dispatch_data_destructor_free") +internal func _dispatch_data_destructor_free() -> _DispatchBlock + +@_silgen_name("_swift_dispatch_data_destructor_munmap") +internal func _dispatch_data_destructor_munmap() -> _DispatchBlock + +@_silgen_name("_swift_dispatch_data_destructor_default") +internal func _dispatch_data_destructor_default() -> _DispatchBlock diff --git a/src/swift/Dispatch.apinotes b/src/swift/Dispatch.apinotes new file mode 100644 index 000000000..6e804515a --- /dev/null +++ b/src/swift/Dispatch.apinotes @@ -0,0 +1,328 @@ +--- +Name: Dispatch +Typedefs: +- Name: dispatch_object_t + Availability: nonswift +- Name: dispatch_block_t + Availability: nonswift +- Name: dispatch_queue_t + Availability: nonswift +- Name: dispatch_semaphore_t + Availability: nonswift +- Name: dispatch_io_t + Availability: nonswift +- Name: dispatch_data_t + Availability: nonswift +- Name: dispatch_group_t + Availability: nonswift +- Name: dispatch_qos_class_t + Availability: nonswift +- Name: dispatch_data_applier_t + Availability: nonswift +- Name: dispatch_fd_t + Availability: nonswift +- Name: dispatch_io_handler_t + Availability: nonswift +- Name: dispatch_source_t + Availability: nonswift +- Name: dispatch_function_t + Availability: nonswift +- Name: dispatch_io_close_flags_t + Availability: nonswift +- Name: 
dispatch_io_interval_flags_t + Availability: nonswift +- Name: dispatch_io_type_t + Availability: nonswift +- Name: dispatch_source_timer_flags_t + Availability: nonswift +- Name: dispatch_autorelease_frequency_t + SwiftPrivate: true +- Name: dispatch_queue_attr_t + Availability: nonswift +- Name: dispatch_queue_priority_t + Availability: nonswift +- Name: dispatch_block_flags_t + SwiftPrivate: true +- Name: dispatch_source_type_t + SwiftPrivate: true +- Name: dispatch_source_mach_send_flags_t + Availability: nonswift +- Name: dispatch_source_memorypressure_flags_t + Availability: nonswift +- Name: dispatch_source_proc_flags_t + Availability: nonswift +- Name: dispatch_source_vnode_flags_t + Availability: nonswift +Classes: +- Name: OS_dispatch_object + SwiftName: DispatchObject +- Name: OS_dispatch_queue + SwiftName: DispatchQueue +- Name: OS_dispatch_io + SwiftName: DispatchIO +- Name: OS_dispatch_semaphore + SwiftName: DispatchSemaphore +- Name: OS_dispatch_group + SwiftName: DispatchGroup +- Name: OS_dispatch_source + SwiftName: DispatchSource +- Name: OS_dispatch_queue_attr + SwiftPrivate: true +- Name: OS_dispatch_data + SwiftName: __DispatchData +Protocols: +- Name: OS_dispatch_source + SwiftName: DispatchSourceType +- Name: OS_dispatch_source_mach_send + SwiftName: DispatchSourceMachSend +- Name: OS_dispatch_source_mach_recv + SwiftName: DispatchSourceMachReceive +- Name: OS_dispatch_source_memorypressure + SwiftName: DispatchSourceMemoryPressure +- Name: OS_dispatch_source_proc + SwiftName: DispatchSourceProcess +- Name: OS_dispatch_source_read + SwiftName: DispatchSourceRead +- Name: OS_dispatch_source_signal + SwiftName: DispatchSourceSignal +- Name: OS_dispatch_source_timer + SwiftName: DispatchSourceTimer +- Name: OS_dispatch_source_data_or + SwiftName: DispatchSourceUserDataOr +- Name: OS_dispatch_source_data_add + SwiftName: DispatchSourceUserDataAdd +- Name: OS_dispatch_source_vnode + SwiftName: DispatchSourceFileSystemObject +- Name: 
OS_dispatch_source_write + SwiftName: DispatchSourceWrite +Functions: +- Name: dispatch_release + Availability: nonswift +- Name: dispatch_retain + Availability: nonswift +# dispatch_queue_t +- Name: dispatch_queue_create + SwiftName: 'DispatchQueue.init(__label:attr:)' + SwiftPrivate: true +- Name: dispatch_get_global_queue + SwiftPrivate: true +- Name: dispatch_queue_create_with_target + SwiftName: 'DispatchQueue.init(__label:attr:queue:)' + SwiftPrivate: true +- Name: dispatch_assert_queue + SwiftPrivate: true +- Name: dispatch_assert_queue_barrier + SwiftPrivate: true +- Name: dispatch_assert_queue_not + SwiftPrivate: true +- Name: dispatch_async + SwiftPrivate: true +- Name: dispatch_async_f + Availability: nonswift +- Name: dispatch_barrier_async + SwiftPrivate: true +- Name: dispatch_barrier_async_f + Availability: nonswift +- Name: dispatch_apply + SwiftPrivate: true +- Name: dispatch_apply_f + Availability: nonswift +- Name: dispatch_sync + SwiftName: 'DispatchQueue.sync(self:execute:)' +- Name: dispatch_sync_f + Availability: nonswift +- Name: dispatch_barrier_sync + SwiftPrivate: true +- Name: dispatch_barrier_sync_f + Availability: nonswift +- Name: dispatch_queue_get_label + SwiftPrivate: true +- Name: dispatch_queue_get_qos_class + SwiftPrivate: true +- Name: dispatch_after + SwiftPrivate: true +- Name: dispatch_after_f + Availability: nonswift +- Name: dispatch_queue_get_specific + SwiftPrivate: true +- Name: dispatch_queue_set_specific + SwiftPrivate: true +- Name: dispatch_get_specific + SwiftPrivate: true +- Name: dispatch_get_main_queue + Availability: nonswift +- Name: dispatch_queue_attr_make_initially_inactive + SwiftPrivate: true +- Name: dispatch_queue_attr_make_with_autorelease_frequency + SwiftPrivate: true +- Name: dispatch_queue_attr_make_with_qos_class + SwiftPrivate: true +# dispatch_object_t +- Name: dispatch_set_target_queue + SwiftName: 'DispatchObject.setTarget(self:queue:)' +- Name: dispatch_activate + SwiftName: 
'DispatchObject.activate(self:)' +- Name: dispatch_suspend + SwiftName: 'DispatchObject.suspend(self:)' +- Name: dispatch_resume + SwiftName: 'DispatchObject.resume(self:)' +- Name: dispatch_set_finalizer_f + Availability: nonswift +- Name: dispatch_get_context + Availability: nonswift +- Name: dispatch_set_context + Availability: nonswift +- Name: _dispatch_object_validate + Availability: nonswift +# dispatch_block +- Name: dispatch_block_create + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem()' +- Name: dispatch_block_create_with_qos_class + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem()' +- Name: dispatch_block_perform + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.perform()' +- Name: dispatch_block_wait + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.wait(timeout:)' +- Name: dispatch_block_notify + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.notify(queue:execute:)' +- Name: dispatch_block_cancel + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.cancel()' +- Name: dispatch_block_testcancel + Availability: nonswift + AvailabilityMsg: 'Use DispatchWorkItem.isCancelled' +# dispatch_data +- Name: dispatch_data_create + SwiftPrivate: true +- Name: dispatch_data_get_size + SwiftPrivate: true +- Name: dispatch_data_apply + SwiftPrivate: true +- Name: dispatch_data_create_concat + SwiftPrivate: true +- Name: dispatch_data_create_subrange + SwiftPrivate: true +- Name: dispatch_data_copy_region + SwiftPrivate: true +- Name: dispatch_data_create_map + SwiftPrivate: true +# dispatch_group_t +- Name: dispatch_group_create + SwiftName: 'DispatchGroup.init()' + Availability: available +- Name: dispatch_group_async + SwiftPrivate: true +- Name: dispatch_group_async_f + Availability: nonswift +- Name: dispatch_group_wait + SwiftPrivate: true +- Name: dispatch_group_notify + SwiftPrivate: true +- Name: dispatch_group_notify_f + Availability: nonswift +- Name: 
dispatch_group_enter + SwiftName: 'DispatchGroup.enter(self:)' +- Name: dispatch_group_leave + SwiftName: 'DispatchGroup.leave(self:)' +# dispatch_io +- Name: dispatch_io_create + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:fd:queue:handler:)' +- Name: dispatch_io_create_with_path + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:path:oflag:mode:queue:handler:)' +- Name: dispatch_io_create_with_io + SwiftPrivate: true + SwiftName: 'DispatchIO.init(__type:io:queue:handler:)' +- Name: dispatch_io_read + SwiftPrivate: true +- Name: dispatch_io_write + SwiftPrivate: true +- Name: dispatch_io_close + SwiftPrivate: true +- Name: dispatch_io_barrier + SwiftName: 'DispatchIO.barrier(self:execute:)' +- Name: dispatch_io_get_descriptor + SwiftName: 'getter:DispatchIO.fileDescriptor(self:)' +- Name: dispatch_io_set_high_water + SwiftName: 'DispatchIO.setLimit(self:highWater:)' +- Name: dispatch_io_set_low_water + SwiftName: 'DispatchIO.setLimit(self:lowWater:)' +- Name: dispatch_io_set_interval + SwiftPrivate: true +- Name: dispatch_read + SwiftPrivate: true +- Name: dispatch_write + SwiftPrivate: true +# dispatch_semaphore +- Name: dispatch_semaphore_create + SwiftName: 'DispatchSemaphore.init(value:)' +- Name: dispatch_semaphore_wait + SwiftPrivate: true +- Name: dispatch_semaphore_signal + SwiftPrivate: true +# dispatch_source +- Name: dispatch_source_create + SwiftPrivate: true +- Name: dispatch_source_get_handle + SwiftPrivate: true +- Name: dispatch_source_get_mask + SwiftPrivate: true +- Name: dispatch_source_get_data + SwiftPrivate: true +- Name: dispatch_source_merge_data + SwiftPrivate: true +- Name: dispatch_source_set_event_handler + SwiftPrivate: true +- Name: dispatch_source_set_event_handler_f + Availability: nonswift +- Name: dispatch_source_set_cancel_handler + SwiftPrivate: true +- Name: dispatch_source_set_cancel_handler_f + Availability: nonswift +- Name: dispatch_source_set_registration_handler + SwiftPrivate: true +- Name: 
dispatch_source_set_registration_handler_f + Availability: nonswift +- Name: dispatch_source_cancel + SwiftPrivate: true +- Name: dispatch_source_testcancel + SwiftPrivate: true +- Name: dispatch_source_set_timer + SwiftPrivate: true +# dispatch_time +- Name: dispatch_time + SwiftPrivate: true +- Name: dispatch_walltime + SwiftPrivate: true +- Name: dispatch_main + SwiftName: 'dispatchMain()' +Globals: +- Name: _dispatch_data_destructor_free + Availability: nonswift +- Name: _dispatch_data_destructor_munmap + Availability: nonswift +Enumerators: +- Name: DISPATCH_BLOCK_BARRIER + Availability: nonswift +- Name: DISPATCH_BLOCK_DETACHED + Availability: nonswift +- Name: DISPATCH_BLOCK_ASSIGN_CURRENT + Availability: nonswift +- Name: DISPATCH_BLOCK_NO_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_BLOCK_INHERIT_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_BLOCK_ENFORCE_QOS_CLASS + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + Availability: nonswift +- Name: DISPATCH_AUTORELEASE_FREQUENCY_NEVER + Availability: nonswift diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift new file mode 100644 index 000000000..2b9cb2164 --- /dev/null +++ b/src/swift/Dispatch.swift @@ -0,0 +1,211 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +@_exported import Dispatch + +import CDispatch + +/// dispatch_assert + +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public enum DispatchPredicate { + case onQueue(DispatchQueue) + case onQueueAsBarrier(DispatchQueue) + case notOnQueue(DispatchQueue) +} + +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { + switch condition { + case .onQueue(let q): + dispatch_assert_queue(q.__wrapped) + case .onQueueAsBarrier(let q): + dispatch_assert_queue_barrier(q.__wrapped) + case .notOnQueue(let q): + dispatch_assert_queue_not(q.__wrapped) + } + return true +} + +@_transparent +@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate) { + // precondition is able to determine release-vs-debug asserts where the overlay + // cannot, so formulating this into a call that we can call with precondition() + precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure") +} + +/// qos_class_t + +public struct DispatchQoS : Equatable { + public let qosClass: QoSClass + public let relativePriority: Int + + @available(OSX 10.10, iOS 8.0, *) + public static let background = DispatchQoS(qosClass: .background, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let utility = DispatchQoS(qosClass: .utility, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0) + + @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: 
"DispatchQoS.default") + @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "DispatchQoS.default") + @available(*, deprecated, renamed: "DispatchQoS.default") + public static let defaultQoS = DispatchQoS.default + + @available(OSX 10.10, iOS 8.0, *) + public static let userInitiated = DispatchQoS(qosClass: .userInitiated, relativePriority: 0) + + @available(OSX 10.10, iOS 8.0, *) + public static let userInteractive = DispatchQoS(qosClass: .userInteractive, relativePriority: 0) + + public static let unspecified = DispatchQoS(qosClass: .unspecified, relativePriority: 0) + + public enum QoSClass { + @available(OSX 10.10, iOS 8.0, *) + case background + + @available(OSX 10.10, iOS 8.0, *) + case utility + + @available(OSX 10.10, iOS 8.0, *) + case `default` + + @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "QoSClass.default") + @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "QoSClass.default") + @available(*, deprecated, renamed: "QoSClass.default") + static let defaultQoS = QoSClass.default + + @available(OSX 10.10, iOS 8.0, *) + case userInitiated + + @available(OSX 10.10, iOS 8.0, *) + case userInteractive + + case unspecified + + @available(OSX 10.10, iOS 8.0, *) + internal init?(qosClass: _OSQoSClass) { + switch qosClass { + case .QOS_CLASS_BACKGROUND: self = .background + case .QOS_CLASS_UTILITY: self = .utility + case .QOS_CLASS_DEFAULT: self = .default + case .QOS_CLASS_USER_INITIATED: self = .userInitiated + case .QOS_CLASS_USER_INTERACTIVE: self = .userInteractive + case .QOS_CLASS_UNSPECIFIED: self = .unspecified + default: return nil + } + } + + @available(OSX 10.10, iOS 8.0, *) + internal var rawValue: _OSQoSClass { + switch self { + case .background: return .QOS_CLASS_BACKGROUND + case .utility: return .QOS_CLASS_UTILITY + case .default: return .QOS_CLASS_DEFAULT + case .userInitiated: return .QOS_CLASS_USER_INITIATED + case .userInteractive: return .QOS_CLASS_USER_INTERACTIVE + case .unspecified: return 
.QOS_CLASS_UNSPECIFIED + } + } + } + + public init(qosClass: QoSClass, relativePriority: Int) { + self.qosClass = qosClass + self.relativePriority = relativePriority + } +} + +public func ==(a: DispatchQoS, b: DispatchQoS) -> Bool { + return a.qosClass == b.qosClass && a.relativePriority == b.relativePriority +} + +/// + +public enum DispatchTimeoutResult { + static let KERN_OPERATION_TIMED_OUT:Int = 49 + case Success + case TimedOut +} + +/// dispatch_group + +public extension DispatchGroup { + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @convention(block) () -> ()) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block) + } else { + dispatch_group_notify(self.__wrapped, queue.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func notify(queue: DispatchQueue, work: DispatchWorkItem) { + dispatch_group_notify(self.__wrapped, queue.__wrapped, work._block) + } + + public func wait() { + _ = dispatch_group_wait(self.__wrapped, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout timeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? 
.Success : .TimedOut + } +} + +public extension DispatchGroup { + @available(*, deprecated, renamed: "DispatchGroup.wait(self:wallTimeout:)") + public func wait(walltime timeout: DispatchWallTime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} + +/// dispatch_semaphore + +public extension DispatchSemaphore { + @discardableResult + public func signal() -> Int { + return dispatch_semaphore_signal(self.__wrapped) + } + + public func wait() { + _ = dispatch_semaphore_wait(self.__wrapped, DispatchTime.distantFuture.rawValue) + } + + public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { + return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + } + + public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { + return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .Success : .TimedOut + } +} + +public extension DispatchSemaphore { + @available(*, deprecated, renamed: "DispatchSemaphore.wait(self:wallTimeout:)") + public func wait(walltime timeout: DispatchWalltime) -> Int { + switch wait(wallTimeout: timeout) { + case .Success: return 0 + case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT + } + } +} diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc new file mode 100644 index 000000000..1e5ec74f7 --- /dev/null +++ b/src/swift/DispatchStubs.cc @@ -0,0 +1,207 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +#include +#include + +#define DISPATCH_RUNTIME_STDLIB_INTERFACE __attribute__((__visibility__("default"))) + +#if USE_OBJC +@protocol OS_dispatch_source; +@protocol OS_dispatch_source_mach_send; +@protocol OS_dispatch_source_mach_recv; +@protocol OS_dispatch_source_memorypressure; +@protocol OS_dispatch_source_proc; +@protocol OS_dispatch_source_read; +@protocol OS_dispatch_source_signal; +@protocol OS_dispatch_source_timer; +@protocol OS_dispatch_source_data_add; +@protocol OS_dispatch_source_data_or; +@protocol OS_dispatch_source_vnode; +@protocol OS_dispatch_source_write; + +// #include +__attribute__((constructor)) +static void _dispatch_overlay_constructor() { + Class source = objc_lookUpClass("OS_dispatch_source"); + if (source) { + class_addProtocol(source, @protocol(OS_dispatch_source)); + class_addProtocol(source, @protocol(OS_dispatch_source_mach_send)); + class_addProtocol(source, @protocol(OS_dispatch_source_mach_recv)); + class_addProtocol(source, @protocol(OS_dispatch_source_memorypressure)); + class_addProtocol(source, @protocol(OS_dispatch_source_proc)); + class_addProtocol(source, @protocol(OS_dispatch_source_read)); + class_addProtocol(source, @protocol(OS_dispatch_source_signal)); + class_addProtocol(source, @protocol(OS_dispatch_source_timer)); + class_addProtocol(source, @protocol(OS_dispatch_source_data_add)); + class_addProtocol(source, @protocol(OS_dispatch_source_data_or)); + class_addProtocol(source, @protocol(OS_dispatch_source_vnode)); + class_addProtocol(source, @protocol(OS_dispatch_source_write)); + } +} + +#endif /* USE_OBJC */ + +#if 0 /* FIXME -- adding directory to include path may need build-script 
plumbing to do properly... */ +#include "swift/Runtime/Config.h" +#else +#define SWIFT_CC(x) /* FIXME!! */ +#endif + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_queue_attr_t +_swift_dispatch_queue_concurrent(void) { + return DISPATCH_QUEUE_CONCURRENT; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)) { + dispatch_apply(iterations, (dispatch_queue_t _Nonnull)0, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_queue_t +_swift_dispatch_get_main_queue(void) { + return dispatch_get_main_queue(); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_data_t +_swift_dispatch_data_empty(void) { + return dispatch_data_empty; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_default(void) { + return DISPATCH_DATA_DESTRUCTOR_DEFAULT; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_free(void) { + return _dispatch_data_destructor_free; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_data_destructor_munmap(void) { + return _dispatch_data_destructor_munmap; +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block) { + return dispatch_block_create_with_qos_class(flags, qos, relative_priority, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" dispatch_block_t +_swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block) { + return dispatch_block_create(flags, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void 
+_swift_dispatch_block_cancel(dispatch_block_t block) { + dispatch_block_cancel(block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" long +_swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout) { + return dispatch_block_wait(block, timeout); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block) { + dispatch_block_notify(block, queue, notification_block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" long +_swift_dispatch_block_testcancel(dispatch_block_t block) { + return dispatch_block_testcancel(block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" bool +_swift_dispatch_data_apply(dispatch_data_t data, bool (^applier)(dispatch_data_t, size_t, const void *, size_t)) { + return dispatch_data_apply(data, applier); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block) { + dispatch_async(queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block) { + dispatch_group_async(group, queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block) { + dispatch_sync(queue, block); +} + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_release(dispatch_object_t obj) { + dispatch_release(obj); +} + +// DISPATCH_RUNTIME_STDLIB_INTERFACE +// extern "C" dispatch_queue_t +// _swift_apply_current_root_queue() { +// return DISPATCH_APPLY_CURRENT_ROOT_QUEUE; +// } + +#define SOURCE(t) \ + SWIFT_CC(swift) \ + DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t \ + _swift_dispatch_source_type_##t(void) { \ + 
return DISPATCH_SOURCE_TYPE_##t; \ + } + +SOURCE(DATA_ADD) +SOURCE(DATA_OR) +#if HAVE_MACH +SOURCE(MACH_SEND) +SOURCE(MACH_RECV) +SOURCE(MEMORYPRESSURE) +#endif +#ifndef __linux__ +SOURCE(PROC) +#endif +SOURCE(READ) +SOURCE(SIGNAL) +SOURCE(TIMER) +#ifndef __linux__ +SOURCE(VNODE) +#endif +SOURCE(WRITE) + +// See comment in CFFuntime.c explaining why objc_retainAutoreleasedReturnValue is needed. +extern "C" void swift_release(void *); +extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { + if (obj) { + swift_release(obj); + return obj; + } + else return NULL; +} diff --git a/src/swift/IO.swift b/src/swift/IO.swift new file mode 100644 index 000000000..6e6b6692e --- /dev/null +++ b/src/swift/IO.swift @@ -0,0 +1,129 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public extension DispatchIO { + + public enum StreamType : UInt { + case stream = 0 + case random = 1 + } + + public struct CloseFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let stop = CloseFlags(rawValue: 1) + } + + public struct IntervalFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + public init(nilLiteral: ()) { self.rawValue = 0 } + + public static let strictInterval = IntervalFlags(rawValue: 1) + } + + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: (data: 
DispatchData, error: Int32) -> Void) { + dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in + handler(data: DispatchData(data: data), error: error) + } + } + + public class func write(fromFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData?, error: Int32) -> Void) { + dispatch_write(fromFileDescriptor, data.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data: data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public convenience init( + type: StreamType, + fileDescriptor: Int32, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + { + self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler) + } + + public convenience init( + type: StreamType, + path: UnsafePointer, + oflag: Int32, + mode: mode_t, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + { + self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler) + } + + public convenience init( + type: StreamType, + io: DispatchIO, + queue: DispatchQueue, + cleanupHandler: (error: Int32) -> Void) + { + self.init(__type: type.rawValue, io: io, queue: queue, handler: cleanupHandler) + } + + public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + dispatch_io_write(self.__wrapped, offset, data.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done: done, data: 
data.flatMap { DispatchData(data: $0) }, error: error) + } + } + + public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) { + dispatch_io_set_interval(self.__wrapped, interval.rawValue, flags.rawValue) + } + + public func close(flags: CloseFlags = []) { + dispatch_io_close(self.__wrapped, flags.rawValue) + } +} + +extension DispatchIO { + @available(*, deprecated, renamed: "DispatchIO.read(fromFileDescriptor:maxLength:runningHandlerOn:handler:)") + public class func read(fd: Int32, length: Int, queue: DispatchQueue, handler: (DispatchData, Int32) -> Void) { + DispatchIO.read(fromFileDescriptor: fd, maxLength: length, runningHandlerOn: queue, handler: handler) + } + + @available(*, deprecated, renamed: "DispatchIO.write(fromFileDescriptor:data:runningHandlerOn:handler:)") + public class func write(fd: Int32, data: DispatchData, queue: DispatchQueue, handler: (DispatchData?, Int32) -> Void) { + DispatchIO.write(fromFileDescriptor: fd, data: data, runningHandlerOn: queue, handler: handler) + } + + @available(*, deprecated, renamed: "DispatchIO.barrier(self:execute:)") + public func withBarrier(barrier work: () -> ()) { + barrier(execute: work) + } + + @available(*, deprecated, renamed: "DispatchIO.setLimit(self:highWater:)") + public func setHighWater(highWater: Int) { + setLimit(highWater: highWater) + } + + @available(*, deprecated, renamed: "DispatchIO.setLimit(self:lowWater:)") + public func setLowWater(lowWater: Int) { + setLimit(lowWater: lowWater) + } + + @available(*, deprecated, renamed: "DispatchIO.setInterval(self:interval:flags:)") + public func setInterval(interval: UInt64, flags: IntervalFlags) { + setInterval(interval: .nanoseconds(Int(interval)), flags: flags) + } +} diff --git a/src/swift/Private.swift b/src/swift/Private.swift new file mode 100644 index 000000000..e38f72861 --- /dev/null +++ b/src/swift/Private.swift @@ -0,0 +1,474 @@ +//===----------------------------------------------------------------------===// +// +// 
This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// Redeclarations of all SwiftPrivate functions with appropriate markup. + +import CDispatch + +@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +public func dispatch_queue_create(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?) -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?, _ queue: DispatchQueue?) -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") +public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:path:oflag:mode:queue:cleanupHandler:)") +public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.init(type:io:queue:cleanupHandler:)") +public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.read(fileDescriptor:length:queue:handler:)") +public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ 
handler: (dispatch_data_t, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.read(self:offset:length:queue:ioHandler:)") +func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.write(self:offset:data:queue:ioHandler:)") +func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.write(fileDescriptor:data:queue:handler:)") +func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: (dispatch_data_t?, Int32) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.init(bytes:)") +public func dispatch_data_create(_ buffer: UnsafePointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchData.count(self:)") +public func dispatch_data_get_size(_ data: dispatch_data_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.withUnsafeBytes(self:body:)") +public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?>?, _ size_ptr: UnsafeMutablePointer?) 
-> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.append(self:_:)") +public func dispatch_data_create_concat(_ data1: dispatch_data_t, _ data2: dispatch_data_t) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.subdata(self:in:)") +public func dispatch_data_create_subrange(_ data: dispatch_data_t, _ offset: Int, _ length: Int) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.enumerateBytes(self:block:)") +public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: (dispatch_data_t, Int, UnsafePointer, Int) -> Bool) -> Bool +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchData.region(self:location:)") +public func dispatch_data_copy_region(_ data: dispatch_data_t, _ location: Int, _ offset_ptr: UnsafeMutablePointer) -> dispatch_data_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") +public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)") +public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchGroup.wait(self:timeout:)") +public func dispatch_group_wait(_ group: DispatchGroup, _ timeout: dispatch_time_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.close(self:flags:)") +public func dispatch_io_close(_ channel: DispatchIO, _ flags: UInt) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchIO.setInterval(self:interval:flags:)") +public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64, _ flags: UInt) +{ + fatalError() +} + +@available(*, unavailable, 
renamed:"DispatchQueue.apply(attributes:iterations:execute:)") +public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: @noescape (Int) -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)") +public func dispatch_async(_ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.global(attributes:)") +public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.main") +public func dispatch_get_main_queue() -> DispatchQueue +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.initiallyInactive") +public func dispatch_queue_attr_make_initially_inactive(_ attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.autoreleaseWorkItem") +public func dispatch_queue_attr_make_with_autorelease_frequency(_ attr: dispatch_queue_attr_t?, _ frequency: dispatch_autorelease_frequency_t) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueueAttributes.qosUserInitiated") +public func dispatch_queue_attr_make_with_qos_class(_ attr: dispatch_queue_attr_t?, _ qos_class: dispatch_qos_class_t, _ relative_priority: Int32) -> dispatch_queue_attr_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchQueue.label(self:)") +public func dispatch_queue_get_label(_ queue: DispatchQueue?) -> UnsafePointer +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchQueue.qos(self:)") +public func dispatch_queue_get_qos_class(_ queue: DispatchQueue, _ relative_priority_ptr: UnsafeMutablePointer?) 
-> dispatch_qos_class_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.after(self:when:execute:)") +public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") +public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.synchronously(self:flags:execute:)") +public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: @noescape () -> Void) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.setSpecific(self:key:value:)") +public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafePointer, _ context: UnsafeMutablePointer?, _ destructor: (@convention(c) (UnsafeMutablePointer?) -> Void)?) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.getSpecific(self:key:)") +public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafePointer) -> UnsafeMutablePointer? +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchQueue.getSpecific(key:)") +public func dispatch_get_specific(_ key: UnsafePointer) -> UnsafeMutablePointer? 
+{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue_barrier(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"dispatchPrecondition(_:)") +public func dispatch_assert_queue_not(_ queue: DispatchQueue) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSemaphore.wait(self:timeout:)") +public func dispatch_semaphore_wait(_ dsema: DispatchSemaphore, _ timeout: dispatch_time_t) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSemaphore.signal(self:)") +public func dispatch_semaphore_signal(_ dsema: DispatchSemaphore) -> Int +{ + fatalError() +} + +@available(*, unavailable, message:"Use DispatchSource class methods") +public func dispatch_source_create(_ type: dispatch_source_type_t, _ handle: UInt, _ mask: UInt, _ queue: DispatchQueue?) -> DispatchSource +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setEventHandler(self:handler:)") +public func dispatch_source_set_event_handler(_ source: DispatchSource, _ handler: (() -> Void)?) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setCancelHandler(self:handler:)") +public func dispatch_source_set_cancel_handler(_ source: DispatchSource, _ handler: (() -> Void)?) 
+{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.cancel(self:)") +public func dispatch_source_cancel(_ source: DispatchSource) +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.isCancelled(self:)") +public func dispatch_source_testcancel(_ source: DispatchSource) -> Int +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.handle(self:)") +public func dispatch_source_get_handle(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.mask(self:)") +public func dispatch_source_get_mask(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"getter:DispatchSource.data(self:)") +public func dispatch_source_get_data(_ source: DispatchSource) -> UInt +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchUserDataAdd.mergeData(self:value:)") +public func dispatch_source_merge_data(_ source: DispatchSource, _ value: UInt) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchTimerSource.setTimer(self:start:interval:leeway:)") +public func dispatch_source_set_timer(_ source: DispatchSource, _ start: dispatch_time_t, _ interval: UInt64, _ leeway: UInt64) +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchSource.setRegistrationHandler(self:handler:)") +public func dispatch_source_set_registration_handler(_ source: DispatchSource, _ handler: (() -> Void)?) 
+{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchTime.now()") +public func dispatch_time(_ when: dispatch_time_t, _ delta: Int64) -> dispatch_time_t +{ + fatalError() +} + +@available(*, unavailable, renamed:"DispatchWalltime.init(time:)") +public func dispatch_walltime(_ when: UnsafePointer?, _ delta: Int64) -> dispatch_time_t +{ + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUserInitiated") +public var DISPATCH_QUEUE_PRIORITY_HIGH: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosDefault") +public var DISPATCH_QUEUE_PRIORITY_DEFAULT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUtility") +public var DISPATCH_QUEUE_PRIORITY_LOW: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosBackground") +public var DISPATCH_QUEUE_PRIORITY_BACKGROUND: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.StreamType.stream") +public var DISPATCH_IO_STREAM: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.StreamType.random") +public var DISPATCH_IO_RANDOM: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.CloseFlags.stop") +public var DISPATCH_IO_STOP: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchIO.IntervalFlags.strictInterval") +public var DISPATCH_IO_STRICT_INTERVAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MachSendEvent.dead") +public var DISPATCH_MACH_SEND_DEAD: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MemoryPressureEvent.normal") +public var DISPATCH_MEMORYPRESSURE_NORMAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.MemoryPressureEvent.warning") +public var DISPATCH_MEMORYPRESSURE_WARN: Int { + fatalError() +} + +@available(*, unavailable, renamed: 
"DispatchSource.MemoryPressureEvent.critical") +public var DISPATCH_MEMORYPRESSURE_CRITICAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.exit") +public var DISPATCH_PROC_EXIT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.fork") +public var DISPATCH_PROC_FORK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.exec") +public var DISPATCH_PROC_EXEC: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.ProcessEvent.signal") +public var DISPATCH_PROC_SIGNAL: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.TimerFlags.strict") +public var DISPATCH_TIMER_STRICT: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.delete") +public var DISPATCH_VNODE_DELETE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.write") +public var DISPATCH_VNODE_WRITE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.extend") +public var DISPATCH_VNODE_EXTEND: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.attrib") +public var DISPATCH_VNODE_ATTRIB: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.link") +public var DISPATCH_VNODE_LINK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.rename") +public var DISPATCH_VNODE_RENAME: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.revoke") +public var DISPATCH_VNODE_REVOKE: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchSource.FileSystemEvent.funlock") +public var DISPATCH_VNODE_FUNLOCK: Int { + fatalError() +} + +@available(*, unavailable, renamed: "DispatchTime.now()") +public var DISPATCH_TIME_NOW: Int { + fatalError() 
+} + +@available(*, unavailable, renamed: "DispatchTime.distantFuture") +public var DISPATCH_TIME_FOREVER: Int { + fatalError() +} diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift new file mode 100644 index 000000000..5a45fdcd1 --- /dev/null +++ b/src/swift/Queue.swift @@ -0,0 +1,421 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// dispatch/queue.h + +import CDispatch + +public struct DispatchQueueAttributes : OptionSet { + public let rawValue: UInt64 + public init(rawValue: UInt64) { self.rawValue = rawValue } + + public static let serial = DispatchQueueAttributes(rawValue: 0<<0) + public static let concurrent = DispatchQueueAttributes(rawValue: 1<<1) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let initiallyInactive = DispatchQueueAttributes(rawValue: 1<<2) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseInherit = DispatchQueueAttributes(rawValue: 1<<3) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseWorkItem = DispatchQueueAttributes(rawValue: 1<<4) + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let autoreleaseNever = DispatchQueueAttributes(rawValue: 1<<5) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInteractive = DispatchQueueAttributes(rawValue: 1<<6) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInitiated = DispatchQueueAttributes(rawValue: 1<<7) + + @available(OSX 10.10, iOS 
8.0, *) + public static let qosDefault = DispatchQueueAttributes(rawValue: 1<<8) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUtility = DispatchQueueAttributes(rawValue: 1<<9) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosBackground = DispatchQueueAttributes(rawValue: 1<<10) + + @available(*, deprecated, message: ".noQoS has no effect, it should not be used") + public static let noQoS = DispatchQueueAttributes(rawValue: 1<<11) + + private var attr: dispatch_queue_attr_t? { + var attr: dispatch_queue_attr_t? + + if self.contains(.concurrent) { + attr = _swift_dispatch_queue_concurrent() + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if self.contains(.initiallyInactive) { + attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) + } + if self.contains(.autoreleaseWorkItem) { + // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) + } else if self.contains(.autoreleaseInherit) { + // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) + } else if self.contains(.autoreleaseNever) { + // DISPATCH_AUTORELEASE_FREQUENCY_NEVER + attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) + } + } + if #available(OSX 10.10, iOS 8.0, *) { + if self.contains(.qosUserInteractive) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue, 0) + } else if self.contains(.qosUserInitiated) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue, 0) + } else if self.contains(.qosDefault) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_DEFAULT.rawValue, 0) + } else if self.contains(.qosUtility) { + attr = 
CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_UTILITY.rawValue, 0) + } else if self.contains(.qosBackground) { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_BACKGROUND.rawValue, 0) + } + } + return attr + } +} + + +public final class DispatchSpecificKey { + public init() {} +} + +internal class _DispatchSpecificValue { + internal let value: T + internal init(value: T) { self.value = value } +} + +public extension DispatchQueue { + + public struct GlobalAttributes : OptionSet { + public let rawValue: UInt64 + public init(rawValue: UInt64) { self.rawValue = rawValue } + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInteractive = GlobalAttributes(rawValue: 1<<0) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUserInitiated = GlobalAttributes(rawValue: 1<<1) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosDefault = GlobalAttributes(rawValue: 1<<2) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosUtility = GlobalAttributes(rawValue: 1<<3) + + @available(OSX 10.10, iOS 8.0, *) + public static let qosBackground = GlobalAttributes(rawValue: 1<<4) + + // Avoid using our own deprecated constants here by declaring + // non-deprecated constants and then basing the public ones on those. 
+ internal static let _priorityHigh = GlobalAttributes(rawValue: 1<<5) + internal static let _priorityDefault = GlobalAttributes(rawValue: 1<<6) + internal static let _priorityLow = GlobalAttributes(rawValue: 1<<7) + internal static let _priorityBackground = GlobalAttributes(rawValue: 1<<8) + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityHigh = _priorityHigh + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityDefault = _priorityDefault + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityLow = _priorityLow + + @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") + @available(*, deprecated: 8.0, message: "Use qos attributes instead") + public static let priorityBackground = _priorityBackground + + internal var _translatedValue: Int { + if #available(OSX 10.10, iOS 8.0, *) { + if self.contains(.qosUserInteractive) { return Int(_OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue) } + else if self.contains(.qosUserInitiated) { return Int(_OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue) } + else if self.contains(.qosDefault) { return Int(_OSQoSClass.QOS_CLASS_DEFAULT.rawValue) } + else if self.contains(.qosUtility) { return Int(_OSQoSClass.QOS_CLASS_UTILITY.rawValue) } + else { return Int(_OSQoSClass.QOS_CLASS_BACKGROUND.rawValue) } + } + if self.contains(._priorityHigh) { return 2 } // DISPATCH_QUEUE_PRIORITY_HIGH + else if self.contains(._priorityDefault) { return 0 } // DISPATCH_QUEUE_PRIORITY_DEFAULT + else if self.contains(._priorityLow) { return -2 } // // DISPATCH_QUEUE_PRIORITY_LOW + else if self.contains(._priorityBackground) { return Int(Int16.min) } // // 
DISPATCH_QUEUE_PRIORITY_BACKGROUND + return 0 + } + } + + public class func concurrentPerform(iterations: Int, execute work: @noescape (Int) -> Void) { + _swift_dispatch_apply_current(iterations, work) + } + + public class var main: DispatchQueue { + return DispatchQueue(queue: _swift_dispatch_get_main_queue()) + } + + public class func global(attributes: GlobalAttributes = []) -> DispatchQueue { + // SubOptimal? Should we be caching these global DispatchQueue objects? + return DispatchQueue(queue:dispatch_get_global_queue(attributes._translatedValue, 0)) + } + + public class func getSpecific(key: DispatchSpecificKey) -> T? { + let k = Unmanaged.passUnretained(key).toOpaque() + if let p = CDispatch.dispatch_get_specific(k) { + let v = Unmanaged<_DispatchSpecificValue> + .fromOpaque(p) + .takeUnretainedValue() + return v.value + } + return nil + } + + public convenience init( + label: String, + attributes: DispatchQueueAttributes = .serial, + target: DispatchQueue? = nil) + { + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + self.init(__label: label, attr: attributes.attr, queue: target) + } else { + self.init(__label: label, attr: attributes.attr) + if let tq = target { self.setTarget(queue: tq) } + } + } + + public var label: String { + return String(validatingUTF8: dispatch_queue_get_label(self.__wrapped))! + } + + @available(OSX 10.10, iOS 8.0, *) + public func sync(execute workItem: DispatchWorkItem) { + dispatch_sync(self.__wrapped, workItem._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public func async(execute workItem: DispatchWorkItem) { + // _swift_dispatch_{group,}_async preserves the @convention(block) + // for work item blocks. + if let g = workItem._group { + dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) + } else { + dispatch_async(self.__wrapped, workItem._block) + } + } + + public func async(group: DispatchGroup? 
= nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if group == nil && qos == .unspecified && flags.isEmpty { + // Fast-path route for the most common API usage + dispatch_async(self.__wrapped, work) + return + } + + if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { + let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work) + if let g = group { + dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) + } else { + dispatch_async(self.__wrapped, workItem._block) + } + } else { + if let g = group { + dispatch_group_async(g.__wrapped, self.__wrapped, work) + } else { + dispatch_async(self.__wrapped, work) + } + } + } + + private func _syncBarrier(block: @noescape () -> ()) { + dispatch_barrier_sync(self.__wrapped, block) + } + + private func _syncHelper( + fn: (@noescape () -> ()) -> (), + execute work: @noescape () throws -> T, + rescue: ((Swift.Error) throws -> (T))) rethrows -> T + { + var result: T? + var error: Swift.Error? + fn { + do { + result = try work() + } catch let e { + error = e + } + } + if let e = error { + return try rescue(e) + } else { + return result! + } + } + + @available(OSX 10.10, iOS 8.0, *) + private func _syncHelper( + fn: (DispatchWorkItem) -> (), + flags: DispatchWorkItemFlags, + execute work: @noescape () throws -> T, + rescue: ((Swift.Error) throws -> (T))) rethrows -> T + { + var result: T? + var error: Swift.Error? + let workItem = DispatchWorkItem(flags: flags, noescapeBlock: { + do { + result = try work() + } catch let e { + error = e + } + }) + fn(workItem) + if let e = error { + return try rescue(e) + } else { + return result! 
+ } + } + + public func sync(execute work: @noescape () throws -> T) rethrows -> T { + return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) + } + + public func sync(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + if flags == .barrier { + return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 }) + } else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty { + return try self._syncHelper(fn: sync, flags: flags, execute: work, rescue: { throw $0 }) + } else { + return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) + } + } + + public func after(when: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_after(when.rawValue, self.__wrapped, item._block) + } else { + dispatch_after(when.rawValue, self.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func after(when: DispatchTime, execute: DispatchWorkItem) { + dispatch_after(when.rawValue, self.__wrapped, execute._block) + } + + public func after(walltime when: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: work) + dispatch_after(when.rawValue, self.__wrapped, item._block) + } else { + dispatch_after(when.rawValue, self.__wrapped, work) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func after(walltime when: DispatchWallTime, execute: DispatchWorkItem) { + dispatch_after(when.rawValue, self.__wrapped, execute._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public var qos: DispatchQoS { + var relPri: Int32 = 0 + let cls = 
DispatchQoS.QoSClass(qosClass: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! + return DispatchQoS(qosClass: cls, relativePriority: Int(relPri)) + } + + public func getSpecific(key: DispatchSpecificKey) -> T? { + let k = Unmanaged.passUnretained(key).toOpaque() + if let p = dispatch_queue_get_specific(self.__wrapped, k) { + let v = Unmanaged<_DispatchSpecificValue> + .fromOpaque(p) + .takeUnretainedValue() + return v.value + } + return nil + } + + public func setSpecific(key: DispatchSpecificKey, value: T) { + let v = _DispatchSpecificValue(value: value) + let k = Unmanaged.passUnretained(key).toOpaque() + let p = Unmanaged.passRetained(v).toOpaque() + dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) + } +} + +extension DispatchQueue { + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute work: @noescape () -> ()) { + sync(execute: work) + } + + @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.sync(self:execute:)") + @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.sync(self:execute:)") + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute workItem: DispatchWorkItem) { + sync(execute: workItem) + } + + @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.async(self:execute:)") + @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.async(self:execute:)") + @available(*, deprecated, renamed: "DispatchQueue.async(self:execute:)") + public func asynchronously(execute workItem: DispatchWorkItem) { + async(execute: workItem) + } + + @available(*, deprecated, renamed: "DispatchQueue.async(self:group:qos:flags:execute:)") + public func asynchronously(group: DispatchGroup? 
= nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + async(group: group, qos: qos, flags: flags, execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") + public func synchronously(execute work: @noescape () throws -> T) rethrows -> T { + return try sync(execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.sync(self:flags:execute:)") + public func synchronously(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + return try sync(flags: flags, execute: work) + } + + @available(*, deprecated, renamed: "DispatchQueue.concurrentPerform(iterations:execute:)") + public func apply(applier iterations: Int, execute block: @noescape (Int) -> Void) { + DispatchQueue.concurrentPerform(iterations: iterations, execute: block) + } + + @available(*, deprecated, renamed: "DispatchQueue.setTarget(self:queue:)") + public func setTargetQueue(queue: DispatchQueue) { + self.setTarget(queue: queue) + } +} + +private func _destructDispatchSpecificValue(ptr: UnsafeMutablePointer?) 
{ + if let p = ptr { + Unmanaged.fromOpaque(p).release() + } +} + +@_silgen_name("_swift_dispatch_queue_concurrent") +internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t + +@_silgen_name("_swift_dispatch_get_main_queue") +internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t + +@_silgen_name("_swift_dispatch_apply_current_root_queue") +internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t + +@_silgen_name("_swift_dispatch_apply_current") +internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) @noescape (Int) -> Void) diff --git a/src/swift/Source.swift b/src/swift/Source.swift new file mode 100644 index 000000000..2830f010e --- /dev/null +++ b/src/swift/Source.swift @@ -0,0 +1,425 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +import CDispatch + +public extension DispatchSourceType { + + public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_event_handler((self as! 
DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setEventHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, handler._block) + } + + public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setCancelHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, handler._block) + } + + public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { + if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + let item = DispatchWorkItem(qos: qos, flags: flags, block: h) + CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block) + } else { + CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, handler) + } + } + + @available(OSX 10.10, iOS 8.0, *) + public func setRegistrationHandler(handler: DispatchWorkItem) { + CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, handler._block) + } + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public func activate() { + (self as! DispatchSource).activate() + } + + public func cancel() { + CDispatch.dispatch_source_cancel((self as! DispatchSource).__wrapped) + } + + public func resume() { + (self as! 
DispatchSource).resume() + } + + public func suspend() { + (self as! DispatchSource).suspend() + } + + public var handle: UInt { + return CDispatch.dispatch_source_get_handle((self as! DispatchSource).__wrapped) + } + + public var mask: UInt { + return CDispatch.dispatch_source_get_mask((self as! DispatchSource).__wrapped) + } + + public var data: UInt { + return CDispatch.dispatch_source_get_data((self as! DispatchSource).__wrapped) + } + + public var isCancelled: Bool { + return CDispatch.dispatch_source_testcancel((self as! DispatchSource).__wrapped) != 0 + } +} + +public extension DispatchSource { +#if HAVE_MACH + public struct MachSendEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let dead = MachSendEvent(rawValue: 0x1) + } +#endif + +#if HAVE_MACH + public struct MemoryPressureEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let normal = MemoryPressureEvent(rawValue: 0x1) + public static let warning = MemoryPressureEvent(rawValue: 0x2) + public static let critical = MemoryPressureEvent(rawValue: 0x4) + public static let all: MemoryPressureEvent = [.normal, .warning, .critical] + } +#endif + +#if !os(Linux) + public struct ProcessEvent : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let exit = ProcessEvent(rawValue: 0x80000000) + public static let fork = ProcessEvent(rawValue: 0x40000000) + public static let exec = ProcessEvent(rawValue: 0x20000000) + public static let signal = ProcessEvent(rawValue: 0x08000000) + public static let all: ProcessEvent = [.exit, .fork, .exec, .signal] + } +#endif + + public struct TimerFlags : OptionSet, RawRepresentable { + public let rawValue: UInt + public init(rawValue: UInt) { self.rawValue = rawValue } + + public static let strict = 
TimerFlags(rawValue: 1)
+	}
+
+	public struct FileSystemEvent : OptionSet, RawRepresentable {
+		public let rawValue: UInt
+		public init(rawValue: UInt) { self.rawValue = rawValue }
+
+		public static let delete = FileSystemEvent(rawValue: 0x1)
+		public static let write = FileSystemEvent(rawValue: 0x2)
+		public static let extend = FileSystemEvent(rawValue: 0x4)
+		public static let attrib = FileSystemEvent(rawValue: 0x8)
+		public static let link = FileSystemEvent(rawValue: 0x10)
+		public static let rename = FileSystemEvent(rawValue: 0x20)
+		public static let revoke = FileSystemEvent(rawValue: 0x40)
+		public static let funlock = FileSystemEvent(rawValue: 0x100)
+
+		public static let all: FileSystemEvent = [
+			.delete, .write, .extend, .attrib, .link, .rename, .revoke]
+	}
+
+#if HAVE_MACH
+	public class func machSend(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend {
+		let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped)
+		return DispatchSource(source: source) as DispatchSourceMachSend
+	}
+#endif
+
+#if HAVE_MACH
+	public class func machReceive(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive {
+		let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped)
+		return DispatchSource(source: source) as DispatchSourceMachReceive
+	}
+#endif
+
+#if HAVE_MACH
+	public class func memoryPressure(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure {
+		let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue?.__wrapped)
+		return DispatchSource(source: source) as DispatchSourceMemoryPressure
+	}
+#endif
+
+#if !os(Linux)
+	public class func process(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue?
= nil) -> DispatchSourceProcess { + let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceProcess + } +#endif + + public class func read(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { + let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceRead + } + + public class func signal(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { + let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceSignal + } + + public class func timer(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { + let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceTimer + } + + public class func userDataAdd(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { + let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceUserDataAdd + } + + public class func userDataOr(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { + let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceUserDataOr + } + +#if !os(Linux) + public class func fileSystemObject(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? 
= nil) -> DispatchSourceFileSystemObject { + let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceFileSystemObject + } +#endif + + public class func write(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { + let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceWrite + } +} + +#if HAVE_MACH +public extension DispatchSourceMachSend { + public var handle: mach_port_t { + return mach_port_t(dispatch_source_get_handle(self as! DispatchSource)) + } + + public var data: DispatchSource.MachSendEvent { + let data = dispatch_source_get_data(self as! DispatchSource) + return DispatchSource.MachSendEvent(rawValue: data) + } + + public var mask: DispatchSource.MachSendEvent { + let mask = dispatch_source_get_mask(self as! DispatchSource) + return DispatchSource.MachSendEvent(rawValue: mask) + } +} +#endif + +#if HAVE_MACH +public extension DispatchSourceMachReceive { + public var handle: mach_port_t { + return mach_port_t(dispatch_source_get_handle(self as! DispatchSource)) + } +} +#endif + +#if HAVE_MACH +public extension DispatchSourceMemoryPressure { + public var data: DispatchSource.MemoryPressureEvent { + let data = dispatch_source_get_data(self as! DispatchSource) + return DispatchSource.MemoryPressureEvent(rawValue: data) + } + + public var mask: DispatchSource.MemoryPressureEvent { + let mask = dispatch_source_get_mask(self as! DispatchSource) + return DispatchSource.MemoryPressureEvent(rawValue: mask) + } +} +#endif + +#if !os(Linux) +public extension DispatchSourceProcess { + public var handle: pid_t { + return pid_t(dispatch_source_get_handle(self as! DispatchSource)) + } + + public var data: DispatchSource.ProcessEvent { + let data = dispatch_source_get_data(self as! 
DispatchSource) + return DispatchSource.ProcessEvent(rawValue: data) + } + + public var mask: DispatchSource.ProcessEvent { + let mask = dispatch_source_get_mask(self as! DispatchSource) + return DispatchSource.ProcessEvent(rawValue: mask) + } +} +#endif + +public extension DispatchSourceTimer { + public func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, ~0, UInt64(leeway.rawValue)) + } + + public func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, ~0, UInt64(leeway.rawValue)) + } + + public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + } + + public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + } + + public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + } + + public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! 
DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + } +} + +public extension DispatchSourceTimer { + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:deadline:leeway:)") + public func setTimer(start: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleOneshot(deadline: start, leeway: leeway) + } + + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:wallDeadline:leeway:)") + public func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleOneshot(wallDeadline: start, leeway: leeway) + } + + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") + public func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleRepeating(deadline: start, interval: interval, leeway: leeway) + } + + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") + public func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleRepeating(deadline: start, interval: interval, leeway: leeway) + } + + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") + public func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) + } + + @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") + public func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) + } +} + +#if !os(Linux) +public extension 
DispatchSourceFileSystemObject { + public var handle: Int32 { + return Int32(dispatch_source_get_handle((self as! DispatchSource).__wrapped)) + } + + public var data: DispatchSource.FileSystemEvent { + let data = dispatch_source_get_data((self as! DispatchSource).__wrapped) + return DispatchSource.FileSystemEvent(rawValue: data) + } + + public var mask: DispatchSource.FileSystemEvent { + let data = dispatch_source_get_mask((self as! DispatchSource).__wrapped) + return DispatchSource.FileSystemEvent(rawValue: data) + } +} +#endif + +public extension DispatchSourceUserDataAdd { + /// @function mergeData + /// + /// @abstract + /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or + /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its + /// target queue. + /// + /// @param value + /// The value to coalesce with the pending data using a logical OR or an ADD + /// as specified by the dispatch source type. A value of zero has no effect + /// and will not result in the submission of the event handler block. + public func mergeData(value: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, value) + } +} + +public extension DispatchSourceUserDataOr { +#if false /*FIXME: clashes with UserDataAdd?? */ + /// @function mergeData + /// + /// @abstract + /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or + /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its + /// target queue. + /// + /// @param value + /// The value to coalesce with the pending data using a logical OR or an ADD + /// as specified by the dispatch source type. A value of zero has no effect + /// and will not result in the submission of the event handler block. + public func mergeData(value: UInt) { + dispatch_source_merge_data((self as! 
DispatchSource).__wrapped, value) + } +#endif +} + +@_silgen_name("_swift_dispatch_source_type_DATA_ADD") +internal func _swift_dispatch_source_type_data_add() -> dispatch_source_type_t + +@_silgen_name("_swift_dispatch_source_type_DATA_OR") +internal func _swift_dispatch_source_type_data_or() -> dispatch_source_type_t + +#if HAVE_MACH +@_silgen_name("_swift_dispatch_source_type_MACH_SEND") +internal func _swift_dispatch_source_type_mach_send() -> dispatch_source_type_t + +@_silgen_name("_swift_dispatch_source_type_MACH_RECV") +internal func _swift_dispatch_source_type_mach_recv() -> dispatch_source_type_t + +@_silgen_name("_swift_dispatch_source_type_MEMORYPRESSURE") +internal func _swift_dispatch_source_type_memorypressure() -> dispatch_source_type_t +#endif + +#if !os(Linux) +@_silgen_name("_swift_dispatch_source_type_PROC") +internal func _swift_dispatch_source_type_proc() -> dispatch_source_type_t +#endif + +@_silgen_name("_swift_dispatch_source_type_READ") +internal func _swift_dispatch_source_type_read() -> dispatch_source_type_t + +@_silgen_name("_swift_dispatch_source_type_SIGNAL") +internal func _swift_dispatch_source_type_signal() -> dispatch_source_type_t + +@_silgen_name("_swift_dispatch_source_type_TIMER") +internal func _swift_dispatch_source_type_timer() -> dispatch_source_type_t + +#if !os(Linux) +@_silgen_name("_swift_dispatch_source_type_VNODE") +internal func _swift_dispatch_source_type_vnode() -> dispatch_source_type_t +#endif + +@_silgen_name("_swift_dispatch_source_type_WRITE") +internal func _swift_dispatch_source_type_write() -> dispatch_source_type_t diff --git a/src/swift/Time.swift b/src/swift/Time.swift new file mode 100644 index 000000000..76a6979eb --- /dev/null +++ b/src/swift/Time.swift @@ -0,0 +1,110 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// +//===----------------------------------------------------------------------===// + +// dispatch/time.h +// DISPATCH_TIME_NOW: ok +// DISPATCH_TIME_FOREVER: ok + +import CDispatch + +public struct DispatchTime { + public let rawValue: dispatch_time_t + + public static func now() -> DispatchTime { + let t = CDispatch.dispatch_time(0, 0) + return DispatchTime(rawValue: t) + } + + public static let distantFuture = DispatchTime(rawValue: ~0) + + private init(rawValue: dispatch_time_t) { + self.rawValue = rawValue + } +} + +public struct DispatchWallTime { + public let rawValue: dispatch_time_t + + public static func now() -> DispatchWallTime { + return DispatchWallTime(rawValue: CDispatch.dispatch_walltime(nil, 0)) + } + + public static let distantFuture = DispatchWallTime(rawValue: ~0) + + private init(rawValue: dispatch_time_t) { + self.rawValue = rawValue + } + + public init(time: timespec) { + var t = time + self.rawValue = CDispatch.dispatch_walltime(&t, 0) + } +} + +@available(*, deprecated, renamed: "DispatchWallTime") +public typealias DispatchWalltime = DispatchWallTime + +public enum DispatchTimeInterval { + case seconds(Int) + case milliseconds(Int) + case microseconds(Int) + case nanoseconds(Int) + + internal var rawValue: UInt64 { + switch self { + case .seconds(let s): return UInt64(s) * NSEC_PER_SEC + case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC + case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC + case .nanoseconds(let ns): return UInt64(ns) + } + } +} + +public func +(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue)) + return DispatchTime(rawValue: t) +} + +public func -(time: 
DispatchTime, interval: DispatchTimeInterval) -> DispatchTime { + let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue)) + return DispatchTime(rawValue: t) +} + +public func +(time: DispatchTime, seconds: Double) -> DispatchTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC))) + return DispatchTime(rawValue: t) +} + +public func -(time: DispatchTime, seconds: Double) -> DispatchTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC))) + return DispatchTime(rawValue: t) +} + +public func +(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue)) + return DispatchWallTime(rawValue: t) +} + +public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime { + let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue)) + return DispatchWallTime(rawValue: t) +} + +public func +(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC))) + return DispatchWallTime(rawValue: t) +} + +public func -(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { + let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC))) + return DispatchWallTime(rawValue: t) +} diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift new file mode 100644 index 000000000..d38bb9358 --- /dev/null +++ b/src/swift/Wrapper.swift @@ -0,0 +1,319 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors
+// Licensed under Apache License v2.0 with Runtime Library Exception
+//
+// See http://swift.org/LICENSE.txt for license information
+// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
+//
+//===----------------------------------------------------------------------===//
+
+import CDispatch
+
+// This file contains declarations that are provided by the
+// importer via Dispatch.apinote when the platform has Objective-C support
+
+public class DispatchObject {
+
+	internal func wrapped() -> dispatch_object_t {
+		fatalError("should be overriden in subclass")
+	}
+
+	public func setTarget(queue:DispatchQueue) {
+		dispatch_set_target_queue(wrapped(), queue.__wrapped)
+	}
+
+	public func activate() {
+		dispatch_activate(wrapped())
+	}
+
+	public func suspend() {
+		dispatch_suspend(wrapped())
+	}
+
+	public func resume() {
+		dispatch_resume(wrapped())
+	}
+}
+
+
+public class DispatchGroup : DispatchObject {
+	internal let __wrapped:dispatch_group_t;
+
+	final internal override func wrapped() -> dispatch_object_t {
+		return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+	}
+
+	public override init() {
+		__wrapped = dispatch_group_create()
+	}
+
+	deinit {
+		_swift_dispatch_release(wrapped())
+	}
+
+	public func enter() {
+		dispatch_group_enter(__wrapped)
+	}
+
+	public func leave() {
+		dispatch_group_leave(__wrapped)
+	}
+}
+
+public class DispatchSemaphore : DispatchObject {
+	internal let __wrapped: dispatch_semaphore_t;
+
+	final internal override func wrapped() -> dispatch_object_t {
+		return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+	}
+
+	public init(value: Int) {
+		__wrapped = dispatch_semaphore_create(value)
+	}
+
+	deinit {
+		_swift_dispatch_release(wrapped())
+	}
+}
+
+public class DispatchIO : DispatchObject {
+	internal let __wrapped:dispatch_io_t
+
+	final internal override func wrapped() -> dispatch_object_t {
+		return unsafeBitCast(__wrapped, to: dispatch_object_t.self)
+	}
+
+	
internal init(__type: UInt, fd: Int32, queue: DispatchQueue, + handler: (error: Int32) -> Void) { + __wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler) + } + + internal init(__type: UInt, path: UnsafePointer, oflag: Int32, + mode: mode_t, queue: DispatchQueue, handler: (error: Int32) -> Void) { + __wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler) + } + + internal init(__type: UInt, io: DispatchIO, + queue: DispatchQueue, handler: (error: Int32) -> Void) { + __wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler) + } + + internal init(queue:dispatch_queue_t) { + __wrapped = queue + } + + deinit { + _swift_dispatch_release(wrapped()) + } + + public func barrier(execute: () -> ()) { + dispatch_io_barrier(self.__wrapped, execute) + } + + public var fileDescriptor: Int32 { + return dispatch_io_get_descriptor(__wrapped) + } + + public func setLimit(highWater: Int) { + dispatch_io_set_high_water(__wrapped, highWater) + } + + public func setLimit(lowWater: Int) { + dispatch_io_set_low_water(__wrapped, lowWater) + } +} + +public class DispatchQueue : DispatchObject { + internal let __wrapped:dispatch_queue_t; + + final internal override func wrapped() -> dispatch_object_t { + return unsafeBitCast(__wrapped, to: dispatch_object_t.self) + } + + internal init(__label: String, attr: dispatch_queue_attr_t?) { + __wrapped = dispatch_queue_create(__label, attr) + } + + internal init(__label: String, attr: dispatch_queue_attr_t?, queue: DispatchQueue?) 
{ + __wrapped = dispatch_queue_create_with_target(__label, attr, queue?.__wrapped) + } + + internal init(queue:dispatch_queue_t) { + __wrapped = queue + } + + deinit { + _swift_dispatch_release(wrapped()) + } + + public func sync(execute workItem: @noescape ()->()) { + dispatch_sync(self.__wrapped, workItem) + } +} + +public class DispatchSource : DispatchObject, + DispatchSourceType, DispatchSourceRead, + DispatchSourceSignal, DispatchSourceTimer, + DispatchSourceUserDataAdd, DispatchSourceUserDataOr, + DispatchSourceWrite { + internal let __wrapped:dispatch_source_t + + final internal override func wrapped() -> dispatch_object_t { + return unsafeBitCast(__wrapped, to: dispatch_object_t.self) + } + + internal init(source:dispatch_source_t) { + __wrapped = source + } + + deinit { + _swift_dispatch_release(wrapped()) + } +} + +#if HAVE_MACH +extension DispatchSource : DispatchSourceMachSend, + DispatchSourceMachReceive, DispatchSourceMemoryPressure { +} +#endif + +#if !os(Linux) +extension DispatchSource : DispatchSourceProcess, + DispatchSourceFileSystemObject { +} +#endif + +public typealias DispatchSourceHandler = @convention(block) () -> Void + +public protocol DispatchSourceType { + func setEventHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?) + + func setEventHandler(handler: DispatchWorkItem) + + func setCancelHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?) + + func setCancelHandler(handler: DispatchWorkItem) + + func setRegistrationHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?) 
+ + func setRegistrationHandler(handler: DispatchWorkItem) + + func cancel() + + func resume() + + func suspend() + + var handle: UInt { get } + + var mask: UInt { get } + + var data: UInt { get } + + var isCancelled: Bool { get } +} + +public protocol DispatchSourceUserDataAdd : DispatchSourceType { + func mergeData(value: UInt) +} + +public protocol DispatchSourceUserDataOr { +#if false /*FIXME: clashes with UserDataAdd?? */ + func mergeData(value: UInt) +#endif +} + +#if HAVE_MACH +public protocol DispatchSourceMachSend : DispatchSourceType { + public var handle: mach_port_t { get } + + public var data: DispatchSource.MachSendEvent { get } + + public var mask: DispatchSource.MachSendEvent { get } +} +#endif + +#if HAVE_MACH +public protocol DispatchSourceMachReceive : DispatchSourceType { + var handle: mach_port_t { get } +} +#endif + +#if HAVE_MACH +public protocol DispatchSourceMemoryPressure : DispatchSourceType { + public var data: DispatchSource.MemoryPressureEvent { get } + + public var mask: DispatchSource.MemoryPressureEvent { get } +} +#endif + +#if !os(Linux) +public protocol DispatchSourceProcess : DispatchSourceType { + var handle: pid_t { get } + + var data: DispatchSource.ProcessEvent { get } + + var mask: DispatchSource.ProcessEvent { get } +} +#endif + +public protocol DispatchSourceRead : DispatchSourceType { +} + +public protocol DispatchSourceSignal : DispatchSourceType { +} + +public protocol DispatchSourceTimer : DispatchSourceType { + func setTimer(start: DispatchTime, leeway: DispatchTimeInterval) + + func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval) + + func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) + + func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval) + + func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) + + func setTimer(walltime start: DispatchWallTime, interval: 
Double, leeway: DispatchTimeInterval)
+}
+
+#if !os(Linux)
+public protocol DispatchSourceFileSystemObject : DispatchSourceType {
+	var handle: Int32 { get }
+
+	var data: DispatchSource.FileSystemEvent { get }
+
+	var mask: DispatchSource.FileSystemEvent { get }
+}
+#endif
+
+public protocol DispatchSourceWrite : DispatchSourceType {
+}
+
+
+internal enum _OSQoSClass : UInt32 {
+	case QOS_CLASS_USER_INTERACTIVE = 0x21
+	case QOS_CLASS_USER_INITIATED = 0x19
+	case QOS_CLASS_DEFAULT = 0x15
+	case QOS_CLASS_UTILITY = 0x11
+	case QOS_CLASS_BACKGROUND = 0x09
+	case QOS_CLASS_UNSPECIFIED = 0x00
+
+	internal init?(qosClass:dispatch_qos_class_t) {
+		switch qosClass {
+		case 0x21: self = .QOS_CLASS_USER_INTERACTIVE
+		case 0x19: self = .QOS_CLASS_USER_INITIATED
+		case 0x15: self = .QOS_CLASS_DEFAULT
+		case 0x11: self = .QOS_CLASS_UTILITY
+		case 0x09: self = .QOS_CLASS_BACKGROUND
+		case 0x00: self = .QOS_CLASS_UNSPECIFIED
+		default: return nil
+		}
+	}
+}
+
+@_silgen_name("_swift_dispatch_release")
+internal func _swift_dispatch_release(_ obj: dispatch_object_t) -> Void
diff --git a/src/time.c b/src/time.c
index 35b0e5201..6d008319b 100644
--- a/src/time.c
+++ b/src/time.c
@@ -145,3 +145,16 @@ _dispatch_timeout(dispatch_time_t when)
 	now = _dispatch_absolute_time();
 	return now >= when ?
0 : _dispatch_time_mach2nano(when - now); } + +uint64_t +_dispatch_time_nanoseconds_since_epoch(dispatch_time_t when) +{ + if (when == DISPATCH_TIME_FOREVER) { + return DISPATCH_TIME_FOREVER; + } + if ((int64_t)when < 0) { + // time in nanoseconds since the POSIX epoch already + return (uint64_t)-(int64_t)when; + } + return _dispatch_get_nanoseconds() + _dispatch_timeout(when); +} diff --git a/src/trace.h b/src/trace.h index ebab27c88..d73ff3fb3 100644 --- a/src/trace.h +++ b/src/trace.h @@ -27,7 +27,7 @@ #ifndef __DISPATCH_TRACE__ #define __DISPATCH_TRACE__ -#if !__OBJC2__ && !defined(__cplusplus) +#if DISPATCH_PURE_C #if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION typedef struct dispatch_trace_timer_params_s { @@ -92,12 +92,13 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) char *_kind; \ dispatch_function_t _func; \ void *_ctxt; \ - if (DISPATCH_OBJ_IS_VTABLE(_do)) { \ + if (_dispatch_object_has_vtable(_do)) { \ _kind = (char*)dx_kind(_do); \ if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ dispatch_source_t _ds = (dispatch_source_t)_do; \ - _dc = _ds->ds_refs->ds_handler[DS_EVENT_HANDLER]; \ + _dc = os_atomic_load(&_ds->ds_refs->ds_handler[ \ + DS_EVENT_HANDLER], relaxed); \ _func = _dc ? _dc->dc_func : NULL; \ _ctxt = _dc ? 
_dc->dc_ctxt : NULL; \ } else { \ @@ -107,10 +108,10 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } else { \ _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ - if ((long)_dc->do_vtable & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ + if (_dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ _kind = "semaphore"; \ _func = (dispatch_function_t)dispatch_semaphore_signal; \ - } else if (_dc->dc_func == _dispatch_call_block_and_release) { \ + } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \ _kind = "block"; \ _func = _dispatch_Block_invoke(_dc->dc_ctxt); \ } else { \ @@ -145,27 +146,15 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_queue_push(dispatch_queue_t dq, dispatch_object_t _tail, pthread_priority_t pp) +_dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, + pthread_priority_t pp, dispatch_wakeup_flags_t flags) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push(dq, _tail, pp); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_trace_queue_push_wakeup(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, bool wakeup) -{ - if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { - struct dispatch_object_s *dou = _tail._do; - _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); - } - _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_wakeup(dq, _tail, pp, wakeup); + _dispatch_queue_push_inline(dq, _tail, pp, flags); } DISPATCH_ALWAYS_INLINE @@ -179,16 +168,8 @@ _dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) _dispatch_introspection_queue_push(dq, _tail); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_notrace(dispatch_queue_t dq, dispatch_object_t dou, 
pthread_priority_t pp) -{ - _dispatch_queue_push(dq, dou, pp); -} - #define _dispatch_queue_push_list _dispatch_trace_queue_push_list -#define _dispatch_queue_push _dispatch_trace_queue_push -#define _dispatch_queue_push_wakeup _dispatch_trace_queue_push_wakeup +#define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline DISPATCH_ALWAYS_INLINE static inline void @@ -200,7 +181,6 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) _dispatch_introspection_queue_pop(dq, dou); } #else -#define _dispatch_queue_push_notrace _dispatch_queue_push #define _dispatch_trace_continuation_push(dq, dou) \ do { (void)(dq); (void)(dou); } while(0) #define _dispatch_trace_continuation_pop(dq, dou) \ @@ -209,17 +189,11 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) #if DISPATCH_USE_DTRACE static inline dispatch_function_t -_dispatch_trace_timer_function(dispatch_source_t ds, dispatch_source_refs_t dr) +_dispatch_trace_timer_function(dispatch_source_refs_t dr) { - dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER]; - dispatch_function_t func = dc ? dc->dc_func : NULL; - if (func == _dispatch_after_timer_callback && - !(ds->ds_atomic_flags & DSF_CANCELED)) { - dc = ds->do_ctxt; - func = dc->dc_func != _dispatch_call_block_and_release ? dc->dc_func : - dc->dc_ctxt ? _dispatch_Block_invoke(dc->dc_ctxt) : NULL; - } - return func; + dispatch_continuation_t dc; + dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed); + return dc ? 
dc->dc_func : NULL; } DISPATCH_ALWAYS_INLINE @@ -262,8 +236,8 @@ _dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, struct dispatch_timer_source_s *values) { struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds, - ds->ds_refs), _dispatch_trace_timer_params(ident, values, 0, + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs), + _dispatch_trace_timer_params(ident, values, 0, ¶ms)); } @@ -275,7 +249,7 @@ _dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) if (deadline && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(ds, dr), + DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr), _dispatch_trace_timer_params(ds->ds_ident_hack, &ds_timer(dr), deadline, ¶ms)); } @@ -289,7 +263,7 @@ _dispatch_trace_timer_wake(dispatch_source_refs_t dr) if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { if (dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); - DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(ds, dr)); + DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(dr)); } } } @@ -302,7 +276,7 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { if (!(data - missed) && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); - DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(ds, dr)); + DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(dr)); } } } @@ -321,6 +295,6 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #endif // DISPATCH_USE_DTRACE -#endif // !__OBJC2__ && !defined(__cplusplus) +#endif // DISPATCH_PURE_C #endif // __DISPATCH_TRACE__ diff --git a/src/transform.c b/src/transform.c index e6fa4017e..2c885ca36 100644 --- a/src/transform.c +++ b/src/transform.c @@ -20,7 +20,17 @@ #include "internal.h" 
+#ifdef __APPLE__ #include +#elif __linux__ +#include +#define OSLittleEndian __LITTLE_ENDIAN +#define OSBigEndian __BIG_ENDIAN +#define OSSwapLittleToHostInt16 le16toh +#define OSSwapBigToHostInt16 be16toh +#define OSSwapHostToLittleInt16 htole16 +#define OSSwapHostToBigInt16 htobe16 +#endif #if defined(__LITTLE_ENDIAN__) #define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16LE @@ -28,6 +38,8 @@ #elif defined(__BIG_ENDIAN__) #define DISPATCH_DATA_FORMAT_TYPE_UTF16_HOST DISPATCH_DATA_FORMAT_TYPE_UTF16BE #define DISPATCH_DATA_FORMAT_TYPE_UTF16_REV DISPATCH_DATA_FORMAT_TYPE_UTF16LE +#else +#error Unsupported Endianness #endif enum { @@ -103,16 +115,6 @@ typedef struct dispatch_transform_buffer_s { size_t size; } dispatch_transform_buffer_s; -static size_t -_dispatch_transform_sizet_mul(size_t a, size_t b) -{ - size_t rv = SIZE_MAX; - if (a == 0 || rv/a >= b) { - rv = a * b; - } - return rv; -} - #define BUFFER_MALLOC_MAX (100*1024*1024) static bool @@ -286,11 +288,13 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) DISPATCH_UNUSED dispatch_data_t region, size_t offset, const void *_buffer, size_t size) { const uint8_t *src = _buffer; - size_t i; + size_t i, dest_size; if (offset == 0) { - size_t dest_size = 2 + _dispatch_transform_sizet_mul(size, - sizeof(uint16_t)); + if (os_mul_and_add_overflow(size, sizeof(uint16_t), + sizeof(uint16_t), &dest_size)) { + return (bool)false; + } if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { return (bool)false; } @@ -312,6 +316,7 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) for (i = 0; i < size;) { uint32_t wch = 0; uint8_t byte_size = _dispatch_transform_utf8_length(*src); + size_t next; if (byte_size == 0) { return (bool)false; @@ -336,7 +341,9 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) i += byte_size; } - size_t next = _dispatch_transform_sizet_mul(size - i, sizeof(uint16_t)); + if (os_mul_overflow(size - i, 
sizeof(uint16_t), &next)) { + return (bool)false; + } if (wch >= 0xd800 && wch < 0xdfff) { // Illegal range (surrogate pair) return (bool)false; @@ -390,8 +397,8 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) const uint16_t *src = _buffer; if (offset == 0) { + size_t dest_size = howmany(size, 3) * 2; // Assume first buffer will be mostly single-byte UTF-8 sequences - size_t dest_size = _dispatch_transform_sizet_mul(size, 2) / 3; if (!_dispatch_transform_buffer_new(&buffer, dest_size, 0)) { return (bool)false; } @@ -418,6 +425,7 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) for (i = 0; i < max; i++) { uint32_t wch = 0; uint16_t ch; + size_t next; if ((i == (max - 1)) && (max > (size / 2))) { // Last byte of an odd sized range @@ -472,7 +480,9 @@ _dispatch_transform_from_utf16(dispatch_data_t data, int32_t byteOrder) wch = ch; } - size_t next = _dispatch_transform_sizet_mul(max - i, 2); + if (os_mul_overflow(max - i, 2, &next)) { + return (bool)false; + } if (wch < 0x80) { if (!_dispatch_transform_buffer_new(&buffer, 1, next)) { return (bool)false; @@ -554,8 +564,7 @@ _dispatch_transform_from_base32_with_table(dispatch_data_t data, bool success = dispatch_data_apply(data, ^( DISPATCH_UNUSED dispatch_data_t region, DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { - size_t i, dest_size = (size * 5) / 8; - + size_t i, dest_size = howmany(size, 8) * 5; uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; if (dest == NULL) { @@ -632,18 +641,17 @@ _dispatch_transform_from_base32_with_table(dispatch_data_t data, static dispatch_data_t _dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned char* table) { - size_t total = dispatch_data_get_size(data); + size_t total = dispatch_data_get_size(data), dest_size; __block size_t count = 0; - if (total > SIZE_T_MAX-4 || ((total+4)/5 > SIZE_T_MAX/8)) { - /* We can't hold larger than size_t in a 
dispatch_data_t - * and we want to avoid an integer overflow in the next - * calculation. - */ + dest_size = howmany(total, 5); + // + // os_mul_overflow(dest_size, 8, &dest_size) + if (dest_size > SIZE_T_MAX / 8) { return NULL; } + dest_size *= 8; - size_t dest_size = (total + 4) / 5 * 8; uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; @@ -799,7 +807,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) bool success = dispatch_data_apply(data, ^( DISPATCH_UNUSED dispatch_data_t region, DISPATCH_UNUSED size_t offset, const void *buffer, size_t size) { - size_t i, dest_size = (size * 3) / 4; + size_t i, dest_size = howmany(size, 4) * 3; uint8_t *dest = (uint8_t*)malloc(dest_size * sizeof(uint8_t)); uint8_t *ptr = dest; @@ -868,18 +876,17 @@ _dispatch_transform_to_base64(dispatch_data_t data) { // RFC 4648 states that we should not linebreak // http://tools.ietf.org/html/rfc4648 - size_t total = dispatch_data_get_size(data); + size_t total = dispatch_data_get_size(data), dest_size; __block size_t count = 0; - if (total > SIZE_T_MAX-2 || ((total+2)/3> SIZE_T_MAX/4)) { - /* We can't hold larger than size_t in a dispatch_data_t - * and we want to avoid an integer overflow in the next - * calculation. 
- */ + dest_size = howmany(total, 3); + // + // os_mul_overflow(dest_size, 4, &dest_size) + if (dest_size > SIZE_T_MAX / 4) { return NULL; } + dest_size *= 4; - size_t dest_size = (total + 2) / 3 * 4; uint8_t *dest = (uint8_t*)malloc(dest_size); if (dest == NULL) { return NULL; @@ -968,16 +975,16 @@ dispatch_data_create_with_transform(dispatch_data_t data, if (input->type == _DISPATCH_DATA_FORMAT_UTF_ANY) { input = _dispatch_transform_detect_utf(data); if (input == NULL) { - return NULL; + return DISPATCH_BAD_INPUT; } } if ((input->type & ~output->input_mask) != 0) { - return NULL; + return DISPATCH_BAD_INPUT; } if ((output->type & ~input->output_mask) != 0) { - return NULL; + return DISPATCH_BAD_INPUT; } if (dispatch_data_get_size(data) == 0) { @@ -993,7 +1000,7 @@ dispatch_data_create_with_transform(dispatch_data_t data, } if (!temp1) { - return NULL; + return DISPATCH_BAD_INPUT; } dispatch_data_t temp2; diff --git a/src/voucher.c b/src/voucher.c index 6967f19c1..94a293427 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Apple Inc. All rights reserved. + * Copyright (c) 2013-2016 Apple Inc. All rights reserved. 
* * @APPLE_APACHE_LICENSE_HEADER_START@ * @@ -20,90 +20,50 @@ #include "internal.h" -#if VOUCHER_USE_MACH_VOUCHER - -#include +#if !defined(VOUCHER_EXPORT_PERSONA_SPI) +#if TARGET_OS_IPHONE +#define VOUCHER_EXPORT_PERSONA_SPI 1 +#else +#define VOUCHER_EXPORT_PERSONA_SPI 0 +#endif +#endif -// -#ifndef VM_MEMORY_GENEALOGY -#define VM_MEMORY_GENEALOGY 78 +#ifndef PERSONA_ID_NONE +#define PERSONA_ID_NONE ((uid_t)-1) #endif -#ifndef VOUCHER_ATM_COLLECT_THRESHOLD -#define VOUCHER_ATM_COLLECT_THRESHOLD 1 +#if !DISPATCH_VARIANT_DYLD_STUB + +#if VOUCHER_USE_MACH_VOUCHER +#if !HAVE_PTHREAD_WORKQUEUE_QOS +#error Unsupported configuration, workqueue QoS support is required #endif -#define VATM_COLLECT_THRESHOLD_VALUE(t) (((t) - 1) * 2) -static uint64_t volatile _voucher_atm_generation; +#include +#include -typedef struct _voucher_atm_s *_voucher_atm_t; +#define MACH_ACTIVITY_ID_RANGE_SIZE 16 +#define MACH_ACTIVITY_ID_MASK ((1ULL << FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT) - 1) +#define FIREHOSE_ACTIVITY_ID_MAKE(aid, flags) \ + FIREHOSE_ACTIVITY_ID_MERGE_FLAGS((aid) & MACH_ACTIVITY_ID_MASK, flags) -static void _voucher_activity_atfork_child(void); -static _voucher_activity_t _voucher_activity_copy_from_mach_voucher( - mach_voucher_t kv, voucher_activity_id_t va_id); -static inline _voucher_activity_t _voucher_activity_retain( - _voucher_activity_t act); -static inline void _voucher_activity_release(_voucher_activity_t act); -static void _voucher_activity_remove(_voucher_activity_t act); -static inline _voucher_atm_t _voucher_atm_retain(_voucher_atm_t vatm); -static inline void _voucher_atm_release(_voucher_atm_t vatm); +static volatile uint64_t _voucher_aid_next; #pragma mark - #pragma mark voucher_t -#if USE_OBJC -OS_OBJECT_OBJC_CLASS_DECL(voucher); -#define VOUCHER_CLASS OS_OBJECT_OBJC_CLASS(voucher) -#else -const _os_object_class_s _voucher_class = { - ._os_obj_xref_dispose = (void(*)(_os_object_t))_voucher_xref_dispose, - ._os_obj_dispose = (void(*)(_os_object_t))_voucher_dispose, 
-}; -#define VOUCHER_CLASS &_voucher_class +OS_OBJECT_CLASS_DECL(voucher, object); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(voucher, + (void (*)(_os_object_t))_voucher_xref_dispose, + (void (*)(_os_object_t))_voucher_dispose); #endif // USE_OBJC - -static const voucher_activity_trace_id_t _voucher_activity_trace_id_release = - (voucher_activity_trace_id_t)voucher_activity_tracepoint_type_release << - _voucher_activity_trace_id_type_shift; -static const unsigned int _voucher_max_activities = 16; - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_recipes_init(mach_voucher_attr_recipe_data_t *recipes, - mach_voucher_attr_content_size_t bits_size) -{ - static const mach_voucher_attr_recipe_data_t base_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ALL, - .command = MACH_VOUCHER_ATTR_COPY, - }; - _voucher_recipes_base(recipes) = base_recipe; - static const mach_voucher_attr_recipe_data_t atm_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_COPY, - }; - _voucher_recipes_atm(recipes) = atm_recipe; - static const mach_voucher_attr_recipe_data_t bits_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, - .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, - }; - _voucher_recipes_bits(recipes) = bits_recipe; - if (!bits_size) return; - _voucher_recipes_bits(recipes).content_size = bits_size; - *_voucher_recipes_magic(recipes) = _voucher_magic_v1; -} +#define VOUCHER_CLASS OS_OBJECT_VTABLE(voucher) static inline voucher_t -_voucher_alloc(unsigned int activities, pthread_priority_t priority, - mach_voucher_attr_recipe_size_t extra) +_voucher_alloc(mach_voucher_attr_recipe_size_t extra) { - if (activities > _voucher_max_activities) { - activities = _voucher_max_activities; - } voucher_t voucher; - size_t voucher_size, recipes_size; - mach_voucher_attr_content_size_t bits_size; - recipes_size = (priority||activities||extra) ? _voucher_recipes_size() : 0; - bits_size = recipes_size ? 
_voucher_bits_size(activities) : 0; - voucher_size = sizeof(voucher_s) + recipes_size + bits_size + extra; + size_t voucher_size = sizeof(voucher_s) + extra; voucher = (voucher_t)_os_object_alloc_realized(VOUCHER_CLASS, voucher_size); #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = extra; @@ -111,11 +71,6 @@ _voucher_alloc(unsigned int activities, pthread_priority_t priority, #else dispatch_assert(!extra); #endif - voucher->v_has_priority = priority ? 1 : 0; - voucher->v_activities = activities; - if (!recipes_size) return voucher; - _voucher_recipes_init(voucher->v_recipes, bits_size); - *_voucher_priority(voucher) = (_voucher_priority_t)priority; _dispatch_voucher_debug("alloc", voucher); return voucher; } @@ -126,7 +81,7 @@ voucher_create(voucher_recipe_t recipe) { // TODO: capture current activities or current kvoucher ? mach_voucher_attr_recipe_size_t extra = recipe ? recipe->vr_size : 0; - voucher_t voucher = _voucher_alloc(0, 0, extra); + voucher_t voucher = _voucher_alloc(extra); if (extra) { memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra); } @@ -134,9 +89,46 @@ voucher_create(voucher_recipe_t recipe) } #endif +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_clone(const voucher_t ov, voucher_fields_t ignore_fields) +{ + mach_voucher_attr_recipe_size_t extra = 0; + voucher_t v; + + if (ov && !(ignore_fields & VOUCHER_FIELD_EXTRA)) { + extra = _voucher_extra_size(ov); + } + v = _voucher_alloc(extra); + if (ov) { + voucher_fields_t fields = ~ignore_fields; + if ((fields & VOUCHER_FIELD_KVOUCHER) && ov->v_kvoucher) { + voucher_t kvb = ov->v_kvbase ? 
ov->v_kvbase : ov; + v->v_kvbase = _voucher_retain(kvb); + v->v_kvoucher = kvb->v_kvoucher; + v->v_kv_has_importance = kvb->v_kv_has_importance; + } + if (fields & VOUCHER_FIELD_PRIORITY) { + v->v_priority = ov->v_priority; + } + if (fields & VOUCHER_FIELD_ACTIVITY) { + v->v_activity = ov->v_activity; + v->v_activity_creator = ov->v_activity_creator; + v->v_parent_activity = ov->v_parent_activity; + } + if ((fields & VOUCHER_FIELD_EXTRA) && extra) { + memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov),extra); + } + } + return v; +} + voucher_t voucher_adopt(voucher_t voucher) { + if (voucher == VOUCHER_CURRENT) { + return _voucher_copy(); + } return _voucher_adopt(voucher); } @@ -167,15 +159,18 @@ voucher_release(voucher_t voucher) void _voucher_thread_cleanup(void *voucher) { - _voucher_swap(voucher, NULL); + // when a thread exits and has a voucher left, the kernel + // will get rid of the voucher kernel object that is set on the thread, + // we only need to release the voucher_t object. 
+ _voucher_release(voucher); } DISPATCH_CACHELINE_ALIGN static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; #define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))]) -static os_lock_handoff_s _vouchers_lock = OS_LOCK_HANDOFF_INIT; -#define _vouchers_lock_lock() os_lock_lock(&_vouchers_lock) -#define _vouchers_lock_unlock() os_lock_unlock(&_vouchers_lock) +static dispatch_unfair_lock_s _vouchers_lock; +#define _vouchers_lock_lock() _dispatch_unfair_lock_lock(&_vouchers_lock) +#define _vouchers_lock_unlock() _dispatch_unfair_lock_unlock(&_vouchers_lock) static voucher_t _voucher_find_and_retain(mach_voucher_t kv) @@ -185,15 +180,15 @@ _voucher_find_and_retain(mach_voucher_t kv) _vouchers_lock_lock(); TAILQ_FOREACH(v, _vouchers_head(kv), v_list) { if (v->v_ipc_kvoucher == kv) { - int xref_cnt = dispatch_atomic_inc2o(v, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); if (slowpath(xref_cnt < 0)) { - _dispatch_voucher_debug("overrelease", v); - DISPATCH_CRASH("Voucher overrelease"); + _dispatch_voucher_debug("over-release", v); + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } if (xref_cnt == 0) { // resurrection: raced with _voucher_remove - (void)dispatch_atomic_inc2o(v, os_obj_ref_cnt, relaxed); + (void)os_atomic_inc2o(v, os_obj_ref_cnt, relaxed); } break; } @@ -210,7 +205,7 @@ _voucher_insert(voucher_t v) _vouchers_lock_lock(); if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption"); } TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list); _vouchers_lock_unlock(); @@ -224,10 +219,10 @@ _voucher_remove(voucher_t v) _vouchers_lock_lock(); if (slowpath(!kv)) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } // check for resurrection race with 
_voucher_find_and_retain - if (dispatch_atomic_load2o(v, os_obj_xref_cnt, seq_cst) < 0 && + if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 && _TAILQ_IS_ENQUEUED(v, v_list)) { TAILQ_REMOVE(_vouchers_head(kv), v, v_list); _TAILQ_MARK_NOT_ENQUEUED(v, v_list); @@ -267,36 +262,29 @@ _voucher_create_mach_voucher(const mach_voucher_attr_recipe_data_t *recipes, return kr; } -#if __has_include() && !defined(VOUCHER_USE_ATTR_BANK) -#include -#define VOUCHER_USE_ATTR_BANK 1 -mach_voucher_t _voucher_default_task_mach_voucher; -#endif - void _voucher_task_mach_voucher_init(void* ctxt DISPATCH_UNUSED) { -#if VOUCHER_USE_ATTR_BANK kern_return_t kr; - mach_voucher_t kv; + mach_voucher_t kv = MACH_VOUCHER_NULL; +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER static const mach_voucher_attr_recipe_data_t task_create_recipe = { .key = MACH_VOUCHER_ATTR_KEY_BANK, .command = MACH_VOUCHER_ATTR_BANK_CREATE, }; kr = _voucher_create_mach_voucher(&task_create_recipe, sizeof(task_create_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not create task mach voucher"); + if (slowpath(kr)) { + DISPATCH_CLIENT_CRASH(kr, "Could not create task mach voucher"); } _voucher_default_task_mach_voucher = kv; - _voucher_task_mach_voucher = kv; #endif + _voucher_task_mach_voucher = kv; } void voucher_replace_default_voucher(void) { -#if VOUCHER_USE_ATTR_BANK (void)_voucher_get_task_mach_voucher(); // initalize task mach voucher mach_voucher_t kv, tkv = MACH_VOUCHER_NULL; voucher_t v = _voucher_get(); @@ -315,28 +303,101 @@ voucher_replace_default_voucher(void) } } if (!tkv) tkv = _voucher_default_task_mach_voucher; - kv = dispatch_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed); + kv = os_atomic_xchg(&_voucher_task_mach_voucher, tkv, relaxed); if (kv && kv != _voucher_default_task_mach_voucher) { _voucher_dealloc_mach_voucher(kv); } _dispatch_voucher_debug("kvoucher[0x%08x] replace default voucher", v, tkv); -#endif } -static inline _voucher_atm_t 
-_voucher_get_atm(voucher_t voucher) -{ - _voucher_atm_t vatm; - vatm = voucher && voucher->v_atm ? voucher->v_atm : _voucher_task_atm; - return vatm; -} +#define _voucher_mach_recipe_size(payload_size) \ + (sizeof(mach_voucher_attr_recipe_data_t) + (payload_size)) + +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY +#define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\ + _voucher_mach_recipe_size(0) + \ + _voucher_mach_recipe_size(sizeof(ipc_pthread_priority_value_t)) + \ + _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ + _voucher_extra_size(v))) +#else +#define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\ + _voucher_mach_recipe_size(0) + \ + _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ + _voucher_extra_size(v))) +#endif -static inline mach_voucher_t -_voucher_get_atm_mach_voucher(voucher_t voucher) +DISPATCH_ALWAYS_INLINE +static inline mach_voucher_attr_recipe_size_t +_voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, + mach_voucher_t kvb, pthread_priority_t pp) { - _voucher_atm_t vatm = _voucher_get_atm(voucher); - mach_voucher_t kv = vatm ? 
vatm->vatm_kvoucher : MACH_VOUCHER_NULL; - return kv; + mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); + mach_voucher_attr_recipe_size_t size = 0; + + // normalize to just the QoS class and 0 relative priority + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + if (pp) pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; + + *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kvb, + }; + size += _voucher_mach_recipe_size(0); + +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY + if (pp) { + ipc_pthread_priority_value_t value = (ipc_pthread_priority_value_t)pp; + *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + .command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE, + .content_size = sizeof(value), + }; + mvar_buf = _dispatch_memappend(mvar_buf, &value); + size += _voucher_mach_recipe_size(sizeof(value)); + } +#endif // VOUCHER_USE_MACH_VOUCHER_PRIORITY + + if ((v && v->v_activity) || pp) { + _voucher_mach_udata_s *udata_buf; + unsigned udata_size = 0; + + if (v && v->v_activity) { + udata_size = offsetof(_voucher_mach_udata_s, _vmu_after_activity); + } else { + udata_size = offsetof(_voucher_mach_udata_s, _vmu_after_priority); + } + *mvar_buf = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_USER_DATA_STORE, + .content_size = udata_size, + }; + udata_buf = (_voucher_mach_udata_s *)(mvar_buf->content); + + if (v && v->v_activity) { + *udata_buf = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + .vmu_priority = (_voucher_priority_t)pp, + .vmu_activity = v->v_activity, + .vmu_activity_pid = v->v_activity_creator, + .vmu_parent_activity = v->v_parent_activity, + }; + } else { + *udata_buf = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + .vmu_priority = (_voucher_priority_t)pp, + }; + } + + mvar_buf = (mach_voucher_attr_recipe_t)(mvar_buf->content + udata_size); + size += 
_voucher_mach_recipe_size(udata_size); + } + + if (extra) { + memcpy(mvar_buf, _voucher_extra_recipes(v), extra); + size += extra; + } + return size; } mach_voucher_t @@ -346,22 +407,22 @@ _voucher_get_mach_voucher(voucher_t voucher) if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher; mach_voucher_t kvb = voucher->v_kvoucher; if (!kvb) kvb = _voucher_get_task_mach_voucher(); - if (!voucher->v_has_priority && !voucher->v_activities && + if (!voucher->v_activity && !voucher->v_priority && !_voucher_extra_size(voucher)) { return kvb; } - kern_return_t kr; + + mach_voucher_attr_recipe_t mvar = _voucher_mach_recipe_alloca(voucher); + mach_voucher_attr_recipe_size_t size; mach_voucher_t kv, kvo; - _voucher_base_recipe(voucher).previous_voucher = kvb; - _voucher_atm_recipe(voucher).previous_voucher = - _voucher_get_atm_mach_voucher(voucher); - kr = _voucher_create_mach_voucher(voucher->v_recipes, - _voucher_recipes_size() + _voucher_extra_size(voucher) + - _voucher_bits_recipe(voucher).content_size, &kv); + kern_return_t kr; + + size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority); + kr = _voucher_create_mach_voucher(mvar, size, &kv); if (dispatch_assume_zero(kr) || !kv){ return MACH_VOUCHER_NULL; } - if (!dispatch_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, + if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, kv, &kvo, relaxed)) { _voucher_dealloc_mach_voucher(kv); kv = kvo; @@ -386,25 +447,12 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher, kern_return_t kr; mach_voucher_t kv, kvb = voucher ? 
voucher->v_kvoucher : MACH_VOUCHER_NULL; if (!kvb) kvb = _voucher_get_task_mach_voucher(); - mach_voucher_attr_recipe_data_t *recipes; - size_t recipes_size = _voucher_recipes_size(); - if (voucher && (voucher->v_has_priority || voucher->v_activities || - _voucher_extra_size(voucher))) { - recipes_size += _voucher_bits_recipe(voucher).content_size + - _voucher_extra_size(voucher); - recipes = alloca(recipes_size); - memcpy(recipes, voucher->v_recipes, recipes_size); - _voucher_recipes_atm(recipes).previous_voucher = - _voucher_get_atm_mach_voucher(voucher); - } else { - mach_voucher_attr_content_size_t bits_size = _voucher_bits_size(0); - recipes_size += bits_size; - recipes = alloca(recipes_size); - _voucher_recipes_init(recipes, bits_size); - } - _voucher_recipes_base(recipes).previous_voucher = kvb; - *_voucher_recipes_priority(recipes) = (_voucher_priority_t)priority; - kr = _voucher_create_mach_voucher(recipes, recipes_size, &kv); + + mach_voucher_attr_recipe_t mvar = _voucher_mach_recipe_alloca(voucher); + mach_voucher_attr_recipe_size_t size; + + size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority); + kr = _voucher_create_mach_voucher(mvar, size, &kv); if (dispatch_assume_zero(kr) || !kv){ return MACH_VOUCHER_NULL; } @@ -414,102 +462,120 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher, } static voucher_t -_voucher_create_with_mach_voucher(mach_voucher_t kv) +_voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) { if (!kv) return NULL; kern_return_t kr; - mach_voucher_t rkv; mach_voucher_attr_recipe_t vr; size_t vr_size; mach_voucher_attr_recipe_size_t kvr_size = 0; + mach_voucher_attr_content_size_t udata_sz = 0; + _voucher_mach_udata_s *udata = NULL; +#if !VOUCHER_USE_BANK_AUTOREDEEM + mach_voucher_t rkv; const mach_voucher_attr_recipe_data_t redeem_recipe[] = { [0] = { .key = MACH_VOUCHER_ATTR_KEY_ALL, .command = MACH_VOUCHER_ATTR_COPY, .previous_voucher = kv, }, -#if VOUCHER_USE_ATTR_BANK [1] = { .key = 
MACH_VOUCHER_ATTR_KEY_BANK, .command = MACH_VOUCHER_ATTR_REDEEM, }, -#endif }; kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe), &rkv); if (!dispatch_assume_zero(kr)) { _voucher_dealloc_mach_voucher(kv); + _dispatch_kvoucher_debug("redeemed from 0x%08x", rkv, kv); + kv = rkv; } else { _dispatch_voucher_debug_machport(kv); - rkv = kv; } - voucher_t v = _voucher_find_and_retain(rkv); +#endif + voucher_t v = _voucher_find_and_retain(kv); if (v) { - _dispatch_voucher_debug("kvoucher[0x%08x] find with 0x%08x", v, rkv,kv); - _voucher_dealloc_mach_voucher(rkv); + _dispatch_voucher_debug("kvoucher[0x%08x] found", v, kv); + _voucher_dealloc_mach_voucher(kv); return v; } - vr_size = sizeof(*vr) + _voucher_bits_size(_voucher_max_activities); + vr_size = sizeof(*vr) + sizeof(_voucher_mach_udata_s); vr = alloca(vr_size); - if (rkv) { + if (kv) { kvr_size = (mach_voucher_attr_recipe_size_t)vr_size; - kr = mach_voucher_extract_attr_recipe(rkv, + kr = mach_voucher_extract_attr_recipe(kv, MACH_VOUCHER_ATTR_KEY_USER_DATA, (void*)vr, &kvr_size); DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) kvr_size = 0; - } - mach_voucher_attr_content_size_t content_size = vr->content_size; - uint8_t *content = vr->content; - bool valid = false, has_priority = false; - unsigned int activities = 0; - if (kvr_size >= sizeof(*vr) + sizeof(_voucher_magic_t)) { - valid = (*(_voucher_magic_t*)content == _voucher_magic_v1); - content += sizeof(_voucher_magic_t); - content_size -= sizeof(_voucher_magic_t); - } - if (valid) { - has_priority = (content_size >= sizeof(_voucher_priority_t)); - activities = has_priority ? 
(content_size - sizeof(_voucher_priority_t)) - / sizeof(voucher_activity_id_t) : 0; - } - pthread_priority_t priority = 0; - if (has_priority) { - priority = (pthread_priority_t)*(_voucher_priority_t*)content; - content += sizeof(_voucher_priority_t); - content_size -= sizeof(_voucher_priority_t); - } - voucher_activity_id_t va_id = 0, va_base_id = 0; - _voucher_activity_t act = NULL; - _voucher_atm_t vatm = NULL; - if (activities) { - va_id = *(voucher_activity_id_t*)content; - act = _voucher_activity_copy_from_mach_voucher(rkv, va_id); - if (!act && _voucher_activity_default) { - activities++; - // default to _voucher_activity_default base activity - va_base_id = _voucher_activity_default->va_id; - } else if (act && act->va_id != va_id) { - activities++; - va_base_id = act->va_id; + if (!dispatch_assume_zero(kr) && kvr_size >= sizeof(*vr)) { + udata_sz = vr->content_size; + udata = (_voucher_mach_udata_s*)vr->content; + dispatch_assume(udata_sz >= sizeof(_voucher_magic_t)); } - if (act) { - vatm = _voucher_atm_retain(act->va_atm); + } + vr = NULL; + + v = _voucher_alloc(0); + v->v_ipc_kvoucher = v->v_kvoucher = kv; + v->v_kv_has_importance = !!(msgh_bits & MACH_MSGH_BITS_RAISEIMP); + + if (udata_sz >= offsetof(_voucher_mach_udata_s,_vmu_after_priority)){ + if (udata->vmu_magic == VOUCHER_MAGIC_V3) { + v->v_priority = udata->vmu_priority; } } - v = _voucher_alloc(activities, priority, 0); - v->v_atm = vatm; - v->v_activity = act; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities && va_base_id) { - *activity_ids++ = va_base_id; - activities--; + bool remove_kv_userdata = false; + if (udata_sz >= offsetof(_voucher_mach_udata_s, _vmu_after_activity)) { +#if !RDAR_25050791 + remove_kv_userdata = true; +#endif + if (udata->vmu_magic == VOUCHER_MAGIC_V3 && udata->vmu_activity) { + v->v_activity = udata->vmu_activity; + v->v_activity_creator = udata->vmu_activity_pid; + v->v_parent_activity = udata->vmu_parent_activity; + } } - if 
(activities) { - memcpy(activity_ids, content, content_size); + + if (remove_kv_userdata) { + mach_voucher_t nkv = MACH_VOUCHER_NULL; + const mach_voucher_attr_recipe_data_t remove_userdata_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = kv, + }, + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, + .command = MACH_VOUCHER_ATTR_REMOVE, + }, +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY + [2] = { + .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + .command = MACH_VOUCHER_ATTR_REMOVE, + }, +#endif + }; + mach_voucher_attr_recipe_size_t size = sizeof(remove_userdata_recipe); + + kr = _voucher_create_mach_voucher(remove_userdata_recipe, size, &nkv); + if (!dispatch_assume_zero(kr)) { + _dispatch_voucher_debug("kvoucher[0x%08x] udata removal " + "(created 0x%08x)", v, kv, nkv); + v->v_ipc_kvoucher = MACH_VOUCHER_NULL; + v->v_kvoucher = nkv; + v->v_kvbase = _voucher_find_and_retain(nkv); + if (v->v_kvbase) { + _voucher_dealloc_mach_voucher(nkv); // borrow base reference + } + _voucher_dealloc_mach_voucher(kv); + kv = nkv; + } else { + _dispatch_voucher_debug_machport(kv); + } } - v->v_ipc_kvoucher = v->v_kvoucher = rkv; + _voucher_insert(v); - _dispatch_voucher_debug("kvoucher[0x%08x] create with 0x%08x", v, rkv, kv); + _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); return v; } @@ -522,24 +588,18 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, return ov ? _voucher_retain(ov) : NULL; } voucher_t v = _voucher_find_and_retain(kv); + voucher_fields_t ignore_fields = VOUCHER_FIELD_PRIORITY; + if (v) { _dispatch_voucher_debug("kvoucher[0x%08x] find", v, kv); _voucher_dealloc_mach_voucher(kv); return v; } - unsigned int activities = ov ? ov->v_activities : 0; - mach_voucher_attr_recipe_size_t extra = ov ? 
_voucher_extra_size(ov) : 0; - v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); - } - if (activities) { - if (ov->v_activity) { - v->v_activity = _voucher_activity_retain(ov->v_activity); - v->v_atm = _voucher_atm_retain(ov->v_atm); - } - memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), - activities * sizeof(voucher_activity_id_t)); + + if (kv) ignore_fields |= VOUCHER_FIELD_KVOUCHER; + v = _voucher_clone(ov, ignore_fields); + if (priority) { + v->v_priority = (_voucher_priority_t)priority; } if (kv) { v->v_ipc_kvoucher = v->v_kvoucher = kv; @@ -547,10 +607,6 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, _dispatch_voucher_debug("kvoucher[0x%08x] create with priority from " "voucher[%p]", v, kv, ov); _dispatch_voucher_debug_machport(kv); - } else if (ov && ov->v_kvoucher) { - voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; - v->v_kvbase = _voucher_retain(kvb); - v->v_kvoucher = kvb->v_kvoucher; } return v; } @@ -561,8 +617,7 @@ _voucher_create_without_importance(voucher_t ov) // Nothing to do unless the old voucher has a kernel voucher. If it // doesn't, it can't have any importance, now or in the future. if (!ov) return NULL; - // TODO: 17487167: track presence of importance attribute - if (!ov->v_kvoucher) return _voucher_retain(ov); + if (!ov->v_kvoucher || !ov->v_kv_has_importance) return _voucher_retain(ov); kern_return_t kr; mach_voucher_t kv, okv; // Copy kernel voucher, removing importance. 
@@ -596,22 +651,8 @@ _voucher_create_without_importance(voucher_t ov) return v; } voucher_t kvbase = v; - // Copy userspace contents - unsigned int activities = ov->v_activities; - pthread_priority_t priority = _voucher_get_priority(ov); - mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(ov); - v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); - } - if (activities) { - if (ov->v_activity) { - v->v_activity = _voucher_activity_retain(ov->v_activity); - v->v_atm = _voucher_atm_retain(ov->v_atm); - } - memcpy(_voucher_activity_ids(v), _voucher_activity_ids(ov), - activities * sizeof(voucher_activity_id_t)); - } + voucher_fields_t ignore_fields = VOUCHER_FIELD_KVOUCHER; + v = _voucher_clone(ov, ignore_fields); v->v_kvoucher = kv; if (ov->v_ipc_kvoucher) { v->v_ipc_kvoucher = kv; @@ -636,7 +677,6 @@ _voucher_create_accounting_voucher(voucher_t ov) kern_return_t kr = KERN_SUCCESS; mach_voucher_t okv, kv = MACH_VOUCHER_NULL; okv = ov->v_ipc_kvoucher ? 
ov->v_ipc_kvoucher : ov->v_kvoucher; -#if VOUCHER_USE_ATTR_BANK const mach_voucher_attr_recipe_data_t accounting_copy_recipe = { .key = MACH_VOUCHER_ATTR_KEY_BANK, .command = MACH_VOUCHER_ATTR_COPY, @@ -644,7 +684,6 @@ _voucher_create_accounting_voucher(voucher_t ov) }; kr = _voucher_create_mach_voucher(&accounting_copy_recipe, sizeof(accounting_copy_recipe), &kv); -#endif if (dispatch_assume_zero(kr) || !kv){ return NULL; } @@ -655,7 +694,7 @@ _voucher_create_accounting_voucher(voucher_t ov) _voucher_dealloc_mach_voucher(kv); return v; } - v = _voucher_alloc(0, 0, 0); + v = _voucher_alloc(0); v->v_ipc_kvoucher = v->v_kvoucher = kv; if (kv == okv) { v->v_kvbase = _voucher_retain(ov); @@ -670,20 +709,15 @@ _voucher_create_accounting_voucher(voucher_t ov) voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { - voucher_t v = _voucher_create_with_mach_voucher(_voucher_mach_msg_get(msg)); - _voucher_activity_trace_msg(v, msg, receive); - return v; + mach_msg_bits_t msgh_bits; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); + return _voucher_create_with_mach_voucher(kv, msgh_bits); } -#ifndef MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL -#define MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL 2 -#endif - void voucher_decrement_importance_count4CF(voucher_t v) { - if (!v || !v->v_kvoucher) return; - // TODO: 17487167: track presence of importance attribute + if (!v || !v->v_kvoucher || !v->v_kv_has_importance) return; kern_return_t kr; mach_voucher_t kv = v->v_ipc_kvoucher ? 
v->v_ipc_kvoucher : v->v_kvoucher; uint32_t dec = 1; @@ -700,14 +734,13 @@ voucher_decrement_importance_count4CF(voucher_t v) MACH_VOUCHER_IMPORTANCE_ATTR_DROP_EXTERNAL, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); DISPATCH_VERIFY_MIG(kr); + if (kr == KERN_INVALID_TASK) return; // non-denap receiver rdar://25643185 #if DISPATCH_DEBUG _dispatch_voucher_debug("kvoucher[0x%08x] decrement importance count to %u:" " %s - 0x%x", v, kv, count, mach_error_string(kr), kr); #endif - if (kr != KERN_INVALID_ARGUMENT && - dispatch_assume_zero(kr) == KERN_FAILURE) { - // TODO: 17487167: skip KERN_INVALID_ARGUMENT check - DISPATCH_CLIENT_CRASH("Voucher importance count underflow"); + if (slowpath(dispatch_assume_zero(kr) == KERN_FAILURE)) { + DISPATCH_CLIENT_CRASH(kr, "Voucher importance count underflow"); } } @@ -733,7 +766,7 @@ _voucher_dispose(voucher_t voucher) _dispatch_voucher_debug("dispose", voucher); if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) { _dispatch_voucher_debug("corruption", voucher); - DISPATCH_CRASH("Voucher corruption"); + DISPATCH_CLIENT_CRASH(voucher->v_list.tqe_prev, "Voucher corruption"); } voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS; if (voucher->v_ipc_kvoucher) { @@ -752,16 +785,10 @@ _voucher_dispose(voucher_t voucher) _voucher_release(voucher->v_kvbase); voucher->v_kvbase = NULL; } - if (voucher->v_activity) { - _voucher_activity_release(voucher->v_activity); - voucher->v_activity = NULL; - } - if (voucher->v_atm) { - _voucher_atm_release(voucher->v_atm); - voucher->v_atm = NULL; - } - voucher->v_has_priority = 0; - voucher->v_activities = 0; + voucher->v_activity = 0; + voucher->v_activity_creator = 0; + voucher->v_parent_activity = 0; + voucher->v_priority = 0; #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; @@ -769,16 +796,141 @@ _voucher_dispose(voucher_t voucher) return _os_object_dealloc((_os_object_t)voucher); } +static void +_voucher_activity_debug_channel_barrier_nop(void 
*ctxt DISPATCH_UNUSED) +{ +} + +void +_voucher_activity_debug_channel_init(void) +{ + dispatch_mach_handler_function_t handler = NULL; + + if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) { + handler = _voucher_libtrace_hooks->vah_debug_channel_handler; + } + + if (!handler) return; + + dispatch_mach_t dm; + mach_port_t dbgp; + kern_return_t kr; + + kr = task_get_debug_control_port(mach_task_self(), &dbgp); + DISPATCH_VERIFY_MIG(kr); + if (kr) { + DISPATCH_CLIENT_CRASH(kr, "Couldn't get debug control port"); + } + if (dbgp) { + dm = dispatch_mach_create_f("com.apple.debug-channel", + DISPATCH_TARGET_QUEUE_DEFAULT, NULL, handler); + dispatch_mach_connect(dm, dbgp, MACH_PORT_NULL, NULL); + // will force the DISPATCH_MACH_CONNECTED event + dispatch_mach_send_barrier_f(dm, NULL, + _voucher_activity_debug_channel_barrier_nop); + _voucher_activity_debug_channel = dm; + } +} + void _voucher_atfork_child(void) { - _voucher_activity_atfork_child(); _dispatch_thread_setspecific(dispatch_voucher_key, NULL); _voucher_task_mach_voucher_pred = 0; _voucher_task_mach_voucher = MACH_VOUCHER_NULL; +#if !VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER + _voucher_default_task_mach_voucher = MACH_PORT_NULL; +#endif + _voucher_aid_next = 0; + _firehose_task_buffer_pred = 0; + _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE +} + +#if VOUCHER_EXPORT_PERSONA_SPI +#if VOUCHER_USE_PERSONA +static kern_return_t +_voucher_get_current_persona_token(struct persona_token *token) +{ + kern_return_t kr = KERN_FAILURE; + voucher_t v = _voucher_get(); + + if (v && v->v_kvoucher) { + mach_voucher_t kv = v->v_ipc_kvoucher ?: v->v_kvoucher; + mach_voucher_attr_content_t kvc_in = NULL; + mach_voucher_attr_content_size_t kvc_in_size = 0; + mach_voucher_attr_content_t kvc_out = + (mach_voucher_attr_content_t)token; + mach_voucher_attr_content_size_t kvc_out_size = sizeof(*token); + + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_BANK, + BANK_PERSONA_TOKEN, 
kvc_in, kvc_in_size, + kvc_out, &kvc_out_size); + if (kr != KERN_NOT_SUPPORTED + // Voucher doesn't have a PERSONA_TOKEN + && kr != KERN_INVALID_VALUE + // Kernel doesn't understand BANK_PERSONA_TOKEN + && kr != KERN_INVALID_ARGUMENT) { + (void)dispatch_assume_zero(kr); + } + } + return kr; +} +#endif + +uid_t +voucher_get_current_persona(void) +{ + uid_t persona_id = PERSONA_ID_NONE; + +#if VOUCHER_USE_PERSONA + struct persona_token token; + int err; + + if (_voucher_get_current_persona_token(&token) == KERN_SUCCESS) { + return token.originator.persona_id; + } + + // falling back to the process persona if there is no adopted voucher + if (kpersona_get(&persona_id) < 0) { + err = errno; + if (err != ESRCH) { + (void)dispatch_assume_zero(err); + } + } +#endif + return persona_id; +} + +int +voucher_get_current_persona_originator_info(struct proc_persona_info *persona_info) +{ +#if VOUCHER_USE_PERSONA + struct persona_token token; + if (_voucher_get_current_persona_token(&token) == KERN_SUCCESS) { + *persona_info = token.originator; + return 0; + } +#else + (void)persona_info; +#endif + return -1; +} - // TODO: voucher/activity inheritance on fork ? 
+int +voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_info) +{ +#if VOUCHER_USE_PERSONA + struct persona_token token; + if (_voucher_get_current_persona_token(&token) == KERN_SUCCESS) { + *persona_info = token.proximate; + return 0; + } +#else + (void)persona_info; +#endif + return -1; } +#endif #pragma mark - #pragma mark _voucher_init @@ -786,10 +938,7 @@ _voucher_atfork_child(void) boolean_t voucher_mach_msg_set(mach_msg_header_t *msg) { - voucher_t v = _voucher_get(); - bool clear_voucher = _voucher_mach_msg_set(msg, v); - if (clear_voucher) _voucher_activity_trace_msg(v, msg, send); - return clear_voucher; + return _voucher_mach_msg_set(msg, _voucher_get()); } void @@ -801,10 +950,10 @@ voucher_mach_msg_clear(mach_msg_header_t *msg) voucher_mach_msg_state_t voucher_mach_msg_adopt(mach_msg_header_t *msg) { - mach_voucher_t kv = _voucher_mach_msg_get(msg); + mach_msg_bits_t msgh_bits; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); if (!kv) return VOUCHER_MACH_MSG_STATE_UNCHANGED; - voucher_t v = _voucher_create_with_mach_voucher(kv); - _voucher_activity_trace_msg(v, msg, receive); + voucher_t v = _voucher_create_with_mach_voucher(kv, msgh_bits); return (voucher_mach_msg_state_t)_voucher_adopt(v); } @@ -837,1470 +986,383 @@ _voucher_libkernel_init(void) #define _voucher_libkernel_init() #endif +void +voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) +{ + if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL, + hooks, relaxed)) { + DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, + "voucher_activity_initialize_4libtrace called twice"); + } +} + void _voucher_init(void) { _voucher_libkernel_init(); - char *e; unsigned int i; for (i = 0; i < VL_HASH_SIZE; i++) { TAILQ_INIT(&_vouchers[i]); } - voucher_activity_mode_t mode; - mode = DISPATCH_DEBUG ? 
voucher_activity_mode_debug - : voucher_activity_mode_release; - e = getenv("OS_ACTIVITY_MODE"); - if (e) { - if (strcmp(e, "release") == 0) { - mode = voucher_activity_mode_release; - } else if (strcmp(e, "debug") == 0) { - mode = voucher_activity_mode_debug; - } else if (strcmp(e, "stream") == 0) { - mode = voucher_activity_mode_stream; - } else if (strcmp(e, "disable") == 0) { - mode = voucher_activity_mode_disable; - } - } - _voucher_activity_mode = mode; - if (_voucher_activity_disabled()) return; - - // default task activity - bool default_task_activity = DISPATCH_DEBUG; - e = getenv("LIBDISPATCH_DEFAULT_TASK_ACTIVITY"); - if (e) default_task_activity = atoi(e); - if (default_task_activity) { - (void)voucher_activity_start(_voucher_activity_trace_id_release, 0); - } } #pragma mark - -#pragma mark _voucher_activity_lock_s +#pragma mark voucher_activity_t -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_init(_voucher_activity_lock_s *lock) { - static const os_lock_handoff_s _os_lock_handoff_init = OS_LOCK_HANDOFF_INIT; - *lock = _os_lock_handoff_init; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_lock(_voucher_activity_lock_s *lock) { - return os_lock_lock(lock); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_lock_unlock(_voucher_activity_lock_s *lock) { - return os_lock_unlock(lock); -} - -#pragma mark - -#pragma mark _voucher_activity_heap - -#if __has_extension(c_static_assert) -_Static_assert(sizeof(struct _voucher_activity_tracepoint_s) == 64, - "Tracepoint too large"); -_Static_assert(sizeof(struct _voucher_activity_buffer_header_s) <= - sizeof(struct _voucher_activity_tracepoint_s), - "Buffer header too large"); -#if __LP64__ -_Static_assert(offsetof(struct _voucher_activity_s, va_buffers_lock) % 64 == 0, - "Bad activity padding"); -_Static_assert(sizeof(struct _voucher_atm_s) <= 128, - "ATM too large"); -#else -_Static_assert(sizeof(struct _voucher_atm_s) <= 64, - "ATM too large"); 
-#endif -_Static_assert(sizeof(_voucher_activity_buffer_t) == - sizeof(struct {char x[_voucher_activity_buffer_size];}), - "Buffer too large"); -_Static_assert(sizeof(struct _voucher_activity_metadata_s) <= - sizeof(struct _voucher_activity_metadata_opaque_s), - "Metadata too large"); -_Static_assert(sizeof(_voucher_activity_bitmap_t) % 64 == 0, - "Bad metadata bitmap size"); -#endif - -#define va_buffers_lock(va) (&(va)->va_buffers_lock) -#define vatm_activities(vatm) (&(vatm)->vatm_activities) -#define vam_atms_lock() (&_voucher_activity_heap->vam_atms_lock) -#define vam_activities_lock() (&_voucher_activity_heap->vam_activities_lock) -#define vam_atms(hash) (&_voucher_activity_heap->vam_atms[hash]) -#define vam_activities(hash) (&_voucher_activity_heap->vam_activities[hash]) -#define vam_buffer_bitmap() (_voucher_activity_heap->vam_buffer_bitmap) -#define vam_pressure_locked_bitmap() \ - (_voucher_activity_heap->vam_pressure_locked_bitmap) -#define vam_buffer(i) ((void*)((char*)_voucher_activity_heap + \ - (i) * _voucher_activity_buffer_size)) - -static _voucher_activity_t _voucher_activity_create_with_atm( - _voucher_atm_t vatm, voucher_activity_id_t va_id, - voucher_activity_trace_id_t trace_id, uint64_t location, - _voucher_activity_buffer_header_t buffer); -static _voucher_atm_t _voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id); -static void _voucher_activity_firehose_wait(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer); - -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_voucher_default_activity_buffer_limit() +DISPATCH_NOINLINE +static uint64_t +_voucher_activity_id_allocate_slow(uint64_t aid) { -#if 0 // FIXME: tune buffer chain sizes - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: Default activity can use 1/32nd of the heap - // (twice as much as non-default activities) - return MAX(_voucher_activity_buffers_per_heap / 32, 3) - 1; + 
kern_return_t kr; + uint64_t next; + + kr = mach_generate_activity_id(mach_task_self(), 1, &next); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, "Could not generate an activity ID"); + } + next *= MACH_ACTIVITY_ID_RANGE_SIZE; + next &= MACH_ACTIVITY_ID_MASK; + if (unlikely(next == 0)) { + next++; } -#endif - // Low-profile modes: Default activity can use a total of 4 buffers. - return 3; -} -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_voucher_activity_buffer_limit() -{ -#if 0 // FIXME: tune buffer chain sizes - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: 64 activities, each of which can use 1/64th - // of the entire heap. - return MAX(_voucher_activity_buffers_per_heap / 64, 2) - 1; + if (unlikely(aid == 0)) { + if (os_atomic_cmpxchg(&_voucher_aid_next, 0, next + 1, relaxed)) { + return next; + } } -#endif - // Low-profile modes: Each activity can use a total of 2 buffers. - return 1; + return os_atomic_xchg(&_voucher_aid_next, next, relaxed); } -// The two functions above return the number of *additional* buffers activities -// may allocate, hence the gymnastics with - 1. - DISPATCH_ALWAYS_INLINE -static inline uint32_t -_voucher_heap_buffer_limit() -{ - switch (_voucher_activity_mode) { - case voucher_activity_mode_debug: - case voucher_activity_mode_stream: - // High-profile modes: Use it all. - return _voucher_activity_buffers_per_heap; - } -#if TARGET_OS_EMBEDDED - // Low-profile modes: 3 activities, each of which can use 2 buffers; - // plus the default activity, which can use 3; plus 3 buffers of overhead. - return 12; -#else - // Low-profile modes: 13 activities, each of which can use 4 buffers; - // plus the default activity, which can use 8; plus 3 buffers of overhead. 
- return 64; -#endif +static firehose_activity_id_t +_voucher_activity_id_allocate(firehose_activity_flags_t flags) +{ + uint64_t aid, next; + os_atomic_rmw_loop(&_voucher_aid_next, aid, next, relaxed, { + next = aid + 1; + if (aid == 0 || next % MACH_ACTIVITY_ID_RANGE_SIZE == 0) { + os_atomic_rmw_loop_give_up({ + aid = _voucher_activity_id_allocate_slow(aid); + break; + }); + } + }); + return FIREHOSE_ACTIVITY_ID_MAKE(aid, flags); } -#define NO_BITS_WERE_UNSET (UINT_MAX) +#define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \ + firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \ + stream, pub, priv, privbuf) -DISPATCH_ALWAYS_INLINE -static inline size_t -_voucher_activity_bitmap_set_first_unset_bit_upto( - _voucher_activity_bitmap_t volatile bitmap, - unsigned int max_index) -{ - dispatch_assert(max_index != 0); - unsigned int index = NO_BITS_WERE_UNSET, max_map, max_bit, i; - max_map = max_index / _voucher_activity_bits_per_bitmap_base_t; - max_map = MIN(max_map, _voucher_activity_bitmaps_per_heap - 1); - max_bit = max_index % _voucher_activity_bits_per_bitmap_base_t; - for (i = 0; i < max_map; i++) { - index = dispatch_atomic_set_first_bit(&bitmap[i], UINT_MAX); - if (fastpath(index < NO_BITS_WERE_UNSET)) { - return index + i * _voucher_activity_bits_per_bitmap_base_t; - } - } - index = dispatch_atomic_set_first_bit(&bitmap[i], max_bit); - if (fastpath(index < NO_BITS_WERE_UNSET)) { - return index + i * _voucher_activity_bits_per_bitmap_base_t; - } - return index; -} +#define _voucher_activity_tracepoint_flush(ft, ftid) \ + firehose_buffer_tracepoint_flush(_firehose_task_buffer, ft, ftid) -DISPATCH_ALWAYS_INLINE DISPATCH_UNUSED -static inline size_t -_voucher_activity_bitmap_set_first_unset_bit( - _voucher_activity_bitmap_t volatile bitmap) +DISPATCH_NOINLINE +static void +_firehose_task_buffer_init(void *ctx OS_UNUSED) { - return _voucher_activity_bitmap_set_first_unset_bit_upto(bitmap, UINT_MAX); -} + mach_port_t 
logd_port; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_bitmap_clear_bit( - _voucher_activity_bitmap_t volatile bitmap, size_t index) -{ - size_t i = index / _voucher_activity_bits_per_bitmap_base_t; - _voucher_activity_bitmap_base_t mask = ((typeof(mask))1) << - (index % _voucher_activity_bits_per_bitmap_base_t); - if (slowpath((bitmap[i] & mask) == 0)) { - DISPATCH_CRASH("Corruption: failed to clear bit exclusively"); + /* Query the uniquepid of the current process */ + struct proc_uniqidentifierinfo p_uniqinfo = { }; + int info_size = 0; + + info_size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 1, + &p_uniqinfo, PROC_PIDUNIQIDENTIFIERINFO_SIZE); + if (slowpath(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) { + DISPATCH_INTERNAL_CRASH(info_size, "Unable to get the unique pid"); } - (void)dispatch_atomic_and(&bitmap[i], ~mask, release); -} + _voucher_unique_pid = p_uniqinfo.p_uniqueid; -_voucher_activity_metadata_t _voucher_activity_heap; -static dispatch_once_t _voucher_activity_heap_pred; -static void -_voucher_activity_heap_init(void *ctxt DISPATCH_UNUSED) -{ - if (_voucher_activity_disabled()) return; - kern_return_t kr; - mach_vm_size_t vm_size = _voucher_activity_buffer_size * - _voucher_activity_buffers_per_heap; - mach_vm_address_t vm_addr = vm_page_size; - while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, - 0, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_GENEALOGY), - MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_NONE))) { - if (kr != KERN_NO_SPACE) { - (void)dispatch_assume_zero(kr); - _voucher_activity_mode = voucher_activity_mode_disable; - return; + if (!fastpath(_voucher_libtrace_hooks)) { + if (0) { // + DISPATCH_CLIENT_CRASH(0, + "Activity subsystem isn't initialized yet"); } - _dispatch_temporary_resource_shortage(); - vm_addr = vm_page_size; - } - _voucher_activity_metadata_t heap; - task_trace_memory_info_data_t trace_memory_info = { - .user_memory_address = vm_addr, - 
.buffer_size = vm_size, - }; - kr = task_set_info(mach_task_self(), TASK_TRACE_MEMORY_INFO, - (task_info_t)&trace_memory_info, TASK_TRACE_MEMORY_INFO_COUNT); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - if (kr != KERN_NOT_SUPPORTED) (void)dispatch_assume_zero(kr); - kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); - (void)dispatch_assume_zero(kr); - _voucher_activity_mode = voucher_activity_mode_disable; return; } - heap = (void*)vm_addr; - heap->vasm_baseaddr = (void*)vm_addr; - heap->vam_buffer_bitmap[0] = 0x7; // first three buffers are reserved - uint32_t i; - for (i = 0; i < _voucher_activity_hash_size; i++) { - TAILQ_INIT(&heap->vam_activities[i]); - TAILQ_INIT(&heap->vam_atms[i]); + logd_port = _voucher_libtrace_hooks->vah_get_logd_port(); + if (logd_port) { + unsigned long flags = 0; +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE + if (_dispatch_memory_warn) { + flags |= FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY; + } +#endif + // firehose_buffer_create always consumes the send-right + _firehose_task_buffer = firehose_buffer_create(logd_port, + _voucher_unique_pid, flags); } - _voucher_activity_lock_init(&heap->vam_atms_lock); - _voucher_activity_lock_init(&heap->vam_activities_lock); - _voucher_activity_heap = heap; - - _voucher_atm_t vatm = _voucher_atm_create(0, 0); - dispatch_assert(vatm->vatm_kvoucher); - _voucher_atm_retain(vatm); - - _voucher_activity_buffer_header_t buffer = vam_buffer(2); // reserved index - // consumes vatm reference: - _voucher_activity_t va = _voucher_activity_create_with_atm(vatm, 0, 0, 0, - buffer); - dispatch_assert(va); - va->va_buffer_limit = _voucher_default_activity_buffer_limit(); - _voucher_activity_default = va; - _voucher_task_atm = vatm; } -static void -_voucher_activity_atfork_child(void) +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_activity_disabled(void) { - _voucher_activity_heap_pred = 0; - _voucher_activity_heap = NULL; // activity heap is VM_INHERIT_NONE - _voucher_activity_default = NULL; + 
dispatch_once_f(&_firehose_task_buffer_pred, + NULL, _firehose_task_buffer_init); + + firehose_buffer_t fb = _firehose_task_buffer; + if (fastpath(fb)) { + return slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD); + } + return true; } void* voucher_activity_get_metadata_buffer(size_t *length) { - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); if (_voucher_activity_disabled()) { *length = 0; return NULL; } - *length = sizeof(_voucher_activity_heap->vam_client_metadata); - return _voucher_activity_heap->vam_client_metadata; -} -static _voucher_activity_buffer_hook_t _voucher_activity_buffer_hook; + firehose_buffer_header_t fbh = &_firehose_task_buffer->fb_header; -void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook) + *length = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + return (void *)((uintptr_t)(fbh + 1) - *length); +} + +voucher_t +voucher_activity_create(firehose_tracepoint_id_t trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) { - if (dispatch_atomic_cmpxchg(&_voucher_activity_buffer_hook, NULL, - (void*)hook, release)) return; - DISPATCH_CLIENT_CRASH("_voucher_activity_buffer_hook_install_4libtrace " \ - "called more than once"); + return voucher_activity_create_with_location(&trace_id, base, flags, location); } -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG -#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) \ - _dispatch_debug("activity buffer %s (%p)", #reason, buffer) -#else -#define VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer) -#endif +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) +{ + firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0; + firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id }; + uint16_t pubsize = sizeof(va_id) + sizeof(location); + uint64_t creator_id = 0; + voucher_t ov = _voucher_get(); + 
voucher_t v; -#define VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(reason, buffer) \ - if (buffer) { VOUCHER_ACTIVITY_BUFFER_DEBUG(reason, buffer); \ - if (slowpath(_voucher_activity_buffer_hook)) { \ - _voucher_activity_buffer_hook( \ - _voucher_activity_buffer_hook_reason_##reason, (buffer)); \ - } } + if (base == VOUCHER_CURRENT) { + base = ov; + } + if (_voucher_activity_disabled()) { + *trace_id = 0; + return base ? _voucher_retain(base) : VOUCHER_NULL; + } -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_buffer_header_t -_voucher_activity_heap_buffer_alloc(void) -{ - _voucher_activity_buffer_header_t buffer = NULL; - size_t index; - index = _voucher_activity_bitmap_set_first_unset_bit_upto( - vam_buffer_bitmap(), _voucher_heap_buffer_limit() - 1); - if (index < NO_BITS_WERE_UNSET) { - buffer = vam_buffer(index); - } -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap alloc %zd (%p)", index, buffer); -#endif - return buffer; -} + FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); + if (ov && (current_id = ov->v_activity)) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(firehose_activity_id_t); + if ((creator_id = ov->v_activity_creator)) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_unique_pid); + pubsize += sizeof(uint64_t); + } + } + if (base != VOUCHER_NULL) { + parent_id = base->v_activity; + } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_buffer_free(_voucher_activity_buffer_header_t buffer) -{ - buffer->vabh_flags = _voucher_activity_trace_flag_buffer_empty; - size_t index = (size_t)((char*)buffer - (char*)_voucher_activity_heap) / - _voucher_activity_buffer_size; -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap free %zd (%p)", index, buffer); -#endif - _voucher_activity_bitmap_clear_bit(vam_buffer_bitmap(), index); -} - -#define _voucher_activity_heap_can_madvise() \ - (PAGE_SIZE == _voucher_activity_buffer_size) // - 
-DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_madvise(size_t bitmap_num, unsigned int start, - unsigned int len) -{ - size_t base = bitmap_num * _voucher_activity_bits_per_bitmap_base_t; -#if DISPATCH_DEBUG -#if DISPATCH_VOUCHER_ACTIVITY_DEBUG - _dispatch_debug("activity heap madvise %zd (%p) -> %zd (%p)", base + start, - vam_buffer(base + start), base + start + len, - vam_buffer(base + start + len)); -#endif - dispatch_assert(!(len * _voucher_activity_buffer_size % vm_page_size)); - const uint64_t pattern = 0xFACEFACEFACEFACE; - _voucher_activity_buffer_header_t buffer = vam_buffer(base + start); - for (unsigned int i = 0; i < len; i++, buffer++) { - memset_pattern8((char*)buffer + sizeof(buffer->vabh_flags), &pattern, - _voucher_activity_buffer_size - sizeof(buffer->vabh_flags)); + if (parent_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, activity, has_other_aid); + pubsize += sizeof(firehose_activity_id_t); + flags |= FIREHOSE_ACTIVITY_ID_FLAGS(parent_id); } -#endif - (void)dispatch_assume_zero(madvise(vam_buffer(base + start), - len * _voucher_activity_buffer_size, MADV_FREE)); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_heap_madvise_contiguous(size_t bitmap_num, - _voucher_activity_bitmap_base_t bits) -{ - // TODO: x86 has fast ctz; arm has fast clz; haswell has fast ctz - dispatch_assert(_voucher_activity_heap_can_madvise()); - if (bits == 0) { - return; - } else if (~bits == 0) { - _voucher_activity_heap_madvise(bitmap_num, 0, - _voucher_activity_bits_per_bitmap_base_t); - } else while (bits != 0) { - unsigned int start = (typeof(start))__builtin_ctzl(bits), len; - typeof(bits) inverse = ~bits >> start; - if (inverse) { - len = (typeof(len))__builtin_ctzl(inverse); - } else { - len = _voucher_activity_bits_per_bitmap_base_t - start; - } - typeof(bits) mask = ((((typeof(bits))1) << len) - 1) << start; - bits &= ~mask; - _voucher_activity_heap_madvise(bitmap_num, start, len); - } -} -void 
-_voucher_activity_heap_pressure_warn(void) -{ - if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) { - return; + if (firehose_precise_timestamps_enabled()) { + flags |= firehose_activity_flags_precise_timestamp; } - volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap; - bitmap = vam_buffer_bitmap(); - pressure_locked_bitmap = vam_pressure_locked_bitmap(); + voucher_fields_t ignore_fields = VOUCHER_FIELD_ACTIVITY; + v = _voucher_clone(base, ignore_fields); + v->v_activity = va_id = _voucher_activity_id_allocate(flags); + v->v_activity_creator = _voucher_unique_pid; + v->v_parent_activity = parent_id; - // number of bitmaps needed to map the current buffer limit = - // ceil(buffer limit / bits per bitmap) - size_t nbuffers = _voucher_heap_buffer_limit(); - size_t nbitmaps_quot = nbuffers / _voucher_activity_bits_per_bitmap_base_t; - size_t nbitmaps_rem = nbuffers % _voucher_activity_bits_per_bitmap_base_t; - size_t nbitmaps = nbitmaps_quot + ((nbitmaps_rem == 0) ? 
0 : 1); + static const firehose_stream_t streams[2] = { + firehose_stream_metadata, + firehose_stream_persist, + }; + firehose_tracepoint_t ft; + uint64_t stamp = firehose_tracepoint_time(flags); - for (size_t i = 0; i < nbitmaps; i++) { - _voucher_activity_bitmap_base_t got_bits; - got_bits = dispatch_atomic_or_orig(&bitmap[i], ~((typeof(bitmap[i]))0), - relaxed); - got_bits = ~got_bits; // Now 1 means 'acquired this one, madvise it' - _voucher_activity_heap_madvise_contiguous(i, got_bits); - pressure_locked_bitmap[i] |= got_bits; - } -} + for (size_t i = 0; i < countof(streams); i++) { + ft = _voucher_activity_tracepoint_reserve(stamp, streams[i], pubsize, + 0, NULL); + if (!fastpath(ft)) continue; -void -_voucher_activity_heap_pressure_normal(void) -{ - if (!_voucher_activity_heap_can_madvise() || !_voucher_activity_heap) { - return; - } - volatile _voucher_activity_bitmap_base_t *bitmap, *pressure_locked_bitmap; - bitmap = vam_buffer_bitmap(); - pressure_locked_bitmap = vam_pressure_locked_bitmap(); - for (size_t i = 0; i < _voucher_activity_bitmaps_per_heap; i++) { - _voucher_activity_bitmap_base_t free_bits = pressure_locked_bitmap[i]; - pressure_locked_bitmap[i] = 0; - if (free_bits != 0) { - (void)dispatch_atomic_and(&bitmap[i], ~free_bits, release); + uint8_t *pubptr = ft->ft_data; + if (current_id) { + pubptr = _dispatch_memappend(pubptr, ¤t_id); } - } -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_buffer_init(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer, bool initial) -{ - _voucher_activity_tracepoint_t vat = (_voucher_activity_tracepoint_t)buffer; - _voucher_activity_tracepoint_init_with_id(vat, act->va_trace_id, - act->va_location, !initial); - buffer->vabh_flags = _voucher_activity_trace_flag_buffer_header | - _voucher_activity_trace_flag_activity | - (initial ? 
_voucher_activity_trace_flag_start : 0); - buffer->vabh_activity_id = act->va_id; - buffer->vabh_pos.vabp_atomic_pos = 0; - buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx = 1; -} - -static _voucher_activity_buffer_header_t -_voucher_activity_buffer_alloc_slow(_voucher_activity_t act, - _voucher_activity_buffer_header_t current) -{ - _voucher_activity_buffer_header_t buffer; - _voucher_activity_lock_lock(va_buffers_lock(act)); // TODO: revisit locking - buffer = act->va_current_buffer; - if (buffer != current) { - _voucher_activity_lock_unlock(va_buffers_lock(act)); - return buffer; - } - buffer = TAILQ_FIRST(&act->va_buffers); - if (buffer != TAILQ_LAST(&act->va_buffers, - _voucher_activity_buffer_list_s)) { - TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); - TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); - } - _voucher_activity_lock_unlock(va_buffers_lock(act)); - if (_voucher_activity_buffer_is_full(buffer)) { - _voucher_activity_firehose_wait(act, buffer); - } - if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer, - ¤t, release)) { - if (_voucher_activity_buffer_mark_full(current)) { - _voucher_activity_firehose_push(act, current); + if (creator_id) { + pubptr = _dispatch_memappend(pubptr, &creator_id); } - _dispatch_voucher_activity_debug("buffer reuse %p", act, buffer); - } else { - buffer = current; - } - return buffer; -} - -static _voucher_activity_buffer_header_t -_voucher_activity_buffer_alloc(_voucher_activity_t act, - _voucher_activity_buffer_header_t current) -{ - _voucher_activity_buffer_header_t buffer = NULL; - if (act->va_buffer_count < act->va_buffer_limit) { - buffer = _voucher_activity_heap_buffer_alloc(); - if (buffer && dispatch_atomic_inc2o(act, va_buffer_count, relaxed) > - act->va_buffer_limit) { - dispatch_atomic_dec2o(act, va_buffer_count, relaxed); - _voucher_activity_heap_buffer_free(buffer); - buffer = NULL; + if (parent_id) { + pubptr = _dispatch_memappend(pubptr, &parent_id); } + pubptr = 
_dispatch_memappend(pubptr, &va_id); + pubptr = _dispatch_memappend(pubptr, &location); + _voucher_activity_tracepoint_flush(ft, ftid); } - if (!buffer) return _voucher_activity_buffer_alloc_slow(act, current); - _voucher_activity_buffer_init(act, buffer, false); - if (dispatch_atomic_cmpxchgv2o(act, va_current_buffer, current, buffer, - ¤t, release)) { - _voucher_activity_lock_lock(va_buffers_lock(act)); - TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); - _voucher_activity_lock_unlock(va_buffers_lock(act)); - if (_voucher_activity_buffer_mark_full(current)) { - _voucher_activity_firehose_push(act, current); - } - _dispatch_voucher_activity_debug("buffer alloc %p", act, buffer); - } else { - dispatch_atomic_dec2o(act, va_buffer_count, relaxed); - _voucher_activity_heap_buffer_free(buffer); - buffer = current; - } - return buffer; + *trace_id = ftid.ftid_value; + return v; } -#pragma mark - -#pragma mark _voucher_activity_t - -#define _voucher_activity_ordered_insert(_act, head, field) do { \ - typeof(_act) _vai; \ - TAILQ_FOREACH(_vai, (head), field) { \ - if (_act->va_id < _vai->va_id) break; \ - } \ - if (_vai) { \ - TAILQ_INSERT_BEFORE(_vai, _act, field); \ - } else { \ - TAILQ_INSERT_TAIL((head), _act, field); \ - } } while (0); - -static void _voucher_activity_dispose(_voucher_activity_t act); -static _voucher_atm_t _voucher_atm_copy(atm_aid_t atm_id); -static inline void _voucher_atm_release(_voucher_atm_t vatm); -static atm_aid_t _voucher_mach_voucher_get_atm_id(mach_voucher_t kv); - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_try_retain(_voucher_activity_t act) +void +_voucher_activity_swap(firehose_activity_id_t old_id, + firehose_activity_id_t new_id) { - // not using _os_object_refcnt* because we don't need barriers: - // activities are immutable and are in a hash table with a lock - int use_cnt = dispatch_atomic_inc2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("retain -> %d", act, use_cnt + 1); - if 
(slowpath(use_cnt < 0)) { - _dispatch_voucher_activity_debug("overrelease", act); - DISPATCH_CRASH("Activity overrelease"); - } - return use_cnt > 0; -} + if (_voucher_activity_disabled()) return; -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_t -_voucher_activity_retain(_voucher_activity_t act) -{ - if (slowpath(!_voucher_activity_try_retain(act))) { - _dispatch_voucher_activity_debug("resurrection", act); - DISPATCH_CRASH("Activity resurrection"); - } - return act; -} + firehose_tracepoint_id_u ftid = { .ftid = { + ._namespace = firehose_tracepoint_namespace_activity, + ._type = _firehose_tracepoint_type_activity_swap, + } }; + uint16_t pubsize = 0; -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_release(_voucher_activity_t act) -{ - // not using _os_object_refcnt* because we don't need barriers: - // activities are immutable and are in a hash table with a lock - int use_cnt = dispatch_atomic_dec2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("release -> %d", act, use_cnt + 1); - if (fastpath(use_cnt >= 0)) { - return; + if (old_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(firehose_activity_id_t); } - if (slowpath(use_cnt < -1)) { - _dispatch_voucher_activity_debug("overrelease", act); - DISPATCH_CRASH("Activity overrelease"); + if (new_id) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, activity, has_other_aid); + pubsize += sizeof(firehose_activity_id_t); } - _voucher_activity_remove(act); - _voucher_activity_dispose(act); -} -static _voucher_activity_t -_voucher_activity_find_and_retain(voucher_activity_id_t va_id, uint32_t hash) -{ - // not using _os_object_refcnt* because we don't need barriers: - // activities are immutable and are in a hash table with a lock - // - // assumes vam_activities_lock held - _voucher_activity_t act; - TAILQ_FOREACH(act, vam_activities(hash), va_list) { - if (act->va_id == va_id) { - if (fastpath(_voucher_activity_try_retain(act))) { - return act; - } + 
firehose_stream_t stream = firehose_stream_metadata; + firehose_tracepoint_t ft; + firehose_activity_flags_t flags = FIREHOSE_ACTIVITY_ID_FLAGS(old_id) | + FIREHOSE_ACTIVITY_ID_FLAGS(new_id); + uint64_t stamp = firehose_tracepoint_time(flags); - // disallow resurrection - dispatch_atomic_dec2o(act, va_refcnt, relaxed); - _dispatch_voucher_activity_debug("undo resurrection", act); - } - } - return NULL; -} + _dispatch_voucher_ktrace_activity_adopt(new_id); -static _voucher_activity_t -_voucher_activity_copy_from_id(voucher_activity_id_t va_id) -{ - uint32_t hash = VACTID_HASH(va_id); - _voucher_activity_lock_lock(vam_activities_lock()); - _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); - if (act) { - _dispatch_voucher_activity_debug("copy from id 0x%llx", act, va_id); - } - _voucher_activity_lock_unlock(vam_activities_lock()); - return act; + ft = _voucher_activity_tracepoint_reserve(stamp, stream, pubsize, 0, NULL); + if (!fastpath(ft)) return; + uint8_t *pubptr = ft->ft_data; + if (old_id) pubptr = _dispatch_memappend(pubptr, &old_id); + if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id); + _voucher_activity_tracepoint_flush(ft, ftid); } -static _voucher_activity_t -_voucher_activity_try_insert(_voucher_activity_t act_new) +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid, + firehose_activity_id_t *parent_id) { - voucher_activity_id_t va_id = act_new->va_id; - uint32_t hash = VACTID_HASH(va_id); - _voucher_activity_lock_lock(vam_activities_lock()); - _voucher_activity_t act = _voucher_activity_find_and_retain(va_id, hash); - if (act) { - _dispatch_voucher_activity_debug("try insert: failed (%p)", act,act_new); - } else { - if (slowpath(_TAILQ_IS_ENQUEUED(act_new, va_list))) { - _dispatch_voucher_activity_debug("corruption", act_new); - DISPATCH_CRASH("Activity corruption"); - } - TAILQ_INSERT_TAIL(vam_activities(hash), act_new, va_list); - _dispatch_voucher_activity_debug("try insert: 
succeeded", act_new); + if (v == VOUCHER_CURRENT) { + v = _voucher_get(); } - _voucher_activity_lock_unlock(vam_activities_lock()); - return act; -} - -static void -_voucher_activity_remove(_voucher_activity_t act) -{ - voucher_activity_id_t va_id = act->va_id; - uint32_t hash = VACTID_HASH(va_id); - - _voucher_activity_lock_lock(vam_activities_lock()); - if (slowpath(!va_id || !_TAILQ_IS_ENQUEUED(act, va_list))) { - _dispatch_voucher_activity_debug("corruption", act); - DISPATCH_CRASH("Activity corruption"); + if (v == VOUCHER_NULL) { + if (creator_pid) *creator_pid = 0; + if (parent_id) *parent_id = FIREHOSE_ACTIVITY_ID_NULL; + return FIREHOSE_ACTIVITY_ID_NULL; } - TAILQ_REMOVE(vam_activities(hash), act, va_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_list); - act->va_list.tqe_next = (void*)~0ull; - _dispatch_voucher_activity_debug("remove", act); - _voucher_activity_lock_unlock(vam_activities_lock()); + if (creator_pid) *creator_pid = v->v_activity_creator; + if (parent_id) *parent_id = v->v_parent_activity; + return v->v_activity; } -static _voucher_activity_t -_voucher_activity_create_with_atm(_voucher_atm_t vatm, - voucher_activity_id_t va_id, voucher_activity_trace_id_t trace_id, - uint64_t location, _voucher_activity_buffer_header_t buffer) +firehose_activity_id_t +voucher_get_activity_id(voucher_t v, firehose_activity_id_t *parent_id) { - if (!buffer) buffer = _voucher_activity_heap_buffer_alloc(); - if (!buffer) { - _dispatch_voucher_atm_debug("no buffer", vatm); - _voucher_atm_release(vatm); // consume vatm reference - return NULL; - } - _voucher_activity_t act = _dispatch_calloc(1ul, - sizeof(struct _voucher_activity_s)); - act->va_id = va_id; - act->va_trace_id = trace_id ? 
trace_id : _voucher_activity_trace_id_release; - act->va_location = location; - act->va_buffer_limit = _voucher_activity_buffer_limit(); - TAILQ_INIT(&act->va_buffers); - act->va_current_buffer = buffer; - act->va_atm = vatm; // transfer vatm reference - _voucher_activity_lock_init(va_buffers_lock(act)); - if (dispatch_assume_zero(pthread_mutex_init(&act->va_mutex, NULL)) || - dispatch_assume_zero(pthread_cond_init(&act->va_cond, NULL))) { - DISPATCH_CLIENT_CRASH("Could not initialize activity"); - } - _TAILQ_MARK_NOT_ENQUEUED(act, va_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_list); - _TAILQ_MARK_NOT_ENQUEUED(act, va_atm_used_list); - - _voucher_activity_buffer_init(act, buffer, true); - TAILQ_INSERT_TAIL(&act->va_buffers, buffer, vabh_list); - _voucher_activity_t actx = _voucher_activity_try_insert(act); - if (actx) { - _voucher_activity_dispose(act); - act = actx; - } - _dispatch_voucher_activity_debug("create", act); - return act; + return voucher_get_activity_id_and_creator(v, NULL, parent_id); } -static void -_voucher_activity_dispose(_voucher_activity_t act) -{ - _dispatch_voucher_activity_debug("dispose", act); - _voucher_atm_release(act->va_atm); - if (slowpath(_TAILQ_IS_ENQUEUED(act, va_list))) { - _dispatch_voucher_activity_debug("corruption", act); - DISPATCH_CRASH("Activity corruption"); - } - act->va_list.tqe_next = DISPATCH_OBJECT_LISTLESS; - dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_list)); - dispatch_assert(!_TAILQ_IS_ENQUEUED(act, va_atm_used_list)); - _voucher_activity_buffer_header_t buffer, tmp; - TAILQ_FOREACH_SAFE(buffer, &act->va_buffers, vabh_list, tmp) { - if (buffer->vabh_pos.vabp_pos.vabp_next_tracepoint_idx > 1) { - dispatch_assert(_voucher_activity_buffer_mark_full(buffer)); - _voucher_activity_firehose_push(act, buffer); - } - TAILQ_REMOVE(&act->va_buffers, buffer, vabh_list); - _dispatch_voucher_activity_debug("buffer free %p", act, buffer); - _voucher_activity_heap_buffer_free(buffer); - } - 
(void)dispatch_assume_zero(pthread_mutex_destroy(&act->va_mutex)); - (void)dispatch_assume_zero(pthread_cond_destroy(&act->va_cond)); - free(act); -} - -DISPATCH_NOINLINE void -_voucher_activity_firehose_push(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer) -{ - if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); - } - _dispatch_voucher_activity_debug("firehose push %p", act, buffer); - // TODO: call firehose_push - VOUCHER_ACTIVITY_BUFFER_HOOK_CALLOUT(full, buffer); - _voucher_activity_buffer_init(act, buffer, false); - if (dispatch_assume_zero(pthread_cond_broadcast(&act->va_cond))) { - DISPATCH_CLIENT_CRASH("Activity corruption: cond_broadcast"); - } - if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); - } -} - -DISPATCH_NOINLINE -static void -_voucher_activity_firehose_wait(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer) -{ - if (dispatch_assume_zero(pthread_mutex_lock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_lock"); - } - while (_voucher_activity_buffer_is_full(buffer)) { - _dispatch_voucher_activity_debug("firehose wait %p", act, buffer); - if (dispatch_assume_zero(pthread_cond_wait(&act->va_cond, - &act->va_mutex))){ - DISPATCH_CLIENT_CRASH("Activity corruption: cond_wait"); - } - } - if (dispatch_assume_zero(pthread_mutex_unlock(&act->va_mutex))) { - DISPATCH_CLIENT_CRASH("Activity corruption: mutex_unlock"); - } -} - -static _voucher_activity_t -_voucher_activity_copy_from_mach_voucher(mach_voucher_t kv, - voucher_activity_id_t va_id) -{ - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (_voucher_activity_disabled()) return NULL; - _voucher_activity_t act = NULL; - if (dispatch_assume(va_id)) { - if ((act = _voucher_activity_copy_from_id(va_id))) return act; - } - atm_aid_t atm_id = 
_voucher_mach_voucher_get_atm_id(kv); - if (!dispatch_assume(atm_id)) return NULL; - _voucher_activity_buffer_header_t buffer; - buffer = _voucher_activity_heap_buffer_alloc(); - if (!buffer) return NULL; - _dispatch_kvoucher_debug("atm copy/create from <%lld>", kv, atm_id); - _voucher_atm_t vatm = _voucher_atm_copy(atm_id); - if (!vatm) vatm = _voucher_atm_create(kv, atm_id); - if (!vatm) { - _voucher_activity_heap_buffer_free(buffer); - return NULL; - } - // consumes vatm reference: - act = _voucher_activity_create_with_atm(vatm, va_id, 0, 0, buffer); - _dispatch_voucher_activity_debug("copy from kvoucher[0x%08x]", act, kv); - return act; -} - -#pragma mark - -#pragma mark _voucher_atm_t - -static void _voucher_atm_remove(_voucher_atm_t vatm); -static void _voucher_atm_dispose(_voucher_atm_t vatm, bool unregister); - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_atm_try_retain(_voucher_atm_t vatm) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable and are in a hash table with a lock - // - // assumes vam_atms_lock held - int refcnt = dispatch_atomic_inc2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("retain -> %d", vatm, refcnt + 1); - if (slowpath(refcnt < 0)) { - _dispatch_voucher_atm_debug("overrelease", vatm); - DISPATCH_CRASH("ATM overrelease"); - } - return refcnt > 0; -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_atm_t -_voucher_atm_retain(_voucher_atm_t vatm) +voucher_activity_flush(firehose_stream_t stream) { - if (slowpath(!_voucher_atm_try_retain(vatm))) { - _dispatch_voucher_atm_debug("resurrection", vatm); - DISPATCH_CRASH("ATM resurrection"); - } - return vatm; + if (_voucher_activity_disabled()) return; + firehose_buffer_stream_flush(_firehose_task_buffer, stream); } DISPATCH_ALWAYS_INLINE -static inline void -_voucher_atm_release(_voucher_atm_t vatm) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable are into a hash 
table with a lock - int refcnt = dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("release -> %d", vatm, refcnt + 1); - if (fastpath(refcnt >= 0)) { - return; - } - if (slowpath(refcnt < -1)) { - _dispatch_voucher_atm_debug("overrelease", vatm); - DISPATCH_CRASH("ATM overrelease"); - } - _voucher_atm_remove(vatm); - _voucher_atm_dispose(vatm, true); -} - -static _voucher_atm_t -_voucher_atm_find_and_retain(atm_aid_t atm_id, uint32_t hash) -{ - // not using _os_object_refcnt* because we don't need barriers: - // vouchers atm are immutable are into a hash table with a lock - // - // assumes vam_atms_lock held - _voucher_atm_t vatm; - TAILQ_FOREACH(vatm, vam_atms(hash), vatm_list){ - if (vatm->vatm_id == atm_id) { - if (fastpath(_voucher_atm_try_retain(vatm))) { - return vatm; - } - - // disallow resurrection - dispatch_atomic_dec2o(vatm, vatm_refcnt, relaxed); - _dispatch_voucher_atm_debug("undo resurrection", vatm); - } - } - return NULL; -} - -static _voucher_atm_t -_voucher_atm_copy(atm_aid_t atm_id) -{ - uint32_t hash = VATMID_HASH(atm_id); - _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); - if (vatm) { - _dispatch_voucher_atm_debug("copy", vatm); - } - _voucher_activity_lock_unlock(vam_atms_lock()); - return vatm; -} - -static _voucher_atm_t -_voucher_atm_try_insert(_voucher_atm_t vatm_new) -{ - atm_aid_t atm_id = vatm_new->vatm_id; - uint32_t hash = VATMID_HASH(atm_id); - _voucher_activity_lock_lock(vam_atms_lock()); - _voucher_atm_t vatm = _voucher_atm_find_and_retain(atm_id, hash); - if (vatm) { - _dispatch_voucher_atm_debug("try insert: failed (%p)", vatm, vatm_new); - } else { - if (slowpath(_TAILQ_IS_ENQUEUED(vatm_new, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm_new); - DISPATCH_CRASH("ATM corruption"); - } - TAILQ_INSERT_TAIL(vam_atms(hash), vatm_new, vatm_list); - _dispatch_voucher_atm_debug("try insert: succeeded", vatm_new); - } - 
_voucher_activity_lock_unlock(vam_atms_lock()); - return vatm; -} - -static void -_voucher_atm_remove(_voucher_atm_t vatm) -{ - atm_aid_t atm_id = vatm->vatm_id; - uint32_t hash = VATMID_HASH(atm_id); +static inline firehose_tracepoint_id_t +_voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_u ftid, uint64_t stamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) +{ + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); + const size_t _firehose_chunk_payload_size = + sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data); + + if (_voucher_activity_disabled()) return 0; + + firehose_tracepoint_t ft; + firehose_activity_id_t va_id = 0; + firehose_buffer_chunk_t fbc; + uint8_t *privptr, *pubptr; + size_t pubsize = publen; + voucher_t ov = _voucher_get(); + uint64_t creator_pid; - _voucher_activity_lock_lock(vam_atms_lock()); - if (slowpath(!atm_id || !_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm); - DISPATCH_CRASH("ATM corruption"); + if ((va_id = _voucher_get_activity_id(ov, &creator_pid))) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, base, has_current_aid); + pubsize += sizeof(va_id); } - TAILQ_REMOVE(vam_atms(hash), vatm, vatm_list); - _TAILQ_MARK_NOT_ENQUEUED(vatm, vatm_list); - vatm->vatm_list.tqe_next = (void*)~0ull; - _dispatch_voucher_atm_debug("remove", vatm); - _voucher_activity_lock_unlock(vam_atms_lock()); -} - -DISPATCH_NOINLINE -static void -_voucher_atm_fault(mach_voucher_attr_command_t kvc_cmd) -{ - mach_voucher_t kv = _voucher_get_atm_mach_voucher(_voucher_get()); - if (!kv) return; - - mach_atm_subaid_t subaid = 0; - voucher_t v = _voucher_get(); - if (v) { - unsigned int activities = v->v_activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities) { - subaid = activity_ids[0]; + if (FIREHOSE_TRACE_ID_HAS_FLAG(ftid, base, has_unique_pid)) { + if (creator_pid) { + pubsize += sizeof(creator_pid); + } else { 
+ FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); } - } - - kern_return_t kr; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&subaid; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(mach_atm_subaid_t); - mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&subaid; - mach_voucher_attr_content_size_t kvc_out_size = sizeof(mach_atm_subaid_t); - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - kvc_cmd, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -} - -static atm_aid_t -_voucher_mach_voucher_get_atm_id(mach_voucher_t kv) -{ - kern_return_t kr; - atm_aid_t atm_id = 0; - mach_voucher_attr_content_t kvc = (mach_voucher_attr_content_t)&atm_id; - mach_voucher_attr_content_size_t kvc_size = sizeof(atm_id); - kr = mach_voucher_extract_attr_content(kv, MACH_VOUCHER_ATTR_KEY_ATM, kvc, - &kvc_size); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - return atm_id; -} - -static mach_voucher_t -_voucher_atm_mach_voucher_create(atm_aid_t *atm_id_ptr) -{ - kern_return_t kr; - mach_voucher_t kv; - static const mach_voucher_attr_recipe_data_t atm_create_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_ATM_CREATE, - }; - kr = _voucher_create_mach_voucher(&atm_create_recipe, - sizeof(atm_create_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not create ATM mach voucher"); - } - atm_aid_t atm_id = _voucher_mach_voucher_get_atm_id(kv); - if (!dispatch_assume(atm_id)) { - DISPATCH_CLIENT_CRASH("Could not extract ATM ID"); - } - _dispatch_kvoucher_debug("atm create <%lld>", kv, atm_id); - *atm_id_ptr = atm_id; - return kv; -} - -static mach_voucher_t -_voucher_atm_mach_voucher_copy(mach_voucher_t akv) -{ - kern_return_t kr; - mach_voucher_t kv; - const mach_voucher_attr_recipe_data_t atm_copy_recipe = { - .key = MACH_VOUCHER_ATTR_KEY_ATM, - .command = MACH_VOUCHER_ATTR_COPY, - 
.previous_voucher = akv, - }; - kr = _voucher_create_mach_voucher(&atm_copy_recipe, - sizeof(atm_copy_recipe), &kv); - if (dispatch_assume_zero(kr)) { - DISPATCH_CLIENT_CRASH("Could not copy ATM mach voucher"); - } - _dispatch_kvoucher_debug("copy atm voucher from [0x%08x]", kv, akv); - return kv; -} - -static void -_voucher_atm_register(_voucher_atm_t vatm) -{ - mach_voucher_t kv = vatm->vatm_kvoucher; - if (!kv) return; - kern_return_t kr; - atm_guard_t gen = - dispatch_atomic_inc(&_voucher_atm_generation, relaxed); - _dispatch_voucher_atm_debug("atm register %lld", vatm, gen); - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); - mach_voucher_attr_content_t kvc_out = NULL; - mach_voucher_attr_content_size_t kvc_out_size = 0; - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_REGISTER, kvc_in, kvc_in_size, kvc_out, - &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (kr) { - DISPATCH_CLIENT_CRASH("Could not register ATM ID"); - } - vatm->vatm_generation = gen; - _dispatch_voucher_atm_debug("atm registered %lld", vatm, - vatm->vatm_generation); -} - -static void -_voucher_atm_unregister(_voucher_atm_t vatm) -{ - _dispatch_voucher_atm_debug("atm unregister %lld", vatm, - vatm->vatm_generation); - mach_voucher_t kv = vatm->vatm_kvoucher; - dispatch_assert(kv); - kern_return_t kr; - atm_guard_t gen = vatm->vatm_generation; - mach_voucher_attr_content_t kvc_in = (mach_voucher_attr_content_t)&gen; - mach_voucher_attr_content_size_t kvc_in_size = sizeof(gen); - mach_voucher_attr_content_t kvc_out = NULL; - mach_voucher_attr_content_size_t kvc_out_size = 0; - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_UNREGISTER, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (kr && kr != KERN_INVALID_VALUE) { - (void)dispatch_assume_zero(kr); - } - _dispatch_voucher_atm_debug("atm unregistered %lld", vatm, - 
vatm->vatm_generation); -} - -static _voucher_atm_t -_voucher_atm_create(mach_voucher_t kv, atm_aid_t atm_id) -{ - _voucher_atm_t vatm = _dispatch_calloc(1ul, sizeof(struct _voucher_atm_s)); - kv = kv ? _voucher_atm_mach_voucher_copy(kv) : - _voucher_atm_mach_voucher_create(&atm_id); - vatm->vatm_kvoucher = kv; - vatm->vatm_id = atm_id; - _voucher_atm_t vatmx = _voucher_atm_try_insert(vatm); - if (vatmx) { - _voucher_atm_dispose(vatm, false); - vatm = vatmx; } else { - _voucher_atm_register(vatm); + creator_pid = 0; } - _dispatch_voucher_atm_debug("create with kvoucher[0x%08x]", vatm, kv); - return vatm; -} -static void -_voucher_atm_dispose(_voucher_atm_t vatm, bool unregister) -{ - _dispatch_voucher_atm_debug("dispose", vatm); - if (slowpath(_TAILQ_IS_ENQUEUED(vatm, vatm_list))) { - _dispatch_voucher_atm_debug("corruption", vatm); - DISPATCH_CRASH("ATM corruption"); - } - vatm->vatm_list.tqe_next = DISPATCH_OBJECT_LISTLESS; - if (vatm->vatm_kvoucher) { - if (unregister) _voucher_atm_unregister(vatm); - _voucher_dealloc_mach_voucher(vatm->vatm_kvoucher); - vatm->vatm_kvoucher = MACH_VOUCHER_NULL; + if (privlen) { + FIREHOSE_TRACE_ID_SET_FLAG(ftid, log, has_private_data); + pubsize += sizeof(struct firehose_buffer_range_s); } - free(vatm); -} -DISPATCH_NOINLINE -static voucher_activity_id_t -_voucher_atm_subid_make(_voucher_atm_t vatm, voucher_activity_flag_t flags) -{ - mach_voucher_t kv = vatm->vatm_kvoucher; - _dispatch_voucher_atm_debug("create subid from atm", vatm); - kern_return_t kr; - mach_atm_subaid_t naid; - mach_voucher_attr_content_t kvc_in = NULL; - mach_voucher_attr_content_size_t kvc_in_size = 0; - mach_voucher_attr_content_t kvc_out = (mach_voucher_attr_content_t)&naid; - mach_voucher_attr_content_size_t kvc_out_size = sizeof(naid); - kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_ATM, - ATM_ACTION_GETSUBAID, kvc_in, kvc_in_size, kvc_out, &kvc_out_size); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - 
DISPATCH_CLIENT_CRASH("Could not get next ATM ID"); + if (slowpath(ft_size + pubsize + privlen > _firehose_chunk_payload_size)) { + DISPATCH_CLIENT_CRASH(ft_size + pubsize + privlen, "Log is too large"); } - _dispatch_voucher_atm_debug("created subid from atm %lld", vatm, naid); - return VATMID2ACTID(naid, flags); -} -#pragma mark - -#pragma mark voucher_activity_id_t - -static const size_t _voucher_activity_maxsize = - _voucher_activity_buffer_size - _voucher_activity_buffer_header_size - - _voucher_activity_strings_header_size; - -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags) -{ - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - voucher_activity_id_t va_id = 0; - _voucher_atm_t vatm = NULL; - _voucher_activity_t act = NULL; - _voucher_activity_tracepoint_t vat = NULL; - unsigned int activities = 1, oactivities = 0; - voucher_t ov = _voucher_get(); - vatm = _voucher_get_atm(ov); - if (!(flags & voucher_activity_flag_force) && ov && ov->v_activities) { - oactivities = ov->v_activities; - activities += oactivities; - if (activities > _voucher_max_activities) { - va_id = _voucher_atm_subid_make(vatm, flags); - goto out; - } - } - va_id = _voucher_atm_subid_make(vatm, flags); - if (activities == 1) { - // consumes vatm reference: - act = _voucher_activity_create_with_atm(_voucher_atm_retain(vatm), - va_id, trace_id, location, NULL); - vat = (_voucher_activity_tracepoint_t)act; - } else if (ov && ov->v_activity) { - act = _voucher_activity_retain(ov->v_activity); - } - pthread_priority_t priority = _voucher_get_priority(ov); - mach_voucher_attr_recipe_size_t extra = ov ? 
_voucher_extra_size(ov) : 0; - voucher_t v = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(v), _voucher_extra_recipes(ov), extra); + ft = _voucher_activity_tracepoint_reserve(stamp, stream, (uint16_t)pubsize, + (uint16_t)privlen, &privptr); + if (!fastpath(ft)) return 0; + pubptr = ft->ft_data; + if (va_id) { + pubptr = _dispatch_memappend(pubptr, &va_id); } - if (ov && ov->v_kvoucher) { - voucher_t kvb = ov->v_kvbase ? ov->v_kvbase : ov; - v->v_kvbase = _voucher_retain(kvb); - v->v_kvoucher = kvb->v_kvoucher; + if (creator_pid) { + pubptr = _dispatch_memappend(pubptr, &creator_pid); } - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (oactivities) { - memcpy(activity_ids, _voucher_activity_ids(ov), - oactivities * sizeof(voucher_activity_id_t)); - } - activity_ids[activities-1] = va_id; - v->v_atm = _voucher_atm_retain(vatm); - v->v_activity = act; - _voucher_swap(ov, v); - if (vat) return va_id; // new activity buffer contains trace info -out: - _voucher_activity_trace_activity_event(trace_id, va_id, start); - return va_id; -} - -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags) -{ - return voucher_activity_start_with_location(trace_id, 0, flags); -} - -void -voucher_activity_end(voucher_activity_id_t va_id) -{ - if (!va_id) return; - _voucher_activity_trace_activity_event(_voucher_activity_trace_id_release, - va_id, end); - voucher_t v = _voucher_get(); - if (!v) return; - unsigned int activities = v->v_activities, act_idx = activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - while (act_idx) { - if (activity_ids[act_idx-1] == va_id) break; - act_idx--; - } - if (!act_idx) return; // activity_id not found - pthread_priority_t priority = _voucher_get_priority(v); - mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); - voucher_t nv = NULL; - if (act_idx > 1 || activities == 1) --activities; - 
if (priority || activities || extra || v->v_kvoucher) { - nv = _voucher_alloc(activities, priority, extra); - if (extra) { - memcpy(_voucher_extra_recipes(nv), _voucher_extra_recipes(v),extra); - } - } - if (v->v_kvoucher) { - voucher_t kvb = v->v_kvbase ? v->v_kvbase : v; - nv->v_kvbase = _voucher_retain(kvb); - nv->v_kvoucher = kvb->v_kvoucher; - } - bool atm_collect = !activities; - if (activities) { - voucher_activity_id_t *new_activity_ids = _voucher_activity_ids(nv); - if (act_idx == 1 && _voucher_activity_default) { - atm_collect = true; - // default to _voucher_activity_default base activity - new_activity_ids[0] = _voucher_activity_default->va_id; - memcpy(&new_activity_ids[1], &activity_ids[1], - (activities - 1) * sizeof(voucher_activity_id_t)); - } else { - if (v->v_activity) { - nv->v_activity = _voucher_activity_retain(v->v_activity); - nv->v_atm = _voucher_atm_retain(v->v_atm); - } - memcpy(new_activity_ids, activity_ids, - --act_idx * sizeof(voucher_activity_id_t)); - if (act_idx < activities) { - memcpy(&new_activity_ids[act_idx], &activity_ids[act_idx+1], - (activities - act_idx) * sizeof(voucher_activity_id_t)); - } - } + if (privlen) { + fbc = firehose_buffer_chunk_for_address(ft); + struct firehose_buffer_range_s range = { + .fbr_offset = (uint16_t)(privptr - fbc->fbc_start), + .fbr_length = (uint16_t)privlen, + }; + pubptr = _dispatch_memappend(pubptr, &range); + _dispatch_mempcpy(privptr, privdata, privlen); } - _voucher_swap(v, nv); + _dispatch_mempcpy(pubptr, pubdata, publen); + _voucher_activity_tracepoint_flush(ft, ftid); + return ftid.ftid_value; } -unsigned int -voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) +firehose_tracepoint_id_t +voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen) { - voucher_t v = _voucher_get(); - if (!v || !count) return 0; - unsigned int activities = v->v_activities; - if (*count < 
activities) activities = *count; - *count = v->v_activities; - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - if (activities && entries) { - memcpy(entries, activity_ids, activities * - sizeof(voucher_activity_id_t)); - } - return activities; + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; + return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen, + NULL, 0); } -uint8_t -voucher_activity_get_namespace(void) +firehose_tracepoint_id_t +voucher_activity_trace_with_private_strings(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) { - voucher_t v = _voucher_get(); - if (!v || !v->v_activity) return 0; - voucher_activity_trace_id_t trace_id = v->v_activity->va_trace_id; - uint8_t cns = (uint8_t)(trace_id >> - _voucher_activity_trace_id_code_namespace_shift); - return cns; -} - -DISPATCH_NOINLINE -_voucher_activity_tracepoint_t -_voucher_activity_buffer_tracepoint_acquire_slow(_voucher_activity_t *vap, - _voucher_activity_buffer_header_t *vabp, unsigned int slots, - size_t strsize, uint16_t *stroffsetp) -{ - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat = NULL; - voucher_t v = _voucher_get(); - if (v && v->v_activity) { - act = v->v_activity; - } else { - dispatch_once_f(&_voucher_activity_heap_pred, NULL, - _voucher_activity_heap_init); - if (_voucher_activity_disabled()) return NULL; - act = _voucher_activity_default; - } - vab = act->va_current_buffer; - if (act == *vap && vab != *vabp) { - goto retry; // another slowpath raced us - } - do { - vab = _voucher_activity_buffer_alloc(act, vab); - if (!vab) break; -retry: - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strsize, - stroffsetp); - } while (!vat); - *vap = act; - *vabp = vab; - return vat; -} - -static inline void -_voucher_activity_trace_fault(voucher_activity_trace_id_t trace_id) 
-{ - if (!slowpath(_voucher_activity_trace_id_is_subtype(trace_id, error))) { - return; - } - mach_voucher_attr_command_t atm_cmd = ATM_ACTION_COLLECT; - if (_voucher_activity_trace_id_is_subtype(trace_id, fault)) { - atm_cmd = ATM_ACTION_LOGFAIL; - } - return _voucher_atm_fault(atm_cmd); -} - -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, 0, NULL); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, - slots, 0, NULL); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, true); - void *tbuf = vat->vat_data; - size_t tlen = sizeof(vat->vat_data); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - if (length > tlen) { - vat->vat_flags |= _voucher_activity_trace_flag_wide_first; - buffer += tlen; - length -= tlen; - (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | - _voucher_activity_trace_flag_wide_second; - vat->vat_type = 0; vat->vat_namespace = 0; - tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); - tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - } - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; -} - -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void 
*buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t strings_size) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - uint16_t offset; - const unsigned int slots = length <= sizeof(vat->vat_data) ? 1 : 2; - strings_size = MIN(strings_size, _voucher_activity_maxsize - - slots * sizeof(struct _voucher_activity_tracepoint_s)); - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, slots, strings_size, - &offset); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, - slots, strings_size, &offset); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, false); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_strings; - vat->vat_stroff.vats_offset = offset; - void *tbuf = vat->vat_stroff.vats_data; - size_t tlen = sizeof(vat->vat_stroff.vats_data); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - if (length > tlen) { - vat->vat_flags |= _voucher_activity_trace_flag_wide_first; - buffer += tlen; - length -= tlen; - (++vat)->vat_flags = _voucher_activity_trace_flag_tracepoint | - _voucher_activity_trace_flag_wide_second; - vat->vat_type = 0; vat->vat_namespace = 0; - tbuf = (void*)vat + offsetof(typeof(*vat), vat_code); - tlen = sizeof(*vat) - offsetof(typeof(*vat), vat_code); - if (length < tlen) { - memcpy(tbuf, buffer, length); - } else { - memcpy(tbuf, buffer, tlen); - } - } - const uint16_t offsetend = offset - (uint16_t)strings_size; - char *b = (char*)vab + _voucher_activity_buffer_size; - int i = 0; - while (offset > offsetend && strings[i]) { - size_t maxsize = MIN(string_lengths[i] + 1, offset - offsetend); - size_t len = strlcpy(b - offset, strings[i++], maxsize); - 
offset -= MIN(len + 1, maxsize); - } - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; -} - -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) -{ - if (!_voucher_activity_trace_id_enabled(trace_id)) return 0; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) { - vat = _voucher_activity_buffer_tracepoint_acquire_slow(&act, &vab, 1, - 0, NULL); - } - if (!vat) return 0; - uint64_t timestamp = _voucher_activity_tracepoint_init_with_id(vat, - trace_id, location, true); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; - vat->vat_data[0] = arg1; - vat->vat_data[1] = arg2; - vat->vat_data[2] = arg3; - vat->vat_data[3] = arg4; - _voucher_activity_trace_fault(trace_id); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } - return timestamp; + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; + return _voucher_activity_trace(stream, ftid, timestamp, + pubdata, publen, privdata, privlen); } #pragma mark - @@ -2312,40 +1374,27 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) size_t offset = 0; #define bufprintf(...) 
\ offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__) - bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x, ", v, + bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x", v, v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1); if (v->v_kvbase) { - bufprintf("base voucher %p, ", v->v_kvbase); + bufprintf(", base voucher %p", v->v_kvbase); } if (v->v_kvoucher) { - bufprintf("kvoucher%s 0x%x, ", v->v_kvoucher == v->v_ipc_kvoucher ? + bufprintf(", kvoucher%s 0x%x", v->v_kvoucher == v->v_ipc_kvoucher ? " & ipc kvoucher" : "", v->v_kvoucher); } if (v->v_ipc_kvoucher && v->v_ipc_kvoucher != v->v_kvoucher) { - bufprintf("ipc kvoucher 0x%x, ", v->v_ipc_kvoucher); + bufprintf(", ipc kvoucher 0x%x", v->v_ipc_kvoucher); } - if (v->v_has_priority) { - bufprintf("QOS 0x%x, ", *_voucher_priority(v)); - } - if (v->v_activities) { - voucher_activity_id_t *activity_ids = _voucher_activity_ids(v); - bufprintf("activity IDs = { "); - unsigned int i; - for (i = 0; i < v->v_activities; i++) { - bufprintf("0x%llx, ", *activity_ids++); - } - bufprintf("}, "); + if (v->v_priority) { + bufprintf(", QOS 0x%x", v->v_priority); } if (v->v_activity) { - _voucher_activity_t va = v->v_activity; - _voucher_atm_t vatm = va->va_atm; - bufprintf("activity[%p] = { ID 0x%llx, ref %d, atm[%p] = { " - "AID 0x%llx, ref %d, kvoucher 0x%x } }, ", va, va->va_id, - va->va_refcnt + 1, va->va_atm, vatm->vatm_id, - vatm->vatm_refcnt + 1, vatm->vatm_kvoucher); - } - bufprintf("}"); + bufprintf(", activity 0x%llx (pid: 0x%16llx, parent 0x%llx)", + v->v_activity, v->v_activity_creator, v->v_parent_activity); + } + bufprintf(" }"); return offset; } @@ -2416,6 +1465,13 @@ _voucher_dealloc_mach_voucher(mach_voucher_t kv) (void)kv; } +mach_voucher_t +_voucher_get_mach_voucher(voucher_t voucher) +{ + (void)voucher; + return MACH_VOUCHER_NULL; +} + mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher, pthread_priority_t priority) @@ -2467,101 +1523,113 @@ 
_voucher_dispose(voucher_t voucher) (void)voucher; } -void -_voucher_atfork_child(void) +#if VOUCHER_EXPORT_PERSONA_SPI +uid_t +voucher_get_current_persona(void) { + return PERSONA_ID_NONE; } -void -_voucher_init(void) +int +voucher_get_current_persona_originator_info(struct proc_persona_info *persona_info) { + (void)persona_info; + return -1; } -void* -voucher_activity_get_metadata_buffer(size_t *length) +int +voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_info) { - *length = 0; - return NULL; + (void)persona_info; + return -1; } +#endif void -voucher_activity_buffer_hook_install_4libtrace( - _voucher_activity_buffer_hook_t hook) +_voucher_activity_debug_channel_init(void) { - (void)hook; } void -_voucher_activity_heap_pressure_normal(void) +_voucher_atfork_child(void) { } void -_voucher_activity_heap_pressure_warn(void) +_voucher_init(void) { } -voucher_activity_id_t -voucher_activity_start_with_location(voucher_activity_trace_id_t trace_id, - uint64_t location, voucher_activity_flag_t flags) +void* +voucher_activity_get_metadata_buffer(size_t *length) { - (void)trace_id; (void)location; (void)flags; - return 0; + *length = 0; + return NULL; } -voucher_activity_id_t -voucher_activity_start(voucher_activity_trace_id_t trace_id, - voucher_activity_flag_t flags) +voucher_t +voucher_activity_create(firehose_tracepoint_id_t trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) { - (void)trace_id; (void)flags; - return 0; + (void)trace_id; (void)base; (void)flags; (void)location; + return NULL; } -void -voucher_activity_end(voucher_activity_id_t activity_id) +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t location) { - (void)activity_id; + (void)trace_id; (void)base; (void)flags; (void)location; + return NULL; } -unsigned int -voucher_get_activities(voucher_activity_id_t *entries, unsigned int *count) 
+firehose_activity_id_t +voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id) { - (void)entries; (void)count; + (void)voucher; (void)parent_id; return 0; } -uint8_t -voucher_activity_get_namespace(void) +firehose_activity_id_t +voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, + firehose_activity_id_t *parent_id) { + if (creator_pid) *creator_pid = 0; + (void)voucher; (void)parent_id; return 0; } -uint64_t -voucher_activity_trace(voucher_activity_trace_id_t trace_id, uint64_t location, - void *buffer, size_t length) +firehose_tracepoint_id_t +voucher_activity_trace(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen) { - (void)trace_id; (void)location; (void)buffer; (void)length; + (void)stream; (void)trace_id; (void)timestamp; + (void)pubdata; (void)publen; return 0; } -uint64_t -voucher_activity_trace_strings(voucher_activity_trace_id_t trace_id, - uint64_t location, void *buffer, size_t length, const char *strings[], - size_t string_lengths[], size_t strings_size) +firehose_tracepoint_id_t +voucher_activity_trace_with_private_strings(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const void *pubdata, size_t publen, + const void *privdata, size_t privlen) { - (void)trace_id; (void)location; (void)buffer; (void)length; (void)strings; - (void)string_lengths; (void)strings_size; + (void)stream; (void)trace_id; (void)timestamp; + (void)pubdata; (void)publen; (void)privdata; (void)privlen; return 0; } -uint64_t -voucher_activity_trace_args(voucher_activity_trace_id_t trace_id, - uint64_t location, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) +void +voucher_activity_flush(firehose_stream_t stream) { - (void)trace_id; (void)location; - (void)arg1; (void)arg2; (void)arg3; (void)arg4; - return 0; + (void)stream; +} + +void +voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) +{ 
+ (void)hooks; } size_t @@ -2572,3 +1640,17 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) } #endif // VOUCHER_USE_MACH_VOUCHER + +#else // DISPATCH_VARIANT_DYLD_STUB + +firehose_activity_id_t +voucher_get_activity_id_4dyld(void) +{ +#if VOUCHER_USE_MACH_VOUCHER + return _voucher_get_activity_id(_voucher_get(), NULL); +#else + return 0; +#endif +} + +#endif // DISPATCH_VARIANT_DYLD_STUB diff --git a/src/voucher_internal.h b/src/voucher_internal.h index cc5ae2298..3aa1a6579 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -40,12 +40,7 @@ * @group Voucher Creation SPI * SPI intended for clients that need to create vouchers. */ - -#if OS_OBJECT_USE_OBJC -OS_OBJECT_DECL(voucher_recipe); -#else -typedef struct voucher_recipe_s *voucher_recipe_t; -#endif +OS_OBJECT_DECL_CLASS(voucher_recipe); /*! * @function voucher_create @@ -92,29 +87,11 @@ voucher_get_mach_voucher(voucher_t voucher); #pragma mark - #pragma mark voucher_t -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 -#undef VOUCHER_USE_MACH_VOUCHER -#define VOUCHER_USE_MACH_VOUCHER 0 -#endif -#ifndef VOUCHER_USE_MACH_VOUCHER -#if __has_include() -#define VOUCHER_USE_MACH_VOUCHER 1 -#endif -#endif - -#if VOUCHER_USE_MACH_VOUCHER -#undef DISPATCH_USE_IMPORTANCE_ASSERTION -#define DISPATCH_USE_IMPORTANCE_ASSERTION 0 -#else -#undef MACH_RCV_VOUCHER -#define MACH_RCV_VOUCHER 0 -#endif // VOUCHER_USE_MACH_VOUCHER - void _voucher_init(void); void _voucher_atfork_child(void); -void _voucher_activity_heap_pressure_warn(void); -void _voucher_activity_heap_pressure_normal(void); +void _voucher_activity_debug_channel_init(void); +void _voucher_activity_swap(firehose_activity_id_t old_id, + firehose_activity_id_t new_id); void _voucher_xref_dispose(voucher_t voucher); void _voucher_dispose(voucher_t voucher); size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz); @@ -128,21 +105,13 @@ voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t 
voucher, pthread_priority_t priority, mach_voucher_t kv); void _voucher_dealloc_mach_voucher(mach_voucher_t kv); -#if OS_OBJECT_USE_OBJC -_OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher, object) #if VOUCHER_ENABLE_RECIPE_OBJECTS _OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object) #endif -#endif voucher_t voucher_retain(voucher_t voucher); void voucher_release(voucher_t voucher); -#define _TAILQ_IS_ENQUEUED(elm, field) \ - ((elm)->field.tqe_prev != NULL) -#define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ - do { (elm)->field.tqe_prev = NULL; } while (0) - #define VOUCHER_NO_MACH_VOUCHER MACH_PORT_DEAD #if VOUCHER_USE_MACH_VOUCHER @@ -152,29 +121,69 @@ void voucher_release(voucher_t voucher); #define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1 #endif +#if VOUCHER_USE_MACH_VOUCHER_PRIORITY +#include +#endif + +typedef uint32_t _voucher_magic_t; +typedef uint32_t _voucher_priority_t; + +#define VOUCHER_MAGIC_V3 ((_voucher_magic_t)0x0390cefa) // FACE9003 + +typedef struct _voucher_mach_udata_s { + _voucher_magic_t vmu_magic; + _voucher_priority_t vmu_priority; + uint8_t _vmu_after_priority[0]; + firehose_activity_id_t vmu_activity; + uint64_t vmu_activity_pid; + firehose_activity_id_t vmu_parent_activity; + uint8_t _vmu_after_activity[0]; +} _voucher_mach_udata_s; + +OS_ENUM(voucher_fields, uint16_t, + VOUCHER_FIELD_NONE = 0, + VOUCHER_FIELD_KVOUCHER = 1u << 0, + VOUCHER_FIELD_PRIORITY = 1u << 1, + VOUCHER_FIELD_ACTIVITY = 1u << 2, + +#if VOUCHER_ENABLE_RECIPE_OBJECTS + VOUCHER_FIELD_EXTRA = 1u << 15, +#else + VOUCHER_FIELD_EXTRA = 0, +#endif +); + typedef struct voucher_s { _OS_OBJECT_HEADER( - void *os_obj_isa, + struct voucher_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); TAILQ_ENTRY(voucher_s) v_list; mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference - struct _voucher_atm_s *v_atm; - struct _voucher_activity_s *v_activity; + firehose_activity_id_t v_activity; + uint64_t 
v_activity_creator; + firehose_activity_id_t v_parent_activity; + _voucher_priority_t v_priority; + unsigned int v_kv_has_importance:1; #if VOUCHER_ENABLE_RECIPE_OBJECTS size_t v_recipe_extra_offset; mach_voucher_attr_recipe_size_t v_recipe_extra_size; #endif - unsigned int v_has_priority:1; - unsigned int v_activities; - mach_voucher_attr_recipe_data_t v_recipes[]; } voucher_s; +#if VOUCHER_ENABLE_RECIPE_OBJECTS +#define _voucher_extra_size(v) ((v)->v_recipe_extra_size) +#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) +#else +#define _voucher_extra_size(v) 0 +#define _voucher_extra_recipes(v) NULL +#endif + #if VOUCHER_ENABLE_RECIPE_OBJECTS typedef struct voucher_recipe_s { _OS_OBJECT_HEADER( - const _os_object_class_s *os_obj_isa, + const _os_object_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); size_t vr_allocation_size; @@ -183,14 +192,6 @@ typedef struct voucher_recipe_s { } voucher_recipe_s; #endif -#define _voucher_recipes_base(r) (r[0]) -#define _voucher_recipes_atm(r) (r[1]) -#define _voucher_recipes_bits(r) (r[2]) -#define _voucher_base_recipe(v) (_voucher_recipes_base((v)->v_recipes)) -#define _voucher_atm_recipe(v) (_voucher_recipes_atm((v)->v_recipes)) -#define _voucher_bits_recipe(v) (_voucher_recipes_bits((v)->v_recipes)) -#define _voucher_recipes_size() (3 * sizeof(mach_voucher_attr_recipe_data_t)) - #if TARGET_OS_EMBEDDED #define VL_HASH_SIZE 64u // must be a power of two #else @@ -198,44 +199,24 @@ typedef struct voucher_recipe_s { #endif #define VL_HASH(kv) (MACH_PORT_INDEX(kv) & (VL_HASH_SIZE - 1)) -typedef uint32_t _voucher_magic_t; -const _voucher_magic_t _voucher_magic_v1 = 0x0190cefa; // little-endian FACE9001 -#define _voucher_recipes_magic(r) ((_voucher_magic_t*) \ - (_voucher_recipes_bits(r).content)) -#define _voucher_magic(v) _voucher_recipes_magic((v)->v_recipes) -typedef uint32_t _voucher_priority_t; -#define _voucher_recipes_priority(r) ((_voucher_priority_t*) \ - (_voucher_recipes_bits(r).content 
+ sizeof(_voucher_magic_t))) -#define _voucher_priority(v) _voucher_recipes_priority((v)->v_recipes) -#define _voucher_activity_ids(v) ((voucher_activity_id_t*) \ - (_voucher_bits_recipe(v).content + sizeof(_voucher_magic_t) + \ - sizeof(_voucher_priority_t))) -#define _voucher_bits_size(activities) \ - (sizeof(_voucher_magic_t) + sizeof(_voucher_priority_t) + \ - (activities) * sizeof(voucher_activity_id_t)) - -#if VOUCHER_ENABLE_RECIPE_OBJECTS -#define _voucher_extra_size(v) ((v)->v_recipe_extra_size) -#define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) -#else -#define _voucher_extra_size(v) 0 -#define _voucher_extra_recipes(v) NULL -#endif - #if DISPATCH_DEBUG && DISPATCH_VOUCHER_DEBUG #define _dispatch_voucher_debug(msg, v, ...) \ _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__) #define _dispatch_kvoucher_debug(msg, kv, ...) \ _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__) +#if DISPATCH_MACHPORT_DEBUG #define _dispatch_voucher_debug_machport(name) \ dispatch_debug_machport((name), __func__) #else +#define _dispatch_voucher_debug_machport(name) ((void)(name)) +#endif +#else #define _dispatch_voucher_debug(msg, v, ...) #define _dispatch_kvoucher_debug(msg, kv, ...) 
#define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif -#if !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +#if DISPATCH_PURE_C DISPATCH_ALWAYS_INLINE static inline voucher_t @@ -244,11 +225,10 @@ _voucher_retain(voucher_t voucher) #if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = dispatch_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); - if (slowpath(xref_cnt <= 0)) { - _dispatch_voucher_debug("resurrection", voucher); - DISPATCH_CRASH("Voucher resurrection"); + if (unlikely(xref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Voucher resurrection"); } #else os_retain(voucher); @@ -265,14 +245,13 @@ _voucher_release(voucher_t voucher) #if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = dispatch_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); - if (fastpath(xref_cnt >= 0)) { + if (likely(xref_cnt >= 0)) { return; } - if (slowpath(xref_cnt < -1)) { - _dispatch_voucher_debug("overrelease", voucher); - DISPATCH_CRASH("Voucher overrelease"); + if (unlikely(xref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } return _os_object_xref_dispose((_os_object_t)voucher); #else @@ -281,6 +260,25 @@ _voucher_release(voucher_t voucher) #endif // DISPATCH_DEBUG } +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release_no_dispose(voucher_t voucher) +{ +#if !DISPATCH_VOUCHER_OBJC_DEBUG + // not using _os_object_refcnt* because we don't need barriers: + // vouchers are immutable and are in a hash table with a lock + int xref_cnt = 
os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); + if (likely(xref_cnt >= 0)) { + return; + } + _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); +#else + _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); + return os_release(voucher); +#endif // DISPATCH_DEBUG +} + DISPATCH_ALWAYS_INLINE static inline voucher_t _voucher_get(void) @@ -311,7 +309,7 @@ static inline void _voucher_mach_voucher_set(mach_voucher_t kv) { if (kv == VOUCHER_NO_MACH_VOUCHER) return; - _dispatch_set_priority_and_mach_voucher(0, kv); + _dispatch_set_priority_and_mach_voucher_slow(0, kv); } DISPATCH_ALWAYS_INLINE @@ -323,17 +321,12 @@ _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) _dispatch_thread_setspecific(dispatch_voucher_key, voucher); mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL; + firehose_activity_id_t aid = voucher ? voucher->v_activity : 0; + firehose_activity_id_t oaid = ov ? ov->v_activity : 0; + if (aid != oaid) _voucher_activity_swap(aid, oaid); return (kv != okv) ? kv : VOUCHER_NO_MACH_VOUCHER; } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_swap(voucher_t ov, voucher_t voucher) -{ - _voucher_mach_voucher_set(_voucher_swap_and_get_mach_voucher(ov, voucher)); - if (ov) _voucher_release(ov); -} - DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _voucher_adopt(voucher_t voucher) @@ -347,8 +340,8 @@ DISPATCH_ALWAYS_INLINE static inline void _voucher_replace(voucher_t voucher) { - voucher_t ov = _voucher_get(); - _voucher_swap(ov, voucher); + voucher_t ov = _voucher_adopt(voucher); + if (ov) _voucher_release(ov); } DISPATCH_ALWAYS_INLINE @@ -360,16 +353,27 @@ _voucher_clear(void) DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_voucher_get_priority(voucher_t voucher) +_voucher_get_priority(voucher_t v) +{ + return v ? 
(pthread_priority_t)v->v_priority : 0; +} + +DISPATCH_ALWAYS_INLINE +static inline firehose_activity_id_t +_voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) { - return voucher && voucher->v_has_priority ? - (pthread_priority_t)*_voucher_priority(voucher) : 0; + if (creator_pid) *creator_pid = v ? v->v_activity_creator : 0; + return v ? v->v_activity : 0; } void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; - +#if VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER +#define _voucher_default_task_mach_voucher MACH_VOUCHER_NULL +#else +extern mach_voucher_t _voucher_default_task_mach_voucher; +#endif DISPATCH_ALWAYS_INLINE static inline mach_voucher_t _voucher_get_task_mach_voucher(void) @@ -411,12 +415,17 @@ _voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) DISPATCH_ALWAYS_INLINE static inline mach_voucher_t -_voucher_mach_msg_get(mach_msg_header_t *msg) +_voucher_mach_msg_get(mach_msg_header_t *msg, mach_msg_bits_t *msgh_bits) { - if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) return MACH_VOUCHER_NULL; + if (!MACH_MSGH_BITS_HAS_VOUCHER(msg->msgh_bits)) { + *msgh_bits = 0; + return MACH_VOUCHER_NULL; + } mach_voucher_t kv = msg->msgh_voucher_port; msg->msgh_voucher_port = MACH_VOUCHER_NULL; - msg->msgh_bits &= (mach_msg_bits_t)~MACH_MSGH_BITS_VOUCHER_MASK; + mach_msg_bits_t mask = MACH_MSGH_BITS_VOUCHER_MASK|MACH_MSGH_BITS_RAISEIMP; + *msgh_bits = msg->msgh_bits & mask; + msg->msgh_bits &= ~mask; return kv; } @@ -449,440 +458,111 @@ _voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) #pragma mark - #pragma mark dispatch_continuation_t + voucher_t -#if DISPATCH_USE_KDEBUG_TRACE +#if DISPATCH_USE_VOUCHER_KDEBUG_TRACE +#define DISPATCH_VOUCHER_CODE(code) DISPATCH_CODE(VOUCHER, code) +#else +#define DISPATCH_VOUCHER_CODE(code) 0 +#endif // DISPATCH_USE_VOUCHER_KDEBUG_TRACE + +#define DISPATCH_TRACE_VOUCHER_DC_PUSH DISPATCH_VOUCHER_CODE(0x1) 
+#define DISPATCH_TRACE_VOUCHER_DC_POP DISPATCH_VOUCHER_CODE(0x2) +#define DISPATCH_TRACE_VOUCHER_DMSG_PUSH DISPATCH_VOUCHER_CODE(0x3) +#define DISPATCH_TRACE_VOUCHER_DMSG_POP DISPATCH_VOUCHER_CODE(0x4) +#define DISPATCH_TRACE_VOUCHER_ACTIVITY_ADOPT DISPATCH_VOUCHER_CODE(0x5) + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_voucher_ktrace(int code, natural_t voucher, void *container) +_dispatch_voucher_ktrace(uint32_t code, voucher_t v, const void *container) { - if (!voucher) return; - __kdebug_trace(APPSDBG_CODE(DBG_MACH_CHUD, (0xfac >> 2)) | DBG_FUNC_NONE, - code, (int)voucher, (int)(uintptr_t)container, -#ifdef __LP64__ - (int)((uintptr_t)container >> 32) -#else - 0 -#endif - ); + if (v == DISPATCH_NO_VOUCHER) return; + natural_t voucher = v ? v->v_kvoucher : MACH_VOUCHER_NULL; + _dispatch_ktrace2(code, voucher, (uintptr_t)container); } +#define _dispatch_voucher_ktrace(code, v, container) \ + _dispatch_voucher_ktrace(DISPATCH_TRACE_VOUCHER_##code, v, container) #define _dispatch_voucher_ktrace_dc_push(dc) \ - _dispatch_voucher_ktrace(0x1, (dc)->dc_voucher ? \ - (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) -#define _dispatch_voucher_ktrace_dc_pop(dc) \ - _dispatch_voucher_ktrace(0x2, (dc)->dc_voucher ? \ - (dc)->dc_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dc)) + _dispatch_voucher_ktrace(DC_PUSH, (dc)->dc_voucher, (dc)) +#define _dispatch_voucher_ktrace_dc_pop(dc, v) \ + _dispatch_voucher_ktrace(DC_POP, v, (dc)) #define _dispatch_voucher_ktrace_dmsg_push(dmsg) \ - _dispatch_voucher_ktrace(0x3, (dmsg)->dmsg_voucher ? \ - (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) + _dispatch_voucher_ktrace(DMSG_PUSH, (dmsg)->dmsg_voucher, (dmsg)) #define _dispatch_voucher_ktrace_dmsg_pop(dmsg) \ - _dispatch_voucher_ktrace(0x4, (dmsg)->dmsg_voucher ? 
\ - (dmsg)->dmsg_voucher->v_kvoucher : MACH_VOUCHER_NULL, (dmsg)) -#else -#define _dispatch_voucher_ktrace_dc_push(dc) -#define _dispatch_voucher_ktrace_dc_pop(dc) -#define _dispatch_voucher_ktrace_dmsg_push(dmsg) -#define _dispatch_voucher_ktrace_dmsg_pop(dmsg) -#endif // DISPATCH_USE_KDEBUG_TRACE + _dispatch_voucher_ktrace(DMSG_POP, (dmsg)->dmsg_voucher, (dmsg)) +#define _dispatch_voucher_ktrace_activity_adopt(aid) \ + _dispatch_ktrace1(DISPATCH_TRACE_VOUCHER_ACTIVITY_ADOPT, aid); DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - dispatch_block_flags_t flags) + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) { - unsigned long bits = (unsigned long)dc->do_vtable; voucher_t v = NULL; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - bits |= DISPATCH_OBJ_HAS_VOUCHER_BIT; + // _dispatch_continuation_voucher_set is never called for blocks with + // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set. + // only _dispatch_continuation_init_slow handles this bit. + dispatch_assert(!(flags & DISPATCH_BLOCK_HAS_VOUCHER)); + + if (dqu._oq->oq_override_voucher != DISPATCH_NO_VOUCHER) { + // if the queue has an override voucher, we should not capture anything + // + // if the continuation is enqueued before the queue is activated, then + // this optimization fails and we do capture whatever is current + // + // _dispatch_continuation_voucher_adopt() would do the right thing + // but using DISPATCH_NO_VOUCHER here is more efficient. 
+ v = DISPATCH_NO_VOUCHER; } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { v = _voucher_copy(); } - dc->do_vtable = (void*)bits; dc->dc_voucher = v; _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); _dispatch_voucher_ktrace_dc_push(dc); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) -{ - unsigned long bits = (unsigned long)dc->do_vtable; - voucher_t v = DISPATCH_NO_VOUCHER; - if (!(bits & DISPATCH_OBJ_HAS_VOUCHER_BIT)) { - _dispatch_voucher_ktrace_dc_pop(dc); - _dispatch_voucher_debug("continuation[%p] adopt", dc->dc_voucher, dc); - v = dc->dc_voucher; - dc->dc_voucher = NULL; - } - _dispatch_adopt_priority_and_replace_voucher(dc->dc_priority, v, 0); -} - -#pragma mark - -#pragma mark _voucher_activity_heap - -typedef uint32_t _voucher_atm_subid_t; -static const size_t _voucher_activity_hash_bits = 6; -static const size_t _voucher_activity_hash_size = - 1 << _voucher_activity_hash_bits; -#define VACTID_HASH(x) \ - (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) -#define VATMID_HASH(x) \ - (((uint32_t)(x) * 2654435761u) >> (32-_voucher_activity_hash_bits)) -#define VATMID2ACTID(x, flags) \ - (((voucher_activity_id_t)(x) & 0xffffffffffffff) | \ - (((voucher_activity_id_t)(flags) & 0xfe) << 55)) - -typedef struct _voucher_activity_metadata_s { - _voucher_activity_buffer_t vam_client_metadata; - struct _voucher_activity_metadata_opaque_s *vasm_baseaddr; - _voucher_activity_bitmap_t volatile vam_buffer_bitmap; - _voucher_activity_bitmap_t volatile vam_pressure_locked_bitmap; - _voucher_activity_lock_s vam_atms_lock; - _voucher_activity_lock_s vam_activities_lock; - TAILQ_HEAD(, _voucher_atm_s) vam_atms[_voucher_activity_hash_size]; - TAILQ_HEAD(, _voucher_activity_s) - vam_activities[_voucher_activity_hash_size]; -} *_voucher_activity_metadata_t; - -#pragma mark - -#pragma mark _voucher_atm_t - -typedef struct _voucher_atm_s { - int32_t volatile vatm_refcnt; - 
mach_voucher_t vatm_kvoucher; - atm_aid_t vatm_id; - atm_guard_t vatm_generation; - TAILQ_ENTRY(_voucher_atm_s) vatm_list; -#if __LP64__ - uintptr_t vatm_pad[3]; - // cacheline -#endif -} *_voucher_atm_t; - -extern _voucher_atm_t _voucher_task_atm; - -#pragma mark - -#pragma mark _voucher_activity_t - -typedef struct _voucher_activity_s { - voucher_activity_id_t va_id; - voucher_activity_trace_id_t va_trace_id; - uint64_t va_location; - int32_t volatile va_refcnt; - uint32_t volatile va_buffer_count; - uint32_t va_buffer_limit; - _voucher_activity_buffer_header_t volatile va_current_buffer; - _voucher_atm_t va_atm; -#if __LP64__ - uint64_t va_unused; -#endif - // cacheline - _voucher_activity_lock_s va_buffers_lock; - TAILQ_HEAD(_voucher_activity_buffer_list_s, - _voucher_activity_buffer_header_s) va_buffers; - TAILQ_ENTRY(_voucher_activity_s) va_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_list; - TAILQ_ENTRY(_voucher_activity_s) va_atm_used_list; - pthread_mutex_t va_mutex; - pthread_cond_t va_cond; -} *_voucher_activity_t; - -_voucher_activity_tracepoint_t _voucher_activity_buffer_tracepoint_acquire_slow( - _voucher_activity_t *vap, _voucher_activity_buffer_header_t *vabp, - unsigned int slots, size_t strsize, uint16_t *stroffsetp); -void _voucher_activity_firehose_push(_voucher_activity_t act, - _voucher_activity_buffer_header_t buffer); -extern _voucher_activity_t _voucher_activity_default; -extern voucher_activity_mode_t _voucher_activity_mode; - -#if DISPATCH_DEBUG && DISPATCH_VOUCHER_ACTIVITY_DEBUG -#define _dispatch_voucher_activity_debug(msg, act, ...) \ - _dispatch_debug("activity[%p] <0x%llx>: atm[%p] <%lld>: " msg, (act), \ - (act) ? (act)->va_id : 0, (act) ? (act)->va_atm : NULL, \ - (act) && (act)->va_atm ? (act)->va_atm->vatm_id : 0, ##__VA_ARGS__) -#define _dispatch_voucher_atm_debug(msg, atm, ...) \ - _dispatch_debug("atm[%p] <%lld> kvoucher[0x%08x]: " msg, (atm), \ - (atm) ? (atm)->vatm_id : 0, (atm) ? 
(atm)->vatm_kvoucher : 0, \ - ##__VA_ARGS__) -#else -#define _dispatch_voucher_activity_debug(msg, act, ...) -#define _dispatch_voucher_atm_debug(msg, atm, ...) -#endif - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_timestamp(bool approx) -{ -#if TARGET_IPHONE_SIMULATOR && \ - IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101000 - (void)approx; - return mach_absolute_time(); -#else - return approx ? mach_approximate_time() : mach_absolute_time(); -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_thread_id(void) -{ - uint64_t thread_id; - pthread_threadid_np(NULL, &thread_id); // TODO: 15923074: use TSD thread_id - return thread_id; -} - -#define _voucher_activity_buffer_pos2length(pos) \ - ({ _voucher_activity_buffer_position_u _pos = (pos); \ - _pos.vabp_pos.vabp_next_tracepoint_idx * \ - sizeof(struct _voucher_activity_tracepoint_s) + \ - _pos.vabp_pos.vabp_string_offset; }) +static inline dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_tracepoint_t -_voucher_activity_buffer_tracepoint_acquire( - _voucher_activity_buffer_header_t vab, unsigned int slots, - size_t strsize, uint16_t *stroffsetp) -{ - if (!vab) return NULL; - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; - pos.vabp_pos.vabp_next_tracepoint_idx += slots; - pos.vabp_pos.vabp_string_offset += strsize; - size_t len = _voucher_activity_buffer_pos2length(pos); - if (len > _voucher_activity_buffer_size || pos.vabp_pos.vabp_flags) { - return NULL; - } - if (len == _voucher_activity_buffer_size) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; - } - pos.vabp_pos.vabp_refcnt++; - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - if (stroffsetp) *stroffsetp = 
pos.vabp_pos.vabp_string_offset; - return (_voucher_activity_tracepoint_t)vab + - pos_orig.vabp_pos.vabp_next_tracepoint_idx; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_tracepoint_release( - _voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; - pos.vabp_pos.vabp_refcnt--; - if (!pos.vabp_pos.vabp_refcnt && - (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full)) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; - } - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_mark_full(_voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos_orig, pos; - pos_orig.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - do { - pos.vabp_atomic_pos = pos_orig.vabp_atomic_pos; - if (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_full) { - return false; - } - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_full; - if (!pos.vabp_pos.vabp_refcnt) { - pos.vabp_pos.vabp_flags |= _voucher_activity_buffer_pushing; - } - } while (!dispatch_atomic_cmpxchgvw2o(vab, vabh_pos.vabp_atomic_pos, - pos_orig.vabp_atomic_pos, pos.vabp_atomic_pos, - &pos_orig.vabp_atomic_pos, relaxed)); - return (pos.vabp_pos.vabp_flags & _voucher_activity_buffer_pushing); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_buffer_is_full(_voucher_activity_buffer_header_t vab) -{ - _voucher_activity_buffer_position_u pos; - pos.vabp_atomic_pos = vab->vabh_pos.vabp_atomic_pos; - return (pos.vabp_pos.vabp_flags); -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_buffer_header_t 
-_voucher_activity_buffer_get_from_activity(_voucher_activity_t va) -{ - return va ? va->va_current_buffer : NULL; -} - -DISPATCH_ALWAYS_INLINE -static inline _voucher_activity_t -_voucher_activity_get(void) -{ - _voucher_activity_t va; - voucher_t v = _voucher_get(); - va = v && v->v_activity ? v->v_activity : _voucher_activity_default; - return va; -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_tracepoint_init(_voucher_activity_tracepoint_t vat, - uint8_t type, uint8_t code_namespace, uint32_t code, uint64_t location, - bool approx) -{ - if (!location) location = (uint64_t)__builtin_return_address(0); - uint64_t timestamp = _voucher_activity_timestamp(approx); - vat->vat_flags = _voucher_activity_trace_flag_tracepoint, - vat->vat_type = type, - vat->vat_namespace = code_namespace, - vat->vat_code = code, - vat->vat_timestamp = timestamp, - vat->vat_thread = _voucher_activity_thread_id(), - vat->vat_location = location; - return timestamp; -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_voucher_activity_tracepoint_init_with_id(_voucher_activity_tracepoint_t vat, - voucher_activity_trace_id_t trace_id, uint64_t location, bool approx) +static inline void +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, + voucher_t ov, uintptr_t dc_flags) { - uint8_t type = (uint8_t)(trace_id >> _voucher_activity_trace_id_type_shift); - uint8_t cns = (uint8_t)(trace_id >> - _voucher_activity_trace_id_code_namespace_shift); - uint32_t code = (uint32_t)trace_id; - return _voucher_activity_tracepoint_init(vat, type, cns, code, location, - approx); -} + voucher_t v = dc->dc_voucher; + _dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME); -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_id_is_subtype(voucher_activity_trace_id_t trace_id, - uint8_t type) -{ - voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 
0, 0); - return (trace_id & type_id) == type_id; -} -#define _voucher_activity_trace_id_is_subtype(trace_id, name) \ - _voucher_activity_trace_id_is_subtype(trace_id, \ - voucher_activity_tracepoint_type_ ## name) - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_id_enabled(voucher_activity_trace_id_t trace_id) -{ - switch (_voucher_activity_mode) { - case voucher_activity_mode_release: - return _voucher_activity_trace_id_is_subtype(trace_id, release); - case voucher_activity_mode_stream: - case voucher_activity_mode_debug: - return _voucher_activity_trace_id_is_subtype(trace_id, debug) || - _voucher_activity_trace_id_is_subtype(trace_id, release); + if (consume) { + dc->dc_voucher = VOUCHER_INVALID; } - return false; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_trace_type_enabled(uint8_t type) -{ - voucher_activity_trace_id_t type_id = voucher_activity_trace_id(type, 0, 0); - return _voucher_activity_trace_id_enabled(type_id); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_disabled(void) -{ - return slowpath(_voucher_activity_mode == voucher_activity_mode_disable); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_args_inline(uint8_t type, uint8_t code_namespace, - uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, - uintptr_t arg4) -{ - if (!_voucher_activity_trace_type_enabled(type)) return; - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; - _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); - vat->vat_flags |= _voucher_activity_trace_flag_tracepoint_args; - vat->vat_data[0] = arg1; - vat->vat_data[1] = arg2; - vat->vat_data[2] = arg3; - vat->vat_data[3] = arg4; - if 
(_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); + if (likely(v != DISPATCH_NO_VOUCHER)) { + _dispatch_voucher_ktrace_dc_pop(dc, v); + _dispatch_voucher_debug("continuation[%p] adopt", v, dc); + + if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) { + if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) { + if (consume) _voucher_release(v); + consume = 0; + v = ov; + } + } + } else { + consume = 0; + v = ov; } + (void)_dispatch_adopt_priority_and_set_voucher(dc->dc_priority, v, + consume | DISPATCH_VOUCHER_REPLACE); } -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_activity_event(voucher_activity_trace_id_t trace_id, - voucher_activity_id_t va_id, _voucher_activity_tracepoint_flag_t flags) -{ - _voucher_activity_t act; - _voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - act = _voucher_activity_get(); - vab = _voucher_activity_buffer_get_from_activity(act); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; - _voucher_activity_tracepoint_init_with_id(vat, trace_id, 0, false); - vat->vat_flags |= _voucher_activity_trace_flag_activity | flags; - vat->vat_data[0] = va_id; - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(act, vab); - } -} -#define _voucher_activity_trace_activity_event(trace_id, va_id, type) \ - _voucher_activity_trace_activity_event(trace_id, va_id, \ - _voucher_activity_trace_flag_ ## type) +#pragma mark - +#pragma mark _voucher activity subsystem -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_activity_trace_msg(voucher_t v, mach_msg_header_t *msg, uint32_t code) -{ - if (!v || !v->v_activity) return; // Don't use default activity for IPC - const uint8_t type = voucher_activity_tracepoint_type_debug; - const uint8_t code_namespace = _voucher_activity_tracepoint_namespace_ipc; - if (!_voucher_activity_trace_type_enabled(type)) return; - 
_voucher_activity_buffer_header_t vab; - _voucher_activity_tracepoint_t vat; - vab = _voucher_activity_buffer_get_from_activity(v->v_activity); - vat = _voucher_activity_buffer_tracepoint_acquire(vab, 1, 0, NULL); - if (!vat) return; // TODO: slowpath ? - _voucher_activity_tracepoint_init(vat, type, code_namespace, code, 0, true); - vat->vat_flags |= _voucher_activity_trace_flag_libdispatch; -#if __has_extension(c_static_assert) - _Static_assert(sizeof(mach_msg_header_t) <= sizeof(vat->vat_data), - "mach_msg_header_t too large"); -#endif - memcpy(vat->vat_data, msg, sizeof(mach_msg_header_t)); - if (_voucher_activity_buffer_tracepoint_release(vab)) { - _voucher_activity_firehose_push(v->v_activity, vab); - } -} -#define _voucher_activity_trace_msg(v, msg, type) \ - _voucher_activity_trace_msg(v, msg, \ - _voucher_activity_tracepoint_namespace_ipc_ ## type) +extern dispatch_once_t _firehose_task_buffer_pred; +extern union firehose_buffer_u *_firehose_task_buffer; +extern uint64_t _voucher_unique_pid; +extern dispatch_mach_t _voucher_activity_debug_channel; +extern voucher_activity_hooks_t _voucher_libtrace_hooks; -#endif // !(USE_OBJC && __OBJC2__) && !defined(__cplusplus) +#endif // DISPATCH_PURE_C #else // VOUCHER_USE_MACH_VOUCHER @@ -983,9 +663,9 @@ _voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) DISPATCH_ALWAYS_INLINE static inline mach_voucher_t -_voucher_mach_msg_get(mach_msg_header_t *msg) +_voucher_mach_msg_get(mach_msg_header_t *msg, mach_msg_bits_t *msgh_bits) { - (void)msg; + (void)msg;(void)msgh_bits; return 0; } @@ -997,31 +677,25 @@ _voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) return MACH_VOUCHER_NULL; } +#define _dispatch_voucher_ktrace_dc_push(dc) +#define _dispatch_voucher_ktrace_dc_pop(dc, v) #define _dispatch_voucher_ktrace_dmsg_push(dmsg) #define _dispatch_voucher_ktrace_dmsg_pop(dmsg) DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - 
dispatch_block_flags_t flags) + dispatch_queue_class_t dqu, dispatch_block_flags_t flags) { - (void)dc; (void)flags; + (void)dc; (void)dqu; (void)flags; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc) -{ - (void)dc; -} - -#define _voucher_activity_trace_msg(v, msg, type) - -DISPATCH_ALWAYS_INLINE -static inline bool -_voucher_activity_disabled(void) +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, + uintptr_t dc_flags) { - return true; + (void)dc; (void)ov; (void)dc_flags; } #endif // VOUCHER_USE_MACH_VOUCHER diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig new file mode 100644 index 000000000..aabda625b --- /dev/null +++ b/xcodeconfig/libdispatch-dyld-stub.xcconfig @@ -0,0 +1,28 @@ +// +// Copyright (c) 2016 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 DISPATCH_VARIANT_DYLD_STUB=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 +PRODUCT_NAME = libdispatch_dyld_stub +INSTALL_PATH = /usr/local/lib/dyld_stub +EXCLUDED_SOURCE_FILE_NAMES = * +INCLUDED_SOURCE_FILE_NAMES = voucher.c // it's minimal with DISPATCH_VARIANT_DYLD_STUB +VERSIONING_SYSTEM = diff --git a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig index a2f98f9ee..c7826d5e6 100644 --- a/xcodeconfig/libdispatch-introspection.xcconfig +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ -21,6 +21,6 @@ BUILD_VARIANTS = normal INSTALL_PATH = /usr/lib/system/introspection -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_INTROSPECTION=1 CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-mp-static.xcconfig b/xcodeconfig/libdispatch-mp-static.xcconfig new file mode 100644 index 000000000..1f0eddc4c --- /dev/null +++ b/xcodeconfig/libdispatch-mp-static.xcconfig @@ -0,0 +1,30 @@ +// +// Copyright (c) 2012-2013 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_LDFLAGS = +BUILD_VARIANTS = normal debug +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 +PRODUCT_NAME = libdispatch +INSTALL_PATH = /usr/local/lib/system + +// skip simulator +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos +SKIP_INSTALL[sdk=*simulator*] = YES +EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch-static.xcconfig b/xcodeconfig/libdispatch-up-static.xcconfig similarity index 89% rename from xcodeconfig/libdispatch-static.xcconfig rename to xcodeconfig/libdispatch-up-static.xcconfig index 632e01cef..0ece6354e 100644 --- a/xcodeconfig/libdispatch-static.xcconfig +++ b/xcodeconfig/libdispatch-up-static.xcconfig @@ -22,4 +22,4 @@ OTHER_LDFLAGS = BUILD_VARIANTS = normal SKIP_INSTALL = YES EXCLUDED_SOURCE_FILE_NAMES = * -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) USE_OBJC=0 DISPATCH_USE_DTRACE=0 diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index c29b16337..65dfd04f9 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -19,5 +19,8 @@ # __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap -__dispatch_source_type_memorystatus __dispatch_source_type_memorypressure +__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus __dispatch_queue_attrs __dispatch_queue_attr_concurrent +_dispatch_assert_queue$V2 _dispatch_assert_queue +_dispatch_assert_queue_not$V2 _dispatch_assert_queue_not +_dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 8bb455055..9642ca4dd 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -28,8 +28,14 @@ _OBJC_CLASS_$_OS_dispatch_group __OS_dispatch_group_vtable 
_OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_queue_serial +__OS_dispatch_queue_serial_vtable +_OBJC_CLASS_$_OS_dispatch_queue_concurrent +__OS_dispatch_queue_concurrent_vtable _OBJC_CLASS_$_OS_dispatch_queue_root __OS_dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_main +__OS_dispatch_queue_main_vtable _OBJC_CLASS_$_OS_dispatch_queue_runloop __OS_dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr @@ -62,7 +68,10 @@ _OBJC_METACLASS_$_OS_dispatch_object _OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_METACLASS_$_OS_dispatch_group _OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_queue_serial +_OBJC_METACLASS_$_OS_dispatch_queue_concurrent _OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_METACLASS_$_OS_dispatch_queue_main _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_specific_queue @@ -75,6 +84,6 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher -_OBJC_METACLASS_$_OS_voucher_recipe +#_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.unexport b/xcodeconfig/libdispatch.unexport deleted file mode 100644 index dba78b92e..000000000 --- a/xcodeconfig/libdispatch.unexport +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2012-2013 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - -__dispatch_semaphore_vtable -__dispatch_group_vtable -__dispatch_queue_vtable -__dispatch_queue_root_vtable -__dispatch_queue_runloop_vtable -__dispatch_queue_mgr_vtable -__dispatch_queue_specific_queue_vtable -__dispatch_queue_attr_vtable -__dispatch_source_vtable -__dispatch_mach_vtable -__dispatch_mach_msg_vtable -__dispatch_io_vtable -__dispatch_operation_vtable -__dispatch_disk_vtable diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 7fc525dc7..d5b08d6dd 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -30,8 +30,8 @@ PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os -HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/os -LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system +HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src +LIBRARY_SEARCH_PATHS = $(SDKROOT)/usr/lib/system $(SDKROOT)/usr/local/lib INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO USE_HEADERMAP = NO @@ -43,7 +43,6 @@ CLANG_CXX_LANGUAGE_STANDARD = gnu++11 GCC_ENABLE_CPP_EXCEPTIONS = NO GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES -GCC_ENABLE_OBJC_GC[sdk=macosx*] = supported GCC_ENABLE_PASCAL_STRINGS = NO GCC_WARN_SHADOW = YES GCC_WARN_64_TO_32_BIT_CONVERSION = YES @@ -61,22 +60,19 @@ CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES 
CLANG_WARN_DOCUMENTATION_COMMENTS = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-unknown-warning-option +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) +GCC_NO_COMMON_BLOCKS = YES +WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 +OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) SIM_SUFFIX[sdk=*simulator*] = _sim DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_objc.aliases -Wl,-unexported_symbols_list,$(SRCROOT)/xcodeconfig/libdispatch.unexport -OBJC_LDFLAGS[sdk=macosx*] = $(OBJC_LDFLAGS) -Wl,-upward-lauto -OBJC_LDFLAGS[arch=i386][sdk=macosx*] = -OBJC_EXCLUDED_SOURCE_FILE_NAMES_i386_macosx = object.m data.m +OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order ALIASES_LDFLAGS = 
-Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -PLATFORM_LDFLAGS[sdk=macosx*] = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch_macosx.aliases OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) OTHER_MIGFLAGS = -novouchers diff --git a/xcodeconfig/libdispatch_objc.aliases b/xcodeconfig/libdispatch_objc.aliases deleted file mode 100644 index ad104a190..000000000 --- a/xcodeconfig/libdispatch_objc.aliases +++ /dev/null @@ -1,34 +0,0 @@ -# -# Copyright (c) 2012-2013 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - -_OBJC_CLASS_$_OS_dispatch_semaphore __dispatch_semaphore_vtable -_OBJC_CLASS_$_OS_dispatch_group __dispatch_group_vtable -_OBJC_CLASS_$_OS_dispatch_queue __dispatch_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_root __dispatch_queue_root_vtable -_OBJC_CLASS_$_OS_dispatch_queue_runloop __dispatch_queue_runloop_vtable -_OBJC_CLASS_$_OS_dispatch_queue_mgr __dispatch_queue_mgr_vtable -_OBJC_CLASS_$_OS_dispatch_queue_specific_queue __dispatch_queue_specific_queue_vtable -_OBJC_CLASS_$_OS_dispatch_queue_attr __dispatch_queue_attr_vtable -_OBJC_CLASS_$_OS_dispatch_source __dispatch_source_vtable -_OBJC_CLASS_$_OS_dispatch_mach __dispatch_mach_vtable -_OBJC_CLASS_$_OS_dispatch_mach_msg __dispatch_mach_msg_vtable -_OBJC_CLASS_$_OS_dispatch_io __dispatch_io_vtable -_OBJC_CLASS_$_OS_dispatch_operation __dispatch_operation_vtable -_OBJC_CLASS_$_OS_dispatch_disk __dispatch_disk_vtable diff --git a/xcodeconfig/libfirehose.xcconfig b/xcodeconfig/libfirehose.xcconfig new file mode 100644 index 000000000..07a8b9ac1 --- /dev/null +++ b/xcodeconfig/libfirehose.xcconfig @@ -0,0 +1,36 @@ +// +// Copyright (c) 2015 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = +SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator +PRODUCT_NAME = $(TARGET_NAME) +INSTALL_PATH = /usr/local/lib/ +PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os +STRIP_INSTALLED_PRODUCT = NO +COPY_PHASE_STRIP = NO +SEPARATE_STRIP = NO +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 + +VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL) + +COPY_HEADERS_RUN_UNIFDEF = YES +COPY_HEADERS_UNIFDEF_FLAGS = -UKERNEL diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig new file mode 100644 index 000000000..f6b2a99f6 --- /dev/null +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -0,0 +1,35 @@ +// +// Copyright (c) 2015 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +#include "libfirehose.xcconfig" + +OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed +// LLVM_LTO = YES +PRODUCT_NAME = $(TARGET_NAME) +INSTALL_PATH = /usr/local/lib/kernel/ +PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/kernel/os +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos + +HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/usr/local/include/os $(SDKROOT)/usr/local/include/firehose + +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0 + +COPY_HEADERS_RUN_UNIFDEF = YES +COPY_HEADERS_UNIFDEF_FLAGS = -DKERNEL=1 -DOS_FIREHOSE_SPI=1 -DOS_VOUCHER_ACTIVITY_SPI_TYPES=1 -UOS_VOUCHER_ACTIVITY_SPI diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh index 1610b81ad..1fb149b63 100755 --- a/xcodescripts/install-headers.sh +++ b/xcodescripts/install-headers.sh @@ -29,3 +29,4 @@ cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh index f81eb856f..003e9f218 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -26,4 +26,10 @@ for a in ${ARCHS}; do xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ -sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \ -server /dev/null "${SCRIPT_INPUT_FILE_0}" + xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_2}" \ + -sheader "${SCRIPT_OUTPUT_FILE_3}" -user /dev/null \ + -server /dev/null "${SCRIPT_INPUT_FILE_1}" + xcrun mig ${OTHER_MIGFLAGS} -arch $a 
-header "${SCRIPT_OUTPUT_FILE_4}" \ + -sheader "${SCRIPT_OUTPUT_FILE_5}" -user /dev/null \ + -server /dev/null "${SCRIPT_INPUT_FILE_2}" done diff --git a/xcodeconfig/libdispatch_macosx.aliases b/xcodescripts/run-on-install.sh similarity index 79% rename from xcodeconfig/libdispatch_macosx.aliases rename to xcodescripts/run-on-install.sh index 66b24a31e..730b511d7 100644 --- a/xcodeconfig/libdispatch_macosx.aliases +++ b/xcodescripts/run-on-install.sh @@ -1,5 +1,6 @@ +#!/bin/bash -e # -# Copyright (c) 2013 Apple Inc. All rights reserved. +# Copyright (c) 2016 Apple Inc. All rights reserved. # # @APPLE_APACHE_LICENSE_HEADER_START@ # @@ -17,3 +18,9 @@ # # @APPLE_APACHE_LICENSE_HEADER_END@ # + +if [[ "x${ACTION}" == "xinstall" && "x${SKIP_INSTALL}" == "xNO" ]]; then + $@ +else + exit 0 +fi From 96dcfcae1b617657c67e405712d92df6b8e7af12 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Tue, 26 Sep 2017 16:46:25 +0000 Subject: [PATCH 09/18] libdispatch-913.1.6 Imported from libdispatch-913.1.6.tar.gz --- .gitmodules | 3 - CMakeLists.txt | 229 + INSTALL | 120 - INSTALL.md | 158 + Makefile.am | 27 +- PATCHES | 100 + cmake/config.h.in | 248 + cmake/modules/DTrace.cmake | 26 + cmake/modules/DispatchAppleOptions.cmake | 43 + cmake/modules/FindLibRT.cmake | 39 + cmake/modules/SwiftSupport.cmake | 69 + config/config.h | 19 + configure.ac | 231 +- dispatch/CMakeLists.txt | 24 + dispatch/Makefile.am | 2 +- dispatch/base.h | 17 +- dispatch/block.h | 26 +- dispatch/{ => darwin}/module.modulemap | 1 - dispatch/data.h | 23 +- dispatch/dispatch.h | 25 +- .../{module.map => generic/module.modulemap} | 3 - dispatch/group.h | 16 +- dispatch/introspection.h | 14 +- dispatch/io.h | 26 +- dispatch/object.h | 24 +- dispatch/once.h | 16 +- dispatch/queue.h | 140 +- dispatch/semaphore.h | 6 +- dispatch/source.h | 78 +- dispatch/time.h | 4 +- libdispatch.xcodeproj/project.pbxproj | 582 +- m4/blocks.m4 | 104 +- 
man/CMakeLists.txt | 23 + man/dispatch_apply.3 | 41 +- man/dispatch_object.3 | 19 +- man/dispatch_queue_create.3 | 3 +- man/dispatch_semaphore_create.3 | 14 + man/dispatch_source_create.3 | 58 +- man/dispatch_time.3 | 25 +- os/CMakeLists.txt | 10 + os/firehose_buffer_private.h | 79 +- os/firehose_server_private.h | 126 +- os/linux_base.h | 28 +- os/object.h | 12 +- os/object_private.h | 43 +- os/voucher_activity_private.h | 105 +- os/voucher_private.h | 88 +- private/CMakeLists.txt | 5 + private/benchmark.h | 4 +- private/{ => darwin}/module.modulemap | 1 - private/data_private.h | 30 +- private/generic/module.modulemap | 10 + private/introspection_private.h | 8 +- private/io_private.h | 16 +- private/layout_private.h | 4 +- private/mach_private.h | 275 +- private/private.h | 55 +- private/queue_private.h | 76 +- private/source_private.h | 333 +- src/BlocksRuntime/Block.h | 54 + src/BlocksRuntime/Block_private.h | 264 + src/BlocksRuntime/data.c | 24 + src/BlocksRuntime/runtime.c | 765 ++ src/CMakeLists.txt | 204 + src/Makefile.am | 138 +- src/allocator.c | 22 +- src/apply.c | 93 +- src/block.cpp | 5 +- src/data.c | 63 +- src/data.m | 52 +- src/data_internal.h | 11 +- src/event/event.c | 327 + src/event/event_config.h | 219 + src/event/event_epoll.c | 650 ++ src/event/event_internal.h | 449 ++ src/event/event_kevent.c | 2208 +++++ src/event/workqueue.c | 249 + src/event/workqueue_internal.h | 44 + src/firehose/firehose.defs | 8 +- src/firehose/firehose_buffer.c | 333 +- src/firehose/firehose_buffer_internal.h | 13 +- src/firehose/firehose_inline_internal.h | 160 +- src/firehose/firehose_internal.h | 2 + src/firehose/firehose_reply.defs | 4 +- src/firehose/firehose_server.c | 706 +- src/firehose/firehose_server_internal.h | 24 +- src/init.c | 743 +- src/inline_internal.h | 1854 +++-- src/internal.h | 407 +- src/introspection.c | 43 +- src/introspection_internal.h | 28 +- src/io.c | 82 +- src/io_internal.h | 7 +- src/libdispatch.codes | 6 + src/mach.c | 2982 +++++++ 
src/mach_internal.h | 131 + src/object.c | 138 +- src/object.m | 89 +- src/object_internal.h | 205 +- src/once.c | 82 +- src/provider.d | 38 + src/queue.c | 4334 +++++----- src/queue_internal.h | 428 +- src/semaphore.c | 349 +- src/semaphore_internal.h | 18 +- src/shims.h | 150 +- src/shims/android_stubs.h | 23 + src/shims/atomic.h | 166 +- src/shims/atomic_sfb.h | 64 +- src/shims/getprogname.h | 7 + src/shims/hw_config.h | 30 +- src/shims/linux_stubs.c | 4 + src/shims/linux_stubs.h | 75 +- src/shims/lock.c | 414 +- src/shims/lock.h | 379 +- src/shims/perfmon.h | 68 +- src/shims/priority.h | 269 + src/shims/time.h | 151 +- src/shims/tsd.h | 44 +- src/shims/yield.h | 27 +- src/source.c | 7154 ++++------------- src/source_internal.h | 324 +- src/swift/Block.swift | 35 +- src/swift/Data.swift | 204 +- src/swift/Dispatch.apinotes | 2 + src/swift/Dispatch.swift | 50 +- src/swift/DispatchStubs.cc | 74 +- src/swift/IO.swift | 73 +- src/swift/Private.swift | 74 +- src/swift/Queue.swift | 378 +- src/swift/Source.swift | 438 +- src/swift/Time.swift | 128 +- src/swift/Wrapper.swift | 104 +- src/time.c | 98 +- src/trace.h | 62 +- src/voucher.c | 296 +- src/voucher_internal.h | 152 +- tools/voucher_trace.d | 78 + xcodeconfig/libdispatch-dyld-stub.xcconfig | 10 +- xcodeconfig/libdispatch-mp-static.xcconfig | 11 +- xcodeconfig/libdispatch-resolved.xcconfig | 1 + .../libdispatch-resolver_iphoneos.order | 20 - xcodeconfig/libdispatch-up-static.xcconfig | 9 +- xcodeconfig/libdispatch.aliases | 3 +- xcodeconfig/libdispatch.xcconfig | 31 +- xcodeconfig/libdispatch_iphoneos.order | 20 - xcodeconfig/libfirehose.xcconfig | 7 +- xcodeconfig/libfirehose_kernel.xcconfig | 10 +- xcodescripts/install-manpages.sh | 2 +- 149 files changed, 20936 insertions(+), 13772 deletions(-) create mode 100644 CMakeLists.txt delete mode 100644 INSTALL create mode 100644 INSTALL.md create mode 100644 cmake/config.h.in create mode 100644 cmake/modules/DTrace.cmake create mode 100644 
cmake/modules/DispatchAppleOptions.cmake create mode 100644 cmake/modules/FindLibRT.cmake create mode 100644 cmake/modules/SwiftSupport.cmake create mode 100644 dispatch/CMakeLists.txt rename dispatch/{ => darwin}/module.modulemap (88%) rename dispatch/{module.map => generic/module.modulemap} (80%) create mode 100644 man/CMakeLists.txt create mode 100644 os/CMakeLists.txt create mode 100644 private/CMakeLists.txt rename private/{ => darwin}/module.modulemap (90%) create mode 100644 private/generic/module.modulemap create mode 100644 src/BlocksRuntime/Block.h create mode 100644 src/BlocksRuntime/Block_private.h create mode 100644 src/BlocksRuntime/data.c create mode 100644 src/BlocksRuntime/runtime.c create mode 100644 src/CMakeLists.txt create mode 100644 src/event/event.c create mode 100644 src/event/event_config.h create mode 100644 src/event/event_epoll.c create mode 100644 src/event/event_internal.h create mode 100644 src/event/event_kevent.c create mode 100644 src/event/workqueue.c create mode 100644 src/event/workqueue_internal.h create mode 100644 src/mach.c create mode 100644 src/mach_internal.h create mode 100644 src/shims/android_stubs.h create mode 100644 src/shims/priority.h create mode 100755 tools/voucher_trace.d delete mode 100644 xcodeconfig/libdispatch-resolver_iphoneos.order delete mode 100644 xcodeconfig/libdispatch_iphoneos.order diff --git a/.gitmodules b/.gitmodules index e6068b432..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "libpwq"] - path = libpwq - url = https://github.com/mheily/libpwq.git diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 000000000..f6b078e25 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,229 @@ + +cmake_minimum_required(VERSION 3.4.3) + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") + +project(dispatch + VERSION 1.3 + LANGUAGES C CXX) +enable_testing() + +set(CMAKE_C_VISIBILITY_PRESET hidden) +set(CMAKE_CXX_STANDARD 11) + 
+set(CMAKE_THREAD_PREFER_PTHREAD TRUE) +set(THREADS_PREFER_PTHREAD_FLAG TRUE) +find_package(Threads REQUIRED) + +include(CheckCSourceCompiles) +include(CheckFunctionExists) +include(CheckIncludeFiles) +include(CheckLibraryExists) +include(CheckSymbolExists) +include(GNUInstallDirs) + +set(WITH_BLOCKS_RUNTIME "" CACHE PATH "Path to blocks runtime") + +include(DispatchAppleOptions) + +option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) +set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) + +# TODO(compnerd) swift options + +option(BUILD_SHARED_LIBS "build shared libraries" ON) + +option(ENABLE_TESTING "build libdispatch tests" ON) + +if(CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL Android) + set(USE_GOLD_LINKER_DEFAULT ON) +else() + set(USE_GOLD_LINKER_DEFAULT OFF) +endif() +option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT}) + +option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via __thread" ON) +set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) + +if(CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL Android OR + CMAKE_SYSTEM_NAME STREQUAL Windows) + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) +else() + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF) +endif() +option(ENABLE_INTERNAL_PTHREAD_WORKQUEUES "use libdispatch's own implementation of pthread workqueues" ${ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT}) +if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 1) + set(HAVE_PTHREAD_WORKQUEUES 0) +else() + check_include_files(pthread/workqueue_private.h HAVE_PTHREAD_WORKQUEUE_PRIVATE_H) + check_include_files(pthread_workqueue.h HAVE_PTHREAD_WORKQUEUE_H) + if(HAVE_PTHREAD_WORKQUEUE_PRIVATE_H AND HAVE_PTHREAD_WORKQUEUE_H) + set(HAVE_PTHREAD_WORKQUEUES 1) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 0) + else() + set(HAVE_PTHREAD_WORKQUEUES 0) + set(DISPATCH_USE_INTERNAL_WORKQUEUE 1) + 
endif() +endif() + +if(CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL Android OR + CMAKE_SYSTEM_NAME STREQUAL Windows) + add_library(BlocksRuntime + STATIC + ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/data.c + ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + set_target_properties(BlocksRuntime + PROPERTIES + POSITION_INDEPENDENT_CODE TRUE) + if(HAVE_OBJC AND CMAKE_DL_LIBS) + set_target_properties(BlocksRuntime + PROPERTIES + INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS}) + endif() + set(WITH_BLOCKS_RUNTIME "${CMAKE_SOURCE_DIR}/src/BlocksRuntime" CACHE PATH "Path to blocks runtime" FORCE) +else() + # TODO(compnerd) support system installed BlocksRuntime + # find_package(BlocksRuntime REQUIRED) +endif() + +check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) +if(_GNU_SOURCE) + set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE) +endif() + +check_c_source_compiles("void __attribute__((__noreturn__)) main() { __builtin_trap(); }" + __BUILTIN_TRAP) +if(__BUILTIN_TRAP) + set(HAVE_NORETURN_BUILTIN_TRAP 1) +endif() + +find_package(LibRT) + +check_function_exists(_pthread_workqueue_init HAVE__PTHREAD_WORKQUEUE_INIT) +check_function_exists(getprogname HAVE_GETPROGNAME) +check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME) +check_function_exists(mach_approximate_time HAVE_MACH_APPROXIMATE_TIME) +check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT) +check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) +check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) +check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) +check_function_exists(pthread_workqueue_setdispatch_np HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) +check_function_exists(strlcpy HAVE_STRLCPY) +check_function_exists(sysconf HAVE_SYSCONF) + +if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) + include(FindPkgConfig) + pkg_check_modules(BSD_OVERLAY libbsd-overlay) + if(BSD_OVERLAY_FOUND) + set(HAVE_STRLCPY 1 CACHE INTERNAL 
"Have function strlcpy" FORCE) + set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE) + endif() +endif() + +find_package(Threads REQUIRED) + +check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) +check_include_files("dlfcn.h" HAVE_DLFCN_H) +check_include_files("fcntl.h" HAVE_FCNTL_H) +check_include_files("inttypes.h" HAVE_INTTYPES_H) +check_include_files("libkern/OSAtomic.h" HAVE_LIBKERN_OSATOMIC_H) +check_include_files("libkern/OSCrossEndian.h" HAVE_LIBKERN_OSCROSSENDIAN_H) +check_include_files("libproc_internal.h" HAVE_LIBPROC_INTERNAL_H) +check_include_files("mach/mach.h" HAVE_MACH) +if(HAVE_MACH) + set(__DARWIN_NON_CANCELABLE 1) + set(USE_MACH_SEM 1) +else() + set(__DARWIN_NON_CANCELABLE 0) + set(USE_MACH_SEM 0) +endif() +check_include_files("malloc/malloc.h" HAVE_MALLOC_MALLOC_H) +check_include_files("memory.h" HAVE_MEMORY_H) +check_include_files("pthread/qos.h" HAVE_PTHREAD_QOS_H) +check_include_files("pthread/workqueue_private.h" HAVE_PTHREAD_WORKQUEUE_PRIVATE_H) +check_include_files("pthread_machdep.h" HAVE_PTHREAD_MACHDEP_H) +check_include_files("pthread_np.h" HAVE_PTHREAD_NP_H) +check_include_files("pthread_workqueue.h" HAVE_PTHREAD_WORKQUEUE_H) +check_include_files("stdint.h" HAVE_STDINT_H) +check_include_files("stdlib.h" HAVE_STDLIB_H) +check_include_files("string.h" HAVE_STRING_H) +check_include_files("strings.h" HAVE_STRINGS_H) +check_include_files("sys/cdefs.h" HAVE_SYS_CDEFS_H) +check_include_files("sys/guarded.h" HAVE_SYS_GUARDED_H) +check_include_files("sys/stat.h" HAVE_SYS_STAT_H) +check_include_files("sys/types.h" HAVE_SYS_TYPES_H) +check_include_files("unistd.h" HAVE_UNISTD_H) +check_include_files("objc/objc-internal.h" HAVE_OBJC) + +check_library_exists(pthread sem_init "" USE_POSIX_SEM) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + add_definitions(-DTARGET_OS_WIN32) + add_definitions(-DUSE_WIN32_SEM) +endif() + +check_symbol_exists(CLOCK_UPTIME "time.h" HAVE_DECL_CLOCK_UPTIME) 
+check_symbol_exists(CLOCK_UPTIME_FAST "time.h" HAVE_DECL_CLOCK_UPTIME_FAST) +check_symbol_exists(CLOCK_MONOTONIC "time.h" HAVE_DECL_CLOCK_MONOTONIC) +check_symbol_exists(CLOCK_REALTIME "time.h" HAVE_DECL_CLOCK_REALTIME) +check_symbol_exists(FD_COPY "sys/select.h" HAVE_DECL_FD_COPY) +check_symbol_exists(NOTE_LOWAT "sys/event.h" HAVE_DECL_NOTE_LOWAT) +check_symbol_exists(NOTE_NONE "sys/event.h" HAVE_DECL_NOTE_NONE) +check_symbol_exists(NOTE_REAP "sys/event.h" HAVE_DECL_NOTE_REAP) +check_symbol_exists(NOTE_REVOKE "sys/event.h" HAVE_DECL_NOTE_REVOKE) +check_symbol_exists(NOTE_SIGNAL "sys/event.h" HAVE_DECL_NOTE_SIGNAL) +check_symbol_exists(POSIX_SPAWN_START_SUSPENDED "sys/spawn.h" HAVE_DECL_POSIX_SPAWN_START_SUSPENDED) +check_symbol_exists(SIGEMT "signal.h" HAVE_DECL_SIGEMT) +check_symbol_exists(VQ_DESIRED_DISK "sys/mount.h" HAVE_DECL_VQ_DESIRED_DISK) +check_symbol_exists(VQ_NEARLOWDISK "sys/mount.h" HAVE_DECL_VQ_NEARLOWDISK) +check_symbol_exists(VQ_QUOTA "sys/mount.h" HAVE_DECL_VQ_QUOTA) +check_symbol_exists(VQ_UPDATE "sys/mount.h" HAVE_DECL_VQ_UPDATE) +check_symbol_exists(VQ_VERYLOWDISK "sys/mount.h" HAVE_DECL_VQ_VERYLOWDISK) + +check_symbol_exists(program_invocation_name "errno.h" HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME) + +find_program(dtrace_EXECUTABLE dtrace) +if(dtrace_EXECUTABLE) + add_definitions(-DDISPATCH_USE_DTRACE=1) +else() + add_definitions(-DDISPATCH_USE_DTRACE=0) +endif() + +find_program(leaks_EXECUTABLE leaks) +if(leaks_EXECUTABLE) + set(HAVE_LEAKS TRUE) +endif() + +if(CMAKE_SYSTEM_NAME STREQUAL Darwin) + add_custom_command(OUTPUT + "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + "${CMAKE_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") +else() + add_custom_command(OUTPUT + 
"${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + "${CMAKE_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") +endif() +configure_file("${CMAKE_SOURCE_DIR}/cmake/config.h.in" + "${CMAKE_BINARY_DIR}/config/config_ac.h") +add_definitions(-DHAVE_CONFIG_H) + +add_subdirectory(dispatch) +add_subdirectory(man) +add_subdirectory(os) +add_subdirectory(private) +add_subdirectory(src) +if(ENABLE_TESTING) + add_subdirectory(tests) +endif() + diff --git a/INSTALL b/INSTALL deleted file mode 100644 index 9113e4a8f..000000000 --- a/INSTALL +++ /dev/null @@ -1,120 +0,0 @@ -Grand Central Dispatch (GCD) - -GCD is a concurrent programming framework first shipped with Mac OS X Snow -Leopard. This package is an open source bundling of libdispatch, the core -user space library implementing GCD. At the time of writing, support for -the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow -Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Support -for Linux is a work in progress (see Linux notes below). Other systems are -currently unsupported. - - Configuring and installing libdispatch - -GCD is built using autoconf, automake, and libtool, and has a number of -compile-time configuration options that should be reviewed before starting. -An uncustomized install requires: - - sh autogen.sh - ./configure - make - make install - -The following configure options may be of general interest: - ---with-apple-libpthread-source - - Specify the path to Apple's libpthread package, so that appropriate headers - can be found and used. - ---with-apple-libplatform-source - - Specify the path to Apple's libplatform package, so that appropriate headers - can be found and used. 
- ---with-apple-libclosure-source - - Specify the path to Apple's Libclosure package, so that appropriate headers - can be found and used. - ---with-apple-xnu-source - - Specify the path to Apple's XNU package, so that appropriate headers can be - found and used. - ---with-blocks-runtime - - On systems where -fblocks is supported, specify an additional library path - in which libBlocksRuntime can be found. This is not required on OS X, - where the Blocks runtime is included in libSystem, but is required on - FreeBSD. - -The following options are likely to only be useful when building libdispatch on -OS X as a replacement for /usr/lib/system/libdispatch.dylib: - ---with-apple-objc4-source - - Specify the path to Apple's objc4 package, so that appropriate headers can - be found and used. - ---disable-libdispatch-init-constructor - - Do not tag libdispatch's init routine as __constructor, in which case it - must be run manually before libdispatch routines can be called. This is the - default when building on OS X. For /usr/lib/system/libdispatch.dylib - the init routine is called automatically during process start. - ---enable-apple-tsd-optimizations - - Use a non-portable allocation scheme for pthread per-thread data (TSD) keys - when building libdispatch for /usr/lib/system on OS X. This should not - be used on other OS's, or on OS X when building a stand-alone library. 
- - Typical configuration commands - -The following command lines create the configuration required to build -libdispatch for /usr/lib/system on OS X El Capitan: - - clangpath=$(dirname `xcrun --find clang`) - sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" - LIBTOOLIZE=glibtoolize sh autogen.sh - cflags='-arch x86_64 -arch i386 -g -Os' - ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ - --prefix=/usr --libdir=/usr/lib/system --disable-static \ - --enable-apple-tsd-optimizations \ - --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ - --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ - --with-apple-libclosure-source=/path/to/10.11.0/libclosure-65 \ - --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ - --with-apple-objc4-source=/path/to/10.11.0/objc4-680 - make check - -Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with -clang and blocks support: - - sh autogen.sh - ./configure CC=clang --with-blocks-runtime=/usr/local/lib - make check - -Instructions for building on Linux. Initial focus is on ubuntu 15.04. -Prepare your system - 1. Install compiler, autotools - sudo apt-get install clang - sudo apt-get install autoconf libtool pkg-config - 2. Install dtrace (to generate provider.h) - sudo apt-get install systemtap-sdt-dev - 3. Install libdispatch pre-reqs - sudo apt-get install libblocksruntime-dev libkqueue-dev libbsd-dev - -Initialize git submodules: - We are using git submodules to incorporate a specific revision of the - upstream pthread_workqueue library into the build. - git submodule init - git submodule update - -Build: - sh autogen.sh - ./configure - make - -Note: the build currently fails building tests, but libdispatch.so should - build successfully. 
diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 000000000..9940c2cf7 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,158 @@ +## Grand Central Dispatch (GCD) + +GCD is a concurrent programming framework first shipped with Mac OS X Snow +Leopard. This package is an open source bundling of libdispatch, the core +user space library implementing GCD. At the time of writing, support for +the BSD kqueue API, and specifically extensions introduced in Mac OS X Snow +Leopard and FreeBSD 9-CURRENT, are required to use libdispatch. Linux is +supported, but requires specific packages to be installed (see Linux +section at the end of the file). Other systems are currently unsupported. + +### Configuring and installing libdispatch (general comments) + +GCD is built using autoconf, automake, and libtool, and has a number of +compile-time configuration options that should be reviewed before starting. +An uncustomized install of the C-API to libdispatch requires: + + sh autogen.sh + ./configure + make + make install + +libdispatch can be optionally built to include a Swift API. This requires a +Swift toolchain to compile the Swift code in libdispatch and can be done +in two possible scenarios. + +If you are building your own Swift toolchain from source, then you should build +libdispatch simply by giving additional arguments to swift/utils/build-script: + + ./swift/utils/build-script --libdispatch -- --install-libdispatch + +To build libdispatch using a pre-built Swift toolchain and install libdispatch +into that toolchain (to allow that toolchain to compile Swift code containing +"import Dispatch") requires: + + sh autogen.sh + ./configure --with-swift-toolchain= --prefix= + make + make install + +Note that once libdispatch is installed into a Swift toolchain, that +toolchain cannot be used to compile libdispatch again (you must 'make uninstall' +libdispatch from the toolchain before using it to rebuild libdispatch). 
+ +You can also use the build-toolchain script to create a toolchain +that includes libdispatch on Linux: + +1. Add libdispatch and install-libdispatch lines to ./swift/utils/build-presets.ini under `[preset: buildbot_linux]` section, as following: + + ``` + [preset: buildbot_linux] + mixin-preset=mixin_linux_installation + build-subdir=buildbot_linux + lldb + release + test + validation-test + long-test + libdispatch + foundation + lit-args=-v + dash-dash + + install-libdispatch + install-foundation + reconfigure + ``` + +2. Run: + + ``` + ./swift/utils/build-toolchain local.swift + ``` + +Note that adding libdispatch in build-presets.ini is for Linux only as Swift on macOS platforms uses the system installed libdispatch, so its not required. + +### Building and installing on OS X + +The following configure options may be of general interest: + +`--with-apple-libpthread-source` + +Specify the path to Apple's libpthread package, so that appropriate headers + can be found and used. + +`--with-apple-libplatform-source` + +Specify the path to Apple's libplatform package, so that appropriate headers + can be found and used. + +`--with-apple-xnu-source` + +Specify the path to Apple's XNU package, so that appropriate headers can be + found and used. + +`--with-blocks-runtime` + +On systems where -fblocks is supported, specify an additional library path in which libBlocksRuntime can be found. This is not required on OS X, where the Blocks runtime is included in libSystem, but is required on FreeBSD. + +The following options are likely to only be useful when building libdispatch on +OS X as a replacement for /usr/lib/system/libdispatch.dylib: + +`--disable-libdispatch-init-constructor` + +Do not tag libdispatch's init routine as __constructor, in which case it must be run manually before libdispatch routines can be called. This is the default when building on OS X. For /usr/lib/system/libdispatch.dylib the init routine is called automatically during process start. 
+ +`--enable-apple-tsd-optimizations` + +Use a non-portable allocation scheme for pthread per-thread data (TSD) keys when building libdispatch for /usr/lib/system on OS X. This should not be used on other OS's, or on OS X when building a stand-alone library. + +#### Typical configuration commands + +The following command lines create the configuration required to build +libdispatch for /usr/lib/system on OS X El Capitan: + + clangpath=$(dirname `xcrun --find clang`) + sudo mkdir -p "$clangpath/../local/lib/clang/enable_objc_gc" + LIBTOOLIZE=glibtoolize sh autogen.sh + cflags='-arch x86_64 -arch i386 -g -Os' + ./configure CFLAGS="$cflags" OBJCFLAGS="$cflags" CXXFLAGS="$cflags" \ + --prefix=/usr --libdir=/usr/lib/system --disable-static \ + --enable-apple-tsd-optimizations \ + --with-apple-libpthread-source=/path/to/10.11.0/libpthread-137.1.1 \ + --with-apple-libplatform-source=/path/to/10.11.0/libplatform-73.1.1 \ + --with-apple-xnu-source=/path/to/10.11.0/xnu-3247.1.106 \ + make check + +### Building and installing for FreeBSD + +Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with +clang and blocks support: + + sh autogen.sh + ./configure CC=clang --with-blocks-runtime=/usr/local/lib + make check + +### Building and installing for Linux + +Note that libdispatch development and testing is done only +on Ubuntu; currently supported versions are 14.04, 15.10 and 16.04. + +1. The first thing to do is install required packages: + + `sudo apt-get install autoconf libtool pkg-config clang systemtap-sdt-dev libbsd-dev linux-libc-dev` + + Note: compiling libdispatch requires clang 3.8 or better and +the gold linker. If the default clang on your Ubuntu version is +too old, see http://apt.llvm.org/ to install a newer version. +On older Ubuntu releases, you may need to install binutils-gold +to get the gold linker. + +2. 
Build (as in the general instructions above) + + ``` + sh autogen.sh + ./configure + make + make install + ``` diff --git a/Makefile.am b/Makefile.am index cc01c7c27..f1be02951 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,25 +4,18 @@ ACLOCAL_AMFLAGS = -I m4 -if BUILD_OWN_PTHREAD_WORKQUEUES -SUBDIRS= \ - dispatch \ - libpwq \ - man \ - os \ - private \ - src \ - tests -else -SUBDIRS= \ - dispatch \ - man \ - os \ - private \ - src \ - tests +if BUILD_TESTS + MAYBE_TESTS = tests endif +SUBDIRS= \ + dispatch \ + man \ + os \ + private \ + src \ + $(MAYBE_TESTS) + EXTRA_DIST= \ README.md \ LICENSE \ diff --git a/PATCHES b/PATCHES index 28f7c5248..c3d28b330 100644 --- a/PATCHES +++ b/PATCHES @@ -253,3 +253,103 @@ github commits starting with 29bdc2f from [2dbf83c] APPLIED rdar://27303844 [78b9e82] APPLIED rdar://27303844 [2c0e5ee] APPLIED rdar://27303844 +[5ee237f] APPLIED rdar://27600964 +[77299ec] APPLIED rdar://27600964 +[57c5c28] APPLIED rdar://27600964 +[f8423ec] APPLIED rdar://27600964 +[325f73d] APPLIED rdar://27600964 +[b84e87e] APPLIED rdar://27600964 +[ae71a91] APPLIED rdar://27600964 +[8669dea] APPLIED rdar://27600964 +[a8d0327] APPLIED rdar://27600964 +[2e4e6af] APPLIED rdar://27600964 +[2457fb2] APPLIED rdar://27600964 +[4d58038] APPLIED rdar://27600964 +[98d0a05] APPLIED rdar://27600964 +[8976101] APPLIED rdar://27600964 +[0d9ea5f] APPLIED rdar://28486911 +[e7e9a32] APPLIED rdar://28486911 +[44174d9] APPLIED rdar://28486911 +[6402cb7] APPLIED rdar://28486911 +[e2d5eb5] APPLIED rdar://28486911 +[758bb7f] APPLIED rdar://28486911 +[4c588e9] APPLIED rdar://28486911 +[1300d06] APPLIED rdar://28486911 +[ae1f7e8] APPLIED rdar://28486911 +[40a9bfb] APPLIED rdar://28486911 +[6366081] APPLIED rdar://28486911 +[81d1d0c] APPLIED rdar://28486911 +[5526122] APPLIED rdar://28486911 +[1a7ff3f] APPLIED rdar://28486911 +[e905735] APPLIED rdar://28486911 +[7fe8323] APPLIED rdar://28486911 +[6249878] APPLIED rdar://28486911 +[20792fe] APPLIED rdar://28486911 +[3639fbe] 
APPLIED rdar://28486911 +[bda3baf] APPLIED rdar://28486911 +[8803d07] APPLIED rdar://28486911 +[d04a0df] APPLIED rdar://28486911 +[69d2a6a] APPLIED rdar://28486911 +[367bd95] APPLIED rdar://28486911 +[152985f] APPLIED rdar://28486911 +[ba7802e] APPLIED rdar://28486911 +[92773e0] APPLIED rdar://30568673 +[548a1b9] APPLIED rdar://30568673 +[b628e5c] APPLIED rdar://30568673 +[a055ddb] APPLIED rdar://30568673 +[012f48b] APPLIED rdar://30568673 +[353adba] APPLIED rdar://30568673 +[eb730eb] APPLIED rdar://30568673 +[ac16fbb] APPLIED rdar://30568673 +[967876e] APPLIED rdar://30568673 +[44c2291] APPLIED rdar://30568673 +[ceb1fac] APPLIED rdar://30568673 +[c95febb] APPLIED rdar://30568673 +[b6e9cf4] APPLIED rdar://30568673 +[e199473] APPLIED rdar://30568673 +[3767ac7] APPLIED rdar://30568673 +[10eb0e4] APPLIED rdar://30568673 +[787dd92] APPLIED rdar://30568673 +[ba4cac5] APPLIED rdar://30568673 +[7974138] APPLIED rdar://30568673 +[cd12dcb] APPLIED rdar://32283666 +[ff05109] APPLIED rdar://32283666 +[73315ee] APPLIED rdar://32283666 +[fcc1924] APPLIED rdar://32283666 +[272e818] APPLIED rdar://32283666 +[b6f8908] APPLIED rdar://32283666 +[a6c16d0] APPLIED rdar://32283666 +[1cc64e1] APPLIED rdar://32283666 +[d137aa4] APPLIED rdar://32283666 +[a69853f] APPLIED rdar://32283666 +[eea0667] APPLIED rdar://32283666 +[f84d21d] APPLIED rdar://32283666 +[3da8398] APPLIED rdar://32283666 +[2df80a3] APPLIED rdar://32283666 +[97a2f06] APPLIED rdar://32283666 +[f76b8f5] APPLIED rdar://32283666 +[3828fbb] APPLIED rdar://32283666 +[5e8789e] APPLIED rdar://32283666 +[3fba60a] APPLIED rdar://32283666 +[d6eb245] APPLIED rdar://32283666 +[0b6c22e] APPLIED rdar://33531111 +[5a3c02a] APPLIED rdar://33531111 +[22df1e7] APPLIED rdar://33531111 +[21273de] APPLIED rdar://33531111 +[dc1857c] APPLIED rdar://33531111 +[56f36b6] APPLIED rdar://33531111 +[c87c6bb] APPLIED rdar://33531111 +[b791d23] APPLIED rdar://33531111 +[c2d0c49] APPLIED rdar://33531111 +[1d25040] APPLIED rdar://33531111 +[ab89c6c] 
APPLIED rdar://33531111 +[e591e7e] APPLIED rdar://33531111 +[ded5bab] APPLIED rdar://33531111 +[ce90d0c] APPLIED rdar://33531111 +[69c8f3e] APPLIED rdar://33531111 +[23a3a84] APPLIED rdar://33531111 +[79b7529] APPLIED rdar://33531111 +[f8e71eb] APPLIED rdar://33531111 +[8947dcf] APPLIED rdar://33531111 +[5ad9208] APPLIED rdar://33531111 +[698d085] APPLIED rdar://33531111 diff --git a/cmake/config.h.in b/cmake/config.h.in new file mode 100644 index 000000000..6696e9863 --- /dev/null +++ b/cmake/config.h.in @@ -0,0 +1,248 @@ + +/* Define if building pthread work queues from source */ +#cmakedefine01 DISPATCH_USE_INTERNAL_WORKQUEUE + +/* Enable usage of thread local storage via __thread */ +#cmakedefine01 DISPATCH_USE_THREAD_LOCAL_STORAGE + +/* Define to 1 if you have the declaration of `CLOCK_MONOTONIC', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_CLOCK_MONOTONIC + +/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_CLOCK_REALTIME + +/* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_CLOCK_UPTIME + +/* Define to 1 if you have the declaration of `CLOCK_UPTIME_FAST', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_CLOCK_UPTIME_FAST + +/* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_FD_COPY + +/* Define to 1 if you have the declaration of `NOTE_LOWAT', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_NOTE_LOWAT + +/* Define to 1 if you have the declaration of `NOTE_NONE', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_NOTE_NONE + +/* Define to 1 if you have the declaration of `NOTE_REAP', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_NOTE_REAP + +/* Define to 1 if you have the declaration of `NOTE_REVOKE', and to 0 if you + don't. 
*/ +#cmakedefine01 HAVE_DECL_NOTE_REVOKE + +/* Define to 1 if you have the declaration of `NOTE_SIGNAL', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_NOTE_SIGNAL + +/* Define to 1 if you have the declaration of `POSIX_SPAWN_START_SUSPENDED', + and to 0 if you don't. */ +#cmakedefine01 HAVE_DECL_POSIX_SPAWN_START_SUSPENDED + +/* Define to 1 if you have the declaration of `program_invocation_short_name', + and to 0 if you don't. */ +#cmakedefine01 HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME + +/* Define to 1 if you have the declaration of `SIGEMT', and to 0 if you don't. + */ +#cmakedefine01 HAVE_DECL_SIGEMT + +/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_VQ_DESIRED_DISK + +/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_VQ_NEARLOWDISK + +/* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_VQ_QUOTA + +/* Define to 1 if you have the declaration of `VQ_UPDATE', and to 0 if you + don't. */ +#cmakedefine01 HAVE_DECL_VQ_UPDATE + +/* Define to 1 if you have the declaration of `VQ_VERYLOWDISK', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_VQ_VERYLOWDISK + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_DLFCN_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_FCNTL_H + +/* Define to 1 if you have the `getprogname' function. */ +#cmakedefine01 HAVE_GETPROGNAME + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_INTTYPES_H + +/* Define if Apple leaks program is present */ +#cmakedefine HAVE_LEAKS + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_LIBKERN_OSATOMIC_H + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_LIBKERN_OSCROSSENDIAN_H + +/* Define to 1 if you have the header file. 
*/ +#cmakedefine HAVE_LIBPROC_INTERNAL_H + +/* Define if mach is present */ +#cmakedefine HAVE_MACH + +/* Define to 1 if you have the `mach_absolute_time' function. */ +#cmakedefine HAVE_MACH_ABSOLUTE_TIME + +/* Define to 1 if you have the `mach_approximate_time' function. */ +#cmakedefine HAVE_MACH_APPROXIMATE_TIME + +/* Define to 1 if you have the `mach_port_construct' function. */ +#cmakedefine HAVE_MACH_PORT_CONSTRUCT + +/* Define to 1 if you have the `malloc_create_zone' function. */ +#cmakedefine HAVE_MALLOC_CREATE_ZONE + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_MALLOC_MALLOC_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_MEMORY_H + +/* Define if __builtin_trap marked noreturn */ +#cmakedefine01 HAVE_NORETURN_BUILTIN_TRAP + +/* Define if you have the Objective-C runtime */ +#cmakedefine HAVE_OBJC + +/* Define to 1 if you have the `pthread_key_init_np' function. */ +#cmakedefine HAVE_PTHREAD_KEY_INIT_NP + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_PTHREAD_MACHDEP_H + +/* Define to 1 if you have the `pthread_main_np' function. */ +#cmakedefine HAVE_PTHREAD_MAIN_NP + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_PTHREAD_NP_H + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_PTHREAD_QOS_H + +/* Define if pthread work queues are present */ +#cmakedefine01 HAVE_PTHREAD_WORKQUEUES + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_PTHREAD_WORKQUEUE_H + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_PTHREAD_WORKQUEUE_PRIVATE_H + +/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ +#cmakedefine HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_STDLIB_H + +/* Define to 1 if you have the header file. 
*/ +#cmakedefine01 HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_STRING_H + +/* Define to 1 if you have the `strlcpy' function. */ +#cmakedefine01 HAVE_STRLCPY + +/* Define if building for Swift */ +#undef HAVE_SWIFT + +/* Define to 1 if you have the `sysconf' function. */ +#cmakedefine01 HAVE_SYSCONF + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_SYS_CDEFS_H + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_SYS_GUARDED_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#cmakedefine HAVE_TARGETCONDITIONALS_H + +/* Define to 1 if you have the header file. */ +#cmakedefine01 HAVE_UNISTD_H + +/* Define to 1 if you have the `_pthread_workqueue_init' function. */ +#cmakedefine HAVE__PTHREAD_WORKQUEUE_INIT + +/* Define to use non-portable pthread TSD optimizations for Mac OS X) */ +#cmakedefine USE_APPLE_TSD_OPTIMIZATIONS + +/* Define to tag libdispatch_init as a constructor */ +#cmakedefine01 USE_LIBDISPATCH_INIT_CONSTRUCTOR + +/* Define to use Mach semaphores */ +#cmakedefine USE_MACH_SEM + +/* Define to use POSIX semaphores */ +#cmakedefine01 USE_POSIX_SEM + +/* Enable extensions on AIX 3, Interix. */ +#ifndef _ALL_SOURCE +#cmakedefine01 _ALL_SOURCE +#endif +/* Enable GNU extensions on systems that have them. */ +#ifndef _GNU_SOURCE +#cmakedefine01 _GNU_SOURCE +#endif +/* Enable threading extensions on Solaris. */ +#ifndef _POSIX_PTHREAD_SEMANTICS +#cmakedefine01 _POSIX_PTHREAD_SEMANTICS +#endif +/* Enable extensions on HP NonStop. */ +#ifndef _TANDEM_SOURCE +#cmakedefine01 _TANDEM_SOURCE +#endif +/* Enable general extensions on Solaris. */ +#ifndef __EXTENSIONS__ +#cmakedefine01 __EXTENSIONS__ +#endif + + +/* Version number of package */ +#define VERSION "${PROJECT_VERSION}" + +/* Define to 1 if on MINIX. 
*/ +#cmakedefine _MINIX + +/* Define to 2 if the system does not provide POSIX.1 features except with + this defined. */ +#cmakedefine _POSIX_1_SOURCE + +/* Define to 1 if you need to in order for `stat' and other things to work. */ +#cmakedefine _POSIX_SOURCE + +/* Define if using Darwin $NOCANCEL */ +#cmakedefine __DARWIN_NON_CANCELABLE diff --git a/cmake/modules/DTrace.cmake b/cmake/modules/DTrace.cmake new file mode 100644 index 000000000..20a28ccaa --- /dev/null +++ b/cmake/modules/DTrace.cmake @@ -0,0 +1,26 @@ + +function(dtrace_usdt_probe script) + set(options) + set(single_parameter_options TARGET_NAME OUTPUT_SOURCES) + set(multiple_parameter_options) + + cmake_parse_arguments("" "${options}" "${single_parameter_options}" "${multiple_parameter_options}" ${ARGN}) + + get_filename_component(script_we ${script} NAME_WE) + + add_custom_command(OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h + COMMAND + ${dtrace_EXECUTABLE} -h -s ${script} -o ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h + DEPENDS + ${script}) + add_custom_target(dtrace-usdt-header-${script_we} + DEPENDS + ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h) + if(_TARGET_NAME) + set(${_TARGET_NAME} dtrace-usdt-header-${script_we} PARENT_SCOPE) + endif() + if(_OUTPUT_SOURCES) + set(${_OUTPUT_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${script_we}.h PARENT_SCOPE) + endif() +endfunction() diff --git a/cmake/modules/DispatchAppleOptions.cmake b/cmake/modules/DispatchAppleOptions.cmake new file mode 100644 index 000000000..1f95f881e --- /dev/null +++ b/cmake/modules/DispatchAppleOptions.cmake @@ -0,0 +1,43 @@ + +set(WITH_APPLE_PTHREAD_SOURCE "" CACHE PATH "Path to Apple's libpthread") +set(WITH_APPLE_LIBPLATFORM_SOURCE "" CACHE PATH "Path to Apple's libplatform") +set(WITH_APPLE_LIBCLOSURE_SOURCE "" CACHE PATH "Path to Apple's libclosure") +set(WITH_APPLE_XNU_SOURCE "" CACHE PATH "Path to Apple's XNU") +set(WITH_APPLE_OBJC4_SOURCE "" CACHE PATH "Path to Apple's ObjC4") + +if(WITH_APPLE_PTHREAD_SOURCE) + 
include_directories(SYSTEM "${WITH_APPLE_PTHREAD_SOURCE}") +endif() +if(WITH_APPLE_LIBPLATFORM_SOURCE) + include_directories(SYSTEM "${WITH_APPLE_LIBPLATFORM_SOURCE}/include") +endif() +if(WITH_APPLE_LIBCLOSURE_SOURCE) + include_directories(SYSTEM "${WITH_APPLE_LIBCLOSURE_SOURCE}") +endif() +if(WITH_APPLE_XNU_SOURCE) + # FIXME(compnerd) this should use -idirafter + include_directories("${WITH_APPLE_XNU_SOURCE}/libkern") + include_directories(SYSTEM + "${WITH_APPLE_XNU_SOURCE}/bsd" + "${WITH_APPLE_XNU_SOURCE}/libsyscall" + "${WITH_APPLE_XNU_SOURCE}/libsyscall/wrappers/libproc") + + # hack for xnu/bsd/sys/event.h EVFILT_SOCK declaration + add_definitions(-DPRIVATE=1) +endif() + +if(IS_DIRECTORY "/System/Library/Frameworks/System.framework/PrivateHeaders") + include_directories(SYSTEM + "/System/Library/Frameworks/System.framework/PrivateHeaders") +endif() + +option(ENABLE_APPLE_TSD_OPTIMIZATIONS "use non-portable pthread TSD optimizations" OFF) +if(ENABLE_APPLE_TSD_OPTIMIZATIONS) + set(USE_APPLE_TSD_OPTIMIZATIONS 1) +else() + set(USE_APPLE_TSD_OPTIMIZATIONS 0) +endif() + +# TODO(compnerd) link in libpthread headers + + diff --git a/cmake/modules/FindLibRT.cmake b/cmake/modules/FindLibRT.cmake new file mode 100644 index 000000000..0a9f0d80e --- /dev/null +++ b/cmake/modules/FindLibRT.cmake @@ -0,0 +1,39 @@ +#.rst: +# FindLibRT +# --------- +# +# Find librt library and headers. 
+# +# The mdoule defines the following variables: +# +# :: +# +# LibRT_FOUND - true if librt was found +# LibRT_INCLUDE_DIR - include search path +# LibRT_LIBRARIES - libraries to link + +if(UNIX) + find_path(LibRT_INCLUDE_DIR + NAMES + time.h) + find_library(LibRT_LIBRARIES rt) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(LibRT + REQUIRED_VARS + LibRT_LIBRARIES + LibRT_INCLUDE_DIR) + + if(LibRT_FOUND) + if(NOT TARGET RT::rt) + add_library(RT::rt UNKNOWN IMPORTED) + set_target_properties(RT::rt + PROPERTIES + IMPORTED_LOCATION ${LibRT_LIBRARIES} + INTERFACE_INCLUDE_DIRECTORIES ${LibRT_INCLUDE_DIR}) + endif() + endif() + + mark_as_advanced(LibRT_LIBRARIES LibRT_INCLUDE_DIR) +endif() + diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake new file mode 100644 index 000000000..196593999 --- /dev/null +++ b/cmake/modules/SwiftSupport.cmake @@ -0,0 +1,69 @@ + +include(CMakeParseArguments) + +function(add_swift_library library) + set(options) + set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT) + set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS) + + cmake_parse_arguments(ASL "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) + + set(flags ${CMAKE_SWIFT_FLAGS}) + + list(APPEND flags -emit-library) + + if(ASL_MODULE_NAME) + list(APPEND flags -module-name;${ASL_MODULE_NAME}) + endif() + if(ASL_MODULE_LINK_NAME) + list(APPEND flags -module-link-name;${ASL_MODULE_LINK_NAME}) + endif() + if(ASL_MODULE_PATH) + list(APPEND flags -emit-module-path;${ASL_MODULE_PATH}) + endif() + if(ASL_MODULE_CACHE_PATH) + list(APPEND flags -module-cache-path;${ASL_MODULE_CACHE_PATH}) + endif() + if(ASL_SWIFT_FLAGS) + foreach(flag ${ASL_SWIFT_FLAGS}) + list(APPEND flags ${flag}) + endforeach() + endif() + if(ASL_CFLAGS) + foreach(flag ${ASL_CFLAGS}) + list(APPEND flags -Xcc;${flag}) + endforeach() + endif() + + # FIXME: We shouldn't /have/ to build things in a single 
process. + # + list(APPEND flags -force-single-frontend-invocation) + + set(sources) + foreach(source ${ASL_SOURCES}) + get_filename_component(location ${source} PATH) + if(IS_ABSOLUTE ${location}) + list(APPEND sources ${source}) + else() + list(APPEND sources ${CMAKE_CURRENT_SOURCE_DIR}/${source}) + endif() + endforeach() + + get_filename_component(module_directory ${ASL_MODULE_PATH} DIRECTORY) + + add_custom_command(OUTPUT + ${ASL_OUTPUT} + ${ASL_MODULE_PATH} + ${module_directory}/${ASL_MODULE_NAME}.swiftdoc + DEPENDS + ${ASL_SOURCES} + COMMAND + ${CMAKE_COMMAND} -E make_directory ${module_directory} + COMMAND + ${CMAKE_SWIFT_COMPILER} ${flags} -c ${sources} -o ${ASL_OUTPUT}) + add_custom_target(${library} + DEPENDS + ${ASL_OUTPUT} + ${ASL_MODULE_PATH} + ${module_directory}/${ASL_MODULE_NAME}.swiftdoc) +endfunction() diff --git a/config/config.h b/config/config.h index ca3a1dbb8..91d7cfe8e 100644 --- a/config/config.h +++ b/config/config.h @@ -5,10 +5,18 @@ you don't. */ #define HAVE_DECL_CLOCK_MONOTONIC 0 +/* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if + you don't. */ +#define CLOCK_REALTIME 0 + /* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you don't. */ #define HAVE_DECL_CLOCK_UPTIME 0 +/* Define to 1 if you have the declaration of `HAVE_DECL_CLOCK_UPTIME_FAST', + and to 0 if you don't. */ +#define HAVE_DECL_CLOCK_UPTIME_FAST 0 + /* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you don't. */ #define HAVE_DECL_FD_COPY 1 @@ -57,6 +65,14 @@ you don't. */ #define HAVE_DECL_VQ_QUOTA 1 +/* Define to 1 if you have the declaration of `VQ_NEARLOWDISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_NEARLOWDISK 1 + +/* Define to 1 if you have the declaration of `VQ_DESIRED_DISK', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_DESIRED_DISK 1 + /* Define to 1 if you have the header file. 
*/ #define HAVE_DLFCN_H 1 @@ -87,6 +103,9 @@ /* Define to 1 if you have the `mach_absolute_time' function. */ #define HAVE_MACH_ABSOLUTE_TIME 1 +/* Define to 1 if you have the `mach_approximate_time' function. */ +#define HAVE_MACH_APPROXIMATE_TIME 1 + /* Define to 1 if you have the `mach_port_construct' function. */ #define HAVE_MACH_PORT_CONSTRUCT 1 diff --git a/configure.ac b/configure.ac index e5c7c5ed7..8f38f0829 100644 --- a/configure.ac +++ b/configure.ac @@ -3,7 +3,7 @@ # AC_PREREQ(2.69) -AC_INIT([libdispatch], [1.3], [libdispatch@macosforge.org], [libdispatch], [http://libdispatch.macosforge.org]) +AC_INIT([libdispatch], [1.3], [https://bugs.swift.org], [libdispatch], [https://github.com/apple/swift-corelibs-libdispatch]) AC_REVISION([$$]) AC_CONFIG_AUX_DIR(config) AC_CONFIG_HEADER([config/config_ac.h]) @@ -11,11 +11,102 @@ AC_CONFIG_MACRO_DIR([m4]) ac_clean_files=a.out.dSYM AM_MAINTAINER_MODE +AC_CANONICAL_BUILD +AC_CANONICAL_HOST +AC_CANONICAL_TARGET + +# +# Command line argument to specify build variant (default to release). +# Impacts default value of CFLAGS et al. 
so must come before AC_PROG_CC +# +AC_ARG_WITH([build-variant], + [AS_HELP_STRING([--with-build-variant=release|debug|releaseassert|releasedebuginfo], [Specify build variant [default=release]])], + [dispatch_build_variant=${withval}], + [dispatch_build_variant=release] +) +AS_CASE([$dispatch_build_variant], + [debug], [ + default_compiler_flags="-g -O0" + dispatch_enable_asserts=true + dispatch_enable_optimization=false + ], + [release], [ + default_compiler_flags="-O2" + dispatch_enable_asserts=false + dispatch_enable_optimization=true + ], + [releaseassert], [ + default_compiler_flags="-O2" + dispatch_enable_asserts=true + dispatch_enable_optimization=true + ], + [releasedebuginfo], [ + default_compiler_flags="-g -O2" + dispatch_enable_asserts=false + dispatch_enable_optimization=true + ], + [AC_MSG_ERROR("invalid build-variant $dispatch_build_variant")] +) +AM_CONDITIONAL(DISPATCH_ENABLE_ASSERTS, $dispatch_enable_asserts) +AM_CONDITIONAL(DISPATCH_ENABLE_OPTIMIZATION, $dispatch_enable_optimization) + +: ${CFLAGS=$default_compiler_flags} +: ${CXXFLAGS=$default_compiler_flags} +: ${OBJCFLAGS=$default_compiler_flags} +: ${OBJCXXFLAGS=$default_compiler_flags} + AC_PROG_CC([clang gcc cc]) AC_PROG_CXX([clang++ g++ c++]) AC_PROG_OBJC([clang gcc cc]) AC_PROG_OBJCXX([clang++ g++ c++]) +# +# Android cross-compilation support +# +AC_ARG_WITH([android-ndk], + [AS_HELP_STRING([--with-android-ndk], + [Android NDK location])], [ + android_ndk=${withval} +]) +AC_ARG_WITH([android-ndk-gcc-version], + [AS_HELP_STRING([--with-android-ndk-gcc-version], + [Android NDK GCC version [defaults=4.9]])], + [android_ndk_gcc_version=${withval}], [android_ndk_gcc_version=4.9]) +AC_ARG_WITH([android-api-level], + [AS_HELP_STRING([--with-android-api-level], + [Android API level to link with])], [ + android_api_level=${withval} +]) +AC_ARG_ENABLE([android], + [AS_HELP_STRING([--enable-android], + [Compile for Android])], [ + android=true + + # Override values until there's real support for 
multiple Android platforms + host=armv7-none-linux-androideabi + host_alias=arm-linux-androideabi + host_cpu=armv7 + host_os=linux-androideabi + host_vendor=unknown + arch=arm + + sysroot=${android_ndk}/platforms/android-${android_api_level}/arch-${arch} + toolchain=${android_ndk}/toolchains/${host_alias}-${android_ndk_gcc_version}/prebuilt/linux-${build_cpu} + + CFLAGS="$CFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin" + CXXFLAGS="$CXXFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin" + SWIFTC_FLAGS="-target ${host} -sdk ${sysroot} -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x" + LIBS="$LIBS -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x" + LDFLAGS="$LDFLAGS -Wc,'-target','${host_alias}','-B${toolchain}/${host_alias}/bin'" + + # FIXME: empty CFLAGS and CXXFLAGS are assumed for this to work. + # FIXME: there should be a more elegant way to do this + ac_configure_args=`echo $ac_configure_args | sed -e "s/ 'CFLAGS='//" -e "s/ 'CXXFLAGS='//"` + # CFLAGS, CXXFLAGS and LIBS needs to be passed to libkqueue and libpwq + ac_configure_args="$ac_configure_args --enable-bionic-libc 'CFLAGS=$CFLAGS' 'CXXFLAGS=$CXXFLAGS' 'LIBS=$LIBS'" +], [android=false]) +AM_CONDITIONAL(ANDROID, $android) + # # On Mac OS X, some required header files come from other source packages; # allow specifying where those are. 
@@ -34,13 +125,6 @@ AC_ARG_WITH([apple-libplatform-source], CPPFLAGS="$CPPFLAGS -isystem $apple_libplatform_source_include_path" ]) -AC_ARG_WITH([apple-libclosure-source], - [AS_HELP_STRING([--with-apple-libclosure-source], - [Specify path to Apple libclosure source])], [ - apple_libclosure_source_path=${withval} - CPPFLAGS="$CPPFLAGS -isystem $apple_libclosure_source_path" -]) - AC_ARG_WITH([apple-xnu-source], [AS_HELP_STRING([--with-apple-xnu-source], [Specify path to Apple XNU source])], [ @@ -52,12 +136,6 @@ AC_ARG_WITH([apple-xnu-source], CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path -isystem $apple_xnu_source_libsyscall_path -isystem $apple_xnu_source_libproc_path " ]) -AC_ARG_WITH([apple-objc4-source], - [AS_HELP_STRING([--with-apple-objc4-source], - [Specify path to Apple objc4 source])], [ - apple_objc4_source_runtime_path=${withval}/runtime -]) - AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])] @@ -94,8 +172,6 @@ AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], [Define to use non-portable pthread TSD optimizations for Mac OS X)])] ) -AC_CANONICAL_TARGET - # # Enable building Swift overlay support into libdispatch # @@ -105,8 +181,17 @@ AC_ARG_WITH([swift-toolchain], AC_DEFINE(HAVE_SWIFT, 1, [Define if building for Swift]) SWIFTC="$swift_toolchain_path/bin/swiftc" case $target_os in + *android*) + os_string="android" + ;; linux*) os_string="linux" + case $target_cpu in + armv7l*) + target_cpu="armv7" + ;; + *) + esac ;; *) os_string=$target_os @@ -118,7 +203,9 @@ AC_ARG_WITH([swift-toolchain], ) AM_CONDITIONAL(HAVE_SWIFT, $have_swift) AC_SUBST([SWIFTC]) +AC_SUBST([SWIFTC_FLAGS]) AC_SUBST([SWIFT_LIBDIR]) +AC_SUBST([OS_STRING], ["$os_string"]) # # Enable use of gold linker when building the Swift 
overlay @@ -128,6 +215,18 @@ AC_SUBST([SWIFT_LIBDIR]) AC_CHECK_PROG(use_gold_linker, ld.gold, true, false) AM_CONDITIONAL(USE_GOLD_LINKER, $use_gold_linker) +# +# Enable an extended test suite that includes +# tests that are too unreliable to be enabled by +# default in the Swift CI environment, but are still +# useful for libdispatch developers to be able to run. +# +AC_ARG_ENABLE([extended-test-suite], + [AS_HELP_STRING([--enable-extended-test-suite], + [Include additional test cases that may fail intermittently])] +) +AM_CONDITIONAL(EXTENDED_TEST_SUITE, test "x$enable_extended_test_suite" = "xyes") + # # Enable __thread based TSD on platforms where it is efficient # Allow override based on command line argument to configure @@ -183,13 +282,6 @@ esac AC_SEARCH_LIBS(clock_gettime, rt) AC_SEARCH_LIBS(pthread_create, pthread) -# -# Prefer native kqueue(2); otherwise use libkqueue if present. -# -AC_CHECK_HEADER(sys/event.h, [], - [PKG_CHECK_MODULES(KQUEUE, libkqueue)] -) - AC_CHECK_FUNCS([strlcpy getprogname], [], [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ AC_DEFINE(HAVE_STRLCPY, 1, []) @@ -215,22 +307,35 @@ AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_pa AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h]) # pthread_workqueues. -# Look for own version first, then system version. -AS_IF([test -f $srcdir/libpwq/configure.ac], - [AC_DEFINE(BUILD_OWN_PTHREAD_WORKQUEUES, 1, [Define if building pthread work queues from source]) - ac_configure_args="--disable-libpwq-install $ac_configure_args" - AC_CONFIG_SUBDIRS([libpwq]) - build_own_pthread_workqueues=true - AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) - have_pthread_workqueues=true], - [build_own_pthread_workqueues=false - AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], +# We can either use libdispatch's internal_workqueue or pthread_workqueue. 
+# If not specifically configured, default to internal_workqueues on +# Linux and pthread_workqueue on all other platforms. +# On any platform, if pthread_workqueue is not available, fall back +# to using internal_workqueue. +AC_ARG_ENABLE([internal-libpwq], + [AS_HELP_STRING([--enable-internal-libpwq], + [Use libdispatch's own implementation of pthread workqueues.])],, + [case $target_os in + linux*) + enable_internal_libpwq=yes + ;; + *) + enable_internal_libpwq=no + esac] +) +AS_IF([test "x$enable_internal_libpwq" = "xyes"], + [AC_DEFINE(DISPATCH_USE_INTERNAL_WORKQUEUE, 1, [Use libdispatch's own implementation of pthread workqueues]) + have_pthread_workqueues=false, + dispatch_use_internal_workqueue=true], + [AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) - have_pthread_workqueues=true], - [have_pthread_workqueues=false] - )] + have_pthread_workqueues=true, + dispatch_use_internal_workqueue=false], + [have_pthread_workqueues=false, + dispatch_use_internal_workqueue=true] + )] ) -AM_CONDITIONAL(BUILD_OWN_PTHREAD_WORKQUEUES, $build_own_pthread_workqueues) +AM_CONDITIONAL(DISPATCH_USE_INTERNAL_WORKQUEUE, $dispatch_use_internal_workqueue) AM_CONDITIONAL(HAVE_PTHREAD_WORKQUEUES, $have_pthread_workqueues) AC_CHECK_HEADERS([libproc_internal.h], [], [], [#include ]) @@ -269,24 +374,10 @@ AC_CHECK_HEADER([Foundation/Foundation.h], [have_foundation=true], [have_foundation=false] ) AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation) -# hack for objc4/runtime/objc-internal.h -AS_IF([test -n "$apple_objc4_source_runtime_path"], [ - saveCPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS -I." 
- ln -fsh "$apple_objc4_source_runtime_path" objc -]) -AC_CHECK_HEADER([objc/objc-internal.h], [ +AC_CHECK_HEADER([objc/NSObject.h], [ AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime]) - have_objc=true], [have_objc=false], - [#include ] + have_objc=true], [have_objc=false] ) -AS_IF([test -n "$apple_objc4_source_runtime_path"], [ - rm -f objc - CPPFLAGS="$saveCPPFLAGS" - AC_CONFIG_COMMANDS([src/objc], - [ln -fsh "$apple_objc4_source_runtime_path" src/objc], - [apple_objc4_source_runtime_path="$apple_objc4_source_runtime_path"]) -]) AM_CONDITIONAL(USE_OBJC, $have_objc) AC_LANG_POP([Objective C]) @@ -305,15 +396,15 @@ AC_CHECK_FUNCS([mach_port_construct]) # # Find functions and declarations we care about. # -AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC], [], [], +AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_UPTIME_FAST], [], [], [[#include ]]) AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [], [[#include ]]) AC_CHECK_DECLS([FD_COPY], [], [], [[#include ]]) AC_CHECK_DECLS([SIGEMT], [], [], [[#include ]]) -AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA], [], [], [[#include ]]) +AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include ]]) AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include ]]) -AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time malloc_create_zone sysconf]) +AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time mach_approximate_time malloc_create_zone sysconf]) AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], @@ -325,6 +416,11 @@ AC_CHECK_FUNC([sem_init], [have_sem_init=true], [have_sem_init=false] ) +AC_CHECK_HEADER([linux/futex.h], [ + AC_DEFINE(HAVE_FUTEX, 1, [Define if linux/futex.h is present]) + have_futex=true], [have_futex=false] +) + # # We support both Mach semaphores and POSIX 
semaphores; if the former are # available, prefer them. @@ -384,6 +480,20 @@ AS_IF([test "x$have_mach" = "xtrue"], [ ]) AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"]) +# +# symlink platform-specific module.modulemap files +# +AS_CASE([$target_os], + [darwin*], [ dispatch_module_map_os=darwin ], + [ dispatch_module_map_os=generic ] +) +AC_CONFIG_COMMANDS([modulemaps], [ + ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/dispatch/module.modulemap + ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/private/module.modulemap + ], + [dispatch_module_map_os="$dispatch_module_map_os"] +) + # # Temporary: some versions of clang do not mark __builtin_trap() as # __attribute__((__noreturn__)). Detect and add if required. @@ -393,6 +503,13 @@ AC_COMPILE_IFELSE( [AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn])] ) +# +# Add option to avoid building tests +# +AC_ARG_ENABLE([build-tests], + [AS_HELP_STRING([--disable-build-tests], [Disable tests compilation])]) +AM_CONDITIONAL(BUILD_TESTS, [test "x$enable_build_tests" != "xno"]) + # # Generate Makefiles. 
# @@ -401,6 +518,6 @@ AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Mak # # Generate testsuite links # -AC_CONFIG_LINKS([tests/dispatch:$top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh]) +AC_CONFIG_LINKS([tests/dispatch:$ac_top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh]) AC_OUTPUT diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt new file mode 100644 index 000000000..dbfb866a8 --- /dev/null +++ b/dispatch/CMakeLists.txt @@ -0,0 +1,24 @@ + +install(FILES + base.h + block.h + data.h + dispatch.h + group.h + introspection.h + io.h + object.h + once.h + queue.h + semaphore.h + source.h + time.h + DESTINATION + ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch/) +if(ENABLE_SWIFT) + install(FILES + module.modulemap + DESTINATION + ${CMAKE_INSTALL_FULL_INCLUEDIR}/dispatch/) +endif() + diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am index 53ea5986c..89fd3daf0 100644 --- a/dispatch/Makefile.am +++ b/dispatch/Makefile.am @@ -24,5 +24,5 @@ dispatch_HEADERS= \ time.h if HAVE_SWIFT -dispatch_HEADERS+=module.map +dispatch_HEADERS+=module.modulemap endif diff --git a/dispatch/base.h b/dispatch/base.h index 8adfb0bdb..4c82b010c 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -204,11 +204,14 @@ #endif #if __has_feature(enumerator_attributes) -#define DISPATCH_ENUM_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING -#define DISPATCH_ENUM_AVAILABLE(os, version) __##os##_AVAILABLE(version) +#define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__) +#define DISPATCH_ENUM_API_DEPRECATED(...) API_DEPRECATED(__VA_ARGS__) +#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) \ + API_DEPRECATED_WITH_REPLACEMENT(__VA_ARGS__) #else -#define DISPATCH_ENUM_AVAILABLE_STARTING(...) -#define DISPATCH_ENUM_AVAILABLE(...) +#define DISPATCH_ENUM_API_AVAILABLE(...) +#define DISPATCH_ENUM_API_DEPRECATED(...) +#define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) 
#endif #if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ @@ -243,6 +246,12 @@ #define DISPATCH_SWIFT_NAME(_name) #endif +#ifndef __cplusplus +#define DISPATCH_TRANSPARENT_UNION __attribute__((__transparent_union__)) +#else +#define DISPATCH_TRANSPARENT_UNION +#endif + typedef void (*dispatch_function_t)(void *_Nullable); #endif diff --git a/dispatch/block.h b/dispatch/block.h index cd56b230d..cbdcb5eff 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -101,17 +101,17 @@ __BEGIN_DECLS */ DISPATCH_ENUM(dispatch_block_flags, unsigned long, DISPATCH_BLOCK_BARRIER - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1, DISPATCH_BLOCK_DETACHED - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x2, DISPATCH_BLOCK_ASSIGN_CURRENT - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x4, DISPATCH_BLOCK_NO_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x8, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x8, DISPATCH_BLOCK_INHERIT_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x10, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x10, DISPATCH_BLOCK_ENFORCE_QOS_CLASS - DISPATCH_ENUM_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x20, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x20, ); /*! @@ -164,7 +164,7 @@ DISPATCH_ENUM(dispatch_block_flags, unsigned long, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -236,7 +236,7 @@ dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -269,7 +269,7 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, * @param block * The block to create the temporary block object from. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW void dispatch_block_perform(dispatch_block_flags_t flags, @@ -320,7 +320,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, * Returns zero on success (the dispatch block object completed within the * specified timeout) or non-zero on error (i.e. timed out). */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW long dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); @@ -361,7 +361,7 @@ dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); * @param notification_block * The notification block to submit when the observed block object completes. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, @@ -393,7 +393,7 @@ dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, * The result of passing NULL or a block object not returned by one of the * dispatch_block_create* functions is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_block_cancel(dispatch_block_t block); @@ -412,7 +412,7 @@ dispatch_block_cancel(dispatch_block_t block); * @result * Non-zero if canceled and zero if not canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW long diff --git a/dispatch/module.modulemap b/dispatch/darwin/module.modulemap similarity index 88% rename from dispatch/module.modulemap rename to dispatch/darwin/module.modulemap index addaae436..e30807f91 100644 --- a/dispatch/module.modulemap +++ b/dispatch/darwin/module.modulemap @@ -1,6 +1,5 @@ module Dispatch [system] [extern_c] { umbrella header "dispatch.h" - module * { export * } export * } diff --git a/dispatch/data.h b/dispatch/data.h index 7ceee0647..33a0c9d51 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -50,7 +50,7 @@ DISPATCH_DATA_DECL(dispatch_data); */ #define dispatch_data_empty \ DISPATCH_GLOBAL_OBJECT(dispatch_data_t, _dispatch_data_empty) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; /*! @@ -83,7 +83,7 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; * was allocated by the malloc() family and should be destroyed with free(3). 
*/ #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); /*! @@ -92,7 +92,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); * from buffers that require deallocation with munmap(2). */ #define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap) -__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); #ifdef __BLOCKS__ @@ -117,7 +117,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); * is no longer needed. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create(const void *buffer, @@ -134,7 +134,7 @@ dispatch_data_create(const void *buffer, * @param data The dispatch data object to query. * @result The number of bytes represented by the data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW size_t dispatch_data_get_size(dispatch_data_t data); @@ -158,7 +158,7 @@ dispatch_data_get_size(dispatch_data_t data); * size of the mapped contiguous memory region, or NULL. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -181,7 +181,7 @@ dispatch_data_create_map(dispatch_data_t data, * @result A newly created object representing the concatenation of the * data1 and data2 objects. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -202,7 +202,7 @@ dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); * @result A newly created object representing the specified * subrange of the data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -247,10 +247,11 @@ typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, * @result A Boolean indicating whether traversal completed * successfully. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool -dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); +dispatch_data_apply(dispatch_data_t data, + DISPATCH_NOESCAPE dispatch_data_applier_t applier); #endif /* __BLOCKS__ */ /*! @@ -267,7 +268,7 @@ dispatch_data_apply(dispatch_data_t data, dispatch_data_applier_t applier); * start of the queried data object. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index a26b95107..2d45b8356 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -23,37 +23,32 @@ #ifdef __APPLE__ #include +#include #include -#else -#define __OSX_AVAILABLE_STARTING(x, y) -#define __OSX_AVAILABLE_BUT_DEPRECATED(...) -#define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(...) -#define __OSX_AVAILABLE(...) -#define __IOS_AVAILABLE(...) -#define __TVOS_AVAILABLE(...) -#define __WATCHOS_AVAILABLE(...) 
-#define __OSX_DEPRECATED(...) -#define __IOS_DEPRECATED(...) -#define __TVOS_DEPRECATED(...) -#define __WATCHOS_DEPRECATED(...) -#endif // __APPLE__ +#include +#elif defined(__linux__) +#include +#endif -#include #include #include #include #include #include +#if !defined(HAVE_UNISTD_H) || HAVE_UNISTD_H #include +#endif #include #if defined(__linux__) && defined(__has_feature) #if __has_feature(modules) +#if !defined(__arm__) #include // for off_t (to match Glibc.modulemap) #endif #endif +#endif -#define DISPATCH_API_VERSION 20160712 +#define DISPATCH_API_VERSION 20170124 #ifndef __DISPATCH_BUILDING_DISPATCH__ diff --git a/dispatch/module.map b/dispatch/generic/module.modulemap similarity index 80% rename from dispatch/module.map rename to dispatch/generic/module.modulemap index 6f3c8aab8..8c3e7d016 100644 --- a/dispatch/module.map +++ b/dispatch/generic/module.modulemap @@ -2,7 +2,6 @@ module Dispatch { requires blocks export * link "dispatch" - link "BlocksRuntime" } module DispatchIntrospection [system] [extern_c] { @@ -12,9 +11,7 @@ module DispatchIntrospection [system] [extern_c] { module CDispatch [system] [extern_c] { umbrella header "dispatch.h" - module * { export * } export * requires blocks link "dispatch" - link "BlocksRuntime" } diff --git a/dispatch/group.h b/dispatch/group.h index c50ad89d1..8d74ada2e 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -51,7 +51,7 @@ __BEGIN_DECLS * @result * The newly created group, or NULL on failure. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_group_t @@ -81,7 +81,7 @@ dispatch_group_create(void); * The block to perform asynchronously. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_async(dispatch_group_t group, @@ -115,7 +115,7 @@ dispatch_group_async(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_async_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -158,7 +158,7 @@ dispatch_group_async_f(dispatch_group_t group, * Returns zero on success (all blocks associated with the group completed * within the specified timeout) or non-zero on error (i.e. timed out). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); @@ -194,7 +194,7 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); * The block to submit when the group completes. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_notify(dispatch_group_t group, @@ -224,7 +224,7 @@ dispatch_group_notify(dispatch_group_t group, * parameter passed to this function is the context provided to * dispatch_group_notify_f(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -248,7 +248,7 @@ dispatch_group_notify_f(dispatch_group_t group, * The dispatch group to update. * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_enter(dispatch_group_t group); @@ -267,7 +267,7 @@ dispatch_group_enter(dispatch_group_t group); * The dispatch group to update. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_group_leave(dispatch_group_t group); diff --git a/dispatch/introspection.h b/dispatch/introspection.h index 9cfb4d1c0..ea7dcd8f5 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -49,7 +49,7 @@ __BEGIN_DECLS * The newly created dispatch queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_create(dispatch_queue_t queue); @@ -65,7 +65,7 @@ dispatch_introspection_hook_queue_create(dispatch_queue_t queue); * The dispatch queue about to be destroyed. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); @@ -84,7 +84,7 @@ dispatch_introspection_hook_queue_destroy(dispatch_queue_t queue); * The object about to be enqueued. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, @@ -104,7 +104,7 @@ dispatch_introspection_hook_queue_item_enqueue(dispatch_queue_t queue, * The dequeued object. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, @@ -126,7 +126,7 @@ dispatch_introspection_hook_queue_item_dequeue(dispatch_queue_t queue, * Opaque dentifier for completed item. Must NOT be dereferenced. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_7_1) +API_AVAILABLE(macos(10.10), ios(7.1)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_item_complete(dispatch_object_t item); @@ -150,7 +150,7 @@ dispatch_introspection_hook_queue_item_complete(dispatch_object_t item); * this is the block object's invoke function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, @@ -175,7 +175,7 @@ dispatch_introspection_hook_queue_callout_begin(dispatch_queue_t queue, * this is the block object's invoke function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, diff --git a/dispatch/io.h b/dispatch/io.h index 5814bc0f7..a9e6892e5 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -102,7 +102,7 @@ typedef int dispatch_fd_t; * param error An errno condition for the read operation or * zero if the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_read(dispatch_fd_t fd, @@ -140,7 +140,7 @@ dispatch_read(dispatch_fd_t fd, * param error An errno condition for the write operation or * zero if the write was successful. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW void @@ -211,7 +211,7 @@ typedef unsigned long dispatch_io_type_t; * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -247,7 +247,7 @@ dispatch_io_create(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type or non-absolute path specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -287,7 +287,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -349,7 +349,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, * param error An errno condition for the read operation or zero if * the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -402,7 +402,7 @@ dispatch_io_read(dispatch_io_t channel, * param error An errno condition for the write operation or zero * if the write was successful. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -441,7 +441,7 @@ typedef unsigned long dispatch_io_close_flags_t; * @param channel The dispatch I/O channel to close. * @param flags The flags for the close operation. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); @@ -468,7 +468,7 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); * @param channel The dispatch I/O channel to schedule the barrier on. * @param barrier The barrier block. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); @@ -488,7 +488,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); * @param channel The dispatch I/O channel to query. * @result The file descriptor underlying the channel, or -1. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_fd_t dispatch_io_get_descriptor(dispatch_io_t channel); @@ -509,7 +509,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel); * @param channel The dispatch I/O channel on which to set the policy. * @param high_water The number of bytes to use as a high water mark. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); @@ -540,7 +540,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); * @param channel The dispatch I/O channel on which to set the policy. * @param low_water The number of bytes to use as a low water mark. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water); @@ -579,7 +579,7 @@ typedef unsigned long dispatch_io_interval_flags_t; * @param flags Flags indicating desired data delivery behavior at * interval time. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_io_set_interval(dispatch_io_t channel, diff --git a/dispatch/object.h b/dispatch/object.h index 8b2030138..3ff36c2d3 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -92,14 +92,13 @@ typedef union { struct dispatch_source_s *_ds; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; - struct dispatch_timer_aggregate_s *_dta; struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; struct dispatch_data_s *_ddata; struct dispatch_io_s *_dchannel; struct dispatch_operation_s *_doperation; struct dispatch_disk_s *_ddisk; -} dispatch_object_t __attribute__((__transparent_union__)); +} dispatch_object_t DISPATCH_TRANSPARENT_UNION; /*! @parseOnly */ #define DISPATCH_DECL(name) typedef struct name##_s *name##_t /*! @parseOnly */ @@ -201,7 +200,7 @@ __BEGIN_DECLS * The object to retain. * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void @@ -229,7 +228,7 @@ dispatch_retain(dispatch_object_t object); * The object to release. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT_UNAVAILABLE("Can't be used with ARC") void @@ -253,7 +252,7 @@ dispatch_release(dispatch_object_t object); * @result * The context of the object; may be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable @@ -272,7 +271,7 @@ dispatch_get_context(dispatch_object_t object); * The new client defined context for the object. This may be NULL. * */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_context(dispatch_object_t object, void *_Nullable context); @@ -298,7 +297,7 @@ dispatch_set_context(dispatch_object_t object, void *_Nullable context); * The context parameter passed to the finalizer function is the current * context of the dispatch object at the time the finalizer call is made. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_finalizer_f(dispatch_object_t object, @@ -326,8 +325,7 @@ dispatch_set_finalizer_f(dispatch_object_t object, * The object to be activated. * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_activate(dispatch_object_t object); @@ -350,7 +348,7 @@ dispatch_activate(dispatch_object_t object); * The object to be suspended. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_suspend(dispatch_object_t object); @@ -379,7 +377,7 @@ dispatch_suspend(dispatch_object_t object); * The object to be resumed. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); @@ -541,13 +539,13 @@ dispatch_testcancel(void *object); * @param message * The message to log above and beyond the introspection. 
*/ -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW __attribute__((__format__(printf,2,0))) void diff --git a/dispatch/once.h b/dispatch/once.h index a8f56441c..37a49506d 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -40,6 +40,14 @@ __BEGIN_DECLS DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") typedef long dispatch_once_t; +#if defined(__x86_64__) || defined(__i386__) || defined(__s390x__) +#define DISPATCH_ONCE_INLINE_FASTPATH 1 +#elif defined(__APPLE__) +#define DISPATCH_ONCE_INLINE_FASTPATH 1 +#else +#define DISPATCH_ONCE_INLINE_FASTPATH 0 +#endif + /*! * @function dispatch_once * @@ -58,13 +66,14 @@ typedef long dispatch_once_t; * initialized by the block. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void dispatch_once(dispatch_once_t *predicate, DISPATCH_NOESCAPE dispatch_block_t block); +#if DISPATCH_ONCE_INLINE_FASTPATH DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void @@ -81,14 +90,16 @@ _dispatch_once(dispatch_once_t *predicate, #undef dispatch_once #define dispatch_once _dispatch_once #endif +#endif // DISPATCH_ONCE_INLINE_FASTPATH -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") void dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, dispatch_function_t function); +#if DISPATCH_ONCE_INLINE_FASTPATH DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") @@ -105,6 +116,7 @@ _dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, } #undef dispatch_once_f #define dispatch_once_f _dispatch_once_f +#endif // DISPATCH_ONCE_INLINE_FASTPATH __END_DECLS diff --git a/dispatch/queue.h b/dispatch/queue.h index 264c34418..8dab75f9d 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -103,7 +103,7 @@ __BEGIN_DECLS * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_async(dispatch_queue_t queue, dispatch_block_t block); @@ -133,7 +133,7 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block); * dispatch_async_f(). 
* The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, @@ -171,7 +171,7 @@ dispatch_async_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); @@ -199,22 +199,56 @@ dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); * dispatch_sync_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); + +#if !defined(__APPLE__) || TARGET_OS_WATCH || TARGET_OS_TV || \ + (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \ + __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_0) || \ + (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ + __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_9) +#define DISPATCH_APPLY_AUTO_AVAILABLE 1 +#else +#define DISPATCH_APPLY_AUTO_AVAILABLE 0 +#endif + +/*! + * @constant DISPATCH_APPLY_AUTO + * + * @abstract + * Constant to pass to dispatch_apply() or dispatch_apply_f() to request that + * the system automatically use worker threads that match the configuration of + * the current thread as closely as possible. + * + * @discussion + * When submitting a block for parallel invocation, passing this constant as the + * queue argument will automatically use the global concurrent queue that + * matches the Quality of Service of the caller most closely. 
+ * + * No assumptions should be made about which global concurrent queue will + * actually be used. + * + * Using this constant deploys backward to macOS 10.9, iOS 7.0 and any tvOS or + * watchOS version. + */ +#if DISPATCH_APPLY_AUTO_AVAILABLE +#define DISPATCH_APPLY_AUTO ((dispatch_queue_t _Nonnull)0) +#endif + /*! * @function dispatch_apply * * @abstract - * Submits a block to a dispatch queue for multiple invocations. + * Submits a block to a dispatch queue for parallel invocation. * * @discussion - * Submits a block to a dispatch queue for multiple invocations. This function - * waits for the task block to complete before returning. If the target queue + * Submits a block to a dispatch queue for parallel invocation. This function + * waits for the task block to complete before returning. If the specified queue * is concurrent, the block may be invoked concurrently, and it must therefore * be reentrant safe. * @@ -224,15 +258,16 @@ dispatch_sync_f(dispatch_queue_t queue, * The number of iterations to perform. * * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. + * The dispatch queue to which the block is submitted. + * The preferred value to pass is DISPATCH_APPLY_AUTO to automatically use + * a queue appropriate for the calling thread. * * @param block * The block to be invoked the specified number of iterations. * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t queue, @@ -243,7 +278,7 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, * @function dispatch_apply_f * * @abstract - * Submits a function to a dispatch queue for multiple invocations. + * Submits a function to a dispatch queue for parallel invocation. 
* * @discussion * See dispatch_apply() for details. @@ -252,20 +287,21 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, * The number of iterations to perform. * * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. + * The dispatch queue to which the function is submitted. + * The preferred value to pass is DISPATCH_APPLY_AUTO to automatically use + * a queue appropriate for the calling thread. * * @param context * The application-defined context parameter to pass to the function. * * @param work - * The application-defined function to invoke on the target queue. The first + * The application-defined function to invoke on the specified queue. The first * parameter passed to this function is the context provided to * dispatch_apply_f(). The second parameter passed to this function is the * current index of iteration. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t queue, @@ -301,12 +337,12 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t queue, * @result * Returns the current queue. */ -__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_6,__MAC_10_9,__IPHONE_4_0,__IPHONE_6_0) +API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_current_queue(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; /*! @@ -415,7 +451,7 @@ typedef unsigned int dispatch_qos_class_t; * Returns the requested global queue or NULL if the requested global queue * does not exist. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_get_global_queue(long identifier, unsigned long flags); @@ -454,7 +490,7 @@ DISPATCH_DECL(dispatch_queue_attr); #define DISPATCH_QUEUE_CONCURRENT \ DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \ _dispatch_queue_attr_concurrent) -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; @@ -498,8 +534,7 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * The new value combines the attributes specified by the 'attr' parameter with * the initially inactive attribute. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_initially_inactive( @@ -556,21 +591,9 @@ dispatch_queue_attr_make_initially_inactive( * asynchronously. This is the behavior of the global concurrent queues. 
*/ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, - DISPATCH_AUTORELEASE_FREQUENCY_INHERIT - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 0, - DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 1, - DISPATCH_AUTORELEASE_FREQUENCY_NEVER - DISPATCH_ENUM_AVAILABLE(OSX, 10.12) - DISPATCH_ENUM_AVAILABLE(IOS, 10.0) - DISPATCH_ENUM_AVAILABLE(TVOS, 10.0) - DISPATCH_ENUM_AVAILABLE(WATCHOS, 3.0) = 2, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0, + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 1, + DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 2, ); /*! @@ -610,8 +633,7 @@ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, * This new value combines the attributes specified by the 'attr' parameter and * the chosen autorelease frequency. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_autorelease_frequency( @@ -671,7 +693,7 @@ dispatch_queue_attr_make_with_autorelease_frequency( * The new value combines the attributes specified by the 'attr' parameter and * the new QOS class and relative priority. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, @@ -736,8 +758,7 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, * @result * The newly created dispatch queue. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -788,7 +809,7 @@ dispatch_queue_create_with_target(const char *_Nullable label, * @result * The newly created dispatch queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -818,7 +839,7 @@ dispatch_queue_create(const char *_Nullable label, * @result * The label of the queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW const char * dispatch_queue_get_label(dispatch_queue_t _Nullable queue); @@ -857,7 +878,7 @@ dispatch_queue_get_label(dispatch_queue_t _Nullable queue); * - QOS_CLASS_BACKGROUND * - QOS_CLASS_UNSPECIFIED */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW dispatch_qos_class_t dispatch_queue_get_qos_class(dispatch_queue_t queue, @@ -922,7 +943,7 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * If queue is DISPATCH_TARGET_QUEUE_DEFAULT, set the object's target queue * to the default target queue for the given object type. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_set_target_queue(dispatch_object_t object, @@ -941,7 +962,7 @@ dispatch_set_target_queue(dispatch_object_t object, * Applications that call NSApplicationMain() or CFRunLoopRun() on the * main thread do not need to call dispatch_main(). */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN void dispatch_main(void); @@ -969,7 +990,7 @@ dispatch_main(void); * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_after(dispatch_time_t when, @@ -1002,7 +1023,7 @@ dispatch_after(dispatch_time_t when, * dispatch_after_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_after_f(dispatch_time_t when, @@ -1049,7 +1070,7 @@ dispatch_after_f(dispatch_time_t when, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); @@ -1083,7 +1104,7 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); * dispatch_barrier_async_f(). * The result of passing NULL in this parameter is undefined. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_async_f(dispatch_queue_t queue, @@ -1111,7 +1132,7 @@ dispatch_barrier_async_f(dispatch_queue_t queue, * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_barrier_sync(dispatch_queue_t queue, @@ -1143,7 +1164,7 @@ dispatch_barrier_sync(dispatch_queue_t queue, * dispatch_barrier_sync_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_sync_f(dispatch_queue_t queue, @@ -1186,7 +1207,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, * The destructor function pointer. This may be NULL and is ignored if context * is NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, @@ -1215,7 +1236,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, * @result * The context for the specified key or NULL if no context was found. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable @@ -1242,7 +1263,7 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); * @result * The context for the specified key or NULL if no context was found. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_5_0) +API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW void *_Nullable dispatch_get_specific(const void *key); @@ -1296,8 +1317,7 @@ dispatch_get_specific(const void *key); * The dispatch queue that the current block is expected to run on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue(dispatch_queue_t queue) @@ -1323,8 +1343,7 @@ dispatch_assert_queue(dispatch_queue_t queue) * The dispatch queue that the current block is expected to run as a barrier on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue_barrier(dispatch_queue_t queue); @@ -1347,8 +1366,7 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue); * The dispatch queue that the current block is expected not to run on. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue_not(dispatch_queue_t queue) diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index b6139d70d..f5394b45d 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -57,7 +57,7 @@ __BEGIN_DECLS * @result * The newly created semaphore, or NULL on failure. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_semaphore_t @@ -83,7 +83,7 @@ dispatch_semaphore_create(long value); * @result * Returns zero on success, or non-zero if the timeout occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); @@ -105,7 +105,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); * This function returns non-zero if a thread is woken. Otherwise, zero is * returned. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW long dispatch_semaphore_signal(dispatch_semaphore_t dsema); diff --git a/dispatch/source.h b/dispatch/source.h index 63b3ff365..6992d4226 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -79,7 +79,7 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(data_add); /*! @@ -90,9 +90,24 @@ DISPATCH_SOURCE_TYPE_DECL(data_add); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(data_or); +/*! + * @const DISPATCH_SOURCE_TYPE_DATA_REPLACE + * @discussion A dispatch source that tracks data obtained via calls to + * dispatch_source_merge_data(). 
Newly obtained data values replace existing + * data values not yet delivered to the source handler + * + * A data value of zero will cause the source handler to not be invoked. + * + * The handle is unused (pass zero for now). + * The mask is unused (pass zero for now). + */ +#define DISPATCH_SOURCE_TYPE_DATA_REPLACE (&_dispatch_source_type_data_replace) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_SOURCE_TYPE_DECL(data_replace); + /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND * @discussion A dispatch source that monitors a Mach port for dead name @@ -101,7 +116,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); * The mask is a mask of desired events from dispatch_source_mach_send_flags_t. */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_send); /*! @@ -111,7 +126,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(mach_recv); /*! @@ -124,7 +139,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); */ #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_8_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.9), ios(8.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(memorypressure); /*! @@ -135,7 +150,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure); * The mask is a mask of desired events from dispatch_source_proc_flags_t. 
*/ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(proc); /*! @@ -146,7 +161,7 @@ DISPATCH_SOURCE_TYPE_DECL(proc); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(read); /*! @@ -156,7 +171,7 @@ DISPATCH_SOURCE_TYPE_DECL(read); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(signal); /*! @@ -167,7 +182,7 @@ DISPATCH_SOURCE_TYPE_DECL(signal); * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(timer); /*! @@ -178,7 +193,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer); * The mask is a mask of desired events from dispatch_source_vnode_flags_t. */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(vnode); /*! @@ -189,7 +204,7 @@ DISPATCH_SOURCE_TYPE_DECL(vnode); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_SOURCE_TYPE_DECL(write); /*! @@ -361,7 +376,7 @@ typedef unsigned long dispatch_source_timer_flags_t; * @result * The newly created dispatch source. Or NULL if invalid arguments are passed. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_source_t @@ -384,7 +399,7 @@ dispatch_source_create(dispatch_source_type_t type, * The event handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler(dispatch_source_t source, @@ -406,7 +421,7 @@ dispatch_source_set_event_handler(dispatch_source_t source, * The context parameter passed to the event handler function is the context of * the dispatch source current at the time the event handler was set. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_event_handler_f(dispatch_source_t source, @@ -425,12 +440,13 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, * the source's event handler block has returned. * * IMPORTANT: - * A cancellation handler is required for file descriptor and mach port based - * sources in order to safely close the descriptor or destroy the port. Closing - * the descriptor or port before the cancellation handler may result in a race - * condition. If a new descriptor is allocated with the same value as the - * recently closed descriptor while the source's event handler is still running, - * the event handler may read/write data to the wrong descriptor. + * Source cancellation and a cancellation handler are required for file + * descriptor and mach port based sources in order to safely close the + * descriptor or destroy the port. + * Closing the descriptor or port before the cancellation handler is invoked may + * result in a race condition. 
If a new descriptor is allocated with the same + * value as the recently closed descriptor while the source's event handler is + * still running, the event handler may read/write data to the wrong descriptor. * * @param source * The dispatch source to modify. @@ -440,7 +456,7 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, * The cancellation handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler(dispatch_source_t source, @@ -465,7 +481,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t source, * The context parameter passed to the event handler function is the current * context of the dispatch source at the time the handler call is made. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_cancel_handler_f(dispatch_source_t source, @@ -493,7 +509,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t source, * The dispatch source to be canceled. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_cancel(dispatch_source_t source); @@ -511,7 +527,7 @@ dispatch_source_cancel(dispatch_source_t source); * @result * Non-zero if canceled and zero if not canceled. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW long @@ -542,7 +558,7 @@ dispatch_source_testcancel(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: file descriptor (int) * DISPATCH_SOURCE_TYPE_WRITE: file descriptor (int) */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW uintptr_t @@ -573,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: n/a */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW unsigned long @@ -611,7 +627,7 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_VNODE: dispatch_source_vnode_flags_t * DISPATCH_SOURCE_TYPE_WRITE: estimated buffer space available */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW unsigned long @@ -633,7 +649,7 @@ dispatch_source_get_data(dispatch_source_t source); * as specified by the dispatch source type. A value of zero has no effect * and will not result in the submission of the event handler block. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_merge_data(dispatch_source_t source, unsigned long value); @@ -685,7 +701,7 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * @param leeway * The nanosecond leeway for the timer. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_set_timer(dispatch_source_t source, @@ -715,7 +731,7 @@ dispatch_source_set_timer(dispatch_source_t source, * The registration handler block to submit to the source's target queue. */ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler(dispatch_source_t source, @@ -740,7 +756,7 @@ dispatch_source_set_registration_handler(dispatch_source_t source, * The context parameter passed to the registration handler function is the * current context of the dispatch source at the time the handler call is made. */ -__OSX_AVAILABLE_STARTING(__MAC_10_7,__IPHONE_4_3) +API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_source_set_registration_handler_f(dispatch_source_t source, diff --git a/dispatch/time.h b/dispatch/time.h index c2152ea14..ce99f2700 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -89,7 +89,7 @@ typedef uint64_t dispatch_time_t; * @result * A new dispatch_time_t. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_time(dispatch_time_t when, int64_t delta); @@ -113,7 +113,7 @@ dispatch_time(dispatch_time_t when, int64_t delta); * @result * A new dispatch_time_t. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_time_t dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index fb0ba910f..e7134e709 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -85,6 +85,26 @@ 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + 6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC41D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC61D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = 
{isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; 6E99558A1C3AF7900071D40C /* 
venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; @@ -99,6 +119,25 @@ 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; + 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA7938F1D458A5E00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; + 6EA962971D48622600759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA962981D48622700759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629A1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629C1D48622A00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A21D48625200759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A41D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; @@ -142,6 +181,7 @@ 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 
6EF2CAA41C88998A001ABE83 /* lock.h */; }; + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; 72CC94300ECCD8750031B751 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -291,6 +331,37 @@ E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E49BB6D11E70748100868613 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E49BB6D21E70748100868613 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E49BB6D31E70748100868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + E49BB6D41E70748100868613 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + E49BB6D51E70748100868613 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + E49BB6D61E70748100868613 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + E49BB6D71E70748100868613 /* resolver.c in Sources */ = {isa = 
PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E49BB6D81E70748100868613 /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + E49BB6D91E70748100868613 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E49BB6DA1E70748100868613 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E49BB6DB1E70748100868613 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E49BB6DC1E70748100868613 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + E49BB6DD1E70748100868613 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + E49BB6DE1E70748100868613 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E49BB6DF1E70748100868613 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E49BB6E01E70748100868613 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E49BB6E11E70748100868613 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E49BB6E21E70748100868613 /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + E49BB6E31E70748100868613 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E49BB6E41E70748100868613 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E49BB6E51E70748100868613 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E49BB6E61E70748100868613 /* io.c in Sources */ = 
{isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E49BB6E71E70748100868613 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E49BB6E81E70748100868613 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + E49BB6E91E70748100868613 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E49BB6EA1E70748100868613 /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E49BB6EB1E70748100868613 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; + E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; @@ -463,13 +534,6 @@ remoteGlobalIDString = 92F3FECA1BEC69E500025962; remoteInfo = darwintests; }; - C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = E4D01CB9108E6C7200FAA873; - remoteInfo 
= dispatch_deadname; - }; C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -526,6 +590,13 @@ remoteGlobalIDString = E4EC121612514715000DDBD1; remoteInfo = "libdispatch mp resolved"; }; + E49BB6F71E7074C100868613 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E49BB6CE1E70748100868613; + remoteInfo = "libdispatch alt resolved"; + }; E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -587,8 +658,12 @@ 6E326B161C239431002A6505 /* dispatch_timer_short.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_short.c; sourceTree = ""; }; 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = ""; }; 6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = ""; }; - 6E4130C91B431697001A152D /* backward-compat.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "backward-compat.xcconfig"; sourceTree = ""; }; + 6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = ""; }; + 6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = ""; }; 6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = ""; }; + 
6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = ""; }; + 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_internal.h; sourceTree = ""; }; + 6E5ACCBD1D3C6719007DA2B4 /* event.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = ""; }; 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = ""; }; 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = ""; }; 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = ""; }; @@ -614,15 +689,22 @@ 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = ""; }; 6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = ""; }; 6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = ""; }; + 6EA7937D1D456D1300929B1B /* event_epoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_epoll.c; sourceTree = ""; }; + 6EA793881D458A5800929B1B /* event_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_config.h; sourceTree = ""; }; 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */ = {isa = 
PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_server.a; sourceTree = BUILT_PRODUCTS_DIR; }; 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = ""; }; 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = ""; }; + 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = ""; }; + 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = ""; }; + 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = ""; }; + 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = ""; }; 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = ""; }; 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = ""; }; 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
firehose_internal.h; sourceTree = ""; }; 6EF2CAA41C88998A001ABE83 /* lock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lock.h; sourceTree = ""; }; 6EF2CAAB1C8899D5001ABE83 /* lock.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = lock.c; path = shims/lock.c; sourceTree = ""; }; + 6EFBDA4A1D61A0D600282887 /* priority.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = priority.h; sourceTree = ""; }; 721F5C5C0F15520500FF03A6 /* semaphore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore.h; sourceTree = ""; }; 721F5CCE0F15553500FF03A6 /* semaphore.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = semaphore.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 72406A031AF95DF800DF4E2B /* firehose_reply.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose_reply.defs; sourceTree = ""; }; @@ -657,13 +739,21 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; + B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; + 
B69878521F06F8790088F94F /* dispatch_signals.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_signals.c; sourceTree = ""; }; + B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_thread_request.c; sourceTree = ""; }; + B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_create.c; sourceTree = ""; }; + B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = ""; }; + B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = ""; }; + B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = ""; }; C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = ""; }; C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = 
""; }; - C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; - C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; path = module.modulemap; sourceTree = ""; }; + C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; + C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; tabWidth = 8; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = ""; }; @@ -692,17 +782,16 @@ E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; - E46DBC5714EE10C80001F9F6 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; 
path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-up-static.xcconfig"; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = ""; }; + E49BB6F21E70748100868613 /* libdispatch_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_alt.a; sourceTree = BUILT_PRODUCTS_DIR; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; - E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */ = {isa = 
PBXFileReference; lastKnownFileType = text; path = "libdispatch-resolver_iphoneos.order"; sourceTree = ""; }; - E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */ = {isa = PBXFileReference; lastKnownFileType = text; path = libdispatch_iphoneos.order; sourceTree = ""; }; E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = voucher_activity_private.h; sourceTree = ""; }; E4B515D6164B2DA300E003AF /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E4B515D7164B2DFB00E003AF /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = ""; }; @@ -771,8 +860,6 @@ FC7BEDAF0E83626100161930 /* Dispatch Private Headers */, FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */, 08FB7795FE84155DC02AAC07 /* Dispatch Source */, - 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */, - 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */, 92F3FEC91BEC687200025962 /* Darwin Tests */, C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, @@ -798,6 +885,7 @@ E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, 6EF2CAAB1C8899D5001ABE83 /* lock.c */, + 6E4BACBC1D48A41500B562AE /* mach.c */, 9661E56A0F3E7DDF00749F3E /* object.c */, E4FC3263145F46C9002FBDDB /* object.m */, 96DF70BD0F38FE3C0074BD99 /* once.c */, @@ -811,6 +899,8 @@ 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, + 6E5ACCAF1D3BF2A0007DA2B4 /* event */, + 6EF0B2641BA8C3A0007FA4F6 /* firehose */, ); name = "Dispatch Source"; path = src; @@ -820,15 +910,16 @@ isa = PBXGroup; children = ( D2AAC046055464E500DB518D /* libdispatch.dylib */, - 
E4EC11C312514302000DDBD1 /* libdispatch_up.a */, - E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, - E49F24DF125D57FA0057C971 /* libdispatch.dylib */, - E46DBC5714EE10C80001F9F6 /* libdispatch.a */, E4B515D6164B2DA300E003AF /* libdispatch.dylib */, - 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, - 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, + E49F24DF125D57FA0057C971 /* libdispatch.dylib */, + E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, + E4EC11C312514302000DDBD1 /* libdispatch_up.a */, + E49BB6F21E70748100868613 /* libdispatch_alt.a */, + E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */, C01866BD1C5973210040FC07 /* libdispatch.a */, C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, + 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, + 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, ); name = Products; sourceTree = ""; @@ -840,11 +931,29 @@ 4552540519B1384900B88766 /* jsgc_bench */, 4552540719B1384900B88766 /* async_bench */, 4552540919B1384900B88766 /* apply_bench */, - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */, ); name = Products; sourceTree = ""; }; + 6E5ACCAE1D3BF27F007DA2B4 /* event */ = { + isa = PBXGroup; + children = ( + 6EA793881D458A5800929B1B /* event_config.h */, + 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */, + ); + path = event; + sourceTree = ""; + }; + 6E5ACCAF1D3BF2A0007DA2B4 /* event */ = { + isa = PBXGroup; + children = ( + 6E5ACCBD1D3C6719007DA2B4 /* event.c */, + 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */, + 6EA7937D1D456D1300929B1B /* event_epoll.c */, + ); + path = event; + sourceTree = ""; + }; 6E9B6AE21BB39793009E324D /* OS Public Headers */ = { isa = PBXGroup; children = ( @@ -854,7 +963,7 @@ path = os; sourceTree = ""; }; - 6EF0B2641BA8C3A0007FA4F6 /* Firehose Source */ = { + 6EF0B2641BA8C3A0007FA4F6 /* firehose */ = { isa = PBXGroup; children = ( 72406A391AF9926000DF4E2B /* firehose_types.defs */, @@ -864,11 +973,10 @@ 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */, 
72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */, ); - name = "Firehose Source"; - path = src/firehose; + path = firehose; sourceTree = ""; }; - 6EF0B2661BA8C43D007FA4F6 /* Firehose Project Headers */ = { + 6EF0B2661BA8C43D007FA4F6 /* firehose */ = { isa = PBXGroup; children = ( 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */, @@ -876,8 +984,7 @@ 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */, 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */, ); - name = "Firehose Project Headers"; - path = src/firehose; + path = firehose; sourceTree = ""; }; 92F3FEC91BEC687200025962 /* Darwin Tests */ = { @@ -894,18 +1001,23 @@ 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, + 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */, 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */, 6E67D90F1C16CF0B00FC98AC /* dispatch_group.c */, - 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */, 6E326ABE1C22A577002A6505 /* dispatch_io.c */, + 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, + 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */, + B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */, C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */, 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */, 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, 6E326B441C239B61002A6505 /* dispatch_priority.c */, 6E326AB51C225477002A6505 /* dispatch_proc.c */, + B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */, 6E326AB31C224870002A6505 /* dispatch_qos.c */, + B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */, 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */, 6E326AB91C229866002A6505 /* dispatch_read.c */, @@ -913,6 +1025,7 @@ 6E326ADC1C234396002A6505 /* 
dispatch_readsync.c */, 6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */, 6E8E4E9B1C1A4EF10004F5CC /* dispatch_sema.c */, + B69878521F06F8790088F94F /* dispatch_signals.c */, 6EA2CB841C005DEF0076794A /* dispatch_source.c */, 6E326AE01C234780002A6505 /* dispatch_starfish.c */, 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */, @@ -928,8 +1041,14 @@ 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */, 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */, 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, + B68330BC1EBCF6080003E71C /* dispatch_wl.c */, 6E67D9171C17BA7200FC98AC /* nsoperation.m */, 6E4FC9D11C84123600520351 /* os_venture_basic.c */, + B6AE9A561D7F53C100AC007F /* perf_async_bench.m */, + B6AE9A581D7F53CB00AC007F /* perf_bench.m */, + 6EC670C71E37E201004F10D6 /* perf_mach_async.c */, + 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */, + B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */, 92F3FE921BEC686300025962 /* Makefile */, 6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */, 6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */, @@ -974,7 +1093,6 @@ E40041E4125E71150022B135 /* xcodeconfig */ = { isa = PBXGroup; children = ( - 6E4130C91B431697001A152D /* backward-compat.xcconfig */, E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, @@ -986,8 +1104,6 @@ 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, - E4B2D42E17A7F0F90034A18F /* libdispatch_iphoneos.order */, - E4B2D42D17A7F0F90034A18F /* libdispatch-resolver_iphoneos.order */, E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; @@ -1008,8 +1124,8 @@ isa = PBXGroup; children = ( E47D6BB5125F0F800070D91C /* resolved.h */, - E44EBE371251656400645D88 /* resolver.c */, E44EBE331251654000645D88 /* resolver.h */, + E44EBE371251656400645D88 /* resolver.c */, ); path = 
resolver; sourceTree = ""; @@ -1058,6 +1174,7 @@ E4128ED513BA9A1700ABB2CB /* hw_config.h */, 6EF2CAA41C88998A001ABE83 /* lock.h */, FC1832A2109923C7003403D5 /* perfmon.h */, + 6EFBDA4A1D61A0D600282887 /* priority.h */, FC1832A3109923C7003403D5 /* time.h */, FC1832A4109923C7003403D5 /* tsd.h */, E48EC97B1835BADD00EAC4F1 /* yield.h */, @@ -1113,6 +1230,7 @@ E44757D917F4572600B82CA1 /* inline_internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h */, + 6E4BACC91D48A89500B562AE /* mach_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, @@ -1122,6 +1240,8 @@ E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, 96929D830F3EA1020041FF5D /* shims.h */, + 6E5ACCAE1D3BF27F007DA2B4 /* event */, + 6EF0B2661BA8C43D007FA4F6 /* firehose */, FC1832A0109923B3003403D5 /* shims */, ); name = "Dispatch Project Headers"; @@ -1166,6 +1286,7 @@ E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, @@ -1190,9 +1311,11 @@ E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, FC1832A8109923C7003403D5 /* tsd.h in Headers */, + 6EA793891D458A5800929B1B /* event_config.h in Headers */, 96929D840F3EA1020041FF5D /* atomic.h in Headers */, 96929D850F3EA1020041FF5D /* shims.h in Headers */, FC1832A7109923C7003403D5 /* time.h in Headers */, + 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */, 6ED64B511BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */, E48EC97C1835BADD00EAC4F1 /* 
yield.h in Headers */, 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */, @@ -1202,6 +1325,7 @@ 6EF2CAA51C88998A001ABE83 /* lock.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 /* getprogname.h in Headers */, + 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */, 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, @@ -1219,6 +1343,7 @@ files = ( E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, + 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1244,6 +1369,7 @@ E49F24BE125D57FA0057C971 /* source_internal.h in Headers */, E49F24BD125D57FA0057C971 /* semaphore_internal.h in Headers */, E4C1ED701263E714000D3C8B /* data_internal.h in Headers */, + 6EA7938F1D458A5E00929B1B /* event_config.h in Headers */, 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, @@ -1285,6 +1411,7 @@ E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, + 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */, 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, @@ -1293,6 +1420,7 @@ 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */, 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */, E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */, + 6EA7938E1D458A5C00929B1B /* 
event_config.h in Headers */, 6ED64B4F1BBD8A1400C35F4D /* firehose_internal.h in Headers */, E44F9DB71654404F001DCD38 /* shims.h in Headers */, E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, @@ -1409,8 +1537,9 @@ ); dependencies = ( 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */, - E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, + E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, + E49BB6F81E7074C100868613 /* PBXTargetDependency */, E4B515DB164B317700E003AF /* PBXTargetDependency */, C01866C21C597AEA0040FC07 /* PBXTargetDependency */, E437F0D614F7441F00F0B997 /* PBXTargetDependency */, @@ -1433,7 +1562,24 @@ ); name = "libdispatch up static"; productName = libdispatch; - productReference = E46DBC5714EE10C80001F9F6 /* libdispatch.a */; + productReference = E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */; + productType = "com.apple.product-type.library.static"; + }; + E49BB6CE1E70748100868613 /* libdispatch alt resolved */ = { + isa = PBXNativeTarget; + buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */; + buildPhases = ( + E49BB6CF1E70748100868613 /* Mig Headers */, + E49BB6D01E70748100868613 /* Sources */, + E49BB6EE1E70748100868613 /* Symlink normal variant */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch alt resolved"; + productName = libdispatch; + productReference = E49BB6F21E70748100868613 /* libdispatch_alt.a */; productType = "com.apple.product-type.library.static"; }; E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = { @@ -1514,7 +1660,7 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0800; + LastUpgradeCheck = 0900; TargetAttributes = { 3F3C9326128E637B0042B1F7 = { ProvisioningStyle = Manual; @@ -1598,6 +1744,7 @@ E49F24A9125D57FA0057C971 /* libdispatch no resolver */, E4EC121612514715000DDBD1 /* libdispatch mp resolved */, 
E4EC118F12514302000DDBD1 /* libdispatch up resolved */, + E49BB6CE1E70748100868613 /* libdispatch alt resolved */, E4B51595164B2DA300E003AF /* libdispatch introspection */, E46DBC1A14EE10C80001F9F6 /* libdispatch up static */, C01866A41C5973210040FC07 /* libdispatch mp static */, @@ -1606,10 +1753,10 @@ 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, 4552540A19B1389700B88766 /* libdispatch_tests */, + 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, + 92F3FECA1BEC69E500025962 /* darwintests */, 6E040C621C499B1B00411A2E /* libfirehose_kernel */, 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */, - 92F3FECA1BEC69E500025962 /* darwintests */, - 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, ); }; /* End PBXProject section */ @@ -1643,13 +1790,6 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - C00B0E111C5AEBBE000330B3 /* dispatch_deadname */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = dispatch_deadname; - remoteRef = C00B0E101C5AEBBE000330B3 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1760,6 +1900,47 @@ shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + E49BB6CF1E70748100868613 /* Mig Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/src/protocol.defs", + "$(SRCROOT)/src/firehose/firehose.defs", + "$(SRCROOT)/src/firehose/firehose_reply.defs", + "$(SRCROOT)/xcodescripts/mig-headers.sh", + ); + name = "Mig Headers"; + outputPaths = ( + "$(DERIVED_FILE_DIR)/protocol.h", + "$(DERIVED_FILE_DIR)/protocolServer.h", + "$(DERIVED_FILE_DIR)/firehose.h", + "$(DERIVED_FILE_DIR)/firehoseServer.h", + "$(DERIVED_FILE_DIR)/firehose_reply.h", + "$(DERIVED_FILE_DIR)/firehose_replyServer.h", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_3}\""; + showEnvVarsInLog = 0; + }; + E49BB6EE1E70748100868613 /* Symlink normal variant */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + name = "Symlink normal variant"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(PRODUCT_NAME)_normal.a", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + showEnvVarsInLog = 0; + }; E49F24D7125D57FA0057C971 /* Install Manpages */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 8; @@ -1925,10 +2106,10 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */, 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */, - 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */, 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */, + 6EF0B27A1BA8C57D007FA4F6 /* firehose_server_object.m in Sources */, + 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1937,26 +2118,31 @@ buildActionMask = 2147483647; files = ( C00B0DF21C5AEBBE000330B3 /* 
protocol.defs in Sources */, + C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, + C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, + C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, + 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, + C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */, - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, C00B0E001C5AEBBE000330B3 /* source.c in Sources */, - C00B0E011C5AEBBE000330B3 /* time.c in Sources */, - C00B0E021C5AEBBE000330B3 /* data.c in Sources */, + 6E4BACC81D48A42400B562AE /* mach.c in Sources */, + 6EA9629E1D48622C00759D53 /* event.c in Sources */, + 6EA962A61D48625500759D53 /* event_kevent.c in Sources */, + 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */, + C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, + C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, C00B0E031C5AEBBE000330B3 /* io.c in Sources */, + C00B0E021C5AEBBE000330B3 /* data.c in Sources */, C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, + C00B0E011C5AEBBE000330B3 /* time.c in Sources */, C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */, + C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, + E49BB70A1E70A3B000868613 /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; 
}; @@ -1965,26 +2151,31 @@ buildActionMask = 2147483647; files = ( C01866A61C5973210040FC07 /* protocol.defs in Sources */, + C01866AB1C5973210040FC07 /* firehose.defs in Sources */, + C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, C01866A71C5973210040FC07 /* resolver.c in Sources */, - 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */, C01866A81C5973210040FC07 /* init.c in Sources */, - C01866A91C5973210040FC07 /* queue.c in Sources */, - C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */, - C01866AB1C5973210040FC07 /* firehose.defs in Sources */, + C01866B21C5973210040FC07 /* object.c in Sources */, C01866AC1C5973210040FC07 /* block.cpp in Sources */, + 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */, C01866AD1C5973210040FC07 /* semaphore.c in Sources */, - C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, C01866AF1C5973210040FC07 /* once.c in Sources */, - C01866B01C5973210040FC07 /* voucher.c in Sources */, + C01866A91C5973210040FC07 /* queue.c in Sources */, C01866B11C5973210040FC07 /* apply.c in Sources */, - C01866B21C5973210040FC07 /* object.c in Sources */, - C01866B31C5973210040FC07 /* benchmark.c in Sources */, C01866B41C5973210040FC07 /* source.c in Sources */, - C01866B51C5973210040FC07 /* time.c in Sources */, - C01866B61C5973210040FC07 /* data.c in Sources */, + 6E4BACC71D48A42300B562AE /* mach.c in Sources */, + 6EA9629D1D48622B00759D53 /* event.c in Sources */, + 6EA962A51D48625400759D53 /* event_kevent.c in Sources */, + 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */, + C01866B01C5973210040FC07 /* voucher.c in Sources */, + C01866AA1C5973210040FC07 /* firehose_buffer.c in Sources */, C01866B71C5973210040FC07 /* io.c in Sources */, + C01866B61C5973210040FC07 /* data.c in Sources */, C01866B81C5973210040FC07 /* transform.c in Sources */, + C01866B51C5973210040FC07 /* time.c in Sources */, C01866B91C5973210040FC07 /* allocator.c in Sources */, + C01866B31C5973210040FC07 /* benchmark.c in Sources */, + 
E49BB7091E70A39700868613 /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1994,29 +2185,33 @@ files = ( E43570B9126E93380097AB9F /* provider.d in Sources */, FC7BEDA40E8361E600161930 /* protocol.defs in Sources */, - 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, 6ED64B471BBD89AF00C35F4D /* firehose.defs in Sources */, - 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */, + 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */, E49F2499125D48D80057C971 /* resolver.c in Sources */, E44EBE3E1251659900645D88 /* init.c in Sources */, - FC7BED990E8361E600161930 /* queue.c in Sources */, - 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, + 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, + E4FC3264145F46C9002FBDDB /* object.m in Sources */, + E43A72501AF85BBC00BAA921 /* block.cpp in Sources */, 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, - 6ED64B491BBD89BC00C35F4D /* firehose_reply.defs in Sources */, + 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, + FC7BED990E8361E600161930 /* queue.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, - 9661E56B0F3E7DDF00749F3E /* object.c in Sources */, - 965CD6350F3E806200D4E28D /* benchmark.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, - 96032E4B0F5CC8C700241C5F /* time.c in Sources */, - 5AAB45C010D30B79004407EA /* data.c in Sources */, + 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, + 6EA962971D48622600759D53 /* event.c in Sources */, + 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */, + 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */, + E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */, + 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */, 5A27262610F26F1900751FBC /* io.c in Sources */, - E43A72501AF85BBC00BAA921 /* block.cpp in Sources */, + 5AAB45C010D30B79004407EA /* data.c in Sources */, + E420867016027AE500EEE210 /* data.m in Sources 
*/, C9C5F80E143C1771006DC718 /* transform.c in Sources */, - E4FC3264145F46C9002FBDDB /* object.m in Sources */, + 96032E4B0F5CC8C700241C5F /* time.c in Sources */, 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, - E420867016027AE500EEE210 /* data.m in Sources */, - E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */, + 965CD6350F3E806200D4E28D /* benchmark.c in Sources */, + 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2025,27 +2220,67 @@ buildActionMask = 2147483647; files = ( E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, + 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, + 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */, E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, - 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, E46DBC4214EE10C80001F9F6 /* init.c in Sources */, - E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, - 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, - 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, + E46DBC4714EE10C80001F9F6 /* object.c in Sources */, E43A72881AF85BE900BAA921 /* block.cpp in Sources */, + 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, - 6E9956011C3B21980071D40C /* venture.c in Sources */, - 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */, E46DBC4514EE10C80001F9F6 /* once.c in Sources */, - E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, + E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, - E46DBC4714EE10C80001F9F6 /* object.c in Sources */, - E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, E46DBC4914EE10C80001F9F6 /* source.c in Sources */, - E46DBC4A14EE10C80001F9F6 /* time.c in Sources */, - E46DBC4B14EE10C80001F9F6 /* data.c in Sources */, + 6E4BACC61D48A42300B562AE /* mach.c in Sources */, + 6EA9629C1D48622A00759D53 /* event.c in Sources */, + 
6EA962A41D48625300759D53 /* event_kevent.c in Sources */, + 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */, + E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, + 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, E46DBC4C14EE10C80001F9F6 /* io.c in Sources */, + E46DBC4B14EE10C80001F9F6 /* data.c in Sources */, E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */, + E46DBC4A14EE10C80001F9F6 /* time.c in Sources */, 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */, + E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, + 6E9956011C3B21980071D40C /* venture.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + E49BB6D01E70748100868613 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E49BB6D11E70748100868613 /* provider.d in Sources */, + E49BB6D21E70748100868613 /* protocol.defs in Sources */, + E49BB6D41E70748100868613 /* firehose.defs in Sources */, + E49BB6DD1E70748100868613 /* firehose_reply.defs in Sources */, + E49BB6D71E70748100868613 /* resolver.c in Sources */, + E49BB6D91E70748100868613 /* init.c in Sources */, + E49BB6E01E70748100868613 /* object.c in Sources */, + E49BB6EA1E70748100868613 /* object.m in Sources */, + E49BB6E71E70748100868613 /* block.cpp in Sources */, + E49BB6DC1E70748100868613 /* lock.c in Sources */, + E49BB6DB1E70748100868613 /* semaphore.c in Sources */, + E49BB6DE1E70748100868613 /* once.c in Sources */, + E49BB6D81E70748100868613 /* mach.c in Sources */, + E49BB6DA1E70748100868613 /* queue.c in Sources */, + E49BB6DF1E70748100868613 /* apply.c in Sources */, + E49BB6E31E70748100868613 /* source.c in Sources */, + E49BB6E81E70748100868613 /* event.c in Sources */, + E49BB6D61E70748100868613 /* event_kevent.c in Sources */, + E49BB6E21E70748100868613 /* event_epoll.c in Sources */, + E49BB6ED1E70748100868613 /* voucher.c in Sources */, + E49BB6D51E70748100868613 /* firehose_buffer.c in Sources */, + E49BB6E61E70748100868613 /* io.c in Sources 
*/, + E49BB6E51E70748100868613 /* data.c in Sources */, + E49BB6EC1E70748100868613 /* data.m in Sources */, + E49BB6E91E70748100868613 /* transform.c in Sources */, + E49BB6E41E70748100868613 /* time.c in Sources */, + E49BB6EB1E70748100868613 /* allocator.c in Sources */, + E49BB6E11E70748100868613 /* benchmark.c in Sources */, + E49BB6D31E70748100868613 /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2055,29 +2290,33 @@ files = ( E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, - 6E9956051C3B219B0071D40C /* venture.c in Sources */, 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, - 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */, + 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */, E49F24C9125D57FA0057C971 /* resolver.c in Sources */, E49F24CA125D57FA0057C971 /* init.c in Sources */, - E49F24CB125D57FA0057C971 /* queue.c in Sources */, - E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, + E49F24CF125D57FA0057C971 /* object.c in Sources */, + E4FC3265145F46C9002FBDDB /* object.m in Sources */, + E43A72841AF85BCB00BAA921 /* block.cpp in Sources */, 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */, - 6ED64B4A1BBD89BD00C35F4D /* firehose_reply.defs in Sources */, + E49F24CC125D57FA0057C971 /* semaphore.c in Sources */, E49F24CD125D57FA0057C971 /* once.c in Sources */, + E49F24CB125D57FA0057C971 /* queue.c in Sources */, E49F24CE125D57FA0057C971 /* apply.c in Sources */, - E49F24CF125D57FA0057C971 /* object.c in Sources */, - E49F24D0125D57FA0057C971 /* benchmark.c in Sources */, E49F24D1125D57FA0057C971 /* source.c in Sources */, - E49F24D2125D57FA0057C971 /* time.c in Sources */, - E49F24D3125D57FA0057C971 /* data.c in Sources */, + 6E4BACC21D48A42000B562AE /* mach.c in Sources */, + 6EA962981D48622700759D53 /* event.c in Sources */, + 6EA962A01D48625100759D53 /* event_kevent.c in Sources */, + 6E4BACF61D49A04700B562AE /* event_epoll.c in 
Sources */, + E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */, + 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */, E49F24D4125D57FA0057C971 /* io.c in Sources */, - E43A72841AF85BCB00BAA921 /* block.cpp in Sources */, + E49F24D3125D57FA0057C971 /* data.c in Sources */, + E420867116027AE500EEE210 /* data.m in Sources */, C93D6165143E190E00EB9023 /* transform.c in Sources */, - E4FC3265145F46C9002FBDDB /* object.m in Sources */, + E49F24D2125D57FA0057C971 /* time.c in Sources */, 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, - E420867116027AE500EEE210 /* data.m in Sources */, - E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */, + E49F24D0125D57FA0057C971 /* benchmark.c in Sources */, + 6E9956051C3B219B0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2087,29 +2326,33 @@ files = ( E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, - E4B515BF164B2DA300E003AF /* resolver.c in Sources */, - 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */, 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, + 6ED64B4B1BBD89BE00C35F4D /* firehose_reply.defs in Sources */, + E4B515BF164B2DA300E003AF /* resolver.c in Sources */, E4B515C0164B2DA300E003AF /* init.c in Sources */, - E4B515C1164B2DA300E003AF /* queue.c in Sources */, - 6E9956021C3B21990071D40C /* venture.c in Sources */, + E4B515C5164B2DA300E003AF /* object.c in Sources */, + E4B515CC164B2DA300E003AF /* object.m in Sources */, + E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, + 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, E4B515C3164B2DA300E003AF /* once.c in Sources */, - E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, + E4B515C1164B2DA300E003AF /* queue.c in Sources */, E4B515C4164B2DA300E003AF /* apply.c in Sources */, - E4B515C5164B2DA300E003AF /* object.c in Sources */, - 6ED64B431BBD898600C35F4D /* 
firehose_buffer.c in Sources */, - E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, E4B515C7164B2DA300E003AF /* source.c in Sources */, - E4B515C8164B2DA300E003AF /* time.c in Sources */, - E4B515C9164B2DA300E003AF /* data.c in Sources */, - E4B515CA164B2DA300E003AF /* io.c in Sources */, + 6E4BACC51D48A42200B562AE /* mach.c in Sources */, + 6EA9629B1D48622900759D53 /* event.c in Sources */, + 6EA962A31D48625300759D53 /* event_kevent.c in Sources */, + 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */, E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */, + 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */, + E4B515CA164B2DA300E003AF /* io.c in Sources */, + E4B515C9164B2DA300E003AF /* data.c in Sources */, + E4B515CE164B2DA300E003AF /* data.m in Sources */, E4B515CB164B2DA300E003AF /* transform.c in Sources */, - 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, - E4B515CC164B2DA300E003AF /* object.m in Sources */, + E4B515C8164B2DA300E003AF /* time.c in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, - E4B515CE164B2DA300E003AF /* data.m in Sources */, + E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, + 6E9956021C3B21990071D40C /* venture.c in Sources */, E4B515DD164B32E000E003AF /* introspection.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -2120,29 +2363,33 @@ files = ( E417A38412A472C4004D659D /* provider.d in Sources */, E44EBE5412517EBE00645D88 /* protocol.defs in Sources */, - 6E9956031C3B219A0071D40C /* venture.c in Sources */, 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */, - 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, + 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */, E49F2424125D3C970057C971 /* resolver.c in Sources */, E44EBE5512517EBE00645D88 /* init.c in Sources */, - E4EC11AE12514302000DDBD1 /* queue.c in Sources */, - E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, + E4EC11B212514302000DDBD1 /* object.c in Sources */, + 
E4FC3266145F46C9002FBDDB /* object.m in Sources */, + E43A72861AF85BCC00BAA921 /* block.cpp in Sources */, 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */, - 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */, + E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, E4EC11B012514302000DDBD1 /* once.c in Sources */, + E4EC11AE12514302000DDBD1 /* queue.c in Sources */, E4EC11B112514302000DDBD1 /* apply.c in Sources */, - E4EC11B212514302000DDBD1 /* object.c in Sources */, - E4EC11B312514302000DDBD1 /* benchmark.c in Sources */, E4EC11B412514302000DDBD1 /* source.c in Sources */, - E4EC11B512514302000DDBD1 /* time.c in Sources */, - E4EC11B712514302000DDBD1 /* data.c in Sources */, + 6E4BACC41D48A42200B562AE /* mach.c in Sources */, + 6EA9629A1D48622900759D53 /* event.c in Sources */, + 6EA962A21D48625200759D53 /* event_kevent.c in Sources */, + 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */, + E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */, + 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, E4EC11B812514302000DDBD1 /* io.c in Sources */, - E43A72861AF85BCC00BAA921 /* block.cpp in Sources */, + E4EC11B712514302000DDBD1 /* data.c in Sources */, + E420867316027AE500EEE210 /* data.m in Sources */, C93D6166143E190F00EB9023 /* transform.c in Sources */, - E4FC3266145F46C9002FBDDB /* object.m in Sources */, + E4EC11B512514302000DDBD1 /* time.c in Sources */, 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, - E420867316027AE500EEE210 /* data.m in Sources */, - E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */, + E4EC11B312514302000DDBD1 /* benchmark.c in Sources */, + 6E9956031C3B219A0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2152,29 +2399,33 @@ files = ( E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, - 6E9956041C3B219B0071D40C /* venture.c in Sources */, 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, - 
6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */, + 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */, E49F2423125D3C960057C971 /* resolver.c in Sources */, E44EBE5712517EBE00645D88 /* init.c in Sources */, - E4EC121A12514715000DDBD1 /* queue.c in Sources */, - E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, + E4EC121E12514715000DDBD1 /* object.c in Sources */, + E4FC3267145F46C9002FBDDB /* object.m in Sources */, + E43A72851AF85BCC00BAA921 /* block.cpp in Sources */, 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */, - 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */, + E4EC121B12514715000DDBD1 /* semaphore.c in Sources */, E4EC121C12514715000DDBD1 /* once.c in Sources */, + E4EC121A12514715000DDBD1 /* queue.c in Sources */, E4EC121D12514715000DDBD1 /* apply.c in Sources */, - E4EC121E12514715000DDBD1 /* object.c in Sources */, - E4EC121F12514715000DDBD1 /* benchmark.c in Sources */, E4EC122012514715000DDBD1 /* source.c in Sources */, - E4EC122112514715000DDBD1 /* time.c in Sources */, - E4EC122312514715000DDBD1 /* data.c in Sources */, + 6E4BACC31D48A42100B562AE /* mach.c in Sources */, + 6EA962991D48622800759D53 /* event.c in Sources */, + 6EA962A11D48625100759D53 /* event_kevent.c in Sources */, + 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */, + E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */, + 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */, E4EC122412514715000DDBD1 /* io.c in Sources */, - E43A72851AF85BCC00BAA921 /* block.cpp in Sources */, + E4EC122312514715000DDBD1 /* data.c in Sources */, + E420867216027AE500EEE210 /* data.m in Sources */, C93D6167143E190F00EB9023 /* transform.c in Sources */, - E4FC3267145F46C9002FBDDB /* object.m in Sources */, + E4EC122112514715000DDBD1 /* time.c in Sources */, 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, - E420867216027AE500EEE210 /* data.m in Sources */, - E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */, + E4EC121F12514715000DDBD1 /* 
benchmark.c in Sources */, + 6E9956041C3B219B0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2236,6 +2487,11 @@ target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; targetProxy = E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */; }; + E49BB6F81E7074C100868613 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E49BB6CE1E70748100868613 /* libdispatch alt resolved */; + targetProxy = E49BB6F71E7074C100868613 /* PBXContainerItemProxy */; + }; E4B515DB164B317700E003AF /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4B51595164B2DA300E003AF /* libdispatch introspection */; @@ -2356,7 +2612,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { - PRODUCT_NAME = "$(PRODUCT_NAME)"; }; name = Release; }; @@ -2364,7 +2619,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { - PRODUCT_NAME = "$(PRODUCT_NAME)"; }; name = Debug; }; @@ -2372,7 +2626,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { - PRODUCT_NAME = "$(PRODUCT_NAME)"; }; name = Release; }; @@ -2380,7 +2633,6 @@ isa = XCBuildConfiguration; baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { - PRODUCT_NAME = "$(PRODUCT_NAME)"; }; name = Debug; }; @@ -2410,15 +2662,40 @@ }; name = Debug; }; + E49BB6F01E70748100868613 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + buildSettings = { + DISPATCH_RESOLVED_VARIANT = alt; + }; + name = Release; + }; + E49BB6F11E70748100868613 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; + 
buildSettings = { + DISPATCH_RESOLVED_VARIANT = alt; + }; + name = Debug; + }; E49F24D9125D57FA0057C971 /* Release */ = { isa = XCBuildConfiguration; buildSettings = { + WARNING_CFLAGS = ( + "-Weverything", + "$(inherited)", + ); }; name = Release; }; E49F24DA125D57FA0057C971 /* Debug */ = { isa = XCBuildConfiguration; buildSettings = { + ONLY_ACTIVE_ARCH = YES; + WARNING_CFLAGS = ( + "-Weverything", + "$(inherited)", + ); }; name = Debug; }; @@ -2602,6 +2879,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E49BB6F01E70748100868613 /* Release */, + E49BB6F11E70748100868613 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E49F24D8125D57FA0057C971 /* Build configuration list for PBXNativeTarget "libdispatch no resolver" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/m4/blocks.m4 b/m4/blocks.m4 index 49ee2a364..38a8610fc 100644 --- a/m4/blocks.m4 +++ b/m4/blocks.m4 @@ -10,6 +10,21 @@ AC_ARG_WITH([blocks-runtime], LIBS="$LIBS -L$blocks_runtime"] ) +# +# Configure argument to enable/disable using an embedded blocks runtime +# +AC_ARG_ENABLE([embedded_blocks_runtime], + [AS_HELP_STRING([--enable-embedded-blocks-runtime], + [Embed blocks runtime in libdispatch [default=yes on Linux, default=no on all other platforms]])],, + [case $target_os in + linux*) + enable_embedded_blocks_runtime=yes + ;; + *) + enable_embedded_blocks_runtime=no + esac] +) + # # Detect compiler support for Blocks; perhaps someday -fblocks won't be # required, in which case we'll need to change this. 
@@ -29,30 +44,32 @@ AC_CACHE_CHECK([for C Blocks support], [dispatch_cv_cblocks], [ AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ CBLOCKS_FLAGS="$dispatch_cv_cblocks" - # - # It may be necessary to directly link the Blocks runtime on some - # systems, so give it a try if we can't link a C program that uses - # Blocks. We will want to remove this at somepoint, as really -fblocks - # should force that linkage already. - # - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) + AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ + # + # It may be necessary to directly link the Blocks runtime on some + # systems, so give it a try if we can't link a C program that uses + # Blocks. We will want to remove this at somepoint, as really -fblocks + # should force that linkage already. 
+ # + saveCFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + ]) CFLAGS="$saveCFLAGS" have_cblocks=true ], [ @@ -61,6 +78,7 @@ AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ ]) AM_CONDITIONAL(HAVE_CBLOCKS, $have_cblocks) AC_SUBST([CBLOCKS_FLAGS]) +AM_CONDITIONAL([BUILD_OWN_BLOCKS_RUNTIME], [test "x$enable_embedded_blocks_runtime" = "xyes"]) # # Because a different C++ compiler may be specified than C compiler, we have @@ -82,24 +100,26 @@ AC_CACHE_CHECK([for C++ Blocks support], [dispatch_cv_cxxblocks], [ AS_IF([test "x$dispatch_cv_cxxblocks" != "xno"], [ CXXBLOCKS_FLAGS="$dispatch_cv_cxxblocks" - saveCXXFLAGS="$CXXFLAGS" - CXXFLAGS="$CXXFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) + AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ + saveCXXFLAGS="$CXXFLAGS" + CXXFLAGS="$CXXFLAGS -fblocks -O0" + AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) + AC_TRY_LINK([], [ + ^{ int j; j=0; }(); + ], [ + AC_MSG_RESULT([no]); + ], [ + saveLIBS="$LIBS" + LIBS="$LIBS -lBlocksRuntime" + AC_TRY_LINK([], [ + ^{ int k; k=0; }(); + ], [ + AC_MSG_RESULT([-lBlocksRuntime]) + ], [ + AC_MSG_ERROR([can't find Blocks runtime]) + ]) + ]) + ]) CXXFLAGS="$saveCXXFLAGS" have_cxxblocks=true ], [ diff --git a/man/CMakeLists.txt b/man/CMakeLists.txt new file mode 
100644 index 000000000..e81b14b41 --- /dev/null +++ b/man/CMakeLists.txt @@ -0,0 +1,23 @@ + +# TODO(compnerd) add symlinks +if(NOT ENABLE_SWIFT) + install(FILES + dispatch.3 + dispatch_after.3 + dispatch_api.3 + dispatch_apply.3 + dispatch_async.3 + dispatch_data_create.3 + dispatch_group_create.3 + dispatch_io_create.3 + dispatch_io_read.3 + dispatch_object.3 + dispatch_once.3 + dispatch_queue_create.3 + dispatch_read.3 + dispatch_semaphore_create.3 + dispatch_source_create.3 + dispatch_time.3 + DESTINATION + "${CMAKE_INSTALL_FULL_MANDIR}/man3") +endif() diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 5a43a0a13..57c99a8a7 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2008-2010 Apple Inc. All rights reserved. +.\" Copyright (c) 2008-2017 Apple Inc. All rights reserved. .Dd May 1, 2009 .Dt dispatch_apply 3 .Os Darwin @@ -20,21 +20,32 @@ The .Fn dispatch_apply function provides data-level concurrency through a "for (;;)" loop like primitive: .Bd -literal -dispatch_queue_t the_queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); size_t iterations = 10; // 'idx' is zero indexed, just like: // for (idx = 0; idx < iterations; idx++) -dispatch_apply(iterations, the_queue, ^(size_t idx) { +dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) { printf("%zu\\n", idx); }); .Ed .Pp +Although any queue can be used, it is strongly recommended to use +.Vt DISPATCH_APPLY_AUTO +as the +.Vt queue +argument to both +.Fn dispatch_apply +and +.Fn dispatch_apply_f , +as shown in the example above, since this allows the system to automatically use worker threads +that match the configuration of the current thread as closely as possible. +No assumptions should be made about which global concurrent queue will be used. +.Pp Like a "for (;;)" loop, the .Fn dispatch_apply function is synchronous. 
-If asynchronous behavior is desired, please wrap the call to +If asynchronous behavior is desired, wrap the call to .Fn dispatch_apply with a call to .Fn dispatch_async @@ -49,7 +60,7 @@ achieved (perhaps using a power of two search): .Bd -literal #define STRIDE 3 -dispatch_apply(count / STRIDE, queue, ^(size_t idx) { +dispatch_apply(count / STRIDE, DISPATCH_APPLY_AUTO, ^(size_t idx) { size_t j = idx * STRIDE; size_t j_stop = j + STRIDE; do { @@ -74,12 +85,21 @@ This is in contrast to asynchronous functions which must retain both the block and target queue for the duration of the asynchronous operation (as the calling function may immediately release its interest in these objects). .Sh FUNDAMENTALS -Conceptually, .Fn dispatch_apply -is a convenient wrapper around +and +.Fn dispatch_apply_f +attempt to quickly create enough worker threads to efficiently iterate work in parallel. +By contrast, a loop that passes work items individually to .Fn dispatch_async -and a semaphore to wait for completion. -In practice, the dispatch library optimizes this function. +or +.Fn dispatch_async_f +will incur more overhead and does not express the desired parallel execution semantics to +the system, so may not create an optimal number of worker threads for a parallel workload. +For this reason, prefer to use +.Fn dispatch_apply +or +.Fn dispatch_apply_f +when parallel execution is important. 
.Pp The .Fn dispatch_apply @@ -99,5 +119,4 @@ use a for-loop around invocations of .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , -.Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 +.Xr dispatch_queue_create 3 diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index 95ba1c348..cddcf32aa 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -23,6 +23,10 @@ .Fo dispatch_resume .Fa "dispatch_object_t object" .Fc +.Ft void +.Fo dispatch_activate +.Fa "dispatch_object_t object" +.Fc .Ft "void *" .Fo dispatch_get_context .Fa "dispatch_object_t object" @@ -40,7 +44,7 @@ .Sh DESCRIPTION Dispatch objects share functions for coordinating memory management, suspension, cancellation and context pointers. -.Sh MEMORY MANGEMENT +.Sh MEMORY MANAGEMENT Objects returned by creation functions in the dispatch framework may be uniformly retained and released with the functions .Fn dispatch_retain @@ -123,6 +127,17 @@ dispatch_async(queue, ^{ dispatch_release(object); }); .Ed +.Sh ACTIVATION +Dispatch objects such as queues and sources may be created in an inactive +state. Objects in this state must be activated before any blocks +associated with them will be invoked. Calling +.Fn dispatch_activate +on an active object has no effect. +.Pp +Changing attributes such as the target queue or a source handler is no longer permitted +once the object has been activated (see +.Xr dispatch_set_target_queue 3 , +.Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION The invocation of blocks on dispatch queues or dispatch sources may be suspended or resumed with the functions @@ -148,7 +163,7 @@ and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is released. The result of releasing all references to a dispatch object while in -a suspended state is undefined. +an inactive or suspended state is undefined. .Sh CONTEXT POINTERS Dispatch objects support supplemental context pointers. 
The value of the context pointer may be retrieved and updated with diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index f3c305145..833e564a0 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -72,7 +72,8 @@ debugging and performance analysis. If a label is provided, it is copied. By convention, clients should pass a reverse DNS style label. For example: .Pp .Bd -literal -offset indent -my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", NULL); +my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", + DISPATCH_QUEUE_SERIAL); .Ed .Pp The diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index 81c291546..da263658a 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -23,6 +23,13 @@ .Fc .Sh DESCRIPTION Dispatch semaphores are used to synchronize threads. +.Pp +The +.Fn dispatch_semaphore_wait +function decrements the semaphore. If the resulting value is less than zero, +it waits for a signal from a thread that increments the semaphore by calling +.Fn dispatch_semaphore_signal +before returning. The .Fa timeout parameter is creatable with the @@ -30,6 +37,13 @@ parameter is creatable with the or .Xr dispatch_walltime 3 functions. +.Pp +The +.Fn dispatch_semaphore_signal +function increments the counting semaphore. If the previous value was less than zero, +it wakes one of the threads that are waiting in +.Fn dispatch_semaphore_wait +before returning. 
.Sh COMPLETION SYNCHRONIZATION If the .Fa count diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 4da708cfb..b4e9a7ad8 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -113,6 +113,8 @@ DISPATCH_SOURCE_TYPE_DATA_ADD .It DISPATCH_SOURCE_TYPE_DATA_OR .It +DISPATCH_SOURCE_TYPE_DATA_REPLACE +.It DISPATCH_SOURCE_TYPE_MACH_SEND .It DISPATCH_SOURCE_TYPE_MACH_RECV @@ -168,12 +170,34 @@ The result of calling this function from any other context is undefined. The .Fn dispatch_source_merge_data function is intended for use with the -.Vt DISPATCH_SOURCE_TYPE_DATA_ADD -and +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD , .Vt DISPATCH_SOURCE_TYPE_DATA_OR +and +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE source types. The result of using this function with any other source type is -undefined. Calling this function will atomically add or bitwise OR the data -into the source's data, and trigger the delivery of the source's event handler. +undefined. Data merging is performed according to the source type: +.Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent +.It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD +.Vt data +is atomically added to the source's data +.It \(bu DISPATCH_SOURCE_TYPE_DATA_OR +.Vt data +is atomically bitwise ORed into the source's data +.It \(bu DISPATCH_SOURCE_TYPE_DATA_REPLACE +.Vt data +atomically replaces the source's data. +.El +.Pp +If the source data value resulting from the merge operation is 0, the source +handler will not be invoked. This can happen if: +.Bl -bullet -compact -offset indent +.It +the atomic addition wraps for sources of type +.Vt DISPATCH_SOURCE_TYPE_DATA_ADD , +.It +0 is merged for sources of type +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE . 
+.El .Pp .Sh SOURCE EVENT HANDLERS In order to receive events from the dispatch source, an event handler should be @@ -265,14 +289,15 @@ The following section contains a summary of supported dispatch event types and the interpretation of their parameters and returned data. .Pp .Vt DISPATCH_SOURCE_TYPE_DATA_ADD , -.Vt DISPATCH_SOURCE_TYPE_DATA_OR +.Vt DISPATCH_SOURCE_TYPE_DATA_OR , +.Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE .Pp Sources of this type allow applications to manually trigger the source's event handler via a call to .Fn dispatch_source_merge_data . The data will be merged with the source's pending data via an atomic add or -atomic bitwise OR (based on the source's type), and the event handler block will -be submitted to the source's target queue. The +atomic bitwise OR, or direct replacement (based on the source's type), and the +event handler block will be submitted to the source's target queue. The .Fa data is application defined. These sources have no .Fa handle @@ -295,7 +320,7 @@ The port's corresponding receive right has been destroyed .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. Note that because this source type will request notifications on the provided port, it should not be mixed with the use of @@ -372,7 +397,7 @@ A signal was delivered to the process. .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. .Pp @@ -499,19 +524,6 @@ was created with the timer is based on .Xr gettimeofday 3 . .Pp -.Em Note : -Under the C language, untyped numbers default to the -.Vt int -type. This can lead to truncation bugs when arithmetic operations with other -numbers are expected to generate a -.Vt uint64_t -sized result. When in doubt, use -.Vt ull -as a suffix. 
For example: -.Bd -literal -offset indent -3ull * NSEC_PER_SEC -.Ed -.Pp .Vt DISPATCH_SOURCE_TYPE_VNODE .Pp Sources of this type monitor the virtual filesystem nodes for state changes. @@ -548,7 +560,7 @@ or .Pp The data returned by .Fn dispatch_source_get_data -indicates which of the events in the +is a bitmask that indicates which of the events in the .Fa mask were observed. .Pp diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 4b4f9d863..685898de0 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -80,28 +80,10 @@ parameter is ignored. .Pp Underflow causes the smallest representable value to be returned for a given clock. -.Sh CAVEATS -Under the C language, untyped numbers default to the -.Vt int -type. This can lead to truncation bugs when arithmetic operations with other -numbers are expected to generate a -.Vt int64_t -sized result, such as the -.Fa offset -argument to -.Fn dispatch_time -and -.Fn dispatch_walltime . -When in doubt, use -.Vt ull -as a suffix. 
For example: -.Bd -literal -offset indent -3ull * NSEC_PER_SEC -.Ed .Sh EXAMPLES Create a milestone two seconds in the future: .Bd -literal -offset indent -milestone = dispatch_time(DISPATCH_TIME_NOW, 2LL * NSEC_PER_SEC); +milestone = dispatch_time(DISPATCH_TIME_NOW, 2 * NSEC_PER_SEC); .Ed .Pp Create a milestone for use as an infinite timeout: @@ -116,6 +98,11 @@ ts.tv_sec = 0x7FFFFFFF; ts.tv_nsec = 0; milestone = dispatch_walltime(&ts, 0); .Ed +.Pp +Use a negative delta to create a milestone an hour before the one above: +.Bd -literal -offset indent +milestone = dispatch_walltime(&ts, -60 * 60 * NSEC_PER_SEC); +.Ed .Sh RETURN VALUE These functions return an abstract value for use with .Fn dispatch_after , diff --git a/os/CMakeLists.txt b/os/CMakeLists.txt new file mode 100644 index 000000000..6e2b41518 --- /dev/null +++ b/os/CMakeLists.txt @@ -0,0 +1,10 @@ + +# TODO(compnerd) ensure that object_private.h voucher_activity_private.h +# voucher_private.h are included in the source tarball + +install(FILES + object.h + linux_base.h + DESTINATION + "${CMAKE_INSTALL_FULL_INCLUDEDIR}/os") + diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index 2c6466f94..d131d6dc4 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -26,11 +26,12 @@ #include #else #include +#include #include #include #endif -#define OS_FIREHOSE_SPI_VERSION 20160318 +#define OS_FIREHOSE_SPI_VERSION 20170222 /*! 
* @group Firehose SPI @@ -38,39 +39,9 @@ * Layout of structs is subject to change without notice */ -#define FIREHOSE_BUFFER_CHUNK_SIZE 4096ul #define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul #define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 -typedef union { - uint64_t fbc_atomic_pos; -#define FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC (1ULL << 0) -#define FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC (1ULL << 16) -#define FIREHOSE_BUFFER_POS_REFCNT_INC (1ULL << 32) -#define FIREHOSE_BUFFER_POS_FULL_BIT (1ULL << 56) -#define FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(pos, stream) \ - ((((pos).fbc_atomic_pos >> 48) & 0x1ff) == (uint16_t)stream) - struct { - uint16_t fbc_next_entry_offs; - uint16_t fbc_private_offs; - uint8_t fbc_refcnt; - uint8_t fbc_qos_bits; - uint8_t fbc_stream; - uint8_t fbc_flag_full : 1; - uint8_t fbc_flag_io : 1; - uint8_t _fbc_flag_unused : 6; - }; -} firehose_buffer_pos_u; - -typedef struct firehose_buffer_chunk_s { - uint8_t fbc_start[0]; - firehose_buffer_pos_u volatile fbc_pos; - uint64_t fbc_timestamp; - uint8_t fbc_data[FIREHOSE_BUFFER_CHUNK_SIZE - - sizeof(firehose_buffer_pos_u) - - sizeof(uint64_t)]; -} __attribute__((aligned(8))) *firehose_buffer_chunk_t; - typedef struct firehose_buffer_range_s { uint16_t fbr_offset; // offset from the start of the buffer uint16_t fbr_length; @@ -78,6 +49,8 @@ typedef struct firehose_buffer_range_s { #ifdef KERNEL +typedef struct firehose_chunk_s *firehose_chunk_t; + // implemented by the kernel extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); extern void __firehose_critical_region_enter(void); @@ -89,19 +62,10 @@ firehose_tracepoint_t __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, uint16_t pubsize, uint16_t privsize, uint8_t **privptr); -firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr); - void 
__firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t vat, firehose_tracepoint_id_u vatid); - firehose_buffer_t __firehose_buffer_create(size_t *size); @@ -118,13 +82,12 @@ const uint32_t _firehose_spi_version; OS_ALWAYS_INLINE static inline const uint8_t * -_firehose_tracepoint_reader_init(firehose_buffer_chunk_t fbc, - const uint8_t **endptr) +_firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { - const uint8_t *start = fbc->fbc_data; - const uint8_t *end = fbc->fbc_start + fbc->fbc_pos.fbc_next_entry_offs; + const uint8_t *start = fc->fc_data; + const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; - if (end > fbc->fbc_start + FIREHOSE_BUFFER_CHUNK_SIZE) { + if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { end = start; } *endptr = end; @@ -136,27 +99,29 @@ static inline firehose_tracepoint_t _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_tracepoint_t ft; + struct ft_unaligned_s { + struct firehose_tracepoint_s ft; + } __attribute__((packed, aligned(1))) *uft; do { - ft = (firehose_tracepoint_t)*ptr; - if (ft->ft_data >= end) { + uft = (struct ft_unaligned_s *)*ptr; + if (uft->ft.ft_data >= end) { // reached the end return NULL; } - if (!ft->ft_length) { + if (!uft->ft.ft_length) { // tracepoint write didn't even start return NULL; } - if (ft->ft_length > end - ft->ft_data) { + if (uft->ft.ft_length > end - uft->ft.ft_data) { // invalid length return NULL; } - *ptr += roundup(ft_size + ft->ft_length, 8); + *ptr += roundup(ft_size + uft->ft.ft_length, 8); // test whether write of the tracepoint was finished - } while (os_unlikely(ft->ft_id.ftid_value == 0)); + } while (os_unlikely(uft->ft.ft_id.ftid_value == 0)); - return ft; + return (firehose_tracepoint_t)uft; } #define 
firehose_tracepoint_foreach(ft, fbc) \ @@ -165,13 +130,13 @@ _firehose_tracepoint_reader_next(const uint8_t **ptr, const uint8_t *end) OS_ALWAYS_INLINE static inline bool -firehose_buffer_range_validate(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_buffer_range_t range) +firehose_buffer_range_validate(firehose_chunk_t fc, firehose_tracepoint_t ft, + firehose_buffer_range_t range) { - if (range->fbr_offset + range->fbr_length > FIREHOSE_BUFFER_CHUNK_SIZE) { + if (range->fbr_offset + range->fbr_length > FIREHOSE_CHUNK_SIZE) { return false; } - if (fbc->fbc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { + if (fc->fc_start + range->fbr_offset < ft->ft_data + ft->ft_length) { return false; } return true; diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index 4bff8abc1..fc352da1c 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -139,6 +139,32 @@ OS_NOTHROW OS_NONNULL1 uint64_t firehose_client_get_unique_pid(firehose_client_t client, pid_t *pid); +/*! + * @function firehose_client_get_pid_version + * + * @abstract + * Returns the pid version for that client. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +int +firehose_client_get_pid_version(firehose_client_t client); + +/*! + * @function firehose_client_get_euid + * + * @abstract + * Returns the EUID for that client as discovered at connect time. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +uid_t +firehose_client_get_euid(firehose_client_t client); + /*! * @function firehose_client_get_metadata_buffer * @@ -201,6 +227,23 @@ OS_NOTHROW OS_NONNULL1 void * firehose_client_set_context(firehose_client_t client, void *ctxt); +/*! 
+ * @function firehose_client_initiate_quarantine + * + * @abstract + * Starts the procedure to move the given client to the high volume quarantine + * + * @discussion + * When the client is in the high volume quarantine, their firehose chunks + * have the fcp_quarantined bit set to 1. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +void +firehose_client_initiate_quarantine(firehose_client_t client); + /*! * @function firehose_client_metadata_stream_peek * @@ -235,7 +278,7 @@ OS_NOTHROW OS_NONNULL1 OS_NONNULL4 void firehose_client_metadata_stream_peek(firehose_client_t client, firehose_event_t context, OS_NOESCAPE bool (^peek_should_start)(void), - OS_NOESCAPE bool (^peek)(firehose_buffer_chunk_t fbc)); + OS_NOESCAPE bool (^peek)(firehose_chunk_t fbc)); #pragma mark - Firehose Server @@ -246,7 +289,7 @@ firehose_client_metadata_stream_peek(firehose_client_t client, * Type of the handler block for firehose_server_init() */ typedef void (^firehose_handler_t)(firehose_client_t client, - firehose_event_t event, firehose_buffer_chunk_t page); + firehose_event_t event, firehose_chunk_t page); /*! * @function firehose_server_init @@ -276,6 +319,20 @@ OS_NOTHROW void firehose_server_assert_spi_version(uint32_t spi_version); +/*! + * @function firehose_server_has_ever_flushed_pages + * + * @abstract + * Checks whether the firehose server has ever flushed any pages this boot. + * + * @discussion + * Must be called after firehose_server_init() and before calling + * firehose_server_resume(). + */ +OS_NOTHROW +bool +firehose_server_has_ever_flushed_pages(void); + /*! * @function firehose_server_resume * @@ -289,11 +346,72 @@ OS_NOTHROW void firehose_server_resume(void); +/*! + * @function firehose_server_cancel + * + * @abstract + * Cancels the server, disconnects all clients, and prevents new connections. + */ +OS_NOTHROW +void +firehose_server_cancel(void); + +/*! 
+ * @typedef firehose_server_queue_t + * + * @abstract + * Values to pass to firehose_server_get_queue() + */ +OS_ENUM(firehose_server_queue, unsigned long, + FIREHOSE_SERVER_QUEUE_UNKNOWN, + FIREHOSE_SERVER_QUEUE_IO, + FIREHOSE_SERVER_QUEUE_MEMORY, +); + +/*! + * @function firehose_server_copy_queue + * + * @abstract + * Returns internal queues to the firehose server subsystem. + */ +OS_NOTHROW OS_OBJECT_RETURNS_RETAINED +dispatch_queue_t +firehose_server_copy_queue(firehose_server_queue_t which); + +/*! + * @function firehose_server_quarantined_suspend + * + * @abstract + * Suspends processing of quarantined clients until + * firehose_server_quarantined_resume() is called for the same queue. + * + * @discussion + * Suspending processing of quarantined clients causes firehose_snapshot() + * to block until the processing is enabled again. + * + * However if this is used to pace the processing, it is a good idea to disable + * this pacing until the snapshot has completed. + * + * Similarly, quarantine suspension must be off during shutdown. + */ +OS_NOTHROW +void +firehose_server_quarantined_suspend(firehose_server_queue_t q); + +/*! + * @function firehose_server_quarantined_resume + * + * @abstract + * Resumes processing of quarantined clients. + */ +OS_NOTHROW +void +firehose_server_quarantined_resume(firehose_server_queue_t q); + #pragma mark - Firehose Snapshot /*! * @typedef firehose_snapshot_event - * */ OS_ENUM(firehose_snapshot_event, unsigned long, FIREHOSE_SNAPSHOT_EVENT_IO_START = 1, @@ -310,7 +428,7 @@ OS_ENUM(firehose_snapshot_event, unsigned long, * Type of the handler block for firehose_snapshot */ typedef void (^firehose_snapshot_handler_t)(firehose_client_t client, - firehose_snapshot_event_t event, firehose_buffer_chunk_t page); + firehose_snapshot_event_t event, firehose_chunk_t page); /*! 
* @function firehose_snapshot diff --git a/os/linux_base.h b/os/linux_base.h index 96a3c825b..c8b9cad7c 100644 --- a/os/linux_base.h +++ b/os/linux_base.h @@ -13,13 +13,31 @@ #ifndef __OS_LINUX_BASE__ #define __OS_LINUX_BASE__ -#include #include +#if HAVE_SYS_CDEFS_H +#include +#endif + +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) +#endif +#ifndef API_DEPRECATED +#define API_DEPRECATED(...) +#endif +#ifndef API_UNAVAILABLE +#define API_UNAVAILABLE(...) +#endif +#ifndef API_DEPRECATED_WITH_REPLACEMENT +#define API_DEPRECATED_WITH_REPLACEMENT(...) +#endif + #if __GNUC__ #define OS_EXPECT(x, v) __builtin_expect((x), (v)) +#define OS_UNUSED __attribute__((__unused__)) #else #define OS_EXPECT(x, v) (x) +#define OS_UNUSED #endif #ifndef os_likely @@ -68,6 +86,14 @@ #define __OS_CONCAT(x, y) x ## y #define OS_CONCAT(x, y) __OS_CONCAT(x, y) +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) +#define OS_ENUM(_name, _type, ...) \ +typedef enum : _type { __VA_ARGS__ } _name##_t +#else +#define OS_ENUM(_name, _type, ...) \ +enum { __VA_ARGS__ }; typedef _type _name##_t +#endif + /* * Stub out misc linking and compilation attributes */ diff --git a/os/object.h b/os/object.h index f3faa62fd..100721fc0 100644 --- a/os/object.h +++ b/os/object.h @@ -23,11 +23,10 @@ #ifdef __APPLE__ #include +#include #include -#endif -#ifndef __linux__ #include -#else +#elif defined(__linux__) #include #endif @@ -75,6 +74,9 @@ #endif // OS_OBJECT_HAVE_OBJC_SUPPORT #if OS_OBJECT_HAVE_OBJC_SUPPORT +#if defined(__swift__) && __swift__ && !OS_OBJECT_USE_OBJC +#define OS_OBJECT_USE_OBJC 1 +#endif #ifndef OS_OBJECT_USE_OBJC #define OS_OBJECT_USE_OBJC 1 #endif @@ -232,7 +234,7 @@ __BEGIN_DECLS * @result * The retained object. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC") void* os_retain(void *object); @@ -254,7 +256,7 @@ os_retain(void *object); * @param object * The object to release. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT void OS_SWIFT_UNAVAILABLE("Can't be used with ARC") os_release(void *object); diff --git a/os/object_private.h b/os/object_private.h index dc2af8345..215c3d146 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -27,20 +27,18 @@ #ifndef __OS_OBJECT_PRIVATE__ #define __OS_OBJECT_PRIVATE__ -#include -#include #include - -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x, y) -#endif +#include +#include #if __GNUC__ #define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) #define OS_OBJECT_NONNULL __attribute__((__nonnull__)) #define OS_OBJECT_WARN_RESULT __attribute__((__warn_unused_result__)) #define OS_OBJECT_MALLOC __attribute__((__malloc__)) +#ifndef OS_OBJECT_EXPORT #define OS_OBJECT_EXPORT extern __attribute__((visibility("default"))) +#endif #else /*! @parseOnly */ #define OS_OBJECT_NOTHROW @@ -50,8 +48,11 @@ #define OS_OBJECT_WARN_RESULT /*! @parseOnly */ #define OS_OBJECT_MALLOC +#ifndef OS_OBJECT_EXPORT +/*! 
@parseOnly */ #define OS_OBJECT_EXPORT extern #endif +#endif #if OS_OBJECT_USE_OBJC && __has_feature(objc_arc) #define _OS_OBJECT_OBJC_ARC 1 @@ -112,7 +113,7 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) #elif OS_OBJECT_USE_OBJC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT @interface OS_OBJECT_CLASS(object) : NSObject - (void)_xref_dispose; @@ -136,53 +137,65 @@ __BEGIN_DECLS #if !_OS_OBJECT_OBJC_ARC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_alloc_realized(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_dealloc(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_with_resurrect(_os_object_t obj); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW 
OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_internal(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release_internal(_os_object_t object); +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") +_os_object_t +_os_object_retain_internal_n(_os_object_t object, uint16_t n); + +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") +void +_os_object_release_internal_n(_os_object_t object, uint16_t n); + #endif // !_OS_OBJECT_OBJC_ARC __END_DECLS diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index 8f233b33c..8ce0ef583 100644 --- a/os/voucher_activity_private.h +++ b/os/voucher_activity_private.h @@ -28,11 +28,13 @@ #endif #ifndef __linux__ #include +#include #endif +#include #include #include "voucher_private.h" -#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20160329 +#define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003 #if OS_VOUCHER_WEAK_IMPORT #define OS_VOUCHER_EXPORT OS_EXPORT OS_WEAK_IMPORT @@ -40,12 +42,6 @@ #define OS_VOUCHER_EXPORT OS_EXPORT #endif -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_PUSH \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") -#define __VOUCHER_ACTIVITY_IGNORE_DEPRECATION_POP \ - _Pragma("clang diagnostic pop") - __BEGIN_DECLS /*! @@ -79,8 +75,7 @@ __BEGIN_DECLS * The current activity identifier, if any. 
When 0 is returned, parent_id will * also always be 0. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); @@ -109,15 +104,14 @@ voucher_get_activity_id(voucher_t voucher, firehose_activity_id_t *parent_id); * The current activity identifier, if any. When 0 is returned, parent_id will * also always be 0. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, firehose_activity_id_t *parent_id); /*! - * @function voucher_activity_create + * @function voucher_activity_create_with_data * * @abstract * Creates a voucher object with a new activity identifier. @@ -151,22 +145,24 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, * @param flags * See voucher_activity_flag_t documentation for effect. * - * @param location - * Location identifier for the automatic tracepoint generated as part of - * creating the new activity. + * @param pubdata + * Pointer to packed buffer of tracepoint data. + * + * @param publen + * Length of data at 'pubdata'. * * @result * A new voucher with an activity identifier. 
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t -voucher_activity_create(firehose_tracepoint_id_t trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location); +voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, + const void *pubdata, size_t publen); -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_create_with_data", + macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, @@ -177,6 +173,21 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, * SPI intended for libtrace only */ +/*! + * @function voucher_activity_id_allocate + * + * @abstract + * Allocate a new system-wide unique activity ID. + * + * @param flags + * The bottom-most 8 bits of the flags will be used to generate the ID. + * See firehose_activity_flags_t. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_VOUCHER_EXPORT OS_NOTHROW +firehose_activity_id_t +voucher_activity_id_allocate(firehose_activity_flags_t flags); + /*! * @function voucher_activity_flush * @@ -192,8 +203,7 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, * @param stream * The stream to flush. 
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_activity_flush(firehose_stream_t stream); @@ -219,8 +229,7 @@ voucher_activity_flush(firehose_stream_t stream); * @param publen * Length of data at 'pubdata'. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 firehose_tracepoint_id_t voucher_activity_trace(firehose_stream_t stream, @@ -228,7 +237,7 @@ voucher_activity_trace(firehose_stream_t stream, const void *pubdata, size_t publen); /*! - * @function voucher_activity_trace_with_private_strings + * @function voucher_activity_trace_v * * @abstract * Add a tracepoint to the specified stream, with private data. @@ -242,20 +251,29 @@ voucher_activity_trace(firehose_stream_t stream, * @param timestamp * The mach_approximate_time()/mach_absolute_time() value for this tracepoint. * - * @param pubdata - * Pointer to packed buffer of tracepoint data. + * @param iov + * Array of `struct iovec` pointing to the data to layout. + * The total size of this iovec must span exactly `publen + privlen` bytes. + * The `publen` boundary must coincide with the end of an iovec (each iovec + * must either be pure public or pure private data). * * @param publen - * Length of data at 'pubdata'. - * - * @param privdata - * Pointer to packed buffer of private tracepoint data. + * Total length of data to read from the iovec for the public data. * * @param privlen - * Length of data at 'privdata'. + * Length of data to read from the iovec after the public data for the private + * data. 
*/ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const struct iovec *iov, size_t publen, size_t privlen); + + +API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_trace_v", + macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6 firehose_tracepoint_id_t voucher_activity_trace_with_private_strings(firehose_stream_t stream, @@ -263,15 +281,14 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream, const void *pubdata, size_t publen, const void *privdata, size_t privlen); -typedef struct voucher_activity_hooks_s { -#define VOUCHER_ACTIVITY_HOOKS_VERSION 3 +typedef const struct voucher_activity_hooks_s { +#define VOUCHER_ACTIVITY_HOOKS_VERSION 5 long vah_version; - // version 1 mach_port_t (*vah_get_logd_port)(void); - // version 2 dispatch_mach_handler_function_t vah_debug_channel_handler; - // version 3 kern_return_t (*vah_get_reconnect_info)(mach_vm_address_t *, mach_vm_size_t *); + void (*vah_metadata_init)(void *metadata_buffer, size_t size); + void (*vah_quarantine_starts)(void); } *voucher_activity_hooks_t; /*! @@ -283,8 +300,7 @@ typedef struct voucher_activity_hooks_s { * @param hooks * A pointer to a voucher_activity_hooks_s structure. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL_ALL void voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); @@ -302,7 +318,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); * @result * Address of metadata buffer. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL void* voucher_activity_get_metadata_buffer(size_t *length); @@ -314,8 +330,7 @@ voucher_activity_get_metadata_buffer(size_t *length); * Return the current voucher activity ID. Available for the dyld client stub * only. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW firehose_activity_id_t voucher_get_activity_id_4dyld(void); diff --git a/os/voucher_private.h b/os/voucher_private.h index 562a70415..aecbbc9ff 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -23,6 +23,7 @@ #ifndef __linux__ #include +#include #endif #if __has_include() #include @@ -100,7 +101,7 @@ OS_OBJECT_DECL_CLASS(voucher); * @result * The previously adopted voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW voucher_t _Nullable @@ -116,7 +117,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); * @result * The currently adopted voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); @@ -135,7 +136,7 @@ voucher_copy(void); * @result * A copy of the currently adopted voucher object, with importance removed. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy_without_importance(void); @@ -161,7 +162,7 @@ voucher_copy_without_importance(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_replace_default_voucher(void); @@ -179,7 +180,7 @@ voucher_replace_default_voucher(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); @@ -201,8 +202,23 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * This flag is ignored if a specific voucher object is assigned with the * dispatch_block_create_with_voucher* functions, and is equivalent to passing * the NULL voucher to these functions. + * + * @const DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE + * Flag indicating that this dispatch block object should try to reset the + * recorded maximum QoS of all currently enqueued items on a serial dispatch + * queue at the base of a queue hierarchy. + * + * This is only works if the queue becomes empty by dequeuing the block in + * question, and then allows that block to enqueue more work on this hierarchy + * without perpetuating QoS overrides resulting from items previously executed + * on the hierarchy. + * + * A dispatch block object created with this flag set cannot be used with + * dispatch_block_wait() or dispatch_block_cancel(). */ -#define DISPATCH_BLOCK_NO_VOUCHER (0x40) +#define DISPATCH_BLOCK_NO_VOUCHER (0x40ul) + +#define DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE (0x80ul) /*! * @function dispatch_block_create_with_voucher @@ -263,7 +279,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -346,7 +362,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -362,52 +378,10 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * @function dispatch_queue_create_with_accounting_override_voucher * * @abstract - * Creates a new dispatch queue with an accounting override voucher created - * from the specified voucher. - * - * @discussion - * See dispatch_queue_create() headerdoc for generic details on queue creation. - * - * The resource accounting attributes of the specified voucher are extracted - * and used to create an accounting override voucher for the new queue. - * - * Every block executed on the returned queue will initially have this override - * voucher adopted, any voucher automatically associated with or explicitly - * assigned to the block will NOT be used and released immediately before block - * execution starts. - * - * The accounting override voucher will be automatically propagated to any - * asynchronous work generated from the queue following standard voucher - * propagation rules. - * - * NOTE: this SPI should only be used in special circumstances when a subsystem - * has complete control over all workitems submitted to a queue (e.g. no client - * block is ever submitted to the queue) and if and only if such queues have a - * one-to-one mapping with resource accounting identities. 
- * - * CAUTION: use of this SPI represents a potential voucher propagation hole. It - * is the responsibility of the caller to ensure that any callbacks into client - * code from the queue have the correct client voucher applied (rather than the - * automatically propagated accounting override voucher), e.g. by use of the - * dispatch_block_create() API to capture client state at the time the callback - * is registered. - * - * @param label - * A string label to attach to the queue. - * This parameter is optional and may be NULL. - * - * @param attr - * DISPATCH_QUEUE_SERIAL, DISPATCH_QUEUE_CONCURRENT, or the result of a call to - * the function dispatch_queue_attr_make_with_qos_class(). - * - * @param voucher - * A voucher whose resource accounting attributes are used to create the - * accounting override voucher attached to the queue. - * - * @result - * The newly created dispatch queue. + * Deprecated, do not use, will abort process if called. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_DEPRECATED("removed SPI", \ + macos(10.11,10.12), ios(9.0,10.0), watchos(2.0,3.0), tvos(9.0,10.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -440,7 +414,7 @@ dispatch_queue_create_with_accounting_override_voucher( * The newly created voucher object or NULL if the message was not carrying a * mach voucher. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); @@ -475,7 +449,7 @@ struct proc_persona_info; * or the persona identifier of the current process * or PERSONA_ID_NONE */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -498,7 +472,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -522,7 +496,7 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -__OSX_AVAILABLE_STARTING(__MAC_NA,__IPHONE_9_2) +API_AVAILABLE(ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( diff --git a/private/CMakeLists.txt b/private/CMakeLists.txt new file mode 100644 index 000000000..18788d727 --- /dev/null +++ b/private/CMakeLists.txt @@ -0,0 +1,5 @@ + +# TODO(compnerd) ensure that benchmark.h data_private.h introduction_private.h +# io_private.h layout_private.h mach_private.h private.h queue_private.h +# source_private.h are included in the source tarball + diff --git a/private/benchmark.h b/private/benchmark.h index ef3cdbd2f..ab5715648 100644 --- a/private/benchmark.h +++ b/private/benchmark.h @@ -70,13 +70,13 @@ __BEGIN_DECLS * cache-line. 
*/ #ifdef __BLOCKS__ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW uint64_t dispatch_benchmark(size_t count, dispatch_block_t block); #endif -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW uint64_t dispatch_benchmark_f(size_t count, void *_Nullable ctxt, diff --git a/private/module.modulemap b/private/darwin/module.modulemap similarity index 90% rename from private/module.modulemap rename to private/darwin/module.modulemap index 62975a59b..ceb963a1f 100644 --- a/private/module.modulemap +++ b/private/darwin/module.modulemap @@ -1,7 +1,6 @@ module DispatchPrivate [system] [extern_c] { umbrella header "private.h" exclude header "mach_private.h" - module * { export * } export * } diff --git a/private/data_private.h b/private/data_private.h index 7485525a5..364a8ffe0 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -43,7 +43,7 @@ __BEGIN_DECLS * encapsulate buffers that should not be copied or freed by the system. */ #define DISPATCH_DATA_DESTRUCTOR_NONE (_dispatch_data_destructor_none) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); /*! @@ -53,7 +53,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(none); */ #define DISPATCH_DATA_DESTRUCTOR_VM_DEALLOCATE \ (_dispatch_data_destructor_vm_deallocate) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); /*! @@ -77,7 +77,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(vm_deallocate); * data buffer when it is no longer needed. * @result A newly created dispatch data object. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_f(const void *buffer, @@ -100,7 +100,7 @@ dispatch_data_create_f(const void *buffer, * location of the newly allocated memory region, or NULL. * @result A newly created dispatch data object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t @@ -142,7 +142,7 @@ typedef bool (*dispatch_data_applier_function_t)(void *_Nullable context, * @result A Boolean indicating whether traversal completed * successfully. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW bool dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, @@ -163,7 +163,7 @@ dispatch_data_apply_f(dispatch_data_t data, void *_Nullable context, * @result A mach port for the newly made memory entry, or * MACH_PORT_NULL if an error occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_data_make_memory_entry(dispatch_data_t data); @@ -198,7 +198,7 @@ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; * or should be, comprised of raw data bytes with no given encoding. */ #define DISPATCH_DATA_FORMAT_TYPE_NONE (&_dispatch_data_format_type_none) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(none); /*! @@ -209,7 +209,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(none); * types. 
*/ #define DISPATCH_DATA_FORMAT_TYPE_BASE32 (&_dispatch_data_format_type_base32) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base32); /*! @@ -221,7 +221,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32); */ #define DISPATCH_DATA_FORMAT_TYPE_BASE32HEX \ (&_dispatch_data_format_type_base32hex) -__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); /*! @@ -232,7 +232,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base32hex); * types. */ #define DISPATCH_DATA_FORMAT_TYPE_BASE64 (&_dispatch_data_format_type_base64) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(base64); /*! @@ -242,7 +242,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(base64); * with other UTF format types. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF8 (&_dispatch_data_format_type_utf8) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); /*! @@ -252,7 +252,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf8); * conjunction with other UTF format types. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16LE (&_dispatch_data_format_type_utf16le) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); /*! @@ -262,7 +262,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16le); * conjunction with other UTF format types. */ #define DISPATCH_DATA_FORMAT_TYPE_UTF16BE (&_dispatch_data_format_type_utf16be) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); /*! @@ -274,7 +274,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf16be); * format. 
*/ #define DISPATCH_DATA_FORMAT_TYPE_UTF_ANY (&_dispatch_data_format_type_utf_any) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); /*! @@ -295,7 +295,7 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); * produced, or NULL if an error occurred. */ -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t diff --git a/private/generic/module.modulemap b/private/generic/module.modulemap new file mode 100644 index 000000000..ceb963a1f --- /dev/null +++ b/private/generic/module.modulemap @@ -0,0 +1,10 @@ +module DispatchPrivate [system] [extern_c] { + umbrella header "private.h" + exclude header "mach_private.h" + export * +} + +module DispatchIntrospectionPrivate [system] [extern_c] { + header "introspection_private.h" + export * +} diff --git a/private/introspection_private.h b/private/introspection_private.h index fa8e49aeb..972c68857 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -68,8 +68,8 @@ typedef struct dispatch_queue_s *dispatch_queue_t; typedef struct dispatch_source_s *dispatch_source_t; typedef struct dispatch_group_s *dispatch_group_t; typedef struct dispatch_object_s *dispatch_object_t; -#ifndef __OSX_AVAILABLE_STARTING -#define __OSX_AVAILABLE_STARTING(x,y) +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) #endif #ifndef DISPATCH_EXPORT #define DISPATCH_EXPORT extern @@ -135,7 +135,7 @@ typedef struct dispatch_object_s *dispatch_object_t; * Size of dispatch_introspection_source_s structure. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT const struct dispatch_introspection_versions_s { unsigned long introspection_version; unsigned long hooks_version; @@ -716,7 +716,7 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, * hooks on output. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); diff --git a/private/io_private.h b/private/io_private.h index 0bb1e3b25..293258161 100644 --- a/private/io_private.h +++ b/private/io_private.h @@ -79,7 +79,7 @@ __BEGIN_DECLS * param error An errno condition for the read operation or * zero if the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW void dispatch_read_f(dispatch_fd_t fd, @@ -121,7 +121,7 @@ dispatch_read_f(dispatch_fd_t fd, * param error An errno condition for the write operation or * zero if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL5 DISPATCH_NOTHROW void @@ -160,7 +160,7 @@ dispatch_write_f(dispatch_fd_t fd, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -200,7 +200,7 @@ dispatch_io_create_f(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type or non-absolute path specified). 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -244,7 +244,7 @@ dispatch_io_create_with_path_f(dispatch_io_type_t type, * @result The newly created dispatch I/O channel or NULL if an error * occurred (invalid type specified). */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t @@ -311,7 +311,7 @@ typedef void (*dispatch_io_handler_function_t)(void *_Nullable context, * param error An errno condition for the read operation or zero if * the read was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -368,7 +368,7 @@ dispatch_io_read_f(dispatch_io_t channel, * param error An errno condition for the write operation or zero * if the write was successful. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -402,7 +402,7 @@ dispatch_io_write_f(dispatch_io_t channel, * the barrier function. * @param barrier The barrier function. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_io_barrier_f(dispatch_io_t channel, diff --git a/private/layout_private.h b/private/layout_private.h index bf93ee999..0c0cd942d 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -29,7 +29,7 @@ __BEGIN_DECLS #if !TARGET_OS_WIN32 -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end const uint16_t dqo_version; @@ -60,7 +60,7 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { * SPI intended for CoreSymbolication only */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { // always add new fields at the end const uint16_t dti_version; diff --git a/private/mach_private.h b/private/mach_private.h index 2228436a7..bc5322332 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -36,7 +36,7 @@ __BEGIN_DECLS #if DISPATCH_MACH_SPI -#define DISPATCH_MACH_SPI_VERSION 20160505 +#define DISPATCH_MACH_SPI_VERSION 20161026 #include @@ -109,6 +109,23 @@ DISPATCH_DECL(dispatch_mach); * result operation and never passed to a channel handler. Indicates that the * message passed to the send operation must not be disposed of until it is * returned via the channel handler. + * + * @const DISPATCH_MACH_SIGTERM_RECEIVED + * A SIGTERM signal has been received. This notification is delivered at most + * once during the lifetime of the channel. This event is sent only for XPC + * channels (i.e. channels that were created by calling + * dispatch_mach_create_4libxpc()) and only if the + * dmxh_enable_sigterm_notification function in the XPC hooks structure is not + * set or it returned true when it was called at channel activation time. 
+ * + * @const DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + * The channel has been disconnected by a call to dispatch_mach_reconnect() or + * dispatch_mach_cancel(), an empty message is passed in the message parameter + * (so that associated port rights can be disposed of). The message header will + * contain a local port with the receive right previously allocated to receive + * an asynchronous reply to a message previously sent to the channel. Used + * only if the channel is disconnected while waiting for a reply to a message + * sent with dispatch_mach_send_with_result_and_async_reply_4libxpc(). */ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CONNECTED = 1, @@ -121,6 +138,8 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CANCELED, DISPATCH_MACH_REPLY_RECEIVED, DISPATCH_MACH_NEEDS_DEFERRED_SEND, + DISPATCH_MACH_SIGTERM_RECEIVED, + DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED, DISPATCH_MACH_REASON_LAST, /* unused */ ); @@ -202,7 +221,7 @@ DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int, * buffer, or NULL. * @result A newly created dispatch mach message object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_mach_msg_t @@ -219,7 +238,7 @@ dispatch_mach_msg_create(mach_msg_header_t *_Nullable msg, size_t size, * size of the message buffer, or NULL. * @result Pointer to message buffer underlying the object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW mach_msg_header_t* dispatch_mach_msg_get_msg(dispatch_mach_msg_t message, @@ -267,7 +286,7 @@ typedef void (^dispatch_mach_handler_t)(dispatch_mach_reason_t reason, * @result * The newly created dispatch mach channel. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL3 DISPATCH_NOTHROW dispatch_mach_t @@ -321,7 +340,7 @@ typedef void (*dispatch_mach_handler_function_t)(void *_Nullable context, * @result * The newly created dispatch mach channel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL4 DISPATCH_NOTHROW dispatch_mach_t @@ -354,7 +373,7 @@ dispatch_mach_create_f(const char *_Nullable label, * to channel cancellation or reconnection) and the channel handler has * returned. May be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive, @@ -385,7 +404,7 @@ dispatch_mach_connect(dispatch_mach_t channel, mach_port_t receive, * is complete (or not peformed due to channel cancellation or reconnection) * and the channel handler has returned. May be NULL. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send, @@ -408,7 +427,7 @@ dispatch_mach_reconnect(dispatch_mach_t channel, mach_port_t send, * @param channel * The mach channel to cancel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_cancel(dispatch_mach_t channel); @@ -451,7 +470,7 @@ dispatch_mach_cancel(dispatch_mach_t channel); * Additional send options to pass to mach_msg() when performing the send * operation. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW void dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message, @@ -519,8 +538,7 @@ dispatch_mach_send(dispatch_mach_t channel, dispatch_mach_msg_t message, * Out parameter to return the error from the immediate send attempt. * If a deferred send is required, returns 0. Must not be NULL. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -580,7 +598,7 @@ dispatch_mach_send_with_result(dispatch_mach_t channel, * @result * The received reply message object, or NULL if the channel was canceled. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.11), ios(9.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NOTHROW dispatch_mach_msg_t _Nullable @@ -662,8 +680,7 @@ dispatch_mach_send_and_wait_for_reply(dispatch_mach_t channel, * @result * The received reply message object, or NULL if the channel was canceled. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW @@ -688,7 +705,7 @@ dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t channel, * @param barrier * The barrier block to submit to the channel target queue. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); @@ -711,7 +728,7 @@ dispatch_mach_send_barrier(dispatch_mach_t channel, dispatch_block_t barrier); * @param barrier * The barrier function to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, @@ -731,7 +748,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t channel, void *_Nullable context, * @param barrier * The barrier block to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_receive_barrier(dispatch_mach_t channel, @@ -754,7 +771,7 @@ dispatch_mach_receive_barrier(dispatch_mach_t channel, * @param barrier * The barrier function to submit to the channel target queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context, @@ -781,11 +798,231 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t channel, void *_Nullable context * @result * The most recently specified check-in port for the channel. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t channel); +// SPI for libxpc +/* + * Type for the callback for receipt of asynchronous replies to + * dispatch_mach_send_with_result_and_async_reply_4libxpc(). 
+ */ +typedef void (*_Nonnull dispatch_mach_async_reply_callback_t)(void *context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message); + +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +typedef const struct dispatch_mach_xpc_hooks_s { +#define DISPATCH_MACH_XPC_HOOKS_VERSION 3 + unsigned long version; + + /* Fields available in version 1. */ + + /* + * Called to handle a Mach message event inline if possible. Returns true + * if the event was handled, false if the event should be delivered to the + * channel event handler. The implementation should not make any assumptions + * about the thread in which the function is called and cannot assume that + * invocations of this function are serialized relative to each other or + * relative to the channel's event handler function. In addition, the + * handler must not throw an exception or call out to any code that might + * throw an exception. + */ + bool (* _Nonnull dmxh_direct_message_handler)(void *_Nullable context, + dispatch_mach_reason_t reason, dispatch_mach_msg_t message, + mach_error_t error); + + /* Fields available in version 2. */ + + /* + * Gets the queue to which a reply to a message sent using + * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be + * delivered. The msg_context argument is the value of the do_ctxt field + * of the outgoing message, as returned by dispatch_get_context(). If this + * function returns NULL, the reply will be delivered to the channel queue. + * This function should not make any assumptions about the thread on which + * it is called and, since it may be called more than once per message, it + * should execute as quickly as possible and not attempt to synchronize with + * other code. + */ + dispatch_queue_t _Nullable (*_Nonnull dmxh_msg_context_reply_queue)( + void *_Nonnull msg_context); + + /* + * Called when a reply to a message sent by + * dispatch_mach_send_with_result_and_async_reply_4libxpc() is received. 
The + * message argument points to the reply message and the context argument is + * the context value passed to dispatch_mach_create_4libxpc() when creating + * the Mach channel. The handler is called on the queue that is returned by + * dmxh_msg_context_reply_queue() when the reply is received or if the + * channel is disconnected. The reason argument is + * DISPATCH_MACH_MESSAGE_RECEIVED if a reply has been received or + * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED if the channel has been + * disconnected. Refer to the documentation for + * dispatch_mach_send_with_result_and_async_reply_4libxpc() for more + * details. + */ + dispatch_mach_async_reply_callback_t dmxh_async_reply_handler; + + /* Fields available in version 3. */ + /** + * Called once when the Mach channel has been activated. If this function + * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be + * delivered to the channel's event handler when a SIGTERM is received. + */ + bool (* _Nullable dmxh_enable_sigterm_notification)( + void *_Nullable context); +} *dispatch_mach_xpc_hooks_t; + +#define DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(hooks) ((hooks)->version >= 2) + +/*! + * @function dispatch_mach_hooks_install_4libxpc + * + * @abstract + * installs XPC callbacks for dispatch Mach channels. + * + * @discussion + * In order to improve the performance of the XPC/dispatch interface, it is + * sometimes useful for dispatch to be able to call directly into XPC. The + * channel hooks structure should be initialized with pointers to XPC callback + * functions, or NULL for callbacks that XPC does not support. The version + * number in the structure must be set to reflect the fields that have been + * initialized. This function may be called only once. + * + * @param hooks + * A pointer to the channel hooks structure. This must remain valid once set. 
+ */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks); + +/*! + * @function dispatch_mach_create_4libxpc + * Create a dispatch mach channel to asynchronously receive and send mach + * messages, specifically for libxpc. + * + * The specified handler will be called with the corresponding reason parameter + * for each message received and for each message that was successfully sent, + * that failed to be sent, or was not sent; as well as when a barrier block + * has completed, or when channel connection, reconnection or cancellation has + * taken effect. However, the handler will not be called for messages that + * were passed to the XPC hooks dmxh_direct_message_handler function if that + * function returned true. + * + * Dispatch mach channels are created in a disconnected state, they must be + * connected via dispatch_mach_connect() to begin receiving and sending + * messages. + * + * @param label + * An optional string label to attach to the channel. The string is not copied, + * if it is non-NULL it must point to storage that remains valid for the + * lifetime of the channel object. May be NULL. + * + * @param queue + * The target queue of the channel, where the handler and barrier blocks will + * be submitted. + * + * @param context + * The application-defined context to pass to the handler. + * + * @param handler + * The handler function to submit when a message has been sent or received. + * + * @result + * The newly created dispatch mach channel. 
+ */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NONNULL4 DISPATCH_NOTHROW +dispatch_mach_t +dispatch_mach_create_4libxpc(const char *_Nullable label, + dispatch_queue_t _Nullable queue, void *_Nullable context, + dispatch_mach_handler_function_t handler); + +/*! + * @function dispatch_mach_send_with_result_and_async_reply_4libxpc + * SPI for XPC that asynchronously sends a message encapsulated in a dispatch + * mach message object to the specified mach channel. If an immediate send can + * be performed, returns its result via out parameters. + * + * The reply message is processed on the queue returned by the + * dmxh_msg_context_reply_queue function in the dispatch_mach_xpc_hooks_s + * structure, which is called with a single argument whose value is the + * do_ctxt field of the message argument to this function. The reply message is + * delivered to the dmxh_async_reply_handler hook function instead of being + * passed to the channel event handler. + * + * If the dmxh_msg_context_reply_queue function is not implemented or returns + * NULL, the reply message is delivered to the channel event handler on the + * channel queue. + * + * Unless the message is being sent to a send-once right (as determined by the + * presence of MACH_MSG_TYPE_MOVE_SEND_ONCE in the message header remote bits), + * the message header remote port is set to the channel send right before the + * send operation is performed. + * + * The message is required to expect a direct reply (as determined by the + * presence of MACH_MSG_TYPE_MAKE_SEND_ONCE in the message header local bits). + * The receive right specified in the message header local port will be + * monitored until a reply message (or a send-once notification) is received, or + * the channel is canceled. 
Hence the application must wait for the reply + * to be received or for a DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message + * before releasing that receive right. + * + * If the message send operation is attempted but the channel is canceled + * before the send operation succesfully completes, the message returned to the + * channel handler with DISPATCH_MACH_MESSAGE_NOT_SENT may be the result of a + * pseudo-receive operation and the receive right originally specified in the + * message header local port will be returned in a + * DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED message. + * + * If an immediate send could be performed, returns the resulting reason + * (e.g. DISPATCH_MACH_MESSAGE_SENT) and possible error to the caller in the + * send_result and send_error out parameters (instead of via the channel + * handler), in which case the passed-in message and associated resources + * can be disposed of synchronously. + * + * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND + * in the send_result out parameter to indicate that the passed-in message has + * been retained and associated resources must not be disposed of until the + * message is returned asynchronusly via the channel handler. + * + * @param channel + * The mach channel to which to send the message. + * + * @param message + * The message object encapsulating the message to send. Unless an immediate + * send could be performed, the object will be retained until the asynchronous + * send operation is complete and the channel handler has returned. The storage + * underlying the message object may be modified by the send operation. + * + * @param options + * Additional send options to pass to mach_msg() when performing the send + * operation. + * + * @param send_flags + * Flags to configure the send operation. Must be 0 for now. + * + * @param send_result + * Out parameter to return the result of the immediate send attempt. 
+ * If a deferred send is required, returns DISPATCH_MACH_NEEDS_DEFERRED_SEND. + * Must not be NULL. + * + * @param send_error + * Out parameter to return the error from the immediate send attempt. + * If a deferred send is required, returns 0. Must not be NULL. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 +DISPATCH_NONNULL6 DISPATCH_NOTHROW +void +dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel, + dispatch_mach_msg_t message, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error); + DISPATCH_ASSUME_NONNULL_END #endif // DISPATCH_MACH_SPI diff --git a/private/private.h b/private/private.h index 3c37bed0d..ed9f876cc 100644 --- a/private/private.h +++ b/private/private.h @@ -43,6 +43,9 @@ #include #endif #include +#if TARGET_OS_MAC +#include +#endif #ifndef __DISPATCH_BUILDING_DISPATCH__ #include @@ -66,7 +69,7 @@ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20160712 // Keep in sync with +#if DISPATCH_API_VERSION != 20170124 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif @@ -93,7 +96,7 @@ __BEGIN_DECLS * Boolean indicating whether the process has used libdispatch and become * multithreaded. */ -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_is_multithreaded(void); @@ -117,7 +120,7 @@ bool _dispatch_is_multithreaded(void); * Boolean indicating whether the parent process had used libdispatch and * become multithreaded at the time of fork. 
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_is_fork_of_multithreaded_parent(void); @@ -144,8 +147,7 @@ bool _dispatch_is_fork_of_multithreaded_parent(void); * If the program already used dispatch before the guard is enabled, then * this function will abort immediately. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); @@ -187,31 +189,23 @@ typedef int dispatch_runloop_handle_t; #endif #if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); #endif -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void); -#if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -_dispatch_main_queue_callback_4CF(mach_msg_header_t *_Null_unspecified msg); -#else -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); -#endif -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -219,38 +213,53 @@ _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); #if TARGET_OS_MAC -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) 
+API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); + +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_queue_t +_dispatch_network_root_queue_create_4NW(const char *_Nullable label, + const pthread_attr_t *_Nullable attrs, + dispatch_block_t _Nullable configure); +#endif #endif -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t queue); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void _dispatch_source_set_runloop_timer_4CF(dispatch_source_t source, dispatch_time_t start, uint64_t interval, uint64_t leeway); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void *_Nonnull (*_Nullable _dispatch_begin_NSAutoReleasePool)(void); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); #endif /* DISPATCH_COCOA_COMPAT */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_poll_for_events_4launchd(void); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/private/queue_private.h b/private/queue_private.h index 33de371c8..2b50eb891 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -79,7 +79,7 @@ enum { * This new value combines the 
attributes specified by the 'attr' parameter and * the overcommit flag. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW dispatch_queue_attr_t dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, @@ -98,6 +98,39 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, */ #define DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE INT8_MIN +/*! + * @function dispatch_queue_set_label_nocopy + * + * @abstract + * Set the label for a given queue, without copying the input string. + * + * @discussion + * The queue must have been initially created with a NULL label, else using + * this function to set the queue label is undefined. + * + * The caller of this function must make sure the label pointer remains valid + * while it is used as the queue label and while any callers to + * dispatch_queue_get_label() may have obtained it. Since the queue lifetime + * may extend past the last release, it is advised to call this function with + * a constant string or NULL before the queue is released, or to destroy the + * label from a finalizer for that queue. + * + * This function should be called before any work item could call + * dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL) or from the context of + * the queue itself. + * + * @param queue + * The queue to adjust. Attempts to set the label of the main queue or a global + * concurrent queue will be ignored. + * + * @param label + * The new label for the queue. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_queue_set_label_nocopy(dispatch_queue_t queue, const char *label); + /*! * @function dispatch_queue_set_width * @@ -115,8 +148,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, * with the desired concurrency width. 
* * @param queue - * The queue to adjust. Passing the main queue or a global concurrent queue - * will be ignored. + * The queue to adjust. Attempts to set the width of the main queue or a global + * concurrent queue will be ignored. * * @param width * The new maximum width of concurrency depending on available resources. @@ -128,8 +161,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, #define DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS -2 #define DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS -3 -__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_6,__MAC_10_10,__IPHONE_4_0,__IPHONE_8_0, \ - "Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT) instead") +API_DEPRECATED("Use dispatch_queue_create(name, DISPATCH_QUEUE_CONCURRENT)", + macos(10.6,10.10), ios(4.0,8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_queue_set_width(dispatch_queue_t dq, long width); @@ -189,7 +222,7 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_6_0) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -238,19 +271,19 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) * @result * A new reference to a pthread root queue object or NULL. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) -__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t _Nullable dispatch_pthread_root_queue_copy_current(void); /*! * @constant DISPATCH_APPLY_CURRENT_ROOT_QUEUE - * @discussion Constant to pass to the dispatch_apply() and dispatch_apply_f() - * functions to indicate that the root queue for the current thread should be - * used (i.e. 
one of the global concurrent queues or a queue created with - * dispatch_pthread_root_queue_create()). If there is no such queue, the - * default priority global concurrent queue will be used. + * + * @discussion + * This constant is deprecated, please use DISPATCH_APPLY_AUTO. + * + * DISPATCH_APPLY_AUTO also selects the current pthread root queue if + * applicable. */ #define DISPATCH_APPLY_CURRENT_ROOT_QUEUE ((dispatch_queue_t _Nonnull)0) @@ -284,13 +317,28 @@ dispatch_pthread_root_queue_copy_current(void); * dispatch_async_f(). * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +API_AVAILABLE(macos(10.11), ios(9.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); +#ifdef __ANDROID__ +/*! + * @function _dispatch_install_thread_detach_callback + * + * @param callback + * Function to be called before each worker thread exits to detach JVM. + * + * Hook to be able to detach threads from the Java JVM before they exit. + * If JNI has been used on a thread on Android it needs to have been + * "detached" before the thread exits or the application will crash. + */ +DISPATCH_EXPORT +void _dispatch_install_thread_detach_callback(dispatch_function_t cb); +#endif + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/private/source_private.h b/private/source_private.h index bb1370238..ad22e6a6a 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -36,17 +36,6 @@ DISPATCH_ASSUME_NONNULL_BEGIN __BEGIN_DECLS -/*! - * @const DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE - * @discussion A dispatch timer source that is part of a timer aggregate. - * The handle is the dispatch timer aggregate object. - * The mask specifies which flags from dispatch_source_timer_flags_t to apply. 
- */ -#define DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE \ - (&_dispatch_source_type_timer_with_aggregate) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); - /*! * @const DISPATCH_SOURCE_TYPE_INTERVAL * @discussion A dispatch source that submits the event handler block at a @@ -69,7 +58,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer_with_aggregate); * The mask specifies which flags from dispatch_source_timer_flags_t to apply. */ #define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval) -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_SOURCE_TYPE_DECL(interval); /*! @@ -79,8 +68,8 @@ DISPATCH_SOURCE_TYPE_DECL(interval); * The handle is a process identifier (pid_t). */ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(vfs); /*! * @const DISPATCH_SOURCE_TYPE_VM @@ -89,10 +78,9 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vfs; * This type is deprecated, use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead. */ #define DISPATCH_SOURCE_TYPE_VM (&_dispatch_source_type_vm) -__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_7, __MAC_10_10, __IPHONE_4_3, - __IPHONE_8_0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; +API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE", + macos(10.7,10.10), ios(4.3,8.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(vm); /*! * @const DISPATCH_SOURCE_TYPE_MEMORYSTATUS @@ -101,21 +89,26 @@ DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_vm; * dispatch_source_memorystatus_flags_t. 
*/ #define DISPATCH_SOURCE_TYPE_MEMORYSTATUS (&_dispatch_source_type_memorystatus) -__OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") -__WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_SOURCE_TYPE_MEMORYPRESSURE instead") +API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_SOURCE_TYPE_MEMORYPRESSURE", + macos(10.9, 10.12), ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s - _dispatch_source_type_memorystatus; +DISPATCH_SOURCE_TYPE_DECL(memorystatus); /*! * @const DISPATCH_SOURCE_TYPE_SOCK * @discussion A dispatch source that monitors events on socket state changes. */ #define DISPATCH_SOURCE_TYPE_SOCK (&_dispatch_source_type_sock) -__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_EXPORT const struct dispatch_source_type_s _dispatch_source_type_sock; +API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(sock); + +/*! + * @const DISPATCH_SOURCE_TYPE_NW_CHANNEL + * @discussion A dispatch source that monitors events on a network channel. + */ +#define DISPATCH_SOURCE_TYPE_NW_CHANNEL (&_dispatch_source_type_nw_channel) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_LINUX_UNAVAILABLE() +DISPATCH_SOURCE_TYPE_DECL(nw_channel); __END_DECLS @@ -179,6 +172,16 @@ enum { DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, }; +/*! + * @enum dispatch_source_nw_channel_flags_t + * + * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE + * Received network channel flow advisory. + */ +enum { + DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001, +}; + /*! * @enum dispatch_source_vfs_flags_t * @@ -214,6 +217,12 @@ enum { * * @constant DISPATCH_VFS_QUOTA * We hit a user quota (quotactl) for this filesystem. 
+ * + * @constant DISPATCH_VFS_NEARLOWDISK + * Filesystem is nearly full (below NEARLOWDISK level). + * + * @constant DISPATCH_VFS_DESIREDDISK + * Filesystem has exceeded the DESIREDDISK level */ enum { DISPATCH_VFS_NOTRESP = 0x0001, @@ -227,6 +236,8 @@ enum { DISPATCH_VFS_UPDATE = 0x0100, DISPATCH_VFS_VERYLOWDISK = 0x0200, DISPATCH_VFS_QUOTA = 0x1000, + DISPATCH_VFS_NEARLOWDISK = 0x2000, + DISPATCH_VFS_DESIREDDISK = 0x4000, }; /*! @@ -269,10 +280,20 @@ enum { * @constant DISPATCH_PROC_REAP * The process has been reaped by the parent process via wait*(). * This flag is deprecated and will be removed in a future release. + * + * @constant DISPATCH_PROC_EXIT_STATUS + * The process has exited. Specifying this flag allows the process exit status + * to be retrieved from the source's status value, as returned by the + * dispatch_source_get_extended_data() function. The macros + * DISPATCH_PROC_EXIT_STATUS_EXITED(), DISPATCH_PROC_EXIT_STATUS_CODE(), + * DISPATCH_PROC_EXIT_STATUS_SIGNALED(), DISPATCH_PROC_EXIT_STATUS_TERMSIG() and + * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED() can be used to examine the status + * value. */ enum { - DISPATCH_PROC_REAP __OSX_AVAILABLE_BUT_DEPRECATED( - __MAC_10_6, __MAC_10_9, __IPHONE_4_0, __IPHONE_7_0) = 0x10000000, + DISPATCH_PROC_REAP DISPATCH_ENUM_API_DEPRECATED("unsupported flag", + macos(10.6,10.9), ios(4.0,7.0)) = 0x10000000, + DISPATCH_PROC_EXIT_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(2.0)) = 0x04000000, }; /*! @@ -283,9 +304,8 @@ enum { */ enum { - DISPATCH_VM_PRESSURE __OSX_AVAILABLE_BUT_DEPRECATED_MSG( - __MAC_10_7, __MAC_10_10, __IPHONE_4_3, __IPHONE_8_0, - "Use DISPATCH_MEMORYPRESSURE_WARN instead") = 0x80000000, + DISPATCH_VM_PRESSURE DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.7, 10.10), ios(4.3, 8.0)) + = 0x80000000, }; /*! @@ -297,8 +317,7 @@ enum { * Restricted to the root user. 
*/ enum { - DISPATCH_MEMORYPRESSURE_LOW_SWAP - __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x08, + DISPATCH_MEMORYPRESSURE_LOW_SWAP DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x08, }; /*! @@ -307,29 +326,17 @@ enum { */ enum { DISPATCH_MEMORYSTATUS_PRESSURE_NORMAL - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_NORMAL instead") - = 0x01, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_NORMAL", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x01, DISPATCH_MEMORYSTATUS_PRESSURE_WARN - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __IOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __TVOS_DEPRECATED(6.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_WARN instead") - = 0x02, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_WARN", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x02, DISPATCH_MEMORYSTATUS_PRESSURE_CRITICAL - __OSX_DEPRECATED(10.9, 10.12, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_CRITICAL instead") - = 0x04, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_CRITICAL", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x04, DISPATCH_MEMORYSTATUS_LOW_SWAP - __OSX_DEPRECATED(10.10, 10.12, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - __IOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP 
instead") - __TVOS_DEPRECATED(8.0, 10.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - __WATCHOS_DEPRECATED(1.0, 3.0, "Use DISPATCH_MEMORYPRESSURE_LOW_SWAP instead") - = 0x08, + DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT("DISPATCH_MEMORYPRESSURE_LOW_SWAP", macos(10.9, 10.12), + ios(6.0, 10.0), tvos(6.0, 10.0), watchos(1.0, 3.0)) = 0x08, }; /*! @@ -341,20 +348,116 @@ enum { * * @constant DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL * The memory of the process has reached 100% of its high watermark limit. + * + * @constant DISPATCH_MEMORYPRESSURE_MSL_STATUS + * Mask for enabling/disabling malloc stack logging. */ enum { - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) - __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x10, + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x10, - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) - __TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) = 0x20, + DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x20, + + DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0xf0000000, }; +/*! + * Macros to check the exit status obtained from the status field of the + * structure returned by the dispatch_source_get_extended_data() function for a + * source of type DISPATCH_SOURCE_TYPE_PROC when DISPATCH_PROC_EXIT_STATUS has + * been requested. + * + * DISPATCH_PROC_EXIT_STATUS_EXITED returns whether the process exited. If this + * is true, the exit status can be obtained from DISPATCH_PROC_EXIT_STATUS_CODE. + * + * DISPATCH_PROC_EXIT_STATUS_SIGNALED returns whether the process was terminated + * by a signal. 
+ * + * DISPATCH_PROC_EXIT_STATUS_TERMSIG returns the signal that caused the process + * to terminate, or 0 if the process was not terminated by a signal. + * + * DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED returns whether a core dump of the + * process was created. + */ +#define DISPATCH_PROC_EXIT_STATUS_EXITED(status) ((bool)WIFEXITED(status)) +#define DISPATCH_PROC_EXIT_STATUS_CODE(status) ((int)WEXITSTATUS(status)) +#define DISPATCH_PROC_EXIT_STATUS_SIGNALED(status) ((bool)WIFSIGNALED(status)) +#define DISPATCH_PROC_EXIT_STATUS_TERMSIG(status) ((int)WTERMSIG(status)) +#define DISPATCH_PROC_EXIT_STATUS_CORE_DUMPED(status) ((bool)WCOREDUMP(status)) __BEGIN_DECLS +/*! + * @function dispatch_source_set_mandatory_cancel_handler + * + * @abstract + * Sets the event handler block for the given dispatch source, and indicates + * that calling dispatch_source_cancel() is mandatory for this source object. + * + * @discussion + * The cancellation handler (if specified) will be submitted to the source's + * target queue in response to a call to dispatch_source_cancel() once the + * system has released all references to the source's underlying handle and + * the source's event handler block has returned. + * + * When this function has been used used to set a cancellation handler, then + * the following result in an assertion and the process being terminated: + * - releasing the last reference on the dispatch source without having + * cancelled it by calling dispatch_source_cancel(); + * - changing any handler after the source has been activated; + * - changing the target queue of the source after it has been activated. + * + * IMPORTANT: + * Source cancellation and a cancellation handler are required for file + * descriptor and mach port based sources in order to safely close the + * descriptor or destroy the port. Making the cancellation handler of such + * sources mandatory is strongly recommended. 
+ * Closing the descriptor or port before the cancellation handler is invoked may + * result in a race condition. If a new descriptor is allocated with the same + * value as the recently closed descriptor while the source's event handler is + * still running, the event handler may read/write data to the wrong descriptor. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The cancellation handler block to submit to the source's target queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source, + dispatch_block_t handler); +#endif /* __BLOCKS__ */ + +/*! + * @function dispatch_source_set_mandatory_cancel_handler_f + * + * @abstract + * Sets the event handler function for the given dispatch source, and causes an + * assertion if this source is released before having been explicitly canceled. + * + * @discussion + * See dispatch_source_set_mandatory_cancel_handler() for more details. + * + * @param source + * The dispatch source to modify. + * The result of passing NULL in this parameter is undefined. + * + * @param handler + * The cancellation handler function to submit to the source's target queue. + * The context parameter passed to the event handler function is the current + * context of the dispatch source at the time the handler call is made. + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t source, + dispatch_function_t handler); + /*! 
* @function dispatch_source_cancel_and_wait * @@ -400,64 +503,11 @@ __BEGIN_DECLS * The dispatch source to be canceled. * The result of passing NULL in this parameter is undefined. */ -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.10) -__TVOS_AVAILABLE(10.10) __WATCHOS_AVAILABLE(3.0) +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_source_cancel_and_wait(dispatch_source_t source); -/*! - * @typedef dispatch_timer_aggregate_t - * - * @abstract - * Dispatch timer aggregates are sets of related timers. - */ -DISPATCH_DECL(dispatch_timer_aggregate); - -/*! - * @function dispatch_timer_aggregate_create - * - * @abstract - * Creates a new dispatch timer aggregate. - * - * @discussion - * A dispatch timer aggregate is a set of related timers whose overall timing - * parameters can be queried. - * - * Timers are added to an aggregate when a timer source is created with type - * DISPATCH_SOURCE_TYPE_TIMER_WITH_AGGREGATE. - * - * @result - * The newly created dispatch timer aggregate. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_timer_aggregate_t -dispatch_timer_aggregate_create(void); - -/*! - * @function dispatch_timer_aggregate_get_delay - * - * @abstract - * Retrieves the delay until a timer in the given aggregate will next fire. - * - * @param aggregate - * The dispatch timer aggregate to query. - * - * @param leeway_ptr - * Optional pointer to a variable filled with the leeway (in ns) that will be - * applied to the return value. May be NULL. - * - * @result - * Delay in ns from now. - */ -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) -DISPATCH_EXPORT DISPATCH_NOTHROW -uint64_t -dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, - uint64_t *_Nullable leeway_ptr); - #if __has_include() /*! 
* @typedef dispatch_mig_callback_t @@ -468,7 +518,7 @@ dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t aggregate, typedef boolean_t (*dispatch_mig_callback_t)(mach_msg_header_t *message, mach_msg_header_t *reply); -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, @@ -480,13 +530,66 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, * @abstract * Extract the context pointer from a mach message trailer. */ -__OSX_AVAILABLE_STARTING(__MAC_10_6,__IPHONE_4_0) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void *_Nullable dispatch_mach_msg_get_context(mach_msg_header_t *msg); #endif +/*! + * @typedef dispatch_source_extended_data_t + * + * @abstract + * Type used by dispatch_source_get_extended_data() to return a consistent + * snapshot of the data and status of a dispatch source. + */ +typedef struct dispatch_source_extended_data_s { + unsigned long data; + unsigned long status; +} *dispatch_source_extended_data_t; + +/*! + * @function dispatch_source_get_extended_data + * + * @abstract + * Returns the current data and status values for a dispatch source. + * + * @discussion + * This function is intended to be called from within the event handler block. + * The result of calling this function outside of the event handler callback is + * undefined. + * + * @param source + * The result of passing NULL in this parameter is undefined. + * + * @param data + * A pointer to a dispatch_source_extended_data_s in which the data and status + * will be returned. The data field is populated with the value that would be + * returned by dispatch_source_get_data(). 
The value of the status field should + * be interpreted according to the type of the dispatch source: + * + * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_exit_flags_t + * + * If called from the event handler of a data source type not listed above, the + * status value is undefined. + * + * @param size + * The size of the specified structure. Should be set to + * sizeof(dispatch_source_extended_data_s). + * + * @result + * The size of the structure returned in *data, which will never be greater than + * the value of the size argument. If this is less than the value of the size + * argument, the remaining space in data will have been populated with zeroes. + */ +API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW +size_t +dispatch_source_get_extended_data(dispatch_source_t source, + dispatch_source_extended_data_t data, size_t size); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/src/BlocksRuntime/Block.h b/src/BlocksRuntime/Block.h new file mode 100644 index 000000000..15c724226 --- /dev/null +++ b/src/BlocksRuntime/Block.h @@ -0,0 +1,54 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + + +#ifndef _Block_H_ +#define _Block_H_ + +#if !defined(BLOCK_EXPORT) +# if defined(__cplusplus) +# define BLOCK_EXPORT extern "C" __attribute__((visibility("default"))) +# else +# define BLOCK_EXPORT extern __attribute__((visibility("default"))) +# endif +#endif + +#if __cplusplus +extern "C" { +#endif + +// Create a heap based copy of a Block or simply add a reference to an existing one. 
+// This must be paired with Block_release to recover memory, even when running +// under Objective-C Garbage Collection. +BLOCK_EXPORT void *_Block_copy(const void *aBlock); + +// Lose the reference, and if heap based and last reference, recover the memory +BLOCK_EXPORT void _Block_release(const void *aBlock); + +// Used by the compiler. Do not call this function yourself. +BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int); + +// Used by the compiler. Do not call this function yourself. +BLOCK_EXPORT void _Block_object_dispose(const void *, const int); + +// Used by the compiler. Do not use these variables yourself. +BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; +BLOCK_EXPORT void * _NSConcreteStackBlock[32]; + +#if __cplusplus +} +#endif + +// Type correct macros + +#define Block_copy(...) ((__typeof(__VA_ARGS__))_Block_copy((const void *)(__VA_ARGS__))) +#define Block_release(...) _Block_release((const void *)(__VA_ARGS__)) + + +#endif diff --git a/src/BlocksRuntime/Block_private.h b/src/BlocksRuntime/Block_private.h new file mode 100644 index 000000000..deeb19a0a --- /dev/null +++ b/src/BlocksRuntime/Block_private.h @@ -0,0 +1,264 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. 
and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + + +#ifndef _BLOCK_PRIVATE_H_ +#define _BLOCK_PRIVATE_H_ + +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> + +#include "Block.h" + +#if __cplusplus +extern "C" { +#endif + + +// Values for Block_layout->flags to describe block objects +enum { + BLOCK_DEALLOCATING = (0x0001), // runtime + BLOCK_REFCOUNT_MASK = (0xfffe), // runtime + BLOCK_NEEDS_FREE = (1 << 24), // runtime + BLOCK_HAS_COPY_DISPOSE = (1 << 25), // compiler + BLOCK_HAS_CTOR = (1 << 26), // compiler: helpers have C++ code + BLOCK_IS_GC = (1 << 27), // runtime + BLOCK_IS_GLOBAL = (1 << 28), // compiler + BLOCK_USE_STRET = (1 << 29), // compiler: undefined if !BLOCK_HAS_SIGNATURE + BLOCK_HAS_SIGNATURE = (1 << 30), // compiler + BLOCK_HAS_EXTENDED_LAYOUT=(1 << 31) // compiler +}; + +#define BLOCK_DESCRIPTOR_1 1 +struct Block_descriptor_1 { + uintptr_t reserved; + uintptr_t size; +}; + +#define BLOCK_DESCRIPTOR_2 1 +struct Block_descriptor_2 { + // requires BLOCK_HAS_COPY_DISPOSE + void (*copy)(void *dst, const void *src); + void (*dispose)(const void *); +}; + +#define BLOCK_DESCRIPTOR_3 1 +struct Block_descriptor_3 { + // requires BLOCK_HAS_SIGNATURE + const char *signature; + const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT +}; + +struct Block_layout { + void *isa; + volatile int32_t flags; // contains ref count + int32_t reserved; + void (*invoke)(void *, ...); + struct Block_descriptor_1 *descriptor; + // imported variables +}; + + +// Values for Block_byref->flags to describe __block variables +enum { + // Byref refcount must use the same bits as Block_layout's refcount. 
+ // BLOCK_DEALLOCATING = (0x0001), // runtime + // BLOCK_REFCOUNT_MASK = (0xfffe), // runtime + + BLOCK_BYREF_LAYOUT_MASK = (0xf << 28), // compiler + BLOCK_BYREF_LAYOUT_EXTENDED = ( 1 << 28), // compiler + BLOCK_BYREF_LAYOUT_NON_OBJECT = ( 2 << 28), // compiler + BLOCK_BYREF_LAYOUT_STRONG = ( 3 << 28), // compiler + BLOCK_BYREF_LAYOUT_WEAK = ( 4 << 28), // compiler + BLOCK_BYREF_LAYOUT_UNRETAINED = ( 5 << 28), // compiler + + BLOCK_BYREF_IS_GC = ( 1 << 27), // runtime + + BLOCK_BYREF_HAS_COPY_DISPOSE = ( 1 << 25), // compiler + BLOCK_BYREF_NEEDS_FREE = ( 1 << 24), // runtime +}; + +struct Block_byref { + void *isa; + struct Block_byref *forwarding; + volatile int32_t flags; // contains ref count + uint32_t size; +}; + +struct Block_byref_2 { + // requires BLOCK_BYREF_HAS_COPY_DISPOSE + void (*byref_keep)(struct Block_byref *dst, struct Block_byref *src); + void (*byref_destroy)(struct Block_byref *); +}; + +struct Block_byref_3 { + // requires BLOCK_BYREF_LAYOUT_EXTENDED + const char *layout; +}; + + +// Extended layout encoding. + +// Values for Block_descriptor_3->layout with BLOCK_HAS_EXTENDED_LAYOUT +// and for Block_byref_3->layout with BLOCK_BYREF_LAYOUT_EXTENDED + +// If the layout field is less than 0x1000, then it is a compact encoding +// of the form 0xXYZ: X strong pointers, then Y byref pointers, +// then Z weak pointers. + +// If the layout field is 0x1000 or greater, it points to a +// string of layout bytes. Each byte is of the form 0xPN. +// Operator P is from the list below. Value N is a parameter for the operator. +// Byte 0x00 terminates the layout; remaining block data is non-pointer bytes. + +enum { + BLOCK_LAYOUT_ESCAPE = 0, // N=0 halt, rest is non-pointer. N!=0 reserved. 
+ BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, // N bytes non-objects + BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, // N words non-objects + BLOCK_LAYOUT_STRONG = 3, // N words strong pointers + BLOCK_LAYOUT_BYREF = 4, // N words byref pointers + BLOCK_LAYOUT_WEAK = 5, // N words weak pointers + BLOCK_LAYOUT_UNRETAINED = 6, // N words unretained pointers + BLOCK_LAYOUT_UNKNOWN_WORDS_7 = 7, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_8 = 8, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_9 = 9, // N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_A = 0xA, // N words, reserved + BLOCK_LAYOUT_UNUSED_B = 0xB, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_C = 0xC, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_D = 0xD, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_E = 0xE, // unspecified, reserved + BLOCK_LAYOUT_UNUSED_F = 0xF, // unspecified, reserved +}; + + +// Runtime support functions used by compiler when generating copy/dispose helpers + +// Values for _Block_object_assign() and _Block_object_dispose() parameters +enum { + // see function implementation for a more complete description of these fields and combinations + BLOCK_FIELD_IS_OBJECT = 3, // id, NSObject, __attribute__((NSObject)), block, ... + BLOCK_FIELD_IS_BLOCK = 7, // a block variable + BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable + BLOCK_FIELD_IS_WEAK = 16, // declared __weak, only used in byref copy helpers + BLOCK_BYREF_CALLER = 128, // called from __block (byref) copy/dispose support routines. 
+}; + +enum { + BLOCK_ALL_COPY_DISPOSE_FLAGS = + BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_BYREF | + BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER +}; + +// Runtime entry point called by compiler when assigning objects inside copy helper routines +BLOCK_EXPORT void _Block_object_assign(void *destAddr, const void *object, const int flags); + // BLOCK_FIELD_IS_BYREF is only used from within block copy helpers + + +// runtime entry point called by the compiler when disposing of objects inside dispose helper routine +BLOCK_EXPORT void _Block_object_dispose(const void *object, const int flags); + + +// Other support functions + +// runtime entry to get total size of a closure +BLOCK_EXPORT size_t Block_size(void *aBlock); + +// indicates whether block was compiled with compiler that sets the ABI related metadata bits +BLOCK_EXPORT bool _Block_has_signature(void *aBlock); + +// returns TRUE if return value of block is on the stack, FALSE otherwise +BLOCK_EXPORT bool _Block_use_stret(void *aBlock); + +// Returns a string describing the block's parameter and return types. +// The encoding scheme is the same as Objective-C @encode. +// Returns NULL for blocks compiled with some compilers. +BLOCK_EXPORT const char * _Block_signature(void *aBlock); + +// Returns a string describing the block's GC layout. +// This uses the GC skip/scan encoding. +// May return NULL. +BLOCK_EXPORT const char * _Block_layout(void *aBlock); + +// Returns a string describing the block's layout. +// This uses the "extended layout" form described above. +// May return NULL. 
+BLOCK_EXPORT const char * _Block_extended_layout(void *aBlock); + +// Callable only from the ARR weak subsystem while in exclusion zone +BLOCK_EXPORT bool _Block_tryRetain(const void *aBlock); + +// Callable only from the ARR weak subsystem while in exclusion zone +BLOCK_EXPORT bool _Block_isDeallocating(const void *aBlock); + + +// the raw data space for runtime classes for blocks +// class+meta used for stack, malloc, and collectable based blocks +BLOCK_EXPORT void * _NSConcreteMallocBlock[32]; +BLOCK_EXPORT void * _NSConcreteAutoBlock[32]; +BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32]; +BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32]; +// declared in Block.h +// BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; +// BLOCK_EXPORT void * _NSConcreteStackBlock[32]; + + +// the intercept routines that must be used under GC +BLOCK_EXPORT void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign_strong)(void *, void **), + void (*gc_assign_weak)(const void *, void *), + void (*gc_memmove)(void *, void *, unsigned long)); + +// earlier version, now simply transitional +BLOCK_EXPORT void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign_strong)(void *, void **), + void (*gc_assign_weak)(const void *, void *)); + +BLOCK_EXPORT void _Block_use_RR( void (*retain)(const void *), + void (*release)(const void *)); + +struct Block_callbacks_RR { + size_t size; // size == sizeof(struct Block_callbacks_RR) + void (*retain)(const void *); + void (*release)(const void *); + void (*destructInstance)(const void *); +}; +typedef struct Block_callbacks_RR Block_callbacks_RR; + +BLOCK_EXPORT void _Block_use_RR2(const Block_callbacks_RR *callbacks); + +// make a collectable GC heap based Block. Not useful under non-GC. 
+BLOCK_EXPORT void *_Block_copy_collectable(const void *aBlock); + +// thread-unsafe diagnostic +BLOCK_EXPORT const char *_Block_dump(const void *block); + + +// Obsolete + +// first layout +struct Block_basic { + void *isa; + int Block_flags; // int32_t + int Block_size; // XXX should be packed into Block_flags + void (*Block_invoke)(void *); + void (*Block_copy)(void *dst, void *src); // iff BLOCK_HAS_COPY_DISPOSE + void (*Block_dispose)(void *); // iff BLOCK_HAS_COPY_DISPOSE + //long params[0]; // where const imports, __block storage references, etc. get laid down +} __attribute__((deprecated)); + + +#if __cplusplus +} +#endif + + +#endif diff --git a/src/BlocksRuntime/data.c b/src/BlocksRuntime/data.c new file mode 100644 index 000000000..dd36051d9 --- /dev/null +++ b/src/BlocksRuntime/data.c @@ -0,0 +1,24 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + +/******************** +NSBlock support + +We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto. + +We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. 
+**********************/ +#define BLOCK_EXPORT __attribute__((visibility("default"))) + +BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteAutoBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] = { 0 }; +BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] = { 0 }; diff --git a/src/BlocksRuntime/runtime.c b/src/BlocksRuntime/runtime.c new file mode 100644 index 000000000..8c98e8d1e --- /dev/null +++ b/src/BlocksRuntime/runtime.c @@ -0,0 +1,765 @@ +// This source file is part of the Swift.org open source project +// +// Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors +// Licensed under Apache License v2.0 with Runtime Library Exception +// +// See http://swift.org/LICENSE.txt for license information +// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// + +#include "Block_private.h" +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdint.h> +#if HAVE_OBJC +#define __USE_GNU +#include <dlfcn.h> +#endif +#if __has_include(<os/assumes.h>) +#include <os/assumes.h> +#else +#include <assert.h> +#endif +#ifndef os_assumes +#define os_assumes(_x) _x +#endif +#ifndef os_assert +#define os_assert(_x) assert(_x) +#endif + +#if TARGET_OS_WIN32 +#define _CRT_SECURE_NO_WARNINGS 1 +#include <windows.h> +static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + long original = InterlockedCompareExchange(dst, newl, oldl); + return (original == oldl); +} + +static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + int original = InterlockedCompareExchange(dst, newi, oldi); + return (original == oldi); +} +#else +#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) +#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) 
__sync_bool_compare_and_swap(_Ptr, _Old, _New) +#endif + +/*********************** +Globals +************************/ + +#if HAVE_OBJC +static void *_Block_copy_class = _NSConcreteMallocBlock; +static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock; +static int _Block_copy_flag = BLOCK_NEEDS_FREE; +#endif +static int _Byref_flag_initial_value = BLOCK_BYREF_NEEDS_FREE | 4; // logical 2 + +static bool isGC = false; + +/******************************************************************************* +Internal Utilities +********************************************************************************/ + + +static int32_t latching_incr_int(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return BLOCK_REFCOUNT_MASK; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { + return old_value+2; + } + } +} + +static bool latching_incr_int_not_deallocating(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if (old_value & BLOCK_DEALLOCATING) { + // if deallocating we can't do this + return false; + } + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + // if latched, we're leaking this block, and we succeed + return true; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { + // otherwise, we must store a new retained value without the deallocating bit set + return true; + } + } +} + + +// return should_deallocate? 
+static bool latching_decr_int_should_deallocate(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return false; // latched high + } + if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { + return false; // underflow, latch low + } + int32_t new_value = old_value - 2; + bool result = false; + if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) { + new_value = old_value - 1; + result = true; + } + if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { + return result; + } + } +} + +// hit zero? +static bool latching_decr_int_now_zero(volatile int32_t *where) { + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return false; // latched high + } + if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { + return false; // underflow, latch low + } + int32_t new_value = old_value - 2; + if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { + return (new_value & BLOCK_REFCOUNT_MASK) == 0; + } + } +} + + +/*********************** +GC support stub routines +************************/ +#if !TARGET_OS_WIN32 +#pragma mark GC Support Routines +#endif + + + +static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) { + (void)initialCountIsOne; + (void)isObject; + return malloc(size); +} + +static void _Block_assign_default(void *value, void **destptr) { + *destptr = value; +} + +static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) { + (void)ptr; + (void)hasRefcount; +} + +#if HAVE_OBJC +static void _Block_do_nothing(const void *aBlock) { } +#endif + +static void _Block_retain_object_default(const void *ptr) { + (void)ptr; +} + +static void _Block_release_object_default(const void *ptr) { + (void)ptr; +} + +static void _Block_assign_weak_default(const void *ptr, void *dest) { +#if !TARGET_OS_WIN32 + *(long *)dest = (long)ptr; +#else + *(void 
**)dest = (void *)ptr; +#endif +} + +static void _Block_memmove_default(void *dst, void *src, unsigned long size) { + memmove(dst, src, (size_t)size); +} + +#if HAVE_OBJC +static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) { + void **destp = (void **)dest; + void **srcp = (void **)src; + while (size) { + _Block_assign_default(*srcp, destp); + destp++; + srcp++; + size -= sizeof(void *); + } +} +#endif + +static void _Block_destructInstance_default(const void *aBlock) { + (void)aBlock; +} + +/************************************************************************** +GC support callout functions - initially set to stub routines +***************************************************************************/ + +static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default; +static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free; +static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default; +static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default; +static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default; +static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default; +static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default; +static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default; +static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructInstance_default; + + +#if HAVE_OBJC +/************************************************************************** +GC support SPI functions - called from ObjC runtime and CoreFoundation +***************************************************************************/ + +// Public SPI +// Called from objc-auto to turn on GC. 
+// version 3, 4 arg, but changed 1st arg +void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign)(void *, void **), + void (*gc_assign_weak)(const void *, void *), + void (*gc_memmove)(void *, void *, unsigned long)) { + + isGC = true; + _Block_allocator = alloc; + _Block_deallocator = _Block_do_nothing; + _Block_assign = gc_assign; + _Block_copy_flag = BLOCK_IS_GC; + _Block_copy_class = _NSConcreteAutoBlock; + // blocks with ctors & dtors need to have the dtor run from a class with a finalizer + _Block_copy_finalizing_class = _NSConcreteFinalizingBlock; + _Block_setHasRefcount = setHasRefcount; + _Byref_flag_initial_value = BLOCK_BYREF_IS_GC; // no refcount + _Block_retain_object = _Block_do_nothing; + _Block_release_object = _Block_do_nothing; + _Block_assign_weak = gc_assign_weak; + _Block_memmove = gc_memmove; +} + +// transitional +void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), + void (*setHasRefcount)(const void *, const bool), + void (*gc_assign)(void *, void **), + void (*gc_assign_weak)(const void *, void *)) { + // until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then + _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken); +} + + +// Called from objc-auto to alternatively turn on retain/release. +// Prior to this the only "object" support we can provide is for those +// super special objects that live in libSystem, namely dispatch queues. +// Blocks and Block_byrefs have their own special entry points. +BLOCK_EXPORT +void _Block_use_RR( void (*retain)(const void *), + void (*release)(const void *)) { + _Block_retain_object = retain; + _Block_release_object = release; + _Block_destructInstance = dlsym(RTLD_DEFAULT, "objc_destructInstance"); +} +#endif // HAVE_OBJC + +// Called from CF to indicate MRR. 
Newer version uses a versioned structure, so we can add more functions +// without defining a new entry point. +BLOCK_EXPORT +void _Block_use_RR2(const Block_callbacks_RR *callbacks) { + _Block_retain_object = callbacks->retain; + _Block_release_object = callbacks->release; + _Block_destructInstance = callbacks->destructInstance; +} + +/**************************************************************************** +Accessors for block descriptor fields +*****************************************************************************/ +#if 0 +static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock) +{ + return aBlock->descriptor; +} +#endif + +static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock) +{ + if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + return (struct Block_descriptor_2 *)desc; +} + +static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock) +{ + if (! (aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { + desc += sizeof(struct Block_descriptor_2); + } + return (struct Block_descriptor_3 *)desc; +} + +static __inline bool _Block_has_layout(struct Block_layout *aBlock) { + if (! 
(aBlock->flags & BLOCK_HAS_SIGNATURE)) return false; + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { + desc += sizeof(struct Block_descriptor_2); + } + return ((struct Block_descriptor_3 *)desc)->layout != NULL; +} + +static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock) +{ + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) return; + + (*desc->copy)(result, aBlock); // do fixup +} + +static void _Block_call_dispose_helper(struct Block_layout *aBlock) +{ + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) return; + + (*desc->dispose)(aBlock); +} + +/******************************************************************************* +Internal Support routines for copying +********************************************************************************/ + +#if !TARGET_OS_WIN32 +#pragma mark Copy/Release support +#endif + +// Copy, or bump refcount, of a block. If really copying, call the copy helper if present. +static void *_Block_copy_internal(const void *arg, const bool wantsOne) { + struct Block_layout *aBlock; + + if (!arg) return NULL; + + + // The following would be better done as a switch statement + aBlock = (struct Block_layout *)arg; + if (aBlock->flags & BLOCK_NEEDS_FREE) { + // latches on high + latching_incr_int(&aBlock->flags); + return aBlock; + } + else if (aBlock->flags & BLOCK_IS_GC) { + // GC refcounting is expensive so do most refcounting here. + if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 2)) { + // Tell collector to hang on this - it will bump the GC refcount version + _Block_setHasRefcount(aBlock, true); + } + return aBlock; + } + else if (aBlock->flags & BLOCK_IS_GLOBAL) { + return aBlock; + } + + // Its a stack block. Make a copy. 
+ if (!isGC) { + struct Block_layout *result = malloc(aBlock->descriptor->size); + if (!result) return NULL; + memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first + // reset refcount + result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed + result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1 + result->isa = _NSConcreteMallocBlock; + _Block_call_copy_helper(result, aBlock); + return result; + } + else { + // Under GC want allocation with refcount 1 so we ask for "true" if wantsOne + // This allows the copy helper routines to make non-refcounted block copies under GC + int32_t flags = aBlock->flags; + bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0; + struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR || _Block_has_layout(aBlock)); + if (!result) return NULL; + memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first + // reset refcount + // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE. + flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed + if (wantsOne) + flags |= BLOCK_IS_GC | 2; + else + flags |= BLOCK_IS_GC; + result->flags = flags; + _Block_call_copy_helper(result, aBlock); + if (hasCTOR) { + result->isa = _NSConcreteFinalizingBlock; + } + else { + result->isa = _NSConcreteAutoBlock; + } + return result; + } +} + + + + + +// Runtime entry points for maintaining the sharing knowledge of byref data blocks. + +// A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data +// Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr. +// We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it. 
+// Otherwise we need to copy it and update the stack forwarding pointer +static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) { + struct Block_byref **destp = (struct Block_byref **)dest; + struct Block_byref *src = (struct Block_byref *)arg; + + if (src->forwarding->flags & BLOCK_BYREF_IS_GC) { + ; // don't need to do any more work + } + else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) { + // src points to stack + bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)); + // if its weak ask for an object (only matters under GC) + struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak); + copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack + copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier) + src->forwarding = copy; // patch stack to point to heap copy + copy->size = src->size; + if (isWeak) { + copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning + } + if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + // Trust copy helper to copy everything of interest + // If more than one field shows up in a byref block this is wrong XXX + struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1); + struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1); + copy2->byref_keep = src2->byref_keep; + copy2->byref_destroy = src2->byref_destroy; + + if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) { + struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1); + struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1); + copy3->layout = src3->layout; + } + + (*src2->byref_keep)(copy, src); + } + else { + // just bits. Blast 'em using _Block_memmove in case they're __strong + // This copy includes Block_byref_3, if any. 
+ _Block_memmove(copy+1, src+1, + src->size - sizeof(struct Block_byref)); + } + } + // already copied to heap + else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) { + latching_incr_int(&src->forwarding->flags); + } + // assign byref data block pointer into new Block + _Block_assign(src->forwarding, (void **)destp); +} + +// Old compiler SPI +static void _Block_byref_release(const void *arg) { + struct Block_byref *byref = (struct Block_byref *)arg; + int32_t refcount; + + // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?) + byref = byref->forwarding; + + // To support C++ destructors under GC we arrange for there to be a finalizer for this + // by using an isa that directs the code to a finalizer that calls the byref_destroy method. + if ((byref->flags & BLOCK_BYREF_NEEDS_FREE) == 0) { + return; // stack or GC or global + } + refcount = byref->flags & BLOCK_REFCOUNT_MASK; + os_assert(refcount); + if (latching_decr_int_should_deallocate(&byref->flags)) { + if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1); + (*byref2->byref_destroy)(byref); + } + _Block_deallocator((struct Block_layout *)byref); + } +} + + +/************************************************************ + * + * API supporting SPI + * _Block_copy, _Block_release, and (old) _Block_destroy + * + ***********************************************************/ + +#if !TARGET_OS_WIN32 +#pragma mark SPI/API +#endif + +BLOCK_EXPORT +void *_Block_copy(const void *arg) { + return _Block_copy_internal(arg, true); +} + + +// API entry point to release a copied Block +BLOCK_EXPORT +void _Block_release(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + if (!aBlock + || (aBlock->flags & BLOCK_IS_GLOBAL) + || ((aBlock->flags & (BLOCK_IS_GC|BLOCK_NEEDS_FREE)) == 0) + ) return; + if (aBlock->flags & BLOCK_IS_GC) { + if 
(latching_decr_int_now_zero(&aBlock->flags)) { + // Tell GC we no longer have our own refcounts. GC will decr its refcount + // and unless someone has done a CFRetain or marked it uncollectable it will + // now be subject to GC reclamation. + _Block_setHasRefcount(aBlock, false); + } + } + else if (aBlock->flags & BLOCK_NEEDS_FREE) { + if (latching_decr_int_should_deallocate(&aBlock->flags)) { + _Block_call_dispose_helper(aBlock); + _Block_destructInstance(aBlock); + _Block_deallocator(aBlock); + } + } +} + +BLOCK_EXPORT +bool _Block_tryRetain(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + return latching_incr_int_not_deallocating(&aBlock->flags); +} + +BLOCK_EXPORT +bool _Block_isDeallocating(const void *arg) { + struct Block_layout *aBlock = (struct Block_layout *)arg; + return (aBlock->flags & BLOCK_DEALLOCATING) != 0; +} + +// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers +static void _Block_destroy(const void *arg) { + struct Block_layout *aBlock; + if (!arg) return; + aBlock = (struct Block_layout *)arg; + if (aBlock->flags & BLOCK_IS_GC) { + // assert(aBlock->Block_flags & BLOCK_HAS_CTOR); + return; // ignore, we are being called because of a DTOR + } + _Block_release(aBlock); +} + + + +/************************************************************ + * + * SPI used by other layers + * + ***********************************************************/ + +// SPI, also internal. 
Called from NSAutoBlock only under GC +BLOCK_EXPORT +void *_Block_copy_collectable(const void *aBlock) { + return _Block_copy_internal(aBlock, false); +} + + +// SPI +BLOCK_EXPORT +size_t Block_size(void *aBlock) { + return ((struct Block_layout *)aBlock)->descriptor->size; +} + +BLOCK_EXPORT +bool _Block_use_stret(void *aBlock) { + struct Block_layout *layout = (struct Block_layout *)aBlock; + + int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET; + return (layout->flags & requiredFlags) == requiredFlags; +} + +// Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit. +BLOCK_EXPORT +bool _Block_has_signature(void *aBlock) { + return _Block_signature(aBlock) ? true : false; +} + +BLOCK_EXPORT +const char * _Block_signature(void *aBlock) +{ + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + return desc3->signature; +} + +BLOCK_EXPORT +const char * _Block_layout(void *aBlock) +{ + // Don't return extended layout to callers expecting GC layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL; + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + return desc3->layout; +} + +BLOCK_EXPORT +const char * _Block_extended_layout(void *aBlock) +{ + // Don't return GC layout to callers expecting extended layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL; + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3(aBlock); + if (!desc3) return NULL; + + // Return empty string (all non-object bytes) instead of NULL + // so callers can distinguish "empty layout" from "no layout". 
+ if (!desc3->layout) return ""; + else return desc3->layout; +} + +#if !TARGET_OS_WIN32 +#pragma mark Compiler SPI entry points +#endif + + +/******************************************************* + +Entry points used by the compiler - the real API! + + +A Block can reference four different kinds of things that require help when the Block is copied to the heap. +1) C++ stack based objects +2) References to Objective-C objects +3) Other Blocks +4) __block variables + +In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest. + +The flags parameter of _Block_object_assign and _Block_object_dispose is set to + * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object, + * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and + * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable. +If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16) + +So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24. + +When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied. 
+ +So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities: + __block id 128+3 (0x83) + __block (^Block) 128+7 (0x87) + __weak __block id 128+3+16 (0x93) + __weak __block (^Block) 128+7+16 (0x97) + + +********************************************************/ + +// +// When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point +// to do the assignment. +// +BLOCK_EXPORT +void _Block_object_assign(void *destAddr, const void *object, const int flags) { + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_OBJECT: + /******* + id object = ...; + [^{ object; } copy]; + ********/ + + _Block_retain_object(object); + _Block_assign((void *)object, destAddr); + break; + + case BLOCK_FIELD_IS_BLOCK: + /******* + void (^object)(void) = ...; + [^{ object; } copy]; + ********/ + + _Block_assign(_Block_copy_internal(object, false), destAddr); + break; + + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + /******* + // copy the onstack __block container to the heap + __block ... x; + __weak __block ... 
x; + [^{ x; } copy]; + ********/ + + _Block_byref_assign_copy(destAddr, object, flags); + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + /******* + // copy the actual field held in the __block container + __block id object; + __block void (^object)(void); + [^{ object; } copy]; + ********/ + + // under manual retain release __block object/block variables are dangling + _Block_assign((void *)object, destAddr); + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + /******* + // copy the actual field held in the __block container + __weak __block id object; + __weak __block void (^object)(void); + [^{ object; } copy]; + ********/ + + _Block_assign_weak(object, destAddr); + break; + + default: + break; + } +} + +// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point +// to help dispose of the contents +// Used initially only for __attribute__((NSObject)) marked pointers. 
+BLOCK_EXPORT +void _Block_object_dispose(const void *object, const int flags) { + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + // get rid of the __block data structure held in a Block + _Block_byref_release(object); + break; + case BLOCK_FIELD_IS_BLOCK: + _Block_destroy(object); + break; + case BLOCK_FIELD_IS_OBJECT: + _Block_release_object(object); + break; + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + break; + default: + break; + } +} diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt new file mode 100644 index 000000000..2ec2691fc --- /dev/null +++ b/src/CMakeLists.txt @@ -0,0 +1,204 @@ + +include(SwiftSupport) +include(DTrace) + +add_library(dispatch + allocator.c + apply.c + benchmark.c + data.c + init.c + introspection.c + io.c + mach.c + object.c + once.c + queue.c + semaphore.c + source.c + time.c + transform.c + voucher.c + protocol.defs + provider.d + allocator_internal.h + data_internal.h + inline_internal.h + internal.h + introspection_internal.h + io_internal.h + mach_internal.h + object_internal.h + queue_internal.h + semaphore_internal.h + shims.h + source_internal.h + trace.h + voucher_internal.h + event/event.c + event/event_config.h + event/event_epoll.c + event/event_internal.h + event/event_kevent.c + firehose/firehose_internal.h + shims/android_stubs.h + shims/atomic.h + shims/atomic_sfb.h + shims/getprogname.h + shims/hw_config.h + shims/linux_stubs.c + shims/linux_stubs.h + shims/lock.c + shims/lock.h + shims/perfmon.h + shims/time.h + shims/tsd.h + shims/yield.h) +if(DISPATCH_USE_INTERNAL_WORKQUEUE) + target_sources(dispatch + PRIVATE + event/workqueue.c + event/workqueue_internal.h) +endif() +target_sources(dispatch + PRIVATE + block.cpp) 
+if(HAVE_OBJC) + target_sources(dispatch + PRIVATE + data.m + object.m) +endif() +if(CMAKE_SWIFT_COMPILER) + set(swift_optimization_flags) + if(CMAKE_BUILD_TYPE MATCHES Release) + set(swift_optimization_flags -O) + endif() + add_swift_library(swiftDispatch + MODULE_NAME + Dispatch + MODULE_LINK_NAME + dispatch + MODULE_PATH + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule + OUTPUT + ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o + SOURCES + swift/Block.swift + swift/Data.swift + swift/Dispatch.swift + swift/IO.swift + swift/Private.swift + swift/Queue.swift + swift/Source.swift + swift/Time.swift + swift/Wrapper.swift + CFLAGS + -fblocks + -fmodule-map-file=${CMAKE_SOURCE_DIR}/dispatch/module.modulemap + SWIFT_FLAGS + -I ${CMAKE_SOURCE_DIR} + ${swift_optimization_flags}) + target_sources(dispatch + PRIVATE + swift/DispatchStubs.cc + ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) +endif() +if(dtrace_EXECUTABLE) + dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d + OUTPUT_SOURCES + dispatch_dtrace_provider_headers) + target_sources(dispatch + PRIVATE + ${dispatch_dtrace_provider_headers}) +endif() +target_include_directories(dispatch + PRIVATE + ${CMAKE_BINARY_DIR} + ${CMAKE_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR} + ${CMAKE_SOURCE_DIR}/private) +if(WITH_BLOCKS_RUNTIME) + target_include_directories(dispatch + SYSTEM BEFORE PRIVATE + "${WITH_BLOCKS_RUNTIME}") +endif() +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + target_compile_options(dispatch PRIVATE /EHsc-) +else() + target_compile_options(dispatch PRIVATE -fno-exceptions) +endif() +if(DISPATCH_ENABLE_ASSERTS) + target_compile_definitions(dispatch + PRIVATE + -DDISPATCH_DEBUG=1) +endif() +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_compile_definitions(dispatch + PRIVATE + -D_CRT_SECURE_NO_WARNINGS) +endif() +if(BSD_OVERLAY_FOUND) + target_compile_options(dispatch + PRIVATE + ${BSD_OVERLAY_CFLAGS}) +endif() +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + 
target_compile_options(dispatch + PRIVATE + /W3) +else() + target_compile_options(dispatch + PRIVATE + -Wall) +endif() +# FIXME(compnerd) add check for -fblocks? +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + target_compile_options(dispatch + PRIVATE + -Xclang -fblocks) +else() + # FIXME(compnerd) add check for -momit-leaf-frame-pointer? + target_compile_options(dispatch + PRIVATE + -fblocks + -momit-leaf-frame-pointer) +endif() +if(BSD_OVERLAY_FOUND) + target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) +endif() +target_link_libraries(dispatch PRIVATE Threads::Threads) +if(WITH_BLOCKS_RUNTIME) + target_link_libraries(dispatch PRIVATE BlocksRuntime) +endif() +if(CMAKE_SYSTEM_NAME STREQUAL Darwin) + set_property(TARGET dispatch + APPEND_STRING + PROPERTY LINK_FLAGS + "-Xlinker -compatibility_version -Xlinker 1" + "-Xlinker -current_version -Xlinker ${VERSION}" + "-Xlinker -dead_strip" + "-Xlinker -alias_list -Xlinker ${CMAKE_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") +endif() +if(USE_GOLD_LINKER) + set_property(TARGET dispatch + APPEND_STRING + PROPERTY LINK_FLAGS + -fuse-ld=gold) +endif() + +# Temporary staging; the various swift projects that depend on libdispatch +# all expect libdispatch.so to be in src/.libs/libdispatch.so +# So for now, make a copy so we don't have to do a coordinated commit across +# all the swift projects to change this assumption. 
+add_custom_command(TARGET dispatch POST_BUILD + COMMAND cmake -E make_directory .libs + COMMAND cmake -E copy $ .libs + COMMENT "Copying libdispatch to .libs") + +install(TARGETS + dispatch + DESTINATION + "${CMAKE_INSTALL_FULL_LIBDIR}") + diff --git a/src/Makefile.am b/src/Makefile.am index c417aec97..8beaf1e85 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -3,44 +3,58 @@ # if HAVE_SWIFT -swiftlibdir=${prefix}/lib/swift/linux +swiftlibdir=${prefix}/lib/swift/${OS_STRING} swiftlib_LTLIBRARIES=libdispatch.la else lib_LTLIBRARIES=libdispatch.la endif -libdispatch_la_SOURCES= \ - allocator.c \ - apply.c \ - benchmark.c \ - data.c \ +if DISPATCH_USE_INTERNAL_WORKQUEUE +INTERNAL_WORKQUEUE_SOURCES= \ + event/workqueue.c \ + event/workqueue_internal.h +endif + +libdispatch_la_SOURCES= \ + allocator.c \ + apply.c \ + benchmark.c \ + data.c \ + init.c \ introspection.c \ - init.c \ - io.c \ - object.c \ - once.c \ - queue.c \ - semaphore.c \ - source.c \ - time.c \ - transform.c \ - voucher.c \ + io.c \ + mach.c \ + object.c \ + once.c \ + queue.c \ + semaphore.c \ + source.c \ + time.c \ + transform.c \ + voucher.c \ protocol.defs \ - provider.d \ - allocator_internal.h \ + provider.d \ + allocator_internal.h \ data_internal.h \ inline_internal.h \ - internal.h \ + internal.h \ introspection_internal.h \ io_internal.h \ + mach_internal.h \ object_internal.h \ queue_internal.h \ - semaphore_internal.h \ - shims.h \ + semaphore_internal.h \ + shims.h \ source_internal.h \ - trace.h \ + trace.h \ voucher_internal.h \ - firehose/firehose_internal.h \ + event/event.c \ + event/event_config.h \ + event/event_epoll.c \ + event/event_internal.h \ + event/event_kevent.c \ + firehose/firehose_internal.h \ + shims/android_stubs.h \ shims/atomic.h \ shims/atomic_sfb.h \ shims/getprogname.h \ @@ -51,8 +65,9 @@ libdispatch_la_SOURCES= \ shims/lock.h \ shims/perfmon.h \ shims/time.h \ - shims/tsd.h \ - shims/yield.h + shims/tsd.h \ + shims/yield.h \ + 
$(INTERNAL_WORKQUEUE_SOURCES) EXTRA_libdispatch_la_SOURCES= EXTRA_libdispatch_la_DEPENDENCIES= @@ -60,23 +75,31 @@ EXTRA_libdispatch_la_DEPENDENCIES= AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ - $(MARCH_FLAGS) $(KQUEUE_CFLAGS) $(BSD_OVERLAY_CFLAGS) + $(MARCH_FLAGS) $(BSD_OVERLAY_CFLAGS) +if DISPATCH_ENABLE_ASSERTS +DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1 +endif AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) -if BUILD_OWN_PTHREAD_WORKQUEUES - PTHREAD_WORKQUEUE_LIBS=$(top_builddir)/libpwq/libpthread_workqueue.la - PTHREAD_WORKQUEUE_CFLAGS=-I$(top_srcdir)/libpwq/include -else if HAVE_PTHREAD_WORKQUEUES PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue + PTHREAD_WORKQUEUE_CFLAGS= +endif + +if BUILD_OWN_BLOCKS_RUNTIME +libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c +CBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime +CXXBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime +if USE_OBJC +BLOCKS_RUNTIME_LIBS=-ldl endif endif libdispatch_la_LDFLAGS=-avoid-version -libdispatch_la_LIBADD=$(KQUEUE_LIBS) $(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) +libdispatch_la_LIBADD=$(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS) if HAVE_DARWIN_LD libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ @@ -131,48 +154,47 @@ SWIFT_SRC_FILES=\ swift/Wrapper.swift SWIFT_ABS_SRC_FILES = $(SWIFT_SRC_FILES:%=$(abs_srcdir)/%) -SWIFT_OBJ_FILES = $(SWIFT_SRC_FILES:%.swift=$(abs_builddir)/%.o) +SWIFT_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.o +SWIFT_LIBTOOL_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.lo + +SWIFTC_FLAGS+= -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks +if DISPATCH_ENABLE_OPTIMIZATION 
+SWIFTC_FLAGS+=-O +endif + +# this saves the object file, then tricks libtool into generating a .lo file and +# then moves the object file back in the places libtool expects them to be for +# the PIC and non-PIC case. +$(abs_builddir)/swift/swift_overlay.lo: $(abs_builddir)/swift/swift_overlay.o + mv $(abs_builddir)/swift/swift_overlay.o $(abs_builddir)/swift/.libs/swift_overlay.o.save + $(LIBTOOL) --mode=compile --tag=CC true -o $< -c /dev/null + cp $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/.libs/swift_overlay.o + mv $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/swift_overlay.o + +$(abs_builddir)/swift/swift_overlay.o: $(SWIFT_ABS_SRC_FILES) $(SWIFTC) + @rm -f $@ + $(SWIFTC) -whole-module-optimization -emit-library -c $(SWIFT_ABS_SRC_FILES) \ + $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ + -o $@ -emit-module-path $(abs_builddir)/swift/Dispatch.swiftmodule libdispatch_la_SOURCES+=swift/DispatchStubs.cc EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES) -EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule -libdispatch_la_LIBADD+=$(SWIFT_OBJ_FILES) +EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(SWIFT_LIBTOOL_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule +libdispatch_la_LIBADD+=$(SWIFT_LIBTOOL_OBJ_FILES) SWIFT_GEN_FILES= \ $(abs_builddir)/swift/Dispatch.swiftmodule \ $(abs_builddir)/swift/Dispatch.swiftdoc \ - $(SWIFT_OBJ_FILES) \ - $(SWIFT_OBJ_FILES:%=%.d) \ - $(SWIFT_OBJ_FILES:%=%.swiftdeps) \ - $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ - $(SWIFT_OBJ_FILES:%=%.~partial.swiftdoc) \ - $(SWIFT_OBJ_FILES:%=%.~partial.swiftdeps) - -SWIFTC_FLAGS = -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.map -I$(abs_top_srcdir) -Xcc -fblocks - -$(abs_builddir)/swift/%.o: $(abs_srcdir)/swift/%.swift - $(SWIFTC) -frontend -c $(SWIFT_ABS_SRC_FILES) -primary-file $< \ - $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ - 
-o $@ -emit-module-path $@.~partial.swiftmodule \ - -emit-module-doc-path $@.~partial.swiftdoc -emit-dependencies-path $@.d \ - -emit-reference-dependencies-path $@.swiftdeps \ - -module-cache-path $(top_builddir) - -$(abs_builddir)/swift/Dispatch.swiftmodule: $(SWIFT_ABS_SRC_FILES) - $(SWIFTC) -frontend -emit-module $(SWIFT_OBJ_FILES:%=%.~partial.swiftmodule) \ - $(SWIFTC_FLAGS) -module-cache-path $(top_builddir) -module-link-name dispatch \ - -o $@ -emit-module-doc-path $(@:%.swiftmodule=%.swiftdoc) + $(SWIFT_OBJ_FILES) -swiftmoddir=${prefix}/lib/swift/linux/${build_cpu} +swiftmoddir=${prefix}/lib/swift/${OS_STRING}/${host_cpu} swiftmod_HEADERS=\ $(abs_builddir)/swift/Dispatch.swiftmodule \ $(abs_builddir)/swift/Dispatch.swiftdoc - endif BUILT_SOURCES=$(MIG_SOURCES) $(DTRACE_SOURCES) nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) CLEANFILES=$(BUILT_SOURCES) $(SWIFT_GEN_FILES) DISTCLEANFILES=pthread_machdep.h pthread System mach objc - diff --git a/src/allocator.c b/src/allocator.c index a3a8c650a..e6ea77217 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -274,22 +274,16 @@ mark_bitmap_as_full_if_still_full(volatile bitmap_t *supermap, dispatch_assert(bitmap_index < BITMAPS_PER_SUPERMAP); #endif const bitmap_t mask = BITMAP_C(1) << bitmap_index; - bitmap_t s, s_new, s_masked; + bitmap_t s, s_new; - if (!bitmap_is_full(*bitmap)) { - return; - } - s_new = *supermap; - for (;;) { - // No barriers because supermaps are only advisory, they - // don't protect access to other memory. - s = s_new; - s_masked = s | mask; - if (os_atomic_cmpxchgvw(supermap, s, s_masked, &s_new, relaxed) || - !bitmap_is_full(*bitmap)) { - return; + // No barriers because supermaps are only advisory, they + // don't protect access to other memory. 
+ os_atomic_rmw_loop(supermap, s, s_new, relaxed, { + if (!bitmap_is_full(*bitmap)) { + os_atomic_rmw_loop_give_up(return); } - } + s_new = s | mask; + }); } #pragma mark - diff --git a/src/apply.c b/src/apply.c index e051a1630..6f44cf90b 100644 --- a/src/apply.c +++ b/src/apply.c @@ -35,7 +35,7 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) size_t idx, done = 0; idx = os_atomic_inc_orig2o(da, da_index, acquire); - if (!fastpath(idx < iter)) goto out; + if (unlikely(idx >= iter)) goto out; // da_dc is only safe to access once the 'index lock' has been acquired dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; @@ -52,10 +52,10 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) _dispatch_thread_context_push(&apply_ctxt); dispatch_thread_frame_s dtf; - pthread_priority_t old_dp; + dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { _dispatch_thread_frame_push(&dtf, dq); - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, NULL); + old_dbp = _dispatch_set_basepri(dq->dq_priority); } dispatch_invoke_flags_t flags = da->da_flags; @@ -67,10 +67,10 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) done++; idx = os_atomic_inc_orig2o(da, da_index, relaxed); }); - } while (fastpath(idx < iter)); + } while (likely(idx < iter)); if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { - _dispatch_reset_defaultpriority(old_dp); + _dispatch_reset_basepri(old_dbp); _dispatch_thread_frame_pop(&dtf); } @@ -124,7 +124,7 @@ _dispatch_apply_autorelease_frequency(dispatch_queue_t dq) while (dq && !qaf) { qaf = _dispatch_queue_autorelease_frequency(dq); - dq = slowpath(dq->do_targetq); + dq = dq->do_targetq; } return qaf; } @@ -159,11 +159,11 @@ static inline void _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, dispatch_function_t func) { - uint32_t i = 0; + int32_t i = 0; dispatch_continuation_t head = NULL, tail = NULL; // The current thread does not need a continuation - uint32_t 
continuation_cnt = da->da_thr_cnt - 1; + int32_t continuation_cnt = da->da_thr_cnt - 1; dispatch_assert(continuation_cnt); @@ -181,9 +181,8 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, } _dispatch_thread_event_init(&da->da_event); - - _dispatch_queue_push_list(dq, head, tail, head->dc_priority, - continuation_cnt); + // FIXME: dq may not be the right queue for the priority of `head` + _dispatch_root_queue_push_inline(dq, head, tail, continuation_cnt); // Call the first element directly _dispatch_apply_invoke_and_wait(da); } @@ -193,19 +192,19 @@ static void _dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; - uint32_t da_width = da->da_thr_cnt - 1; + int32_t da_width = da->da_thr_cnt - 1; dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; do { - uint32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width); + int32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width); - if (slowpath(da_width > width)) { - uint32_t excess = da_width - width; + if (unlikely(da_width > width)) { + int32_t excess = da_width - width; for (tq = dq; tq != rq; tq = tq->do_targetq) { _dispatch_queue_relinquish_width(tq, excess); } da_width -= excess; - if (slowpath(!da_width)) { + if (unlikely(!da_width)) { return _dispatch_apply_serial(da); } da->da_thr_cnt -= excess; @@ -217,45 +216,69 @@ _dispatch_apply_redirect(void *ctxt) da->da_flags = _dispatch_queue_autorelease_frequency(dq); } rq = rq->do_targetq; - } while (slowpath(rq->do_targetq)); + } while (unlikely(rq->do_targetq)); _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); do { _dispatch_queue_relinquish_width(dq, da_width); dq = dq->do_targetq; - } while (slowpath(dq->do_targetq)); + } while (unlikely(dq->do_targetq)); } #define DISPATCH_APPLY_MAX UINT16_MAX // must be < sqrt(SIZE_MAX) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_t +_dispatch_apply_root_queue(dispatch_queue_t dq) +{ + if (dq) { + while (unlikely(dq->do_targetq)) { + dq = 
dq->do_targetq; + } + // if the current root queue is a pthread root queue, select it + if (!_dispatch_priority_qos(dq->dq_priority)) { + return dq; + } + } + + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_qos_t qos = _dispatch_qos_from_pp(pp); + return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, false); +} + DISPATCH_NOINLINE void dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) { - if (slowpath(iterations == 0)) { + if (unlikely(iterations == 0)) { return; } - uint32_t thr_cnt = dispatch_hw_config(active_cpus); - dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + dispatch_thread_context_t dtctxt = + _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; dispatch_queue_t old_dq = _dispatch_queue_get_current(); - if (!slowpath(nested)) { + if (likely(dq == DISPATCH_APPLY_AUTO)) { + dq = _dispatch_apply_root_queue(old_dq); + } + dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority); + if (unlikely(dq->do_targetq)) { + // if the queue passed-in is not a root queue, use the current QoS + // since the caller participates in the work anyway + qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + } + int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + + if (likely(!nested)) { nested = iterations; } else { - thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + thr_cnt = nested < (size_t)thr_cnt ? thr_cnt / (int32_t)nested : 1; nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX ? nested * iterations : DISPATCH_APPLY_MAX; } - if (iterations < thr_cnt) { - thr_cnt = (uint32_t)iterations; - } - if (slowpath(dq == DISPATCH_APPLY_CURRENT_ROOT_QUEUE)) { - dq = old_dq ? 
old_dq : _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false); - while (slowpath(dq->do_targetq)) { - dq = dq->do_targetq; - } + if (iterations < (size_t)thr_cnt) { + thr_cnt = (int32_t)iterations; } struct dispatch_continuation_s dc = { .dc_func = (void*)func, @@ -276,11 +299,11 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, #endif da->da_flags = 0; - if (slowpath(dq->dq_width == 1) || slowpath(thr_cnt <= 1)) { + if (unlikely(dq->dq_width == 1 || thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } - if (slowpath(dq->do_targetq)) { - if (slowpath(dq == old_dq)) { + if (unlikely(dq->do_targetq)) { + if (unlikely(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } else { return dispatch_sync_f(dq, da, _dispatch_apply_redirect); diff --git a/src/block.cpp b/src/block.cpp index 3060a2a4d..2a6f00799 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -32,6 +32,8 @@ extern "C" { #include "internal.h" } +// NOTE: this file must not contain any atomic operations + #if DISPATCH_DEBUG && DISPATCH_BLOCK_PRIVATE_DATA_DEBUG #define _dispatch_block_private_data_debug(msg, ...) 
\ _dispatch_debug("block_private[%p]: " msg, (this), ##__VA_ARGS__) @@ -83,7 +85,8 @@ struct dispatch_block_private_data_s { ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group); } if (dbpd_queue) { - ((void (*)(os_mpsc_queue_t))_os_object_release_internal)(dbpd_queue); + ((void (*)(os_mpsc_queue_t, uint16_t)) + _os_object_release_internal_n)(dbpd_queue, 2); } if (dbpd_block) Block_release(dbpd_block); if (dbpd_voucher) voucher_release(dbpd_voucher); diff --git a/src/data.c b/src/data.c index 644328911..3efab2f89 100644 --- a/src/data.c +++ b/src/data.c @@ -100,51 +100,22 @@ #define _dispatch_data_release(x) dispatch_release(x) #endif -const dispatch_block_t _dispatch_data_destructor_free = ^{ - DISPATCH_INTERNAL_CRASH(0, "free destructor called"); -}; - -const dispatch_block_t _dispatch_data_destructor_none = ^{ - DISPATCH_INTERNAL_CRASH(0, "none destructor called"); -}; - -#if !HAVE_MACH -const dispatch_block_t _dispatch_data_destructor_munmap = ^{ - DISPATCH_INTERNAL_CRASH(0, "munmap destructor called"); -}; -#else -// _dispatch_data_destructor_munmap is a linker alias to the following -const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ - DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called"); -}; -#endif - -const dispatch_block_t _dispatch_data_destructor_inline = ^{ - DISPATCH_INTERNAL_CRASH(0, "inline destructor called"); -}; - -struct dispatch_data_s _dispatch_data_empty = { -#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - .do_vtable = DISPATCH_DATA_EMPTY_CLASS, -#else - DISPATCH_GLOBAL_OBJECT_HEADER(data), - .do_next = DISPATCH_OBJECT_LISTLESS, -#endif -}; - DISPATCH_ALWAYS_INLINE static inline dispatch_data_t _dispatch_data_alloc(size_t n, size_t extra) { dispatch_data_t data; size_t size; + size_t base_size; - if (os_mul_and_add_overflow(n, sizeof(range_record), - sizeof(struct dispatch_data_s) + extra, &size)) { + if (os_add_overflow(sizeof(struct dispatch_data_s), extra, &base_size)) { + return DISPATCH_OUT_OF_MEMORY; + } + if 
(os_mul_and_add_overflow(n, sizeof(range_record), base_size, &size)) { return DISPATCH_OUT_OF_MEMORY; } - data = _dispatch_alloc(DISPATCH_DATA_CLASS, size); + data = _dispatch_object_alloc(DISPATCH_DATA_CLASS, size); data->num_records = n; #if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA data->do_targetq = dispatch_get_global_queue( @@ -167,6 +138,8 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, mach_vm_size_t vm_size = size; mach_vm_address_t vm_addr = (uintptr_t)buffer; mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); +#else + (void)size; #endif } else { if (!queue) { @@ -192,8 +165,8 @@ _dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, } void -dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, - dispatch_block_t destructor) +_dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer, + size_t size, dispatch_block_t destructor) { if (!buffer || !size) { if (destructor) { @@ -284,7 +257,7 @@ dispatch_data_create_alloc(size_t size, void** buffer_ptr) } void -_dispatch_data_dispose(dispatch_data_t dd) +_dispatch_data_dispose(dispatch_data_t dd, DISPATCH_UNUSED bool *allow_free) { if (_dispatch_data_leaf(dd)) { _dispatch_data_destroy_buffer(dd->buf, dd->size, dd->do_targetq, @@ -298,6 +271,18 @@ _dispatch_data_dispose(dispatch_data_t dd) } } +void +_dispatch_data_set_target_queue(dispatch_data_t dd, dispatch_queue_t tq) +{ +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA + _dispatch_retain(tq); + tq = os_atomic_xchg2o(dd, do_targetq, tq, release); + if (tq) _dispatch_release(tq); +#else + _dispatch_object_set_target_queue_inline(dd, tq); +#endif +} + size_t _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) { @@ -433,7 +418,7 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, // find the record containing the end of the current range // and optimize the case when you just remove bytes at the origin - size_t count, last_length; + size_t count, last_length = 0; if 
(to_the_end) { count = dd_num_records - i; diff --git a/src/data.m b/src/data.m index 190b1edd1..1d024ffe7 100644 --- a/src/data.m +++ b/src/data.m @@ -28,9 +28,11 @@ #include +// NOTE: this file must not contain any atomic operations + @interface DISPATCH_CLASS(data) () -@property (readonly) NSUInteger length; -@property (readonly) const void *bytes NS_RETURNS_INNER_POINTER; +@property (readonly,nonatomic) NSUInteger length; +@property (readonly,nonatomic) const void *bytes NS_RETURNS_INNER_POINTER; - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy freeWhenDone:(BOOL)freeBytes bytesAreVM:(BOOL)vm; @@ -66,29 +68,26 @@ - (id)initWithBytes:(void *)bytes length:(NSUInteger)length copy:(BOOL)copy } else { destructor = DISPATCH_DATA_DESTRUCTOR_NONE; } - dispatch_data_init(self, bytes, length, destructor); + _dispatch_data_init_with_bytes(self, bytes, length, destructor); return self; } -#define _dispatch_data_objc_dispose(selector) \ - struct dispatch_data_s *dd = (void*)self; \ - _dispatch_data_dispose(self); \ - dispatch_queue_t tq = dd->do_targetq; \ - dispatch_function_t func = dd->finalizer; \ - void *ctxt = dd->ctxt; \ - [super selector]; \ - if (func && ctxt) { \ - if (!tq) { \ - tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0);\ - } \ - dispatch_async_f(tq, ctxt, func); \ - } \ - if (tq) { \ - _os_object_release_internal((_os_object_t)tq); \ - } - - (void)dealloc { - _dispatch_data_objc_dispose(dealloc); + struct dispatch_data_s *dd = (void*)self; + _dispatch_data_dispose(self, NULL); + dispatch_queue_t tq = dd->do_targetq; + dispatch_function_t func = dd->finalizer; + void *ctxt = dd->ctxt; + [super dealloc]; + if (func && ctxt) { + if (!tq) { + tq = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0); + } + dispatch_async_f(tq, ctxt, func); + } + if (tq) { + _os_object_release_internal((_os_object_t)tq); + } } - (BOOL)_bytesAreVM { @@ -113,10 +112,7 @@ - (void)_setFinalizer:(dispatch_function_t)finalizer { - 
(void)_setTargetQueue:(dispatch_queue_t)queue { struct dispatch_data_s *dd = (void*)self; - _os_object_retain_internal((_os_object_t)queue); - dispatch_queue_t prev; - prev = os_atomic_xchg2o(dd, do_targetq, queue, release); - if (prev) _os_object_release_internal((_os_object_t)prev); + return _dispatch_data_set_target_queue(dd, queue); } - (NSString *)debugDescription { @@ -124,9 +120,9 @@ - (NSString *)debugDescription { if (!nsstring) return nil; char buf[2048]; _dispatch_data_debug(self, buf, sizeof(buf)); - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), buf]; } - (NSUInteger)length { diff --git a/src/data_internal.h b/src/data_internal.h index bbef21e41..19fc3d9ad 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -100,12 +100,13 @@ struct dispatch_data_format_type_s { dispatch_transform_t encode; }; -void dispatch_data_init(dispatch_data_t data, const void *buffer, size_t size, - dispatch_block_t destructor); -void _dispatch_data_dispose(dispatch_data_t data); +void _dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer, + size_t size, dispatch_block_t destructor); +void _dispatch_data_dispose(dispatch_data_t data, bool *allow_free); +void _dispatch_data_set_target_queue(struct dispatch_data_s *dd, + dispatch_queue_t tq); size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); -const void* -_dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); +const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); #if !defined(__cplusplus) extern const dispatch_block_t _dispatch_data_destructor_inline; diff --git a/src/event/event.c b/src/event/event.c new file mode 100644 index 000000000..34abbf041 --- /dev/null +++ b/src/event/event.c @@ -0,0 +1,327 @@ +/* + * 
Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +DISPATCH_NOINLINE +static dispatch_unote_t +_dispatch_unote_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + dispatch_unote_linkage_t dul; + dispatch_unote_class_t du; + + if (mask & ~dst->dst_mask) { + return DISPATCH_UNOTE_NULL; + } + + if (dst->dst_filter != DISPATCH_EVFILT_TIMER) { + if (dst->dst_mask && !mask) { + return DISPATCH_UNOTE_NULL; + } + } + + if ((dst->dst_flags & EV_UDATA_SPECIFIC) || + (dst->dst_filter == DISPATCH_EVFILT_TIMER)) { + du = _dispatch_calloc(1u, dst->dst_size); + } else { + dul = _dispatch_calloc(1u, sizeof(*dul) + dst->dst_size); + du = _dispatch_unote_linkage_get_unote(dul)._du; + } + du->du_type = dst; + du->du_can_be_wlh = dst->dst_per_trigger_qos; + du->du_ident = (uint32_t)handle; + du->du_filter = dst->dst_filter; + du->du_fflags = (typeof(du->du_fflags))mask; + if (dst->dst_flags & EV_UDATA_SPECIFIC) { + du->du_is_direct = true; + } + du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR; + return (dispatch_unote_t){ ._du = du }; +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_with_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (!handle) { + return DISPATCH_UNOTE_NULL; + } + return _dispatch_unote_create(dst, handle, 
mask); +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_with_fd(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ +#if !TARGET_OS_MAC // + if (handle > INT_MAX) { + return DISPATCH_UNOTE_NULL; + } +#endif + dispatch_unote_t du = _dispatch_unote_create(dst, handle, mask); + if (du._du) { + int16_t filter = dst->dst_filter; + du._du->du_data_action = (filter == EVFILT_READ||filter == EVFILT_WRITE) + ? DISPATCH_UNOTE_ACTION_DATA_SET : DISPATCH_UNOTE_ACTION_DATA_OR; + } + return du; +} + +DISPATCH_NOINLINE +dispatch_unote_t +_dispatch_unote_create_without_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (handle) { + return DISPATCH_UNOTE_NULL; + } + return _dispatch_unote_create(dst, handle, mask); +} + +DISPATCH_NOINLINE +void +_dispatch_unote_dispose(dispatch_unote_t du) +{ + void *ptr = du._du; +#if HAVE_MACH + if (du._du->dmrr_handler_is_block) { + Block_release(du._dmrr->dmrr_handler_ctxt); + } +#endif + if (du._du->du_is_timer) { + if (unlikely(du._dt->dt_heap_entry[DTH_TARGET_ID] != DTH_INVALID_ID || + du._dt->dt_heap_entry[DTH_DEADLINE_ID] != DTH_INVALID_ID)) { + DISPATCH_INTERNAL_CRASH(0, "Disposing of timer still in its heap"); + } + if (unlikely(du._dt->dt_pending_config)) { + free(du._dt->dt_pending_config); + du._dt->dt_pending_config = NULL; + } + } else if (!du._du->du_is_direct) { + ptr = _dispatch_unote_get_linkage(du); + } + free(ptr); +} + +#pragma mark data or / add + +static dispatch_unote_t +_dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle, + unsigned long mask) +{ + if (handle || mask) { + return DISPATCH_UNOTE_NULL; + } + + // bypass _dispatch_unote_create() because this is always "direct" + // even when EV_UDATA_SPECIFIC is 0 + dispatch_unote_class_t du = _dispatch_calloc(1u, dst->dst_size); + du->du_type = dst; + du->du_filter = dst->dst_filter; + du->du_is_direct = true; + return (dispatch_unote_t){ ._du = du }; +} + +const dispatch_source_type_s 
_dispatch_source_type_data_add = { + .dst_kind = "data-add", + .dst_filter = DISPATCH_EVFILT_CUSTOM_ADD, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +const dispatch_source_type_s _dispatch_source_type_data_or = { + .dst_kind = "data-or", + .dst_filter = DISPATCH_EVFILT_CUSTOM_OR, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +const dispatch_source_type_s _dispatch_source_type_data_replace = { + .dst_kind = "data-replace", + .dst_filter = DISPATCH_EVFILT_CUSTOM_REPLACE, + .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_data_create, + .dst_merge_evt = NULL, +}; + +#pragma mark file descriptors + +const dispatch_source_type_s _dispatch_source_type_read = { + .dst_kind = "read", + .dst_filter = EVFILT_READ, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_DECL_NOTE_LOWAT + .dst_fflags = NOTE_LOWAT, +#endif + .dst_data = 1, +#endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +const dispatch_source_type_s _dispatch_source_type_write = { + .dst_kind = "write", + .dst_filter = EVFILT_WRITE, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_DECL_NOTE_LOWAT + .dst_fflags = NOTE_LOWAT, +#endif + .dst_data = 1, +#endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_fd, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +#pragma mark signals + +static dispatch_unote_t 
+_dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle, + unsigned long mask) +{ + if (handle >= NSIG) { + return DISPATCH_UNOTE_NULL; + } + dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask); + if (du._du) { + du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_signal = { + .dst_kind = "signal", + .dst_filter = EVFILT_SIGNAL, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_signal_create, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +#pragma mark timers + +bool _dispatch_timers_reconfigure, _dispatch_timers_expired; +uint32_t _dispatch_timers_processing_mask; +#if DISPATCH_USE_DTRACE +uint32_t _dispatch_timers_will_wake; +#endif +#define DISPATCH_TIMER_HEAP_INITIALIZER(tidx) \ + [tidx] = { \ + .dth_target = UINT64_MAX, \ + .dth_deadline = UINT64_MAX, \ + } +#define DISPATCH_TIMER_HEAP_INIT(kind, qos) \ + DISPATCH_TIMER_HEAP_INITIALIZER(DISPATCH_TIMER_INDEX( \ + DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)) + +struct dispatch_timer_heap_s _dispatch_timers_heap[] = { + DISPATCH_TIMER_HEAP_INIT(WALL, NORMAL), + DISPATCH_TIMER_HEAP_INIT(MACH, NORMAL), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_TIMER_HEAP_INIT(WALL, CRITICAL), + DISPATCH_TIMER_HEAP_INIT(MACH, CRITICAL), + DISPATCH_TIMER_HEAP_INIT(WALL, BACKGROUND), + DISPATCH_TIMER_HEAP_INIT(MACH, BACKGROUND), +#endif +}; + +static dispatch_unote_t +_dispatch_source_timer_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + uint32_t fflags = dst->dst_fflags; + dispatch_unote_t du; + + // normalize flags + if (mask & DISPATCH_TIMER_STRICT) { + mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND; + } + + if (fflags & DISPATCH_TIMER_INTERVAL) { + if (!handle) return DISPATCH_UNOTE_NULL; + du = _dispatch_unote_create_without_handle(dst, 0, mask); + } else { + du = 
_dispatch_unote_create_without_handle(dst, handle, mask); + } + + if (du._dt) { + du._dt->du_is_timer = true; + du._dt->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD; + du._dt->du_fflags |= fflags; + du._dt->du_ident = _dispatch_source_timer_idx(du); + du._dt->dt_timer.target = UINT64_MAX; + du._dt->dt_timer.deadline = UINT64_MAX; + du._dt->dt_timer.interval = UINT64_MAX; + du._dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; + du._dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; + } + return du; +} + +const dispatch_source_type_s _dispatch_source_type_timer = { + .dst_kind = "timer", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; + +const dispatch_source_type_s _dispatch_source_type_after = { + .dst_kind = "timer (after)", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = 0, + .dst_fflags = DISPATCH_TIMER_AFTER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; + +const dispatch_source_type_s _dispatch_source_type_interval = { + .dst_kind = "timer (interval)", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND + |DISPATCH_INTERVAL_UI_ANIMATION, + .dst_fflags = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_MACH, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, +}; diff --git a/src/event/event_config.h b/src/event/event_config.h new file mode 100644 index 000000000..60f776f95 --- /dev/null +++ b/src/event/event_config.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_EVENT_EVENT_CONFIG__ +#define __DISPATCH_EVENT_EVENT_CONFIG__ + +#if defined(__linux__) +# include +# define DISPATCH_EVENT_BACKEND_EPOLL 1 +# define DISPATCH_EVENT_BACKEND_KEVENT 0 +#elif __has_include() +# include +# define DISPATCH_EVENT_BACKEND_EPOLL 0 +# define DISPATCH_EVENT_BACKEND_KEVENT 1 +#else +# error unsupported event loop +#endif + +#if DISPATCH_DEBUG +#define DISPATCH_MGR_QUEUE_DEBUG 1 +#define DISPATCH_WLH_DEBUG 1 +#endif + +#ifndef DISPATCH_MGR_QUEUE_DEBUG +#define DISPATCH_MGR_QUEUE_DEBUG 0 +#endif + +#ifndef DISPATCH_WLH_DEBUG +#define DISPATCH_WLH_DEBUG 0 +#endif + +#ifndef DISPATCH_MACHPORT_DEBUG +#define DISPATCH_MACHPORT_DEBUG 0 +#endif + +#ifndef DISPATCH_TIMER_ASSERTIONS +#if DISPATCH_DEBUG +#define DISPATCH_TIMER_ASSERTIONS 1 +#else +#define DISPATCH_TIMER_ASSERTIONS 0 +#endif +#endif + +#if DISPATCH_TIMER_ASSERTIONS +#define DISPATCH_TIMER_ASSERT(a, op, b, text) ({ \ + typeof(a) _a = (a); \ + if (unlikely(!(_a op (b)))) { \ + DISPATCH_CLIENT_CRASH(_a, "Timer: " text); \ + } \ + }) +#else +#define DISPATCH_TIMER_ASSERT(a, op, b, text) ((void)0) +#endif + +#ifndef EV_VANISHED +#define EV_VANISHED 0x0200 +#endif + +#if DISPATCH_EVENT_BACKEND_KEVENT +# if defined(EV_SET_QOS) +# define DISPATCH_USE_KEVENT_QOS 1 +# ifndef KEVENT_FLAG_IMMEDIATE +# define KEVENT_FLAG_IMMEDIATE 
0x001 +# endif +# ifndef KEVENT_FLAG_ERROR_EVENTS +# define KEVENT_FLAG_ERROR_EVENTS 0x002 +# endif +# else +# define DISPATCH_USE_KEVENT_QOS 0 +# endif + +# ifdef NOTE_LEEWAY +# define DISPATCH_HAVE_TIMER_COALESCING 1 +# else +# define NOTE_LEEWAY 0 +# define DISPATCH_HAVE_TIMER_COALESCING 0 +# endif // !NOTE_LEEWAY +# if defined(NOTE_CRITICAL) && defined(NOTE_BACKGROUND) +# define DISPATCH_HAVE_TIMER_QOS 1 +# else +# undef NOTE_CRITICAL +# define NOTE_CRITICAL 0 +# undef NOTE_BACKGROUND +# define NOTE_BACKGROUND 0 +# define DISPATCH_HAVE_TIMER_QOS 0 +# endif // !defined(NOTE_CRITICAL) || !defined(NOTE_BACKGROUND) + +# ifndef NOTE_FUNLOCK +# define NOTE_FUNLOCK 0x00000100 +# endif + +# if HAVE_DECL_NOTE_REAP +# if defined(NOTE_REAP) && defined(__APPLE__) +# undef NOTE_REAP +# define NOTE_REAP 0x10000000 // +# endif +# endif // HAVE_DECL_NOTE_REAP + +# ifndef VQ_QUOTA +# undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 +# endif // VQ_QUOTA + +# ifndef VQ_NEARLOWDISK +# undef HAVE_DECL_VQ_NEARLOWDISK +# endif // VQ_NEARLOWDISK + +# ifndef VQ_DESIRED_DISK +# undef HAVE_DECL_VQ_DESIRED_DISK +# endif // VQ_DESIRED_DISK + +# if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__) +# define EVFILT_NW_CHANNEL (-16) +# define NOTE_FLOW_ADV_UPDATE 0x1 +# endif +#else // DISPATCH_EVENT_BACKEND_KEVENT +# define EV_ADD 0x0001 +# define EV_DELETE 0x0002 +# define EV_ENABLE 0x0004 + +# define EV_ONESHOT 0x0010 +# define EV_CLEAR 0x0020 +# define EV_DISPATCH 0x0080 + +# define EVFILT_READ (-1) +# define EVFILT_WRITE (-2) +# define EVFILT_SIGNAL (-3) +# define EVFILT_TIMER (-4) +# define EVFILT_SYSCOUNT 4 + +# define DISPATCH_HAVE_TIMER_QOS 0 +# define DISPATCH_HAVE_TIMER_COALESCING 0 +# define KEVENT_FLAG_IMMEDIATE 0x001 +#endif // !DISPATCH_EVENT_BACKEND_KEVENT + +#ifdef EV_UDATA_SPECIFIC +# define DISPATCH_EV_DIRECT (EV_UDATA_SPECIFIC|EV_DISPATCH) +#else +# define DISPATCH_EV_DIRECT 0x0000 +# define EV_UDATA_SPECIFIC 0x0000 +# undef EV_VANISHED +# define EV_VANISHED 0x0000 
+#endif + +#define DISPATCH_EV_MSG_NEEDS_FREE 0x10000 // mach message needs to be freed() + +#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_CUSTOM_REPLACE (-EVFILT_SYSCOUNT - 4) +#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 5) +#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 5) + +#if HAVE_MACH +# if !EV_UDATA_SPECIFIC +# error mach support requires EV_UDATA_SPECIFIC +# endif + +# ifndef MACH_RCV_VOUCHER +# define MACH_RCV_VOUCHER 0x00000800 +# endif + +# ifndef MACH_NOTIFY_SEND_POSSIBLE +# undef MACH_NOTIFY_SEND_POSSIBLE +# define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME +# endif + +# ifndef NOTE_MACH_CONTINUOUS_TIME +# define NOTE_MACH_CONTINUOUS_TIME 0 +# endif // NOTE_MACH_CONTINUOUS_TIME + +# ifndef HOST_NOTIFY_CALENDAR_SET +# define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE +# endif // HOST_NOTIFY_CALENDAR_SET + +# ifndef HOST_CALENDAR_SET_REPLYID +# define HOST_CALENDAR_SET_REPLYID 951 +# endif // HOST_CALENDAR_SET_REPLYID + +# ifndef MACH_SEND_OVERRIDE +# define MACH_SEND_OVERRIDE 0x00000020 +typedef unsigned int mach_msg_priority_t; +# define MACH_MSG_PRIORITY_UNSPECIFIED ((mach_msg_priority_t)0) +# endif // MACH_SEND_OVERRIDE + +# ifndef MACH_SEND_SYNC_OVERRIDE +# define MACH_SEND_SYNC_OVERRIDE 0x00100000 +# endif // MACH_SEND_SYNC_OVERRIDE + +# ifndef MACH_RCV_SYNC_WAIT +# define MACH_RCV_SYNC_WAIT 0x00004000 +# endif // MACH_RCV_SYNC_WAIT + +# define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t) +# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +# define DISPATCH_MACH_RCV_OPTIONS ( \ + MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | \ + MACH_RCV_VOUCHER) +#endif // HAVE_MACH + +#endif // __DISPATCH_EVENT_EVENT_CONFIG__ diff 
--git a/src/event/event_epoll.c b/src/event/event_epoll.c new file mode 100644 index 000000000..add4dde65 --- /dev/null +++ b/src/event/event_epoll.c @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + + +#include "internal.h" +#if DISPATCH_EVENT_BACKEND_EPOLL +#include +#include +#include +#include +#include + +#ifndef EPOLLFREE +#define EPOLLFREE 0x4000 +#endif + +#if !DISPATCH_USE_MGR_THREAD +#error unsupported configuration +#endif + +#define DISPATCH_EPOLL_MAX_EVENT_COUNT 16 + +enum { + DISPATCH_EPOLL_EVENTFD = 0x0001, + DISPATCH_EPOLL_CLOCK_WALL = 0x0002, + DISPATCH_EPOLL_CLOCK_MACH = 0x0003, +}; + +typedef struct dispatch_muxnote_s { + TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; + int dmn_fd; + uint32_t dmn_ident; + uint32_t dmn_events; + int16_t dmn_filter; + bool dmn_skip_outq_ioctl; + bool dmn_skip_inq_ioctl; +} *dispatch_muxnote_t; + +typedef struct dispatch_epoll_timeout_s { + int det_fd; + uint16_t det_ident; + bool det_registered; + bool det_armed; +} *dispatch_epoll_timeout_t; + +static int _dispatch_epfd, _dispatch_eventfd; + +static dispatch_once_t epoll_init_pred; +static void _dispatch_epoll_init(void *); + +DISPATCH_CACHELINE_ALIGN +static 
TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) +_dispatch_sources[DSL_HASH_SIZE]; + +#define DISPATCH_EPOLL_TIMEOUT_INITIALIZER(clock) \ + [DISPATCH_CLOCK_##clock] = { \ + .det_fd = -1, \ + .det_ident = DISPATCH_EPOLL_CLOCK_##clock, \ + } +static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = { + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(WALL), + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MACH), +}; + +#pragma mark dispatch_muxnote_t + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_muxnote_bucket_s * +_dispatch_muxnote_bucket(uint32_t ident) +{ + return &_dispatch_sources[DSL_HASH(ident)]; +} +#define _dispatch_unote_muxnote_bucket(du) \ + _dispatch_muxnote_bucket(du._du->du_ident) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, + uint32_t ident, int16_t filter) +{ + dispatch_muxnote_t dmn; + if (filter == EVFILT_WRITE) filter = EVFILT_READ; + TAILQ_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) { + break; + } + } + return dmn; +} +#define _dispatch_unote_muxnote_find(dmb, du) \ + _dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter) + +static void +_dispatch_muxnote_dispose(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_filter != EVFILT_READ || (uint32_t)dmn->dmn_fd != dmn->dmn_ident) { + close(dmn->dmn_fd); + } + free(dmn); +} + +static pthread_t manager_thread; + +static void +_dispatch_muxnote_signal_block_and_raise(int signo) +{ + // On linux, for signals to be delivered to the signalfd, signals + // must be blocked, else any thread that hasn't them blocked may + // receive them. 
Fix that by lazily noticing, blocking said signal, + // and raising the signal again when it happens + _dispatch_sigmask(); + pthread_kill(manager_thread, signo); +} + +static dispatch_muxnote_t +_dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) +{ + static sigset_t signals_with_unotes; + static struct sigaction sa = { + .sa_handler = _dispatch_muxnote_signal_block_and_raise, + .sa_flags = SA_RESTART, + }; + + dispatch_muxnote_t dmn; + struct stat sb; + int fd = (int)du._du->du_ident; + int16_t filter = du._du->du_filter; + bool skip_outq_ioctl = false, skip_inq_ioctl = false; + sigset_t sigmask; + + switch (filter) { + case EVFILT_SIGNAL: { + int signo = (int)du._du->du_ident; + if (!sigismember(&signals_with_unotes, signo)) { + manager_thread = pthread_self(); + sigaddset(&signals_with_unotes, signo); + sigaction(signo, &sa, NULL); + } + sigemptyset(&sigmask); + sigaddset(&sigmask, signo); + fd = signalfd(-1, &sigmask, SFD_NONBLOCK | SFD_CLOEXEC); + if (fd < 0) { + return NULL; + } + break; + } + case EVFILT_WRITE: + filter = EVFILT_READ; + case EVFILT_READ: + if (fstat(fd, &sb) < 0) { + return NULL; + } + if (S_ISREG(sb.st_mode)) { + // make a dummy fd that is both readable & writeable + fd = eventfd(1, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd < 0) { + return NULL; + } + // Linux doesn't support output queue size ioctls for regular files + skip_outq_ioctl = true; + } else if (S_ISSOCK(sb.st_mode)) { + socklen_t vlen = sizeof(int); + int v; + // Linux doesn't support saying how many clients are ready to be + // accept()ed for sockets + if (getsockopt(fd, SOL_SOCKET, SO_ACCEPTCONN, &v, &vlen) == 0) { + skip_inq_ioctl = (bool)v; + } + } + break; + + default: + DISPATCH_INTERNAL_CRASH(0, "Unexpected filter"); + } + + dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s)); + TAILQ_INIT(&dmn->dmn_readers_head); + TAILQ_INIT(&dmn->dmn_writers_head); + dmn->dmn_fd = fd; + dmn->dmn_ident = du._du->du_ident; + dmn->dmn_filter = filter; + dmn->dmn_events = 
events; + dmn->dmn_skip_outq_ioctl = skip_outq_ioctl; + dmn->dmn_skip_inq_ioctl = skip_inq_ioctl; + return dmn; +} + +#pragma mark dispatch_unote_t + +static int +_dispatch_epoll_update(dispatch_muxnote_t dmn, int op) +{ + dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init); + struct epoll_event ev = { + .events = dmn->dmn_events, + .data = { .ptr = dmn }, + }; + return epoll_ctl(_dispatch_epfd, op, dmn->dmn_fd, &ev); +} + +bool +_dispatch_unote_register(dispatch_unote_t du, + DISPATCH_UNUSED dispatch_wlh_t wlh, dispatch_priority_t pri) +{ + struct dispatch_muxnote_bucket_s *dmb; + dispatch_muxnote_t dmn; + uint32_t events = EPOLLFREE; + + dispatch_assert(!_dispatch_unote_registered(du)); + du._du->du_priority = pri; + + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = DISPATCH_WLH_ANON; + return true; + case EVFILT_WRITE: + events |= EPOLLOUT; + break; + default: + events |= EPOLLIN; + break; + } + + if (du._du->du_type->dst_flags & EV_DISPATCH) { + events |= EPOLLONESHOT; + } + + dmb = _dispatch_unote_muxnote_bucket(du); + dmn = _dispatch_unote_muxnote_find(dmb, du); + if (dmn) { + events &= ~dmn->dmn_events; + if (events) { + dmn->dmn_events |= events; + if (_dispatch_epoll_update(dmn, EPOLL_CTL_MOD) < 0) { + dmn->dmn_events &= ~events; + dmn = NULL; + } + } + } else { + dmn = _dispatch_muxnote_create(du, events); + if (_dispatch_epoll_update(dmn, EPOLL_CTL_ADD) < 0) { + _dispatch_muxnote_dispose(dmn); + dmn = NULL; + } else { + TAILQ_INSERT_TAIL(dmb, dmn, dmn_list); + } + } + + if (dmn) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + if (events & EPOLLOUT) { + TAILQ_INSERT_TAIL(&dmn->dmn_writers_head, dul, du_link); + } else { + TAILQ_INSERT_TAIL(&dmn->dmn_readers_head, dul, du_link); + } + dul->du_muxnote = dmn; + dispatch_assert(du._du->du_wlh == NULL); + du._du->du_wlh = DISPATCH_WLH_ANON; + } + return dmn != NULL; +} + 
+void +_dispatch_unote_resume(dispatch_unote_t du) +{ + dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(du)->du_muxnote; + dispatch_assert(_dispatch_unote_registered(du)); + + _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); +} + +bool +_dispatch_unote_unregister(dispatch_unote_t du, DISPATCH_UNUSED uint32_t flags) +{ + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + du._du->du_wlh = NULL; + return true; + } + if (_dispatch_unote_registered(du)) { + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + uint32_t events = dmn->dmn_events; + + if (du._du->du_filter == EVFILT_WRITE) { + TAILQ_REMOVE(&dmn->dmn_writers_head, dul, du_link); + } else { + TAILQ_REMOVE(&dmn->dmn_readers_head, dul, du_link); + } + _TAILQ_TRASH_ENTRY(dul, du_link); + dul->du_muxnote = NULL; + + if (TAILQ_EMPTY(&dmn->dmn_readers_head)) { + events &= (uint32_t)(~EPOLLIN); + } + if (TAILQ_EMPTY(&dmn->dmn_writers_head)) { + events &= (uint32_t)(~EPOLLOUT); + } + + if (events == dmn->dmn_events) { + // nothing to do + } else if (events & (EPOLLIN | EPOLLOUT)) { + dmn->dmn_events = events; + _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); + } else { + epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); + TAILQ_REMOVE(_dispatch_unote_muxnote_bucket(du), dmn, dmn_list); + _dispatch_muxnote_dispose(dmn); + } + dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON); + du._du->du_wlh = NULL; + } + return true; +} + +#pragma mark timers + +static void +_dispatch_event_merge_timer(dispatch_clock_t clock) +{ + _dispatch_timers_expired = true; + _dispatch_timers_processing_mask |= 1 << DISPATCH_TIMER_INDEX(clock, 0); +#if DISPATCH_USE_DTRACE + _dispatch_timers_will_wake |= 1 << 0; +#endif + _dispatch_epoll_timeout[clock].det_armed = false; + _dispatch_timers_heap[clock].dth_flags &= ~DTH_ARMED; +} + +static void +_dispatch_timeout_program(uint32_t tidx, 
uint64_t target, + DISPATCH_UNUSED uint64_t leeway) +{ + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock]; + struct epoll_event ev = { + .events = EPOLLONESHOT | EPOLLIN, + .data = { .u32 = timer->det_ident }, + }; + int op; + + if (target >= INT64_MAX && !timer->det_registered) { + return; + } + + if (unlikely(timer->det_fd < 0)) { + clockid_t clockid; + int fd; + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_MACH: + clockid = CLOCK_MONOTONIC; + break; + case DISPATCH_CLOCK_WALL: + clockid = CLOCK_REALTIME; + break; + } + fd = timerfd_create(clockid, TFD_NONBLOCK | TFD_CLOEXEC); + if (!dispatch_assume(fd >= 0)) { + return; + } + timer->det_fd = fd; + } + + if (target < INT64_MAX) { + struct itimerspec its = { .it_value = { + .tv_sec = target / NSEC_PER_SEC, + .tv_nsec = target % NSEC_PER_SEC, + } }; + dispatch_assume_zero(timerfd_settime(timer->det_fd, TFD_TIMER_ABSTIME, + &its, NULL)); + if (!timer->det_registered) { + op = EPOLL_CTL_ADD; + } else if (!timer->det_armed) { + op = EPOLL_CTL_MOD; + } else { + return; + } + } else { + op = EPOLL_CTL_DEL; + } + dispatch_assume_zero(epoll_ctl(_dispatch_epfd, op, timer->det_fd, &ev)); + timer->det_armed = timer->det_registered = (op != EPOLL_CTL_DEL);; +} + +void +_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) +{ + uint64_t target = range.delay; + target += _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED; + _dispatch_timeout_program(tidx, target, range.leeway); +} + +void +_dispatch_event_loop_timer_delete(uint32_t tidx) +{ + _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; + _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX); +} + +#pragma mark dispatch_loop + +void +_dispatch_event_loop_atfork_child(void) +{ +} + +static void +_dispatch_epoll_init(void *context DISPATCH_UNUSED) +{ + 
_dispatch_fork_becomes_unsafe(); + + unsigned int i; + for (i = 0; i < DSL_HASH_SIZE; i++) { + TAILQ_INIT(&_dispatch_sources[i]); + } + + _dispatch_epfd = epoll_create1(EPOLL_CLOEXEC); + if (_dispatch_epfd < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_create1() failed"); + } + + _dispatch_eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (_dispatch_eventfd < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_eventfd() failed"); + } + + struct epoll_event ev = { + .events = EPOLLIN | EPOLLFREE, + .data = { .u32 = DISPATCH_EPOLL_EVENTFD, }, + }; + int op = EPOLL_CTL_ADD; + if (epoll_ctl(_dispatch_epfd, op, _dispatch_eventfd, &ev) < 0) { + DISPATCH_INTERNAL_CRASH(errno, "epoll_ctl() failed"); + } + +#if DISPATCH_USE_MGR_THREAD + dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif +} + +void +_dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED, + uint64_t dq_state DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED) +{ + dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init); + dispatch_assume_zero(eventfd_write(_dispatch_eventfd, 1)); +} + +static void +_dispatch_event_merge_signal(dispatch_muxnote_t dmn) +{ + dispatch_unote_linkage_t dul, dul_next; + struct signalfd_siginfo si; + ssize_t rc; + + // Linux has the weirdest semantics around signals: if it finds a thread + // that has not masked a process wide-signal, it may deliver it to this + // thread, meaning that the signalfd may have been made readable, but the + // signal consumed through the legacy delivery mechanism. + // + // Because of this we can get a misfire of the signalfd yielding EAGAIN the + // first time around. The _dispatch_muxnote_signal_block_and_raise() hack + // will kick in, the thread with the wrong mask will be fixed up, and the + // signal delivered to us again properly. 
+ if ((rc = read(dmn->dmn_fd, &si, sizeof(si))) == sizeof(si)) { + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0); + } + } else { + dispatch_assume(rc == -1 && errno == EAGAIN); + } +} + +static uintptr_t +_dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) +{ + int n; + + if (writer ? dmn->dmn_skip_outq_ioctl : dmn->dmn_skip_inq_ioctl) { + return 1; + } + + if (ioctl((int)dmn->dmn_ident, writer ? SIOCOUTQ : SIOCINQ, &n) != 0) { + switch (errno) { + case EINVAL: + case ENOTTY: + // this file descriptor actually doesn't support the buffer + // size ioctl, remember that for next time to avoid the syscall. + break; + default: + dispatch_assume_zero(errno); + break; + } + if (writer) { + dmn->dmn_skip_outq_ioctl = true; + } else { + dmn->dmn_skip_inq_ioctl = true; + } + return 1; + } + return (uintptr_t)n; +} + +static void +_dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) +{ + dispatch_unote_linkage_t dul, dul_next; + uintptr_t data; + + if (events & EPOLLIN) { + data = _dispatch_get_buffer_size(dmn, false); + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + } + } + + if (events & EPOLLOUT) { + data = _dispatch_get_buffer_size(dmn, true); + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + } + } +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_drain(uint32_t flags) +{ + struct epoll_event ev[DISPATCH_EPOLL_MAX_EVENT_COUNT]; + int i, r; + int timeout = (flags & KEVENT_FLAG_IMMEDIATE) ? 
0 : -1; + +retry: + r = epoll_wait(_dispatch_epfd, ev, countof(ev), timeout); + if (unlikely(r == -1)) { + int err = errno; + switch (err) { + case EINTR: + goto retry; + case EBADF: + DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); + break; + default: + (void)dispatch_assume_zero(err); + break; + } + return; + } + + for (i = 0; i < r; i++) { + dispatch_muxnote_t dmn; + eventfd_t value; + + if (ev[i].events & EPOLLFREE) { + DISPATCH_CLIENT_CRASH(0, "Do not close random Unix descriptors"); + } + + switch (ev[i].data.u32) { + case DISPATCH_EPOLL_EVENTFD: + dispatch_assume_zero(eventfd_read(_dispatch_eventfd, &value)); + break; + + case DISPATCH_EPOLL_CLOCK_WALL: + _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); + break; + + case DISPATCH_EPOLL_CLOCK_MACH: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MACH); + break; + + default: + dmn = ev[i].data.ptr; + switch (dmn->dmn_filter) { + case EVFILT_SIGNAL: + _dispatch_event_merge_signal(dmn); + break; + + case EVFILT_READ: + _dispatch_event_merge_fd(dmn, ev[i].events); + break; + } + } + } +} + +void +_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, + dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) +{ + (void)dsc; (void)wlh; (void)old_state; (void)new_state; +} + +void +_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) +{ + if (dsc->dsc_release_storage) { + _dispatch_queue_release_storage(dsc->dc_data); + } +} + +void +_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, + uint64_t new_state, uint32_t flags) +{ + (void)wlh; (void)old_state; (void)new_state; (void)flags; +} + +#if DISPATCH_WLH_DEBUG +void +_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) +{ + (void)wlh; +} +#endif + +void +_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +{ + (void)wlh; (void)dq_state; +} + +#endif // DISPATCH_EVENT_BACKEND_EPOLL diff --git a/src/event/event_internal.h b/src/event/event_internal.h new file mode 100644 
index 000000000..842c4ee5b --- /dev/null +++ b/src/event/event_internal.h @@ -0,0 +1,449 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_EVENT_EVENT_INTERNAL__ +#define __DISPATCH_EVENT_EVENT_INTERNAL__ + +#include "event_config.h" + +struct dispatch_sync_context_s; +typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle +#define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0ul)) +#define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~2ul)) + +#define DISPATCH_UNOTE_DATA_ACTION_SIZE 2 + +#define DISPATCH_UNOTE_CLASS_HEADER() \ + dispatch_source_type_t du_type; \ + uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ + dispatch_wlh_t du_wlh; \ + uint32_t du_ident; \ + int8_t du_filter; \ + os_atomic(bool) dmsr_notification_armed; \ + uint16_t du_data_action : DISPATCH_UNOTE_DATA_ACTION_SIZE; \ + uint16_t du_is_direct : 1; \ + uint16_t du_is_timer : 1; \ + uint16_t du_memorypressure_override : 1; \ + uint16_t du_vmpressure_override : 1; \ + uint16_t du_can_be_wlh : 1; \ + uint16_t dmr_async_reply : 1; \ + uint16_t dmrr_handler_is_block : 1; \ + uint16_t du_unused : 7; \ + uint32_t du_fflags; \ + dispatch_priority_t du_priority + +#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) +#define _dispatch_wref2ptr(ref) ((void*)~(ref)) +#define _dispatch_source_from_refs(dr) \ + ((dispatch_source_t)_dispatch_wref2ptr((dr)->du_owner_wref)) + +DISPATCH_ENUM(dispatch_unote_action, uint8_t, + DISPATCH_UNOTE_ACTION_DATA_OR = 0, + DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET, + DISPATCH_UNOTE_ACTION_DATA_SET, + DISPATCH_UNOTE_ACTION_DATA_ADD, + DISPATCH_UNOTE_ACTION_LAST = DISPATCH_UNOTE_ACTION_DATA_ADD +); +_Static_assert(DISPATCH_UNOTE_ACTION_LAST < + (1 << DISPATCH_UNOTE_DATA_ACTION_SIZE), + "DISPATCH_UNOTE_ACTION_LAST too large for du_data_action field"); + +typedef struct dispatch_unote_class_s { + DISPATCH_UNOTE_CLASS_HEADER(); +} *dispatch_unote_class_t; + + +enum { + DS_EVENT_HANDLER = 0, + DS_CANCEL_HANDLER, + DS_REGISTN_HANDLER, +}; + +#define DISPATCH_SOURCE_REFS_HEADER() \ + DISPATCH_UNOTE_CLASS_HEADER(); \ + struct dispatch_continuation_s *volatile 
ds_handler[3] + +// Source state which may contain references to the source object +// Separately allocated so that 'leaks' can see sources +typedef struct dispatch_source_refs_s { + DISPATCH_SOURCE_REFS_HEADER(); +} *dispatch_source_refs_t; + +typedef struct dispatch_timer_delay_s { + uint64_t delay, leeway; +} dispatch_timer_delay_s; + +#define DTH_INVALID_ID (~0u) +#define DTH_TARGET_ID 0u +#define DTH_DEADLINE_ID 1u +#define DTH_ID_COUNT 2u + +typedef struct dispatch_timer_source_s { + union { + struct { + uint64_t target; + uint64_t deadline; + }; + uint64_t heap_key[DTH_ID_COUNT]; + }; + uint64_t interval; +} *dispatch_timer_source_t; + +typedef struct dispatch_timer_config_s { + struct dispatch_timer_source_s dtc_timer; + dispatch_clock_t dtc_clock; +} *dispatch_timer_config_t; + +typedef struct dispatch_timer_source_refs_s { + DISPATCH_SOURCE_REFS_HEADER(); + struct dispatch_timer_source_s dt_timer; + struct dispatch_timer_config_s *dt_pending_config; + uint32_t dt_heap_entry[DTH_ID_COUNT]; +} *dispatch_timer_source_refs_t; + +typedef struct dispatch_timer_heap_s { + uint64_t dth_target, dth_deadline; + uint32_t dth_count; + uint16_t dth_segments; +#define DTH_ARMED 1u + uint16_t dth_flags; + dispatch_timer_source_refs_t dth_min[DTH_ID_COUNT]; + void **dth_heap; +} *dispatch_timer_heap_t; + +#if HAVE_MACH +#if DISPATCH_MACHPORT_DEBUG +void dispatch_debug_machport(mach_port_t name, const char *str); +#define _dispatch_debug_machport(name) \ + dispatch_debug_machport((name), __func__) +#else +#define _dispatch_debug_machport(name) ((void)(name)) +#endif // DISPATCH_MACHPORT_DEBUG + +// Mach channel state which may contain references to the channel object +// layout must match dispatch_source_refs_s +struct dispatch_mach_recv_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_mach_handler_function_t dmrr_handler_func; + void *dmrr_handler_ctxt; +}; +typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; + +struct dispatch_mach_reply_refs_s { + 
DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_priority_t dmr_priority; + void *dmr_ctxt; + voucher_t dmr_voucher; + TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; + mach_port_t dmr_waiter_tid; +}; +typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; + +#define _DISPATCH_MACH_STATE_UNUSED_MASK 0xffffffa000000000ull +#define DISPATCH_MACH_STATE_DIRTY 0x0000002000000000ull +#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000001000000000ull +#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000800000000ull +#define DISPATCH_MACH_STATE_MAX_QOS_MASK 0x0000000700000000ull +#define DISPATCH_MACH_STATE_MAX_QOS_SHIFT 32 +#define DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + +struct dispatch_mach_send_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); + dispatch_mach_msg_t dmsr_checkin; + TAILQ_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies; + dispatch_unfair_lock_s dmsr_replies_lock; +#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) +#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) +#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) + uint32_t volatile dmsr_disconnect_cnt; + DISPATCH_UNION_LE(uint64_t volatile dmsr_state, + dispatch_unfair_lock_s dmsr_state_lock, + uint32_t dmsr_state_bits + ) DISPATCH_ATOMIC64_ALIGN; + struct dispatch_object_s *volatile dmsr_tail; + struct dispatch_object_s *volatile dmsr_head; + mach_port_t dmsr_send, dmsr_checkin_port; +}; +typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; + +void _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr); + +struct dispatch_xpc_term_refs_s { + DISPATCH_UNOTE_CLASS_HEADER(); +}; +typedef struct dispatch_xpc_term_refs_s *dispatch_xpc_term_refs_t; +#endif // HAVE_MACH + +typedef union dispatch_unote_u { + dispatch_unote_class_t _du; + dispatch_source_refs_t _dr; + dispatch_timer_source_refs_t _dt; +#if HAVE_MACH + dispatch_mach_recv_refs_t _dmrr; + dispatch_mach_send_refs_t _dmsr; + 
dispatch_mach_reply_refs_t _dmr; + dispatch_xpc_term_refs_t _dxtr; +#endif +} dispatch_unote_t DISPATCH_TRANSPARENT_UNION; + +#define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL }) + +#if TARGET_OS_EMBEDDED +#define DSL_HASH_SIZE 64u // must be a power of two +#else +#define DSL_HASH_SIZE 256u // must be a power of two +#endif +#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) + +typedef struct dispatch_unote_linkage_s { + TAILQ_ENTRY(dispatch_unote_linkage_s) du_link; + struct dispatch_muxnote_s *du_muxnote; +} DISPATCH_ATOMIC64_ALIGN *dispatch_unote_linkage_t; + +#define DU_UNREGISTER_IMMEDIATE_DELETE 0x01 +#define DU_UNREGISTER_ALREADY_DELETED 0x02 +#define DU_UNREGISTER_DISCONNECTED 0x04 +#define DU_UNREGISTER_REPLY_REMOVE 0x08 + +typedef struct dispatch_source_type_s { + const char *dst_kind; + int8_t dst_filter; + uint8_t dst_per_trigger_qos : 1; + uint16_t dst_flags; + uint32_t dst_fflags; + uint32_t dst_mask; + uint32_t dst_size; +#if DISPATCH_EVENT_BACKEND_KEVENT + uint32_t dst_data; +#endif + + dispatch_unote_t (*dst_create)(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +#if DISPATCH_EVENT_BACKEND_KEVENT + bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn); +#endif + void (*dst_merge_evt)(dispatch_unote_t du, uint32_t flags, uintptr_t data, + uintptr_t status, pthread_priority_t pp); +#if HAVE_MACH + void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t sz); +#endif +} dispatch_source_type_s; + +#define dux_create(dst, handle, mask) (dst)->dst_create(dst, handle, mask) +#define dux_merge_evt(du, ...) (du)->du_type->dst_merge_evt(du, __VA_ARGS__) +#define dux_merge_msg(du, ...) 
		(du)->du_type->dst_merge_msg(du, __VA_ARGS__)

extern const dispatch_source_type_s _dispatch_source_type_after;

#if HAVE_MACH
extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct;
extern const dispatch_source_type_s _dispatch_mach_type_send;
extern const dispatch_source_type_s _dispatch_mach_type_recv;
extern const dispatch_source_type_s _dispatch_mach_type_reply;
extern const dispatch_source_type_s _dispatch_xpc_type_sigterm;
#endif

#pragma mark -
#pragma mark deferred items

#if DISPATCH_EVENT_BACKEND_KEVENT
#if DISPATCH_USE_KEVENT_QOS
typedef struct kevent_qos_s dispatch_kevent_s;
#else
typedef struct kevent dispatch_kevent_s;
#endif
typedef dispatch_kevent_s *dispatch_kevent_t;
#endif // DISPATCH_EVENT_BACKEND_KEVENT

#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 16

// Per-thread scratch area stashed in TSD while a thread services the kernel
// event loop: a stashed work item plus (kevent backend only) a batch of
// events to flush back to the kernel on the next transition.
typedef struct dispatch_deferred_items_s {
	dispatch_queue_t ddi_stashed_rq;
	dispatch_object_t ddi_stashed_dou;
	dispatch_qos_t ddi_stashed_qos;
#if DISPATCH_EVENT_BACKEND_KEVENT
	dispatch_kevent_t ddi_eventlist;
	uint16_t ddi_nevents;
	uint16_t ddi_maxevents;
	bool ddi_can_stash;
	uint16_t ddi_wlh_needs_delete : 1;
	uint16_t ddi_wlh_needs_update : 1;
	uint16_t ddi_wlh_servicing : 1;
#endif
} dispatch_deferred_items_s, *dispatch_deferred_items_t;

#pragma mark -
#pragma mark inlines

#if DISPATCH_PURE_C

// Install the deferred-items area for the current thread (TSD slot).
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_deferred_items_set(dispatch_deferred_items_t ddi)
{
	_dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi);
}

// Fetch the current thread's deferred-items area, or NULL when the thread
// is not servicing the event loop.
DISPATCH_ALWAYS_INLINE
static inline dispatch_deferred_items_t
_dispatch_deferred_items_get(void)
{
	return (dispatch_deferred_items_t)
			_dispatch_thread_getspecific(dispatch_deferred_items_key);
}

// "r2k" (return-to-kernel) flag: set when this thread owes the kernel a
// state update before it can park again.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_needs_to_return_to_kernel(void)
{
	return (uintptr_t)_dispatch_thread_getspecific(dispatch_r2k_key) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_set_return_to_kernel(void)
{
	_dispatch_thread_setspecific(dispatch_r2k_key, (void *)1);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_clear_return_to_kernel(void)
{
	_dispatch_thread_setspecific(dispatch_r2k_key, (void *)0);
}

// A unote is registered iff it has been attached to a workloop handle.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unote_registered(dispatch_unote_t du)
{
	return du._du->du_wlh != NULL;
}

// True when the unote is registered on some non-anonymous wlh other than
// the one the caller expects (i.e. it would need a re-registration).
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unote_wlh_changed(dispatch_unote_t du, dispatch_wlh_t expected_wlh)
{
	dispatch_wlh_t wlh = du._du->du_wlh;
	return wlh && wlh != DISPATCH_WLH_ANON && wlh != expected_wlh;
}

// The linkage record is allocated immediately BEFORE the unote; only
// muxed (non-direct) unotes have one.
DISPATCH_ALWAYS_INLINE
static inline dispatch_unote_linkage_t
_dispatch_unote_get_linkage(dispatch_unote_t du)
{
	dispatch_assert(!du._du->du_is_direct);
	return (dispatch_unote_linkage_t)((char *)du._du
			- sizeof(struct dispatch_unote_linkage_s));
}

// EV_ONESHOT/EV_DISPATCH sources disarm on delivery and need explicit rearm.
DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unote_needs_rearm(dispatch_unote_t du)
{
	return du._du->du_type->dst_flags & (EV_ONESHOT | EV_DISPATCH);
}

// Inverse of _dispatch_unote_get_linkage(): the unote follows its linkage.
DISPATCH_ALWAYS_INLINE
static inline dispatch_unote_t
_dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul)
{
	return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)(dul + 1) };
}

#endif // DISPATCH_PURE_C

#pragma mark -
#pragma mark prototypes

#if DISPATCH_HAVE_TIMER_QOS
#define DISPATCH_TIMER_QOS_NORMAL 0u
#define DISPATCH_TIMER_QOS_CRITICAL 1u
#define DISPATCH_TIMER_QOS_BACKGROUND 2u
#define DISPATCH_TIMER_QOS_COUNT 3u
#else
#define DISPATCH_TIMER_QOS_NORMAL 0u
#define DISPATCH_TIMER_QOS_COUNT 1u
#endif

// Timer indices interleave (clock, qos): bit 0 is the clock, the rest is qos.
#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 3u)
#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) & 1u)

#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock))
#define DISPATCH_TIMER_COUNT \
		DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT)
#define DISPATCH_TIMER_IDENT_CANCELED (~0u)

extern struct dispatch_timer_heap_s
_dispatch_timers_heap[DISPATCH_TIMER_COUNT]; +extern bool _dispatch_timers_reconfigure, _dispatch_timers_expired; +extern uint32_t _dispatch_timers_processing_mask; +#if DISPATCH_USE_DTRACE +extern uint32_t _dispatch_timers_will_wake; +#endif + +dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask); +dispatch_unote_t _dispatch_unote_create_without_handle( + dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); + +bool _dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_priority_t pri); +void _dispatch_unote_resume(dispatch_unote_t du); +bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags); +void _dispatch_unote_dispose(dispatch_unote_t du); + +void _dispatch_event_loop_atfork_child(void); +#define DISPATCH_EVENT_LOOP_CONSUME_2 DISPATCH_WAKEUP_CONSUME_2 +#define DISPATCH_EVENT_LOOP_OVERRIDE 0x80000000 +void _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, + uint32_t flags); +void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc, + dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state); +void _dispatch_event_loop_wait_for_ownership( + struct dispatch_sync_context_s *dsc); +void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, + uint64_t old_state, uint64_t new_state, uint32_t flags); +#if DISPATCH_WLH_DEBUG +void _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh); +#else +#undef _dispatch_event_loop_assert_not_owned +#define _dispatch_event_loop_assert_not_owned(wlh) ((void)wlh) +#endif +void _dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state); +#if DISPATCH_EVENT_BACKEND_KEVENT +void _dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, + uint64_t dq_state); +void _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents); +#endif +void 
_dispatch_event_loop_drain(uint32_t flags); +void _dispatch_event_loop_timer_arm(unsigned int tidx, + dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows); +void _dispatch_event_loop_timer_delete(unsigned int tidx); + +#endif /* __DISPATCH_EVENT_EVENT_INTERNAL__ */ diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c new file mode 100644 index 000000000..8fe76d55c --- /dev/null +++ b/src/event/event_kevent.c @@ -0,0 +1,2208 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if DISPATCH_EVENT_BACKEND_KEVENT +#if HAVE_MACH +#include "protocol.h" +#include "protocolServer.h" +#endif + +#if DISPATCH_USE_KEVENT_WORKQUEUE && !DISPATCH_USE_KEVENT_QOS +#error unsupported configuration +#endif + +#define DISPATCH_KEVENT_MUXED_MARKER 1ul +#define DISPATCH_MACH_AUDIT_TOKEN_PID (5) + +typedef struct dispatch_muxnote_s { + TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; + TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head; + dispatch_wlh_t dmn_wlh; + dispatch_kevent_s dmn_kev; +} *dispatch_muxnote_t; + +static bool _dispatch_timers_force_max_leeway; +static int _dispatch_kq = -1; +static struct { + dispatch_once_t pred; + dispatch_unfair_lock_s lock; +} _dispatch_muxnotes; +#if !DISPATCH_USE_KEVENT_WORKQUEUE +#define _dispatch_muxnotes_lock() \ + _dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock) +#define _dispatch_muxnotes_unlock() \ + _dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock) +#else +#define _dispatch_muxnotes_lock() +#define _dispatch_muxnotes_unlock() +#endif // !DISPATCH_USE_KEVENT_WORKQUEUE + +DISPATCH_CACHELINE_ALIGN +static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) +_dispatch_sources[DSL_HASH_SIZE]; + +#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME +#define DISPATCH_NOTE_CLOCK_MACH 0 + +static const uint32_t _dispatch_timer_index_to_fflags[] = { +#define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \ + [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \ + DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \ + NOTE_NSECONDS | NOTE_LEEWAY | (note) + DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0), + DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND), + DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, 
			NOTE_BACKGROUND),
#endif
#undef DISPATCH_TIMER_FFLAGS_INIT
};

static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke);

#pragma mark -
#pragma mark kevent debug

// Human-readable name for a kevent filter (debug logging only).
DISPATCH_NOINLINE
static const char *
_evfiltstr(short filt)
{
	switch (filt) {
#define _evfilt2(f) case (f): return #f
	_evfilt2(EVFILT_READ);
	_evfilt2(EVFILT_WRITE);
	_evfilt2(EVFILT_SIGNAL);
	_evfilt2(EVFILT_TIMER);

#ifdef DISPATCH_EVENT_BACKEND_KEVENT
	_evfilt2(EVFILT_AIO);
	_evfilt2(EVFILT_VNODE);
	_evfilt2(EVFILT_PROC);
#if HAVE_MACH
	_evfilt2(EVFILT_MACHPORT);
	_evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION);
#endif
	_evfilt2(EVFILT_FS);
	_evfilt2(EVFILT_USER);
#ifdef EVFILT_SOCK
	_evfilt2(EVFILT_SOCK);
#endif
#ifdef EVFILT_MEMORYSTATUS
	_evfilt2(EVFILT_MEMORYSTATUS);
#endif
#endif // DISPATCH_EVENT_BACKEND_KEVENT

	_evfilt2(DISPATCH_EVFILT_TIMER);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_ADD);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_OR);
	_evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE);
	default:
		return "EVFILT_missing";
	}
}

#if DISPATCH_DEBUG
// Consume one flag bit from *flagsp and return its name followed by '|';
// called in a loop by _evflagstr() until all bits are accounted for.
static const char *
_evflagstr2(uint16_t *flagsp)
{
#define _evflag2(f) \
	if ((*flagsp & (f)) == (f) && (f)) { \
		*flagsp &= ~(f); \
		return #f "|"; \
	}
	_evflag2(EV_ADD);
	_evflag2(EV_DELETE);
	_evflag2(EV_ENABLE);
	_evflag2(EV_DISABLE);
	_evflag2(EV_ONESHOT);
	_evflag2(EV_CLEAR);
	_evflag2(EV_RECEIPT);
	_evflag2(EV_DISPATCH);
	_evflag2(EV_UDATA_SPECIFIC);
#ifdef EV_POLL
	_evflag2(EV_POLL);
#endif
#ifdef EV_OOBAND
	_evflag2(EV_OOBAND);
#endif
	_evflag2(EV_ERROR);
	_evflag2(EV_EOF);
	_evflag2(EV_VANISHED);
	*flagsp = 0;
	return "EV_UNKNOWN ";
}

// Render a flag mask as "EV_A|EV_B" into str (trailing '|' stripped).
DISPATCH_NOINLINE
static const char *
_evflagstr(uint16_t flags, char *str, size_t strsize)
{
	str[0] = 0;
	while (flags) {
		strlcat(str, _evflagstr2(&flags), strsize);
	}
	size_t sz = strlen(str);
	if (sz) str[sz-1] = 0;
	return str;
}

// Log a decoded kevent; i/n identify its position in a batch, verb defaults
// to adding/deleting/updating based on the event's flags.
DISPATCH_NOINLINE
static void
dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
		int i, int n, const char *function, unsigned int line)
{
	char flagstr[256];
	char i_n[31];

	if (n > 1) {
		snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n);
	} else {
		i_n[0] = '\0';
	}
	if (verb == NULL) {
		if (kev->flags & EV_DELETE) {
			verb = "deleting";
		} else if (kev->flags & EV_ADD) {
			verb = "adding";
		} else {
			verb = "updating";
		}
	}
#if DISPATCH_USE_KEVENT_QOS
	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, "
			"qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, "
			"ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident,
			_evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
			kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3],
			function, line);
#else
	_dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, "
			"flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: "
			"%s #%u", verb, kev, i_n,
			kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr,
			sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata,
			function, line);
#endif
}
#else
// Release builds: kevent debug logging compiles away.
static inline void
dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev,
		int i, int n, const char *function, unsigned int line)
{
	(void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line;
}
#endif // DISPATCH_DEBUG
#define _dispatch_kevent_debug_n(verb, _kev, i, n) \
		dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__)
#define _dispatch_kevent_debug(verb, _kev) \
		_dispatch_kevent_debug_n(verb, _kev, 0, 0)
#if DISPATCH_MGR_QUEUE_DEBUG
#define _dispatch_kevent_mgr_debug(verb, kev) _dispatch_kevent_debug(verb, kev)
#else
#define _dispatch_kevent_mgr_debug(verb, kev) ((void)verb, (void)kev)
#endif // DISPATCH_MGR_QUEUE_DEBUG
#if DISPATCH_WLH_DEBUG
#define _dispatch_kevent_wlh_debug(verb, kev) \
		_dispatch_kevent_debug(verb, kev)
#else
#define _dispatch_kevent_wlh_debug(verb, kev) ((void)verb, (void)kev)
#endif // DISPATCH_WLH_DEBUG

#if DISPATCH_MACHPORT_DEBUG
#ifndef MACH_PORT_TYPE_SPREQUEST
#define MACH_PORT_TYPE_SPREQUEST 0x40000000
#endif

// Dump a mach port's right counts, notification requests and receive status
// to the debug log; purely diagnostic, never affects the port.
DISPATCH_NOINLINE
void
dispatch_debug_machport(mach_port_t name, const char* str)
{
	mach_port_type_t type;
	mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0;
	unsigned int dnreqs = 0, dnrsiz;
	kern_return_t kr = mach_port_type(mach_task_self(), name, &type);
	if (kr) {
		_dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name,
				kr, mach_error_string(kr), str);
		return;
	}
	if (type & MACH_PORT_TYPE_SEND) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_SEND, &ns));
	}
	if (type & MACH_PORT_TYPE_SEND_ONCE) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_SEND_ONCE, &nso));
	}
	if (type & MACH_PORT_TYPE_DEAD_NAME) {
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_DEAD_NAME, &nd));
	}
	if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) {
		kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs);
		if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr);
	}
	if (type & MACH_PORT_TYPE_RECEIVE) {
		mach_port_status_t status = { .mps_pset = 0, };
		mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT;
		(void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name,
				MACH_PORT_RIGHT_RECEIVE, &nr));
		(void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(),
				name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt));
		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
				"dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) "
				"sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) "
				"seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs,
				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N",
				status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? "Y":"N",
				status.mps_srights ? "Y":"N", status.mps_sorights,
				status.mps_qlimit, status.mps_msgcount, status.mps_mscount,
				status.mps_seqno, str);
	} else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE|
			MACH_PORT_TYPE_DEAD_NAME)) {
		_dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) "
				"dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs,
				type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str);
	} else {
		_dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type,
				str);
	}
}
#endif

#pragma mark dispatch_kevent_t

#if HAVE_MACH

static dispatch_once_t _dispatch_mach_host_port_pred;
static mach_port_t _dispatch_mach_host_port;

// ext[0] of an EVFILT_MACHPORT event carries the receive buffer address.
static inline void*
_dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke)
{
	return (void*)ke->ext[0];
}

static inline mach_msg_size_t
_dispatch_kevent_mach_msg_size(dispatch_kevent_t ke)
{
	// buffer size in the successful receive case, but message size (like
	// msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size.
	return (mach_msg_size_t)ke->ext[1];
}

static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke);
static inline void _dispatch_mach_host_calendar_change_register(void);

// DISPATCH_MACH_NOTIFICATION_ARMED are muxnotes that aren't registered with
// kevent for real, but with mach_port_request_notification()
//
// the kevent structure is used for bookkeeping:
// - ident, filter, flags and fflags have their usual meaning
// - data is used to monitor the actual state of the
//   mach_port_request_notification()
// - ext[0] is a boolean that tracks whether the notification is armed or not
#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0])
#endif

// udata either carries a muxnote pointer tagged with
// DISPATCH_KEVENT_MUXED_MARKER, or a unote pointer directly.
DISPATCH_ALWAYS_INLINE
static dispatch_muxnote_t
_dispatch_kevent_get_muxnote(dispatch_kevent_t ke)
{
	uintptr_t dmn_addr = (uintptr_t)ke->udata & ~DISPATCH_KEVENT_MUXED_MARKER;
	return (dispatch_muxnote_t)dmn_addr;
}

DISPATCH_ALWAYS_INLINE
static dispatch_unote_t
_dispatch_kevent_get_unote(dispatch_kevent_t ke)
{
	dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0);
	return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata };
}

// Handle an EV_ERROR result: fix up flags for deferred deletes / unregistered
// unotes, crash on unsupported EVFILT_MACHPORT setups, and log anything else.
DISPATCH_NOINLINE
static void
_dispatch_kevent_print_error(dispatch_kevent_t ke)
{
	_dispatch_debug("kevent[0x%llx]: handling error",
			(unsigned long long)ke->udata);
	if (ke->flags & EV_DELETE) {
		if (ke->flags & EV_UDATA_SPECIFIC) {
			if (ke->data == EINPROGRESS) {
				// deferred EV_DELETE
				return;
			}
		}
		// for EV_DELETE if the update was deferred we may have reclaimed
		// the udata already, and it is unsafe to dereference it now.
	} else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
		ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags;
	} else if (ke->udata) {
		if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) {
			ke->flags |= EV_ADD;
		}
	}

#if HAVE_MACH
	if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP &&
			(ke->flags & EV_ADD) && (ke->fflags & MACH_RCV_MSG)) {
		DISPATCH_INTERNAL_CRASH(ke->ident,
				"Missing EVFILT_MACHPORT support for ports");
	}
#endif

	if (ke->data) {
		// log the unexpected error
		_dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter),
				!ke->udata ? NULL :
				ke->flags & EV_DELETE ? "delete" :
				ke->flags & EV_ADD ? "add" :
				ke->flags & EV_ENABLE ? "enable" : "monitor",
				(int)ke->data);
	}
}

// Translate a raw kevent into (data, status, priority) per the unote's
// declared data action, then hand it to the unote's merge callback.
DISPATCH_NOINLINE
static void
_dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke)
{
	uintptr_t data;
	uintptr_t status = 0;
	pthread_priority_t pp = 0;
#if DISPATCH_USE_KEVENT_QOS
	pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK;
#endif
	dispatch_unote_action_t action = du._du->du_data_action;
	if (action == DISPATCH_UNOTE_ACTION_DATA_SET) {
		// ke->data is signed and "negative available data" makes no sense
		// zero bytes happens when EV_EOF is set
		// DATA_SET transports the value one's-complemented (~), the reader
		// undoes it; see the epoll backend's _dispatch_event_merge_fd too
		dispatch_assert(ke->data >= 0l);
		data = ~(unsigned long)ke->data;
#if HAVE_MACH
	} else if (du._du->du_filter == EVFILT_MACHPORT) {
		data = DISPATCH_MACH_RECV_MESSAGE;
#endif
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) {
		data = (unsigned long)ke->data;
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) {
		data = ke->fflags & du._du->du_fflags;
	} else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) {
		data = ke->fflags & du._du->du_fflags;
		status = (unsigned long)ke->data;
	} else {
		DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action");
	}
	return dux_merge_evt(du._du, ke->flags, data, status, pp);
}

// Fan a muxed kevent out to every unote attached to its muxnote.
DISPATCH_NOINLINE
static void
_dispatch_kevent_merge_muxed(dispatch_kevent_t ke)
{
	dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke);
	dispatch_unote_linkage_t dul, dul_next;

	TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) {
		_dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke);
	}
}

// Main per-event dispatcher for events returned by the kernel: routes to the
// error, timer, mach-msg, muxed, or direct-unote handling paths.
DISPATCH_NOINLINE
static void
_dispatch_kevent_drain(dispatch_kevent_t ke)
{
	if (ke->filter == EVFILT_USER) {
		_dispatch_kevent_mgr_debug("received", ke);
		return;
	}
	_dispatch_kevent_debug("received", ke);
	if (unlikely(ke->flags & EV_ERROR)) {
		if (ke->filter == EVFILT_PROC && ke->data == ESRCH) {
			// EVFILT_PROC may fail with ESRCH when the process exists but
			// is a zombie. As a workaround, we simulate an exit event for
			// any EVFILT_PROC with an invalid pid.
			ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC);
			ke->flags |= EV_ONESHOT;
			ke->fflags = NOTE_EXIT;
			ke->data = 0;
			_dispatch_kevent_debug("synthetic NOTE_EXIT", ke);
		} else {
			return _dispatch_kevent_print_error(ke);
		}
	}
	if (ke->filter == EVFILT_TIMER) {
		return _dispatch_kevent_timer_drain(ke);
	}

#if HAVE_MACH
	if (ke->filter == EVFILT_MACHPORT) {
		if (_dispatch_kevent_mach_msg_size(ke)) {
			return _dispatch_kevent_mach_msg_drain(ke);
		}
	}
#endif

	if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) {
		return _dispatch_kevent_merge_muxed(ke);
	}
	return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke);
}

#pragma mark dispatch_kq

#if DISPATCH_USE_MGR_THREAD
// Create the manager kqueue (guarded against accidental close where
// supported) and prime it with the manager's EVFILT_USER wakeup event.
DISPATCH_NOINLINE
static int
_dispatch_kq_create(const void *guard_ptr)
{
	static const dispatch_kevent_s kev = {
		.ident = 1,
		.filter = EVFILT_USER,
		.flags = EV_ADD|EV_CLEAR,
		.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
	};
	int kqfd;

	_dispatch_fork_becomes_unsafe();
#if DISPATCH_USE_GUARDED_FD
	guardid_t guard = (uintptr_t)guard_ptr;
	kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP);
#else
	(void)guard_ptr;
	kqfd = kqueue();
#endif
	if (kqfd == -1) {
		int err = errno;
		switch (err) {
		case EMFILE:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"process is out of file descriptors");
			break;
		case ENFILE:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"system is out of file descriptors");
			break;
		case ENOMEM:
			DISPATCH_CLIENT_CRASH(err, "kqueue() failure: "
					"kernel is out of memory");
			break;
		default:
			DISPATCH_INTERNAL_CRASH(err, "kqueue() failure");
			break;
		}
	}
#if DISPATCH_USE_KEVENT_QOS
	dispatch_assume_zero(kevent_qos(kqfd, &kev, 1, NULL, 0, NULL, NULL, 0));
#else
	dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL));
#endif
	return kqfd;
}
#endif

// One-time event-loop bring-up (run under dispatch_once from
// _dispatch_kq_poll): registers the kevent workqueue manager event, or
// falls back to creating the manager kqueue and thread.
static void
_dispatch_kq_init(void *context)
{
	bool *kq_initialized = context;

	_dispatch_fork_becomes_unsafe();
	if (unlikely(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) {
		_dispatch_timers_force_max_leeway = true;
	}
	*kq_initialized = true;

#if DISPATCH_USE_KEVENT_WORKQUEUE
	_dispatch_kevent_workqueue_init();
	if (_dispatch_kevent_workqueue_enabled) {
		int r;
		int kqfd = _dispatch_kq;
		const dispatch_kevent_s ke = {
			.ident = 1,
			.filter = EVFILT_USER,
			.flags = EV_ADD|EV_CLEAR,
			.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
			.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
		};
retry:
		r = kevent_qos(kqfd, &ke, 1, NULL, 0, NULL, NULL,
				KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE);
		if (unlikely(r == -1)) {
			int err = errno;
			switch (err) {
			case EINTR:
				goto retry;
			default:
				DISPATCH_CLIENT_CRASH(err,
						"Failed to initalize workqueue kevent");
				break;
			}
		}
		return;
	}
#endif // DISPATCH_USE_KEVENT_WORKQUEUE
#if DISPATCH_USE_MGR_THREAD
	_dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q);
	dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0);
#endif // DISPATCH_USE_MGR_THREAD
}

#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
static void _dispatch_memorypressure_init(void);
#else
#define _dispatch_memorypressure_init() ((void)0)
#endif

// Central kevent syscall wrapper: ensures one-time init, then submits n
// changes / receives up to n_out events, retrying on EINTR and treating
// ENOMEM as a temporary resource shortage.
DISPATCH_NOINLINE
static int
_dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
		dispatch_kevent_t ke_out, int n_out, void *buf, size_t *avail,
		uint32_t flags)
{
	static dispatch_once_t pred;
	bool kq_initialized = false;
	int r = 0;

	dispatch_once_f(&pred, &kq_initialized, _dispatch_kq_init);
	if (unlikely(kq_initialized)) {
		// The calling thread was the one doing the initialization
		//
		// The event loop needs the memory pressure source and debug channel,
		// however creating these will recursively call _dispatch_kq_poll(),
		// so we can't quite initialize them under the dispatch once.
		_dispatch_memorypressure_init();
		_voucher_activity_debug_channel_init();
	}


#if !DISPATCH_USE_KEVENT_QOS
	if (flags & KEVENT_FLAG_ERROR_EVENTS) {
		// emulate KEVENT_FLAG_ERROR_EVENTS
		for (r = 0; r < n; r++) {
			ke[r].flags |= EV_RECEIPT;
		}
		// NOTE(review): `out_n` is not declared anywhere in this function's
		// visible scope (the parameter is `n_out`) — this branch looks like
		// it cannot compile on !DISPATCH_USE_KEVENT_QOS platforms; verify
		// against the full file/upstream before building that configuration.
		out_n = n;
	}
#endif

retry:
	if (wlh == DISPATCH_WLH_ANON) {
		int kqfd = _dispatch_kq;
#if DISPATCH_USE_KEVENT_QOS
		if (_dispatch_kevent_workqueue_enabled) {
			flags |= KEVENT_FLAG_WORKQ;
		}
		r = kevent_qos(kqfd, ke, n, ke_out, n_out, buf, avail, flags);
#else
		const struct timespec timeout_immediately = {}, *timeout = NULL;
		if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately;
		r = kevent(kqfd, ke, n, ke_out, n_out, timeout);
#endif
	}
	if (unlikely(r == -1)) {
		int err = errno;
		switch (err) {
		case ENOMEM:
			_dispatch_temporary_resource_shortage();
			/* FALLTHROUGH */
		case EINTR:
			goto retry;
		case EBADF:
			DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors");
		default:
			DISPATCH_CLIENT_CRASH(err, "Unexpected error from kevent");
		}
	}
	return r;
}

// NOTE(review): _dispatch_kq_drain continues past the end of this chunk;
// only its opening lines are visible here.
DISPATCH_NOINLINE
static int
_dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n,
		uint32_t flags)
{
	dispatch_kevent_s ke_out[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT];
	bool poll_for_events = !(flags & KEVENT_FLAG_ERROR_EVENTS);
	int i, n_out = countof(ke_out), r = 0;
	size_t *avail = NULL;
	void *buf = NULL;

#if
DISPATCH_USE_KEVENT_QOS
	size_t size;
	if (poll_for_events) {
		// Stack buffer large enough for an inline mach message + trailer.
		size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE +
				DISPATCH_MACH_TRAILER_SIZE;
		buf = alloca(size);
		avail = &size;
	}
#endif

#if DISPATCH_DEBUG
	for (r = 0; r < n; r++) {
		if (ke[r].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) {
			_dispatch_kevent_debug_n(NULL, ke + r, r, n);
		}
	}
#endif

	if (poll_for_events) _dispatch_clear_return_to_kernel();
	n = _dispatch_kq_poll(wlh, ke, n, ke_out, n_out, buf, avail, flags);
	if (n == 0) {
		r = 0;
	} else if (flags & KEVENT_FLAG_ERROR_EVENTS) {
		// Only process events that carry an error; remember the last
		// non-zero error code as the return value.
		for (i = 0, r = 0; i < n; i++) {
			if ((ke_out[i].flags & EV_ERROR) && ke_out[i].data) {
				_dispatch_kevent_drain(&ke_out[i]);
				r = (int)ke_out[i].data;
			}
		}
	} else {
		for (i = 0, r = 0; i < n; i++) {
			_dispatch_kevent_drain(&ke_out[i]);
		}
	}
	return r;
}

// Synchronously flush one kevent change; returns its error code (0 = ok).
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_kq_update_one(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	return _dispatch_kq_drain(wlh, ke, 1,
			KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
}

// Synchronously flush a batch of kevent changes, ignoring error codes.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_update_all(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n)
{
	(void)_dispatch_kq_drain(wlh, ke, n,
			KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS);
}

// Fill *dk with the kevent describing unote `_du` for `action`
// (EV_ADD/EV_DELETE/EV_ENABLE...), merging in the source type's flags.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk,
		uint16_t action)
{
	dispatch_unote_class_t du = _du._du;
	dispatch_source_type_t dst = du->du_type;
	uint16_t flags = dst->dst_flags | action;

	if ((flags & EV_VANISHED) && !(flags & EV_ADD)) {
		// EV_VANISHED is only meaningful together with EV_ADD.
		flags &= ~EV_VANISHED;
	}
	pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority);
	*dk = (dispatch_kevent_s){
		.ident = du->du_ident,
		.filter = dst->dst_filter,
		.flags = flags,
		.udata = (uintptr_t)du,
		.fflags = du->du_fflags | dst->dst_fflags,
		.data = (typeof(dk->data))dst->dst_data,
#if DISPATCH_USE_KEVENT_QOS
		.qos = (typeof(dk->qos))pp,
#endif
	};
}

// Find the index of a pending deferred kevent matching (filter, ident,
// udata); returns ddi_nevents when there is no match.
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi,
		int16_t filter, uint64_t ident, uint64_t udata)
{
	dispatch_kevent_t events = ddi->ddi_eventlist;
	int i;

	for (i = 0; i < ddi->ddi_nevents; i++) {
		if (events[i].filter == filter && events[i].ident == ident &&
				events[i].udata == udata) {
			break;
		}
	}
	return i;
}

// Return a writable pointer for `slot`, flushing the full deferred list
// first when it overflows, and growing ddi_nevents when appending.
DISPATCH_ALWAYS_INLINE
static inline dispatch_kevent_t
_dispatch_kq_deferred_reuse_slot(dispatch_wlh_t wlh,
		dispatch_deferred_items_t ddi, int slot)
{
	if (wlh != DISPATCH_WLH_ANON) _dispatch_set_return_to_kernel();
	if (unlikely(slot == ddi->ddi_maxevents)) {
		// List is full: flush everything now, then restart at slot 0.
		int nevents = ddi->ddi_nevents;
		ddi->ddi_nevents = 1;
		_dispatch_kq_update_all(wlh, ddi->ddi_eventlist, nevents);
		dispatch_assert(ddi->ddi_nevents == 1);
		slot = 0;
	} else if (slot == ddi->ddi_nevents) {
		ddi->ddi_nevents++;
	}
	return ddi->ddi_eventlist + slot;
}

// Drop a deferred slot by swapping the last element into its place
// (order of the deferred list does not matter).
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_kq_deferred_discard_slot(dispatch_deferred_items_t ddi, int slot)
{
	if (slot < ddi->ddi_nevents) {
		int last = --ddi->ddi_nevents;
		if (slot != last) {
			ddi->ddi_eventlist[slot] = ddi->ddi_eventlist[last];
		}
	}
}

// Queue a kevent change to be applied on the next trip to the kernel when
// a deferred list for this wlh is available, else apply it immediately.
DISPATCH_NOINLINE
static void
_dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();

	if (ddi && ddi->ddi_maxevents && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
				ke->udata);
		dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
		*dk = *ke;
		if (ke->filter != EVFILT_USER) {
			_dispatch_kevent_mgr_debug("deferred", ke);
		}
	} else {
		_dispatch_kq_update_one(wlh, ke);
	}
}

// Apply a kevent change right away, cancelling any deferred duplicate.
DISPATCH_NOINLINE
static int
_dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	if (ddi && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident,
				ke->udata);
		// Discard any deferred version of this change before applying now.
		_dispatch_kq_deferred_discard_slot(ddi, slot);
	}
	return _dispatch_kq_update_one(wlh, ke);
}

// Apply a registration state change (EV_ADD/EV_DELETE/EV_ENABLE) for a
// direct unote, deferring it when safe.  Returns true on success; false
// when an EV_DELETE is still in progress and the caller must retry.
DISPATCH_NOINLINE
static bool
_dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du,
		uint16_t action_flags)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	dispatch_unote_class_t du = _du._du;
	dispatch_kevent_t ke;
	int r = 0;

	if (action_flags & EV_ADD) {
		// as soon as we register we may get an event delivery and it has to
		// see du_wlh already set, else it will not unregister the kevent
		dispatch_assert(du->du_wlh == NULL);
		_dispatch_wlh_retain(wlh);
		du->du_wlh = wlh;
	}

	if (ddi && wlh == _dispatch_get_wlh()) {
		int slot = _dispatch_kq_deferred_find_slot(ddi,
				du->du_filter, du->du_ident, (uintptr_t)du);
		if (slot < ddi->ddi_nevents) {
			// when deleting and an enable is pending,
			// we must merge EV_ENABLE to do an immediate deletion
			action_flags |= (ddi->ddi_eventlist[slot].flags & EV_ENABLE);
		}

		if (!(action_flags & EV_ADD) && (action_flags & EV_ENABLE)) {
			// can be deferred, so do it!
			ke = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot);
			_dispatch_kq_unote_set_kevent(du, ke, action_flags);
			_dispatch_kevent_debug("deferred", ke);
			goto done;
		}

		// get rid of the deferred item if any, we can't wait
		_dispatch_kq_deferred_discard_slot(ddi, slot);
	}

	if (action_flags) {
		dispatch_kevent_s dk;
		_dispatch_kq_unote_set_kevent(du, &dk, action_flags);
		r = _dispatch_kq_update_one(wlh, &dk);
	}

done:
	if (action_flags & EV_ADD) {
		if (unlikely(r)) {
			// Registration failed: undo the wlh ownership taken above.
			_dispatch_wlh_release(du->du_wlh);
			du->du_wlh = NULL;
		}
		return r == 0;
	}

	if (action_flags & EV_DELETE) {
		if (r == EINPROGRESS) {
			// Deletion still in flight; caller retries later.
			return false;
		}
		_dispatch_wlh_release(du->du_wlh);
		du->du_wlh = NULL;
	}

	dispatch_assume_zero(r);
	return true;
}

#pragma mark dispatch_muxnote_t

// Lazily initialize the muxnote hash buckets (via dispatch_once_f).
static void
_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED)
{
	uint32_t i;
	for (i = 0; i < DSL_HASH_SIZE; i++) {
		TAILQ_INIT(&_dispatch_sources[i]);
	}
}

// Hash (ident, filter) to its muxnote bucket; mach port names are first
// reduced to their index part so right changes hash identically.
DISPATCH_ALWAYS_INLINE
static inline struct dispatch_muxnote_bucket_s *
_dispatch_muxnote_bucket(uint64_t ident, int16_t filter)
{
	switch (filter) {
#if HAVE_MACH
	case EVFILT_MACHPORT:
	case DISPATCH_EVFILT_MACH_NOTIFICATION:
		ident = MACH_PORT_INDEX(ident);
		break;
#endif
	case EVFILT_SIGNAL: // signo
	case EVFILT_PROC: // pid_t
	default: // fd
		break;
	}

	dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init);
	return &_dispatch_sources[DSL_HASH((uintptr_t)ident)];
}
#define _dispatch_unote_muxnote_bucket(du) \
	_dispatch_muxnote_bucket(du._du->du_ident, du._du->du_filter)

// Look up the muxnote for (wlh, ident, filter) in bucket `dmb`; returns
// NULL when none exists yet.
DISPATCH_ALWAYS_INLINE
static inline dispatch_muxnote_t
_dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb,
		dispatch_wlh_t wlh, uint64_t ident, int16_t filter)
{
	dispatch_muxnote_t dmn;
	_dispatch_muxnotes_lock();
	TAILQ_FOREACH(dmn, dmb, dmn_list) {
		if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident &&
				dmn->dmn_kev.filter == filter) {
			break;
		}
	}
	_dispatch_muxnotes_unlock();
	return dmn;
}
#define _dispatch_unote_muxnote_find(dmb, du, wlh) \
	_dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter)

DISPATCH_ALWAYS_INLINE
static inline dispatch_muxnote_t
_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter)
{
	struct dispatch_muxnote_bucket_s *dmb;
	dmb = _dispatch_muxnote_bucket(name, filter);
	return _dispatch_muxnote_find(dmb, DISPATCH_WLH_ANON, name, filter);
}

// Register a unote that shares one kernel knote (muxnote) with other
// unotes for the same (ident, filter): creates the muxnote on first use,
// widens its fflags otherwise.  Returns true when installed.
DISPATCH_NOINLINE
static bool
_dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh)
{
	struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du);
	dispatch_muxnote_t dmn;
	bool installed = true;

	dmn = _dispatch_unote_muxnote_find(dmb, du, wlh);
	if (dmn) {
		// Existing muxnote: add any fflags it is not yet watching.
		uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags;
		if (flags) {
			dmn->dmn_kev.fflags |= flags;
			if (unlikely(du._du->du_type->dst_update_mux)) {
				installed = du._du->du_type->dst_update_mux(dmn);
			} else {
				installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
						&dmn->dmn_kev);
			}
			// Roll the fflags back if the kernel update failed.
			if (!installed) dmn->dmn_kev.fflags &= ~flags;
		}
	} else {
		// First unote for this (ident, filter): create the muxnote and
		// install its knote, tagged with the MUXED udata marker.
		dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s));
		TAILQ_INIT(&dmn->dmn_unotes_head);
		_dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE);
#if DISPATCH_USE_KEVENT_QOS
		dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
#endif
		dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER;
		dmn->dmn_wlh = wlh;
		if (unlikely(du._du->du_type->dst_update_mux)) {
			installed = du._du->du_type->dst_update_mux(dmn);
		} else {
			installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh,
					&dmn->dmn_kev);
		}
		if (installed) {
			dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED);
			_dispatch_muxnotes_lock();
			TAILQ_INSERT_TAIL(dmb, dmn, dmn_list);
			_dispatch_muxnotes_unlock();
		} else {
			free(dmn);
		}
	}

	if (installed) {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link);
		dul->du_muxnote = dmn;

		if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
			// Propagate the knote's armed state to the send refs.
			bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev);
			os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed,relaxed);
		}
		du._du->du_wlh = DISPATCH_WLH_ANON;
	}
	return installed;
}

// Public entry point: register a unote with the event loop.
bool
_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh,
		dispatch_priority_t pri)
{
	dispatch_assert(!_dispatch_unote_registered(du));
	du._du->du_priority = pri;
	switch (du._du->du_filter) {
	case DISPATCH_EVFILT_CUSTOM_ADD:
	case DISPATCH_EVFILT_CUSTOM_OR:
	case DISPATCH_EVFILT_CUSTOM_REPLACE:
		// Custom (user-merged) filters never reach the kernel.
		du._du->du_wlh = DISPATCH_WLH_ANON;
		return true;
	}
	if (!du._du->du_is_direct) {
		return _dispatch_unote_register_muxed(du, DISPATCH_WLH_ANON);
	}
	return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE);
}

// Re-arm an already-registered unote (e.g. after an EV_DISPATCH delivery).
void
_dispatch_unote_resume(dispatch_unote_t du)
{
	dispatch_assert(_dispatch_unote_registered(du));

	if (du._du->du_is_direct) {
		dispatch_wlh_t wlh = du._du->du_wlh;
		_dispatch_kq_unote_update(wlh, du, EV_ENABLE);
	} else if (unlikely(du._du->du_type->dst_update_mux)) {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		du._du->du_type->dst_update_mux(dul->du_muxnote);
	} else {
		dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
		dispatch_muxnote_t dmn = dul->du_muxnote;
		_dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
	}
}

// Detach a unote from its muxnote; deletes the knote when it was the last
// user, narrows the knote's fflags otherwise.
DISPATCH_NOINLINE
static bool
_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags)
{
	dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du);
	dispatch_muxnote_t dmn = dul->du_muxnote;
	bool update = false, dispose = false;

	if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) {
		os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed);
	}
	dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON);
	du._du->du_wlh = NULL;
	TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link);
	_TAILQ_TRASH_ENTRY(dul, du_link);
	dul->du_muxnote = NULL;

	if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) {
		// Last unote on this muxnote: delete the knote and free it.
		dmn->dmn_kev.flags |= EV_DELETE;
		update = dispose = true;
	} else {
		// Narrow the knote's fflags to what the remaining unotes need.
		uint32_t fflags = du._du->du_type->dst_fflags;
		TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) {
			du = _dispatch_unote_linkage_get_unote(dul);
			fflags |= du._du->du_fflags;
		}
		if (dmn->dmn_kev.fflags & ~fflags) {
			dmn->dmn_kev.fflags &= fflags;
			update = true;
		}
	}
	if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) {
		if (unlikely(du._du->du_type->dst_update_mux)) {
			dispatch_assume(du._du->du_type->dst_update_mux(dmn));
		} else {
			_dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev);
		}
	}
	if (dispose) {
		struct dispatch_muxnote_bucket_s *dmb;
		dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter);
		_dispatch_muxnotes_lock();
		TAILQ_REMOVE(dmb, dmn, dmn_list);
		_dispatch_muxnotes_unlock();
		free(dmn);
	}
	return true;
}

// Public entry point: unregister a unote.  Returns false only when a
// direct-knote deletion could not complete yet (caller retries).
bool
_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags)
{
	switch (du._du->du_filter) {
	case DISPATCH_EVFILT_CUSTOM_ADD:
	case DISPATCH_EVFILT_CUSTOM_OR:
	case DISPATCH_EVFILT_CUSTOM_REPLACE:
		du._du->du_wlh = NULL;
		return true;
	}
	dispatch_wlh_t wlh = du._du->du_wlh;
	if (wlh) {
		if (!du._du->du_is_direct) {
			return _dispatch_unote_unregister_muxed(du, flags);
		}
		uint16_t action_flags;
		if (flags & DU_UNREGISTER_ALREADY_DELETED) {
			// Kernel already dropped the knote; only release our state.
			action_flags = 0;
		} else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) {
			// EV_ENABLE forces the deletion to happen synchronously.
			action_flags = EV_DELETE | EV_ENABLE;
		} else {
			action_flags = EV_DELETE;
		}
		return _dispatch_kq_unote_update(wlh, du, action_flags);
	}
	return true;
}

#pragma mark -
#pragma mark dispatch_event_loop

void
_dispatch_event_loop_atfork_child(void)
{
#if HAVE_MACH
	// Mach port rights do not survive fork(); reset the cached host port
	// so the child re-fetches it lazily.
	_dispatch_mach_host_port_pred = 0;
	_dispatch_mach_host_port = MACH_PORT_NULL;
#endif
}


DISPATCH_NOINLINE
void
_dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags)
{
	if (wlh == DISPATCH_WLH_MANAGER) {
		// Wake the manager via its NOTE_TRIGGER user event.
		dispatch_kevent_s ke = (dispatch_kevent_s){
			.ident = 1,
			.filter = EVFILT_USER,
			.fflags = NOTE_TRIGGER,
			.udata = (uintptr_t)DISPATCH_WLH_MANAGER,
		};
		return _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
	} else if (wlh && wlh != DISPATCH_WLH_ANON) {
		(void)dq_state; (void)flags;
	}
	// Any non-manager wlh reaches here: this backend does not support it.
	DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration");
}

// Flush the current thread's deferred kevent changes to the kernel.
DISPATCH_NOINLINE
void
_dispatch_event_loop_drain(uint32_t flags)
{
	dispatch_wlh_t wlh = _dispatch_get_wlh();
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	int n;

again:
	n = ddi->ddi_nevents;
	ddi->ddi_nevents = 0;
	_dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags);

	// Draining may have queued more deferred updates that must reach the
	// kernel before we resume userland work.
	if ((flags & KEVENT_FLAG_IMMEDIATE) &&
			!(flags & KEVENT_FLAG_ERROR_EVENTS) &&
			_dispatch_needs_to_return_to_kernel()) {
		goto again;
	}
}

// Process the batch of kevents handed to a kevent-workqueue thread.
void
_dispatch_event_loop_merge(dispatch_kevent_t events, int nevents)
{
	dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
	dispatch_kevent_s kev[nevents];

	// now we can re-use the whole event list, but we need to save one slot
	// for the event loop poke
	memcpy(kev, events, sizeof(kev));
	ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 1;

	for (int i = 0; i < nevents; i++) {
		_dispatch_kevent_drain(&kev[i]);
	}

	dispatch_wlh_t wlh = _dispatch_get_wlh();
	if (wlh == DISPATCH_WLH_ANON && ddi->ddi_stashed_dou._do) {
		if (ddi->ddi_nevents) {
			// We will drain the stashed item and not return to the kernel
			// right away. As a consequence, do not delay these updates.
			_dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE |
					KEVENT_FLAG_ERROR_EVENTS);
		}
		_dispatch_trace_continuation_push(ddi->ddi_stashed_rq,
				ddi->ddi_stashed_dou);
	}
}

// The kqueue backend has no kernel workloop ownership; the hooks below are
// no-ops kept for interface parity with the workloop backend.
void
_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state)
{
	(void)wlh; (void)dq_state;
}

void
_dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, uint64_t dq_state)
{
	(void)wlh; (void)dq_state;
}

void
_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc,
		dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state)
{
	(void)dsc; (void)wlh; (void)old_state; (void)new_state;
}

void
_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc)
{
	if (dsc->dsc_release_storage) {
		_dispatch_queue_release_storage(dsc->dc_data);
	}
}

void
_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state,
		uint64_t new_state, uint32_t flags)
{
	(void)wlh; (void)old_state; (void)new_state; (void)flags;
}

#if DISPATCH_WLH_DEBUG
void
_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh)
{
	(void)wlh;
}
#endif // DISPATCH_WLH_DEBUG

#pragma mark -
#pragma mark dispatch_event_loop timers

// Timer idents have all bits set except the low byte, which carries tidx.
#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8)

// An EVFILT_TIMER fired: mark the corresponding timer heap as expired and
// disarmed so the manager reprograms it.
DISPATCH_NOINLINE
static void
_dispatch_kevent_timer_drain(dispatch_kevent_t ke)
{
	dispatch_assert(ke->data > 0);
	dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) ==
			DISPATCH_KEVENT_TIMEOUT_IDENT_MASK);
	uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK;

	dispatch_assert(tidx < DISPATCH_TIMER_COUNT);
	_dispatch_timers_expired = true;
	_dispatch_timers_processing_mask |= 1 << tidx;
	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
#if DISPATCH_USE_DTRACE
	_dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx);
#endif
}

// Program (or delete) the kernel EVFILT_TIMER knote for timer heap `tidx`.
DISPATCH_NOINLINE
static void
_dispatch_event_loop_timer_program(uint32_t tidx,
		uint64_t target, uint64_t leeway, uint16_t action)
{
	dispatch_kevent_s ke = {
		.ident = 
DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx,
		.filter = EVFILT_TIMER,
		.flags = action | EV_ONESHOT,
		.fflags = _dispatch_timer_index_to_fflags[tidx],
		.data = (int64_t)target,
		.udata = (uintptr_t)&_dispatch_timers_heap[tidx],
#if DISPATCH_HAVE_TIMER_COALESCING
		.ext[1] = leeway,
#endif
#if DISPATCH_USE_KEVENT_QOS
		.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG,
#endif
	};

	_dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke);
}

void
_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range,
		dispatch_clock_now_cache_t nows)
{
	if (unlikely(_dispatch_timers_force_max_leeway)) {
		// Testing hook (env var): push the deadline out by the full leeway.
		range.delay += range.leeway;
		range.leeway = 0;
	}
#if HAVE_MACH
	if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) {
		// Wall-clock timers must be reprogrammed on calendar changes.
		_dispatch_mach_host_calendar_change_register();
	}
#endif

	// EVFILT_TIMER NOTE_ABSOLUTE always expects
	// a WALL deadline
	uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows);
	_dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED;
	_dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway,
			EV_ADD | EV_ENABLE);
}

void
_dispatch_event_loop_timer_delete(uint32_t tidx)
{
	_dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED;
	_dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE);
}

#pragma mark -
#pragma mark kevent specific sources

// Factory for EVFILT_PROC unotes; EXIT_STATUS consumers get the
// status-preserving data action.
static dispatch_unote_t
_dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED,
		uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
{
	dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask);
	if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) {
		du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET;
	}
	return du;
}

const dispatch_source_type_s _dispatch_source_type_proc = {
	.dst_kind = "proc",
	.dst_filter = EVFILT_PROC,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
	.dst_fflags = NOTE_EXIT, // rdar://16655831
	.dst_mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC|NOTE_EXITSTATUS
#if 
HAVE_DECL_NOTE_SIGNAL
			|NOTE_SIGNAL
#endif
#if HAVE_DECL_NOTE_REAP
			|NOTE_REAP
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_source_proc_create,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

// DISPATCH_SOURCE_TYPE_VNODE: EV_VANISHED so unlink/revoke of the watched
// fd is reported rather than silently dropping the knote.
const dispatch_source_type_s _dispatch_source_type_vnode = {
	.dst_kind = "vnode",
	.dst_filter = EVFILT_VNODE,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK
			|NOTE_RENAME|NOTE_FUNLOCK
#if HAVE_DECL_NOTE_REVOKE
			|NOTE_REVOKE
#endif
#if HAVE_DECL_NOTE_NONE
			|NOTE_NONE
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

// DISPATCH_SOURCE_TYPE_VFS: filesystem-wide (mount/unmount/low disk)
// events; takes no handle.
const dispatch_source_type_s _dispatch_source_type_vfs = {
	.dst_kind = "vfs",
	.dst_filter = EVFILT_FS,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR,
	.dst_mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT
			|VQ_DEAD|VQ_ASSIST|VQ_NOTRESPLOCK
#if HAVE_DECL_VQ_UPDATE
			|VQ_UPDATE
#endif
#if HAVE_DECL_VQ_VERYLOWDISK
			|VQ_VERYLOWDISK
#endif
#if HAVE_DECL_VQ_QUOTA
			|VQ_QUOTA
#endif
#if HAVE_DECL_VQ_NEARLOWDISK
			|VQ_NEARLOWDISK
#endif
#if HAVE_DECL_VQ_DESIRED_DISK
			|VQ_DESIRED_DISK
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_without_handle,
	.dst_merge_evt = _dispatch_source_merge_evt,
};

#ifdef EVFILT_SOCK
const dispatch_source_type_s _dispatch_source_type_sock = {
	.dst_kind = "sock",
	.dst_filter = EVFILT_SOCK,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_CONNRESET|NOTE_READCLOSED|NOTE_WRITECLOSED
			|NOTE_TIMEOUT|NOTE_NOSRCADDR|NOTE_IFDENIED|NOTE_SUSPEND|NOTE_RESUME
			|NOTE_KEEPALIVE
#ifdef NOTE_ADAPTIVE_WTIMO
			|NOTE_ADAPTIVE_WTIMO|NOTE_ADAPTIVE_RTIMO
#endif
#ifdef NOTE_CONNECTED
			|NOTE_CONNECTED|NOTE_DISCONNECTED|NOTE_CONNINFO_UPDATED
#endif
#ifdef NOTE_NOTIFY_ACK
			|NOTE_NOTIFY_ACK
#endif
			,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};
#endif // EVFILT_SOCK

#ifdef EVFILT_NW_CHANNEL
const dispatch_source_type_s _dispatch_source_type_nw_channel = {
	.dst_kind = "nw_channel",
	.dst_filter = EVFILT_NW_CHANNEL,
	.dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED,
	.dst_mask = NOTE_FLOW_ADV_UPDATE,
	.dst_size = sizeof(struct dispatch_source_refs_s),
	.dst_create = _dispatch_unote_create_with_fd,
	.dst_merge_evt = _dispatch_source_merge_evt,
};
#endif // EVFILT_NW_CHANNEL

#if DISPATCH_USE_MEMORYSTATUS

#if DISPATCH_USE_MEMORYPRESSURE_SOURCE
// All memory pressure states the internal source listens for.
#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \
		DISPATCH_MEMORYPRESSURE_NORMAL | \
		DISPATCH_MEMORYPRESSURE_WARN | \
		DISPATCH_MEMORYPRESSURE_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_MSL_STATUS)

// The subset forwarded to malloc's memory event handler.
#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \
		DISPATCH_MEMORYPRESSURE_WARN | \
		DISPATCH_MEMORYPRESSURE_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \
		DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \
		DISPATCH_MEMORYPRESSURE_MSL_STATUS)


// Event handler for libdispatch's internal memory pressure source.
static void
_dispatch_memorypressure_handler(void *context)
{
	dispatch_source_t ds = context;
	unsigned long memorypressure = dispatch_source_get_data(ds);

	if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) {
		// Pressure relieved: restore normal continuation caching.
		_dispatch_memory_warn = false;
		_dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT;
#if VOUCHER_USE_MACH_VOUCHER
		if (_firehose_task_buffer) {
			firehose_buffer_clear_bank_flags(_firehose_task_buffer,
					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
		}
#endif
	}
	if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) {
		// Under pressure: shrink the continuation cache.
		_dispatch_memory_warn = true;
		_dispatch_continuation_cache_limit =
				DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN;
#if VOUCHER_USE_MACH_VOUCHER
		if 
(_firehose_task_buffer) {
			firehose_buffer_set_bank_flags(_firehose_task_buffer,
					FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY);
		}
#endif
	}
	memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK;
	if (memorypressure) {
		// Forward the remaining bits to malloc (e.g. to trim caches).
		malloc_memory_event_handler(memorypressure);
	}
}

// Create and activate the internal memory pressure source on the manager
// queue (called once from _dispatch_kq_poll's initialization path).
static void
_dispatch_memorypressure_init(void)
{
	dispatch_source_t ds = dispatch_source_create(
			DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0,
			DISPATCH_MEMORYPRESSURE_SOURCE_MASK, &_dispatch_mgr_q);
	dispatch_set_context(ds, ds);
	dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler);
	dispatch_activate(ds);
}
#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE

#if TARGET_OS_SIMULATOR // rdar://problem/9219483
static int _dispatch_ios_simulator_memory_warnings_fd = -1;
static void
_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED)
{
	char *e = getenv("SIMULATOR_MEMORY_WARNINGS");
	if (!e) return;
	_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY);
	if (_dispatch_ios_simulator_memory_warnings_fd == -1) {
		(void)dispatch_assume_zero(errno);
	}
}

static dispatch_unote_t
_dispatch_source_memorypressure_create(dispatch_source_type_t dst,
		uintptr_t handle, unsigned long mask)
{
	static dispatch_once_t pred;
	dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init);

	if (handle) {
		return DISPATCH_UNOTE_NULL;
	}

	// The simulator has no kernel memorystatus; watch the file the
	// simulator host touches on memory warnings instead.
	dst = &_dispatch_source_type_vnode;
	handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd;
	mask = NOTE_ATTRIB;

	dispatch_unote_t du = dux_create(dst, handle, mask);
	if (du._du) {
		du._du->du_memorypressure_override = true;
	}
	return du;
}
#endif // TARGET_OS_SIMULATOR

const dispatch_source_type_s _dispatch_source_type_memorypressure = {
	.dst_kind = "memorystatus",
	.dst_filter = EVFILT_MEMORYSTATUS,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
	.dst_mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL
			|NOTE_MEMORYSTATUS_PRESSURE_WARN|NOTE_MEMORYSTATUS_PRESSURE_CRITICAL
			|NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN
			|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL
			|NOTE_MEMORYSTATUS_MSL_STATUS,
	.dst_size = sizeof(struct dispatch_source_refs_s),

#if TARGET_OS_SIMULATOR
	.dst_create = _dispatch_source_memorypressure_create,
	// redirected to _dispatch_source_type_vnode
#else
	.dst_create = _dispatch_unote_create_without_handle,
	.dst_merge_evt = _dispatch_source_merge_evt,
#endif
};

static dispatch_unote_t
_dispatch_source_vm_create(dispatch_source_type_t dst DISPATCH_UNUSED,
		uintptr_t handle, unsigned long mask DISPATCH_UNUSED)
{
	// Map legacy vm pressure to memorypressure warning rdar://problem/15907505
	dispatch_unote_t du = dux_create(&_dispatch_source_type_memorypressure,
			handle, NOTE_MEMORYSTATUS_PRESSURE_WARN);
	if (du._du) {
		du._du->du_vmpressure_override = 1;
	}
	return du;
}

const dispatch_source_type_s _dispatch_source_type_vm = {
	.dst_kind = "vm (deprecated)",
	.dst_filter = EVFILT_MEMORYSTATUS,
	.dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH,
	.dst_mask = NOTE_VM_PRESSURE,
	.dst_size = sizeof(struct dispatch_source_refs_s),

	.dst_create = _dispatch_source_vm_create,
	// redirected to _dispatch_source_type_memorypressure
};
#endif // DISPATCH_USE_MEMORYSTATUS

#pragma mark mach send / notifications
#if HAVE_MACH

// Flags for all notifications that are registered/unregistered when a
// send-possible notification is requested/delivered
#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \
		DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED)

static void _dispatch_mach_host_notify_update(void *context);

static mach_port_t _dispatch_mach_notify_port;
static dispatch_source_t _dispatch_mach_notify_source;

static void
_dispatch_timers_calendar_change(void)
{
	uint32_t qos;

	// calendar change may have gone past the wallclock deadline
	_dispatch_timers_expired = true;
	for 
(qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) {
		_dispatch_timers_processing_mask |=
				1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos);
	}
}

// Locate the audit trailer appended after a received mach message; NULL
// when the trailer is not format 0 or too small to carry audit data.
static mach_msg_audit_trailer_t *
_dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr)
{
	mach_msg_trailer_t *tlr = NULL;
	mach_msg_audit_trailer_t *audit_tlr = NULL;
	tlr = (mach_msg_trailer_t *)((unsigned char *)hdr +
			round_msg(hdr->msgh_size));
	// The trailer should always be of format zero.
	if (tlr->msgh_trailer_type == MACH_MSG_TRAILER_FORMAT_0) {
		if (tlr->msgh_trailer_size >= sizeof(mach_msg_audit_trailer_t)) {
			audit_tlr = (mach_msg_audit_trailer_t *)tlr;
		}
	}
	return audit_tlr;
}

// Handle a message on the internal mach notify port: dispatches to the MIG
// server, with a special case for host calendar-change notifications.
DISPATCH_NOINLINE
static void
_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr)
{
	mig_reply_error_t reply;
	mach_msg_audit_trailer_t *tlr = NULL;
	dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union
			__ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem));
	dispatch_assert(sizeof(mig_reply_error_t) <
			DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE);
	tlr = _dispatch_mach_msg_get_audit_trailer(hdr);
	if (!tlr) {
		DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer");
	}
	if (hdr->msgh_id <= MACH_NOTIFY_LAST
			&& dispatch_assume_zero(tlr->msgh_audit.val[
			DISPATCH_MACH_AUDIT_TOKEN_PID])) {
		// Mach notifications must come from the kernel (audit pid 0);
		// drop anything spoofed by another process.
		mach_msg_destroy(hdr);
		return;
	}
	boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head);
	if (!success && reply.RetCode == MIG_BAD_ID &&
			(hdr->msgh_id == HOST_CALENDAR_SET_REPLYID ||
			hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) {
		_dispatch_debug("calendar-change notification");
		_dispatch_timers_calendar_change();
		_dispatch_mach_host_notify_update(NULL);
		success = TRUE;
		reply.RetCode = KERN_SUCCESS;
	}
	if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) {
		(void)dispatch_assume_zero(reply.RetCode);
	}
	if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) {
		mach_msg_destroy(hdr);
	}
+} + +DISPATCH_NOINLINE +static void +_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) +{ + kern_return_t kr; +#if HAVE_MACH_PORT_CONSTRUCT + mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; +#if DISPATCH_SIZEOF_PTR == 8 + const mach_port_context_t guard = 0xfeed09071f1ca7edull; +#else + const mach_port_context_t guard = 0xff1ca7edull; +#endif + kr = mach_port_construct(mach_task_self(), &opts, guard, + &_dispatch_mach_notify_port); +#else + kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, + &_dispatch_mach_notify_port); +#endif + DISPATCH_VERIFY_MIG(kr); + if (unlikely(kr)) { + DISPATCH_CLIENT_CRASH(kr, + "mach_port_construct() failed: cannot create receive right"); + } + + static const struct dispatch_continuation_s dc = { + .dc_func = (void*)_dispatch_mach_notify_source_invoke, + }; + _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( + _dispatch_mach_notify_port, &dc); + dispatch_assert(_dispatch_mach_notify_source); + dispatch_activate(_dispatch_mach_notify_source); +} + +static void +_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) +{ + kern_return_t kr; + mach_port_t mp, mhp = mach_host_self(); + kr = host_get_host_port(mhp, &mp); + DISPATCH_VERIFY_MIG(kr); + if (likely(!kr)) { + // mach_host_self returned the HOST_PRIV port + kr = mach_port_deallocate(mach_task_self(), mhp); + DISPATCH_VERIFY_MIG(kr); + mhp = mp; + } else if (kr != KERN_INVALID_ARGUMENT) { + (void)dispatch_assume_zero(kr); + } + if (unlikely(!mhp)) { + DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); + } + _dispatch_mach_host_port = mhp; +} + +mach_port_t +_dispatch_get_mach_host_port(void) +{ + dispatch_once_f(&_dispatch_mach_host_port_pred, NULL, + _dispatch_mach_host_port_init); + return _dispatch_mach_host_port; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_port_t +_dispatch_get_mach_notify_port(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, 
_dispatch_mach_notify_port_init); + return _dispatch_mach_notify_port; +} + +static void +_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) +{ + static int notify_type = HOST_NOTIFY_CALENDAR_SET; + kern_return_t kr; + _dispatch_debug("registering for calendar-change notification"); +retry: + kr = host_request_notification(_dispatch_get_mach_host_port(), + notify_type, _dispatch_get_mach_notify_port()); + // Fallback when missing support for newer _SET variant, fires strictly more + if (kr == KERN_INVALID_ARGUMENT && + notify_type != HOST_NOTIFY_CALENDAR_CHANGE) { + notify_type = HOST_NOTIFY_CALENDAR_CHANGE; + goto retry; + } + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_host_calendar_change_register(void) +{ + static dispatch_once_t pred; + dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); +} + +static kern_return_t +_dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags, + uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, + mach_port_mscount_t notify_sync) +{ + mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident; + typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data; + kern_return_t kr, krr = 0; + + // Update notification registration state. 
+ dmn->dmn_kev.data |= (new_flags | dmn->dmn_kev.fflags) & mask; + dmn->dmn_kev.data &= ~(del_flags & mask); + + _dispatch_debug_machport(port); + if ((dmn->dmn_kev.data & mask) && !(prev & mask)) { + _dispatch_debug("machport[0x%08x]: registering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + krr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(krr); + + switch (krr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + // Suppress errors & clear registration state + dmn->dmn_kev.data &= ~mask; + break; + default: + // Else, we don't expect any errors from mach. Log any errors + if (dispatch_assume_zero(krr)) { + // log the error & clear registration state + dmn->dmn_kev.data &= ~mask; + } else if (dispatch_assume_zero(previous)) { + // Another subsystem has beat libdispatch to requesting the + // specified Mach notification on this port. We should + // technically cache the previous port and message it when the + // kernel messages our port. Or we can just say screw those + // subsystems and deallocate the previous port. 
+ // They should adopt libdispatch :-P + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + previous = MACH_PORT_NULL; + } + } + } else if (!(dmn->dmn_kev.data & mask) && (prev & mask)) { + _dispatch_debug("machport[0x%08x]: unregistering for send-possible " + "notification", port); + previous = MACH_PORT_NULL; + kr = mach_port_request_notification(mach_task_self(), port, + notify_msgid, notify_sync, MACH_PORT_NULL, + MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + + switch (kr) { + case KERN_INVALID_NAME: + case KERN_INVALID_RIGHT: + case KERN_INVALID_ARGUMENT: + break; + default: + if (dispatch_assume_zero(kr)) { + // log the error + } + } + } else { + return 0; + } + if (unlikely(previous)) { + // the kernel has not consumed the send-once right yet + (void)dispatch_assume_zero( + _dispatch_send_consume_send_once_right(previous)); + } + return krr; +} + +static bool +_dispatch_kevent_mach_notify_resume(dispatch_muxnote_t dmn, uint32_t new_flags, + uint32_t del_flags) +{ + kern_return_t kr = KERN_SUCCESS; + dispatch_assert_zero(new_flags & del_flags); + if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || + (del_flags & _DISPATCH_MACH_SP_FLAGS)) { + // Requesting a (delayed) non-sync send-possible notification + // registers for both immediate dead-name notification and delayed-arm + // send-possible notification for the port. + // The send-possible notification is armed when a mach_msg() with the + // the MACH_SEND_NOTIFY to the port times out. 
+ // If send-possible is unavailable, fall back to immediate dead-name + // registration rdar://problem/2527840&9008724 + kr = _dispatch_mach_notify_update(dmn, new_flags, del_flags, + _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, + MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME); + } + return kr == KERN_SUCCESS; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final) +{ + dispatch_unote_linkage_t dul, dul_next; + dispatch_muxnote_t dmn; + + _dispatch_debug_machport(name); + dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); + if (!dmn) { + return; + } + + dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS; + if (!final) { + // Re-register for notification before delivery + final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0); + } + + uint32_t flags = final ? EV_ONESHOT : EV_ENABLE; + DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0; + TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); + dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0); + if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) { + // current merge is last in list (dmn might have been freed) + // or it re-armed the notification + break; + } + } +} + +kern_return_t +_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ +#if DISPATCH_DEBUG + _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " + "deleted prematurely", name); +#endif + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + kern_return_t kr; + + _dispatch_debug("machport[0x%08x]: dead-name notification", name); + 
_dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DEAD, true); + + // the act of receiving a dead name notification allocates a dead-name + // right that must be deallocated + kr = mach_port_deallocate(mach_task_self(), name); + DISPATCH_VERIFY_MIG(kr); + //(void)dispatch_assume_zero(kr); + return KERN_SUCCESS; +} + +kern_return_t +_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, + mach_port_name_t name) +{ + _dispatch_debug("machport[0x%08x]: send-possible notification", name); + _dispatch_debug_machport(name); + _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); + return KERN_SUCCESS; +} + +void +_dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) +{ + dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote; + dispatch_unote_linkage_t dul; + dispatch_unote_t du; + + if (!_dispatch_unote_registered(dmsr)) { + return; + } + + DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true; + TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + du = _dispatch_unote_linkage_get_unote(dul); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed); + } +} + +static dispatch_unote_t +_dispatch_source_mach_send_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + if (!mask) { + // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD + mask = DISPATCH_MACH_SEND_DEAD; + } + if (!handle) { + handle = MACH_PORT_DEAD; // + } + return _dispatch_unote_create_with_handle(dst, handle, mask); +} + +static bool +_dispatch_mach_send_update(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_kev.flags & EV_DELETE) { + return _dispatch_kevent_mach_notify_resume(dmn, 0, dmn->dmn_kev.fflags); + } else { + return _dispatch_kevent_mach_notify_resume(dmn, dmn->dmn_kev.fflags, 0); + } +} + +const dispatch_source_type_s _dispatch_source_type_mach_send = { + .dst_kind = "mach_send", + .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, + 
.dst_flags = EV_CLEAR, + .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_source_mach_send_create, + .dst_update_mux = _dispatch_mach_send_update, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +static dispatch_unote_t +_dispatch_mach_send_create(dispatch_source_type_t dst, + uintptr_t handle, unsigned long mask) +{ + // without handle because the mach code will set the ident later + dispatch_unote_t du = + _dispatch_unote_create_without_handle(dst, handle, mask); + if (du._dmsr) { + du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; + TAILQ_INIT(&du._dmsr->dmsr_replies); + } + return du; +} + +const dispatch_source_type_s _dispatch_mach_type_send = { + .dst_kind = "mach_send (mach)", + .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, + .dst_flags = EV_CLEAR, + .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_size = sizeof(struct dispatch_mach_send_refs_s), + + .dst_create = _dispatch_mach_send_create, + .dst_update_mux = _dispatch_mach_send_update, + .dst_merge_evt = _dispatch_mach_merge_notification, +}; + +#endif // HAVE_MACH +#pragma mark mach recv / reply +#if HAVE_MACH + +static void +_dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr) +{ + mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + mach_port_t name = hdr->msgh_local_port; + + if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - + DISPATCH_MACH_TRAILER_SIZE)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received overlarge message"); + } else if (!dispatch_assume(name)) { + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with MACH_PORT_NULL port"); + } else { + _dispatch_debug_machport(name); + if (likely(du._du)) { + return dux_merge_msg(du._du, flags, hdr, siz); + } + _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " + "received message with no listeners"); 
+ } + + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) +{ + mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz; + mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; + uint32_t flags = ke->flags; + dispatch_unote_t du = _dispatch_kevent_get_unote(ke); + + if (unlikely(!hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); + } + if (likely(!kr)) { + _dispatch_kevent_mach_msg_recv(du, flags, hdr); + goto out; + } else if (kr != MACH_RCV_TOO_LARGE) { + goto out; + } else if (!ke->data) { + DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); + } + if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) { + DISPATCH_INTERNAL_CRASH(ke->ext[1], + "EVFILT_MACHPORT with overlarge message"); + } + siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + hdr = malloc(siz); + if (dispatch_assume(hdr)) { + flags |= DISPATCH_EV_MSG_NEEDS_FREE; + } else { + // Kernel will discard message too large to fit + hdr = NULL; + siz = 0; + } + mach_port_t name = (mach_port_name_t)ke->data; + const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | + MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); + kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (likely(!kr)) { + _dispatch_kevent_mach_msg_recv(du, flags, hdr); + goto out; + } else if (kr == MACH_RCV_TOO_LARGE) { + _dispatch_log("BUG in libdispatch client: " + "_dispatch_kevent_mach_msg_drain: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", + hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); + kr = MACH_MSG_SUCCESS; + } + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } +out: + if (unlikely(kr)) { + _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " + "message reception failed", kr); + } +} + +const dispatch_source_type_s 
_dispatch_source_type_mach_recv = { + .dst_kind = "mach_recv", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_handle, + .dst_merge_evt = _dispatch_source_merge_evt, + .dst_merge_msg = NULL, // never receives messages directly + + .dst_per_trigger_qos = true, +}; + +static void +_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED) +{ + dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER]; + dispatch_source_t ds = _dispatch_source_from_refs(du._dr); + dispatch_queue_t cq = _dispatch_queue_get_current(); + + // see firehose_client_push_notify_async + _dispatch_queue_set_current(ds->_as_dq); + dc->dc_func(msg); + _dispatch_queue_set_current(cq); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(msg); + } + if ((ds->dq_atomic_flags & DSF_CANCELED) || + (flags & (EV_ONESHOT | EV_DELETE))) { + return _dispatch_source_merge_evt(du, flags, 0, 0, 0); + } + if (_dispatch_unote_needs_rearm(du)) { + return _dispatch_unote_resume(du); + } +} + +static void +_dispatch_mach_recv_direct_merge(dispatch_unote_t du, + uint32_t flags, uintptr_t data, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp) +{ + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + return _dispatch_source_merge_evt(du, flags, data, 0, pp); +} + +const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { + .dst_kind = "direct mach_recv", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_source_refs_s), + + .dst_create = _dispatch_unote_create_with_handle, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + 
.dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg, + + .dst_per_trigger_qos = true, +}; + +const dispatch_source_type_s _dispatch_mach_type_recv = { + .dst_kind = "mach_recv (channel)", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_recv_refs_s), + + // without handle because the mach code will set the ident after connect + .dst_create = _dispatch_unote_create_without_handle, + .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_msg = _dispatch_mach_merge_msg, + + .dst_per_trigger_qos = true, +}; + +DISPATCH_NORETURN +static void +_dispatch_mach_reply_merge_evt(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) +{ + DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); +} + +const dispatch_source_type_s _dispatch_mach_type_reply = { + .dst_kind = "mach reply", + .dst_filter = EVFILT_MACHPORT, + .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_size = sizeof(struct dispatch_mach_reply_refs_s), + + .dst_create = _dispatch_unote_create_with_handle, + .dst_merge_evt = _dispatch_mach_reply_merge_evt, + .dst_merge_msg = _dispatch_mach_reply_merge_msg, +}; + +#pragma mark Mach channel SIGTERM notification (for XPC channels only) + +const dispatch_source_type_s _dispatch_xpc_type_sigterm = { + .dst_kind = "sigterm (xpc)", + .dst_filter = EVFILT_SIGNAL, + .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT, + .dst_fflags = 0, + .dst_size = sizeof(struct dispatch_xpc_term_refs_s), + + .dst_create = _dispatch_unote_create_with_handle, + .dst_merge_evt = _dispatch_xpc_sigterm_merge, +}; + +#endif // HAVE_MACH + +#endif // DISPATCH_EVENT_BACKEND_KEVENT diff --git a/src/event/workqueue.c b/src/event/workqueue.c new file mode 100644 index 
000000000..73362a58a --- /dev/null +++ b/src/event/workqueue.c @@ -0,0 +1,249 @@ +/* + * Copyright (c) 2017-2017 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +#if DISPATCH_USE_INTERNAL_WORKQUEUE + +/* + * dispatch_workq monitors the thread pool that is + * executing the work enqueued on libdispatch's pthread + * root queues and dynamically adjusts its size. + * + * The dynamic monitoring could be implemented using either + * (a) low-frequency user-level approximation of the number of runnable + * worker threads via reading the /proc file system + * (b) a Linux kernel extension that hooks the process change handler + * to accurately track the number of runnable normal worker threads + * This file provides an implementation of option (a). + * + * Using either form of monitoring, if (i) there appears to be + * work available in the monitored pthread root queue, (ii) the + * number of runnable workers is below the target size for the pool, + * and (iii) the total number of worker threads is below an upper limit, + * then an additional worker thread will be added to the pool. + */ + +#pragma mark static data for monitoring subsystem + +/* + * State for the user-level monitoring of a workqueue. 
+ */ +typedef struct dispatch_workq_monitor_s { + /* The dispatch_queue we are monitoring */ + dispatch_queue_t dq; + + /* The observed number of runnable worker threads */ + int32_t num_runnable; + + /* The desired number of runnable worker threads */ + int32_t target_runnable; + + /* + * Tracking of registered workers; all accesses must hold lock. + * Invariant: registered_tids[0]...registered_tids[num_registered_tids-1] + * contain the dispatch_tids of the worker threads we are monitoring. + */ + dispatch_unfair_lock_s registered_tid_lock; + dispatch_tid *registered_tids; + int num_registered_tids; +} dispatch_workq_monitor_s, *dispatch_workq_monitor_t; + +static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_MAX]; + +#pragma mark Implementation of the monitoring subsystem. + +#define WORKQ_MAX_TRACKED_TIDS DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#define WORKQ_OVERSUBSCRIBE_FACTOR 2 + +static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED); +static dispatch_once_t _dispatch_workq_init_once_pred; + +void +_dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls) +{ + dispatch_once_f(&_dispatch_workq_init_once_pred, NULL, &_dispatch_workq_init_once); + +#if HAVE_DISPATCH_WORKQ_MONITORING + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; + dispatch_assert(mon->dq == root_q); + dispatch_tid tid = _dispatch_tid_self(); + _dispatch_unfair_lock_lock(&mon->registered_tid_lock); + dispatch_assert(mon->num_registered_tids < WORKQ_MAX_TRACKED_TIDS-1); + int worker_id = mon->num_registered_tids++; + mon->registered_tids[worker_id] = tid; + _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); +#endif // HAVE_DISPATCH_WORKQ_MONITORING +} + +void +_dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls) +{ +#if HAVE_DISPATCH_WORKQ_MONITORING + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_workq_monitor_t mon = 
&_dispatch_workq_monitors[qos-1]; + dispatch_assert(mon->dq == root_q); + dispatch_tid tid = _dispatch_tid_self(); + _dispatch_unfair_lock_lock(&mon->registered_tid_lock); + for (int i = 0; i < mon->num_registered_tids; i++) { + if (mon->registered_tids[i] == tid) { + int last = mon->num_registered_tids - 1; + mon->registered_tids[i] = mon->registered_tids[last]; + mon->registered_tids[last] = 0; + mon->num_registered_tids--; + break; + } + } + _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); +#endif // HAVE_DISPATCH_WORKQ_MONITORING +} + + +#if HAVE_DISPATCH_WORKQ_MONITORING +#if defined(__linux__) +/* + * For each pid that is a registered worker, read /proc/[pid]/stat + * to get a count of the number of them that are actually runnable. + * See the proc(5) man page for the format of the contents of /proc/[pid]/stat + */ +static void +_dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon) +{ + char path[128]; + char buf[4096]; + int running_count = 0; + + _dispatch_unfair_lock_lock(&mon->registered_tid_lock); + + for (int i = 0; i < mon->num_registered_tids; i++) { + dispatch_tid tid = mon->registered_tids[i]; + int fd; + ssize_t bytes_read = -1; + + int r = snprintf(path, sizeof(path), "/proc/%d/stat", tid); + dispatch_assert(r > 0 && r < (int)sizeof(path)); + + fd = open(path, O_RDONLY | O_NONBLOCK); + if (unlikely(fd == -1)) { + DISPATCH_CLIENT_CRASH(tid, + "workq: registered worker exited prematurely"); + } else { + bytes_read = read(fd, buf, sizeof(buf)-1); + (void)close(fd); + } + + if (bytes_read > 0) { + buf[bytes_read] = '\0'; + char state; + if (sscanf(buf, "%*d %*s %c", &state) == 1) { + // _dispatch_debug("workq: Worker %d, state %c\n", tid, state); + if (state == 'R') { + running_count++; + } + } else { + _dispatch_debug("workq: sscanf of state failed for %d", tid); + } + } else { + _dispatch_debug("workq: Failed to read %s", path); + } + } + + mon->num_runnable = running_count; + + 
_dispatch_unfair_lock_unlock(&mon->registered_tid_lock); +} +#else +#error must define _dispatch_workq_count_runnable_workers +#endif + +static void +_dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) +{ + int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * (int)dispatch_hw_config(active_cpus); + int global_runnable = 0; + for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; + dispatch_queue_t dq = mon->dq; + + if (!_dispatch_queue_class_probe(dq)) { + _dispatch_debug("workq: %s is empty.", dq->dq_label); + continue; + } + + _dispatch_workq_count_runnable_workers(mon); + _dispatch_debug("workq: %s has %d runnable wokers (target is %d)", + dq->dq_label, mon->num_runnable, mon->target_runnable); + + global_runnable += mon->num_runnable; + + if (mon->num_runnable == 0) { + // We have work, but no worker is runnable. + // It is likely the program is stalled. Therefore treat + // this as if dq were an overcommit queue and call poke + // with the limit being the maximum number of workers for dq. + int32_t floor = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS; + _dispatch_debug("workq: %s has no runnable workers; poking with floor %d", + dq->dq_label, floor); + _dispatch_global_queue_poke(dq, 1, floor); + global_runnable += 1; // account for poke in global estimate + } else if (mon->num_runnable < mon->target_runnable && + global_runnable < global_soft_max) { + // We are below target, but some workers are still runnable. + // We want to oversubscribe to hit the desired load target. + // However, this under-utilization may be transitory so set the + // floor as a small multiple of threads per core. 
+ int32_t floor = (1 - WORKQ_OVERSUBSCRIBE_FACTOR) * mon->target_runnable; + int32_t floor2 = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS; + floor = MAX(floor, floor2); + _dispatch_debug("workq: %s under utilization target; poking with floor %d", + dq->dq_label, floor); + _dispatch_global_queue_poke(dq, 1, floor); + global_runnable += 1; // account for poke in global estimate + } + } +} +#endif // HAVE_DISPATCH_WORKQ_MONITORING + +static void +_dispatch_workq_init_once(void *context DISPATCH_UNUSED) +{ +#if HAVE_DISPATCH_WORKQ_MONITORING + int target_runnable = (int)dispatch_hw_config(active_cpus); + for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; + mon->dq = _dispatch_get_root_queue(i, false); + void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); + mon->registered_tids = buf; + mon->target_runnable = target_runnable; + } + + // Create monitoring timer that will periodically run on dispatch_mgr_q + dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, + 0, 0, &_dispatch_mgr_q); + dispatch_source_set_timer(ds, dispatch_time(DISPATCH_TIME_NOW, 0), + NSEC_PER_SEC, 0); + dispatch_source_set_event_handler_f(ds, _dispatch_workq_monitor_pools); + dispatch_set_context(ds, ds); // avoid appearing as leaked + dispatch_activate(ds); +#endif // HAVE_DISPATCH_WORKQ_MONITORING +} + +#endif // DISPATCH_USE_INTERNAL_WORKQUEUE diff --git a/src/event/workqueue_internal.h b/src/event/workqueue_internal.h new file mode 100644 index 000000000..94dfe4e36 --- /dev/null +++ b/src/event/workqueue_internal.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2017-2017 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_WORKQUEUE_INTERNAL__ +#define __DISPATCH_WORKQUEUE_INTERNAL__ + +#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x1 + +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 + +void _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls); +void _dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls); + +#if defined(__linux__) +#define HAVE_DISPATCH_WORKQ_MONITORING 1 +#else +#define HAVE_DISPATCH_WORKQ_MONITORING 0 +#endif + +#endif /* __DISPATCH_WORKQUEUE_INTERNAL__ */ + diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index 986533cc1..e4fdf3324 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -35,16 +35,18 @@ register( comm_recvp : mach_port_move_receive_t; comm_sendp : mach_port_make_send_t; extra_info_port : mach_port_move_send_t; - extra_info_size : mach_vm_size_t + extra_info_size : mach_vm_size_t; + ServerAuditToken atoken : audit_token_t ); routine -push( +push_and_wait( RequestPort comm_port : mach_port_t; SReplyPort reply_port : mach_port_make_send_once_t; qos_class : qos_class_t; for_io : boolean_t; -out push_reply : firehose_push_reply_t +out push_reply : firehose_push_reply_t; +out quarantinedOut : boolean_t ); simpleroutine diff --git a/src/firehose/firehose_buffer.c 
b/src/firehose/firehose_buffer.c index 1305bdea6..3bb790c7c 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -37,6 +37,10 @@ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) +#ifndef OS_FALLTHROUGH +#define OS_FALLTHROUGH +#endif + #define DISPATCH_INTERNAL_CRASH(ac, msg) ({ panic(msg); __builtin_trap(); }) #if defined(__x86_64__) || defined(__i386__) @@ -49,10 +53,13 @@ #define dispatch_hardware_pause() __asm__("") #endif -#define _dispatch_wait_until(c) do { \ - while (!fastpath(c)) { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ + for (;;) { \ + if (likely(_c = (c))) break; \ dispatch_hardware_pause(); \ - } } while (0) + } \ + _c; }) #define dispatch_compiler_barrier() __asm__ __volatile__("" ::: "memory") typedef uint32_t dispatch_lock; @@ -62,6 +69,8 @@ typedef struct dispatch_gate_s { #define DLOCK_LOCK_DATA_CONTENTION 0 static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); +#define fcp_quarntined fcp_quarantined + #include #include #include @@ -71,9 +80,10 @@ static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include +#include // os/internal/atomic.h #include // #include // -#include // os/internal/atomic.h +#include // #include "os/firehose_buffer_private.h" #include "firehose_buffer_internal.h" #include "firehose_inline_internal.h" @@ -93,14 +103,11 @@ _Static_assert(offsetof(firehose_stream_state_u, fss_gate) == offsetof(firehose_stream_state_u, fss_allocator), "fss_gate and fss_allocator alias"); _Static_assert(sizeof(struct firehose_buffer_header_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, + FIREHOSE_CHUNK_SIZE, "firehose buffer header must be 4k"); _Static_assert(offsetof(struct firehose_buffer_header_s, fbh_unused) <= - FIREHOSE_BUFFER_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, + FIREHOSE_CHUNK_SIZE - FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE, "we must have enough space for the libtrace header"); 
-_Static_assert(sizeof(struct firehose_buffer_chunk_s) == - FIREHOSE_BUFFER_CHUNK_SIZE, - "firehose buffer chunks must be 4k"); _Static_assert(powerof2(FIREHOSE_BUFFER_CHUNK_COUNT), "CHUNK_COUNT Must be a power of two"); _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, @@ -109,14 +116,8 @@ _Static_assert(FIREHOSE_BUFFER_CHUNK_COUNT <= 64, _Static_assert(powerof2(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT), "madvise chunk count must be a power of two"); #endif -_Static_assert(howmany(sizeof(struct firehose_tracepoint_s), - sizeof(struct firehose_buffer_chunk_s)) < 255, - "refcount assumes that you cannot have more than 255 tracepoints"); -// FIXME: we should have an event-count instead here _Static_assert(sizeof(struct firehose_buffer_stream_s) == 128, "firehose buffer stream must be small (single cacheline if possible)"); -_Static_assert(offsetof(struct firehose_buffer_chunk_s, fbc_data) % 8 == 0, - "Page header is 8 byte aligned"); _Static_assert(sizeof(struct firehose_tracepoint_s) == 24, "tracepoint header should be exactly 24 bytes"); #endif @@ -177,21 +178,19 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; sendp = firehose_mach_port_allocate(opts, fb); - if (oldsendp && _voucher_libtrace_hooks->vah_version >= 3) { - if (_voucher_libtrace_hooks->vah_get_reconnect_info) { - kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); - if (likely(kr == KERN_SUCCESS) && addr && size) { - extra_info_size = size; - kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, - flags, &extra_info_port, MACH_PORT_NULL); - if (unlikely(kr)) { - // the client probably has some form of memory corruption - // and/or a port leak - DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); - } - kr = mach_vm_deallocate(mach_task_self(), addr, size); - (void)dispatch_assume_zero(kr); + if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) { + kr = 
_voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); + if (likely(kr == KERN_SUCCESS) && addr && size) { + extra_info_size = size; + kr = mach_make_memory_entry_64(mach_task_self(), &size, addr, + flags, &extra_info_port, MACH_PORT_NULL); + if (unlikely(kr)) { + // the client probably has some form of memory corruption + // and/or a port leak + DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); } + kr = mach_vm_deallocate(mach_task_self(), addr, size); + (void)dispatch_assume_zero(kr); } } @@ -261,7 +260,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) } } - uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_BUFFER_CHUNK_SIZE); + uint16_t ratio = (uint16_t)(PAGE_SIZE / FIREHOSE_CHUNK_SIZE); if (ratio > 1) { total = roundup(total, ratio); } @@ -299,7 +298,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_addr = vm_page_size; const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * - FIREHOSE_BUFFER_CHUNK_SIZE; + FIREHOSE_CHUNK_SIZE; if (slowpath(madvise_bytes % PAGE_SIZE)) { DISPATCH_INTERNAL_CRASH(madvise_bytes, "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); @@ -320,7 +319,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_offset_t vm_addr = 0; vm_size_t size; - size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; __firehose_allocate(&vm_addr, size); (void)logd_port; (void)unique_pid; @@ -453,23 +452,58 @@ firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos, } } } + +OS_NOINLINE +static void +firehose_client_start_quarantine(firehose_buffer_t fb) +{ + if (_voucher_libtrace_hooks->vah_version < 5) return; + if (!_voucher_libtrace_hooks->vah_quarantine_starts) return; + + _voucher_libtrace_hooks->vah_quarantine_starts(); + + fb->fb_header.fbh_quarantined = true; + firehose_buffer_stream_flush(fb, firehose_stream_special); + firehose_buffer_stream_flush(fb, 
firehose_stream_persist); + firehose_buffer_stream_flush(fb, firehose_stream_memory); +} #endif // !KERNEL static void firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, - firehose_push_reply_t reply, firehose_bank_state_u *state_out) + firehose_push_reply_t reply, bool quarantined, + firehose_bank_state_u *state_out) { + firehose_buffer_header_t fbh = &fb->fb_header; firehose_bank_state_u state; firehose_ring_tail_u otail, ntail; uint64_t old_flushed_pos, bank_updates; uint16_t io_delta = 0; uint16_t mem_delta = 0; - if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_mem_flushed, + if (quarantined) { +#ifndef KERNEL + // this isn't a dispatch_once so that the upcall to libtrace + // can actually log itself without blocking on the gate. + if (async_notif) { + if (os_atomic_xchg(&fbh->fbh_quarantined_state, + FBH_QUARANTINE_STARTED, relaxed) != + FBH_QUARANTINE_STARTED) { + firehose_client_start_quarantine(fb); + } + } else if (os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == + FBH_QUARANTINE_NONE) { + os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_NONE, + FBH_QUARANTINE_PENDING, relaxed); + } +#endif + } + + if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_mem_flushed, reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) { mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos); } - if (firehose_atomic_maxv2o(&fb->fb_header, fbh_bank.fbb_io_flushed, + if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_io_flushed, reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) { io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos); } @@ -481,50 +515,57 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, if (!mem_delta && !io_delta) { if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, + state_out->fbs_atomic_state = os_atomic_load2o(fbh, fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; } - bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | - 
((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_sub2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, relaxed); - if (state_out) *state_out = state; - - os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + __firehose_critical_region_enter(); + os_atomic_rmw_loop2o(fbh, fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; // overflow handles the generation wraps ntail.frp_io_flushed += io_delta; ntail.frp_mem_flushed += mem_delta; }); + + bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | + ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); + state.fbs_atomic_state = os_atomic_sub2o(fbh, + fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + __firehose_critical_region_leave(); + + if (state_out) *state_out = state; + if (async_notif) { if (io_delta) { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_notifs, relaxed); + os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, relaxed); } if (mem_delta) { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_notifs, relaxed); + os_atomic_inc2o(fbh, fbh_bank.fbb_mem_notifs, relaxed); } } } #ifndef KERNEL +OS_NOT_TAIL_CALLED OS_NOINLINE static void -firehose_client_send_push(firehose_buffer_t fb, bool for_io, +firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, firehose_bank_state_u *state_out) { mach_port_t sendp = fb->fb_header.fbh_sendp; firehose_push_reply_t push_reply = { }; qos_class_t qos = qos_class_self(); + boolean_t quarantined = false; kern_return_t kr; if (slowpath(sendp == MACH_PORT_DEAD)) { return; } if (fastpath(sendp)) { - kr = firehose_send_push(sendp, qos, for_io, &push_reply); + kr = firehose_send_push_and_wait(sendp, qos, for_io, + &push_reply, &quarantined); if (likely(kr == KERN_SUCCESS)) { goto success; } @@ -536,7 +577,8 @@ firehose_client_send_push(firehose_buffer_t fb, bool for_io, sendp = firehose_client_reconnect(fb, sendp); if 
(fastpath(MACH_PORT_VALID(sendp))) { - kr = firehose_send_push(sendp, qos, for_io, &push_reply); + kr = firehose_send_push_and_wait(sendp, qos, for_io, + &push_reply, &quarantined); if (likely(kr == KERN_SUCCESS)) { goto success; } @@ -572,12 +614,22 @@ firehose_client_send_push(firehose_buffer_t fb, bool for_io, // There only is a point for multithreaded clients if: // - enough samples (total_flushes above some limits) // - the ratio is really bad (a push per cycle is definitely a problem) - return firehose_client_merge_updates(fb, false, push_reply, state_out); + return firehose_client_merge_updates(fb, false, push_reply, quarantined, + state_out); +} + +OS_NOT_TAIL_CALLED OS_NOINLINE +static void +__FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(firehose_buffer_t fb, + bool for_io, firehose_bank_state_u *state_out) +{ + firehose_client_send_push_and_wait(fb, for_io, state_out); } kern_return_t firehose_client_push_reply(mach_port_t req_port OS_UNUSED, - kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED) + kern_return_t rtc, firehose_push_reply_t push_reply OS_UNUSED, + boolean_t quarantined OS_UNUSED) { DISPATCH_INTERNAL_CRASH(rtc, "firehose_push_reply should never be sent " "to the buffer receive port"); @@ -585,12 +637,12 @@ firehose_client_push_reply(mach_port_t req_port OS_UNUSED, kern_return_t firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED, - firehose_push_reply_t push_reply) + firehose_push_reply_t push_reply, boolean_t quarantined) { // see _dispatch_source_merge_mach_msg_direct dispatch_queue_t dq = _dispatch_queue_get_current(); firehose_buffer_t fb = dispatch_get_context(dq); - firehose_client_merge_updates(fb, true, push_reply, NULL); + firehose_client_merge_updates(fb, true, push_reply, quarantined, NULL); return KERN_SUCCESS; } @@ -611,18 +663,18 @@ firehose_buffer_update_limits(firehose_buffer_t fb) OS_ALWAYS_INLINE static inline firehose_tracepoint_t -firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, 
+firehose_buffer_chunk_init(firehose_chunk_t fc, firehose_tracepoint_query_t ask, uint8_t **privptr) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - uint16_t pub_offs = offsetof(struct firehose_buffer_chunk_s, fbc_data); - uint16_t priv_offs = FIREHOSE_BUFFER_CHUNK_SIZE; + uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data); + uint16_t priv_offs = FIREHOSE_CHUNK_SIZE; pub_offs += roundup(ft_size + ask->pubsize, 8); priv_offs -= ask->privsize; - if (fbc->fbc_pos.fbc_atomic_pos) { + if (fc->fc_pos.fcp_atomic_pos) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the observers // will see memory effects in program (asm) order. @@ -632,32 +684,33 @@ firehose_buffer_chunk_init(firehose_buffer_chunk_t fbc, // and it is dirty, when crawling the chunk, we don't see remnants of // other tracepoints // - // We only do that when the fbc_pos is non zero, because zero means + // We only do that when the fc_pos is non zero, because zero means // we just faulted the chunk, and the kernel already bzero-ed it. 
- bzero(fbc->fbc_data, sizeof(fbc->fbc_data)); + bzero(fc->fc_data, sizeof(fc->fc_data)); } dispatch_compiler_barrier(); // boot starts mach absolute time at 0, and // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP // breaks firehose_buffer_stream_flush() assumptions if (ask->stamp > FIREHOSE_STAMP_SLOP) { - fbc->fbc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; } else { - fbc->fbc_timestamp = 0; + fc->fc_timestamp = 0; } - fbc->fbc_pos = (firehose_buffer_pos_u){ - .fbc_next_entry_offs = pub_offs, - .fbc_private_offs = priv_offs, - .fbc_refcnt = 1, - .fbc_qos_bits = firehose_buffer_qos_bits_propagate(), - .fbc_stream = ask->stream, - .fbc_flag_io = ask->for_io, + fc->fc_pos = (firehose_chunk_pos_u){ + .fcp_next_entry_offs = pub_offs, + .fcp_private_offs = priv_offs, + .fcp_refcnt = 1, + .fcp_qos = firehose_buffer_qos_bits_propagate(), + .fcp_stream = ask->stream, + .fcp_flag_io = ask->for_io, + .fcp_quarantined = ask->quarantined, }; if (privptr) { - *privptr = fbc->fbc_start + priv_offs; + *privptr = fc->fc_start + priv_offs; } - return (firehose_tracepoint_t)fbc->fbc_data; + return (firehose_tracepoint_t)fc->fc_data; } OS_NOINLINE @@ -667,20 +720,25 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, { firehose_stream_state_u state, new_state; firehose_tracepoint_t ft; - firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[ask->stream]; + firehose_buffer_header_t fbh = &fb->fb_header; + firehose_buffer_stream_t fbs = &fbh->fbh_stream[ask->stream]; uint64_t stamp_and_len; if (fastpath(ref)) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - ft = firehose_buffer_chunk_init(fbc, ask, privptr); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); + ft = firehose_buffer_chunk_init(fc, ask, privptr); // Needed for process death handling (tracepoint-begin): // write the length before making the chunk visible - stamp_and_len = ask->stamp - 
fbc->fbc_timestamp; + stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); - +#ifdef KERNEL + ft->ft_thread = thread_tid(current_thread()); +#else + ft->ft_thread = _pthread_threadid_self_np_direct(); +#endif if (ask->stream == firehose_stream_metadata) { - os_atomic_or2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); } // release barrier to make the chunk init visible @@ -711,8 +769,11 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, ft = NULL; } + // pairs with the one in firehose_buffer_tracepoint_reserve() + __firehose_critical_region_leave(); + #ifndef KERNEL - if (unlikely(state.fss_gate.dgl_lock != _dispatch_tid_self())) { + if (unlikely(_dispatch_lock_is_locked_by_self(state.fss_gate.dgl_lock))) { _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate, state.fss_gate.dgl_lock); } @@ -720,10 +781,16 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, if (unlikely(state.fss_current == FIREHOSE_STREAM_STATE_PRISTINE)) { firehose_buffer_update_limits(fb); } + + if (unlikely(os_atomic_load2o(fbh, fbh_quarantined_state, relaxed) == + FBH_QUARANTINE_PENDING)) { + if (os_atomic_cmpxchg2o(fbh, fbh_quarantined_state, + FBH_QUARANTINE_PENDING, FBH_QUARANTINE_STARTED, relaxed)) { + firehose_client_start_quarantine(fb); + } + } #endif // KERNEL - // pairs with the one in firehose_buffer_tracepoint_reserve() - __firehose_critical_region_leave(); return ft; } @@ -750,7 +817,7 @@ static inline uint16_t firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) { const size_t madv_size = - FIREHOSE_BUFFER_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; + FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; const size_t madv_mask = (1ULL << FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT) - 1; @@ -779,12 +846,12 @@ OS_NOINLINE void 
firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) { - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t volatile *fbh_ring; uint16_t volatile *fbh_ring_head; uint16_t head, gen, dummy, idx; - firehose_buffer_pos_u fbc_pos = fbc->fbc_pos; - bool for_io = fbc_pos.fbc_flag_io; + firehose_chunk_pos_u fc_pos = fc->fc_pos; + bool for_io = fc_pos.fcp_flag_io; if (for_io) { fbh_ring = fb->fb_header.fbh_io_ring; @@ -809,7 +876,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) gen = head & FIREHOSE_RING_POS_GEN_MASK; idx = head & FIREHOSE_RING_POS_IDX_MASK; - while (unlikely(!os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + while (unlikely(!os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, relaxed))) { // can only ever happen if a recycler is slow, this requires having // enough cores (>5 for I/O e.g.) @@ -849,7 +916,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) // a thread being preempted here for GEN_MASK worth of ring rotations, // it could lead to the cmpxchg succeed, and have a bogus enqueue // (confused enqueuer) - if (fastpath(os_atomic_cmpxchgvw(&fbh_ring[idx], gen, gen | ref, &dummy, + if (fastpath(os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, relaxed))) { if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, &head, release))) { @@ -871,13 +938,22 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) })); } - pthread_priority_t pp = fbc_pos.fbc_qos_bits; + pthread_priority_t pp = fc_pos.fcp_qos; pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), for_io); #endif } +#ifndef KERNEL +void +firehose_buffer_force_connect(firehose_buffer_t fb) +{ + mach_port_t sendp = fb->fb_header.fbh_sendp; + if (sendp == MACH_PORT_NULL) firehose_client_reconnect(fb, MACH_PORT_NULL); +} +#endif + OS_ALWAYS_INLINE 
static inline uint16_t firehose_buffer_ring_try_recycle(firehose_buffer_t fb) @@ -885,7 +961,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_ring_tail_u pos, old; uint16_t volatile *fbh_ring; uint16_t gen, ref, entry, tail; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; bool for_io; os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, @@ -923,14 +999,14 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) // and it is dirty, it is a chunk being written to that needs a flush gen = (entry & FIREHOSE_RING_POS_GEN_MASK) + FIREHOSE_RING_POS_GEN_INC; ref = entry & FIREHOSE_RING_POS_IDX_MASK; - fbc = firehose_buffer_ref_to_chunk(fb, ref); + fc = firehose_buffer_ref_to_chunk(fb, ref); - if (!for_io && fbc->fbc_pos.fbc_stream == firehose_stream_metadata) { + if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_FULL_BIT, relaxed); + os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); return ref; @@ -939,10 +1015,11 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) #ifndef KERNEL OS_NOINLINE static firehose_tracepoint_t -firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, +firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t fb, firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) { const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); + const uint64_t bank_inc = FIREHOSE_BANK_INC(ask->for_io); firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; firehose_bank_state_u state; uint16_t fbs_max_ref; @@ -951,8 +1028,13 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, if (!fastpath(ask->is_bank_ok)) { state.fbs_atomic_state = os_atomic_load2o(fbb, 
fbb_state.fbs_atomic_state, relaxed); - while (state.fbs_atomic_state & bank_unavail_mask) { - firehose_client_send_push(fb, ask->for_io, &state); + while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) { + if (ask->quarantined) { + __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, + ask->for_io, &state); + } else { + firehose_client_send_push_and_wait(fb, ask->for_io, &state); + } if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { // logd was unloaded, give up return NULL; @@ -984,7 +1066,12 @@ firehose_buffer_tracepoint_reserve_slow2(firehose_buffer_t fb, if (fastpath(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) { break; } - firehose_client_send_push(fb, ask->for_io, NULL); + if (ask->quarantined) { + __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, + ask->for_io, &state); + } else { + firehose_client_send_push_and_wait(fb, ask->for_io, NULL); + } if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { // logd was unloaded, give up break; @@ -1020,7 +1107,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); #ifndef KERNEL state.fbs_atomic_state = os_atomic_add_orig2o(fbb, - fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); + fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire); if (fastpath(!(state.fbs_atomic_state & unavail_mask))) { ask->is_bank_ok = true; if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { @@ -1030,11 +1117,12 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, } } } - return firehose_buffer_tracepoint_reserve_slow2(fb, ask, privptr, ref); + return firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(fb, ask, + privptr, ref); #else firehose_bank_state_u value; ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, - state.fbs_atomic_state, value.fbs_atomic_state, relaxed, { + state.fbs_atomic_state, value.fbs_atomic_state, acquire, { value = state; if 
(slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { os_atomic_rmw_loop_give_up(break); @@ -1067,32 +1155,6 @@ __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, privsize, privptr); } -firehose_tracepoint_t -__firehose_buffer_tracepoint_reserve_with_chunk(firehose_buffer_chunk_t fbc, - uint64_t stamp, firehose_stream_t stream, - uint16_t pubsize, uint16_t privsize, uint8_t **privptr) -{ - - firehose_tracepoint_t ft; - long result; - - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, - pubsize, privsize, privptr); - if (fastpath(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; - } - else { - return NULL; - } -} - firehose_buffer_t __firehose_buffer_create(size_t *size) { @@ -1101,7 +1163,7 @@ __firehose_buffer_create(size_t *size) } if (size) { - *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE; + *size = FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; } return kernel_firehose_buffer; } @@ -1113,33 +1175,12 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void -__firehose_buffer_tracepoint_flush_chunk(firehose_buffer_chunk_t fbc, - firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) -{ - firehose_buffer_pos_u pos; - - // Needed for process death handling (tracepoint-flush): - // We want to make sure the observers - // will see memory effects in program (asm) order. - // 1. write all the data to the tracepoint - // 2. 
write the tracepoint ID, so that seeing it means the tracepoint - // is valid - ft->ft_thread = thread_tid(current_thread()); - - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - return; -} - void __firehose_merge_updates(firehose_push_reply_t update) { firehose_buffer_t fb = kernel_firehose_buffer; if (fastpath(fb)) { - firehose_client_merge_updates(fb, true, update, NULL); + firehose_client_merge_updates(fb, true, update, false, NULL); } } #endif // KERNEL diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h index db8e02629..e41d9cb29 100644 --- a/src/firehose/firehose_buffer_internal.h +++ b/src/firehose/firehose_buffer_internal.h @@ -171,13 +171,18 @@ typedef struct firehose_buffer_header_s { dispatch_once_t fbh_notifs_pred OS_ALIGNED(64); dispatch_source_t fbh_notifs_source; dispatch_unfair_lock_s fbh_logd_lock; +#define FBH_QUARANTINE_NONE 0 +#define FBH_QUARANTINE_PENDING 1 +#define FBH_QUARANTINE_STARTED 2 + uint8_t volatile fbh_quarantined_state; + bool fbh_quarantined; #endif uint64_t fbh_unused[0]; -} OS_ALIGNED(FIREHOSE_BUFFER_CHUNK_SIZE) *firehose_buffer_header_t; +} OS_ALIGNED(FIREHOSE_CHUNK_SIZE) *firehose_buffer_header_t; union firehose_buffer_u { struct firehose_buffer_header_s fb_header; - struct firehose_buffer_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; + struct firehose_chunk_s fb_chunks[FIREHOSE_BUFFER_CHUNK_COUNT]; }; // used to let the compiler pack these values in 1 or 2 registers @@ -187,6 +192,7 @@ typedef struct firehose_tracepoint_query_s { firehose_stream_t stream; bool is_bank_ok; bool for_io; + bool quarantined; uint64_t stamp; } *firehose_tracepoint_query_t; @@ -206,6 +212,9 @@ firehose_buffer_update_limits(firehose_buffer_t fb); void firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); +void 
+firehose_buffer_force_connect(firehose_buffer_t fb); + #endif #endif // __FIREHOSE_BUFFER_INTERNAL__ diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index 95768825f..3939ee25b 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -55,17 +55,11 @@ firehose_mach_port_allocate(uint32_t flags, void *ctx) mach_port_options_t opts = { .flags = flags, }; - kern_return_t kr; - - for (;;) { - kr = mach_port_construct(mach_task_self(), &opts, - (mach_port_context_t)ctx, &port); - if (fastpath(kr == KERN_SUCCESS)) { - break; - } + kern_return_t kr = mach_port_construct(mach_task_self(), &opts, + (mach_port_context_t)ctx, &port); + if (unlikely(kr)) { DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - _dispatch_temporary_resource_shortage(); + DISPATCH_CLIENT_CRASH(kr, "Unable to allocate mach port"); } return port; } @@ -142,36 +136,28 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, #pragma mark firehose buffer OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_chunk_for_address(void *addr) { - uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_BUFFER_CHUNK_SIZE - 1); - return (firehose_buffer_chunk_t)chunk_addr; + uintptr_t chunk_addr = (uintptr_t)addr & ~(FIREHOSE_CHUNK_SIZE - 1); + return (firehose_chunk_t)chunk_addr; } OS_ALWAYS_INLINE static inline uint16_t -firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_buffer_chunk_t fbc) +firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc) { return (uint16_t)(fbc - fb->fb_chunks); } OS_ALWAYS_INLINE -static inline firehose_buffer_chunk_t +static inline firehose_chunk_t firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) { return fb->fb_chunks + ref; } #ifndef FIREHOSE_SERVER - -OS_ALWAYS_INLINE -static inline bool -firehose_buffer_pos_fits(firehose_buffer_pos_u pos, uint16_t size) -{ - return 
pos.fbc_next_entry_offs + size <= pos.fbc_private_offs; -} - #if DISPATCH_PURE_C OS_ALWAYS_INLINE @@ -188,84 +174,13 @@ firehose_buffer_qos_bits_propagate(void) #endif } -OS_ALWAYS_INLINE -static inline long -firehose_buffer_chunk_try_reserve(firehose_buffer_chunk_t fbc, uint64_t stamp, - firehose_stream_t stream, uint16_t pubsize, - uint16_t privsize, uint8_t **privptr) -{ - const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); - firehose_buffer_pos_u orig, pos; - uint8_t qos_bits = firehose_buffer_qos_bits_propagate(); - bool reservation_failed, stamp_delta_fits; - - stamp_delta_fits = ((stamp - fbc->fbc_timestamp) >> 48) == 0; - - // no acquire barrier because the returned space is written to only - os_atomic_rmw_loop2o(fbc, fbc_pos.fbc_atomic_pos, - orig.fbc_atomic_pos, pos.fbc_atomic_pos, relaxed, { - if (unlikely(orig.fbc_atomic_pos == 0)) { - // we acquired a really really old reference, and we probably - // just faulted in a new page - // FIXME: if/when we hit this we should try to madvise it back FREE - os_atomic_rmw_loop_give_up(return 0); - } - if (unlikely(!FIREHOSE_BUFFER_POS_USABLE_FOR_STREAM(orig, stream))) { - // nothing to do if the chunk is full, or the stream doesn't match, - // in which case the thread probably: - // - loaded the chunk ref - // - been suspended a long while - // - read the chunk to find a very old thing - os_atomic_rmw_loop_give_up(return 0); - } - pos = orig; - pos.fbc_qos_bits |= qos_bits; - if (unlikely(!firehose_buffer_pos_fits(orig, - ft_size + pubsize + privsize) || !stamp_delta_fits)) { - pos.fbc_flag_full = true; - reservation_failed = true; - } else { - // using these *_INC macros is so that the compiler generates better - // assembly: using the struct individual fields forces the compiler - // to handle carry propagations, and we know it won't happen - pos.fbc_atomic_pos += roundup(ft_size + pubsize, 8) * - FIREHOSE_BUFFER_POS_ENTRY_OFFS_INC; - pos.fbc_atomic_pos -= privsize * - 
FIREHOSE_BUFFER_POS_PRIVATE_OFFS_INC; - pos.fbc_atomic_pos += FIREHOSE_BUFFER_POS_REFCNT_INC; - const uint16_t minimum_payload_size = 16; - if (!firehose_buffer_pos_fits(pos, - roundup(ft_size + minimum_payload_size , 8))) { - // if we can't even have minimum_payload_size bytes of payload - // for the next tracepoint, just flush right away - pos.fbc_flag_full = true; - } - reservation_failed = false; - } - }); - - if (reservation_failed) { - if (pos.fbc_refcnt) { - // nothing to do, there is a thread writing that will pick up - // the "FULL" flag on flush and push as a consequence - return 0; - } - // caller must enqueue chunk - return -1; - } - if (privptr) { - *privptr = fbc->fbc_start + pos.fbc_private_offs; - } - return orig.fbc_next_entry_offs; -} - OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; uint64_t stamp = UINT64_MAX; // will cause the reservation to fail uint16_t ref; long result; @@ -275,11 +190,15 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away +#ifndef KERNEL + firehose_buffer_force_connect(fb); +#endif return; } - fbc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, 1, 0, NULL); + fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); + result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), 1, 0, NULL); if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); } @@ -339,8 +258,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, { firehose_buffer_stream_t fbs = 
&fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; - firehose_tracepoint_t ft; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; #if KERNEL bool failable = false; #endif @@ -356,18 +274,19 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, ref = old_state.fss_current; if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { - fbc = firehose_buffer_ref_to_chunk(fb, ref); - result = firehose_buffer_chunk_try_reserve(fbc, stamp, stream, + fc = firehose_buffer_ref_to_chunk(fb, ref); + result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, + firehose_buffer_qos_bits_propagate(), pubsize, privsize, privptr); if (likely(result > 0)) { - ft = (firehose_tracepoint_t)(fbc->fbc_start + result); - stamp -= fbc->fbc_timestamp; - stamp |= (uint64_t)pubsize << 48; - // Needed for process death handling (tracepoint-begin) - // see firehose_buffer_stream_chunk_install - os_atomic_store2o(ft, ft_stamp_and_length, stamp, relaxed); - dispatch_compiler_barrier(); - return ft; + uint64_t thread; +#ifdef KERNEL + thread = thread_tid(current_thread()); +#else + thread = _pthread_threadid_self_np_direct(); +#endif + return firehose_chunk_tracepoint_begin(fc, + stamp, pubsize, thread, result); } if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); @@ -400,9 +319,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, #if KERNEL new_state.fss_allocator = (uint32_t)cpu_number(); #else - new_state.fss_allocator = _dispatch_tid_self(); + new_state.fss_allocator = _dispatch_lock_value_for_self(); #endif - success = os_atomic_cmpxchgvw2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (likely(success)) { @@ -416,6 +335,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, .privsize = privsize, .stream = 
stream, .for_io = (firehose_stream_uses_io_bank & (1UL << stream)) != 0, +#ifndef KERNEL + .quarantined = fb->fb_header.fbh_quarantined, +#endif .stamp = stamp, }; return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); @@ -444,8 +366,7 @@ static inline void firehose_buffer_tracepoint_flush(firehose_buffer_t fb, firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) { - firehose_buffer_chunk_t fbc = firehose_buffer_chunk_for_address(ft); - firehose_buffer_pos_u pos; + firehose_chunk_t fc = firehose_buffer_chunk_for_address(ft); // Needed for process death handling (tracepoint-flush): // We want to make sure the observers @@ -453,17 +374,8 @@ firehose_buffer_tracepoint_flush(firehose_buffer_t fb, // 1. write all the data to the tracepoint // 2. write the tracepoint ID, so that seeing it means the tracepoint // is valid -#ifdef KERNEL - ft->ft_thread = thread_tid(current_thread()); -#else - ft->ft_thread = _pthread_threadid_self_np_direct(); -#endif - // release barrier makes the log writes visible - os_atomic_store2o(ft, ft_id.ftid_value, ftid.ftid_value, release); - pos.fbc_atomic_pos = os_atomic_sub2o(fbc, fbc_pos.fbc_atomic_pos, - FIREHOSE_BUFFER_POS_REFCNT_INC, relaxed); - if (pos.fbc_refcnt == 0 && pos.fbc_flag_full) { - firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fbc)); + if (firehose_chunk_tracepoint_end(fc, ft, ftid)) { + firehose_buffer_ring_enqueue(fb, firehose_buffer_chunk_to_ref(fb, fc)); } } diff --git a/src/firehose/firehose_internal.h b/src/firehose/firehose_internal.h index 29d1ad240..7040995d1 100644 --- a/src/firehose/firehose_internal.h +++ b/src/firehose/firehose_internal.h @@ -29,6 +29,8 @@ #define __MigTypeCheck 1 #endif +#define fcp_quarntined fcp_quarantined + #include #include #include diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs index 124defa59..c08054516 100644 --- a/src/firehose/firehose_reply.defs +++ b/src/firehose/firehose_reply.defs @@ -33,11 +33,13 @@ skip; // 
firehose_register simpleroutine push_reply( RequestPort req_port : mach_port_move_send_once_t; in rtc : kern_return_t; -in push_reply : firehose_push_reply_t +in push_reply : firehose_push_reply_t; +in quarantined : boolean_t ); simpleroutine push_notify_async( RequestPort comm_port : mach_port_t; in push_reply : firehose_push_reply_t; +in quarantined : boolean_t; WaitTime timeout : natural_t ); diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index a6be2fab7..ba335dbe3 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -31,6 +31,11 @@ _Static_assert(offsetof(struct firehose_client_s, fc_mem_sent_flushed_pos) % 8 == 0, "Make sure atomic fields are properly aligned"); #endif +typedef struct fs_client_queue_s { + struct firehose_client_s *volatile fs_client_head; + struct firehose_client_s *volatile fs_client_tail; +} fs_client_queue_s, *fs_client_queue_t; + static struct firehose_server_s { mach_port_t fs_bootstrap_port; dispatch_mach_t fs_mach_channel; @@ -41,24 +46,161 @@ static struct firehose_server_s { firehose_handler_t fs_handler; firehose_snapshot_t fs_snapshot; - int fs_kernel_fd; firehose_client_t fs_kernel_client; TAILQ_HEAD(, firehose_client_s) fs_clients; + os_unfair_lock fs_clients_lock; + fs_client_queue_s fs_queues[4]; + dispatch_source_t fs_sources[4]; } server_config = { .fs_clients = TAILQ_HEAD_INITIALIZER(server_config.fs_clients), + .fs_clients_lock = OS_UNFAIR_LOCK_INIT, .fs_kernel_fd = -1, }; -#pragma mark - -#pragma mark firehose client state machine +OS_ALWAYS_INLINE +static inline void +fs_clients_lock(void) +{ + os_unfair_lock_lock_with_options(&server_config.fs_clients_lock, + OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION); +} + +OS_ALWAYS_INLINE +static inline void +fs_clients_unlock(void) +{ + os_unfair_lock_unlock(&server_config.fs_clients_lock); +} static void firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr); static void firehose_client_cancel(firehose_client_t 
fc); static void firehose_client_snapshot_finish(firehose_client_t fc, firehose_snapshot_t snapshot, bool for_io); +static void firehose_client_handle_death(void *ctxt); + +#pragma mark - +#pragma mark firehose client enqueueing + +OS_ALWAYS_INLINE +static inline bool +fs_idx_is_for_io(size_t idx) +{ + return idx & 1; +} + +OS_ALWAYS_INLINE +static inline bool +fs_queue_is_for_io(fs_client_queue_t q) +{ + return (q - server_config.fs_queues) & 1; +} + +OS_ALWAYS_INLINE +static inline bool +fs_queue_is_for_quarantined(fs_client_queue_t q) +{ + return (q - server_config.fs_queues) & 2; +} + +OS_ALWAYS_INLINE +static inline fs_client_queue_t +fs_queue(bool quarantined, bool for_io) +{ + return &server_config.fs_queues[quarantined * 2 + for_io]; +} + +OS_ALWAYS_INLINE +static inline dispatch_source_t +fs_source(bool quarantined, bool for_io) +{ + return server_config.fs_sources[quarantined * 2 + for_io]; +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_push(firehose_client_t fc, pthread_priority_t pp, + bool quarantined, bool for_io) +{ + fs_client_queue_t queue = fs_queue(quarantined, for_io); + if (fc && os_mpsc_push_update_tail(queue, fs_client, fc, fc_next[for_io])) { + os_mpsc_push_update_head(queue, fs_client, fc); + _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1); + } else if (pp) { + _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1); + } +} + +OS_ALWAYS_INLINE +static inline bool +firehose_client_wakeup(firehose_client_t fc, pthread_priority_t pp, + bool for_io) +{ + uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); + uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uintptr_t old_state, new_state; + + os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { + if (old_state & canceled_bit) { + os_atomic_rmw_loop_give_up(return false); + } + if (old_state & enqueued_bit) { + os_atomic_rmw_loop_give_up(break); + } + new_state = old_state | enqueued_bit; + }); + firehose_client_push(old_state & 
enqueued_bit ? NULL : fc, pp, + fc->fc_quarantined, for_io); + return true; +} + +OS_ALWAYS_INLINE +static inline void +firehose_client_start_cancel(firehose_client_t fc, bool for_io) +{ + uintptr_t canceling_bit = FC_STATE_CANCELING(for_io); + uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); + uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uintptr_t old_state, new_state; + + os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { + if (old_state & (canceled_bit | canceling_bit)) { + os_atomic_rmw_loop_give_up(return); + } + new_state = old_state | enqueued_bit | canceling_bit; + }); + firehose_client_push(old_state & enqueued_bit ? NULL : fc, 0, + fc->fc_quarantined, for_io); +} + +OS_ALWAYS_INLINE +static inline bool +firehose_client_dequeue(firehose_client_t fc, bool for_io) +{ + uintptr_t canceling_bit = FC_STATE_CANCELING(for_io); + uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); + uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uintptr_t old_state, new_state; + + os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { + new_state = old_state & ~(canceling_bit | enqueued_bit); + if (old_state & canceling_bit) { + new_state |= canceled_bit; + } + }); + + if (((old_state ^ new_state) & FC_STATE_CANCELED_MASK) && + (new_state & FC_STATE_CANCELED_MASK) == FC_STATE_CANCELED_MASK) { + dispatch_async_f(server_config.fs_io_drain_queue, fc, + firehose_client_handle_death); + } + return !(new_state & canceled_bit); +} + +#pragma mark - +#pragma mark firehose client state machine static void firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) @@ -74,15 +216,17 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (ioctl(server_config.fs_kernel_fd, LOGFLUSHED, &push_reply) < 0) { dispatch_assume_zero(errno); } } else { if (reply_port == fc->fc_sendp) { - kr 
= firehose_send_push_notify_async(reply_port, push_reply, 0); + kr = firehose_send_push_notify_async(reply_port, push_reply, + fc->fc_quarantined, 0); } else { - kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply); + kr = firehose_send_push_reply(reply_port, KERN_SUCCESS, push_reply, + fc->fc_quarantined); } if (kr != MACH_SEND_INVALID_DEST) { DISPATCH_VERIFY_MIG(kr); @@ -104,18 +248,6 @@ firehose_client_acquire_head(firehose_buffer_t fb, bool for_io) return head; } -OS_ALWAYS_INLINE -static inline void -firehose_client_push_async_merge(firehose_client_t fc, pthread_priority_t pp, - bool for_io) -{ - if (for_io) { - _dispatch_source_merge_data(fc->fc_io_source, pp, 1); - } else { - _dispatch_source_merge_data(fc->fc_mem_source, pp, 1); - } -} - OS_NOINLINE OS_COLD static void firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port) @@ -129,7 +261,7 @@ firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port) if (reply_port) { kern_return_t kr = firehose_send_push_reply(reply_port, 0, - FIREHOSE_PUSH_REPLY_CORRUPTED); + FIREHOSE_PUSH_REPLY_CORRUPTED, false); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); } @@ -154,10 +286,10 @@ firehose_client_snapshot_mark_done(firehose_client_t fc, OS_NOINLINE static void -firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) +firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags) { firehose_buffer_t fb = fc->fc_buffer; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fbc; firehose_event_t evt; uint16_t volatile *fbh_ring; uint16_t flushed, ref, count = 0; @@ -172,9 +304,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_io_ring; sent_flushed = (uint16_t)fc->fc_io_sent_flushed_pos; flushed = (uint16_t)fc->fc_io_flushed_pos; - if (fc->fc_needs_io_snapshot) { - snapshot = server_config.fs_snapshot; - } + if (fc->fc_needs_io_snapshot) snapshot = 
server_config.fs_snapshot; } else { evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED; _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED == @@ -182,9 +312,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) fbh_ring = fb->fb_header.fbh_mem_ring; sent_flushed = (uint16_t)fc->fc_mem_sent_flushed_pos; flushed = (uint16_t)fc->fc_mem_flushed_pos; - if (fc->fc_needs_mem_snapshot) { - snapshot = server_config.fs_snapshot; - } + if (fc->fc_needs_mem_snapshot) snapshot = server_config.fs_snapshot; } if (slowpath(fc->fc_memory_corrupted)) { @@ -209,7 +337,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK; ref = os_atomic_load(&fbh_ring[ref], relaxed); ref &= FIREHOSE_RING_POS_IDX_MASK; - } while (fc->fc_is_kernel && !ref); + } while (!fc->fc_pid && !ref); count++; if (!ref) { _dispatch_debug("Ignoring invalid page reference in ring: %d", ref); @@ -217,10 +345,17 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) } fbc = firehose_buffer_ref_to_chunk(fb, ref); + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + // serialize with firehose_client_metadata_stream_peek + os_unfair_lock_lock(&fc->fc_lock); + } server_config.fs_handler(fc, evt, fbc); if (slowpath(snapshot)) { snapshot->handler(fc, evt, fbc); } + if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + os_unfair_lock_unlock(&fc->fc_lock); + } // clients not using notifications (single threaded) always drain fully // because they use all their limit, always } while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot); @@ -238,7 +373,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) client_flushed = os_atomic_load2o(&fb->fb_header, fbh_ring_tail.frp_mem_flushed, relaxed); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { // will fire firehose_client_notify() because port is MACH_PORT_DEAD port = fc->fc_sendp; } else if (!port && 
client_flushed == sent_flushed && fc->fc_use_notifs) { @@ -253,7 +388,7 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) if (port) { firehose_client_notify(fc, port); } - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { if (!(flags & FIREHOSE_DRAIN_POLL)) { // see firehose_client_kernel_source_handle_event dispatch_resume(fc->fc_kernel_source); @@ -264,12 +399,12 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) // and there's more to drain, so optimistically schedule draining // again this is cheap since the queue is hot, and is fair for other // clients - firehose_client_push_async_merge(fc, 0, for_io); + firehose_client_wakeup(fc, 0, for_io); } if (count && server_config.fs_kernel_client) { // the kernel is special because it can drop messages, so if we're // draining, poll the kernel each time while we're bound to a thread - firehose_client_drain(server_config.fs_kernel_client, + firehose_client_drain_one(server_config.fs_kernel_client, MACH_PORT_NULL, flags | FIREHOSE_DRAIN_POLL); } } @@ -283,21 +418,37 @@ firehose_client_drain(firehose_client_t fc, mach_port_t port, uint32_t flags) // from now on all IO/mem drains depending on `for_io` will be no-op // (needs__snapshot: false, memory_corrupted: true). we can safely // silence the corresponding source of drain wake-ups. - if (!fc->fc_is_kernel) { - dispatch_source_cancel(for_io ? 
fc->fc_io_source : fc->fc_mem_source); + if (fc->fc_pid) { + firehose_client_start_cancel(fc, for_io); } } static void -firehose_client_drain_io_async(void *ctx) -{ - firehose_client_drain(ctx, MACH_PORT_NULL, FIREHOSE_DRAIN_FOR_IO); -} - -static void -firehose_client_drain_mem_async(void *ctx) +firehose_client_drain(void *ctxt) { - firehose_client_drain(ctx, MACH_PORT_NULL, 0); + fs_client_queue_t queue = ctxt; + bool for_io = fs_queue_is_for_io(queue); + bool quarantined = fs_queue_is_for_quarantined(queue); + firehose_client_t fc, fc_next; + size_t clients = 0; + + while (queue->fs_client_tail) { + fc = os_mpsc_get_head(queue, fs_client); + do { + fc_next = os_mpsc_pop_head(queue, fs_client, fc, fc_next[for_io]); + if (firehose_client_dequeue(fc, for_io)) { + firehose_client_drain_one(fc, MACH_PORT_NULL, + for_io ? FIREHOSE_DRAIN_FOR_IO : 0); + } + // process quarantined clients 4 times as slow as the other ones + // also reasyncing every 4 clients allows for discovering + // quarantined suspension faster + if (++clients == (quarantined ? 
1 : 4)) { + dispatch_source_merge_data(fs_source(quarantined, for_io), 1); + return; + } + } while ((fc = fc_next)); + } } OS_NOINLINE @@ -326,7 +477,12 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); + fs_clients_lock(); TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); + fs_clients_unlock(); + + dispatch_release(fc->fc_mach_channel); + fc->fc_mach_channel = NULL; fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; _os_object_release(&fc->fc_as_os_object); @@ -383,26 +539,26 @@ firehose_client_handle_death(void *ctxt) // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (!fbc->fbc_pos.fbc_flag_io) { + if (!fbc->fc_pos.fcp_flag_io) { mem_bitmap |= 1ULL << ref; continue; } server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_io_snapshot && snapshot) { + if (fc->fc_needs_io_snapshot) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); } } @@ -416,11 +572,11 @@ firehose_client_handle_death(void *ctxt) while (mem_bitmap_copy) { uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); - 
firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); mem_bitmap_copy &= ~(1ULL << ref); server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); - if (fc->fc_needs_mem_snapshot && snapshot) { + if (fc->fc_needs_mem_snapshot) { snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); } } @@ -434,18 +590,13 @@ static void firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED) { - mach_msg_header_t *msg_hdr; + mach_msg_header_t *msg_hdr = NULL; firehose_client_t fc = ctx; - mach_port_t oldsendp, oldrecvp; - - if (dmsg) { - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - oldsendp = msg_hdr->msgh_remote_port; - oldrecvp = msg_hdr->msgh_local_port; - } + mach_port_t port; switch (reason) { case DISPATCH_MACH_MESSAGE_RECEIVED: + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", firehose_client_get_unique_pid(fc, NULL)); @@ -456,25 +607,33 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, break; case DISPATCH_MACH_DISCONNECTED: - if (oldsendp) { - if (slowpath(oldsendp != fc->fc_sendp)) { - DISPATCH_INTERNAL_CRASH(oldsendp, - "disconnect event about unknown send-right"); + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); + port = msg_hdr->msgh_remote_port; + if (MACH_PORT_VALID(port)) { + if (port != fc->fc_sendp) { + DISPATCH_INTERNAL_CRASH(port, "Unknown send-right"); } firehose_mach_port_send_release(fc->fc_sendp); fc->fc_sendp = MACH_PORT_NULL; } - if (oldrecvp) { - if (slowpath(oldrecvp != fc->fc_recvp)) { - DISPATCH_INTERNAL_CRASH(oldrecvp, - "disconnect event about unknown receive-right"); + port = msg_hdr->msgh_local_port; + if (MACH_PORT_VALID(port)) { + if (port != fc->fc_recvp) { + DISPATCH_INTERNAL_CRASH(port, "Unknown recv-right"); } 
firehose_mach_port_recv_dispose(fc->fc_recvp, fc); fc->fc_recvp = MACH_PORT_NULL; } - if (fc->fc_recvp == MACH_PORT_NULL && fc->fc_sendp == MACH_PORT_NULL) { - firehose_client_cancel(fc); + break; + + case DISPATCH_MACH_CANCELED: + if (MACH_PORT_VALID(fc->fc_sendp)) { + DISPATCH_INTERNAL_CRASH(fc->fc_sendp, "send-right leak"); + } + if (MACH_PORT_VALID(fc->fc_recvp)) { + DISPATCH_INTERNAL_CRASH(fc->fc_recvp, "recv-right leak"); } + firehose_client_cancel(fc); break; } } @@ -488,10 +647,8 @@ firehose_client_kernel_source_handle_event(void *ctxt) // resumed in firehose_client_drain for both memory and I/O dispatch_suspend(fc->fc_kernel_source); dispatch_suspend(fc->fc_kernel_source); - dispatch_async_f(server_config.fs_mem_drain_queue, - fc, firehose_client_drain_mem_async); - dispatch_async_f(server_config.fs_io_drain_queue, - fc, firehose_client_drain_io_async); + firehose_client_wakeup(fc, 0, false); + firehose_client_wakeup(fc, 0, true); } #endif @@ -500,41 +657,37 @@ firehose_client_resume(firehose_client_t fc, const struct firehose_client_connected_info_s *fcci) { dispatch_assert_queue(server_config.fs_io_drain_queue); + + fs_clients_lock(); TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); + fs_clients_unlock(); + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci); - if (fc->fc_is_kernel) { + if (!fc->fc_pid) { dispatch_activate(fc->fc_kernel_source); } else { dispatch_mach_connect(fc->fc_mach_channel, fc->fc_recvp, fc->fc_sendp, NULL); - dispatch_activate(fc->fc_io_source); - dispatch_activate(fc->fc_mem_source); } } static void firehose_client_cancel(firehose_client_t fc) { - dispatch_mach_t dm; - dispatch_block_t block; - _dispatch_debug("client died (unique_pid: 0x%llx", firehose_client_get_unique_pid(fc, NULL)); - dm = fc->fc_mach_channel; - fc->fc_mach_channel = NULL; - dispatch_release(dm); - + if (MACH_PORT_VALID(fc->fc_sendp)) { + firehose_mach_port_send_release(fc->fc_sendp); + fc->fc_sendp = MACH_PORT_NULL; + } + 
if (MACH_PORT_VALID(fc->fc_recvp)) { + firehose_mach_port_recv_dispose(fc->fc_recvp, fc); + fc->fc_recvp = MACH_PORT_NULL; + } fc->fc_use_notifs = false; - dispatch_source_cancel(fc->fc_io_source); - dispatch_source_cancel(fc->fc_mem_source); - - block = dispatch_block_create(DISPATCH_BLOCK_DETACHED, ^{ - dispatch_async_f(server_config.fs_io_drain_queue, fc, - firehose_client_handle_death); - }); - dispatch_async(server_config.fs_mem_drain_queue, block); - _Block_release(block); + firehose_client_start_cancel(fc, false); + firehose_client_start_cancel(fc, true); } static firehose_client_t @@ -552,32 +705,30 @@ _firehose_client_create(firehose_buffer_t fb) return fc; } +#pragma pack(4) +typedef struct firehose_token_s { + uid_t auid; + uid_t euid; + gid_t egid; + uid_t ruid; + gid_t rgid; + pid_t pid; + au_asid_t asid; + dev_t execcnt; +} *firehose_token_t; +#pragma pack() + static firehose_client_t -firehose_client_create(firehose_buffer_t fb, +firehose_client_create(firehose_buffer_t fb, firehose_token_t token, mach_port_t comm_recvp, mach_port_t comm_sendp) { uint64_t unique_pid = fb->fb_header.fbh_uniquepid; firehose_client_t fc = _firehose_client_create(fb); dispatch_mach_t dm; - dispatch_source_t ds; - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, - server_config.fs_mem_drain_queue); - _os_object_retain_internal_inline(&fc->fc_as_os_object); - dispatch_set_context(ds, fc); - dispatch_set_finalizer_f(ds, - (dispatch_function_t)_os_object_release_internal); - dispatch_source_set_event_handler_f(ds, firehose_client_drain_mem_async); - fc->fc_mem_source = ds; - - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, - server_config.fs_io_drain_queue); - _os_object_retain_internal_inline(&fc->fc_as_os_object); - dispatch_set_context(ds, fc); - dispatch_set_finalizer_f(ds, - (dispatch_function_t)_os_object_release_internal); - dispatch_source_set_event_handler_f(ds, firehose_client_drain_io_async); - fc->fc_io_source = ds; + fc->fc_pid = 
token->pid ? token->pid : ~0; + fc->fc_euid = token->euid; + fc->fc_pidversion = token->execcnt; _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid); fc->fc_recvp = comm_recvp; @@ -617,12 +768,11 @@ firehose_kernel_client_create(void) DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); } if (fb_map.fbmi_size != - FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_BUFFER_CHUNK_SIZE) { + FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) { DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); } fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); - fc->fc_is_kernel = true; ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, fs->fs_ipc_queue); dispatch_set_context(ds, fc); @@ -651,24 +801,27 @@ _firehose_client_xref_dispose(firehose_client_t fc) { _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", firehose_client_get_unique_pid(fc, NULL)); - - dispatch_release(fc->fc_io_source); - fc->fc_io_source = NULL; - - dispatch_release(fc->fc_mem_source); - fc->fc_mem_source = NULL; } uint64_t firehose_client_get_unique_pid(firehose_client_t fc, pid_t *pid_out) { firehose_buffer_header_t fbh = &fc->fc_buffer->fb_header; - if (fc->fc_is_kernel) { - if (pid_out) *pid_out = 0; - return 0; - } - if (pid_out) *pid_out = fbh->fbh_pid ?: ~(pid_t)0; - return fbh->fbh_uniquepid ?: ~0ull; + if (pid_out) *pid_out = fc->fc_pid; + if (!fc->fc_pid) return 0; + return fbh->fbh_uniquepid ? 
fbh->fbh_uniquepid : ~0ull; +} + +uid_t +firehose_client_get_euid(firehose_client_t fc) +{ + return fc->fc_euid; +} + +int +firehose_client_get_pid_version(firehose_client_t fc) +{ + return fc->fc_pidversion; } void * @@ -692,6 +845,12 @@ firehose_client_set_context(firehose_client_t fc, void *ctxt) return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed); } +void +firehose_client_initiate_quarantine(firehose_client_t fc) +{ + fc->fc_quarantined = true; +} + #pragma mark - #pragma mark firehose server @@ -720,22 +879,24 @@ void firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) { struct firehose_server_s *fs = &server_config; - dispatch_queue_attr_t attr; + dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL; + dispatch_queue_attr_t attr_ui; dispatch_mach_t dm; + dispatch_source_t ds; // just reference the string so that it's captured (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed); - attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, + attr_ui = dispatch_queue_attr_make_with_qos_class(attr, QOS_CLASS_USER_INITIATED, 0); fs->fs_ipc_queue = dispatch_queue_create_with_target( - "com.apple.firehose.ipc", attr, NULL); + "com.apple.firehose.ipc", attr_ui, NULL); fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( - "com.apple.firehose.snapshot-gate", DISPATCH_QUEUE_SERIAL, NULL); + "com.apple.firehose.snapshot-gate", attr, NULL); fs->fs_io_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-io", DISPATCH_QUEUE_SERIAL, NULL); + "com.apple.firehose.drain-io", attr, NULL); fs->fs_mem_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-mem", DISPATCH_QUEUE_SERIAL, NULL); + "com.apple.firehose.drain-mem", attr, NULL); dm = dispatch_mach_create_f("com.apple.firehose.listener", fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event); @@ -743,6 +904,15 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) 
fs->fs_mach_channel = dm; fs->fs_handler = _Block_copy(handler); firehose_kernel_client_create(); + + for (size_t i = 0; i < countof(fs->fs_sources); i++) { + ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_OR, 0, 0, + fs_idx_is_for_io(i) ? server_config.fs_io_drain_queue : + server_config.fs_mem_drain_queue); + dispatch_set_context(ds, &fs->fs_queues[i]); + dispatch_source_set_event_handler_f(ds, firehose_client_drain); + fs->fs_sources[i] = ds; + } } void @@ -760,6 +930,17 @@ firehose_server_assert_spi_version(uint32_t spi_version) } } +bool +firehose_server_has_ever_flushed_pages(void) +{ + // Use the IO pages flushed count from the kernel client as an + // approximation for whether the firehose has ever flushed pages during + // this boot. logd uses this detect the first time it starts after a + // fresh boot. + firehose_client_t fhc = server_config.fs_kernel_client; + return !fhc || fhc->fc_io_flushed_pos > 0; +} + void firehose_server_resume(void) { @@ -775,54 +956,115 @@ firehose_server_resume(void) } dispatch_mach_connect(fs->fs_mach_channel, fs->fs_bootstrap_port, MACH_PORT_NULL, NULL); + for (size_t i = 0; i < countof(fs->fs_sources); i++) { + dispatch_activate(fs->fs_sources[i]); + } } -#pragma mark - -#pragma mark firehose snapshot and peeking +void +firehose_server_cancel(void) +{ + firehose_client_t fc; + + dispatch_mach_cancel(server_config.fs_mach_channel); + + fs_clients_lock(); + TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) { + dispatch_mach_cancel(fc->fc_mach_channel); + } + fs_clients_unlock(); +} + +dispatch_queue_t +firehose_server_copy_queue(firehose_server_queue_t which) +{ + dispatch_queue_t dq; + switch (which) { + case FIREHOSE_SERVER_QUEUE_IO: + dq = server_config.fs_io_drain_queue; + break; + case FIREHOSE_SERVER_QUEUE_MEMORY: + dq = server_config.fs_mem_drain_queue; + break; + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); + } + dispatch_retain(dq); + return dq; +} void 
-firehose_client_metadata_stream_peek(firehose_client_t fc, - firehose_event_t context, bool (^peek_should_start)(void), - bool (^peek)(firehose_buffer_chunk_t fbc)) +firehose_server_quarantined_suspend(firehose_server_queue_t which) { - if (context != FIREHOSE_EVENT_MEM_BUFFER_RECEIVED) { - return dispatch_sync(server_config.fs_mem_drain_queue, ^{ - firehose_client_metadata_stream_peek(fc, - FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, peek_should_start, peek); - }); + switch (which) { + case FIREHOSE_SERVER_QUEUE_IO: + dispatch_suspend(fs_source(true, true)); + break; + case FIREHOSE_SERVER_QUEUE_MEMORY: + dispatch_suspend(fs_source(true, false)); + break; + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); } +} - if (peek_should_start && !peek_should_start()) { - return; +void +firehose_server_quarantined_resume(firehose_server_queue_t which) +{ + switch (which) { + case FIREHOSE_SERVER_QUEUE_IO: + dispatch_resume(fs_source(true, true)); + break; + case FIREHOSE_SERVER_QUEUE_MEMORY: + dispatch_resume(fs_source(true, false)); + break; + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); } +} - firehose_buffer_t fb = fc->fc_buffer; - firehose_buffer_header_t fbh = &fb->fb_header; - uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; - while (bitmap) { - uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; +#pragma mark - +#pragma mark firehose snapshot and peeking - bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { - // this page has its "recycle-requeue" done, but hasn't gone - // through "recycle-reuse", or it has no data, ditch it - continue; - } - if (!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { - // this thing has data, but the first tracepoint is unreadable - // so also just ditch it - continue; - } - if (fbc->fbc_pos.fbc_stream != 
firehose_stream_metadata) { - continue; - } - if (!peek(fbc)) { - break; +void +firehose_client_metadata_stream_peek(firehose_client_t fc, + OS_UNUSED firehose_event_t context, bool (^peek_should_start)(void), + bool (^peek)(firehose_chunk_t fbc)) +{ + os_unfair_lock_lock(&fc->fc_lock); + + if (peek_should_start && peek_should_start()) { + firehose_buffer_t fb = fc->fc_buffer; + firehose_buffer_header_t fbh = &fb->fb_header; + uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; + + while (bitmap) { + uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; + + bitmap &= ~(1ULL << ref); + if (fbc->fc_start + fbc_length <= fbc->fc_data) { + // this page has its "recycle-requeue" done, but hasn't gone + // through "recycle-reuse", or it has no data, ditch it + continue; + } + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { + // this thing has data, but the first tracepoint is unreadable + // so also just ditch it + continue; + } + if (fbc->fc_pos.fcp_stream != firehose_stream_metadata) { + continue; + } + if (!peek(fbc)) { + break; + } } } + + os_unfair_lock_unlock(&fc->fc_lock); } OS_NOINLINE OS_COLD @@ -872,21 +1114,21 @@ firehose_client_snapshot_finish(firehose_client_t fc, // Then look at all the allocated pages not seen in the ring while (bitmap) { uint16_t ref = firehose_bitmap_first_set(bitmap); - firehose_buffer_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fbc_pos.fbc_next_entry_offs; + firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); - if (fbc->fbc_start + fbc_length <= fbc->fbc_data) { + if (fbc->fc_start + fbc_length <= fbc->fc_data) { // this page has its "recycle-requeue" done, but hasn't gone // through "recycle-reuse", or it has no data, ditch it continue; } - if 
(!((firehose_tracepoint_t)fbc->fbc_data)->ft_length) { + if (!((firehose_tracepoint_t)fbc->fc_data)->ft_length) { // this thing has data, but the first tracepoint is unreadable // so also just ditch it continue; } - if (fbc->fbc_pos.fbc_flag_io != for_io) { + if (fbc->fc_pos.fcp_flag_io != for_io) { continue; } snapshot->handler(fc, evt, fbc); @@ -894,70 +1136,35 @@ firehose_client_snapshot_finish(firehose_client_t fc, } static void -firehose_snapshot_start(void *ctxt) +firehose_snapshot_tickle_clients(firehose_snapshot_t fs, bool for_io) { - firehose_snapshot_t snapshot = ctxt; - firehose_client_t fci; + firehose_client_t fc; long n = 0; - // 0. we need to be on the IO queue so that client connection and/or death - // cannot happen concurrently - dispatch_assert_queue(server_config.fs_io_drain_queue); - - // 1. mark all the clients participating in the current snapshot - // and enter the group for each bit set - TAILQ_FOREACH(fci, &server_config.fs_clients, fc_entry) { - if (fci->fc_is_kernel) { + fs_clients_lock(); + TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) { + if (slowpath(fc->fc_memory_corrupted)) { + continue; + } + if (!fc->fc_pid) { #if TARGET_OS_SIMULATOR continue; #endif - } - if (slowpath(fci->fc_memory_corrupted)) { + } else if (!firehose_client_wakeup(fc, 0, for_io)) { continue; } - fci->fc_needs_io_snapshot = true; - fci->fc_needs_mem_snapshot = true; - n += 2; - } - if (n) { - // cheating: equivalent to dispatch_group_enter() n times - // without the acquire barriers that we don't need - os_atomic_add2o(snapshot->fs_group, dg_value, n, relaxed); + n++; + if (for_io) { + fc->fc_needs_io_snapshot = true; + } else { + fc->fc_needs_mem_snapshot = true; + } } + fs_clients_unlock(); - dispatch_async(server_config.fs_mem_drain_queue, ^{ - // 2. make fs_snapshot visible, this is what triggers the snapshot - // logic from _drain() or handle_death(). 
until fs_snapshot is - // published, the bits set above are mostly ignored - server_config.fs_snapshot = snapshot; - - snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); - - dispatch_async(server_config.fs_io_drain_queue, ^{ - firehose_client_t fcj; - - snapshot->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); - - // match group_enter from firehose_snapshot() after MEM+IO_START - dispatch_group_leave(snapshot->fs_group); - - // 3. tickle all the clients. the list of clients may have changed - // since step 1, but worry not - new clients don't have - // fc_needs_*_snapshot set so drain is harmless; clients that - // were removed from the list have already left the group - // (see firehose_client_finalize()) - TAILQ_FOREACH(fcj, &server_config.fs_clients, fc_entry) { - if (fcj->fc_is_kernel) { -#if !TARGET_OS_SIMULATOR - firehose_client_kernel_source_handle_event(fcj); -#endif - } else { - dispatch_source_merge_data(fcj->fc_io_source, 1); - dispatch_source_merge_data(fcj->fc_mem_source, 1); - } - } - }); - }); + // cheating: equivalent to dispatch_group_enter() n times + // without the acquire barriers that we don't need + if (n) os_atomic_add2o(fs->fs_group, dg_value, n, relaxed); } static void @@ -979,10 +1186,37 @@ firehose_snapshot_finish(void *ctxt) static void firehose_snapshot_gate(void *ctxt) { + firehose_snapshot_t fs = ctxt; + // prevent other snapshots from running until done + dispatch_suspend(server_config.fs_snapshot_gate_queue); - dispatch_async_f(server_config.fs_io_drain_queue, ctxt, - firehose_snapshot_start); + + server_config.fs_snapshot = fs; + dispatch_group_async(fs->fs_group, server_config.fs_mem_drain_queue, ^{ + // start the fs_mem_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); + firehose_snapshot_tickle_clients(fs, false); + + dispatch_group_async(fs->fs_group, server_config.fs_io_drain_queue, ^{ + // start the 
fs_io_snapshot, this is what triggers the snapshot + // logic from _drain() or handle_death() + // 29868879: must always happen after the memory snapshot started + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); + firehose_snapshot_tickle_clients(fs, true); + +#if !TARGET_OS_SIMULATOR + if (server_config.fs_kernel_client) { + firehose_client_kernel_source_handle_event( + server_config.fs_kernel_client); + } +#endif + }); + }); + + dispatch_group_notify_f(fs->fs_group, server_config.fs_io_drain_queue, + fs, firehose_snapshot_finish); } void @@ -993,12 +1227,6 @@ firehose_snapshot(firehose_snapshot_handler_t handler) snapshot->handler = Block_copy(handler); snapshot->fs_group = dispatch_group_create(); - // keep the group entered until IO_START and MEM_START have been sent - // See firehose_snapshot_start() - dispatch_group_enter(snapshot->fs_group); - dispatch_group_notify_f(snapshot->fs_group, server_config.fs_io_drain_queue, - snapshot, firehose_snapshot_finish); - dispatch_async_f(server_config.fs_snapshot_gate_queue, snapshot, firehose_snapshot_gate); } @@ -1010,7 +1238,8 @@ kern_return_t firehose_server_register(mach_port_t server_port OS_UNUSED, mach_port_t mem_port, mach_vm_size_t mem_size, mach_port_t comm_recvp, mach_port_t comm_sendp, - mach_port_t extra_info_port, mach_vm_size_t extra_info_size) + mach_port_t extra_info_port, mach_vm_size_t extra_info_size, + audit_token_t atoken) { mach_vm_address_t base_addr = 0; firehose_client_t fc = NULL; @@ -1060,7 +1289,7 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, } fc = firehose_client_create((firehose_buffer_t)base_addr, - comm_recvp, comm_sendp); + (firehose_token_t)&atoken, comm_recvp, comm_sendp); dispatch_async(server_config.fs_io_drain_queue, ^{ firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { @@ -1088,15 +1317,16 @@ firehose_server_push_async(mach_port_t server_port OS_UNUSED, if (expects_notifs && !fc->fc_use_notifs) { fc->fc_use_notifs = true; } - 
firehose_client_push_async_merge(fc, pp, for_io); + firehose_client_wakeup(fc, pp, for_io); } return KERN_SUCCESS; } kern_return_t -firehose_server_push(mach_port_t server_port OS_UNUSED, +firehose_server_push_and_wait(mach_port_t server_port OS_UNUSED, mach_port_t reply_port, qos_class_t qos, boolean_t for_io, - firehose_push_reply_t *push_reply OS_UNUSED) + firehose_push_reply_t *push_reply OS_UNUSED, + boolean_t *quarantinedOut OS_UNUSED) { firehose_client_t fc = cur_client_info; dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS; @@ -1118,7 +1348,7 @@ firehose_server_push(mach_port_t server_port OS_UNUSED, } block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{ - firehose_client_drain(fc, reply_port, + firehose_client_drain_one(fc, reply_port, for_io ? FIREHOSE_DRAIN_FOR_IO : 0); }); dispatch_async(q, block); diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 799172175..13f52b880 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -36,6 +36,7 @@ struct firehose_client_s { struct _os_object_s fc_as_os_object; }; TAILQ_ENTRY(firehose_client_s) fc_entry; + struct firehose_client_s *volatile fc_next[2]; firehose_buffer_t fc_buffer; uint64_t volatile fc_mem_sent_flushed_pos; @@ -43,21 +44,38 @@ struct firehose_client_s { uint64_t volatile fc_io_sent_flushed_pos; uint64_t volatile fc_io_flushed_pos; +#define FC_STATE_ENQUEUED(for_io) (0x0001u << (for_io)) +#define FC_STATE_MEM_ENQUEUED 0x0001 +#define FC_STATE_IO_ENQUEUED 0x0002 + +#define FC_STATE_CANCELING(for_io) (0x0010u << (for_io)) +#define FC_STATE_MEM_CANCELING 0x0010 +#define FC_STATE_IO_CANCELING 0x0020 + +#define FC_STATE_CANCELED(for_io) (0x0100u << (for_io)) +#define FC_STATE_MEM_CANCELED 0x0100 +#define FC_STATE_IO_CANCELED 0x0200 +#define FC_STATE_CANCELED_MASK 0x0300 + + uintptr_t volatile fc_state; + void *volatile fc_ctxt; union { dispatch_mach_t fc_mach_channel; 
dispatch_source_t fc_kernel_source; }; - dispatch_source_t fc_io_source; - dispatch_source_t fc_mem_source; mach_port_t fc_recvp; mach_port_t fc_sendp; + os_unfair_lock fc_lock; + pid_t fc_pid; + int fc_pidversion; + uid_t fc_euid; bool fc_use_notifs; bool fc_memory_corrupted; bool fc_needs_io_snapshot; bool fc_needs_mem_snapshot; - bool fc_is_kernel; + bool fc_quarantined; }; void diff --git a/src/init.c b/src/init.c index 45cbff3bf..6672fac45 100644 --- a/src/init.c +++ b/src/init.c @@ -21,6 +21,8 @@ // Contains exported global data and initialization & other routines that must // only exist once in the shared library even when resolvers are used. +// NOTE: this file must not contain any atomic operations + #include "internal.h" #if HAVE_MACH @@ -47,12 +49,52 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void) { + _os_object_atfork_prepare(); } DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void) { + _os_object_atfork_parent(); +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_atfork_child(void) +{ + _os_object_atfork_child(); + _voucher_atfork_child(); + _dispatch_event_loop_atfork_child(); + if (_dispatch_is_multithreaded_inline()) { + _dispatch_child_of_unsafe_fork = true; + } + _dispatch_queue_atfork_child(); + // clear the _PROHIBIT and _MULTITHREADED bits if set + _dispatch_unsafe_fork = 0; +} + +int +_dispatch_sigmask(void) +{ + sigset_t mask; + int r = 0; + + /* Workaround: 6269619 Not all signals can be delivered on any thread */ + r |= sigfillset(&mask); + r |= sigdelset(&mask, SIGILL); + r |= sigdelset(&mask, SIGTRAP); +#if HAVE_DECL_SIGEMT + r |= sigdelset(&mask, SIGEMT); +#endif + r |= sigdelset(&mask, SIGFPE); + r |= sigdelset(&mask, SIGBUS); + r |= sigdelset(&mask, SIGSEGV); + r |= sigdelset(&mask, SIGSYS); + r |= sigdelset(&mask, SIGPIPE); + r |= sigdelset(&mask, SIGPROF); + r |= pthread_sigmask(SIG_BLOCK, &mask, NULL); + return dispatch_assume_zero(r); } #pragma mark - @@ -76,13 +118,13 @@ pthread_key_t 
dispatch_frame_key; pthread_key_t dispatch_cache_key; pthread_key_t dispatch_context_key; pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; -pthread_key_t dispatch_defaultpriority_key; +pthread_key_t dispatch_basepri_key; #if DISPATCH_INTROSPECTION pthread_key_t dispatch_introspection_key; #elif DISPATCH_PERF_MON pthread_key_t dispatch_bcounter_key; #endif -pthread_key_t dispatch_sema4_key; +pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE @@ -106,10 +148,6 @@ int _dispatch_set_qos_class_enabled; #if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD int _dispatch_kevent_workqueue_enabled; #endif -#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT && \ - DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -int _dispatch_evfilt_machport_direct_enabled; -#endif DISPATCH_HW_CONFIG(); uint8_t _dispatch_unsafe_fork; @@ -133,33 +171,6 @@ _dispatch_is_fork_of_multithreaded_parent(void) return _dispatch_child_of_unsafe_fork; } -DISPATCH_NOINLINE -void -_dispatch_fork_becomes_unsafe_slow(void) -{ - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, - _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { - DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); - } -} - -DISPATCH_NOINLINE -void -_dispatch_prohibit_transition_to_multithreaded(bool prohibit) -{ - if (prohibit) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, - _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { - DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); - } - } else { - os_atomic_and(&_dispatch_unsafe_fork, - (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - } -} - const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_version = 6, .dqo_label = offsetof(struct dispatch_queue_s, dq_label), @@ -176,8 +187,8 @@ const struct 
dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_suspend_cnt_size = 0, .dqo_target_queue = offsetof(struct dispatch_queue_s, do_targetq), .dqo_target_queue_size = sizeof(((dispatch_queue_t)NULL)->do_targetq), - .dqo_priority = offsetof(struct dispatch_queue_s, dq_priority), - .dqo_priority_size = sizeof(((dispatch_queue_t)NULL)->dq_priority), + .dqo_priority = 0, + .dqo_priority_size = 0, }; #if DISPATCH_USE_DIRECT_TSD @@ -198,85 +209,92 @@ struct dispatch_queue_s _dispatch_main_q = { .do_targetq = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif - .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | + DISPATCH_QUEUE_ROLE_BASE_ANON, .dq_label = "com.apple.main-thread", - .dq_width = 1, - .dq_atomic_bits = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, - .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1), .dq_serialnum = 1, }; #pragma mark - #pragma mark dispatch_queue_attr_t -#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, inactive) \ - { \ - DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ - .dqa_qos_class = (qos), \ - .dqa_relative_priority = (qos) ? 
(prio) : 0, \ - .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ - .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ - .dqa_concurrent = (concurrent), \ - .dqa_inactive = (inactive), \ - } +#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, \ + inactive) \ + { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ + .dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \ + DISPATCH_PRIORITY_REQUESTED_MASK), \ + .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ + .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ + .dqa_concurrent = (concurrent), \ + .dqa_inactive = (inactive), \ + } -#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, concurrent) \ - { \ - [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ - qos, prio, overcommit, freq, concurrent, false), \ - [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT(\ - qos, prio, overcommit, freq, concurrent, true), \ - } +#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, \ + concurrent) \ + { \ + [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ + qos, prio, overcommit, freq, concurrent, false), \ + [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ + qos, prio, overcommit, freq, concurrent, true), \ + } #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \ - { \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, INHERIT, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, WORK_ITEM, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] 
= \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, NEVER, 0), \ - } + { \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, INHERIT, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, INHERIT, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, WORK_ITEM, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, WORK_ITEM, 0), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ + qos, prio, overcommit, NEVER, 1), \ + [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ + DISPATCH_QUEUE_ATTR_ACTIVE_INIT(\ + qos, prio, overcommit, NEVER, 0), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ - [prio] = { \ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified), \ - [DQA_INDEX_NON_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ - [DQA_INDEX_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ - } + [prio] = { \ + [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified),\ + [DQA_INDEX_NON_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ + [DQA_INDEX_OVERCOMMIT] = \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ + } #define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ - { \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ - 
DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ - } + { \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ + DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ + } #define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \ - [DQA_INDEX_QOS_CLASS_##qos] = \ - DISPATCH_QUEUE_ATTR_PRIO_INIT(_DISPATCH_QOS_CLASS_##qos) + [DQA_INDEX_QOS_CLASS_##qos] = \ + DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos) // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased // to array member [0][0][0][0][0][0] and their properties must match! 
@@ -298,7 +316,7 @@ const struct dispatch_queue_attr_s _dispatch_queue_attrs[] #if DISPATCH_VARIANT_STATIC // struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = - DISPATCH_QUEUE_ATTR_INIT(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + DISPATCH_QUEUE_ATTR_INIT(QOS_CLASS_UNSPECIFIED, 0, unspecified, INHERIT, 1, false); #endif // DISPATCH_VARIANT_STATIC @@ -333,6 +351,7 @@ DISPATCH_VTABLE_INSTANCE(queue, .do_dispose = _dispatch_queue_dispose, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -346,6 +365,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -359,6 +379,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue, .do_suspend = _dispatch_queue_suspend, .do_resume = _dispatch_queue_resume, .do_finalize_activation = _dispatch_queue_finalize_activation, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -370,14 +391,18 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, .do_kind = "global-queue", .do_dispose = _dispatch_pthread_root_queue_dispose, + .do_push = _dispatch_root_queue_push, + .do_invoke = NULL, .do_wakeup = _dispatch_root_queue_wakeup, .do_debug = dispatch_queue_debug, ); + DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue, .do_type = DISPATCH_QUEUE_SERIAL_TYPE, .do_kind = "main-queue", .do_dispose = _dispatch_queue_dispose, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_main_queue_wakeup, 
.do_debug = dispatch_queue_debug, @@ -387,6 +412,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE, .do_kind = "runloop-queue", .do_dispose = _dispatch_runloop_queue_dispose, + .do_push = _dispatch_queue_push, .do_invoke = _dispatch_queue_invoke, .do_wakeup = _dispatch_runloop_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -395,6 +421,7 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, .do_type = DISPATCH_QUEUE_MGR_TYPE, .do_kind = "mgr-queue", + .do_push = _dispatch_mgr_queue_push, .do_invoke = _dispatch_mgr_thread, .do_wakeup = _dispatch_mgr_queue_wakeup, .do_debug = dispatch_queue_debug, @@ -404,6 +431,7 @@ DISPATCH_VTABLE_INSTANCE(queue_specific_queue, .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, .do_kind = "queue-context", .do_dispose = _dispatch_queue_specific_queue_dispose, + .do_push = (void *)_dispatch_queue_push, .do_invoke = (void *)_dispatch_queue_invoke, .do_wakeup = (void *)_dispatch_queue_wakeup, .do_debug = (void *)dispatch_queue_debug, @@ -421,6 +449,7 @@ DISPATCH_VTABLE_INSTANCE(source, .do_suspend = (void *)_dispatch_queue_suspend, .do_resume = (void *)_dispatch_queue_resume, .do_finalize_activation = _dispatch_source_finalize_activation, + .do_push = (void *)_dispatch_queue_push, .do_invoke = _dispatch_source_invoke, .do_wakeup = _dispatch_source_wakeup, .do_debug = _dispatch_source_debug, @@ -435,6 +464,7 @@ DISPATCH_VTABLE_INSTANCE(mach, .do_suspend = (void *)_dispatch_queue_suspend, .do_resume = (void *)_dispatch_queue_resume, .do_finalize_activation = _dispatch_mach_finalize_activation, + .do_push = (void *)_dispatch_queue_push, .do_invoke = _dispatch_mach_invoke, .do_wakeup = _dispatch_mach_wakeup, .do_debug = _dispatch_mach_debug, @@ -456,6 +486,7 @@ DISPATCH_VTABLE_INSTANCE(data, .do_kind = "data", .do_dispose = _dispatch_data_dispose, .do_debug = _dispatch_data_debug, + .do_set_targetq = (void*)_dispatch_data_set_target_queue, 
); #endif @@ -481,31 +512,6 @@ DISPATCH_VTABLE_INSTANCE(disk, ); -const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { - DC_VTABLE_ENTRY(ASYNC_REDIRECT, - .do_kind = "dc-redirect", - .do_invoke = _dispatch_async_redirect_invoke), -#if HAVE_MACH - DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, - .do_kind = "dc-mach-send-drain", - .do_invoke = _dispatch_mach_send_barrier_drain_invoke), - DC_VTABLE_ENTRY(MACH_SEND_BARRIER, - .do_kind = "dc-mach-send-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_RECV_BARRIER, - .do_kind = "dc-mach-recv-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - DC_VTABLE_ENTRY(OVERRIDE_STEALING, - .do_kind = "dc-override-stealing", - .do_invoke = _dispatch_queue_override_invoke), - DC_VTABLE_ENTRY(OVERRIDE_OWNING, - .do_kind = "dc-override-owning", - .do_invoke = _dispatch_queue_override_invoke), -#endif -}; - void _dispatch_vtable_init(void) { @@ -518,6 +524,41 @@ _dispatch_vtable_init(void) #endif // USE_OBJC } +#pragma mark - +#pragma mark dispatch_data globals + +const dispatch_block_t _dispatch_data_destructor_free = ^{ + DISPATCH_INTERNAL_CRASH(0, "free destructor called"); +}; + +const dispatch_block_t _dispatch_data_destructor_none = ^{ + DISPATCH_INTERNAL_CRASH(0, "none destructor called"); +}; + +#if !HAVE_MACH +const dispatch_block_t _dispatch_data_destructor_munmap = ^{ + DISPATCH_INTERNAL_CRASH(0, "munmap destructor called"); +}; +#else +// _dispatch_data_destructor_munmap is a linker alias to the following +const dispatch_block_t _dispatch_data_destructor_vm_deallocate = ^{ + DISPATCH_INTERNAL_CRASH(0, "vmdeallocate destructor called"); +}; +#endif + +const dispatch_block_t _dispatch_data_destructor_inline = ^{ + DISPATCH_INTERNAL_CRASH(0, "inline destructor called"); +}; + +struct dispatch_data_s _dispatch_data_empty = { +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA + .do_vtable = DISPATCH_DATA_EMPTY_CLASS, +#else + 
DISPATCH_GLOBAL_OBJECT_HEADER(data), + .do_next = DISPATCH_OBJECT_LISTLESS, +#endif +}; + #pragma mark - #pragma mark dispatch_bug @@ -856,6 +897,7 @@ void _dispatch_temporary_resource_shortage(void) { sleep(1); + asm(""); // prevent tailcall } void * @@ -962,6 +1004,22 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } #if HAVE_MACH + +#undef _dispatch_client_callout3 +DISPATCH_NOINLINE +void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (fastpath(!u)) return f(ctxt, reason, dmsg); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, reason, dmsg); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -1057,6 +1115,24 @@ os_release(void *obj) } } +void +_os_object_atfork_prepare(void) +{ + return; +} + +void +_os_object_atfork_parent(void) +{ + return; +} + +void +_os_object_atfork_child(void) +{ + return; +} + #pragma mark - #pragma mark dispatch_autorelease_pool no_objc @@ -1080,412 +1156,25 @@ _dispatch_autorelease_pool_pop(void *pool) } } -void* -_dispatch_last_resort_autorelease_pool_push(void) +void +_dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic) { - return _dispatch_autorelease_pool_push(); + dic->dic_autorelease_pool = _dispatch_autorelease_pool_push(); } void -_dispatch_last_resort_autorelease_pool_pop(void *pool) +_dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic) { - _dispatch_autorelease_pool_pop(pool); + _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool); + dic->dic_autorelease_pool = NULL; } #endif // DISPATCH_COCOA_COMPAT #endif // !USE_OBJC -#pragma mark - -#pragma mark dispatch_source_types - -static void -dispatch_source_type_timer_init(dispatch_source_t ds, - dispatch_source_type_t 
type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask, - dispatch_queue_t q) -{ - if (fastpath(!ds->ds_refs)) { - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_timer_source_refs_s)); - } - ds->ds_needs_rearm = true; - ds->ds_is_timer = true; - if (q == dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_BACKGROUND, 0) - || q == dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_BACKGROUND, DISPATCH_QUEUE_OVERCOMMIT)){ - mask |= DISPATCH_TIMER_BACKGROUND; // - } - ds_timer(ds->ds_refs).flags = mask; -} - -const struct dispatch_source_type_s _dispatch_source_type_timer = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| - DISPATCH_TIMER_WALL_CLOCK, - .init = dispatch_source_type_timer_init, -}; - -static void -dispatch_source_type_after_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - dispatch_source_type_timer_init(ds, type, handle, mask, q); - ds->ds_needs_rearm = false; - ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_AFTER; -} - -const struct dispatch_source_type_s _dispatch_source_type_after = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - }, - .init = dispatch_source_type_after_init, -}; - -static void -dispatch_source_type_timer_with_aggregate_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_timer_source_aggregate_refs_s)); - dispatch_source_type_timer_init(ds, type, handle, mask, q); - ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_WITH_AGGREGATE; - ds->dq_specific_q = (void*)handle; - _dispatch_retain(ds->dq_specific_q); -} - -const struct dispatch_source_type_s _dispatch_source_type_timer_with_aggregate={ - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - .ident = ~0ull, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, - .init = 
dispatch_source_type_timer_with_aggregate_init, -}; - -static void -dispatch_source_type_interval_init(dispatch_source_t ds, - dispatch_source_type_t type, uintptr_t handle, unsigned long mask, - dispatch_queue_t q) -{ - dispatch_source_type_timer_init(ds, type, handle, mask, q); - ds_timer(ds->ds_refs).flags |= DISPATCH_TIMER_INTERVAL; - unsigned long ident = _dispatch_source_timer_idx(ds->ds_refs); - ds->ds_dkev->dk_kevent.ident = ds->ds_ident_hack = ident; - _dispatch_source_set_interval(ds, handle); -} - -const struct dispatch_source_type_s _dispatch_source_type_interval = { - .ke = { - .filter = DISPATCH_EVFILT_TIMER, - .ident = ~0ull, - }, - .mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| - DISPATCH_INTERVAL_UI_ANIMATION, - .init = dispatch_source_type_interval_init, -}; - -static void -dispatch_source_type_readwrite_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_level = true; -#ifdef HAVE_DECL_NOTE_LOWAT - // bypass kernel check for device kqueue support rdar://19004921 - ds->ds_dkev->dk_kevent.fflags = NOTE_LOWAT; -#endif - ds->ds_dkev->dk_kevent.data = 1; -} - -const struct dispatch_source_type_s _dispatch_source_type_read = { - .ke = { - .filter = EVFILT_READ, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_readwrite_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_write = { - .ke = { - .filter = EVFILT_WRITE, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_readwrite_init, -}; - -#if DISPATCH_USE_MEMORYSTATUS - -#if TARGET_IPHONE_SIMULATOR // rdar://problem/9219483 -static int _dispatch_ios_simulator_memory_warnings_fd = -1; -static void -_dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) -{ - char *e = getenv("SIMULATOR_MEMORY_WARNINGS"); - if (!e) return; - 
_dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); - if (_dispatch_ios_simulator_memory_warnings_fd == -1) { - (void)dispatch_assume_zero(errno); - } -} -#endif - -#if TARGET_IPHONE_SIMULATOR -static void -dispatch_source_type_memorypressure_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_ios_simulator_memorypressure_init); - handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; - mask = NOTE_ATTRIB; - ds->ds_dkev->dk_kevent.filter = EVFILT_VNODE; - ds->ds_dkev->dk_kevent.ident = handle; - ds->ds_dkev->dk_kevent.flags |= EV_CLEAR; - ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; - ds->ds_ident_hack = handle; - ds->ds_pending_data_mask = mask; - ds->ds_memorypressure_override = 1; -} -#else -#define dispatch_source_type_memorypressure_init NULL -#endif - -#ifndef NOTE_MEMORYSTATUS_LOW_SWAP -#define NOTE_MEMORYSTATUS_LOW_SWAP 0x8 -#endif - -const struct dispatch_source_type_s _dispatch_source_type_memorypressure = { - .ke = { - .filter = EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_MEMORYSTATUS_PRESSURE_NORMAL|NOTE_MEMORYSTATUS_PRESSURE_WARN - |NOTE_MEMORYSTATUS_PRESSURE_CRITICAL|NOTE_MEMORYSTATUS_LOW_SWAP - |NOTE_MEMORYSTATUS_PROC_LIMIT_WARN|NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL, - .init = dispatch_source_type_memorypressure_init, -}; - -static void -dispatch_source_type_vm_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - // Map legacy vm pressure to memorypressure warning rdar://problem/15907505 - mask = NOTE_MEMORYSTATUS_PRESSURE_WARN; - ds->ds_dkev->dk_kevent.fflags = (uint32_t)mask; - ds->ds_pending_data_mask = mask; - ds->ds_vmpressure_override = 1; 
-#if TARGET_IPHONE_SIMULATOR - dispatch_source_type_memorypressure_init(ds, type, handle, mask, q); -#endif -} - -const struct dispatch_source_type_s _dispatch_source_type_vm = { - .ke = { - .filter = EVFILT_MEMORYSTATUS, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_VM_PRESSURE, - .init = dispatch_source_type_vm_init, -}; - -#elif DISPATCH_USE_VM_PRESSURE - -const struct dispatch_source_type_s _dispatch_source_type_vm = { - .ke = { - .filter = EVFILT_VM, - .flags = EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_VM_PRESSURE, -}; - -#endif // DISPATCH_USE_VM_PRESSURE - -const struct dispatch_source_type_s _dispatch_source_type_signal = { - .ke = { - .filter = EVFILT_SIGNAL, - .flags = EV_UDATA_SPECIFIC, - }, -}; - -#if !defined(__linux__) -static void -dispatch_source_type_proc_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_dkev->dk_kevent.fflags |= NOTE_EXIT; // rdar://16655831 -} - -const struct dispatch_source_type_s _dispatch_source_type_proc = { - .ke = { - .filter = EVFILT_PROC, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_EXIT|NOTE_FORK|NOTE_EXEC -#if HAVE_DECL_NOTE_SIGNAL - |NOTE_SIGNAL -#endif -#if HAVE_DECL_NOTE_REAP - |NOTE_REAP -#endif - , - .init = dispatch_source_type_proc_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_vnode = { - .ke = { - .filter = EVFILT_VNODE, - .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_DELETE|NOTE_WRITE|NOTE_EXTEND|NOTE_ATTRIB|NOTE_LINK| - NOTE_RENAME|NOTE_FUNLOCK -#if HAVE_DECL_NOTE_REVOKE - |NOTE_REVOKE -#endif -#if HAVE_DECL_NOTE_NONE - |NOTE_NONE -#endif - , -}; - -const struct dispatch_source_type_s _dispatch_source_type_vfs = { - .ke = { - .filter = EVFILT_FS, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - }, - .mask = VQ_NOTRESP|VQ_NEEDAUTH|VQ_LOWDISK|VQ_MOUNT|VQ_UNMOUNT|VQ_DEAD| - 
VQ_ASSIST|VQ_NOTRESPLOCK -#if HAVE_DECL_VQ_UPDATE - |VQ_UPDATE -#endif -#if HAVE_DECL_VQ_VERYLOWDISK - |VQ_VERYLOWDISK -#endif -#if HAVE_DECL_VQ_QUOTA - |VQ_QUOTA -#endif - , -}; - -const struct dispatch_source_type_s _dispatch_source_type_sock = { -#ifdef EVFILT_SOCK - .ke = { - .filter = EVFILT_SOCK, - .flags = EV_CLEAR|EV_VANISHED|EV_UDATA_SPECIFIC, - }, - .mask = NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | - NOTE_TIMEOUT | NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | - NOTE_RESUME | NOTE_KEEPALIVE -#ifdef NOTE_ADAPTIVE_WTIMO - | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO -#endif -#ifdef NOTE_CONNECTED - | NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED -#endif -#ifdef NOTE_NOTIFY_ACK - | NOTE_NOTIFY_ACK -#endif - , -#endif // EVFILT_SOCK -}; -#endif // !defined(__linux__) - -static void -dispatch_source_type_data_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_is_installed = true; - ds->ds_is_custom_source = true; - ds->ds_is_direct_kevent = true; - ds->ds_pending_data_mask = ~0ul; - ds->ds_needs_rearm = false; // not registered with kevent -} - -const struct dispatch_source_type_s _dispatch_source_type_data_add = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - .flags = EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_data_init, -}; - -const struct dispatch_source_type_s _dispatch_source_type_data_or = { - .ke = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR|EV_UDATA_SPECIFIC, - .fflags = ~0u, - }, - .init = dispatch_source_type_data_init, -}; - -#if HAVE_MACH - -static void -dispatch_source_type_mach_send_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, unsigned long mask, - dispatch_queue_t q DISPATCH_UNUSED) -{ - if (!mask) { - // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD - 
ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_SEND_DEAD; - ds->ds_pending_data_mask = DISPATCH_MACH_SEND_DEAD; - } -} - -const struct dispatch_source_type_s _dispatch_source_type_mach_send = { - .ke = { - .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, - .flags = EV_CLEAR, - }, - .mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, - .init = dispatch_source_type_mach_send_init, -}; - -static void -dispatch_source_type_mach_recv_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (_dispatch_evfilt_machport_direct_enabled) return; - ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE; - ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - ds->ds_is_direct_kevent = false; -#endif -} - -const struct dispatch_source_type_s _dispatch_source_type_mach_recv = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - }, - .init = dispatch_source_type_mach_recv_init, -}; - #pragma mark - #pragma mark dispatch_mig +#if HAVE_MACH void * dispatch_mach_msg_get_context(mach_msg_header_t *msg) @@ -1520,22 +1209,16 @@ kern_return_t _dispatch_mach_notify_port_destroyed(mach_port_t notify DISPATCH_UNUSED, mach_port_t name) { - kern_return_t kr; - // this function should never be called - (void)dispatch_assume_zero(name); - kr = mach_port_mod_refs(mach_task_self(), name, MACH_PORT_RIGHT_RECEIVE,-1); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - return KERN_SUCCESS; + DISPATCH_INTERNAL_CRASH(name, "unexpected receipt of port-destroyed"); + return KERN_FAILURE; } kern_return_t -_dispatch_mach_notify_no_senders(mach_port_t notify, - mach_port_mscount_t mscnt DISPATCH_UNUSED) +_dispatch_mach_notify_no_senders(mach_port_t notify DISPATCH_UNUSED, + mach_port_mscount_t 
mscnt) { - // this function should never be called - (void)dispatch_assume_zero(notify); - return KERN_SUCCESS; + DISPATCH_INTERNAL_CRASH(mscnt, "unexpected receipt of no-more-senders"); + return KERN_FAILURE; } kern_return_t diff --git a/src/inline_internal.h b/src/inline_internal.h index d1c73dd4e..1279874d4 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -40,6 +40,9 @@ DISPATCH_NOTHROW void _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)); #if HAVE_MACH DISPATCH_NOTHROW void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f); +DISPATCH_NOTHROW void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error, dispatch_mach_handler_function_t f); @@ -62,6 +65,14 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } #if HAVE_MACH +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + return f(ctxt, reason, dmsg); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, @@ -88,6 +99,13 @@ _dispatch_object_has_vtable(dispatch_object_t dou) return dc_flags > 0xffful; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_queue(dispatch_object_t dou) +{ + return _dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push; +} + DISPATCH_ALWAYS_INLINE static inline bool _dispatch_object_is_continuation(dispatch_object_t dou) @@ -134,43 +152,31 @@ _dispatch_object_is_barrier(dispatch_object_t dou) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_object_is_slow_item(dispatch_object_t dou) -{ - if (_dispatch_object_has_vtable(dou)) { - return false; - } - return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT); -} - -DISPATCH_ALWAYS_INLINE -static inline bool 
-_dispatch_object_is_slow_non_barrier(dispatch_object_t dou) +_dispatch_object_is_sync_waiter(dispatch_object_t dou) { if (_dispatch_object_has_vtable(dou)) { return false; } - return ((dou._dc->dc_flags & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == - (DISPATCH_OBJ_SYNC_SLOW_BIT)); + return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_object_is_slow_barrier(dispatch_object_t dou) +_dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) { if (_dispatch_object_has_vtable(dou)) { return false; } return ((dou._dc->dc_flags & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)) == - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT)); + (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) == + (DISPATCH_OBJ_SYNC_WAITER_BIT)); } DISPATCH_ALWAYS_INLINE static inline _os_object_t -_os_object_retain_internal_inline(_os_object_t obj) +_os_object_retain_internal_n_inline(_os_object_t obj, int n) { - int ref_cnt = _os_object_refcnt_inc(obj); + int ref_cnt = _os_object_refcnt_add(obj, n); if (unlikely(ref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } @@ -179,23 +185,20 @@ _os_object_retain_internal_inline(_os_object_t obj) DISPATCH_ALWAYS_INLINE static inline void -_os_object_release_internal_inline_no_dispose(_os_object_t obj) +_os_object_release_internal_n_no_dispose_inline(_os_object_t obj, int n) { - int ref_cnt = _os_object_refcnt_dec(obj); + int ref_cnt = _os_object_refcnt_sub(obj, n); if (likely(ref_cnt >= 0)) { return; } - if (ref_cnt == 0) { - _OS_OBJECT_CLIENT_CRASH("Unexpected release of an object"); - } _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } DISPATCH_ALWAYS_INLINE static inline void -_os_object_release_internal_inline(_os_object_t obj) +_os_object_release_internal_n_inline(_os_object_t obj, int n) { - int ref_cnt = _os_object_refcnt_dec(obj); + int ref_cnt = _os_object_refcnt_sub(obj, n); if (likely(ref_cnt >= 0)) { 
return; } @@ -217,74 +220,110 @@ DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_retain(dispatch_object_t dou) { - (void)_os_object_retain_internal_inline(dou._os_obj); + (void)_os_object_retain_internal_n_inline(dou._os_obj, 1); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_retain_2(dispatch_object_t dou) +{ + (void)_os_object_retain_internal_n_inline(dou._os_obj, 2); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_retain_n(dispatch_object_t dou, int n) +{ + (void)_os_object_retain_internal_n_inline(dou._os_obj, n); } DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_release(dispatch_object_t dou) { - _os_object_release_internal_inline(dou._os_obj); + _os_object_release_internal_n_inline(dou._os_obj, 1); } DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_release_tailcall(dispatch_object_t dou) +_dispatch_release_2(dispatch_object_t dou) { - _os_object_release_internal(dou._os_obj); + _os_object_release_internal_n_inline(dou._os_obj, 2); } -DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL +DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_object_set_target_queue_inline(dispatch_object_t dou, - dispatch_queue_t tq) +_dispatch_release_n(dispatch_object_t dou, int n) { - _dispatch_retain(tq); - tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); - if (tq) _dispatch_release(tq); - _dispatch_object_debug(dou._do, "%s", __func__); + _os_object_release_internal_n_inline(dou._os_obj, n); } -#endif // DISPATCH_PURE_C -#pragma mark - -#pragma mark dispatch_thread -#if DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_no_dispose(dispatch_object_t dou) +{ + _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 1); +} -#define DISPATCH_DEFERRED_ITEMS_MAGIC 0xdefe55edul /* deferred */ -#define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 8 -#ifdef WORKQ_KEVENT_EVENT_BUFFER_LEN -_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= - DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - 
"our list should not be longer than the kernel's"); -#endif +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_2_no_dispose(dispatch_object_t dou) +{ + _os_object_release_internal_n_no_dispose_inline(dou._os_obj, 2); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_tailcall(dispatch_object_t dou) +{ + _os_object_release_internal(dou._os_obj); +} -typedef struct dispatch_deferred_items_s { - uint32_t ddi_magic; - dispatch_queue_t ddi_stashed_dq; - struct dispatch_object_s *ddi_stashed_dou; - dispatch_priority_t ddi_stashed_pp; - int ddi_nevents; - int ddi_maxevents; - _dispatch_kevent_qos_s ddi_eventlist[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; -} dispatch_deferred_items_s, *dispatch_deferred_items_t; +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_release_2_tailcall(dispatch_object_t dou) +{ + _os_object_release_internal_n(dou._os_obj, 2); +} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_deferred_items_set(dispatch_deferred_items_t ddi) +_dispatch_queue_retain_storage(dispatch_queue_t dq) { - _dispatch_thread_setspecific(dispatch_deferred_items_key, (void *)ddi); + int ref_cnt = os_atomic_inc2o(dq, dq_sref_cnt, relaxed); + if (unlikely(ref_cnt <= 0)) { + _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); + } } DISPATCH_ALWAYS_INLINE -static inline dispatch_deferred_items_t -_dispatch_deferred_items_get(void) -{ - dispatch_deferred_items_t ddi = (dispatch_deferred_items_t) - _dispatch_thread_getspecific(dispatch_deferred_items_key); - if (ddi && ddi->ddi_magic == DISPATCH_DEFERRED_ITEMS_MAGIC) { - return ddi; +static inline void +_dispatch_queue_release_storage(dispatch_queue_t dq) +{ + // this refcount only delays the _dispatch_object_dealloc() and there's no + // need for visibility wrt to the allocation, the internal refcount already + // gives us that, and the object becomes immutable after the last internal + // refcount release. 
+ int ref_cnt = os_atomic_dec2o(dq, dq_sref_cnt, relaxed); + if (unlikely(ref_cnt >= 0)) { + return; } - return NULL; + if (unlikely(ref_cnt < -1)) { + _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); + } + dq->dq_state = 0xdead000000000000; + _dispatch_object_dealloc(dq); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL +static inline void +_dispatch_object_set_target_queue_inline(dispatch_object_t dou, + dispatch_queue_t tq) +{ + _dispatch_retain(tq); + tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); + if (tq) _dispatch_release(tq); + _dispatch_object_debug(dou._do, "%s", __func__); } #endif // DISPATCH_PURE_C @@ -345,12 +384,12 @@ _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it) dispatch_queue_t dq = it->dtfi_queue; if (dtf) { - if (dq->do_targetq) { - // redirections and trysync_f may skip some frames, - // so we need to simulate seeing the missing links - // however the bottom root queue is always present - it->dtfi_queue = dq->do_targetq; - if (it->dtfi_queue == dtf->dtf_queue) { + dispatch_queue_t tq = dq->do_targetq; + if (tq) { + // redirections, dispatch_sync and dispatch_trysync_f may skip + // frames, so we need to simulate seeing the missing links + it->dtfi_queue = tq; + if (dq == dtf->dtf_queue) { it->dtfi_frame = dtf->dtf_prev; } } else { @@ -385,13 +424,6 @@ _dispatch_thread_frame_get_current(void) return _dispatch_thread_getspecific(dispatch_frame_key); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_thread_frame_set_current(dispatch_thread_frame_t dtf) -{ - _dispatch_thread_setspecific(dispatch_frame_key, dtf); -} - DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf) @@ -407,7 +439,6 @@ _dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq) _dispatch_thread_frame_save_state(dtf); _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, dispatch_frame_key, dtf); - dtf->dtf_deferred = NULL; } DISPATCH_ALWAYS_INLINE @@ -418,7 
+449,6 @@ _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf, _dispatch_thread_frame_save_state(dtf); _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, dispatch_frame_key, new_base); - dtf->dtf_deferred = NULL; } DISPATCH_ALWAYS_INLINE @@ -450,28 +480,28 @@ _dispatch_thread_frame_unstash(dispatch_thread_frame_t dtf) DISPATCH_ALWAYS_INLINE static inline int _dispatch_wqthread_override_start_check_owner(mach_port_t thread, - pthread_priority_t pp, mach_port_t *ulock_addr) + dispatch_qos_t qos, mach_port_t *ulock_addr) { #if HAVE_PTHREAD_WORKQUEUE_QOS if (!_dispatch_set_qos_class_enabled) return 0; return _pthread_workqueue_override_start_direct_check_owner(thread, - pp, ulock_addr); + _dispatch_qos_to_pp(qos), ulock_addr); #else - (void)thread; (void)pp; (void)ulock_addr; + (void)thread; (void)qos; (void)ulock_addr; return 0; #endif } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_wqthread_override_start(mach_port_t thread, - pthread_priority_t pp) +_dispatch_wqthread_override_start(mach_port_t thread, dispatch_qos_t qos) { #if HAVE_PTHREAD_WORKQUEUE_QOS if (!_dispatch_set_qos_class_enabled) return; - (void)_pthread_workqueue_override_start_direct(thread, pp); + (void)_pthread_workqueue_override_start_direct(thread, + _dispatch_qos_to_pp(qos)); #else - (void)thread; (void)pp; + (void)thread; (void)qos; #endif } @@ -510,43 +540,6 @@ _dispatch_thread_override_end(mach_port_t thread, void *resource) #endif } -#if DISPATCH_DEBUG_QOS && HAVE_PTHREAD_WORKQUEUE_QOS -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_qos_class_is_valid(pthread_priority_t pp) -{ - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (pp > (1UL << (DISPATCH_QUEUE_QOS_COUNT + - _PTHREAD_PRIORITY_QOS_CLASS_SHIFT))) { - return false; - } - return true; -} -#define _dispatch_assert_is_valid_qos_class(pp) ({ typeof(pp) _pp = (pp); \ - if (unlikely(!_dispatch_qos_class_is_valid(_pp))) { \ - DISPATCH_INTERNAL_CRASH(_pp, "Invalid qos class"); \ - } \ - }) - 
-DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_qos_override_is_valid(pthread_priority_t pp) -{ - if (pp & (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK) { - return false; - } - return _dispatch_qos_class_is_valid(pp); -} -#define _dispatch_assert_is_valid_qos_override(pp) ({ typeof(pp) _pp = (pp); \ - if (unlikely(!_dispatch_qos_override_is_valid(_pp))) { \ - DISPATCH_INTERNAL_CRASH(_pp, "Invalid override"); \ - } \ - }) -#else -#define _dispatch_assert_is_valid_qos_override(pp) (void)(pp) -#define _dispatch_assert_is_valid_qos_class(pp) (void)(pp) -#endif - #endif // DISPATCH_PURE_C #pragma mark - #pragma mark dispatch_queue_t state accessors @@ -658,12 +651,116 @@ _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_has_immutable_target(dispatch_queue_t dq) +_dispatch_queue_is_legacy(dispatch_queue_t dq) { - if (dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { - return false; + return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wlh_retain(dispatch_wlh_t wlh) +{ + if (wlh && wlh != DISPATCH_WLH_ANON) { + _dispatch_queue_retain_storage((dispatch_queue_t)wlh); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wlh_release(dispatch_wlh_t wlh) +{ + if (wlh && wlh != DISPATCH_WLH_ANON) { + _dispatch_queue_release_storage((dispatch_queue_t)wlh); + } +} + +#define DISPATCH_WLH_STORAGE_REF 1ul + +DISPATCH_ALWAYS_INLINE DISPATCH_PURE +static inline dispatch_wlh_t +_dispatch_get_wlh(void) +{ + return _dispatch_thread_getspecific(dispatch_wlh_key); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_PURE +static inline dispatch_wlh_t +_dispatch_get_wlh_reference(void) +{ + dispatch_wlh_t wlh = _dispatch_thread_getspecific(dispatch_wlh_key); + if (wlh != DISPATCH_WLH_ANON) { + wlh = (dispatch_wlh_t)((uintptr_t)wlh & ~DISPATCH_WLH_STORAGE_REF); + } + return wlh; +} + +DISPATCH_ALWAYS_INLINE +static inline bool 
+_dispatch_adopt_wlh_anon_recurse(void) +{ + dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference(); + if (cur_wlh == DISPATCH_WLH_ANON) return false; + _dispatch_debug("wlh[anon]: set current (releasing %p)", cur_wlh); + _dispatch_wlh_release(cur_wlh); + _dispatch_thread_setspecific(dispatch_wlh_key, (void *)DISPATCH_WLH_ANON); + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_adopt_wlh_anon(void) +{ + if (unlikely(!_dispatch_adopt_wlh_anon_recurse())) { + DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON"); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_adopt_wlh(dispatch_wlh_t wlh) +{ + dispatch_wlh_t cur_wlh = _dispatch_get_wlh_reference(); + _dispatch_debug("wlh[%p]: adopt current (releasing %p)", wlh, cur_wlh); + if (cur_wlh == DISPATCH_WLH_ANON) { + DISPATCH_INTERNAL_CRASH(0, "Lingering DISPATCH_WLH_ANON"); + } + if (cur_wlh != wlh) { + dispatch_assert(wlh); + _dispatch_wlh_release(cur_wlh); + _dispatch_wlh_retain(wlh); } - return dx_type(dq) != DISPATCH_QUEUE_LEGACY_TYPE; + _dispatch_thread_setspecific(dispatch_wlh_key, (void *)wlh); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_preserve_wlh_storage_reference(dispatch_wlh_t wlh) +{ + dispatch_assert(wlh != DISPATCH_WLH_ANON); + dispatch_assert(wlh == _dispatch_get_wlh()); + _dispatch_thread_setspecific(dispatch_wlh_key, + (void *)((uintptr_t)wlh | DISPATCH_WLH_STORAGE_REF)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_reset_wlh(void) +{ + dispatch_assert(_dispatch_get_wlh() == DISPATCH_WLH_ANON); + _dispatch_debug("wlh[anon]: clear current"); + _dispatch_thread_setspecific(dispatch_wlh_key, NULL); + _dispatch_clear_return_to_kernel(); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_wlh_should_poll_unote(dispatch_unote_t du) +{ + if (likely(_dispatch_needs_to_return_to_kernel())) { + dispatch_wlh_t wlh = _dispatch_get_wlh(); + return wlh != DISPATCH_WLH_ANON && du._du->du_wlh == wlh; + } + return false; } #endif // 
DISPATCH_PURE_C @@ -684,30 +781,30 @@ _dq_state_has_side_suspend_cnt(uint64_t dq_state) } DISPATCH_ALWAYS_INLINE -static inline uint32_t +static inline int32_t _dq_state_extract_width_bits(uint64_t dq_state) { dq_state &= DISPATCH_QUEUE_WIDTH_MASK; - return (uint32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT); + return (int32_t)(dq_state >> DISPATCH_QUEUE_WIDTH_SHIFT); } DISPATCH_ALWAYS_INLINE -static inline uint32_t +static inline int32_t _dq_state_available_width(uint64_t dq_state) { - uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; - if (fastpath(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { + int32_t full = DISPATCH_QUEUE_WIDTH_FULL; + if (likely(!(dq_state & DISPATCH_QUEUE_WIDTH_FULL_BIT))) { return full - _dq_state_extract_width_bits(dq_state); } return 0; } DISPATCH_ALWAYS_INLINE -static inline uint32_t +static inline int32_t _dq_state_used_width(uint64_t dq_state, uint16_t dq_width) { - uint32_t full = DISPATCH_QUEUE_WIDTH_FULL; - uint32_t width = _dq_state_extract_width_bits(dq_state); + int32_t full = DISPATCH_QUEUE_WIDTH_FULL; + int32_t width = _dq_state_extract_width_bits(dq_state); if (dq_state & DISPATCH_QUEUE_PENDING_BARRIER) { // DISPATCH_QUEUE_PENDING_BARRIER means (dq_width - 1) of the used width @@ -723,7 +820,8 @@ _dq_state_is_suspended(uint64_t dq_state) { return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; } -#define DISPATCH_QUEUE_IS_SUSPENDED(x) _dq_state_is_suspended((x)->dq_state) +#define DISPATCH_QUEUE_IS_SUSPENDED(x) \ + _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) DISPATCH_ALWAYS_INLINE static inline bool @@ -767,58 +865,129 @@ _dq_state_is_dirty(uint64_t dq_state) return dq_state & DISPATCH_QUEUE_DIRTY; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_base_wlh(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_ROLE_BASE_WLH; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_base_anon(uint64_t dq_state) +{ + return dq_state & DISPATCH_QUEUE_ROLE_BASE_ANON; +} + +DISPATCH_ALWAYS_INLINE +static 
inline bool +_dq_state_is_inner_queue(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_ROLE_MASK) == DISPATCH_QUEUE_ROLE_INNER; +} + DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_enqueued(uint64_t dq_state) +{ + return dq_state & (DISPATCH_QUEUE_ENQUEUED|DISPATCH_QUEUE_ENQUEUED_ON_MGR); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_enqueued_on_target(uint64_t dq_state) { return dq_state & DISPATCH_QUEUE_ENQUEUED; } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_has_override(uint64_t dq_state) +_dq_state_is_enqueued_on_manager(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_HAS_OVERRIDE; + return dq_state & DISPATCH_QUEUE_ENQUEUED_ON_MGR; } DISPATCH_ALWAYS_INLINE -static inline dispatch_lock_owner -_dq_state_drain_owner(uint64_t dq_state) +static inline bool +_dq_state_in_sync_transfer(uint64_t dq_state) { - return _dispatch_lock_owner((dispatch_lock)dq_state); + return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER; } -#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \ - _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed)) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_drain_pended(uint64_t dq_state) +_dq_state_received_override(uint64_t dq_state) { - return (dq_state & DISPATCH_QUEUE_DRAIN_PENDED); + return _dq_state_is_base_anon(dq_state) && + (dq_state & DISPATCH_QUEUE_RECEIVED_OVERRIDE); } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_drain_locked_by(uint64_t dq_state, uint32_t owner) +_dq_state_received_sync_wait(uint64_t dq_state) { - if (_dq_state_drain_pended(dq_state)) { - return false; + return _dq_state_is_base_wlh(dq_state) && + (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dq_state_max_qos(uint64_t dq_state) +{ + dq_state &= DISPATCH_QUEUE_MAX_QOS_MASK; + return (dispatch_qos_t)(dq_state >> DISPATCH_QUEUE_MAX_QOS_SHIFT); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dq_state_from_qos(dispatch_qos_t qos) +{ + return 
(uint64_t)(qos) << DISPATCH_QUEUE_MAX_QOS_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dq_state_merge_qos(uint64_t dq_state, dispatch_qos_t qos) +{ + uint64_t qos_bits = _dq_state_from_qos(qos); + if ((dq_state & DISPATCH_QUEUE_MAX_QOS_MASK) < qos_bits) { + dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + dq_state |= qos_bits; + if (unlikely(_dq_state_is_base_anon(dq_state))) { + dq_state |= DISPATCH_QUEUE_RECEIVED_OVERRIDE; + } } - return _dq_state_drain_owner(dq_state) == owner; + return dq_state; } +DISPATCH_ALWAYS_INLINE +static inline dispatch_tid +_dq_state_drain_owner(uint64_t dq_state) +{ + return _dispatch_lock_owner((dispatch_lock)dq_state); +} +#define DISPATCH_QUEUE_DRAIN_OWNER(dq) \ + _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed)) + DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_drain_locked(uint64_t dq_state) +_dq_state_drain_locked_by(uint64_t dq_state, dispatch_tid tid) { - return (dq_state & DISPATCH_QUEUE_DRAIN_OWNER_MASK) != 0; + return _dispatch_lock_is_locked_by((dispatch_lock)dq_state, tid); } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_has_waiters(uint64_t dq_state) +_dq_state_drain_locked_by_self(uint64_t dq_state) { - return _dispatch_lock_has_waiters((dispatch_lock)dq_state); + return _dispatch_lock_is_locked_by_self((dispatch_lock)dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_drain_locked(uint64_t dq_state) +{ + return _dispatch_lock_is_locked((dispatch_lock)dq_state); } DISPATCH_ALWAYS_INLINE @@ -837,64 +1006,58 @@ _dq_state_is_runnable(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_should_wakeup(uint64_t dq_state) +_dq_state_should_override(uint64_t dq_state) { - return _dq_state_is_runnable(dq_state) && - !_dq_state_is_enqueued(dq_state) && - !_dq_state_drain_locked(dq_state); + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_enqueued_on_manager(dq_state)) { + return false; + } + if (_dq_state_is_enqueued_on_target(dq_state)) { + return 
true; + } + if (_dq_state_is_base_wlh(dq_state)) { + return false; + } + return _dq_state_drain_locked(dq_state); } + #endif // __cplusplus #pragma mark - #pragma mark dispatch_queue_t state machine -#ifndef __cplusplus -static inline bool _dispatch_queue_need_override(dispatch_queue_class_t dqu, - pthread_priority_t pp); -static inline bool _dispatch_queue_need_override_retain( - dispatch_queue_class_t dqu, pthread_priority_t pp); -static inline dispatch_priority_t _dispatch_queue_reset_override_priority( - dispatch_queue_class_t dqu, bool qp_is_floor); -static inline bool _dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, - dispatch_priority_t new_op); -static inline pthread_priority_t _dispatch_get_defaultpriority(void); -static inline void _dispatch_set_defaultpriority_override(void); -static inline void _dispatch_reset_defaultpriority(pthread_priority_t pp); static inline pthread_priority_t _dispatch_get_priority(void); -static inline pthread_priority_t _dispatch_set_defaultpriority( - pthread_priority_t pp, pthread_priority_t *new_pp); - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_xref_dispose(struct dispatch_queue_s *dq) -{ - if (slowpath(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH(dq, "Release of a suspended object"); - } - os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); -} +static inline dispatch_priority_t _dispatch_get_basepri(void); +static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void); +static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos); +static inline void _dispatch_reset_basepri(dispatch_priority_t dbp); +static inline dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp); +static inline bool _dispatch_queue_need_override_retain( + dispatch_queue_class_t dqu, dispatch_qos_t qos); -#endif #if DISPATCH_PURE_C // Note to later developers: ensure that any initialization 
changes are // made for statically allocated queues (i.e. _dispatch_main_q). static inline void _dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf, - uint16_t width, bool inactive) + uint16_t width, uint64_t initial_state_bits) { uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width); - if (inactive) { - dq_state += DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; - dq->do_ref_cnt++; // rdar://8181908 see _dispatch_queue_resume + dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK | + DISPATCH_QUEUE_INACTIVE)) == 0); + + if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) { + dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; + dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_queue_resume } + + dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK); dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; - dqf |= (dispatch_queue_flags_t)width << DQF_WIDTH_SHIFT; + dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); dq->dq_state = dq_state; - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; dq->dq_serialnum = os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); } @@ -909,16 +1072,16 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!fastpath(_dq_state_is_inactive(dq_state))) { + (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (unlikely(!_dq_state_is_inactive(old_state))) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_SUSPEND_INTERVAL; }); - if (slowpath(!_dq_state_is_suspended(dq_state)) || - slowpath(_dq_state_has_side_suspend_cnt(dq_state))) { + if (unlikely(!_dq_state_is_suspended(old_state) || + 
_dq_state_has_side_suspend_cnt(old_state))) { // Crashing here means that 128+ dispatch_suspend() calls have been // made on an inactive object and then dispatch_set_target_queue() or // dispatch_set_*_handler() has been called. @@ -932,95 +1095,157 @@ _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) return true; } -/* Must be used by any caller meaning to do a speculative wakeup when the caller - * was preventing other wakeups (for example dispatch_resume() or a drainer not - * doing a drain_try_unlock() and not observing DIRTY) - * - * In that case this call loads DIRTY with an acquire barrier so that when - * other threads have made changes (such as dispatch_source_cancel()) the - * caller can take these state machine changes into account in its decision to - * wake up the object. - */ DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_try_wakeup(dispatch_queue_t dq, uint64_t dq_state, - dispatch_wakeup_flags_t flags) +static inline bool +_dq_state_needs_lock_override(uint64_t dq_state, dispatch_qos_t qos) { - if (_dq_state_should_wakeup(dq_state)) { - if (slowpath(_dq_state_is_dirty(dq_state))) { - // - // seq_cst wrt state changes that were flushed and not acted upon - os_atomic_thread_fence(acquire); - } - return dx_wakeup(dq, 0, flags); - } - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); - } + return _dq_state_is_base_anon(dq_state) && + qos < _dq_state_max_qos(dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_queue_override_self(uint64_t dq_state) +{ + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + _dispatch_wqthread_override_start(_dispatch_tid_self(), qos); + // ensure that the root queue sees + // that this thread was overridden. 
+ _dispatch_set_basepri_override_qos(qos); + return qos; } -/* Used by: - * - _dispatch_queue_class_invoke (normal path) - * - _dispatch_queue_override_invoke (stealer) - * - * Initial state must be { sc:0, ib:0, qf:0, dl:0 } - * Final state forces { dl:self, qf:1, d: 0 } - * ib:1 is forced when the width acquired is equivalent to the barrier width - */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline uint64_t _dispatch_queue_drain_try_lock(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *dq_state) + dispatch_invoke_flags_t flags) { uint64_t pending_barrier_width = (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - uint64_t xor_owner_and_set_full_width = - _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT; - uint64_t clear_enqueued_bit, old_state, new_state; + uint64_t set_owner_and_set_full_width = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT; + uint64_t lock_fail_mask, old_state, new_state, dequeue_mask; + + // same as !_dq_state_is_runnable() + lock_fail_mask = ~(DISPATCH_QUEUE_WIDTH_FULL_BIT - 1); + // same as _dq_state_drain_locked() + lock_fail_mask |= DISPATCH_QUEUE_DRAIN_OWNER_MASK; if (flags & DISPATCH_INVOKE_STEALING) { - clear_enqueued_bit = 0; + lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR; + dequeue_mask = 0; + } else if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) { + dequeue_mask = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } else { - clear_enqueued_bit = DISPATCH_QUEUE_ENQUEUED; + lock_fail_mask |= DISPATCH_QUEUE_ENQUEUED_ON_MGR; + dequeue_mask = DISPATCH_QUEUE_ENQUEUED; } + dispatch_assert(!(flags & DISPATCH_INVOKE_WLH)); + + dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); +retry: + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + if (likely(!(old_state & lock_fail_mask))) { + if (unlikely(_dq_state_needs_lock_override(old_state, oq_floor))) { + os_atomic_rmw_loop_give_up({ + oq_floor = _dispatch_queue_override_self(old_state); + goto retry; + 
}); + } + // + // Only keep the HAS_WAITER, MAX_QOS and ENQUEUED bits + // In particular acquiring the drain lock clears the DIRTY and + // RECEIVED_OVERRIDE bits. + // + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width; + if (_dq_state_has_pending_barrier(old_state) || + old_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state |= DISPATCH_QUEUE_IN_BARRIER; + } + } else if (dequeue_mask) { + // dequeue_mask is in a register, xor yields better assembly + new_state ^= dequeue_mask; + } else { + os_atomic_rmw_loop_give_up(break); + } + }); + + dispatch_assert((old_state & dequeue_mask) == dequeue_mask); + if (likely(!(old_state & lock_fail_mask))) { + new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT | + dequeue_mask; + old_state &= DISPATCH_QUEUE_WIDTH_MASK; + return new_state - old_state; + } + return 0; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) +{ + uint64_t old_state, new_state; + uint64_t lock_bits = _dispatch_lock_value_for_self() | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely(_dq_state_is_suspended(old_state))) { + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } else if (unlikely(_dq_state_drain_locked(old_state))) { + os_atomic_rmw_loop_give_up(break); + } else { + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= lock_bits; + } + }); + if (unlikely(!_dq_state_is_base_wlh(old_state) || + !_dq_state_is_enqueued_on_target(old_state) || + _dq_state_is_enqueued_on_manager(old_state))) { +#if !__LP64__ + old_state >>= 32; +#endif + DISPATCH_INTERNAL_CRASH(old_state, "Invalid wlh state"); + } + + if (dq_state) *dq_state = new_state; + return !_dq_state_is_suspended(old_state) && + !_dq_state_drain_locked(old_state); +} + 
+DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_mgr_lock(dispatch_queue_t dq) +{ + uint64_t old_state, new_state, set_owner_and_set_full_width = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { new_state = old_state; - new_state ^= clear_enqueued_bit; - if (likely(_dq_state_is_runnable(old_state) && - !_dq_state_drain_locked(old_state))) { - // - // Only keep the HAS_WAITER bit (and ENQUEUED if stealing). - // In particular acquiring the drain lock clears the DIRTY bit - // - new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - // - // For the NOWAITERS_BIT case, the thread identity - // has NOWAITERS_BIT set, and NOWAITERS_BIT was kept above, - // so the xor below flips the NOWAITERS_BIT to 0 as expected. - // - // For the non inverted WAITERS_BIT case, WAITERS_BIT is not set in - // the thread identity, and the xor leaves the bit alone. - // - new_state ^= xor_owner_and_set_full_width; - if (_dq_state_has_pending_barrier(old_state) || - old_state + pending_barrier_width < - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - new_state |= DISPATCH_QUEUE_IN_BARRIER; - } - } else if (!clear_enqueued_bit) { - os_atomic_rmw_loop_give_up(break); + if (unlikely(!_dq_state_is_runnable(old_state) || + _dq_state_drain_locked(old_state))) { + DISPATCH_INTERNAL_CRASH((uintptr_t)old_state, + "Locking the manager should not fail"); } + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width; }); +} - if (dq_state) *dq_state = new_state; - if (likely(_dq_state_is_runnable(old_state) && - !_dq_state_drain_locked(old_state))) { - new_state &= DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_FULL_BIT; - old_state &= DISPATCH_QUEUE_WIDTH_MASK; - return new_state - old_state; - } - return 0; +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_mgr_unlock(dispatch_queue_t dq) +{ + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, 
dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + }); + return _dq_state_is_dirty(old_state); } /* Used by _dispatch_barrier_{try,}sync @@ -1036,13 +1261,29 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq, */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq) -{ - uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; - value |= _dispatch_tid_self(); +_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq, + uint32_t tid, uint64_t suspend_count) +{ + uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + _dispatch_lock_value_from_tid(tid) | + (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL); + uint64_t old_state, new_state; + + return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK; + if (old_state != (init | role)) { + os_atomic_rmw_loop_give_up(break); + } + new_state = value | role; + }); +} - return os_atomic_cmpxchg2o(dq, dq_state, - DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width), value, acquire); +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid) +{ + return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, 0); } /* Used by _dispatch_sync for root queues and some drain codepaths @@ -1070,15 +1311,23 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!fastpath(_dq_state_is_sync_runnable(dq_state)) || - 
slowpath(_dq_state_is_dirty(dq_state)) || - slowpath(_dq_state_has_pending_barrier(dq_state))) { + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width + // equivalent), so we have to check that this thread hasn't enqueued + // anything ahead of this call or we can break ordering + if (unlikely(dq->dq_items_tail)) { + return false; + } + + return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (unlikely(!_dq_state_is_sync_runnable(old_state)) || + _dq_state_is_dirty(old_state) || + _dq_state_has_pending_barrier(old_state)) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL; }); } @@ -1088,21 +1337,21 @@ _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) * possibly 0 */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT -static inline uint32_t -_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width) +static inline int32_t +_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width) { - uint64_t dq_state, value; - uint32_t width; + uint64_t old_state, new_state; + int32_t width; - (void)os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - width = _dq_state_available_width(dq_state); - if (!fastpath(width)) { + (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + width = (int32_t)_dq_state_available_width(old_state); + if (unlikely(!width)) { os_atomic_rmw_loop_give_up(return 0); } if (width > da_width) { width = da_width; } - value = dq_state + width * DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + (uint64_t)width * DISPATCH_QUEUE_WIDTH_INTERVAL; }); return width; } @@ -1113,10 +1362,10 @@ _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, uint32_t da_width) */ DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_relinquish_width(dispatch_queue_t dq, uint32_t da_width) 
+_dispatch_queue_relinquish_width(dispatch_queue_t dq, int32_t da_width) { (void)os_atomic_sub2o(dq, dq_state, - da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); + (uint64_t)da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); } /* Used by target-queue recursing code @@ -1128,16 +1377,49 @@ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool _dispatch_queue_try_acquire_async(dispatch_queue_t dq) { - uint64_t dq_state, value; + uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, acquire, { - if (!fastpath(_dq_state_is_runnable(dq_state)) || - slowpath(_dq_state_is_dirty(dq_state)) || - slowpath(_dq_state_has_pending_barrier(dq_state))) { + return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + if (unlikely(!_dq_state_is_runnable(old_state) || + _dq_state_is_dirty(old_state) || + _dq_state_has_pending_barrier(old_state))) { os_atomic_rmw_loop_give_up(return false); } - value = dq_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL; + }); +} + +/* Used by concurrent drain + * + * Either acquires the full barrier width, in which case the Final state is: + * { ib:1 qf:1 pb:0 d:0 } + * Or if there isn't enough width prepare the queue with the PENDING_BARRIER bit + * { ib:0 pb:1 d:0} + * + * This always clears the dirty bit as we know for sure we shouldn't reevaluate + * the state machine here + */ +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline bool +_dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned) +{ + uint64_t old_state, new_state; + uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER + + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state - owned; + if (likely(!_dq_state_has_pending_barrier(old_state))) { + new_state += pending_barrier_width; + } + if (likely(_dq_state_is_runnable(new_state))) { + new_state 
+= DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state += DISPATCH_QUEUE_IN_BARRIER; + new_state -= DISPATCH_QUEUE_PENDING_BARRIER; + } + new_state &= ~DISPATCH_QUEUE_DIRTY; }); + return new_state & DISPATCH_QUEUE_IN_BARRIER; } /* Used at the end of Drainers @@ -1152,7 +1434,7 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, { uint64_t reservation; - if (slowpath(dq->dq_width > 1)) { + if (unlikely(dq->dq_width > 1)) { if (next_dc && _dispatch_object_is_barrier(next_dc)) { reservation = DISPATCH_QUEUE_PENDING_BARRIER; reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -1168,112 +1450,42 @@ _dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, * In that case, only the DIRTY bit is cleared. The DIRTY bit is therefore used * as a signal to renew the drain lock instead of releasing it. * - * Successful unlock forces { dl:0, d:0, qo:0 } and gives back `owned` + * Successful unlock forces { dl:0, d:!done, qo:0 } and gives back `owned` */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned) -{ - uint64_t old_state = os_atomic_load2o(dq, dq_state, relaxed); - uint64_t new_state; - dispatch_priority_t pp = 0, op; - - do { - if (unlikely(_dq_state_is_dirty(old_state) && - !_dq_state_is_suspended(old_state))) { - // just renew the drain lock with an acquire barrier, to see - // what the enqueuer that set DIRTY has done. 
- os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DIRTY, acquire); - _dispatch_queue_reinstate_override_priority(dq, pp); - return false; - } - new_state = old_state - owned; - if ((new_state & DISPATCH_QUEUE_WIDTH_FULL_BIT) || - _dq_state_is_suspended(old_state)) { - // the test for the WIDTH_FULL_BIT is about narrow concurrent queues - // releasing the drain lock while being at the width limit - // - // _non_barrier_complete() will set the DIRTY bit when going back - // under the limit which will cause the try_unlock to fail - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); +_dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) +{ + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - owned; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (unlikely(_dq_state_is_suspended(old_state))) { + // nothing to do + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + return false; + }); + } else if (likely(done)) { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; } else { - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - // This current owner is the only one that can clear HAS_OVERRIDE, - // so accumulating reset overrides here is valid. 
- if (unlikely(_dq_state_has_override(new_state))) { - new_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; - dispatch_assert(!_dispatch_queue_is_thread_bound(dq)); - op = _dispatch_queue_reset_override_priority(dq, false); - if (op > pp) pp = op; - } + new_state |= DISPATCH_QUEUE_DIRTY; } - } while (!fastpath(os_atomic_cmpxchgvw2o(dq, dq_state, - old_state, new_state, &old_state, release))); - - if (_dq_state_has_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); - } - return true; -} - -/* Used at the end of Drainers when the next work item is known - * and that the dirty-head check isn't needed. - * - * This releases `owned`, clears DIRTY, and handles HAS_OVERRIDE when seen. - */ -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_queue_drain_lock_transfer_or_unlock(dispatch_queue_t dq, - uint64_t owned, mach_port_t next_owner, uint64_t *orig_state) -{ - uint64_t dq_state, value; - -#ifdef DLOCK_NOWAITERS_BIT - // The NOWAITERS_BIT state must not change through the transfer. It means - // that if next_owner is 0 the bit must be flipped in the rmw_loop below, - // and if next_owner is set, then the bit must be left unchanged. - // - // - when next_owner is 0, the xor below sets NOWAITERS_BIT in next_owner, - // which causes the second xor to flip the bit as expected. - // - if next_owner is not 0, it has the NOWAITERS_BIT set, so we have to - // clear it so that the second xor leaves the NOWAITERS_BIT alone. 
- next_owner ^= DLOCK_NOWAITERS_BIT; -#endif - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { - value = dq_state - owned; - // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT - // but we want to be more efficient wrt the WAITERS_BIT - value &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; - value &= ~DISPATCH_QUEUE_DRAIN_PENDED; - value &= ~DISPATCH_QUEUE_DIRTY; - value ^= next_owner; }); - if (_dq_state_has_override(dq_state)) { + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); - } - if (orig_state) *orig_state = dq_state; - return value; -} -#define _dispatch_queue_drain_unlock(dq, owned, orig) \ - _dispatch_queue_drain_lock_transfer_or_unlock(dq, owned, 0, orig) - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, - uint64_t to_unlock, dispatch_object_t dou) -{ - mach_port_t th_next = 0; - if (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { - th_next = (mach_port_t)dou._dc->dc_data; + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - _dispatch_queue_drain_lock_transfer_or_unlock(dq, to_unlock, th_next, NULL); + return true; } - #pragma mark - #pragma mark os_mpsc_queue @@ -1294,7 +1506,7 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \ _tail->_o_next = NULL; \ _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \ - if (fastpath(_prev)) { \ + if (likely(_prev)) { \ os_atomic_store2o(_prev, _o_next, _head, relaxed); \ } \ (_prev == NULL); \ @@ -1314,20 +1526,22 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, // Single Consumer calls, can NOT be used safely concurrently // -#define os_mpsc_get_head(q, _ns) ({ \ - os_mpsc_node_type(q, _ns) _head; \ - _dispatch_wait_until(_head = (q)->_ns##_head); \ - _head; \ - }) +#define os_mpsc_get_head(q, _ns) \ + 
_dispatch_wait_until(os_atomic_load2o(q, _ns##_head, dependency)) + +#define os_mpsc_get_next(_n, _o_next) \ + _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency)) #define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \ typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = (head), _n = fastpath(_head->_o_next); \ + os_mpsc_node_type(_q, _ns) _head = (head), _n; \ + _n = os_atomic_load2o(_head, _o_next, dependency); \ os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ - if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release)) { \ - _dispatch_wait_until(_n = fastpath(_head->_o_next)); \ + if (unlikely(!_n && \ + !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \ + _n = os_mpsc_get_next(_head, _o_next); \ os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ } \ _n; \ @@ -1336,17 +1550,17 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, #define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \ typeof(q) _q = (q); \ os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \ - if (!_n && !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed)) { \ - _dispatch_wait_until(_n = _q->_ns##_head); \ - _head->_o_next = _n; \ + if (unlikely(!_n && \ + !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \ + _n = os_mpsc_get_head(q, _ns); \ + os_atomic_store2o(_head, _o_next, _n, relaxed); \ } \ os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ }) #define os_mpsc_capture_snapshot(q, _ns, tail) ({ \ typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head; \ - _dispatch_wait_until(_head = _q->_ns##_head); \ + os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \ os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ @@ -1357,17 +1571,17 @@ 
_dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ os_unqualified_pointer_type(head) _head = (head), _n = NULL; \ if (_head != (tail)) { \ - _dispatch_wait_until(_n = _head->_o_next); \ + _n = os_mpsc_get_next(_head, _o_next); \ }; \ _n; }) #define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \ typeof(q) _q = (q); \ os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \ - _tail->_o_next = NULL; \ - if (!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release)) { \ - _dispatch_wait_until(_n = _q->_ns##_head); \ - _tail->_o_next = _n; \ + os_atomic_store2o(_tail, _o_next, NULL, relaxed); \ + if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \ + _n = os_mpsc_get_head(q, _ns); \ + os_atomic_store2o(_tail, _o_next, _n, relaxed); \ } \ os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ }) @@ -1377,13 +1591,13 @@ _dispatch_queue_drain_transfer_lock(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, pthread_priority_t pp) +_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos) { - dispatch_lock_owner owner; + dispatch_tid owner; if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) { return true; } - _dispatch_wqthread_override_start_check_owner(owner, pp, + _dispatch_wqthread_override_start_check_owner(owner, qos, &dq->dq_sidelock.dul_lock); return false; } @@ -1403,7 +1617,9 @@ _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) return true; } // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); + // Since we don't know which override QoS was used, use MAINTENANCE + // as a marker for _dispatch_reset_basepri_override() + _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE); return false; } @@ -1413,7 +1629,9 @@ _dispatch_queue_sidelock_unlock(dispatch_queue_t dq) { if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) { // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); + // Since we don't know which override QoS was used, use MAINTENANCE + // as a marker for _dispatch_reset_basepri_override() + _dispatch_set_basepri_override_qos(DISPATCH_QOS_MAINTENANCE); } } @@ -1473,141 +1691,85 @@ _dispatch_queue_push_update_tail_list(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_update_head(dispatch_queue_t dq, - struct dispatch_object_s *head, bool retained) + struct dispatch_object_s *head) { - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { - dispatch_assert(!retained); - // Lie about "retained" here, it generates better assembly in this - // hotpath, and _dispatch_root_queue_wakeup knows to ignore this - // fake "WAKEUP_CONSUME" bit when it also sees WAKEUP_FLUSH. - // - // We need to bypass the retain below because pthread root queues - // are not global and retaining them would be wrong. - // - // We should eventually have a typeflag for "POOL" kind of root queues. - retained = true; - } - // The queue must be retained before dq_items_head is written in order - // to ensure that the reference is still valid when _dispatch_queue_wakeup - // is called. Otherwise, if preempted between the assignment to - // dq_items_head and _dispatch_queue_wakeup, the blocks submitted to the - // queue may release the last reference to the queue when invoked by - // _dispatch_queue_drain. 
- if (!retained) _dispatch_retain(dq); os_mpsc_push_update_head(dq, dq_items, head); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, unsigned int n) +_dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head, + dispatch_object_t _tail, int n) { struct dispatch_object_s *head = _head._do, *tail = _tail._do; - bool override = _dispatch_queue_need_override_retain(dq, pp); - dispatch_queue_flags_t flags; - if (slowpath(_dispatch_queue_push_update_tail_list(dq, head, tail))) { - _dispatch_queue_push_update_head(dq, head, override); - if (fastpath(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) { - return _dispatch_queue_push_list_slow(dq, n); - } - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; - } else { - return; + if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) { + _dispatch_queue_push_update_head(dq, head); + return _dispatch_global_queue_poke(dq, n, 0); } - dx_wakeup(dq, pp, flags); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) + dispatch_qos_t qos) { struct dispatch_object_s *tail = _tail._do; - bool override = _dispatch_queue_need_override(dq, pp); - if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { - // when SLOW_WAITER is set, we borrow the reference of the caller - if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { - _dispatch_queue_push_update_head(dq, tail, true); - flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_SLOW_WAITER | DISPATCH_WAKEUP_OVERRIDING; - } else { - flags = DISPATCH_WAKEUP_SLOW_WAITER; - } + dispatch_wakeup_flags_t flags = 0; + // If we are going to call dx_wakeup(), the queue must be retained 
before + // the item we're pushing can be dequeued, which means: + // - before we exchange the tail if we may have to override + // - before we set the head if we made the queue non empty. + // Otherwise, if preempted between one of these and the call to dx_wakeup() + // the blocks submitted to the queue may release the last reference to the + // queue when invoked by _dispatch_queue_drain. + bool overriding = _dispatch_queue_need_override_retain(dq, qos); + if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { + if (!overriding) _dispatch_retain_2(dq->_as_os_obj); + _dispatch_queue_push_update_head(dq, tail); + flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; + } else if (overriding) { + flags = DISPATCH_WAKEUP_CONSUME_2; } else { - if (override) _dispatch_retain(dq); - if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { - _dispatch_queue_push_update_head(dq, tail, override); - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_FLUSH; - } else if (override) { - flags = DISPATCH_WAKEUP_CONSUME | DISPATCH_WAKEUP_OVERRIDING; - } else { - return; - } + return; } - return dx_wakeup(dq, pp, flags); + return dx_wakeup(dq, qos, flags); } -struct _dispatch_identity_s { - pthread_priority_t old_pp; -}; - DISPATCH_ALWAYS_INLINE static inline void -_dispatch_root_queue_identity_assume(struct _dispatch_identity_s *di, - pthread_priority_t pp) +_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, + uint64_t dq_state) { - // assumed_rq was set by the caller, we need to fake the priorities - dispatch_queue_t assumed_rq = _dispatch_queue_get_current(); - - dispatch_assert(dx_type(assumed_rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - - di->old_pp = _dispatch_get_defaultpriority(); - - if (!(assumed_rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG)) { - if (!pp) { - pp = _dispatch_get_priority(); - // _dispatch_root_queue_drain_deferred_item() may turn a manager - // thread into a regular root queue, and we must never try to - // restore 
the manager flag once we became a regular work queue - // thread. - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } - if ((pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) > - (assumed_rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), pp); - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_defaultpriority_override(); - } - } - _dispatch_reset_defaultpriority(assumed_rq->dq_priority); + return dx_push(tq, dq, _dq_state_max_qos(dq_state)); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_root_queue_identity_restore(struct _dispatch_identity_s *di) +static inline dispatch_priority_t +_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq) { - _dispatch_reset_defaultpriority(di->old_pp); + dispatch_priority_t old_dbp = _dispatch_get_basepri(); + dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT)); + _dispatch_reset_basepri(assumed_rq->dq_priority); + _dispatch_queue_set_current(assumed_rq); + return old_dbp; } -typedef dispatch_queue_t +typedef dispatch_queue_wakeup_target_t _dispatch_queue_class_invoke_handler_t(dispatch_object_t, - dispatch_invoke_flags_t, uint64_t *owned, struct dispatch_object_s **); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t, + uint64_t *owned); DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_class_invoke(dispatch_object_t dou, - dispatch_invoke_flags_t flags, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + dispatch_invoke_flags_t const_restrict_flags, _dispatch_queue_class_invoke_handler_t invoke) { dispatch_queue_t dq = dou._dq; - struct dispatch_object_s *dc = NULL; - dispatch_queue_t tq = NULL; - uint64_t dq_state, to_unlock = 0; - bool owning = !slowpath(flags & DISPATCH_INVOKE_STEALING); - bool overriding = slowpath(flags & DISPATCH_INVOKE_OVERRIDING); + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + bool owning = !(flags & DISPATCH_INVOKE_STEALING); + 
uint64_t owned = 0; // When called from a plain _dispatch_queue_drain: // overriding = false @@ -1616,39 +1778,45 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, // When called from an override continuation: // overriding = true // owning depends on whether the override embedded the queue or steals - DISPATCH_COMPILER_CAN_ASSUME(owning || overriding); - if (owning) { + if (!(flags & (DISPATCH_INVOKE_STEALING | DISPATCH_INVOKE_WLH))) { dq->do_next = DISPATCH_OBJECT_LISTLESS; } - to_unlock = _dispatch_queue_drain_try_lock(dq, flags, &dq_state); - if (likely(to_unlock)) { - struct _dispatch_identity_s di; - pthread_priority_t old_dp; - -drain_pending_barrier: - if (overriding) { - _dispatch_object_debug(dq, "stolen onto thread 0x%x, 0x%lx", - _dispatch_tid_self(), _dispatch_get_defaultpriority()); - _dispatch_root_queue_identity_assume(&di, 0); - } - + flags |= const_restrict_flags; + if (likely(flags & DISPATCH_INVOKE_WLH)) { + owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED | DISPATCH_QUEUE_ENQUEUED; + } else { + owned = _dispatch_queue_drain_try_lock(dq, flags); + } + if (likely(owned)) { + dispatch_priority_t old_dbp; if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { - pthread_priority_t op, dp; - - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); - op = dq->dq_override; - if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), op); - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); - } + old_dbp = _dispatch_set_basepri(dq->dq_priority); + } else { + old_dbp = 0; } flags = _dispatch_queue_merge_autorelease_frequency(dq, flags); attempt_running_slow_head: - tq = invoke(dq, flags, &to_unlock, &dc); - if (slowpath(tq)) { +#if DISPATCH_COCOA_COMPAT + if ((flags & DISPATCH_INVOKE_WLH) && + !(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) { + _dispatch_last_resort_autorelease_pool_push(dic); + } +#endif // DISPATCH_COCOA_COMPAT + tq = invoke(dq, dic, flags, &owned); +#if DISPATCH_COCOA_COMPAT + if ((flags & DISPATCH_INVOKE_WLH) && + !(flags & DISPATCH_INVOKE_AUTORELEASE_ALWAYS)) { + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_last_resort_autorelease_pool_pop(dic); + _dispatch_thread_frame_pop(&dtf); + } +#endif // DISPATCH_COCOA_COMPAT + dispatch_assert(tq != DISPATCH_QUEUE_WAKEUP_TARGET); + if (unlikely(tq != DISPATCH_QUEUE_WAKEUP_NONE && + tq != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT)) { // Either dc is set, which is a deferred invoke case // // or only tq is and it means a reenqueue is required, because of: @@ -1657,78 +1825,60 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, // In both cases, we want to bypass the check for DIRTY. 
// That may cause us to leave DIRTY in place but all drain lock // acquirers clear it - } else { - if (!_dispatch_queue_drain_try_unlock(dq, to_unlock)) { + } else if (!_dispatch_queue_drain_try_unlock(dq, owned, + tq == DISPATCH_QUEUE_WAKEUP_NONE)) { + tq = _dispatch_queue_get_current(); + if (dx_hastypeflag(tq, QUEUE_ROOT) || !owning) { goto attempt_running_slow_head; } - to_unlock = 0; - } - if (overriding) { - _dispatch_root_queue_identity_restore(&di); + DISPATCH_COMPILER_CAN_ASSUME(tq != DISPATCH_QUEUE_WAKEUP_NONE); + } else { + owned = 0; + tq = NULL; } if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { - _dispatch_reset_defaultpriority(old_dp); - } - } else if (overriding) { - uint32_t owner = _dq_state_drain_owner(dq_state); - pthread_priority_t p = dq->dq_override; - if (owner && p) { - _dispatch_object_debug(dq, "overriding thr 0x%x to priority 0x%lx", - owner, p); - _dispatch_wqthread_override_start_check_owner(owner, p, - &dq->dq_state_lock); + _dispatch_reset_basepri(old_dbp); } } - - if (owning) { + if (likely(owning)) { _dispatch_introspection_queue_item_complete(dq); } - if (tq && dc) { - return _dispatch_queue_drain_deferred_invoke(dq, flags, to_unlock, dc); - } - if (tq) { - bool full_width_upgrade_allowed = (tq == _dispatch_queue_get_current()); - uint64_t old_state, new_state; + if (const_restrict_flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS) { + dispatch_assert(dic->dic_deferred == NULL); + } else if (dic->dic_deferred) { + return _dispatch_queue_drain_sync_waiter(dq, dic, + flags, owned); + } + uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED; + if (tq == DISPATCH_QUEUE_WAKEUP_MGR) { + enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - to_unlock; - if (full_width_upgrade_allowed && _dq_state_is_runnable(new_state) && - _dq_state_has_pending_barrier(new_state)) { - new_state += DISPATCH_QUEUE_IN_BARRIER; - new_state += 
DISPATCH_QUEUE_WIDTH_INTERVAL; - new_state -= DISPATCH_QUEUE_PENDING_BARRIER; - new_state += to_unlock & DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - } else { - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); - if (_dq_state_should_wakeup(new_state)) { - // drain was not interupted for suspension - // we will reenqueue right away, just put ENQUEUED back - new_state |= DISPATCH_QUEUE_ENQUEUED; - new_state |= DISPATCH_QUEUE_DIRTY; - } + new_state = old_state - owned; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state |= DISPATCH_QUEUE_DIRTY; + if (_dq_state_is_runnable(new_state) && + !_dq_state_is_enqueued(new_state)) { + // drain was not interupted for suspension + // we will reenqueue right away, just put ENQUEUED back + new_state |= enqueued; } }); - if (_dq_state_is_in_barrier(new_state)) { - // we did a "full width upgrade" and just added IN_BARRIER - // so adjust what we own and drain again - to_unlock &= DISPATCH_QUEUE_ENQUEUED; - to_unlock += DISPATCH_QUEUE_IN_BARRIER; - to_unlock += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - goto drain_pending_barrier; - } - if (_dq_state_has_override(old_state)) { + old_state -= owned; + if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); + _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); } - - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - return _dispatch_queue_push(tq, dq, 0); + if ((old_state ^ new_state) & enqueued) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); } } - return _dispatch_release_tailcall(dq); + _dispatch_release_2_tailcall(dq); } DISPATCH_ALWAYS_INLINE @@ -1739,7 +1889,7 @@ _dispatch_queue_class_probe(dispatch_queue_class_t dqu) // seq_cst wrt atomic store to dq_state // seq_cst wrt atomic store to dq_flags tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered); - return slowpath(tail != NULL); + return unlikely(tail != NULL); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST @@ -1752,87 +1902,12 @@ _dispatch_is_in_root_queues_array(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_t -_dispatch_get_root_queue(qos_class_t priority, bool overcommit) -{ - if (overcommit) switch (priority) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_BACKGROUND: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_UTILITY: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_DEFAULT: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_USER_INITIATED: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT]; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT]; - } else switch (priority) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS]; - case _DISPATCH_QOS_CLASS_BACKGROUND: 
- return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS]; - case _DISPATCH_QOS_CLASS_UTILITY: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS]; - case _DISPATCH_QOS_CLASS_DEFAULT: - return &_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS]; - case _DISPATCH_QOS_CLASS_USER_INITIATED: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS]; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - return &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS]; - } - return NULL; -} - -#if HAVE_PTHREAD_WORKQUEUE_QOS -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t -_dispatch_get_root_queue_for_priority(pthread_priority_t pp, bool overcommit) -{ - uint32_t idx; - - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - idx = (uint32_t)__builtin_ffs((int)pp); - if (unlikely(!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] - .dq_priority)) { - // If kernel doesn't support maintenance, bottom bit is background. - // Shift to our idea of where background bit is. 
- idx++; - } - // ffs starts at 1, and account for the QOS_CLASS_SHIFT - // if pp is 0, idx is 0 or 1 and this will wrap to a value larger than - // DISPATCH_QOS_COUNT - idx -= (_PTHREAD_PRIORITY_QOS_CLASS_SHIFT + 1); - if (unlikely(idx >= DISPATCH_QUEUE_QOS_COUNT)) { - DISPATCH_CLIENT_CRASH(pp, "Corrupted priority"); - } - return &_dispatch_root_queues[2 * idx + overcommit]; -} -#endif - -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t -_dispatch_get_root_queue_with_overcommit(dispatch_queue_t rq, bool overcommit) +_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) { - bool rq_overcommit = (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - // root queues in _dispatch_root_queues are not overcommit for even indices - // and overcommit for odd ones, so fixing overcommit is either returning - // the same queue, or picking its neighbour in _dispatch_root_queues - if (overcommit && !rq_overcommit) { - return rq + 1; + if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) { + DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } - if (!overcommit && rq_overcommit) { - return rq - 1; - } - return rq; + return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; } DISPATCH_ALWAYS_INLINE @@ -1841,23 +1916,21 @@ _dispatch_queue_set_bound_thread(dispatch_queue_t dq) { // Tag thread-bound queues with the owning thread dispatch_assert(_dispatch_queue_is_thread_bound(dq)); - mach_port_t old_owner, self = _dispatch_tid_self(); - uint64_t dq_state = os_atomic_or_orig2o(dq, dq_state, self, relaxed); - if (unlikely(old_owner = _dq_state_drain_owner(dq_state))) { - DISPATCH_INTERNAL_CRASH(old_owner, "Queue bound twice"); - } + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; + new_state |= _dispatch_lock_value_for_self(); + }); } DISPATCH_ALWAYS_INLINE static inline void 
_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) { - uint64_t dq_state, value; - dispatch_assert(_dispatch_queue_is_thread_bound(dq)); - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - value = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(dq_state); - }); + _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC); + os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed); } DISPATCH_ALWAYS_INLINE @@ -1881,13 +1954,12 @@ _dispatch_set_pthread_root_queue_observer_hooks( #pragma mark dispatch_priority DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_get_defaultpriority(void) +static inline dispatch_priority_t +_dispatch_get_basepri(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t pp = (uintptr_t)_dispatch_thread_getspecific( - dispatch_defaultpriority_key); - return pp; + return (dispatch_priority_t)(uintptr_t)_dispatch_thread_getspecific( + dispatch_basepri_key); #else return 0; #endif @@ -1895,99 +1967,107 @@ _dispatch_get_defaultpriority(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_defaultpriority(pthread_priority_t pp) +_dispatch_reset_basepri(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); + dispatch_priority_t old_dbp = _dispatch_get_basepri(); // If an inner-loop or'd in the override flag to the per-thread priority, // it needs to be propagated up the chain. 
- pp |= old_pp & _PTHREAD_PRIORITY_OVERRIDE_FLAG; - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + dbp |= (old_dbp & DISPATCH_PRIORITY_OVERRIDE_MASK); + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); #else - (void)pp; + (void)dbp; #endif } +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_get_basepri_override_qos_floor(void) +{ + dispatch_priority_t dbp = _dispatch_get_basepri(); + dispatch_qos_t qos = _dispatch_priority_qos(dbp); + dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); + dispatch_qos_t qos_floor = MAX(qos, oqos); + return qos_floor ? qos_floor : DISPATCH_QOS_SATURATED; +} + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_set_defaultpriority_override(void) +_dispatch_set_basepri_override_qos(dispatch_qos_t qos) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - pthread_priority_t pp = old_pp | _PTHREAD_PRIORITY_OVERRIDE_FLAG; - - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); + dispatch_priority_t dbp = _dispatch_get_basepri(); + if (_dispatch_priority_override_qos(dbp) >= qos) return; + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + dbp |= qos << DISPATCH_PRIORITY_OVERRIDE_SHIFT; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); +#else + (void)qos; #endif } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_reset_defaultpriority_override(void) +_dispatch_reset_basepri_override(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - pthread_priority_t pp = old_pp & - ~((pthread_priority_t)_PTHREAD_PRIORITY_OVERRIDE_FLAG); - - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); - return unlikely(pp != old_pp); + dispatch_priority_t dbp = _dispatch_get_basepri(); + dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); + if (oqos) { + dbp &= 
~DISPATCH_PRIORITY_OVERRIDE_MASK; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); + return oqos != DISPATCH_QOS_SATURATED; + } #endif return false; } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, - dispatch_queue_t tq) +static inline dispatch_priority_t +_dispatch_set_basepri(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const dispatch_priority_t rootqueue_flag = _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - const dispatch_priority_t inherited_flag = _PTHREAD_PRIORITY_INHERIT_FLAG; - const dispatch_priority_t defaultqueue_flag = - _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - dispatch_priority_t dqp = dq->dq_priority, tqp = tq->dq_priority; - if ((!(dqp & ~_PTHREAD_PRIORITY_FLAGS_MASK) || (dqp & inherited_flag)) && - (tqp & rootqueue_flag)) { - if (tqp & defaultqueue_flag) { - dq->dq_priority = 0; - } else { - dq->dq_priority = (tqp & ~rootqueue_flag) | inherited_flag; + const dispatch_priority_t preserved_mask = + DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_priority_t old_dbp = _dispatch_get_basepri(); + if (old_dbp) { + dispatch_priority_t flags, defaultqueue, basepri; + flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); + defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); + basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK; + dbp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (!dbp) { + flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue; + dbp = basepri; + } else if (dbp < basepri && !defaultqueue) { // rdar://16349734 + dbp = basepri; } + dbp |= flags | (old_dbp & preserved_mask); + } else { + dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; } + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); + return old_dbp; #else - (void)dq; (void)tq; + (void)dbp; + return 0; #endif } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_set_defaultpriority(pthread_priority_t pp, pthread_priority_t 
*new_pp) +static inline dispatch_priority_t +_dispatch_set_basepri_wlh(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const pthread_priority_t default_priority_preserved_flags = - _PTHREAD_PRIORITY_OVERRIDE_FLAG|_PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - pthread_priority_t old_pp = _dispatch_get_defaultpriority(); - if (old_pp) { - pthread_priority_t flags, defaultqueue, basepri; - flags = (pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultqueue = (old_pp & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - basepri = (old_pp & ~_PTHREAD_PRIORITY_FLAGS_MASK); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!pp) { - flags = _PTHREAD_PRIORITY_INHERIT_FLAG | defaultqueue; - pp = basepri; - } else if (pp < basepri && !defaultqueue) { // rdar://16349734 - pp = basepri; - } - pp |= flags | (old_pp & default_priority_preserved_flags); - } - _dispatch_thread_setspecific(dispatch_defaultpriority_key, (void*)pp); - if (new_pp) *new_pp = pp; - return old_pp; + dispatch_assert(!_dispatch_get_basepri()); + // _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED) + dbp |= DISPATCH_QOS_SATURATED << DISPATCH_PRIORITY_OVERRIDE_SHIFT; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); #else - (void)pp; (void)new_pp; - return 0; + (void)dbp; #endif + return 0; } DISPATCH_ALWAYS_INLINE @@ -1995,25 +2075,24 @@ static inline pthread_priority_t _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t defaultpri = _dispatch_get_defaultpriority(); - bool enforce, inherited, defaultqueue; - enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || + dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri(); + pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp); + bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); - inherited = (defaultpri & _PTHREAD_PRIORITY_INHERIT_FLAG); - defaultqueue = (defaultpri & 
_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG); - defaultpri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT); + defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; if (!pp) { - return defaultpri; + return basepp; } else if (defaultqueue) { // rdar://16349734 return pp; - } else if (pp < defaultpri) { - return defaultpri; + } else if (pp < basepp) { + return basepp; } else if (enforce || inherited) { return pp; } else { - return defaultpri; + return basepp; } #else (void)pp; (void)flags; @@ -2022,22 +2101,61 @@ _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_priority_inherit_from_root_queue(pthread_priority_t pp, +static inline void +_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, + dispatch_queue_t tq) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE; + const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT; + const dispatch_priority_t defaultqueue_flag = + DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; + dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority; + + if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) && + (tpri & rootqueue_flag)) { + if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) { + pri &= DISPATCH_PRIORITY_OVERRIDE_MASK; + } else { + pri = 0; + } + if (tpri & defaultqueue_flag) { + // base queues need to know they target + // the default root queue so that _dispatch_queue_override_qos() + // in _dispatch_queue_class_wakeup() can fallback to QOS_DEFAULT + // if no other priority was provided. 
+ pri |= defaultqueue_flag; + } else { + pri |= (tpri & ~rootqueue_flag) | inherited_flag; + } + dq->dq_priority = pri; + } else if (pri & defaultqueue_flag) { + // the DEFAULTQUEUE flag is only set on queues due to the code above, + // and must never be kept if we don't target a global root queue. + dq->dq_priority = (pri & ~defaultqueue_flag); + } +#else + (void)dq; (void)tq; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri, dispatch_queue_t rq) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t p = pp & ~_PTHREAD_PRIORITY_FLAGS_MASK; - pthread_priority_t rqp = rq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - pthread_priority_t defaultqueue = - rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_priority_t defaultqueue = + rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; if (!p || (!defaultqueue && p < rqp)) { p = rqp | defaultqueue; } - return p | (rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); #else - (void)rq; (void)pp; + (void)rq; (void)pri; return 0; #endif } @@ -2075,7 +2193,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp) pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; if (unlikely(cur_priority & unbind)) { // else we always need an update if the NEEDS_UNBIND flag is set - // the slowpath in _dispatch_set_priority_and_voucher_slow() will + // the slow path in _dispatch_set_priority_and_voucher_slow() will // adjust the priority further with the proper overcommitness return pp ? 
pp : (cur_priority & ~unbind); } else { @@ -2089,7 +2207,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp) DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _dispatch_set_priority_and_voucher(pthread_priority_t pp, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS pp = _dispatch_priority_compute_update(pp); @@ -2118,7 +2236,7 @@ _dispatch_set_priority_and_voucher(pthread_priority_t pp, DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline voucher_t _dispatch_adopt_priority_and_set_voucher(pthread_priority_t pp, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { pthread_priority_t p = 0; if (pp != DISPATCH_NO_PRIORITY) { @@ -2138,7 +2256,7 @@ _dispatch_reset_priority_and_voucher(pthread_priority_t pp, voucher_t v) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) +_dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags) { flags |= DISPATCH_VOUCHER_CONSUME | DISPATCH_VOUCHER_REPLACE; (void)_dispatch_set_priority_and_voucher(0, v, flags); @@ -2146,124 +2264,75 @@ _dispatch_reset_voucher(voucher_t v, _dispatch_thread_set_self_t flags) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_need_override(dispatch_queue_class_t dqu, pthread_priority_t pp) -{ - // global queues have their override set to DISPATCH_SATURATED_OVERRIDE - // which makes this test always return false for them. 
- return dqu._oq->oq_override < (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_received_override(dispatch_queue_class_t dqu, - pthread_priority_t pp) +_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos) { - dispatch_assert(dqu._oq->oq_override != DISPATCH_SATURATED_OVERRIDE); - return dqu._oq->oq_override > (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); + uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed); + // dq_priority "override qos" contains the priority at which the queue + // is already running for thread-bound queues. + // For non thread-bound queues, the qos of the queue may not be observed + // when the first work item is dispatched synchronously. + return _dq_state_max_qos(dq_state) < qos && + _dispatch_priority_override_qos(dqu._dq->dq_priority) < qos; } DISPATCH_ALWAYS_INLINE static inline bool _dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, - pthread_priority_t pp) + dispatch_qos_t qos) { - if (_dispatch_queue_need_override(dqu, pp)) { - _os_object_retain_internal_inline(dqu._oq->_as_os_obj); + if (_dispatch_queue_need_override(dqu, qos)) { + _os_object_retain_internal_n_inline(dqu._oq->_as_os_obj, 2); return true; } return false; } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_reinstate_override_priority(dispatch_queue_class_t dqu, - dispatch_priority_t new_op) -{ - dispatch_priority_t old_op; - new_op &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (!new_op) return false; - os_atomic_rmw_loop2o(dqu._oq, oq_override, old_op, new_op, relaxed, { - if (new_op <= old_op) { - os_atomic_rmw_loop_give_up(return false); - } - }); - return true; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_override_priority(dispatch_queue_class_t dqu, - pthread_priority_t *pp, dispatch_wakeup_flags_t *flags) +static inline dispatch_qos_t +_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos) { - os_mpsc_queue_t oq = 
dqu._oq; - dispatch_priority_t qp = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dispatch_priority_t np = (*pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK); - dispatch_priority_t o; - - _dispatch_assert_is_valid_qos_override(np); - if (oq->oq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG) { - qp = 0; - } else if (*flags & DISPATCH_WAKEUP_SLOW_WAITER) { - // when a queue is used as a lock its priority doesn't count - } else if (np < qp) { - // for asynchronous workitems, queue priority is the floor for overrides - np = qp; - } - *flags &= ~_DISPATCH_WAKEUP_OVERRIDE_BITS; - - // this optimizes for the case when no update of the override is required - // os_atomic_rmw_loop2o optimizes for the case when the update happens, - // and can't be used. - o = os_atomic_load2o(oq, oq_override, relaxed); - do { - if (likely(np <= o)) break; - } while (unlikely(!os_atomic_cmpxchgvw2o(oq, oq_override, o, np, &o, relaxed))); - - if (np <= o) { - *pp = o; - } else { - *flags |= DISPATCH_WAKEUP_OVERRIDING; - *pp = np; - } - if (o > qp) { - *flags |= DISPATCH_WAKEUP_WAS_OVERRIDDEN; + if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) { + // queues targeting the default root queue use any asynchronous + // workitem priority available and fallback to QOS_DEFAULT otherwise. + return qos ? 
qos : DISPATCH_QOS_DEFAULT; } + // for asynchronous workitems, queue priority is the floor for overrides + return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority)); } -DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_queue_reset_override_priority(dispatch_queue_class_t dqu, - bool qp_is_floor) -{ - os_mpsc_queue_t oq = dqu._oq; - dispatch_priority_t p = 0; - if (qp_is_floor) { - // thread bound queues floor their dq_override to their - // priority to avoid receiving useless overrides - p = oq->oq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - } - dispatch_priority_t o = os_atomic_xchg2o(oq, oq_override, p, relaxed); - dispatch_assert(o != DISPATCH_SATURATED_OVERRIDE); - return (o > p) ? o : 0; -} +#define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1 +#define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2 DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_priority_propagate(void) +_dispatch_priority_compute_propagated(pthread_priority_t pp, + unsigned int flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t pp = _dispatch_get_priority(); + if (flags & DISPATCH_PRIORITY_PROPAGATE_CURRENT) { + pp = _dispatch_get_priority(); + } pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp > _dispatch_user_initiated_priority) { + if (!(flags & DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC) && + pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) { // Cap QOS for propagation at user-initiated - pp = _dispatch_user_initiated_priority; + return _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED); } return pp; #else + (void)pp; (void)flags; return 0; #endif } +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_propagate(void) +{ + return _dispatch_priority_compute_propagated(0, + DISPATCH_PRIORITY_PROPAGATE_CURRENT); +} + // including maintenance DISPATCH_ALWAYS_INLINE static inline bool @@ -2271,8 +2340,7 @@ _dispatch_is_background_thread(void) { #if HAVE_PTHREAD_WORKQUEUE_QOS pthread_priority_t pp = _dispatch_get_priority(); 
- pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - return pp && (pp <= _dispatch_background_priority); + return _dispatch_qos_is_background(_dispatch_qos_from_pp(pp)); #else return false; #endif @@ -2291,16 +2359,21 @@ _dispatch_block_has_private_data(const dispatch_block_t block) return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_block_sync_should_enforce_qos_class(dispatch_block_flags_t flags) +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline pthread_priority_t +_dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags, + pthread_priority_t new_pri) { - /* - * Generates better assembly than the actual readable test: - * (flags & ENFORCE_QOS_CLASS) || !(flags & INHERIT_QOS_FLAGS) - */ - flags &= DISPATCH_BLOCK_ENFORCE_QOS_CLASS | DISPATCH_BLOCK_INHERIT_QOS_CLASS; - return flags != DISPATCH_BLOCK_INHERIT_QOS_CLASS; + pthread_priority_t old_pri, p = 0; // 0 means do not change priority. + if ((flags & DISPATCH_BLOCK_HAS_PRIORITY) + && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || + !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) { + old_pri = _dispatch_get_priority(); + new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (!p || p >= new_pri) p = 0; + } + return p; } DISPATCH_ALWAYS_INLINE @@ -2442,12 +2515,14 @@ _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, _dispatch_continuation_free_to_cache_limit(dc1); } }); + _dispatch_perfmon_workitem_inc(); } DISPATCH_ALWAYS_INLINE_NDEBUG static inline void -_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags) +_dispatch_continuation_pop_inline(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + dispatch_queue_t dq) { dispatch_pthread_root_queue_observer_hooks_t observer_hooks = _dispatch_get_pthread_root_queue_observer_hooks(); @@ -2455,10 +2530,9 @@ 
_dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_queue_t dq, _dispatch_trace_continuation_pop(dq, dou); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._do, flags); + dx_invoke(dou._do, dic, flags); } else { - voucher_t ov = dq->dq_override_voucher; - _dispatch_continuation_invoke_inline(dou, ov, flags); + _dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags); } if (observer_hooks) observer_hooks->queue_did_execute(dq); } @@ -2501,21 +2575,21 @@ _dispatch_continuation_priority_set(dispatch_continuation_t dc, } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_continuation_get_override_priority(dispatch_queue_t dq, +static inline dispatch_qos_t +_dispatch_continuation_override_qos(dispatch_queue_t dq, dispatch_continuation_t dc) { #if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t p = dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; + dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority); bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; - pthread_priority_t dqp = dq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = dq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY); - if (p && (enforce || !dqp || defaultqueue)) { - return p; + if (dc_qos && (enforce || !dq_qos || defaultqueue)) { + return dc_qos; } - return dqp; + return dq_qos; #else (void)dq; (void)dc; return 0; @@ -2559,6 +2633,36 @@ _dispatch_continuation_init(dispatch_continuation_t dc, _dispatch_continuation_voucher_set(dc, dqu, flags); } +#if HAVE_MACH +#pragma mark dispatch_mach_reply_refs_t + +// assumes low bit of mach port names is always set +#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u + +DISPATCH_ALWAYS_INLINE +static inline void 
+_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) +{ + mach_port_t reply_port = (mach_port_t)dmr->du_ident; + return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_port_t +_dispatch_mach_reply_get_reply_port(mach_port_t reply_port) +{ + return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; +} + +#endif // HAVE_MACH + #endif // DISPATCH_PURE_C #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index a9aee1123..286e53458 100644 --- a/src/internal.h +++ b/src/internal.h @@ -38,6 +38,7 @@ #ifdef __APPLE__ #include +#include #include #ifndef TARGET_OS_MAC_DESKTOP @@ -46,26 +47,26 @@ #endif #if TARGET_OS_MAC_DESKTOP -# define DISPATCH_HOST_SUPPORTS_OSX(x) \ +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_HOST_SUPPORTS_OSX(101000) -# error "OS X hosts older than OS X 10.10 aren't supported anymore" -# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# error "OS X hosts older than OS X 10.12 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #elif TARGET_OS_SIMULATOR -# define DISPATCH_HOST_SUPPORTS_OSX(x) \ +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_HOST_SUPPORTS_OSX(101000) -# error "Simulator hosts older than OS X 10.10 aren't supported anymore" -# endif // !DISPATCH_HOST_SUPPORTS_OSX(101000) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# error "Simulator hosts older than OS X 10.12 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #else -# define DISPATCH_HOST_SUPPORTS_OSX(x) 1 -# if 
__IPHONE_OS_VERSION_MIN_REQUIRED < 70000 -# error "iOS hosts older than iOS 7.0 aren't supported anymore" +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 +# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# error "iOS hosts older than iOS 9.0 aren't supported anymore" # endif #endif #else // !__APPLE__ -#define DISPATCH_HOST_SUPPORTS_OSX(x) 0 +#define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 0 #endif // !__APPLE__ @@ -78,6 +79,9 @@ #if !defined(OS_VOUCHER_ACTIVITY_SPI) && TARGET_OS_MAC #define OS_VOUCHER_ACTIVITY_SPI 1 #endif +#if !defined(OS_VOUCHER_ACTIVITY_GENERATE_SWAPS) +#define OS_VOUCHER_ACTIVITY_GENERATE_SWAPS 0 +#endif #if !defined(OS_FIREHOSE_SPI) && TARGET_OS_MAC #define OS_FIREHOSE_SPI 1 #endif @@ -155,6 +159,7 @@ #endif /* private.h must be included last to avoid picking up installed headers. */ +#include #include "os/object_private.h" #include "queue_private.h" #include "source_private.h" @@ -184,6 +189,8 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_USE_CLIENT_CALLOUT 1 #endif +#define DISPATCH_ALLOW_NON_LEAF_RETARGET 1 + /* The "_debug" library build */ #ifndef DISPATCH_DEBUG #define DISPATCH_DEBUG 0 @@ -235,28 +242,29 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if HAVE_MALLOC_MALLOC_H #include #endif -#if __has_include() -#include -#endif // __has_include( #if !TARGET_OS_WIN32 -#include #include #include +#ifdef __ANDROID__ +#include +#else #include +#endif /* __ANDROID__ */ #include #include #include #include #endif -#if defined(__linux__) -#include -#endif #ifdef __BLOCKS__ +#if __has_include() #include +#else +#include "BlocksRuntime/Block_private.h" +#endif // __has_include() #include #endif /* __BLOCKS__ */ @@ -304,6 +312,31 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_CONCAT(x,y) DISPATCH_CONCAT1(x,y) #define DISPATCH_CONCAT1(x,y) x ## y +#define DISPATCH_COUNT_ARGS(...) 
DISPATCH_COUNT_ARGS1(, ## __VA_ARGS__, \ + _8, _7, _6, _5, _4, _3, _2, _1, _0) +#define DISPATCH_COUNT_ARGS1(z, a, b, c, d, e, f, g, h, cnt, ...) cnt + +#if BYTE_ORDER == LITTLE_ENDIAN +#define DISPATCH_STRUCT_LE_2(a, b) struct { a; b; } +#define DISPATCH_STRUCT_LE_3(a, b, c) struct { a; b; c; } +#define DISPATCH_STRUCT_LE_4(a, b, c, d) struct { a; b; c; d; } +#else +#define DISPATCH_STRUCT_LE_2(a, b) struct { b; a; } +#define DISPATCH_STRUCT_LE_3(a, b, c) struct { c; b; a; } +#define DISPATCH_STRUCT_LE_4(a, b, c, d) struct { d; c; b; a; } +#endif +#if __has_feature(c_startic_assert) +#define DISPATCH_UNION_ASSERT(alias, st) \ + _Static_assert(sizeof(struct { alias; }) == sizeof(st), "bogus union"); +#else +#define DISPATCH_UNION_ASSERT(alias, st) +#endif +#define DISPATCH_UNION_LE(alias, ...) \ + DISPATCH_UNION_ASSERT(alias, DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \ + DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)) \ + union { alias; DISPATCH_CONCAT(DISPATCH_STRUCT_LE, \ + DISPATCH_COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); } + // workaround 6368156 #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC @@ -335,16 +368,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define unlikely(x) (!!(x)) #endif // __GNUC__ -#if BYTE_ORDER == LITTLE_ENDIAN -#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { a; b; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { a; b; c; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { a; b; c; d; } -#else -#define DISPATCH_STRUCT_LITTLE_ENDIAN_2(a, b) struct { b; a; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_3(a, b, c) struct { c; b; a; } -#define DISPATCH_STRUCT_LITTLE_ENDIAN_4(a, b, c, d) struct { d; c; b; a; } -#endif - #define _TAILQ_IS_ENQUEUED(elm, field) \ ((elm)->field.tqe_prev != NULL) #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ @@ -352,8 +375,12 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if DISPATCH_DEBUG // sys/queue.h debugging +#if defined(__linux__) +#define 
QUEUE_MACRO_DEBUG 1 +#else #undef TRASHIT #define TRASHIT(x) do {(x) = (void *)-1;} while (0) +#endif #endif // DISPATCH_DEBUG #define _TAILQ_TRASH_ENTRY(elm, field) do { \ TRASHIT((elm)->field.tqe_next); \ @@ -367,9 +394,9 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); DISPATCH_EXPORT DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); -#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_client(const char* msg); +#if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); #endif // HAVE_MACH @@ -427,25 +454,27 @@ void _dispatch_log(const char *msg, ...); * For reporting bugs within libdispatch when using the "_debug" version of the * library. */ -#if __GNUC__ +#if __APPLE__ #define dispatch_assert(e) do { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ _dispatch_abort(__LINE__, (long)_e); \ } \ } \ } while (0) #else -static inline void _dispatch_assert(long e, long line) { +static inline void +_dispatch_assert(long e, size_t line) +{ if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e); } #define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) #endif /* __GNUC__ */ -#if __GNUC__ +#if __APPLE__ /* * A lot of API return zero upon success and not-zero on fail. 
Let's capture * and log the non-zero value @@ -454,17 +483,19 @@ static inline void _dispatch_assert(long e, long line) { if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && _e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && _e)) { \ _dispatch_abort(__LINE__, (long)_e); \ } \ } \ } while (0) #else -static inline void _dispatch_assert_zero(long e, long line) { +static inline void +_dispatch_assert_zero(long e, size_t line) +{ if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); } -#define dispatch_assert_zero(e) _dispatch_assert((long)(e), __LINE__) +#define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__) #endif /* __GNUC__ */ /* @@ -475,8 +506,8 @@ static inline void _dispatch_assert_zero(long e, long line) { */ #if __GNUC__ #define dispatch_assume(e) ({ \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (!_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(!_e)) { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } \ @@ -485,7 +516,9 @@ static inline void _dispatch_assert_zero(long e, long line) { _e; \ }) #else -static inline long _dispatch_assume(long e, long line) { +static inline long +_dispatch_assume(long e, long line) +{ if (!e) _dispatch_bug(line, e); return e; } @@ -498,8 +531,8 @@ static inline long _dispatch_assume(long e, long line) { */ #if __GNUC__ #define dispatch_assume_zero(e) ({ \ - typeof(e) _e = slowpath(e); /* always eval 'e' */ \ - if (_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(_e)) { \ if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } \ @@ -508,7 +541,9 @@ static inline long _dispatch_assume(long e, long line) { _e; \ }) #else -static inline long _dispatch_assume_zero(long e, long line) { +static inline long +_dispatch_assume_zero(long e, long line) +{ if (e) _dispatch_bug(line, e); return e; } @@ -523,8 +558,8 
@@ static inline long _dispatch_assume_zero(long e, long line) { if (__builtin_constant_p(e)) { \ dispatch_static_assert(e); \ } else { \ - typeof(e) _e = fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ abort(); \ } \ @@ -532,8 +567,8 @@ static inline long _dispatch_assume_zero(long e, long line) { } while (0) #else #define dispatch_debug_assert(e, msg, args...) do { \ - long _e = (long)fastpath(e); /* always eval 'e' */ \ - if (DISPATCH_DEBUG && !_e) { \ + typeof(e) _e = (e); /* always eval 'e' */ \ + if (unlikely(DISPATCH_DEBUG && !_e)) { \ _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \ abort(); \ } \ @@ -548,13 +583,6 @@ static inline long _dispatch_assume_zero(long e, long line) { } \ } while (0) -#if DISPATCH_DEBUG -#if HAVE_MACH -DISPATCH_NOINLINE DISPATCH_USED -void dispatch_debug_machport(mach_port_t name, const char* str); -#endif -#endif - #if DISPATCH_DEBUG /* This is the private version of the deprecated dispatch_debug() */ DISPATCH_NONNULL2 DISPATCH_NOTHROW @@ -580,6 +608,7 @@ void *_dispatch_calloc(size_t num_items, size_t size); const char *_dispatch_strdup_if_mutable(const char *str); void _dispatch_vtable_init(void); char *_dispatch_get_build(void); +int _dispatch_sigmask(void); uint64_t _dispatch_timeout(dispatch_time_t when); uint64_t _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when); @@ -597,51 +626,40 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_fork_becomes_unsafe(void) { - if (!fastpath(_dispatch_is_multithreaded_inline())) { + if (unlikely(!_dispatch_is_multithreaded_inline())) { _dispatch_fork_becomes_unsafe_slow(); DISPATCH_COMPILER_CAN_ASSUME(_dispatch_is_multithreaded_inline()); } } +#if DISPATCH_INTROSPECTION +#undef DISPATCH_PERF_MON +#define DISPATCH_PERF_MON 0 +#endif + /* #includes dependent on internal.h */ #include 
"shims.h" +#include "event/event_internal.h" // Older Mac OS X and iOS Simulator fallbacks -#if HAVE_PTHREAD_WORKQUEUES -#ifndef WORKQ_ADDTHREADS_OPTION_OVERCOMMIT -#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x00000001 -#endif -#endif // HAVE_PTHREAD_WORKQUEUES #if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \ && !defined(HAVE_PTHREAD_WORKQUEUE_QOS) #define HAVE_PTHREAD_WORKQUEUE_QOS 1 #endif -#if HAVE__PTHREAD_WORKQUEUE_INIT && (PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \ - || (PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 && \ - defined(WORKQ_FEATURE_KEVENT))) \ +#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \ && !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) -#if PTHREAD_WORKQUEUE_SPI_VERSION == 20140730 -// rdar://problem/20609877 -typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; -#endif #define HAVE_PTHREAD_WORKQUEUE_KEVENT 1 #endif -#ifndef PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK -#if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_HOST_SUPPORTS_OSX(101200) -#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 1 + +#ifndef HAVE_PTHREAD_WORKQUEUE_NARROWING +#if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define HAVE_PTHREAD_WORKQUEUE_NARROWING 0 #else -#define PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK 0 +#define HAVE_PTHREAD_WORKQUEUE_NARROWING 1 #endif -#endif // PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK - -#if HAVE_MACH -#if !defined(MACH_NOTIFY_SEND_POSSIBLE) -#undef MACH_NOTIFY_SEND_POSSIBLE -#define MACH_NOTIFY_SEND_POSSIBLE MACH_NOTIFY_DEAD_NAME #endif -#endif // HAVE_MACH #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS @@ -649,163 +667,38 @@ typedef pthread_worqueue_function_kevent_t pthread_workqueue_function_kevent_t; #endif #endif // EVFILT_MEMORYSTATUS -#if defined(EVFILT_VM) && !DISPATCH_USE_MEMORYSTATUS -#ifndef DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE 1 -#endif -#endif // EVFILT_VM - #if 
TARGET_OS_SIMULATOR #undef DISPATCH_USE_MEMORYPRESSURE_SOURCE #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 0 -#undef DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 0 #endif // TARGET_OS_SIMULATOR #if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 -#elif !defined(DISPATCH_USE_VM_PRESSURE_SOURCE) && DISPATCH_USE_VM_PRESSURE -#define DISPATCH_USE_VM_PRESSURE_SOURCE 1 #endif #if DISPATCH_USE_MEMORYPRESSURE_SOURCE -extern bool _dispatch_memory_warn; -#endif - -#if !defined(NOTE_LEEWAY) -#undef NOTE_LEEWAY -#define NOTE_LEEWAY 0 -#undef NOTE_CRITICAL -#define NOTE_CRITICAL 0 -#undef NOTE_BACKGROUND -#define NOTE_BACKGROUND 0 -#endif // NOTE_LEEWAY - -#if !defined(NOTE_FUNLOCK) -#define NOTE_FUNLOCK 0x00000100 -#endif - -#if !defined(NOTE_MACH_CONTINUOUS_TIME) -#define NOTE_MACH_CONTINUOUS_TIME 0 -#endif // NOTE_MACH_CONTINUOUS_TIME - -#if !defined(HOST_NOTIFY_CALENDAR_SET) -#define HOST_NOTIFY_CALENDAR_SET HOST_NOTIFY_CALENDAR_CHANGE -#endif // HOST_NOTIFY_CALENDAR_SET - -#if !defined(HOST_CALENDAR_SET_REPLYID) -#define HOST_CALENDAR_SET_REPLYID 951 -#endif // HOST_CALENDAR_SET_REPLYID - -#if HAVE_DECL_NOTE_REAP -#if defined(NOTE_REAP) && defined(__APPLE__) -#undef NOTE_REAP -#define NOTE_REAP 0x10000000 // -#endif -#endif // HAVE_DECL_NOTE_REAP - -#ifndef VQ_QUOTA -#undef HAVE_DECL_VQ_QUOTA // rdar://problem/24160982 -#endif // VQ_QUOTA - -#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef NOTE_MEMORYSTATUS_PROC_LIMIT_WARN -#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0 -#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_WARN - -#if !defined(NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL -#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0 -#endif // NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL - -#if !defined(EV_UDATA_SPECIFIC) || !DISPATCH_HOST_SUPPORTS_OSX(101100) -#undef 
DISPATCH_USE_EV_UDATA_SPECIFIC -#define DISPATCH_USE_EV_UDATA_SPECIFIC 0 -#elif !defined(DISPATCH_USE_EV_UDATA_SPECIFIC) -#define DISPATCH_USE_EV_UDATA_SPECIFIC 1 -#endif // EV_UDATA_SPECIFIC - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC -#undef EV_UDATA_SPECIFIC -#define EV_UDATA_SPECIFIC 0 -#undef EV_VANISHED -#define EV_VANISHED 0 -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC - -#ifndef EV_VANISHED -#define EV_VANISHED 0x0200 -#endif - -#ifndef DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS -#if TARGET_OS_MAC && !DISPATCH_HOST_SUPPORTS_OSX(101200) -// deferred delete can return bogus ENOENTs on older kernels -#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 1 +#if __has_include() +#include #else -#define DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS 0 -#endif +extern void malloc_memory_event_handler(unsigned long); +#endif // __has_include(qos) _dispatch_kevent_priority_t; -#else // DISPATCH_USE_KEVENT_QOS -#ifndef KEVENT_FLAG_IMMEDIATE -#define KEVENT_FLAG_NONE 0x00 -#define KEVENT_FLAG_IMMEDIATE 0x01 -#define KEVENT_FLAG_ERROR_EVENTS 0x02 -#endif // KEVENT_FLAG_IMMEDIATE -typedef struct kevent64_s _dispatch_kevent_qos_s; -#define kevent_qos(_kq, _changelist, _nchanges, _eventlist, _nevents, \ - _data_out, _data_available, _flags) \ - ({ unsigned int _f = (_flags); _dispatch_kevent_qos_s _kev_copy; \ - const _dispatch_kevent_qos_s *_cl = (_changelist); \ - int _n = (_nchanges); const struct timespec _timeout_immediately = {}; \ - dispatch_static_assert(!(_data_out) && !(_data_available)); \ - if (_f & KEVENT_FLAG_ERROR_EVENTS) { \ - dispatch_static_assert(_n == 1); \ - _kev_copy = *_cl; _kev_copy.flags |= EV_RECEIPT; } \ - kevent64((_kq), _f & KEVENT_FLAG_ERROR_EVENTS ? &_kev_copy : _cl, _n, \ - (_eventlist), (_nevents), 0, \ - _f & KEVENT_FLAG_IMMEDIATE ? 
&_timeout_immediately : NULL); }) -#endif // DISPATCH_USE_KEVENT_QOS + +#if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) && \ + !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE) +#define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1 +#endif #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) #ifndef DISPATCH_USE_SETNOSIGPIPE @@ -836,13 +729,17 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #ifndef DISPATCH_USE_GUARDED_FD #define DISPATCH_USE_GUARDED_FD 1 #endif -// change_fdguard_np() requires GUARD_DUP -#if DISPATCH_USE_GUARDED_FD && RDAR_11814513 -#define DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD 1 -#endif #endif // HAVE_SYS_GUARDED_H +#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION +typedef struct dispatch_trace_timer_params_s { + int64_t deadline, interval, leeway; +} *dispatch_trace_timer_params_t; + +#include "provider.h" +#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION + #if __has_include() #include #ifndef DBG_DISPATCH @@ -850,15 +747,22 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #endif #ifndef KDBG_CODE #define KDBG_CODE(...) 
0 +#define DBG_FUNC_START 0 +#define DBG_FUNC_END 0 #endif #define DISPATCH_CODE(subclass, code) \ KDBG_CODE(DBG_DISPATCH, DISPATCH_TRACE_SUBCLASS_##subclass, code) +#define DISPATCH_CODE_START(subclass, code) \ + (DISPATCH_CODE(subclass, code) | DBG_FUNC_START) +#define DISPATCH_CODE_END(subclass, code) \ + (DISPATCH_CODE(subclass, code) | DBG_FUNC_END) #ifdef ARIADNEDBG_CODE #define ARIADNE_ENTER_DISPATCH_MAIN_CODE ARIADNEDBG_CODE(220, 2) #else #define ARIADNE_ENTER_DISPATCH_MAIN_CODE 0 #endif -#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && DISPATCH_INTROSPECTION +#if !defined(DISPATCH_USE_VOUCHER_KDEBUG_TRACE) && \ + (DISPATCH_INTROSPECTION || DISPATCH_PROFILE || DISPATCH_DEBUG) #define DISPATCH_USE_VOUCHER_KDEBUG_TRACE 1 #endif @@ -866,15 +770,21 @@ typedef struct kevent64_s _dispatch_kevent_qos_s; #define DISPATCH_TRACE_SUBCLASS_VOUCHER 1 #define DISPATCH_TRACE_SUBCLASS_PERF 2 #define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3 +#define DISPATCH_TRACE_SUBCLASS_PERF_MON 4 #define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1) #define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2) #define DISPATCH_PERF_post_activate_mutation DISPATCH_CODE(PERF, 3) #define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4) #define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5) +#define DISPATCH_PERF_strict_bg_timer DISPATCH_CODE(PERF, 6) #define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1) +#define DISPATCH_PERF_MON_worker_thread_start DISPATCH_CODE_START(PERF_MON, 1) +#define DISPATCH_PERF_MON_worker_thread_end DISPATCH_CODE_END(PERF_MON, 1) +#define DISPATCH_PERF_MON_worker_useless DISPATCH_CODE(PERF_MON, 2) + DISPATCH_ALWAYS_INLINE static inline void _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, @@ -921,42 +831,18 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define MACH_SEND_INVALID_VOUCHER 0x10000005 #endif -#if TARGET_OS_SIMULATOR && IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED < 101100 -#undef 
VOUCHER_USE_MACH_VOUCHER -#define VOUCHER_USE_MACH_VOUCHER 0 -#endif #ifndef VOUCHER_USE_MACH_VOUCHER #if __has_include() #define VOUCHER_USE_MACH_VOUCHER 1 #endif -#endif +#endif // VOUCHER_USE_MACH_VOUCHER +#ifndef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER #if RDAR_24272659 // FIXME: -#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER -#define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 -#elif !defined(VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER) #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 1 -#endif #else // RDAR_24272659 -#undef VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER #define VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER 0 #endif // RDAR_24272659 - -#if !VOUCHER_USE_MACH_VOUCHER || !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef VOUCHER_USE_BANK_AUTOREDEEM -#define VOUCHER_USE_BANK_AUTOREDEEM 0 -#elif !defined(VOUCHER_USE_BANK_AUTOREDEEM) -#define VOUCHER_USE_BANK_AUTOREDEEM 1 -#endif - -#if !VOUCHER_USE_MACH_VOUCHER || \ - !__has_include() || \ - !DISPATCH_HOST_SUPPORTS_OSX(101200) -#undef VOUCHER_USE_MACH_VOUCHER_PRIORITY -#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 0 -#elif !defined(VOUCHER_USE_MACH_VOUCHER_PRIORITY) -#define VOUCHER_USE_MACH_VOUCHER_PRIORITY 1 #endif #ifndef VOUCHER_USE_PERSONA @@ -980,7 +866,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // -#define _dispatch_set_crash_log_cause_and_message(ac, msg) +#define _dispatch_set_crash_log_cause_and_message(ac, msg) ((void)(ac)) #define _dispatch_set_crash_log_message(msg) #define _dispatch_set_crash_log_message_dynamic(msg) @@ -1035,7 +921,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, +DISPATCH_ENUM(dispatch_thread_set_self, unsigned long, DISPATCH_PRIORITY_ENFORCE = 0x1, DISPATCH_VOUCHER_REPLACE = 0x2, 
DISPATCH_VOUCHER_CONSUME = 0x4, @@ -1044,7 +930,7 @@ DISPATCH_ENUM(_dispatch_thread_set_self, unsigned long, DISPATCH_WARN_RESULT static inline voucher_t _dispatch_adopt_priority_and_set_voucher( pthread_priority_t priority, voucher_t voucher, - _dispatch_thread_set_self_t flags); + dispatch_thread_set_self_t flags); #if HAVE_MACH mach_port_t _dispatch_get_mach_host_port(void); #endif @@ -1057,8 +943,7 @@ extern int _dispatch_set_qos_class_enabled; #endif #endif // HAVE_PTHREAD_WORKQUEUE_QOS #if DISPATCH_USE_KEVENT_WORKQUEUE -#if !HAVE_PTHREAD_WORKQUEUE_QOS || !DISPATCH_USE_KEVENT_QOS || \ - !DISPATCH_USE_EV_UDATA_SPECIFIC +#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC #error Invalid build configuration #endif #if DISPATCH_USE_MGR_THREAD @@ -1066,20 +951,9 @@ extern int _dispatch_kevent_workqueue_enabled; #else #define _dispatch_kevent_workqueue_enabled (1) #endif -#endif // DISPATCH_USE_KEVENT_WORKQUEUE - -#if DISPATCH_USE_EVFILT_MACHPORT_DIRECT -#if !DISPATCH_USE_KEVENT_WORKQUEUE || !DISPATCH_USE_EV_UDATA_SPECIFIC -#error Invalid build configuration -#endif -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -extern int _dispatch_evfilt_machport_direct_enabled; -#else -#define _dispatch_evfilt_machport_direct_enabled (1) -#endif #else -#define _dispatch_evfilt_machport_direct_enabled (0) -#endif // DISPATCH_USE_EVFILT_MACHPORT_DIRECT +#define _dispatch_kevent_workqueue_enabled (0) +#endif // DISPATCH_USE_KEVENT_WORKQUEUE /* #includes dependent on internal.h */ @@ -1088,6 +962,7 @@ extern int _dispatch_evfilt_machport_direct_enabled; #include "introspection_internal.h" #include "queue_internal.h" #include "source_internal.h" +#include "mach_internal.h" #include "voucher_internal.h" #include "data_internal.h" #if !TARGET_OS_WIN32 diff --git a/src/introspection.c b/src/introspection.c index d847cb91a..8692a8bc5 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -193,7 +193,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, case 
DC_OVERRIDE_STEALING_TYPE: case DC_OVERRIDE_OWNING_TYPE: dc = dc->dc_data; - if (_dispatch_object_has_vtable(dc)) { + if (!_dispatch_object_is_continuation(dc)) { // these really wrap queues so we should hide the continuation type dq = (dispatch_queue_t)dc; diqi->type = dispatch_introspection_queue_item_type_queue; @@ -204,6 +204,8 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, #endif case DC_ASYNC_REDIRECT_TYPE: DISPATCH_INTERNAL_CRASH(0, "Handled by the caller"); + case DC_MACH_ASYNC_REPLY_TYPE: + break; case DC_MACH_SEND_BARRRIER_DRAIN_TYPE: break; case DC_MACH_SEND_BARRIER_TYPE: @@ -211,23 +213,17 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, flags = (uintptr_t)dc->dc_data; dq = dq->do_targetq; break; + default: + DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type"); } } else { - if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { - waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data); - if (flags & DISPATCH_OBJ_BARRIER_BIT) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - } - ctxt = dc->dc_ctxt; - func = dc->dc_func; + if (flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); + ctxt = dsc->dsc_ctxt; + func = dsc->dsc_func; } - if (func == _dispatch_sync_recurse_invoke) { - dc = dc->dc_ctxt; - dq = dc->dc_data; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - } else if (func == _dispatch_apply_invoke || + if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; if (da->da_todo) { @@ -252,7 +248,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, .function = func, .waiter = waiter, .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1, - .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT, + .sync = flags & DISPATCH_OBJ_SYNC_WAITER_BIT, .apply = apply, }; if (flags & DISPATCH_OBJ_GROUP_BIT) { @@ -300,16 +296,11 @@ 
_dispatch_introspection_source_get_info(dispatch_source_t ds) .suspend_count = _dq_state_suspend_cnt(dq_state) + ds->dq_side_suspend_cnt, .enqueued = _dq_state_is_enqueued(dq_state), .handler_is_block = hdlr_is_block, - .timer = ds->ds_is_timer, - .after = ds->ds_is_timer && (bool)(ds_timer(ds).flags & DISPATCH_TIMER_AFTER), + .timer = dr->du_is_timer, + .after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER), + .type = (unsigned long)dr->du_filter, + .handle = (unsigned long)dr->du_ident, }; - dispatch_kevent_t dk = ds->ds_dkev; - if (ds->ds_is_custom_source) { - dis.type = (unsigned long)dk; - } else if (dk) { - dis.type = (unsigned long)dk->dk_kevent.filter; - dis.handle = (unsigned long)dk->dk_kevent.ident; - } return dis; } @@ -739,7 +730,7 @@ struct dispatch_order_frame_s { dispatch_queue_order_entry_t dof_e; }; -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_introspection_lock_inversion_fail(dispatch_order_frame_t dof, dispatch_queue_t top_q, dispatch_queue_t bottom_q) diff --git a/src/introspection_internal.h b/src/introspection_internal.h index 06504a8ba..e2fa6d18b 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -66,7 +66,6 @@ void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); #if DISPATCH_PURE_C -void _dispatch_sync_recurse_invoke(void *ctxt); static dispatch_queue_t _dispatch_queue_get_current(void); DISPATCH_ALWAYS_INLINE @@ -100,24 +99,10 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq); DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq, - dispatch_function_t func) +_dispatch_introspection_sync_begin(dispatch_queue_t dq) { if (!_dispatch_introspection.debug_queue_inversions) return; - if (func != _dispatch_sync_recurse_invoke) { - _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); - } -} - -DISPATCH_ALWAYS_INLINE -static inline void 
-_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, - dispatch_function_t func) -{ - if (!_dispatch_introspection.debug_queue_inversions) return; - if (func != _dispatch_sync_recurse_invoke) { - _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); - } + _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); } #endif // DISPATCH_PURE_C @@ -129,7 +114,6 @@ _dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq, #define _dispatch_introspection_init() #define _dispatch_introspection_thread_add() -#define _dispatch_introspection_thread_remove() DISPATCH_ALWAYS_INLINE static inline dispatch_queue_t @@ -177,13 +161,7 @@ _dispatch_introspection_target_queue_changed( DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_function_t func DISPATCH_UNUSED) {} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_introspection_non_barrier_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_function_t func DISPATCH_UNUSED) {} +_dispatch_introspection_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED) {} #endif // DISPATCH_INTROSPECTION diff --git a/src/io.c b/src/io.c index e4f05aec9..155b6cf02 100644 --- a/src/io.c +++ b/src/io.c @@ -24,6 +24,10 @@ #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif +#ifndef PAGE_SIZE +#define PAGE_SIZE ((size_t)getpagesize()) +#endif + #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA #define _dispatch_io_data_retain(x) _dispatch_objc_retain(x) #define _dispatch_io_data_release(x) _dispatch_objc_release(x) @@ -229,11 +233,10 @@ _dispatch_iocntl(uint32_t param, uint64_t value) static dispatch_io_t _dispatch_io_create(dispatch_io_type_t type) { - dispatch_io_t channel = _dispatch_alloc(DISPATCH_VTABLE(io), + dispatch_io_t channel = _dispatch_object_alloc(DISPATCH_VTABLE(io), sizeof(struct dispatch_io_s)); channel->do_next = DISPATCH_OBJECT_LISTLESS; - channel->do_targetq = 
_dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - true); + channel->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); channel->params.type = type; channel->params.high = SIZE_MAX; channel->params.low = dispatch_io_defaults.low_water_chunks * @@ -275,7 +278,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, } void -_dispatch_io_dispose(dispatch_io_t channel) +_dispatch_io_dispose(dispatch_io_t channel, DISPATCH_UNUSED bool *allow_free) { _dispatch_object_debug(channel, "%s", __func__); if (channel->fd_entry && @@ -679,6 +682,9 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { + if (fd_entry->path_data) { + fd_entry->path_data->channel = NULL; + } channel->fd_entry = NULL; _dispatch_fd_entry_release(fd_entry); } @@ -729,9 +735,10 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - if (!fd_entry->path_data) { - channel->fd_entry = NULL; + if (fd_entry->path_data) { + fd_entry->path_data->channel = NULL; } + channel->fd_entry = NULL; _dispatch_fd_entry_release(fd_entry); } } @@ -885,7 +892,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_READ, channel, 0, length, dispatch_data_empty, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), ^(bool done, dispatch_data_t data, int error) { if (data) { data = dispatch_data_create_concat(deliver_data, data); @@ -956,7 +963,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_WRITE, channel, 0, dispatch_data_get_size(data), data, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,false), + 
_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), ^(bool done, dispatch_data_t d, int error) { if (done) { if (d) { @@ -1016,14 +1023,13 @@ _dispatch_operation_create(dispatch_op_direction_t direction, }); return NULL; } - dispatch_operation_t op = _dispatch_alloc(DISPATCH_VTABLE(operation), + dispatch_operation_t op = _dispatch_object_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); _dispatch_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally - op->op_q = dispatch_queue_create("com.apple.libdispatch-io.opq", NULL); - op->op_q->do_targetq = queue; - _dispatch_retain(queue); + op->op_q = dispatch_queue_create_with_target("com.apple.libdispatch-io.opq", + NULL, queue); op->active = false; op->direction = direction; op->offset = offset + channel->f_ptr; @@ -1044,7 +1050,8 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } void -_dispatch_operation_dispose(dispatch_operation_t op) +_dispatch_operation_dispose(dispatch_operation_t op, + DISPATCH_UNUSED bool *allow_free) { _dispatch_object_debug(op, "%s", __func__); _dispatch_op_debug("dispose", op); @@ -1151,8 +1158,9 @@ _dispatch_operation_timer(dispatch_queue_t tq, dispatch_operation_t op) } dispatch_source_t timer = dispatch_source_create( DISPATCH_SOURCE_TYPE_TIMER, 0, 0, tq); - dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, - (int64_t)op->params.interval), op->params.interval, 0); + dispatch_source_set_timer(timer, + dispatch_time(DISPATCH_TIME_NOW, (int64_t)op->params.interval), + op->params.interval, 0); dispatch_source_set_event_handler(timer, ^{ // On stream queue or pick queue if (dispatch_source_testcancel(timer)) { @@ -1232,9 +1240,10 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, return fd; } errno = 0; +#else + (void)fd_entry; #endif return open(path, oflag, mode); - (void)fd_entry; } static inline int 
@@ -1244,11 +1253,12 @@ _dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { guardid_t guard = (uintptr_t)fd_entry; return guarded_close_np(fd, &guard); } else +#else + (void)fd_entry; #endif { return close(fd); } - (void)fd_entry; } static inline void @@ -1299,12 +1309,10 @@ _dispatch_fd_entry_create(dispatch_queue_t q) { dispatch_fd_entry_t fd_entry; fd_entry = _dispatch_calloc(1ul, sizeof(struct dispatch_fd_entry_s)); - fd_entry->close_queue = dispatch_queue_create( - "com.apple.libdispatch-io.closeq", NULL); // Use target queue to ensure that no concurrent lookups are going on when // the close queue is running - fd_entry->close_queue->do_targetq = q; - _dispatch_retain(q); + fd_entry->close_queue = dispatch_queue_create_with_target( + "com.apple.libdispatch-io.closeq", NULL, q); // Suspend the cleanup queue until closing _dispatch_fd_entry_retain(fd_entry); return fd_entry; @@ -1364,7 +1372,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) break; ); } - int32_t dev = major(st.st_dev); + dev_t dev = major(st.st_dev); // We have to get the disk on the global dev queue. 
The // barrier queue cannot continue until that is complete dispatch_suspend(fd_entry->barrier_queue); @@ -1384,8 +1392,9 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) break; ); } - _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false)); + + _dispatch_stream_init(fd_entry, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); } fd_entry->orig_flags = orig_flags; fd_entry->orig_nosigpipe = orig_nosigpipe; @@ -1452,8 +1461,8 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, if (S_ISREG(mode)) { _dispatch_disk_init(fd_entry, major(dev)); } else { - _dispatch_stream_init(fd_entry, _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false)); + _dispatch_stream_init(fd_entry, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); } fd_entry->fd = -1; fd_entry->orig_flags = -1; @@ -1577,11 +1586,9 @@ _dispatch_stream_init(dispatch_fd_entry_t fd_entry, dispatch_queue_t tq) for (direction = 0; direction < DOP_DIR_MAX; direction++) { dispatch_stream_t stream; stream = _dispatch_calloc(1ul, sizeof(struct dispatch_stream_s)); - stream->dq = dispatch_queue_create("com.apple.libdispatch-io.streamq", - NULL); + stream->dq = dispatch_queue_create_with_target( + "com.apple.libdispatch-io.streamq", NULL, tq); dispatch_set_context(stream->dq, stream); - _dispatch_retain(tq); - stream->dq->do_targetq = tq; TAILQ_INIT(&stream->operations[DISPATCH_IO_RANDOM]); TAILQ_INIT(&stream->operations[DISPATCH_IO_STREAM]); fd_entry->streams[direction] = stream; @@ -1626,14 +1633,13 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) } // Otherwise create a new entry size_t pending_reqs_depth = dispatch_io_defaults.max_pending_io_reqs; - disk = _dispatch_alloc(DISPATCH_VTABLE(disk), + disk = _dispatch_object_alloc(DISPATCH_VTABLE(disk), sizeof(struct dispatch_disk_s) + (pending_reqs_depth * sizeof(dispatch_operation_t))); disk->do_next = DISPATCH_OBJECT_LISTLESS; disk->do_xref_cnt = 
-1; disk->advise_list_depth = pending_reqs_depth; - disk->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); disk->dev = dev; TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); @@ -1648,7 +1654,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) } void -_dispatch_disk_dispose(dispatch_disk_t disk) +_dispatch_disk_dispose(dispatch_disk_t disk, DISPATCH_UNUSED bool *allow_free) { uintptr_t hash = DIO_HASH(disk->dev); TAILQ_REMOVE(&_dispatch_io_devs[hash], disk, disk_list); @@ -1893,7 +1899,7 @@ _dispatch_stream_source(dispatch_stream_t stream, dispatch_operation_t op) // Close queue must not run user cleanup handlers until sources are fully // unregistered dispatch_queue_t close_queue = op->fd_entry->close_queue; - dispatch_source_set_cancel_handler(source, ^{ + dispatch_source_set_mandatory_cancel_handler(source, ^{ _dispatch_op_debug("stream source cancel", op); dispatch_resume(close_queue); }); @@ -2161,7 +2167,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) op->advise_offset += advise.ra_count; #ifdef __linux__ _dispatch_io_syscall_switch(err, - readahead(op->fd_entry->fd, advise.ra_offset, advise.ra_count), + readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count), case EINVAL: break; // fd does refer to a non-supported filetype default: (void)dispatch_assume_zero(err); break; ); @@ -2284,10 +2290,10 @@ _dispatch_operation_perform(dispatch_operation_t op) return DISPATCH_OP_DELIVER; } error: - if (err == EAGAIN) { + if (err == EAGAIN || err == EWOULDBLOCK) { // For disk based files with blocking I/O we should never get EAGAIN dispatch_assert(!op->fd_entry->disk); - _dispatch_op_debug("performed: EAGAIN", op); + _dispatch_op_debug("performed: EAGAIN/EWOULDBLOCK", op); if (op->direction == DOP_DIR_READ && op->total && op->channel == op->fd_entry->convenience_channel) { // 
Convenience read with available data completes on EAGAIN diff --git a/src/io_internal.h b/src/io_internal.h index ad8259a1d..672727fae 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -178,10 +178,11 @@ struct dispatch_io_s { void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); size_t _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz); -void _dispatch_io_dispose(dispatch_io_t channel); +void _dispatch_io_dispose(dispatch_io_t channel, bool *allow_free); size_t _dispatch_operation_debug(dispatch_operation_t op, char* buf, size_t bufsiz); -void _dispatch_operation_dispose(dispatch_operation_t operation); -void _dispatch_disk_dispose(dispatch_disk_t disk); +void _dispatch_operation_dispose(dispatch_operation_t operation, + bool *allow_free); +void _dispatch_disk_dispose(dispatch_disk_t disk, bool *allow_free); #endif // __DISPATCH_IO_INTERNAL__ diff --git a/src/libdispatch.codes b/src/libdispatch.codes index 9aca7e16c..0ecc3331f 100644 --- a/src/libdispatch.codes +++ b/src/libdispatch.codes @@ -11,3 +11,9 @@ 0x2e02000c DISPATCH_PERF_post_activate_mutation 0x2e020010 DISPATCH_PERF_delayed_registration 0x2e020014 DISPATCH_PERF_mutable_target +0x2e020018 DISPATCH_PERF_strict_bg_timer + +0x2e030004 DISPATCH_MACH_MSG_hdr_move + +0x2e040004 DISPATCH_PERF_MON_worker_thread +0x2e040008 DISPATCH_PERF_MON_worker_useless diff --git a/src/mach.c b/src/mach.c new file mode 100644 index 000000000..699492da0 --- /dev/null +++ b/src/mach.c @@ -0,0 +1,2982 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if HAVE_MACH + +#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 +#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 +#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 +#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 +#define DISPATCH_MACH_ASYNC_REPLY 0x10 +#define DISPATCH_MACH_OPTIONS_MASK 0xffff + +#define DM_SEND_STATUS_SUCCESS 0x1 +#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 + +DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, + DM_SEND_INVOKE_NONE = 0x0, + DM_SEND_INVOKE_MAKE_DIRTY = 0x1, + DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, + DM_SEND_INVOKE_CANCEL = 0x4, + DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, + DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, +); +#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ + ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) + +static inline mach_msg_option_t _dispatch_mach_checkin_options(void); +static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); +static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); +static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, + mach_port_t local_port, mach_port_t remote_port); +static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port); +static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( + dispatch_object_t dou, dispatch_mach_reply_refs_t dmr, + dispatch_mach_reason_t reason); +static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, + 
dispatch_object_t dou); +static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( + dispatch_mach_msg_t dmsg); +static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_qos_t qos); +static void _dispatch_mach_cancel(dispatch_mach_t dm); +static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, + dispatch_qos_t qos); +static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg); +static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, dispatch_queue_t drq); +static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( + void *ctxt); +static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( + dispatch_mach_msg_t dmsg, dispatch_mach_t dm); +static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); +static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, + mach_port_t send); + +// For tests only. +DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc) +{ + dispatch_source_t ds; + ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, + recvp, 0, &_dispatch_mgr_q); + os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], + (dispatch_continuation_t)dc, relaxed); + return ds; +} + +#pragma mark - +#pragma mark dispatch to XPC callbacks + +static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks; + +// Default dmxh_direct_message_handler callback that does not handle +// messages inline. +static bool +_dispatch_mach_xpc_no_handle_message( + void *_Nullable context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED, + mach_error_t error DISPATCH_UNUSED) +{ + return false; +} + +// Default dmxh_msg_context_reply_queue callback that returns a NULL queue. 
+static dispatch_queue_t +_dispatch_mach_msg_context_no_async_reply_queue( + void *_Nonnull msg_context DISPATCH_UNUSED) +{ + return NULL; +} + +// Default dmxh_async_reply_handler callback that crashes when called. +DISPATCH_NORETURN +static void +_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED) +{ + DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, + "_dispatch_mach_default_async_reply_handler called"); +} + +// Default dmxh_enable_sigterm_notification callback that enables delivery of +// SIGTERM notifications (for backwards compatibility). +static bool +_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED) +{ + return true; +} + +// Callbacks from dispatch to XPC. The default is to not support any callbacks. +static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default + = { + .version = DISPATCH_MACH_XPC_HOOKS_VERSION, + .dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message, + .dmxh_msg_context_reply_queue = + &_dispatch_mach_msg_context_no_async_reply_queue, + .dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler, + .dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm, +}; + +static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks + = &_dispatch_mach_xpc_hooks_default; + +void +dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks) +{ + if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks, + &_dispatch_mach_xpc_hooks_default, hooks, relaxed)) { + DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, + "dispatch_mach_hooks_install_4libxpc called twice"); + } +} + +void +_dispatch_mach_hooks_install_default(void) +{ + os_atomic_store(&_dispatch_mach_xpc_hooks, + &_dispatch_mach_xpc_hooks_default, relaxed); +} + +#pragma mark - +#pragma mark dispatch_mach_t + +static dispatch_mach_t +_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, 
+ dispatch_mach_handler_function_t handler, bool handler_is_block, + bool is_xpc) +{ + dispatch_mach_recv_refs_t dmrr; + dispatch_mach_send_refs_t dmsr; + dispatch_mach_t dm; + dm = _dispatch_object_alloc(DISPATCH_VTABLE(mach), + sizeof(struct dispatch_mach_s)); + _dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER); + + dm->dq_label = label; + dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds + dm->dm_is_xpc = is_xpc; + + dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; + dispatch_assert(dmrr->du_is_direct); + dmrr->du_owner_wref = _dispatch_ptr2wref(dm); + dmrr->dmrr_handler_func = handler; + dmrr->dmrr_handler_ctxt = context; + dmrr->dmrr_handler_is_block = handler_is_block; + dm->dm_recv_refs = dmrr; + + dmsr = dux_create(&_dispatch_mach_type_send, 0, + DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD)._dmsr; + dmsr->du_owner_wref = _dispatch_ptr2wref(dm); + dm->dm_send_refs = dmsr; + + if (slowpath(!q)) { + q = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + } else { + _dispatch_retain(q); + } + dm->do_targetq = q; + _dispatch_object_debug(dm, "%s", __func__); + return dm; +} + +dispatch_mach_t +dispatch_mach_create(const char *label, dispatch_queue_t q, + dispatch_mach_handler_t handler) +{ + dispatch_block_t bb = _dispatch_Block_copy((void*)handler); + return _dispatch_mach_create(label, q, bb, + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true, + false); +} + +dispatch_mach_t +dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, + dispatch_mach_handler_function_t handler) +{ + return _dispatch_mach_create(label, q, context, handler, false, false); +} + +dispatch_mach_t +dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q, + void *context, dispatch_mach_handler_function_t handler) +{ + return _dispatch_mach_create(label, q, context, handler, false, true); +} + +void +_dispatch_mach_dispose(dispatch_mach_t dm, bool 
*allow_free) +{ + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_unote_dispose(dm->dm_recv_refs); + dm->dm_recv_refs = NULL; + _dispatch_unote_dispose(dm->dm_send_refs); + dm->dm_send_refs = NULL; + if (dm->dm_xpc_term_refs) { + _dispatch_unote_dispose(dm->dm_xpc_term_refs); + dm->dm_xpc_term_refs = NULL; + } + _dispatch_queue_destroy(dm->_as_dq, allow_free); +} + +void +dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, + mach_port_t send, dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + uint32_t disconnect_cnt; + + if (MACH_PORT_VALID(receive)) { + dm->dm_recv_refs->du_ident = receive; + _dispatch_retain(dm); // the reference the manager queue holds + } + dmsr->dmsr_send = send; + if (MACH_PORT_VALID(send)) { + if (checkin) { + dispatch_mach_msg_t dmsg = checkin; + dispatch_retain(dmsg); + dmsg->dmsg_options = _dispatch_mach_checkin_options(); + dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg); + } + dmsr->dmsr_checkin = checkin; + } + dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == + DISPATCH_MACH_NEVER_INSTALLED); + disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release); + if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); + } + _dispatch_object_debug(dm, "%s", __func__); + return dispatch_activate(dm); +} + +static inline bool +_dispatch_mach_reply_tryremove(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr) +{ + bool removed; + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + } + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + return removed; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, uint32_t 
options) +{ + dispatch_mach_msg_t dmsgr = NULL; + bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); + if (options & DU_UNREGISTER_REPLY_REMOVE) { + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); + } + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + } + if (disconnected) { + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, + DISPATCH_MACH_DISCONNECTED); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; + } + _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", + _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident), + disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); + if (dmsgr) { + return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reply_list_remove(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr) { + // dmsr_replies_lock must be held by the caller. + bool removed = false; + if (likely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + removed = true; + } + return removed; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, uint32_t options) +{ + dispatch_assert(!_TAILQ_IS_ENQUEUED(dmr, dmr_list)); + + bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); + _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", + (mach_port_t)dmr->du_ident, disconnected ? 
" (disconnected)" : "", + dmr->dmr_ctxt); + if (!_dispatch_unote_unregister(dmr, options)) { + _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", + (mach_port_t)dmr->du_ident, dmr); + dispatch_assert(options == DU_UNREGISTER_DISCONNECTED); + return false; + } + + dispatch_mach_msg_t dmsgr = NULL; + dispatch_queue_t drq = NULL; + if (disconnected) { + // The next call is guaranteed to always transfer or consume the voucher + // in the dmr, if there is one. + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, + dmr->dmr_async_reply ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmr->dmr_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt); + } + dispatch_assert(dmr->dmr_voucher == NULL); + } else if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; + } + _dispatch_unote_dispose(dmr); + + if (dmsgr) { + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } + return true; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) +{ + dmr->du_owner_wref = _dispatch_ptr2wref(dm); + dmr->du_wlh = NULL; + dmr->du_filter = EVFILT_MACHPORT; + dmr->du_ident = reply_port; + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_mach_reply_mark_reply_port_owned(dmr); + } else { + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + } + + _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if 
(unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, + "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, + dispatch_mach_msg_t dmsg) +{ + dispatch_mach_reply_refs_t dmr; + dispatch_priority_t mpri, pri, overcommit; + dispatch_wlh_t wlh; + + dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr; + dispatch_assert(dmr->du_is_direct); + dmr->du_owner_wref = _dispatch_ptr2wref(dm); + if (dmsg->dmsg_voucher) { + dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); + } + dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + // make reply context visible to leaks rdar://11777199 + dmr->dmr_ctxt = dmsg->do_ctxt; + + dispatch_queue_t drq = NULL; + if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) { + dmr->dmr_async_reply = true; + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + + if (!drq) { + pri = dm->dq_priority; + wlh = dm->dm_recv_refs->du_wlh; + } else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { + pri = DISPATCH_PRIORITY_FLAG_MANAGER; + wlh = (dispatch_wlh_t)drq; + } else if (dx_hastypeflag(drq, QUEUE_ROOT)) { + pri = drq->dq_priority; + wlh = DISPATCH_WLH_ANON; + } else if (drq == dm->do_targetq) { + pri = dm->dq_priority; + wlh = dm->dm_recv_refs->du_wlh; + } else if (!(pri = _dispatch_queue_compute_priority_and_wlh(drq, &wlh))) { + pri = drq->dq_priority; + wlh = DISPATCH_WLH_ANON; + } + if (pri & DISPATCH_PRIORITY_REQUESTED_MASK) { + overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + pri &= DISPATCH_PRIORITY_REQUESTED_MASK; + mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority); + if (pri < mpri) pri = mpri; + pri |= overcommit; + } else { + pri = DISPATCH_PRIORITY_FLAG_MANAGER; + } + + 
_dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", + reply_port, dmsg->do_ctxt); + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { + DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, + "Reply already registered"); + } + TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + + if (!_dispatch_unote_register(dmr, wlh, pri)) { + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + _dispatch_mach_reply_list_remove(dm, dmr); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + _dispatch_mach_reply_kevent_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED); + } +} + +#pragma mark - +#pragma mark dispatch_mach_msg + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +static inline bool +_dispatch_use_mach_special_reply_port(void) +{ +#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE + return true; +#else +#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;}) + return false; +#endif +} + +static mach_port_t +_dispatch_get_thread_reply_port(void) +{ + mach_port_t reply_port, mrp; + if (_dispatch_use_mach_special_reply_port()) { + mrp = _dispatch_get_thread_special_reply_port(); + } else { + mrp = _dispatch_get_thread_mig_reply_port(); + } + if (mrp) { + reply_port = mrp; + _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", + reply_port); + } else { + if (_dispatch_use_mach_special_reply_port()) { + reply_port = thread_get_special_reply_port(); + _dispatch_set_thread_special_reply_port(reply_port); + } else { + reply_port = mach_reply_port(); + _dispatch_set_thread_mig_reply_port(reply_port); + } + if (unlikely(!MACH_PORT_VALID(reply_port))) { + DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(), + "Unable to allocate reply port, possible port leak"); + } + _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", + reply_port); + } + 
_dispatch_debug_machport(reply_port); + return reply_port; +} + +static void +_dispatch_clear_thread_reply_port(mach_port_t reply_port) +{ + mach_port_t mrp; + if (_dispatch_use_mach_special_reply_port()) { + mrp = _dispatch_get_thread_special_reply_port(); + } else { + mrp = _dispatch_get_thread_mig_reply_port(); + } + if (reply_port != mrp) { + if (mrp) { + _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " + "port (found 0x%08x)", reply_port, mrp); + } + return; + } + if (_dispatch_use_mach_special_reply_port()) { + _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); + } else { + _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); + } + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", + reply_port); +} + +static void +_dispatch_set_thread_reply_port(mach_port_t reply_port) +{ + _dispatch_debug_machport(reply_port); + mach_port_t mrp; + if (_dispatch_use_mach_special_reply_port()) { + mrp = _dispatch_get_thread_special_reply_port(); + } else { + mrp = _dispatch_get_thread_mig_reply_port(); + } + if (mrp) { + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + _dispatch_debug("machport[0x%08x]: deallocated sync reply port " + "(found 0x%08x)", reply_port, mrp); + } else { + if (_dispatch_use_mach_special_reply_port()) { + _dispatch_set_thread_special_reply_port(reply_port); + } else { + _dispatch_set_thread_mig_reply_port(reply_port); + } + _dispatch_debug("machport[0x%08x]: restored thread sync reply port", + reply_port); + } +} + +static inline mach_port_t +_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t remote = hdr->msgh_remote_port; + return remote; +} + +static inline mach_port_t +_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) +{ + mach_msg_header_t *hdr = 
_dispatch_mach_msg_get_msg(dou._dmsg); + mach_port_t local = hdr->msgh_local_port; + if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != + MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; + return local; +} + +static inline void +_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, + unsigned long reason) +{ + dispatch_assert_zero(reason & ~(unsigned long)code_emask); + dmsg->dmsg_error = ((err || !reason) ? err : + err_local|err_sub(0x3e0)|(mach_error_t)reason); +} + +static inline unsigned long +_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) +{ + mach_error_t err = dmsg->dmsg_error; + + if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { + *err_ptr = 0; + return err_get_code(err); + } + *err_ptr = err; + return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; +} + +static inline dispatch_mach_msg_t +_dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, + dispatch_mach_reply_refs_t dmr, uint32_t flags) +{ + dispatch_mach_msg_destructor_t destructor; + dispatch_mach_msg_t dmsg; + voucher_t voucher; + pthread_priority_t pp; + + if (dmr) { + _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher + pp = _dispatch_priority_to_pp(dmr->dmr_priority); + voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } else { + voucher = voucher_create_with_mach_msg(hdr); + pp = _dispatch_priority_compute_propagated( + _voucher_get_priority(voucher), 0); + } + + destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ? + DISPATCH_MACH_MSG_DESTRUCTOR_FREE : + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!(flags & DISPATCH_EV_MSG_NEEDS_FREE)) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, + (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); + } + dmsg->dmsg_voucher = voucher; + dmsg->dmsg_priority = pp; + dmsg->do_ctxt = dmr ? 
dmr->dmr_ctxt : NULL; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); + _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); + _dispatch_voucher_ktrace_dmsg_push(dmsg); + return dmsg; +} + +void +_dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + // this function is very similar with what _dispatch_source_merge_evt does + // but can't reuse it as handling the message must be protected by the + // internal refcount between the first half and the trailer of what + // _dispatch_source_merge_evt does. + + dispatch_mach_recv_refs_t dmrr = du._dmrr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref); + dispatch_queue_flags_t dqf; + dispatch_mach_msg_t dmsg; + + dispatch_assert(_dispatch_unote_needs_rearm(du)); + + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + + // once we modify the queue atomic flags below, it will allow concurrent + // threads running _dispatch_mach_invoke2 to dispose of the source, + // so we can't safely borrow the reference we get from the muxnote udata + // anymore, and need our own + dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_CONSUME_2; + _dispatch_retain_2(dm); // rdar://20382435 + + if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) { + dqf = _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, + DSF_DEFERRED_DELETE, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", + dm, dmrr); + } else if (unlikely(flags & (EV_ONESHOT | EV_DELETE))) { + _dispatch_source_refs_unregister(dm->_as_ds, + DU_UNREGISTER_ALREADY_DELETED); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr); + } else { + dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr); + } + + 
_dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + + if (dqf & DSF_CANCELED) { + _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } + return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_MAKE_DIRTY); + } + + // Once the mach channel disarming is visible, cancellation will switch to + // immediate deletion. If we're preempted here, then the whole cancellation + // sequence may be complete by the time we really enqueue the message. + // + // _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it out + // to keep the promise that DISPATCH_MACH_DISCONNECTED is the last + // event sent. + + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + return _dispatch_release_2_tailcall(dm); +} + +void +_dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr, mach_msg_size_t siz) +{ + dispatch_mach_reply_refs_t dmr = du._dmr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref); + bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED); + dispatch_mach_msg_t dmsg = NULL; + + _dispatch_debug_machport(hdr->msgh_remote_port); + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + + if (!canceled) { + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags); + } + + if (dmsg) { + dispatch_queue_t drq = NULL; + if (dmsg->do_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + } + } else { + 
_dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } + } + + dispatch_wakeup_flags_t wflags = 0; + uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE; + if (canceled) { + options |= DU_UNREGISTER_DISCONNECTED; + } + + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + bool removed = _dispatch_mach_reply_list_remove(dm, dmr); + dispatch_assert(removed); + if (TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies) && + (dm->dm_send_refs->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + // When the list is empty, _dispatch_mach_disconnect() may release the + // last reference count on the Mach channel. To avoid this, take our + // own reference before releasing the lock. + wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2; + _dispatch_retain_2(dm); + } + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + + bool result = _dispatch_mach_reply_kevent_unregister(dm, dmr, options); + dispatch_assert(result); + if (wflags) dx_wakeup(dm, 0, wflags); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_mach_msg_t +_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + mach_port_t send) +{ + if (slowpath(!MACH_PORT_VALID(reply_port))) { + DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); + } + void *ctxt = dmr->dmr_ctxt; + mach_msg_header_t *hdr, *hdr2 = NULL; + void *hdr_copyout_addr; + mach_msg_size_t siz, msgsiz = 0; + mach_msg_return_t kr; + mach_msg_option_t options; + mach_port_t notify = MACH_PORT_NULL; + siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE); + hdr = alloca(siz); + for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); + p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { + *(char*)p = 0; // ensure alloca 
buffer doesn't overlap with stack guard + } + options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); + if (MACH_PORT_VALID(send)) { + notify = send; + options |= MACH_RCV_SYNC_WAIT; + } + +retry: + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, + (options & MACH_RCV_TIMEOUT) ? "poll" : "wait"); + kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, + notify); + hdr_copyout_addr = hdr; + _dispatch_debug_machport(reply_port); + _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " + "returned: %s - 0x%x", reply_port, siz, options, + mach_error_string(kr), kr); + switch (kr) { + case MACH_RCV_TOO_LARGE: + if (!fastpath(hdr->msgh_size <= UINT_MAX - + DISPATCH_MACH_TRAILER_SIZE)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); + } + if (options & MACH_RCV_LARGE) { + msgsiz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + hdr2 = malloc(msgsiz); + if (dispatch_assume(hdr2)) { + hdr = hdr2; + siz = msgsiz; + } + options |= MACH_RCV_TIMEOUT; + options &= ~MACH_RCV_LARGE; + goto retry; + } + _dispatch_log("BUG in libdispatch client: " + "dispatch_mach_send_and_wait_for_reply: dropped message too " + "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, + hdr->msgh_size); + break; + case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 + case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 + case MACH_RCV_PORT_DIED: + // channel was disconnected/canceled and reply port destroyed + _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " + "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); + goto out; + case MACH_MSG_SUCCESS: + if (hdr->msgh_remote_port) { + _dispatch_debug_machport(hdr->msgh_remote_port); + } + _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " + "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, + hdr->msgh_size, hdr->msgh_remote_port); + siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; + 
if (hdr2 && siz < msgsiz) { + void *shrink = realloc(hdr2, msgsiz); + if (shrink) hdr = hdr2 = shrink; + } + break; + case MACH_RCV_INVALID_NOTIFY: + default: + DISPATCH_INTERNAL_CRASH(kr, "Unexpected error from mach_msg_receive"); + break; + } + _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); + hdr->msgh_local_port = MACH_PORT_NULL; + if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { + if (!kr) mach_msg_destroy(hdr); + goto out; + } + dispatch_mach_msg_t dmsg; + dispatch_mach_msg_destructor_t destructor = (!hdr2) ? + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : + DISPATCH_MACH_MSG_DESTRUCTOR_FREE; + dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); + if (!hdr2 || hdr != hdr_copyout_addr) { + _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, + (uint64_t)hdr_copyout_addr, + (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); + } + dmsg->do_ctxt = ctxt; + return dmsg; +out: + free(hdr2); + return NULL; +} + +static inline void +_dispatch_mach_msg_reply_received(dispatch_mach_t dm, + dispatch_mach_reply_refs_t dmr, mach_port_t local_port) +{ + bool removed = _dispatch_mach_reply_tryremove(dm, dmr); + if (!MACH_PORT_VALID(local_port) || !removed) { + // port moved/destroyed during receive, or reply waiter was never + // registered or already removed (disconnected) + return; + } + mach_port_t reply_port = _dispatch_mach_reply_get_reply_port( + (mach_port_t)dmr->du_ident); + _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", + reply_port, dmr->dmr_ctxt); + if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { + _dispatch_set_thread_reply_port(reply_port); + if (local_port != reply_port) { + DISPATCH_CLIENT_CRASH(local_port, + "Reply received on unexpected port"); + } + return; + } + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + hdr->msgh_local_port = local_port; + dmsg->dmsg_voucher = dmr->dmr_voucher; + 
dmr->dmr_voucher = NULL; // transfer reference + dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + dmsg->do_ctxt = dmr->dmr_ctxt; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); +} + +static inline void +_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, + mach_port_t remote_port) +{ + mach_msg_header_t *hdr; + dispatch_mach_msg_t dmsg; + dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + if (local_port) hdr->msgh_local_port = local_port; + if (remote_port) hdr->msgh_remote_port = remote_port; + _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); + _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? + local_port : remote_port, local_port ? "receive" : "send"); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); +} + +static inline dispatch_mach_msg_t +_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason) +{ + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : + _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident); + voucher_t v; + + if (!reply_port) { + if (!dmsg) { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + if (v) _voucher_release(v); + } + return NULL; + } + + if (dmsg) { + v = dmsg->dmsg_voucher; + if (v) _voucher_retain(v); + } else { + v = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference + } + + if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && + (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || + (dmr && !_dispatch_unote_registered(dmr) && + _dispatch_mach_reply_is_reply_port_owned(dmr))) { + if (v) _voucher_release(v); + // deallocate owned reply port to break _dispatch_mach_msg_reply_recv + // out of waiting in mach_msg(MACH_RCV_MSG) + kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, + MACH_PORT_RIGHT_RECEIVE, -1); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + return NULL; + } + + mach_msg_header_t *hdr; + dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), + DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); + dmsgr->dmsg_voucher = v; + hdr->msgh_local_port = reply_port; + if (dmsg) { + dmsgr->dmsg_priority = dmsg->dmsg_priority; + dmsgr->do_ctxt = dmsg->do_ctxt; + } else { + dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + dmsgr->do_ctxt = dmr->dmr_ctxt; + } + _dispatch_mach_msg_set_reason(dmsgr, 0, reason); + _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", + hdr->msgh_local_port, dmsgr->do_ctxt); + return dmsgr; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) +{ + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; + dispatch_queue_t drq = NULL; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + mach_msg_option_t msg_opts = dmsg->dmsg_options; + _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " + "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", + 
msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); + unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? + 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, + msg_opts & DISPATCH_MACH_ASYNC_REPLY + ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmsg->do_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + _dispatch_mach_msg_set_reason(dmsg, 0, reason); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + if (dmsgr) { + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } +} + +DISPATCH_NOINLINE +static uint32_t +_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dsrr = dm->dm_send_refs; + dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; + voucher_t voucher = dmsg->dmsg_voucher; + dispatch_queue_t drq = NULL; + mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; + uint32_t send_status = 0; + bool clear_voucher = false, kvoucher_move_send = false; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + mach_port_t reply_port = dmsg->dmsg_reply; + if (!is_reply) { + dm->dm_needs_mgr = 0; + if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) { + // send initial checkin message + if (unlikely(_dispatch_unote_registered(dsrr) && + _dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + dm->dm_needs_mgr = 1; + goto out; + } + if (unlikely(!_dispatch_mach_msg_send(dm, + dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { + goto out; + } + dsrr->dmsr_checkin = NULL; + } + } + 
mach_msg_return_t kr = 0; + mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; + if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { + mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; + opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); + if (!is_reply) { + if (dmsg != dsrr->dmsr_checkin) { + msg->msgh_remote_port = dsrr->dmsr_send; + } + if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + if (unlikely(!_dispatch_unote_registered(dsrr))) { + _dispatch_mach_notification_kevent_register(dm, + msg->msgh_remote_port); + } + if (likely(_dispatch_unote_registered(dsrr))) { + if (os_atomic_load2o(dsrr, dmsr_notification_armed, + relaxed)) { + goto out; + } + opts |= MACH_SEND_NOTIFY; + } + } + opts |= MACH_SEND_TIMEOUT; + if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + ipc_kvoucher = _voucher_create_mach_voucher_with_priority( + voucher, dmsg->dmsg_priority); + } + _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); + if (ipc_kvoucher) { + kvoucher_move_send = true; + clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, + ipc_kvoucher, kvoucher_move_send); + } else { + clear_voucher = _voucher_mach_msg_set(msg, voucher); + } + if (qos) { + opts |= MACH_SEND_OVERRIDE; + msg_priority = (mach_msg_priority_t) + _dispatch_priority_compute_propagated( + _dispatch_qos_to_pp(qos), 0); + } + } + _dispatch_debug_machport(msg->msgh_remote_port); + if (reply_port) _dispatch_debug_machport(reply_port); + if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { + if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + if (_dispatch_use_mach_special_reply_port()) { + opts |= MACH_SEND_SYNC_OVERRIDE; + } + _dispatch_clear_thread_reply_port(reply_port); + } + _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, + msg_opts); + } + kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, + msg_priority); + _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " + "opts 0x%x, msg_opts 0x%x, kvoucher 
0x%08x, reply on 0x%08x: " + "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, + opts, msg_opts, msg->msgh_voucher_port, reply_port, + mach_error_string(kr), kr); + if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DU_UNREGISTER_REPLY_REMOVE); + } + if (clear_voucher) { + if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { + DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); + } + mach_voucher_t kv; + kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); + if (kvoucher_move_send) ipc_kvoucher = kv; + } + } + if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (opts & MACH_SEND_NOTIFY) { + _dispatch_debug("machport[0x%08x]: send-possible notification " + "armed", (mach_port_t)dsrr->du_ident); + _dispatch_mach_notification_set_armed(dsrr); + } else { + // send kevent must be installed on the manager queue + dm->dm_needs_mgr = 1; + } + if (ipc_kvoucher) { + _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); + voucher_t ipc_voucher; + ipc_voucher = _voucher_create_with_priority_and_mach_voucher( + voucher, dmsg->dmsg_priority, ipc_kvoucher); + _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", + ipc_voucher, dmsg, voucher); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = ipc_voucher; + } + goto out; + } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { + _voucher_dealloc_mach_voucher(ipc_kvoucher); + } + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && + !(_dispatch_unote_registered(dmrr) && + dmrr->du_ident == reply_port)) { + _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); + } + if (unlikely(!is_reply && dmsg == dsrr->dmsr_checkin && + _dispatch_unote_registered(dsrr))) { + _dispatch_mach_notification_kevent_unregister(dm); + } + if (slowpath(kr)) { + // Send failed, so reply was never registered + dmsgr = 
_dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, + msg_opts & DISPATCH_MACH_ASYNC_REPLY + ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + if (dmsg->do_ctxt) { + drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + } + } + _dispatch_mach_msg_set_reason(dmsg, kr, 0); + if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && + (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { + // Return sent message synchronously + send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + } + if (dmsgr) { + if (drq) { + _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); + } else { + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + } + } + send_status |= DM_SEND_STATUS_SUCCESS; +out: + return send_status; +} + +#pragma mark - +#pragma mark dispatch_mach_send_refs_t + +#define _dmsr_state_needs_lock_override(dq_state, qos) \ + unlikely(qos < _dq_state_max_qos(dq_state)) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dmsr_state_max_qos(uint64_t dmsr_state) +{ + return _dq_state_max_qos(dmsr_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dmsr_state_needs_override(uint64_t dmsr_state, dispatch_qos_t qos) +{ + dmsr_state &= DISPATCH_MACH_STATE_MAX_QOS_MASK; + return dmsr_state < _dq_state_from_qos(qos); +} + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos) +{ + if (_dmsr_state_needs_override(dmsr_state, qos)) { + dmsr_state &= ~DISPATCH_MACH_STATE_MAX_QOS_MASK; + dmsr_state |= _dq_state_from_qos(qos); + dmsr_state |= DISPATCH_MACH_STATE_DIRTY; + dmsr_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + } + return dmsr_state; +} + +#define _dispatch_mach_send_push_update_tail(dmsr, tail) \ + os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next) +#define _dispatch_mach_send_push_update_head(dmsr, head) \ + os_mpsc_push_update_head(dmsr, dmsr, head) +#define 
_dispatch_mach_send_get_head(dmsr) \ + os_mpsc_get_head(dmsr, dmsr) +#define _dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \ + os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next) +#define _dispatch_mach_send_pop_head(dmsr, head) \ + os_mpsc_pop_head(dmsr, dmsr, head, do_next) + +#define dm_push(dm, dc, qos) \ + _dispatch_queue_push((dm)->_as_dq, dc, qos) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr, + dispatch_object_t dou) +{ + if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) { + _dispatch_mach_send_push_update_head(dmsr, dou._do); + return true; + } + return false; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_reply_refs_t dmr; + dispatch_mach_msg_t dmsg; + struct dispatch_object_s *dc = NULL, *next_dc = NULL; + dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state); + uint64_t old_state, new_state; + uint32_t send_status; + bool needs_mgr, disconnecting, returning_send_result = false; + +again: + needs_mgr = false; disconnecting = false; + while (dmsr->dmsr_tail) { + dc = _dispatch_mach_send_get_head(dmsr); + do { + dispatch_mach_send_invoke_flags_t sf = send_flags; + // Only request immediate send result for the first message + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + next_dc = _dispatch_mach_send_pop_head(dmsr, dc); + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + goto partial_drain; + } + _dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq); + continue; + } + if (_dispatch_object_is_sync_waiter(dc)) { + dmsg = ((dispatch_continuation_t)dc)->dc_data; + dmr = ((dispatch_continuation_t)dc)->dc_other; + } else if (_dispatch_object_has_vtable(dc)) { + dmsg = (dispatch_mach_msg_t)dc; + 
dmr = NULL; + } else { + if (_dispatch_unote_registered(dmsr) && + (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + // send kevent must be uninstalled on the manager queue + needs_mgr = true; + goto partial_drain; + } + if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { + disconnecting = true; + goto partial_drain; + } + _dispatch_perfmon_workitem_inc(); + continue; + } + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + if (unlikely(dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + _dispatch_mach_msg_not_sent(dm, dmsg); + _dispatch_perfmon_workitem_inc(); + continue; + } + send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf); + if (unlikely(!send_status)) { + goto partial_drain; + } + if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { + returning_send_result = true; + } + _dispatch_perfmon_workitem_inc(); + } while ((dc = next_dc)); + } + + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + if (old_state & DISPATCH_MACH_STATE_DIRTY) { + new_state = old_state; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + // unlock + new_state = 0; + } + }); + goto out; + +partial_drain: + // if this is not a complete drain, we must undo some things + _dispatch_mach_send_unpop_head(dmsr, dc, next_dc); + + if (_dispatch_object_has_type(dc, + DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = old_state; + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = old_state; + if (old_state & (DISPATCH_MACH_STATE_DIRTY | + DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { + 
new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } else { + new_state |= DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; + } + }); + } + +out: + if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state)); + } + + if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { + qos = _dmsr_state_max_qos(new_state); + os_atomic_thread_fence(dependency); + dmsr = os_atomic_force_dependency_on(dmsr, new_state); + goto again; + } + + if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + qos = _dmsr_state_max_qos(new_state); + _dispatch_mach_push_send_barrier_drain(dm, qos); + } else { + if (needs_mgr || dm->dm_needs_mgr) { + qos = _dmsr_state_max_qos(new_state); + } else { + qos = 0; + } + if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_lock owner_self = _dispatch_lock_value_for_self(); + uint64_t old_state, new_state; + + uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; + uint64_t canlock_state = 0; + + if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; + } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { + canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + + dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); +retry: + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely((old_state & canlock_mask) != canlock_state)) 
{ + if (!(send_flags & DM_SEND_INVOKE_MAKE_DIRTY)) { + os_atomic_rmw_loop_give_up(break); + } + new_state |= DISPATCH_MACH_STATE_DIRTY; + } else { + if (_dmsr_state_needs_lock_override(old_state, oq_floor)) { + os_atomic_rmw_loop_give_up({ + oq_floor = _dispatch_queue_override_self(old_state); + goto retry; + }); + } + new_state |= owner_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + + if (unlikely((old_state & canlock_mask) != canlock_state)) { + return; + } + if (send_flags & DM_SEND_INVOKE_CANCEL) { + _dispatch_mach_cancel(dm); + } + _dispatch_mach_send_drain(dm, flags, send_flags); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_thread_frame_s dtf; + + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); + DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); + // hide the mach channel (see _dispatch_mach_barrier_invoke comment) + _dispatch_thread_frame_stash(&dtf); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ + _dispatch_mach_send_invoke(dm, flags, + DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); + }); + _dispatch_thread_frame_unstash(&dtf); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); + dc->dc_func = NULL; + dc->dc_ctxt = NULL; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dm_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +static void 
+_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, + dispatch_qos_t qos) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + uint64_t old_state, new_state, state_flags = 0; + dispatch_tid owner; + bool wakeup; + + // when pushing a send barrier that destroys + // the last reference to this channel, and the send queue is already + // draining on another thread, the send barrier may run as soon as + // _dispatch_mach_send_push_inline() returns. + _dispatch_retain_2(dm); + + wakeup = _dispatch_mach_send_push_inline(dmsr, dc); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { + state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; + } + } + + if (state_flags) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, { + new_state = _dmsr_state_merge_override(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); + } + + qos = _dmsr_state_max_qos(new_state); + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dmsr_state_needs_override(old_state, qos)) { + _dispatch_wqthread_override_start_check_owner(owner, qos, + &dmsr->dmsr_state_lock.dul_lock); + } + return _dispatch_release_2_tailcall(dm); + } + + dispatch_wakeup_flags_t wflags = 0; + if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { + _dispatch_mach_push_send_barrier_drain(dm, qos); + } else if (wakeup || dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED)) { + wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2; + } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + wflags = DISPATCH_WAKEUP_CONSUME_2; + } + if (wflags) { + return dx_wakeup(dm, qos, wflags); + } + return _dispatch_release_2_tailcall(dm); +} + 
+DISPATCH_NOINLINE +static bool +_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, + dispatch_object_t dou, dispatch_qos_t qos, + dispatch_mach_send_invoke_flags_t send_flags) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_lock owner_self = _dispatch_lock_value_for_self(); + uint64_t old_state, new_state, canlock_mask, state_flags = 0; + dispatch_tid owner; + + bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou); + if (wakeup) { + state_flags = DISPATCH_MACH_STATE_DIRTY; + } + + if (unlikely(dmsr->dmsr_disconnect_cnt || + (dm->dq_atomic_flags & DSF_CANCELED))) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + }); + dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + return false; + } + + canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | + DISPATCH_MACH_STATE_PENDING_BARRIER; + if (state_flags) { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, { + new_state = _dmsr_state_merge_override(old_state, qos); + new_state |= state_flags; + if (likely((old_state & canlock_mask) == 0)) { + new_state |= owner_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + } else { + os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + new_state = _dmsr_state_merge_override(old_state, qos); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(return false); + } + if (likely((old_state & canlock_mask) == 0)) { + new_state |= owner_self; + new_state &= ~DISPATCH_MACH_STATE_DIRTY; + new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; + new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; + } + }); + } + + owner = _dispatch_lock_owner((dispatch_lock)old_state); + if (owner) { + if (_dmsr_state_needs_override(old_state, qos)) { + _dispatch_wqthread_override_start_check_owner(owner, qos, 
+ &dmsr->dmsr_state_lock.dul_lock); + } + return false; + } + + if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { + dx_wakeup(dm, qos, 0); + return false; + } + + // Ensure our message is still at the head of the queue and has not already + // been dequeued by another thread that raced us to the send queue lock. + // A plain load of the head and comparison against our object pointer is + // sufficient. + if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) { + // Don't request immediate send result for messages we don't own + send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; + } + return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); +} + +#pragma mark - +#pragma mark dispatch_mach + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) +{ + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + if (_dispatch_unote_registered(dm->dm_send_refs)) { + dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0)); + } + dm->dm_send_refs->du_ident = 0; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) +{ + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + dm->dm_send_refs->du_ident = send; + dispatch_assume(_dispatch_unote_register(dm->dm_send_refs, + DISPATCH_WLH_ANON, 0)); +} + +void +_dispatch_mach_merge_notification(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data, + uintptr_t status DISPATCH_UNUSED, + pthread_priority_t pp DISPATCH_UNUSED) +{ + dispatch_mach_send_refs_t dmsr = du._dmsr; + dispatch_mach_t dm = _dispatch_wref2ptr(dmsr->du_owner_wref); + + if (data & dmsr->du_fflags) { + _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, + DM_SEND_INVOKE_MAKE_DIRTY); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg) +{ + mach_error_t error; + dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error); + 
if (reason == DISPATCH_MACH_MESSAGE_RECEIVED || !dm->dm_is_xpc || + !_dispatch_mach_xpc_hooks->dmxh_direct_message_handler( + dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) { + // Not XPC client or not a message that XPC can handle inline - push + // it onto the channel queue. + dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority)); + } else { + // XPC handled the message inline. Do the cleanup that would otherwise + // have happened in _dispatch_mach_msg_invoke(), leaving out steps that + // are not required in this context. + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); + } +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, dispatch_queue_t drq) { + // Push the message onto the given queue. This function is only used for + // replies to messages sent by + // dispatch_mach_send_with_result_and_async_reply_4libxpc(). + dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm); + _dispatch_trace_continuation_push(drq, dc); + dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority)); +} + +#pragma mark - +#pragma mark dispatch_mach_t + +static inline mach_msg_option_t +_dispatch_mach_checkin_options(void) +{ + mach_msg_option_t options = 0; +#if DISPATCH_USE_CHECKIN_NOIMPORTANCE + options = MACH_SEND_NOIMPORTANCE; // +#endif + return options; +} + + +static inline mach_msg_option_t +_dispatch_mach_send_options(void) +{ + mach_msg_option_t options = 0; + return options; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_mach_priority_propagate(mach_msg_option_t options, + pthread_priority_t *msg_pp) +{ +#if DISPATCH_USE_NOIMPORTANCE_QOS + if (options & MACH_SEND_NOIMPORTANCE) { + *msg_pp = 0; + return 0; + } +#endif + unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT; + if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) && + (options & DISPATCH_MACH_OWNED_REPLY_PORT) && + _dispatch_use_mach_special_reply_port()) 
{ + flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC; + } + *msg_pp = _dispatch_priority_compute_propagated(0, flags); + // TODO: remove QoS contribution of sync IPC messages to send queue + // rdar://31848737 + return _dispatch_qos_from_pp(*msg_pp); +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + dispatch_continuation_t dc_wait, mach_msg_option_t options) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { + DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); + } + dispatch_retain(dmsg); + pthread_priority_t msg_pp; + dispatch_qos_t qos = _dispatch_mach_priority_propagate(options, &msg_pp); + options |= _dispatch_mach_send_options(); + dmsg->dmsg_options = options; + mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); + dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); + bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE); + dmsg->dmsg_priority = msg_pp; + dmsg->dmsg_voucher = _voucher_copy(); + _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); + + uint32_t send_status; + bool returning_send_result = false; + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { + send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; + } + if (is_reply && !dmsg->dmsg_reply && !dmsr->dmsr_disconnect_cnt && + !(dm->dq_atomic_flags & DSF_CANCELED)) { + // replies are sent to a send-once right and don't need the send queue + dispatch_assert(!dc_wait); + send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); + dispatch_assert(send_status); + returning_send_result = !!(send_status & + DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); + } else { + _dispatch_voucher_ktrace_dmsg_push(dmsg); + dispatch_object_t dou = { ._dmsg = dmsg }; + if (dc_wait) dou._dc = dc_wait; + returning_send_result = 
_dispatch_mach_send_push_and_trydrain(dm, dou, + qos, send_flags); + } + if (returning_send_result) { + _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); + if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dispatch_release(dmsg); + } + return returning_send_result; +} + +DISPATCH_NOINLINE +void +dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options) +{ + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + dispatch_assert(!returned_send_result); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, + mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; +} + +static inline +dispatch_mach_msg_t +_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + bool *returned_send_result) +{ + mach_port_t send = MACH_PORT_NULL; + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + // use per-thread mach reply port + reply_port = _dispatch_get_thread_reply_port(); + mach_msg_header_t *hdr = 
_dispatch_mach_msg_get_msg(dmsg); + dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == + MACH_MSG_TYPE_MAKE_SEND_ONCE); + hdr->msgh_local_port = reply_port; + options |= DISPATCH_MACH_OWNED_REPLY_PORT; + } + options |= DISPATCH_MACH_WAIT_FOR_REPLY; + + dispatch_mach_reply_refs_t dmr; +#if DISPATCH_DEBUG + dmr = _dispatch_calloc(1, sizeof(*dmr)); +#else + struct dispatch_mach_reply_refs_s dmr_buf = { }; + dmr = &dmr_buf; +#endif + struct dispatch_continuation_s dc_wait = { + .dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT, + .dc_data = dmsg, + .dc_other = dmr, + .dc_priority = DISPATCH_NO_PRIORITY, + .dc_voucher = DISPATCH_NO_VOUCHER, + }; + dmr->dmr_ctxt = dmsg->do_ctxt; + dmr->dmr_waiter_tid = _dispatch_tid_self(); + *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); + if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { + _dispatch_clear_thread_reply_port(reply_port); + if (_dispatch_use_mach_special_reply_port()) { + // link special reply port to send right for remote receive right + // TODO: extend to pre-connect phase + send = dm->dm_send_refs->dmsr_send; + } + } + dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port, send); +#if DISPATCH_DEBUG + free(dmr); +#endif + return dmsg; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options) +{ + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + dispatch_assert(!returned_send_result); + return reply; +} + +DISPATCH_NOINLINE +dispatch_mach_msg_t +dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if 
(unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + bool returned_send_result; + dispatch_mach_msg_t reply; + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, + &returned_send_result); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; + return reply; +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options, + dispatch_mach_send_flags_t send_flags, + dispatch_mach_reason_t *send_result, mach_error_t *send_error) +{ + if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { + DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); + } + if (unlikely(!dm->dm_is_xpc)) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_mach_send_with_result_and_wait_for_reply is XPC only"); + } + + dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); + options &= ~DISPATCH_MACH_OPTIONS_MASK; + options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; + mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); + if (!reply_port) { + DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply"); + } + options |= DISPATCH_MACH_ASYNC_REPLY; + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; + mach_error_t err = 0; + if (returned_send_result) { + reason = _dispatch_mach_msg_get_reason(dmsg, &err); + } + *send_result = reason; + *send_error = err; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_disconnect(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; 
+ bool disconnected; + if (_dispatch_unote_registered(dmsr)) { + _dispatch_mach_notification_kevent_unregister(dm); + } + if (MACH_PORT_VALID(dmsr->dmsr_send)) { + _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send); + dmsr->dmsr_send = MACH_PORT_NULL; + } + if (dmsr->dmsr_checkin) { + _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin); + dmsr->dmsr_checkin = NULL; + } + _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); + dispatch_mach_reply_refs_t dmr, tmp; + TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) { + TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); + _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + if (_dispatch_unote_registered(dmr)) { + if (!_dispatch_mach_reply_kevent_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED)) { + TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr, + dmr_list); + } + } else { + _dispatch_mach_reply_waiter_unregister(dm, dmr, + DU_UNREGISTER_DISCONNECTED); + } + } + disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies); + _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + return disconnected; +} + +static void +_dispatch_mach_cancel(dispatch_mach_t dm) +{ + _dispatch_object_debug(dm, "%s", __func__); + if (!_dispatch_mach_disconnect(dm)) return; + + bool uninstalled = true; + dispatch_assert(!dm->dm_uninstalled); + + if (dm->dm_xpc_term_refs) { + uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0); + } + + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + mach_port_t local_port = (mach_port_t)dmrr->du_ident; + if (local_port) { + // handle the deferred delete case properly, similar to what + // _dispatch_source_invoke2() does + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) { + _dispatch_source_refs_unregister(dm->_as_ds, + DU_UNREGISTER_IMMEDIATE_DELETE); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + } else if (!(dqf & 
DSF_DEFERRED_DELETE) && !(dqf & DSF_DELETED)) { + _dispatch_source_refs_unregister(dm->_as_ds, 0); + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + } + if ((dqf & DSF_STATE_MASK) == DSF_DELETED) { + _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); + dmrr->du_ident = 0; + } else { + uninstalled = false; + } + } else { + _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, + DSF_ARMED | DSF_DEFERRED_DELETE); + } + + if (dm->dm_send_refs->dmsr_disconnect_cnt) { + uninstalled = false; // + } + if (uninstalled) dm->dm_uninstalled = uninstalled; +} + +DISPATCH_NOINLINE +static bool +_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) +{ + if (!_dispatch_mach_disconnect(dm)) return false; + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dmsr->dmsr_checkin = dou._dc->dc_data; + dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other; + _dispatch_continuation_free(dou._dc); + (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed); + _dispatch_object_debug(dm, "%s", __func__); + _dispatch_release(dm); // + return true; +} + +DISPATCH_NOINLINE +void +dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, + dispatch_mach_msg_t checkin) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + (void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed); + if (MACH_PORT_VALID(send) && checkin) { + dispatch_mach_msg_t dmsg = checkin; + dispatch_retain(dmsg); + dmsg->dmsg_options = _dispatch_mach_checkin_options(); + dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg); + } else { + checkin = NULL; + dmsr->dmsr_checkin_port = MACH_PORT_NULL; + } + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; + // actually called manually in _dispatch_mach_send_drain + dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; + dc->dc_ctxt = dc; + dc->dc_data = checkin; + dc->dc_other = (void*)(uintptr_t)send; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority 
= DISPATCH_NO_PRIORITY; + _dispatch_retain(dm); // + return _dispatch_mach_send_push(dm, dc, 0); +} + +DISPATCH_NOINLINE +mach_port_t +dispatch_mach_get_checkin_port(dispatch_mach_t dm) +{ + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { + return MACH_PORT_DEAD; + } + return dmsr->dmsr_checkin_port; +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_connect_invoke(dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_CONNECTED, NULL, 0, dmrr->dmrr_handler_func); + dm->dm_connect_handler_called = 1; + _dispatch_perfmon_workitem_inc(); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, + dispatch_invoke_flags_t flags, dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr; + mach_error_t err; + unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); + dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| + DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + + dmrr = dm->dm_recv_refs; + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_voucher_ktrace_dmsg_pop(dmsg); + _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); + (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, + dmsg->dmsg_voucher, adopt_flags); + dmsg->dmsg_voucher = NULL; + dispatch_invoke_with_autoreleasepool(flags, { + if (flags & DISPATCH_INVOKE_ASYNC_REPLY) { + _dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg, + _dispatch_mach_xpc_hooks->dmxh_async_reply_handler); + } else { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + if (reason == DISPATCH_MACH_MESSAGE_RECEIVED && + (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED)) { + // Do not deliver message received + // after cancellation: _dispatch_mach_merge_msg can be preempted + // for a long time between clearing 
DSF_ARMED but before + // enqueuing the message, allowing for cancellation to complete, + // and then the message event to be delivered. + // + // This makes XPC unhappy because some of these messages are + // port-destroyed notifications that can cause it to try to + // reconnect on a channel that is almost fully canceled + } else { + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg, + err, dmrr->dmrr_handler_func); + } + } + _dispatch_perfmon_workitem_inc(); + }); + _dispatch_introspection_queue_item_complete(dmsg); + dispatch_release(dmsg); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + + // hide mach channel + dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); + _dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm); + _dispatch_thread_frame_unstash(&dtf); +} + +DISPATCH_NOINLINE +void +_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + dispatch_mach_t dm = dc->dc_other; + dispatch_mach_recv_refs_t dmrr; + uintptr_t dc_flags = (uintptr_t)dc->dc_data; + unsigned long type = dc_type(dc); + + // hide mach channel from clients + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + // on the send queue, the mach channel isn't the current queue + // its target queue is the current one already + _dispatch_thread_frame_stash(&dtf); + } + dmrr = dm->dm_recv_refs; + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, { + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + 
DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, + dmrr->dmrr_handler_func); + }); + }); + if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { + _dispatch_thread_frame_unstash(&dtf); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc, + dispatch_mach_t dm, dispatch_continuation_vtable_t vtable) +{ + dc->dc_data = (void *)dc->dc_flags; + dc->dc_other = dm; + dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + dispatch_qos_t qos; + + _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + qos = _dispatch_continuation_override_qos(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +void +dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + dispatch_qos_t qos; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); + _dispatch_trace_continuation_push(dm->_as_dq, dc); + qos = _dispatch_continuation_override_qos(dm->_as_dq, dc); + return _dispatch_mach_send_push(dm, dc, qos); +} + +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + + _dispatch_continuation_init_f(dc, dm, context, func, 0, 
0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); + return _dispatch_continuation_async(dm->_as_dq, dc); +} + +DISPATCH_NOINLINE +void +dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + + _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); + return _dispatch_continuation_async(dm->_as_dq, dc); +} + +DISPATCH_NOINLINE +static void +_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + + dispatch_invoke_with_autoreleasepool(flags, { + if (slowpath(!dm->dm_connect_handler_called)) { + _dispatch_mach_connect_invoke(dm); + } + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_CANCELED, NULL, 0, dmrr->dmrr_handler_func); + _dispatch_perfmon_workitem_inc(); + }); + dm->dm_cancel_handler_called = 1; + _dispatch_release(dm); // the retain is done at creation time +} + +DISPATCH_NOINLINE +void +dispatch_mach_cancel(dispatch_mach_t dm) +{ + dispatch_source_cancel(dm->_as_ds); +} + +static void +_dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, + dispatch_priority_t pri) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + uint32_t disconnect_cnt; + + if (dmrr->du_ident) { + _dispatch_source_refs_register(dm->_as_ds, wlh, pri); + dispatch_assert(dmrr->du_is_direct); + } + + if (dm->dm_is_xpc) { + bool monitor_sigterm; + if (_dispatch_mach_xpc_hooks->version < 3) { + monitor_sigterm = true; + } else if (!_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification){ + monitor_sigterm = true; + } else { + monitor_sigterm = + _dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification( + dm->dm_recv_refs->dmrr_handler_ctxt); + } + if (monitor_sigterm) { + 
dispatch_xpc_term_refs_t _dxtr = + dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr; + _dxtr->du_owner_wref = _dispatch_ptr2wref(dm); + dm->dm_xpc_term_refs = _dxtr; + _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); + } + } + if (!dm->dq_priority) { + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the DEFAULTQUEUE flag + // is used so that the priority of the channel doesn't act as + // a QoS floor for incoming messages (26761457) + dm->dq_priority = pri; + } + dm->ds_is_installed = true; + if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt, + DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { + DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); + } +} + +void +_dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume) +{ + dispatch_priority_t pri; + dispatch_wlh_t wlh; + + // call "super" + _dispatch_queue_finalize_activation(dm->_as_dq, allow_resume); + + if (!dm->ds_is_installed) { + pri = _dispatch_queue_compute_priority_and_wlh(dm->_as_dq, &wlh); + if (pri) _dispatch_mach_install(dm, wlh, pri); + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf) +{ + dispatch_queue_flags_t oqf, nqf; + bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, { + nqf = oqf; + if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE | + DSF_DELETED)) { + // the test is inside the loop because it's convenient but the + // result should not change for the duration of the rmw_loop + os_atomic_rmw_loop_give_up(break); + } + nqf |= DSF_ARMED; + }); + if (out_dqf) *out_dqf = nqf; + return rc; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_mach_invoke2(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) +{ + dispatch_mach_t dm = dou._dm; + 
dispatch_queue_wakeup_target_t retq = NULL; + dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + dispatch_queue_flags_t dqf = 0; + + if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr && + _dispatch_unote_wlh_changed(dmrr, _dispatch_get_wlh())) { + dqf = _dispatch_queue_atomic_flags_set_orig(dm->_as_dq, + DSF_WLH_CHANGED); + if (!(dqf & DSF_WLH_CHANGED)) { + if (dm->dm_is_xpc) { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after xpc connection was activated"); + } else { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after mach channel was activated"); + } + } + } + + // This function performs all mach channel actions. Each action is + // responsible for verifying that it takes place on the appropriate queue. + // If the current queue is not the correct queue for this action, the + // correct queue will be returned and the invoke will be re-driven on that + // queue. + + // The order of tests here in invoke and in wakeup should be consistent. + + if (unlikely(!dm->ds_is_installed)) { + // The channel needs to be installed on the kevent queue. 
+ if (unlikely(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { + return dm->do_targetq; + } + _dispatch_mach_install(dm, _dispatch_get_wlh(),_dispatch_get_basepri()); + _dispatch_perfmon_workitem_inc(); + } + + if (_dispatch_queue_class_probe(dm)) { + if (dq == dm->do_targetq) { +drain: + retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned); + } else { + retq = dm->do_targetq; + } + } + + if (!retq && _dispatch_unote_registered(dmrr)) { + if (_dispatch_mach_tryarm(dm, &dqf)) { + _dispatch_unote_resume(dmrr); + if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail && + (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) && + _dispatch_wlh_should_poll_unote(dmrr)) { + // try to redrive the drain from under the lock for channels + // targeting an overcommit root queue to avoid parking + // when the next message has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + if (dm->dq_items_tail) goto drain; + } + } + } else { + dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + } + + if (dmsr->dmsr_tail) { + bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && + _dispatch_unote_registered(dmsr)); + if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || + (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + // The channel has pending messages to send. + if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { + return retq ? retq : &_dispatch_mgr_q; + } + dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; + if (dq != &_dispatch_mgr_q) { + send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; + } + _dispatch_mach_send_invoke(dm, flags, send_flags); + } + if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } else if (!retq && (dqf & DSF_CANCELED)) { + // The channel has been cancelled and needs to be uninstalled from the + // manager queue. After uninstallation, the cancellation handler needs + // to be delivered to the target queue. 
+ if (!dm->dm_uninstalled) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + if (dq != &_dispatch_mgr_q) { + return retq ? retq : &_dispatch_mgr_q; + } + _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); + if (unlikely(!dm->dm_uninstalled)) { + // waiting for the delivery of a deferred delete event + // or deletion didn't happen because send_invoke couldn't + // acquire the send lock + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + if (!dm->dm_cancel_handler_called) { + if (dq != dm->do_targetq) { + return retq ? retq : dm->do_targetq; + } + _dispatch_mach_cancel_invoke(dm, flags); + } + } + + return retq; +} + +DISPATCH_NOINLINE +void +_dispatch_mach_invoke(dispatch_mach_t dm, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dm, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_mach_invoke2); +} + +void +_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + // This function determines whether the mach channel needs to be invoked. + // The order of tests here in probe and in invoke should be consistent. + + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + + if (!dm->ds_is_installed) { + // The channel needs to be installed on the kevent queue. 
+ tq = DISPATCH_QUEUE_WAKEUP_TARGET; + goto done; + } + + if (_dispatch_queue_class_probe(dm)) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + goto done; + } + + if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) { + // Sending and uninstallation below require the send lock, the channel + // will be woken up when the lock is dropped + goto done; + } + + if (dmsr->dmsr_tail) { + bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && + _dispatch_unote_registered(dmsr)); + if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || + (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + if (unlikely(requires_mgr)) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } else { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + } else if (dqf & DSF_CANCELED) { + if (!dm->dm_uninstalled) { + if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { + // waiting for the delivery of a deferred delete event + } else { + // The channel needs to be uninstalled from the manager queue + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } + } else if (!dm->dm_cancel_handler_called) { + // the cancellation handler needs to be delivered to the target + // queue. 
+ tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + +done: + if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) && + dm->do_targetq == &_dispatch_mgr_q) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; + } + + return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq); +} + +static void +_dispatch_mach_sigterm_invoke(void *ctx) +{ + dispatch_mach_t dm = ctx; + if (!(dm->dq_atomic_flags & DSF_CANCELED)) { + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0, + dmrr->dmrr_handler_func); + } +} + +void +_dispatch_xpc_sigterm_merge(dispatch_unote_t du, + uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, + uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp) +{ + dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref); + uint32_t options = 0; + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + !(flags & EV_DELETE)) { + options = DU_UNREGISTER_IMMEDIATE_DELETE; + } else { + dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE)); + options = DU_UNREGISTER_ALREADY_DELETED; + } + _dispatch_unote_unregister(du, options); + + if (!(dm->dq_atomic_flags & DSF_CANCELED)) { + _dispatch_barrier_async_detached_f(dm->_as_dq, dm, + _dispatch_mach_sigterm_invoke); + } else { + dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY); + } +} + +#pragma mark - +#pragma mark dispatch_mach_msg_t + +dispatch_mach_msg_t +dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, + dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) +{ + if (slowpath(size < sizeof(mach_msg_header_t)) || + slowpath(destructor && !msg)) { + DISPATCH_CLIENT_CRASH(size, "Empty message"); + } + + dispatch_mach_msg_t dmsg; + size_t msg_size = sizeof(struct dispatch_mach_msg_s); + if (!destructor && os_add_overflow(msg_size, + (size - sizeof(dmsg->dmsg_msg)), &msg_size)) { + DISPATCH_CLIENT_CRASH(size, "Message size too large"); + } + + dmsg = 
_dispatch_object_alloc(DISPATCH_VTABLE(mach_msg), msg_size); + if (destructor) { + dmsg->dmsg_msg = msg; + } else if (msg) { + memcpy(dmsg->dmsg_buf, msg, size); + } + dmsg->do_next = DISPATCH_OBJECT_LISTLESS; + dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); + dmsg->dmsg_destructor = destructor; + dmsg->dmsg_size = size; + if (msg_ptr) { + *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); + } + return dmsg; +} + +void +_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg, + DISPATCH_UNUSED bool *allow_free) +{ + if (dmsg->dmsg_voucher) { + _voucher_release(dmsg->dmsg_voucher); + dmsg->dmsg_voucher = NULL; + } + switch (dmsg->dmsg_destructor) { + case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: + free(dmsg->dmsg_msg); + break; + case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { + mach_vm_size_t vm_size = dmsg->dmsg_size; + mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg; + (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), + vm_addr, vm_size)); + break; + }} +} + +static inline mach_msg_header_t* +_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) +{ + return dmsg->dmsg_destructor ? 
dmsg->dmsg_msg : + (mach_msg_header_t*)dmsg->dmsg_buf; +} + +mach_msg_header_t* +dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) +{ + if (size_ptr) { + *size_ptr = dmsg->dmsg_size; + } + return _dispatch_mach_msg_get_msg(dmsg); +} + +size_t +_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(dmsg), dmsg); + offset += _dispatch_object_debug_attr(dmsg, buf + offset, bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " + "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + if (hdr->msgh_id) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", + hdr->msgh_id); + } + if (hdr->msgh_size) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ", + hdr->msgh_size); + } + if (hdr->msgh_bits) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "bits <l %u, r %u", + MACH_MSGH_BITS_LOCAL(hdr->msgh_bits), + MACH_MSGH_BITS_REMOTE(hdr->msgh_bits)); + if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x", + MACH_MSGH_BITS_OTHER(hdr->msgh_bits)); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, ">, "); + } + if (hdr->msgh_local_port && hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, " + "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port); + } else if (hdr->msgh_local_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x", + hdr->msgh_local_port); + } else if (hdr->msgh_remote_port) { + offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x", + hdr->msgh_remote_port); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports"); + } + offset += dsnprintf(&buf[offset], bufsiz - offset, " } }"); + return offset; +} + +DISPATCH_ALWAYS_INLINE +static dispatch_queue_t 
+_dispatch_mach_msg_context_async_reply_queue(void *msg_context) +{ + if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) { + return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue( + msg_context); + } + return NULL; +} + +static dispatch_continuation_t +_dispatch_mach_msg_async_reply_wrap(dispatch_mach_msg_t dmsg, + dispatch_mach_t dm) +{ + _dispatch_retain(dm); // Released in _dispatch_mach_msg_async_reply_invoke() + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->do_vtable = DC_VTABLE(MACH_ASYNC_REPLY); + dc->dc_data = dmsg; + dc->dc_other = dm; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + return dc; +} + +DISPATCH_NOINLINE +void +_dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) +{ + // _dispatch_mach_msg_invoke_with_mach() releases the reference on dmsg + // taken by _dispatch_mach_msg_async_reply_wrap() after handling it. 
+ dispatch_mach_msg_t dmsg = dc->dc_data; + dispatch_mach_t dm = dc->dc_other; + _dispatch_mach_msg_invoke_with_mach(dmsg, + flags | DISPATCH_INVOKE_ASYNC_REPLY, dm); + + // Balances _dispatch_mach_msg_async_reply_wrap + _dispatch_release(dc->dc_other); + + _dispatch_continuation_free(dc); +} + +#pragma mark - +#pragma mark dispatch_mig_server + +mach_msg_return_t +dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, + dispatch_mig_callback_t callback) +{ + mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; + mach_msg_options_t tmp_options; + mig_reply_error_t *bufTemp, *bufRequest, *bufReply; + mach_msg_return_t kr = 0; + uint64_t assertion_token = 0; + uint32_t cnt = 1000; // do not stall out serial queues + boolean_t demux_success; + bool received = false; + size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; + dispatch_source_refs_t dr = ds->ds_refs; + + bufRequest = alloca(rcv_size); + bufRequest->RetCode = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); + p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + + bufReply = alloca(rcv_size); + bufReply->Head.msgh_size = 0; + for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); + p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard + } + +#if DISPATCH_DEBUG + options |= MACH_RCV_LARGE; // rdar://problem/8422992 +#endif + tmp_options = options; + // XXX FIXME -- change this to not starve out the target queue + for (;;) { + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { + options &= ~MACH_RCV_MSG; + tmp_options &= ~MACH_RCV_MSG; + + if (!(tmp_options & MACH_SEND_MSG)) { + goto out; + } + } + kr = mach_msg(&bufReply->Head, tmp_options, 
bufReply->Head.msgh_size, + (mach_msg_size_t)rcv_size, (mach_port_t)dr->du_ident, 0, 0); + + tmp_options = options; + + if (slowpath(kr)) { + switch (kr) { + case MACH_SEND_INVALID_DEST: + case MACH_SEND_TIMED_OUT: + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_destroy(&bufReply->Head); + } + break; + case MACH_RCV_TIMED_OUT: + // Don't return an error if a message was sent this time or + // a message was successfully received previously + // rdar://problems/7363620&7791738 + if(bufReply->Head.msgh_remote_port || received) { + kr = MACH_MSG_SUCCESS; + } + break; + case MACH_RCV_INVALID_NAME: + break; +#if DISPATCH_DEBUG + case MACH_RCV_TOO_LARGE: + // receive messages that are too large and log their id and size + // rdar://problem/8422992 + tmp_options &= ~MACH_RCV_LARGE; + size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; + void *large_buf = malloc(large_size); + if (large_buf) { + rcv_size = large_size; + bufReply = large_buf; + } + if (!mach_msg(&bufReply->Head, tmp_options, 0, + (mach_msg_size_t)rcv_size, + (mach_port_t)dr->du_ident, 0, 0)) { + _dispatch_log("BUG in libdispatch client: " + "dispatch_mig_server received message larger than " + "requested size %zd: id = 0x%x, size = %d", + maxmsgsz, bufReply->Head.msgh_id, + bufReply->Head.msgh_size); + } + if (large_buf) { + free(large_buf); + } + // fall through +#endif + default: + _dispatch_bug_mach_client( + "dispatch_mig_server: mach_msg() failed", kr); + break; + } + goto out; + } + + if (!(tmp_options & MACH_RCV_MSG)) { + goto out; + } + + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + assertion_token = 0; + } + received = true; + + bufTemp = bufRequest; + bufRequest = bufReply; + bufReply = bufTemp; + +#if DISPATCH_USE_IMPORTANCE_ASSERTION +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-declarations" + int r = 
proc_importance_assertion_begin_with_msg(&bufRequest->Head, + NULL, &assertion_token); + if (r && slowpath(r != EIO)) { + (void)dispatch_assume_zero(r); + } +#pragma clang diagnostic pop +#endif + _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); + demux_success = callback(&bufRequest->Head, &bufReply->Head); + + if (!demux_success) { + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode + // is present + if (slowpath(bufReply->RetCode)) { + if (bufReply->RetCode == MIG_NO_REPLY) { + continue; + } + + // destroy the request - but not the reply port + bufRequest->Head.msgh_remote_port = 0; + mach_msg_destroy(&bufRequest->Head); + } + } + + if (bufReply->Head.msgh_remote_port) { + tmp_options |= MACH_SEND_MSG; + if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { + tmp_options |= MACH_SEND_TIMEOUT; + } + } + } + +out: + if (assertion_token) { +#if DISPATCH_USE_IMPORTANCE_ASSERTION + int r = proc_importance_assertion_complete(assertion_token); + (void)dispatch_assume_zero(r); +#endif + } + + return kr; +} + +#pragma mark - +#pragma mark dispatch_mach_debug + +static size_t +_dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) +{ + dispatch_queue_t target = dm->do_targetq; + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + + return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " + "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " + "send state = %016llx, disconnected = %d, canceled = %d ", + target && target->dq_label ? target->dq_label : "", target, + (mach_port_t)dmrr->du_ident, dmsr->dmsr_send, + (mach_port_t)dmsr->du_ident, + dmsr->dmsr_notification_armed ? 
" (armed)" : "", + dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "", + dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt, + (bool)(dm->dq_atomic_flags & DSF_CANCELED)); +} + +size_t +_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label : + dx_kind(dm), dm); + offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} + +#endif /* HAVE_MACH */ diff --git a/src/mach_internal.h b/src/mach_internal.h new file mode 100644 index 000000000..8c8edd8d3 --- /dev/null +++ b/src/mach_internal.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_MACH_INTERNAL__ +#define __DISPATCH_MACH_INTERNAL__ +#if HAVE_MACH + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." 
+#include // for HeaderDoc +#endif + +// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t +// bit values must not overlap as they share the same kevent fflags ! + +/*! + * @enum dispatch_source_mach_send_flags_t + * + * @constant DISPATCH_MACH_SEND_DELETED + * Port-deleted notification. Disabled for source registration. + */ +enum { + DISPATCH_MACH_SEND_DELETED = 0x4, +}; +/*! + * @enum dispatch_source_mach_recv_flags_t + * + * @constant DISPATCH_MACH_RECV_MESSAGE + * Receive right has pending messages + */ +enum { + DISPATCH_MACH_RECV_MESSAGE = 0x2, +}; + + +DISPATCH_CLASS_DECL(mach); +DISPATCH_CLASS_DECL(mach_msg); + +#ifndef __cplusplus +struct dispatch_mach_s { + DISPATCH_SOURCE_HEADER(mach); + dispatch_mach_send_refs_t dm_send_refs; + dispatch_xpc_term_refs_t dm_xpc_term_refs; +} DISPATCH_ATOMIC64_ALIGN; + +struct dispatch_mach_msg_s { + DISPATCH_OBJECT_HEADER(mach_msg); + union { + mach_msg_option_t dmsg_options; + mach_error_t dmsg_error; + }; + mach_port_t dmsg_reply; + pthread_priority_t dmsg_priority; + voucher_t dmsg_voucher; + dispatch_mach_msg_destructor_t dmsg_destructor; + size_t dmsg_size; + union { + mach_msg_header_t *dmsg_msg; + char dmsg_buf[0]; + }; +}; + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mach_xref_dispose(struct dispatch_mach_s *dm) +{ + if (dm->dm_is_xpc) { + dm->dm_recv_refs->dmrr_handler_ctxt = (void *)0xbadfeed; + } +} +#endif // __cplusplus + +dispatch_source_t +_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, + const struct dispatch_continuation_s *dc); + +void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free); +void _dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume); +void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags); +void 
_dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); +void _dispatch_mach_merge_notification(dispatch_unote_t du, + uint32_t flags, uintptr_t data, uintptr_t status, + pthread_priority_t pp); +void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz); +void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *msg, mach_msg_size_t msgsz); +void _dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags, + uintptr_t data, uintptr_t status, pthread_priority_t pp); + +void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg, bool *allow_free); +void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, + size_t bufsiz); + +void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); + +#endif // HAVE_MACH +#endif /* __DISPATCH_MACH_INTERNAL__ */ diff --git a/src/object.c b/src/object.c index 1928df53f..86d100507 100644 --- a/src/object.c +++ b/src/object.c @@ -37,14 +37,28 @@ DISPATCH_NOINLINE _os_object_t _os_object_retain_internal(_os_object_t obj) { - return _os_object_retain_internal_inline(obj); + return _os_object_retain_internal_n_inline(obj, 1); +} + +DISPATCH_NOINLINE +_os_object_t +_os_object_retain_internal_n(_os_object_t obj, uint16_t n) +{ + return _os_object_retain_internal_n_inline(obj, n); } DISPATCH_NOINLINE void _os_object_release_internal(_os_object_t obj) { - return _os_object_release_internal_inline(obj); + return _os_object_release_internal_n_inline(obj, 1); +} + +DISPATCH_NOINLINE +void 
+_os_object_release_internal_n(_os_object_t obj, uint16_t n) +{ + return _os_object_release_internal_n_inline(obj, n); } DISPATCH_NOINLINE @@ -89,21 +103,19 @@ _os_object_release(_os_object_t obj) bool _os_object_retain_weak(_os_object_t obj) { - int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { - return true; // global object - } -retry: - if (slowpath(xref_cnt == -1)) { - return false; - } - if (slowpath(xref_cnt < -1)) { - goto overrelease; - } - if (slowpath(!os_atomic_cmpxchgvw2o(obj, os_obj_xref_cnt, xref_cnt, - xref_cnt + 1, &xref_cnt, relaxed))) { - goto retry; - } + int xref_cnt, nxref_cnt; + os_atomic_rmw_loop2o(obj, os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, { + if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + os_atomic_rmw_loop_give_up(return true); // global object + } + if (slowpath(xref_cnt == -1)) { + os_atomic_rmw_loop_give_up(return false); + } + if (slowpath(xref_cnt < -1)) { + os_atomic_rmw_loop_give_up(goto overrelease); + } + nxref_cnt = xref_cnt + 1; + }); return true; overrelease: _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); @@ -126,7 +138,7 @@ _os_object_allows_weak_reference(_os_object_t obj) #pragma mark dispatch_object_t void * -_dispatch_alloc(const void *vtable, size_t size) +_dispatch_object_alloc(const void *vtable, size_t size) { #if OS_OBJECT_HAVE_OBJC1 const struct dispatch_object_vtable_s *_vtable = vtable; @@ -139,6 +151,27 @@ _dispatch_alloc(const void *vtable, size_t size) #endif } +void +_dispatch_object_finalize(dispatch_object_t dou) +{ +#if USE_OBJC + objc_destructInstance((id)dou._do); +#else + (void)dou; +#endif +} + +void +_dispatch_object_dealloc(dispatch_object_t dou) +{ + // so that ddt doesn't pick up bad objects when malloc reuses this memory + dou._os_obj->os_obj_isa = NULL; +#if OS_OBJECT_HAVE_OBJC1 + dou._do->do_vtable = NULL; +#endif + free(dou._os_obj); +} + void dispatch_retain(dispatch_object_t dou) { @@ -153,24 +186,6 @@ 
dispatch_release(dispatch_object_t dou) _os_object_release(dou._os_obj); } -static void -_dispatch_dealloc(dispatch_object_t dou) -{ - dispatch_queue_t tq = dou._do->do_targetq; - dispatch_function_t func = dou._do->do_finalizer; - void *ctxt = dou._do->do_ctxt; -#if OS_OBJECT_HAVE_OBJC1 - // so that ddt doesn't pick up bad objects when malloc reuses this memory - dou._do->do_vtable = NULL; -#endif - _os_object_dealloc(dou._os_obj); - - if (func && ctxt) { - dispatch_async_f(tq, ctxt, func); - } - _dispatch_release_tailcall(tq); -} - #if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) @@ -181,6 +196,10 @@ _dispatch_xref_dispose(dispatch_object_t dou) } if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { _dispatch_source_xref_dispose(dou._ds); +#if HAVE_MACH + } else if (dx_type(dou._do) == DISPATCH_MACH_CHANNEL_TYPE) { + _dispatch_mach_xref_dispose(dou._dm); +#endif } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) { _dispatch_runloop_queue_xref_dispose(dou._dq); } @@ -191,19 +210,35 @@ _dispatch_xref_dispose(dispatch_object_t dou) void _dispatch_dispose(dispatch_object_t dou) { + dispatch_queue_t tq = dou._do->do_targetq; + dispatch_function_t func = dou._do->do_finalizer; + void *ctxt = dou._do->do_ctxt; + bool allow_free = true; + if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { DISPATCH_INTERNAL_CRASH(dou._do->do_next, "Release while enqueued"); } - dx_dispose(dou._do); - return _dispatch_dealloc(dou); + + dx_dispose(dou._do, &allow_free); + + // Past this point, the only thing left of the object is its memory + if (likely(allow_free)) { + _dispatch_object_finalize(dou); + _dispatch_object_dealloc(dou); + } + if (func && ctxt) { + dispatch_async_f(tq, ctxt, func); + } + if (tq) _dispatch_release_tailcall(tq); } void * dispatch_get_context(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, 
QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return NULL; } return dou._do->do_ctxt; @@ -213,8 +248,9 @@ void dispatch_set_context(dispatch_object_t dou, void *context) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return; } dou._do->do_ctxt = context; @@ -224,8 +260,9 @@ void dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); - if (slowpath(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE))) { return; } dou._do->do_finalizer = finalizer; @@ -237,10 +274,11 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq); if (dx_vtable(dou._do)->do_set_targetq) { dx_vtable(dou._do)->do_set_targetq(dou._do, tq); - } else if (dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && - !slowpath(dx_hastypeflag(dou._do, QUEUE_ROOT))) { + } else if (likely(dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && + !dx_hastypeflag(dou._do, QUEUE_ROOT) && + !dx_hastypeflag(dou._do, QUEUE_BASE))) { if (slowpath(!tq)) { - tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, false); + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); } _dispatch_object_set_target_queue_inline(dou._do, tq); } @@ -268,7 +306,9 @@ void dispatch_resume(dispatch_object_t dou) { 
DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); - if (dx_vtable(dou._do)->do_resume) { + // the do_suspend below is not a typo. Having a do_resume but no do_suspend + // allows for objects to support activate, but have no-ops suspend/resume + if (dx_vtable(dou._do)->do_suspend) { dx_vtable(dou._do)->do_resume(dou._do, false); } } @@ -276,6 +316,6 @@ dispatch_resume(dispatch_object_t dou) size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz) { - return dsnprintf(buf, bufsiz, "xrefcnt = 0x%x, refcnt = 0x%x, ", + return dsnprintf(buf, bufsiz, "xref = %d, ref = %d, ", dou._do->do_xref_cnt + 1, dou._do->do_ref_cnt + 1); } diff --git a/src/object.m b/src/object.m index 323c98b47..efee82947 100644 --- a/src/object.m +++ b/src/object.m @@ -29,10 +29,21 @@ #error Objective C GC isn't supported anymore #endif +#if __has_include() #include +#else +extern id _Nullable objc_retain(id _Nullable obj) __asm__("_objc_retain"); +extern void objc_release(id _Nullable obj) __asm__("_objc_release"); +extern void _objc_init(void); +extern void _objc_atfork_prepare(void); +extern void _objc_atfork_parent(void); +extern void _objc_atfork_child(void); +#endif // __has_include() #include #include +// NOTE: this file must not contain any atomic operations + #pragma mark - #pragma mark _os_object_t @@ -126,6 +137,24 @@ return objc_release(obj); } +void +_os_object_atfork_prepare(void) +{ + return _objc_atfork_prepare(); +} + +void +_os_object_atfork_parent(void) +{ + return _objc_atfork_parent(); +} + +void +_os_object_atfork_child(void) +{ + return _objc_atfork_child(); +} + #pragma mark - #pragma mark _os_object @@ -233,7 +262,7 @@ - (void)_dispose { NSUInteger offset = 0; NSString *desc = [dou debugDescription]; [desc getBytes:buf maxLength:bufsiz-1 usedLength:&offset - encoding:NSUTF8StringEncoding options:0 + encoding:NSUTF8StringEncoding options:(NSStringEncodingConversionOptions)0 range:NSMakeRange(0, [desc length]) remainingRange:NULL]; if (offset) 
buf[offset] = 0; return offset; @@ -263,9 +292,14 @@ - (NSString *)debugDescription { } else { strlcpy(buf, dx_kind(obj), sizeof(buf)); } - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), buf]; +} + +- (void)dealloc DISPATCH_NORETURN { + DISPATCH_INTERNAL_CRASH(0, "Calling dealloc on a dispatch object"); + [super dealloc]; // make clang happy } @end @@ -277,9 +311,10 @@ @implementation DISPATCH_CLASS(queue) - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s[%p]>"], - class_getName([self class]), dispatch_queue_get_label(self), self]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), + dispatch_queue_get_label(self), self]; } - (void)_xref_dispose { @@ -307,6 +342,7 @@ @implementation DISPATCH_CLASS(mach) - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + _dispatch_mach_xref_dispose((struct dispatch_mach_s *)self); [super _xref_dispose]; } @@ -351,6 +387,14 @@ @implementation OS_OBJECT_CLASS(voucher) DISPATCH_UNAVAILABLE_INIT() DISPATCH_OBJC_LOAD() +-(id)retain { + return (id)_voucher_retain_inline((struct voucher_s *)self); +} + +-(oneway void)release { + return _voucher_release_inline((struct voucher_s *)self); +} + - (void)_xref_dispose { return _voucher_xref_dispose(self); // calls _os_object_release_internal() } @@ -364,9 +408,9 @@ - (NSString *)debugDescription { if (!nsstring) return nil; char buf[2048]; _voucher_debug(self, buf, sizeof(buf)); - return [nsstring stringWithFormat: - [nsstring stringWithUTF8String:"<%s: %s>"], - class_getName([self 
class]), buf]; + NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; + if (!format) return nil; + return [nsstring stringWithFormat:format, class_getName([self class]), buf]; } @end @@ -393,20 +437,20 @@ - (NSString *)debugDescription { #if DISPATCH_COCOA_COMPAT -void * -_dispatch_last_resort_autorelease_pool_push(void) +void +_dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic) { if (!slowpath(_os_object_debug_missing_pools)) { - return _dispatch_autorelease_pool_push(); + dic->dic_autorelease_pool = _dispatch_autorelease_pool_push(); } - return NULL; } void -_dispatch_last_resort_autorelease_pool_pop(void *context) +_dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic) { if (!slowpath(_os_object_debug_missing_pools)) { - return _dispatch_autorelease_pool_pop(context); + _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool); + dic->dic_autorelease_pool = NULL; } } @@ -448,6 +492,19 @@ - (NSString *)debugDescription { } #if HAVE_MACH +#undef _dispatch_client_callout3 +void +_dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, + dispatch_mach_msg_t dmsg, dispatch_mach_async_reply_callback_t f) +{ + @try { + return f(ctxt, reason, dmsg); + } + @catch (...) 
{ + objc_terminate(); + } +} + #undef _dispatch_client_callout4 void _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, diff --git a/src/object_internal.h b/src/object_internal.h index 80bb10251..4504f6587 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -179,20 +179,23 @@ #define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_flags_t) + void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ + dispatch_invoke_flags_t); \ + void (*const do_push)(struct x##_s *, dispatch_object_t, \ + dispatch_qos_t) #define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \ DISPATCH_INVOKABLE_VTABLE_HEADER(x); \ void (*const do_wakeup)(struct x##_s *, \ - pthread_priority_t, dispatch_wakeup_flags_t); \ - void (*const do_dispose)(struct x##_s *) + dispatch_qos_t, dispatch_wakeup_flags_t); \ + void (*const do_dispose)(struct x##_s *, bool *allow_free) #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ DISPATCH_QUEUEABLE_VTABLE_HEADER(x); \ void (*const do_set_targetq)(struct x##_s *, dispatch_queue_t); \ void (*const do_suspend)(struct x##_s *); \ void (*const do_resume)(struct x##_s *, bool activate); \ - void (*const do_finalize_activation)(struct x##_s *); \ + void (*const do_finalize_activation)(struct x##_s *, bool *allow_resume); \ size_t (*const do_debug)(struct x##_s *, char *, size_t) #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) @@ -202,8 +205,9 @@ #define dx_hastypeflag(x, f) (dx_vtable(x)->do_type & _DISPATCH_##f##_TYPEFLAG) #define dx_kind(x) dx_vtable(x)->do_kind #define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z)) -#define dx_dispose(x) dx_vtable(x)->do_dispose(x) -#define dx_invoke(x, z) dx_vtable(x)->do_invoke(x, z) +#define dx_dispose(x, y) dx_vtable(x)->do_dispose(x, y) +#define dx_invoke(x, y, z) dx_vtable(x)->do_invoke(x, y, z) +#define dx_push(x, y, z) dx_vtable(x)->do_push(x, y, z) #define dx_wakeup(x, y, z) 
dx_vtable(x)->do_wakeup(x, y, z) #define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT @@ -221,7 +225,7 @@ .do_xref_cnt = DISPATCH_OBJECT_GLOBAL_REFCNT #endif -#ifdef __LP64__ +#if DISPATCH_SIZEOF_PTR == 8 // the bottom nibble must not be zero, the rest of the bits should be random // we sign extend the 64-bit version so that a better instruction encoding is // generated on Intel @@ -231,32 +235,48 @@ #endif DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, - // The caller of dx_wakeup owns an internal refcount on the object being - // woken up - DISPATCH_WAKEUP_CONSUME = 0x00000001, + // The caller of dx_wakeup owns two internal refcounts on the object being + // woken up. Two are needed for WLH wakeups where two threads need + // the object to remain valid in a non-coordinated way + // - the thread doing the poke for the duration of the poke + // - drainers for the duration of their drain + DISPATCH_WAKEUP_CONSUME_2 = 0x00000001, // Some change to the object needs to be published to drainers. // If the drainer isn't the same thread, some scheme such as the dispatch // queue DIRTY bit must be used and a release barrier likely has to be // involved before dx_wakeup returns - DISPATCH_WAKEUP_FLUSH = 0x00000002, + DISPATCH_WAKEUP_MAKE_DIRTY = 0x00000002, - // A slow waiter was just enqueued - DISPATCH_WAKEUP_SLOW_WAITER = 0x00000004, + // This wakeup is made by a sync owner that still holds the drain lock + DISPATCH_WAKEUP_BARRIER_COMPLETE = 0x00000004, - // The caller desires to apply an override on the object being woken up - // and has already adjusted the `oq_override` field. 
When this flag is - // passed, the priority passed to dx_wakeup() should not be 0 - DISPATCH_WAKEUP_OVERRIDING = 0x00000008, + // This wakeup is caused by a dispatch_block_wait() + DISPATCH_WAKEUP_BLOCK_WAIT = 0x00000008, +); - // At the time this queue was woken up it had an override that must be - // preserved (used to solve a race with _dispatch_queue_drain_try_unlock()) - DISPATCH_WAKEUP_WAS_OVERRIDDEN = 0x00000010, +typedef struct dispatch_invoke_context_s { + struct dispatch_object_s *dic_deferred; +#if HAVE_PTHREAD_WORKQUEUE_NARROWING + uint64_t dic_next_narrow_check; +#endif +#if DISPATCH_COCOA_COMPAT + void *dic_autorelease_pool; +#endif +} dispatch_invoke_context_s, *dispatch_invoke_context_t; -#define _DISPATCH_WAKEUP_OVERRIDE_BITS \ - ((dispatch_wakeup_flags_t)(DISPATCH_WAKEUP_OVERRIDING | \ - DISPATCH_WAKEUP_WAS_OVERRIDDEN)) -); +#if HAVE_PTHREAD_WORKQUEUE_NARROWING +#define DISPATCH_THREAD_IS_NARROWING 1 + +#define dispatch_with_disabled_narrowing(dic, ...) ({ \ + uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \ + dic->dic_next_narrow_check = 0; \ + __VA_ARGS__; \ + dic->dic_next_narrow_check = suspend_narrow_check; \ + }) +#else +#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ +#endif DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_NONE = 0x00000000, @@ -267,12 +287,22 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, // This invoke is a stealer, meaning that it doesn't own the // enqueue lock at drain lock time. // - // @const DISPATCH_INVOKE_OVERRIDING - // This invoke is draining the hierarchy on another root queue and needs - // to fake the identity of the original one. + // @const DISPATCH_INVOKE_WLH + // This invoke is for a bottom WLH // DISPATCH_INVOKE_STEALING = 0x00000001, - DISPATCH_INVOKE_OVERRIDING = 0x00000002, + DISPATCH_INVOKE_WLH = 0x00000002, + + // Misc flags + // + // @const DISPATCH_INVOKE_ASYNC_REPLY + // An asynchronous reply to a message is being handled. 
+ // + // @const DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS + // The next serial drain should not allow sync waiters. + // + DISPATCH_INVOKE_ASYNC_REPLY = 0x00000004, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS = 0x00000008, // Below this point flags are propagated to recursive calls to drain(), // continuation pop() or dx_invoke(). @@ -325,32 +355,37 @@ enum { _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks _DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x0100, // bit set for any root queues + _DISPATCH_QUEUE_BASE_TYPEFLAG = 0x0200, // base of a hierarchy + // targets a root queue #define DISPATCH_CONTINUATION_TYPE(name) \ (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE) - DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, - DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, - DISPATCH_QUEUE_ATTR_TYPE = 3 | _DISPATCH_NODE_TYPE, - - DISPATCH_IO_TYPE = 0 | _DISPATCH_IO_TYPE, - DISPATCH_OPERATION_TYPE = 0 | _DISPATCH_OPERATION_TYPE, - DISPATCH_DISK_TYPE = 0 | _DISPATCH_DISK_TYPE, - - DISPATCH_QUEUE_LEGACY_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SERIAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_CONCURRENT_TYPE = 3 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE | + DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, + DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, + DISPATCH_QUEUE_ATTR_TYPE = 3 | _DISPATCH_NODE_TYPE, + + DISPATCH_IO_TYPE = 0 | _DISPATCH_IO_TYPE, + DISPATCH_OPERATION_TYPE = 0 | _DISPATCH_OPERATION_TYPE, + DISPATCH_DISK_TYPE = 0 | _DISPATCH_DISK_TYPE, + + DISPATCH_QUEUE_LEGACY_TYPE = 1 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_SERIAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_CONCURRENT_TYPE = 3 | _DISPATCH_QUEUE_TYPE, + DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE | _DISPATCH_QUEUE_ROOT_TYPEFLAG, - DISPATCH_QUEUE_RUNLOOP_TYPE = 5 | _DISPATCH_QUEUE_TYPE | - _DISPATCH_QUEUE_ROOT_TYPEFLAG, - DISPATCH_QUEUE_MGR_TYPE = 6 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SPECIFIC_TYPE = 7 | _DISPATCH_QUEUE_TYPE, + 
DISPATCH_QUEUE_NETWORK_EVENT_TYPE = 5 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_RUNLOOP_TYPE = 6 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_MGR_TYPE = 7 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_SPECIFIC_TYPE = 8 | _DISPATCH_QUEUE_TYPE, - DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, + DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, - DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, + DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, + DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, }; @@ -410,38 +445,29 @@ struct dispatch_object_s { #if OS_OBJECT_HAVE_OBJC1 #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ - struct dispatch_object_s *volatile ns##_items_head; \ - unsigned long ns##_serialnum; \ - union { \ - uint64_t volatile __state_field__; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ dispatch_lock __state_field__##_lock, \ uint32_t __state_field__##_bits \ - ); \ - }; /* needs to be 64-bit aligned */ \ - /* LP64 global queue cacheline boundary */ \ + ) DISPATCH_ATOMIC64_ALIGN; \ + struct dispatch_object_s *volatile ns##_items_head; \ + unsigned long ns##_serialnum; \ const char *ns##_label; \ - voucher_t ns##_override_voucher; \ + struct dispatch_object_s *volatile ns##_items_tail; \ dispatch_priority_t ns##_priority; \ - dispatch_priority_t volatile ns##_override; \ - struct dispatch_object_s *volatile ns##_items_tail + int volatile ns##_sref_cnt #else #define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ struct dispatch_object_s *volatile ns##_items_head; \ - union { \ - uint64_t volatile __state_field__; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ + DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ 
dispatch_lock __state_field__##_lock, \ uint32_t __state_field__##_bits \ - ); \ - }; /* needs to be 64-bit aligned */ \ + ) DISPATCH_ATOMIC64_ALIGN; \ /* LP64 global queue cacheline boundary */ \ unsigned long ns##_serialnum; \ const char *ns##_label; \ - voucher_t ns##_override_voucher; \ + struct dispatch_object_s *volatile ns##_items_tail; \ dispatch_priority_t ns##_priority; \ - dispatch_priority_t volatile ns##_override; \ - struct dispatch_object_s *volatile ns##_items_tail + int volatile ns##_sref_cnt #endif OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object, @@ -459,7 +485,9 @@ struct os_mpsc_queue_s { size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); -void *_dispatch_alloc(const void *vtable, size_t size); +void *_dispatch_object_alloc(const void *vtable, size_t size); +void _dispatch_object_finalize(dispatch_object_t dou); +void _dispatch_object_dealloc(dispatch_object_t dou); #if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); #endif @@ -467,17 +495,22 @@ void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT #if USE_OBJC #include +#if __has_include() #include +#else +extern void *objc_autoreleasePoolPush(void); +extern void objc_autoreleasePoolPop(void *context); +#endif // __has_include() #define _dispatch_autorelease_pool_push() \ - objc_autoreleasePoolPush() + objc_autoreleasePoolPush() #define _dispatch_autorelease_pool_pop(context) \ - objc_autoreleasePoolPop(context) + objc_autoreleasePoolPop(context) #else void *_dispatch_autorelease_pool_push(void); void _dispatch_autorelease_pool_pop(void *context); #endif -void *_dispatch_last_resort_autorelease_pool_push(void); -void _dispatch_last_resort_autorelease_pool_pop(void *context); +void _dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic); +void _dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic); #define dispatch_invoke_with_autoreleasepool(flags, ...) 
({ \ void *pool = NULL; \ @@ -493,7 +526,6 @@ void _dispatch_last_resort_autorelease_pool_pop(void *context); do { (void)flags; __VA_ARGS__; } while (0) #endif - #if USE_OBJC OS_OBJECT_OBJC_CLASS_DECL(object); #endif @@ -557,20 +589,20 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); * a barrier to perform prior to tearing down an object when the refcount * reached -1. */ -#define _os_atomic_refcnt_perform2o(o, f, op, m) ({ \ +#define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ typeof(o) _o = (o); \ int _ref_cnt = _o->f; \ if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ - _ref_cnt = os_atomic_##op##2o(_o, f, m); \ + _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ } \ _ref_cnt; \ }) -#define _os_atomic_refcnt_inc2o(o, m) \ - _os_atomic_refcnt_perform2o(o, m, inc, relaxed) +#define _os_atomic_refcnt_add2o(o, m, n) \ + _os_atomic_refcnt_perform2o(o, m, add, n, relaxed) -#define _os_atomic_refcnt_dec2o(o, m) \ - _os_atomic_refcnt_perform2o(o, m, dec, release) +#define _os_atomic_refcnt_sub2o(o, m, n) \ + _os_atomic_refcnt_perform2o(o, m, sub, n, release) #define _os_atomic_refcnt_dispose_barrier2o(o, m) \ (void)os_atomic_load2o(o, m, acquire) @@ -593,23 +625,26 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); * */ #define _os_object_xrefcnt_inc(o) \ - _os_atomic_refcnt_inc2o(o, os_obj_xref_cnt) + _os_atomic_refcnt_add2o(o, os_obj_xref_cnt, 1) #define _os_object_xrefcnt_dec(o) \ - _os_atomic_refcnt_dec2o(o, os_obj_xref_cnt) + _os_atomic_refcnt_sub2o(o, os_obj_xref_cnt, 1) #define _os_object_xrefcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt) -#define _os_object_refcnt_inc(o) \ - _os_atomic_refcnt_inc2o(o, os_obj_ref_cnt) +#define _os_object_refcnt_add(o, n) \ + _os_atomic_refcnt_add2o(o, os_obj_ref_cnt, n) -#define _os_object_refcnt_dec(o) \ - _os_atomic_refcnt_dec2o(o, os_obj_ref_cnt) +#define _os_object_refcnt_sub(o, n) \ + _os_atomic_refcnt_sub2o(o, 
os_obj_ref_cnt, n) #define _os_object_refcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) +void _os_object_atfork_child(void); +void _os_object_atfork_parent(void); +void _os_object_atfork_prepare(void); void _os_object_init(void); unsigned long _os_object_retain_count(_os_object_t obj); bool _os_object_retain_weak(_os_object_t obj); diff --git a/src/once.c b/src/once.c index d7d6a8e64..c01538c9d 100644 --- a/src/once.c +++ b/src/once.c @@ -40,9 +40,15 @@ dispatch_once(dispatch_once_t *val, dispatch_block_t block) } #endif -DISPATCH_NOINLINE -void -dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) +#if DISPATCH_ONCE_INLINE_FASTPATH +#define DISPATCH_ONCE_SLOW_INLINE inline DISPATCH_ALWAYS_INLINE +#else +#define DISPATCH_ONCE_SLOW_INLINE DISPATCH_NOINLINE +#endif // DISPATCH_ONCE_INLINE_FASTPATH + +DISPATCH_ONCE_SLOW_INLINE +static void +dispatch_once_f_slow(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { #if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE dispatch_once_gate_t l = (dispatch_once_gate_t)val; @@ -63,61 +69,9 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) dow.dow_thread = _dispatch_tid_self(); _dispatch_client_callout(ctxt, func); - // The next barrier must be long and strong. - // - // The scenario: SMP systems with weakly ordered memory models - // and aggressive out-of-order instruction execution. - // - // The problem: - // - // The dispatch_once*() wrapper macro causes the callee's - // instruction stream to look like this (pseudo-RISC): - // - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: - // load r6, data-addr - // - // May be re-ordered like so: - // - // load r6, data-addr - // load r5, pred-addr - // cmpi r5, -1 - // beq 1f - // call dispatch_once*() - // 1f: - // - // Normally, a barrier on the read side is used to workaround - // the weakly ordered memory model. 
But barriers are expensive - // and we only need to synchronize once! After func(ctxt) - // completes, the predicate will be marked as "done" and the - // branch predictor will correctly skip the call to - // dispatch_once*(). - // - // A far faster alternative solution: Defeat the speculative - // read-ahead of peer CPUs. - // - // Modern architectures will throw away speculative results - // once a branch mis-prediction occurs. Therefore, if we can - // ensure that the predicate is not marked as being complete - // until long after the last store by func(ctxt), then we have - // defeated the read-ahead of peer CPUs. - // - // In other words, the last "store" by func(ctxt) must complete - // and then N cycles must elapse before ~0l is stored to *val. - // The value of N is whatever is sufficient to defeat the - // read-ahead mechanism of peer CPUs. - // - // On some CPUs, the most fully synchronizing instruction might - // need to be issued. - - os_atomic_maximally_synchronizing_barrier(); - // above assumed to contain release barrier - next = os_atomic_xchg(vval, DISPATCH_ONCE_DONE, relaxed); + next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val); while (next != tail) { - _dispatch_wait_until(tmp = (_dispatch_once_waiter_t)next->dow_next); + tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next); event = &next->dow_event; next = tmp; _dispatch_thread_event_signal(event); @@ -129,7 +83,7 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) if (next == DISPATCH_ONCE_DONE) { break; } - if (os_atomic_cmpxchgvw(vval, next, tail, &next, release)) { + if (os_atomic_cmpxchgv(vval, next, tail, &next, release)) { dow.dow_thread = next->dow_thread; dow.dow_next = next; if (dow.dow_thread) { @@ -147,3 +101,15 @@ dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) } #endif } + +DISPATCH_NOINLINE +void +dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) +{ +#if 
!DISPATCH_ONCE_INLINE_FASTPATH + if (likely(os_atomic_load(val, acquire) == DLOCK_ONCE_DONE)) { + return; + } +#endif // !DISPATCH_ONCE_INLINE_FASTPATH + return dispatch_once_f_slow(val, ctxt, func); +} diff --git a/src/provider.d b/src/provider.d index ede3c56b3..13bcf7a93 100644 --- a/src/provider.d +++ b/src/provider.d @@ -101,3 +101,41 @@ provider dispatch { #pragma D attributes Private/Private/Common provider dispatch function #pragma D attributes Evolving/Evolving/Common provider dispatch name #pragma D attributes Evolving/Evolving/Common provider dispatch args + +typedef struct voucher_s *voucher_t; + +/* + * Probes for vouchers + */ +provider voucher { + + /* + * Voucher lifetime: + * + * voucher$target:::create A new voucher is being created + * voucher$target:::dispose A voucher is being freed + * voucher$target:::retain A voucher is being retained + * voucher$target:::release A voucher is being released + */ + probe create(voucher_t voucher, mach_port_t kv, uint64_t activity_id); + probe dispose(voucher_t voucher); + probe retain(voucher_t voucher, int resulting_refcnt); + probe release(voucher_t voucher, int resulting_refcnt); + + /* + * Thread adoption + * + * voucher$target:::adopt A voucher is being adopted by the current thread + * voucher$target:::orphan A voucher is being orphanned by the current thread + */ + probe adopt(voucher_t voucher); + probe orphan(voucher_t voucher); + +}; + +#pragma D attributes Evolving/Evolving/Common provider voucher provider +#pragma D attributes Private/Private/Common provider voucher module +#pragma D attributes Private/Private/Common provider voucher function +#pragma D attributes Evolving/Evolving/Common provider voucher name +#pragma D attributes Evolving/Evolving/Common provider voucher args + diff --git a/src/queue.c b/src/queue.c index 58c545b17..23eb63a7e 100644 --- a/src/queue.c +++ b/src/queue.c @@ -20,9 +20,12 @@ #include "internal.h" #if HAVE_MACH -#include "protocol.h" +#include "protocol.h" // 
_dispatch_send_wakeup_runloop_thread #endif +#if HAVE_PTHREAD_WORKQUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE +#define DISPATCH_USE_WORKQUEUES 1 +#endif #if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \ !defined(DISPATCH_ENABLE_THREAD_POOL) #define DISPATCH_ENABLE_THREAD_POOL 1 @@ -30,60 +33,70 @@ #if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL #define DISPATCH_USE_PTHREAD_POOL 1 #endif -#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || DISPATCH_DEBUG) \ - && !defined(DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK) -#define DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK 1 -#endif -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK && \ - !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ +#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || \ + DISPATCH_DEBUG) && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) #define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 #endif -#if HAVE_PTHREAD_WORKQUEUE_QOS && !DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK -#undef HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 0 +#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && (DISPATCH_DEBUG || \ + (!DISPATCH_USE_KEVENT_WORKQUEUE && !HAVE_PTHREAD_WORKQUEUE_QOS)) && \ + !defined(DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) +#define DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 +#endif +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || \ + DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || \ + DISPATCH_USE_INTERNAL_WORKQUEUE +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#define DISPATCH_USE_WORKQ_PRIORITY 1 +#endif +#define DISPATCH_USE_WORKQ_OPTIONS 1 #endif -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ + +#if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK #define pthread_workqueue_t void* #endif static void _dispatch_sig_thread(void *ctxt); static void _dispatch_cache_cleanup(void *value); -static void 
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp); static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc); static void _dispatch_queue_cleanup(void *ctxt); +static void _dispatch_wlh_cleanup(void *ctxt); static void _dispatch_deferred_items_cleanup(void *ctxt); static void _dispatch_frame_cleanup(void *ctxt); static void _dispatch_context_cleanup(void *ctxt); -static void _dispatch_non_barrier_complete(dispatch_queue_t dq); -static inline void _dispatch_global_queue_poke(dispatch_queue_t dq); +static void _dispatch_queue_barrier_complete(dispatch_queue_t dq, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +static void _dispatch_queue_non_barrier_complete(dispatch_queue_t dq); +static void _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_qos_t qos); +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, + dispatch_queue_t dq, dispatch_qos_t qos); +static inline void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t, + uint64_t dq_state, dispatch_wakeup_flags_t flags); +#endif #if HAVE_PTHREAD_WORKQUEUES static void _dispatch_worker_thread4(void *context); #if HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_worker_thread3(pthread_priority_t priority); #endif -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static void _dispatch_worker_thread2(int priority, int options, void *context); #endif #endif #if DISPATCH_USE_PTHREAD_POOL static void *_dispatch_worker_thread(void *context); -static int _dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset); #endif #if DISPATCH_COCOA_COMPAT static dispatch_once_t _dispatch_main_q_handle_pred; static void _dispatch_runloop_queue_poke(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags); + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); static void 
_dispatch_runloop_queue_handle_init(void *ctxt); static void _dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq); #endif -static void _dispatch_root_queues_init_once(void *context); -static dispatch_once_t _dispatch_root_queues_pred; - #pragma mark - #pragma mark dispatch_root_queue @@ -150,22 +163,29 @@ static struct dispatch_pthread_root_queue_context_s }; #endif -#define MAX_PTHREAD_COUNT 255 +#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 +#endif struct dispatch_root_queue_context_s { union { struct { - unsigned int volatile dgq_pending; -#if HAVE_PTHREAD_WORKQUEUES + int volatile dgq_pending; +#if DISPATCH_USE_WORKQUEUES qos_class_t dgq_qos; - int dgq_wq_priority, dgq_wq_options; +#if DISPATCH_USE_WORKQ_PRIORITY + int dgq_wq_priority; +#endif +#if DISPATCH_USE_WORKQ_OPTIONS + int dgq_wq_options; +#endif #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL pthread_workqueue_t dgq_kworkqueue; #endif -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL void *dgq_ctxt; - uint32_t volatile dgq_thread_pool_size; + int32_t volatile dgq_thread_pool_size; #endif }; char _dgq_pad[DISPATCH_CACHELINE_SIZE]; @@ -184,132 +204,180 @@ typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; DISPATCH_CACHELINE_ALIGN static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES 
- .dgq_qos = _DISPATCH_QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_MAINTENANCE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_BACKGROUND, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ -#if 
HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_UTILITY, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_DEFAULT, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ -#if 
HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_USER_INITIATED, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = 0, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], #endif }}}, [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ -#if HAVE_PTHREAD_WORKQUEUES - .dgq_qos = _DISPATCH_QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQUEUES + .dgq_qos = QOS_CLASS_USER_INTERACTIVE, +#if DISPATCH_USE_WORKQ_PRIORITY .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, +#endif +#if DISPATCH_USE_WORKQ_OPTIONS .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, #endif +#endif #if DISPATCH_ENABLE_THREAD_POOL .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], @@ -321,68 +389,75 @@ static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { // renaming this symbol DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_root_queues[] = { -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, ...) \ - [DISPATCH_ROOT_QUEUE_IDX_##n] = { \ +#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ + ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? 
\ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) +#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \ .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ .do_ctxt = &_dispatch_root_queue_contexts[ \ - DISPATCH_ROOT_QUEUE_IDX_##n], \ - .dq_width = DISPATCH_QUEUE_WIDTH_POOL, \ - .dq_override_voucher = DISPATCH_NO_VOUCHER, \ - .dq_override = DISPATCH_SATURATED_OVERRIDE, \ + _DISPATCH_ROOT_QUEUE_IDX(n, flags)], \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \ + DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \ + ((flags & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) ? 0 : \ + DISPATCH_QOS_##n << DISPATCH_PRIORITY_OVERRIDE_SHIFT), \ __VA_ARGS__ \ } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, .dq_label = "com.apple.root.maintenance-qos", .dq_serialnum = 4, ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_serialnum = 5, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, .dq_label = "com.apple.root.background-qos", .dq_serialnum = 6, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.background-qos.overcommit", .dq_serialnum = 7, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, .dq_label = "com.apple.root.utility-qos", .dq_serialnum = 8, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.utility-qos.overcommit", .dq_serialnum = 9, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS, + 
_DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE, .dq_label = "com.apple.root.default-qos", .dq_serialnum = 10, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", .dq_serialnum = 11, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, .dq_label = "com.apple.root.user-initiated-qos", .dq_serialnum = 12, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", .dq_serialnum = 13, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, .dq_label = "com.apple.root.user-interactive-qos", .dq_serialnum = 14, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE_QOS_OVERCOMMIT, + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", .dq_serialnum = 15, ), }; -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { [WORKQ_BG_PRIOQUEUE][0] = &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], @@ -405,37 +480,7 @@ static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { &_dispatch_root_queues[ DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], }; -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - -#define DISPATCH_PRIORITY_COUNT 5 - -enum { - // No DISPATCH_PRIORITY_IDX_MAINTENANCE define because there is no legacy - // maintenance priority - DISPATCH_PRIORITY_IDX_BACKGROUND = 0, - DISPATCH_PRIORITY_IDX_NON_INTERACTIVE, - DISPATCH_PRIORITY_IDX_LOW, - DISPATCH_PRIORITY_IDX_DEFAULT, - DISPATCH_PRIORITY_IDX_HIGH, -}; - -static qos_class_t 
_dispatch_priority2qos[] = { - [DISPATCH_PRIORITY_IDX_BACKGROUND] = _DISPATCH_QOS_CLASS_BACKGROUND, - [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = _DISPATCH_QOS_CLASS_UTILITY, - [DISPATCH_PRIORITY_IDX_LOW] = _DISPATCH_QOS_CLASS_UTILITY, - [DISPATCH_PRIORITY_IDX_DEFAULT] = _DISPATCH_QOS_CLASS_DEFAULT, - [DISPATCH_PRIORITY_IDX_HIGH] = _DISPATCH_QOS_CLASS_USER_INITIATED, -}; - -#if HAVE_PTHREAD_WORKQUEUE_QOS -static const int _dispatch_priority2wq[] = { - [DISPATCH_PRIORITY_IDX_BACKGROUND] = WORKQ_BG_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_NON_INTERACTIVE] = WORKQ_NON_INTERACTIVE_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_LOW] = WORKQ_LOW_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_DEFAULT] = WORKQ_DEFAULT_PRIOQUEUE, - [DISPATCH_PRIORITY_IDX_HIGH] = WORKQ_HIGH_PRIOQUEUE, -}; -#endif +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES static struct dispatch_queue_s _dispatch_mgr_root_queue; @@ -449,12 +494,13 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue; DISPATCH_CACHELINE_ALIGN struct dispatch_queue_s _dispatch_mgr_q = { DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr), - .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1), + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | + DISPATCH_QUEUE_ROLE_BASE_ANON, .do_targetq = &_dispatch_mgr_root_queue, .dq_label = "com.apple.libdispatch-manager", - .dq_width = 1, - .dq_override_voucher = DISPATCH_NO_VOUCHER, - .dq_override = DISPATCH_SATURATED_OVERRIDE, + .dq_atomic_flags = DQF_WIDTH(1), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, .dq_serialnum = 2, }; @@ -464,48 +510,16 @@ dispatch_get_global_queue(long priority, unsigned long flags) if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { return DISPATCH_BAD_INPUT; } - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); - qos_class_t qos; - switch (priority) { -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - case _DISPATCH_QOS_CLASS_MAINTENANCE: 
- if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] - .dq_priority) { - // map maintenance to background on old kernel - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; - } else { - qos = (qos_class_t)priority; - } - break; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - case DISPATCH_QUEUE_PRIORITY_BACKGROUND: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_BACKGROUND]; - break; - case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_NON_INTERACTIVE]; - break; - case DISPATCH_QUEUE_PRIORITY_LOW: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_LOW]; - break; - case DISPATCH_QUEUE_PRIORITY_DEFAULT: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_DEFAULT]; - break; - case DISPATCH_QUEUE_PRIORITY_HIGH: - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; - break; - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (!_dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] - .dq_priority) { - qos = _dispatch_priority2qos[DISPATCH_PRIORITY_IDX_HIGH]; - break; - } + dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == QOS_CLASS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } else if (qos == QOS_CLASS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; + } #endif - // fallthrough - default: - qos = (qos_class_t)priority; - break; + if (qos == DISPATCH_QOS_UNSPECIFIED) { + return DISPATCH_BAD_INPUT; } return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); } @@ -515,7 +529,7 @@ static inline dispatch_queue_t _dispatch_get_current_queue(void) { return _dispatch_queue_get_current() ?: - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); } dispatch_queue_t @@ -551,21 +565,20 @@ dispatch_assert_queue(dispatch_queue_t dq) "dispatch_assert_queue()"); } uint64_t dq_state = 
os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_pended(dq_state))) { - goto fail; - } - if (likely(_dq_state_drain_owner(dq_state) == _dispatch_tid_self())) { + if (likely(_dq_state_drain_locked_by_self(dq_state))) { return; } - if (likely(dq->dq_width > 1)) { - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. - if (fastpath(_dispatch_thread_frame_find_queue(dq))) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. + // + // However if we can have thread bound queues, these mess with lock + // ownership and we always have to take the slowpath + if (likely(DISPATCH_COCOA_COMPAT || dq->dq_width > 1)) { + if (likely(_dispatch_thread_frame_find_queue(dq))) { return; } } -fail: _dispatch_assert_queue_fail(dq, true); } @@ -578,14 +591,14 @@ dispatch_assert_queue_not(dispatch_queue_t dq) "dispatch_assert_queue_not()"); } uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (_dq_state_drain_pended(dq_state)) { - return; - } - if (likely(_dq_state_drain_owner(dq_state) != _dispatch_tid_self())) { - if (likely(dq->dq_width == 1)) { - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. + if (likely(!_dq_state_drain_locked_by_self(dq_state))) { + // we can look at the width: if it is changing while we read it, + // it means that a barrier is running on `dq` concurrently, which + // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
+ // + // However if we can have thread bound queues, these mess with lock + // ownership and we always have to take the slowpath + if (likely(!DISPATCH_COCOA_COMPAT && dq->dq_width == 1)) { return; } if (likely(!_dispatch_thread_frame_find_queue(dq))) { @@ -625,48 +638,14 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq) #pragma mark - #pragma mark dispatch_init -#if HAVE_PTHREAD_WORKQUEUE_QOS -pthread_priority_t _dispatch_background_priority; -pthread_priority_t _dispatch_user_initiated_priority; - -static void -_dispatch_root_queues_init_qos(int supported) -{ - pthread_priority_t p; - qos_class_t qos; - unsigned int i; - for (i = 0; i < DISPATCH_PRIORITY_COUNT; i++) { - p = _pthread_qos_class_encode_workqueue(_dispatch_priority2wq[i], 0); - qos = _pthread_qos_class_decode(p, NULL, NULL); - dispatch_assert(qos != _DISPATCH_QOS_CLASS_UNSPECIFIED); - _dispatch_priority2qos[i] = qos; - } - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - qos = _dispatch_root_queue_contexts[i].dgq_qos; - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !(supported & WORKQ_FEATURE_MAINTENANCE)) { - continue; - } - unsigned long flags = i & 1 ? 
_PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0; - flags |= _PTHREAD_PRIORITY_ROOTQUEUE_FLAG; - if (i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS || - i == DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT) { - flags |= _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - } - p = _pthread_qos_class_encode(qos, 0, flags); - _dispatch_root_queues[i].dq_priority = (dispatch_priority_t)p; - } -} -#endif // HAVE_PTHREAD_WORKQUEUE_QOS - static inline bool _dispatch_root_queues_init_workq(int *wq_supported) { - int r; + int r; (void)r; bool result = false; *wq_supported = 0; -#if HAVE_PTHREAD_WORKQUEUES - bool disable_wq = false; +#if DISPATCH_USE_WORKQUEUES + bool disable_wq = false; (void)disable_wq; #if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); #endif @@ -677,10 +656,11 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif #if DISPATCH_USE_KEVENT_WORKQUEUE bool disable_kevent_wq = false; -#if DISPATCH_DEBUG +#if DISPATCH_DEBUG || DISPATCH_PROFILE disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")); #endif #endif + if (!disable_wq && !disable_qos) { *wq_supported = _pthread_workqueue_supported(); #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -691,13 +671,10 @@ _dispatch_root_queues_init_workq(int *wq_supported) offsetof(struct dispatch_queue_s, dq_serialnum), 0); #if DISPATCH_USE_MGR_THREAD _dispatch_kevent_workqueue_enabled = !r; -#endif -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - _dispatch_evfilt_machport_direct_enabled = !r; #endif result = !r; } else -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE if (*wq_supported & WORKQ_FEATURE_FINEPRIO) { #if DISPATCH_USE_MGR_THREAD r = _pthread_workqueue_init(_dispatch_worker_thread3, @@ -705,10 +682,13 @@ _dispatch_root_queues_init_workq(int *wq_supported) result = !r; #endif } - if (result) _dispatch_root_queues_init_qos(*wq_supported); + if (!(*wq_supported & WORKQ_FEATURE_MAINTENANCE)) { + DISPATCH_INTERNAL_CRASH(*wq_supported, + "QoS Maintenance support required"); + } } 
#endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP if (!result && !disable_wq) { pthread_workqueue_setdispatchoffset_np( offsetof(struct dispatch_queue_s, dq_serialnum)); @@ -718,7 +698,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif result = !r; } -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL if (!result) { #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK @@ -728,7 +708,7 @@ _dispatch_root_queues_init_workq(int *wq_supported) (void)dispatch_assume_zero(r); } #endif - int i; + size_t i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { pthread_workqueue_t pwq = NULL; dispatch_root_queue_context_t qc; @@ -747,7 +727,15 @@ _dispatch_root_queues_init_workq(int *wq_supported) result = result || dispatch_assume(pwq); } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - qc->dgq_kworkqueue = pwq ? pwq : (void*)(~0ul); + if (pwq) { + qc->dgq_kworkqueue = pwq; + } else { + qc->dgq_kworkqueue = (void*)(~0ul); + // because the fastpath of _dispatch_global_queue_poke didn't + // know yet that we're using the internal pool implementation + // we have to undo its setting of dgq_pending + qc->dgq_pending = 0; + } } #if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK if (!disable_wq) { @@ -757,23 +745,23 @@ _dispatch_root_queues_init_workq(int *wq_supported) #endif } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES return result; } #if DISPATCH_USE_PTHREAD_POOL static inline void _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, - uint8_t pool_size, bool overcommit) + int32_t pool_size, bool overcommit) { dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - uint32_t thread_pool_size = overcommit ? 
MAX_PTHREAD_COUNT : - dispatch_hw_config(active_cpus); + int32_t thread_pool_size = overcommit ? DISPATCH_WORKQ_MAX_PTHREAD_COUNT : + (int32_t)dispatch_hw_config(active_cpus); if (slowpath(pool_size) && pool_size < thread_pool_size) { thread_pool_size = pool_size; } qc->dgq_thread_pool_size = thread_pool_size; -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES if (qc->dgq_qos) { (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); (void)dispatch_assume_zero(pthread_attr_setdetachstate( @@ -784,30 +772,12 @@ _dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, #endif } #endif // HAVE_PTHREAD_WORKQUEUES -#if USE_MACH_SEM - // override the default FIFO behavior for the pool semaphores - kern_return_t kr = semaphore_create(mach_task_self(), - &pqc->dpq_thread_mediator.dsema_port, SYNC_POLICY_LIFO, 0); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - (void)dispatch_assume(pqc->dpq_thread_mediator.dsema_port); -#elif USE_POSIX_SEM - /* XXXRW: POSIX semaphores don't support LIFO? 
*/ - int ret = sem_init(&(pqc->dpq_thread_mediator.dsema_sem), 0, 0); - (void)dispatch_assume_zero(ret); -#endif + _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; + _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); + _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); } #endif // DISPATCH_USE_PTHREAD_POOL -static dispatch_once_t _dispatch_root_queues_pred; - -void -_dispatch_root_queues_init(void) -{ - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -} - static void _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { @@ -815,10 +785,10 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) _dispatch_fork_becomes_unsafe(); if (!_dispatch_root_queues_init_workq(&wq_supported)) { #if DISPATCH_ENABLE_THREAD_POOL - int i; + size_t i; for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { bool overcommit = true; -#if TARGET_OS_EMBEDDED +#if TARGET_OS_EMBEDDED || (DISPATCH_USE_INTERNAL_WORKQUEUE && HAVE_DISPATCH_WORKQ_MONITORING) // some software hangs if the non-overcommitting queues do not // overcommit when threads block. 
Someday, this behavior should // apply to all platforms @@ -836,12 +806,19 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) } } +void +_dispatch_root_queues_init(void) +{ + static dispatch_once_t _dispatch_root_queues_pred; + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) { - dispatch_assert(DISPATCH_QUEUE_QOS_COUNT == 6); - dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 12); + dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 2 * DISPATCH_QOS_MAX); dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == -DISPATCH_QUEUE_PRIORITY_HIGH); @@ -849,13 +826,7 @@ libdispatch_init(void) DISPATCH_ROOT_QUEUE_COUNT); dispatch_assert(countof(_dispatch_root_queue_contexts) == DISPATCH_ROOT_QUEUE_COUNT); - dispatch_assert(countof(_dispatch_priority2qos) == - DISPATCH_PRIORITY_COUNT); -#if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_assert(countof(_dispatch_priority2wq) == - DISPATCH_PRIORITY_COUNT); -#endif -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP dispatch_assert(sizeof(_dispatch_wq2root_queues) / sizeof(_dispatch_wq2root_queues[0][0]) == WORKQ_NUM_PRIOQUEUE * 2); @@ -877,17 +848,10 @@ libdispatch_init(void) dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % DISPATCH_CACHELINE_SIZE == 0); - #if HAVE_PTHREAD_WORKQUEUE_QOS - // 26497968 _dispatch_user_initiated_priority should be set for qos - // propagation to work properly - pthread_priority_t p = _pthread_qos_class_encode(qos_class_main(), 0, 0); - _dispatch_main_q.dq_priority = (dispatch_priority_t)p; - _dispatch_main_q.dq_override = p & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_USER_INITIATED, 0, 0); - _dispatch_user_initiated_priority = p; - p = _pthread_qos_class_encode(_DISPATCH_QOS_CLASS_BACKGROUND, 0, 0); - _dispatch_background_priority = p; + dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main()); + 
dispatch_priority_t pri = _dispatch_priority_make(qos, 0); + _dispatch_main_q.dq_priority = _dispatch_priority_with_override_qos(pri, qos); #if DISPATCH_DEBUG if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { _dispatch_set_qos_class_enabled = 1; @@ -898,25 +862,24 @@ libdispatch_init(void) #if DISPATCH_USE_THREAD_LOCAL_STORAGE _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); #else + _dispatch_thread_key_create(&dispatch_priority_key, NULL); + _dispatch_thread_key_create(&dispatch_r2k_key, NULL); _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); - _dispatch_thread_key_create(&dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); - _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); - _dispatch_thread_key_create(&dispatch_defaultpriority_key, NULL); _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, NULL); -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_introspection_key , NULL); +#elif DISPATCH_PERF_MON _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); #endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_thread_key_create(&dispatch_sema4_key, - _dispatch_thread_semaphore_dispose); - } -#endif + _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); + _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); #endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 @@ -932,50 +895,18 @@ 
libdispatch_init(void) dispatch_atfork_parent, dispatch_atfork_child)); #endif _dispatch_hw_config_init(); + _dispatch_time_init(); _dispatch_vtable_init(); _os_object_init(); _voucher_init(); _dispatch_introspection_init(); } -#if HAVE_MACH -static dispatch_once_t _dispatch_mach_host_port_pred; -static mach_port_t _dispatch_mach_host_port; - -static void -_dispatch_mach_host_port_init(void *ctxt DISPATCH_UNUSED) -{ - kern_return_t kr; - mach_port_t mp, mhp = mach_host_self(); - kr = host_get_host_port(mhp, &mp); - DISPATCH_VERIFY_MIG(kr); - if (fastpath(!kr)) { - // mach_host_self returned the HOST_PRIV port - kr = mach_port_deallocate(mach_task_self(), mhp); - DISPATCH_VERIFY_MIG(kr); - mhp = mp; - } else if (kr != KERN_INVALID_ARGUMENT) { - (void)dispatch_assume_zero(kr); - } - if (!fastpath(mhp)) { - DISPATCH_CLIENT_CRASH(kr, "Could not get unprivileged host port"); - } - _dispatch_mach_host_port = mhp; -} - -mach_port_t -_dispatch_get_mach_host_port(void) -{ - dispatch_once_f(&_dispatch_mach_host_port_pred, NULL, - _dispatch_mach_host_port_init); - return _dispatch_mach_host_port; -} -#endif - #if DISPATCH_USE_THREAD_LOCAL_STORAGE #include #include +#ifndef __ANDROID__ #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t @@ -985,34 +916,54 @@ gettid(void) } #else #error "SYS_gettid unavailable on this system" -#endif +#endif /* SYS_gettid */ +#endif /* ! 
__ANDROID__ */ #define _tsd_call_cleanup(k, f) do { \ if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ - } while (0) + } while (0) + +#ifdef __ANDROID__ +static void (*_dispatch_thread_detach_callback)(void); + +void +_dispatch_install_thread_detach_callback(dispatch_function_t cb) +{ + if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { + DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); + } +} +#endif void _libdispatch_tsd_cleanup(void *ctx) { struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_r2k_key, NULL); + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, NULL); - _tsd_call_cleanup(dispatch_defaultpriority_key, NULL); -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_introspection_key, NULL); +#elif DISPATCH_PERF_MON _tsd_call_cleanup(dispatch_bcounter_key, NULL); #endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - _tsd_call_cleanup(dispatch_sema4_key, _dispatch_thread_semaphore_dispose); -#endif - _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); +#ifdef __ANDROID__ + if (_dispatch_thread_detach_callback) { + _dispatch_thread_detach_callback(); + } +#endif tsd->tid = 0; } @@ -1025,28 +976,22 @@ libdispatch_tsd_init(void) } #endif -DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_NOTHROW void -dispatch_atfork_child(void) +_dispatch_queue_atfork_child(void) { + 
dispatch_queue_t main_q = &_dispatch_main_q; void *crash = (void *)0x100; size_t i; -#if HAVE_MACH - _dispatch_mach_host_port_pred = 0; - _dispatch_mach_host_port = MACH_VOUCHER_NULL; -#endif - _voucher_atfork_child(); - if (!_dispatch_is_multithreaded_inline()) { - // clear the _PROHIBIT bit if set - _dispatch_unsafe_fork = 0; - return; + if (_dispatch_queue_is_thread_bound(main_q)) { + _dispatch_queue_set_bound_thread(main_q); } - _dispatch_unsafe_fork = 0; - _dispatch_child_of_unsafe_fork = true; - _dispatch_main_q.dq_items_head = crash; - _dispatch_main_q.dq_items_tail = crash; + if (!_dispatch_is_multithreaded_inline()) return; + + main_q->dq_items_head = crash; + main_q->dq_items_tail = crash; _dispatch_mgr_q.dq_items_head = crash; _dispatch_mgr_q.dq_items_tail = crash; @@ -1057,6 +1002,33 @@ dispatch_atfork_child(void) } } +DISPATCH_NOINLINE +void +_dispatch_fork_becomes_unsafe_slow(void) +{ + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { + DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + } +} + +DISPATCH_NOINLINE +void +_dispatch_prohibit_transition_to_multithreaded(bool prohibit) +{ + if (prohibit) { + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { + DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); + } + } else { + os_atomic_and(&_dispatch_unsafe_fork, + (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + } +} + #pragma mark - #pragma mark dispatch_queue_attr_t @@ -1066,13 +1038,13 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) { qos_class_t qos = (qos_class_t)qos_class; switch (qos) { - case _DISPATCH_QOS_CLASS_MAINTENANCE: - case _DISPATCH_QOS_CLASS_BACKGROUND: - case _DISPATCH_QOS_CLASS_UTILITY: - case _DISPATCH_QOS_CLASS_DEFAULT: - case 
_DISPATCH_QOS_CLASS_USER_INITIATED: - case _DISPATCH_QOS_CLASS_USER_INTERACTIVE: - case _DISPATCH_QOS_CLASS_UNSPECIFIED: + case QOS_CLASS_MAINTENANCE: + case QOS_CLASS_BACKGROUND: + case QOS_CLASS_UTILITY: + case QOS_CLASS_DEFAULT: + case QOS_CLASS_USER_INITIATED: + case QOS_CLASS_USER_INTERACTIVE: + case QOS_CLASS_UNSPECIFIED: break; default: return false; @@ -1083,20 +1055,6 @@ _dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) return true; } -#define DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(qos) \ - [_DISPATCH_QOS_CLASS_##qos] = DQA_INDEX_QOS_CLASS_##qos - -static const -_dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UNSPECIFIED), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(MAINTENANCE), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(BACKGROUND), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(UTILITY), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(DEFAULT), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INITIATED), - DISPATCH_QUEUE_ATTR_QOS2IDX_INITIALIZER(USER_INTERACTIVE), -}; - #define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \ ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? 
\ DQA_INDEX_NON_OVERCOMMIT : \ @@ -1114,10 +1072,10 @@ _dispatch_queue_attr_index_qos_class_t _dispatch_queue_attr_qos2idx[] = { #define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) -#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (_dispatch_queue_attr_qos2idx[(qos)]) +#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (qos) static inline dispatch_queue_attr_t -_dispatch_get_queue_attr(qos_class_t qos, int prio, +_dispatch_get_queue_attr(dispatch_qos_t qos, int prio, _dispatch_queue_attr_overcommit_t overcommit, dispatch_autorelease_frequency_t frequency, bool concurrent, bool inactive) @@ -1134,16 +1092,16 @@ _dispatch_get_queue_attr(qos_class_t qos, int prio, dispatch_queue_attr_t _dispatch_get_default_queue_attr(void) { - return _dispatch_get_queue_attr(_DISPATCH_QOS_CLASS_UNSPECIFIED, 0, + return _dispatch_get_queue_attr(DISPATCH_QOS_UNSPECIFIED, 0, _dispatch_queue_attr_overcommit_unspecified, DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); } dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, - dispatch_qos_class_t qos_class, int relative_priority) + dispatch_qos_class_t qos_class, int relpri) { - if (!_dispatch_qos_class_valid(qos_class, relative_priority)) { + if (!_dispatch_qos_class_valid(qos_class, relpri)) { return DISPATCH_BAD_INPUT; } if (!slowpath(dqa)) { @@ -1151,8 +1109,8 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(qos_class, relative_priority, - dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, + return _dispatch_get_queue_attr(_dispatch_qos_from_qos_class(qos_class), + relpri, dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } @@ -1164,8 +1122,9 @@ dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { 
DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, dqa->dqa_overcommit, + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true); } @@ -1178,8 +1137,9 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, overcommit ? + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), overcommit ? _dispatch_queue_attr_overcommit_enabled : _dispatch_queue_attr_overcommit_disabled, dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, @@ -1203,32 +1163,135 @@ dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } - return _dispatch_get_queue_attr(dqa->dqa_qos_class, - dqa->dqa_relative_priority, dqa->dqa_overcommit, + dispatch_priority_t pri = dqa->dqa_qos_and_relpri; + return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), + _dispatch_priority_relpri(pri), dqa->dqa_overcommit, frequency, dqa->dqa_concurrent, dqa->dqa_inactive); } #pragma mark - #pragma mark dispatch_queue_t -// skip zero -// 1 - main_q -// 2 - mgr_q -// 3 - mgr_root_q -// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues -// we use 'xadd' on Intel, so the initial value == next assigned -unsigned long volatile _dispatch_queue_serial_numbers = 16; +void +dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) +{ + if (dq->do_ref_cnt == 
DISPATCH_OBJECT_GLOBAL_REFCNT) { + return; + } + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq); + if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); + } + dq->dq_label = label; +} + +static inline bool +_dispatch_base_queue_is_wlh(dispatch_queue_t dq, dispatch_queue_t tq) +{ + (void)dq; (void)tq; + return false; +} + +static void +_dispatch_queue_inherit_wlh_from_target(dispatch_queue_t dq, + dispatch_queue_t tq) +{ + uint64_t old_state, new_state, role; + + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + role = DISPATCH_QUEUE_ROLE_INNER; + } else if (_dispatch_base_queue_is_wlh(dq, tq)) { + role = DISPATCH_QUEUE_ROLE_BASE_WLH; + } else { + role = DISPATCH_QUEUE_ROLE_BASE_ANON; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= role; + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); + } + }); + + dispatch_wlh_t cur_wlh = _dispatch_get_wlh(); + if (cur_wlh == (dispatch_wlh_t)dq && !_dq_state_is_base_wlh(new_state)) { + _dispatch_event_loop_leave_immediate(cur_wlh, new_state); + } + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); +#else + _dispatch_queue_atomic_flags_set_and_clear(tq, DQF_TARGETED, DQF_LEGACY); +#endif + } +} + +unsigned long volatile _dispatch_queue_serial_numbers = + DISPATCH_QUEUE_SERIAL_NUMBER_INIT; + +dispatch_priority_t +_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, + dispatch_wlh_t *wlh_out) +{ + dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_queue_t tq = dq->do_targetq; + dispatch_priority_t tqp = tq->dq_priority &DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_wlh_t wlh = DISPATCH_WLH_ANON; + + if (_dq_state_is_base_wlh(dq->dq_state)) { + wlh = (dispatch_wlh_t)dq; + } + + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { + if 
(unlikely(tq == &_dispatch_mgr_q)) { + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; + } + if (unlikely(_dispatch_queue_is_thread_bound(tq))) { + // thread-bound hierarchies are weird, we need to install + // from the context of the thread this hierarchy is bound to + if (wlh_out) *wlh_out = NULL; + return 0; + } + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq); + if (wlh_out) *wlh_out = NULL; + return 0; + } + + if (_dq_state_is_base_wlh(tq->dq_state)) { + wlh = (dispatch_wlh_t)tq; + } else if (unlikely(_dispatch_queue_is_legacy(tq))) { + // we're not allowed to dereference tq->do_targetq + _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq); + if (wlh_out) *wlh_out = NULL; + return 0; + } + + if (!(tq->dq_priority & DISPATCH_PRIORITY_FLAG_INHERIT)) { + if (p < tqp) p = tqp; + } + tq = tq->do_targetq; + tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + } + + if (unlikely(!tqp)) { + // pthread root queues opt out of QoS + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; + } + if (wlh_out) *wlh_out = wlh; + return _dispatch_priority_inherit_from_root_queue(p, tq); +} DISPATCH_NOINLINE static dispatch_queue_t _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dispatch_queue_t tq, bool legacy) { -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - // Be sure the root queue priorities are set - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -#endif if (!slowpath(dqa)) { dqa = _dispatch_get_default_queue_attr(); } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { @@ -1239,25 +1302,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, // Step 1: Normalize arguments (qos, overcommit, tq) // - qos_class_t qos = dqa->dqa_qos_class; 
-#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (qos == _DISPATCH_QOS_CLASS_USER_INTERACTIVE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; + dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == DISPATCH_QOS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; } -#endif - bool maintenance_fallback = false; -#if DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - maintenance_fallback = true; -#endif // DISPATCH_USE_NOQOS_WORKQUEUE_FALLBACK - if (maintenance_fallback) { - if (qos == _DISPATCH_QOS_CLASS_MAINTENANCE && - !_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS].dq_priority) { - qos = _DISPATCH_QOS_CLASS_BACKGROUND; - } + if (qos == DISPATCH_QOS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; } +#endif // !HAVE_PTHREAD_WORKQUEUE_QOS _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) { @@ -1271,14 +1324,15 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { // Handle discrepancies between attr and target queue, attributes win if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - if (tq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { overcommit = _dispatch_queue_attr_overcommit_enabled; } else { overcommit = _dispatch_queue_attr_overcommit_disabled; } } - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { - tq = _dispatch_get_root_queue_with_overcommit(tq, + if (qos == DISPATCH_QOS_UNSPECIFIED) { + dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority); + tq = _dispatch_get_root_queue(tq_qos, overcommit == _dispatch_queue_attr_overcommit_enabled); } else { tq = NULL; @@ -1290,7 +1344,7 @@ _dispatch_queue_create_with_target(const char *label, 
dispatch_queue_attr_t dqa, DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " "and use this kind of target queue"); } - if (qos != _DISPATCH_QOS_CLASS_UNSPECIFIED) { + if (qos != DISPATCH_QOS_UNSPECIFIED) { DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute " "and use this kind of target queue"); } @@ -1303,10 +1357,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } if (!tq) { - qos_class_t tq_qos = qos == _DISPATCH_QOS_CLASS_UNSPECIFIED ? - _DISPATCH_QOS_CLASS_DEFAULT : qos; - tq = _dispatch_get_root_queue(tq_qos, overcommit == - _dispatch_queue_attr_overcommit_enabled); + tq = _dispatch_get_root_queue( + qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos, + overcommit == _dispatch_queue_attr_overcommit_enabled); if (slowpath(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } @@ -1340,6 +1393,9 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, dqf |= DQF_AUTORELEASE_ALWAYS; break; } + if (legacy) { + dqf |= DQF_LEGACY; + } if (label) { const char *tmp = _dispatch_strdup_if_mutable(label); if (tmp != label) { @@ -1348,26 +1404,26 @@ _dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } - dispatch_queue_t dq = _dispatch_alloc(vtable, + dispatch_queue_t dq = _dispatch_object_alloc(vtable, sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); _dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ? - DISPATCH_QUEUE_WIDTH_MAX : 1, dqa->dqa_inactive); + DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | + (dqa->dqa_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); dq->dq_label = label; - #if HAVE_PTHREAD_WORKQUEUE_QOS - dq->dq_priority = (dispatch_priority_t)_pthread_qos_class_encode(qos, - dqa->dqa_relative_priority, - overcommit == _dispatch_queue_attr_overcommit_enabled ? 
- _PTHREAD_PRIORITY_OVERCOMMIT_FLAG : 0); + dq->dq_priority = dqa->dqa_qos_and_relpri; + if (overcommit == _dispatch_queue_attr_overcommit_enabled) { + dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + } #endif _dispatch_retain(tq); - if (qos == _DISPATCH_QOS_CLASS_UNSPECIFIED) { + if (qos == QOS_CLASS_UNSPECIFIED) { // legacy way of inherithing the QoS from the target _dispatch_queue_priority_inherit_from_target(dq, tq); } if (!dqa->dqa_inactive) { - _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + _dispatch_queue_inherit_wlh_from_target(dq, tq); } dq->do_targetq = tq; _dispatch_object_debug(dq, "%s", __func__); @@ -1392,30 +1448,26 @@ dispatch_queue_t dispatch_queue_create_with_accounting_override_voucher(const char *label, dispatch_queue_attr_t attr, voucher_t voucher) { - dispatch_queue_t dq = dispatch_queue_create_with_target(label, attr, - DISPATCH_TARGET_QUEUE_DEFAULT); - dq->dq_override_voucher = _voucher_create_accounting_voucher(voucher); - return dq; + (void)label; (void)attr; (void)voucher; + DISPATCH_CLIENT_CRASH(0, "Unsupported interface"); } void -_dispatch_queue_destroy(dispatch_queue_t dq) +_dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free) { uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + if (dx_hastypeflag(dq, QUEUE_ROOT)) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } - if (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE) { - // dispatch_cancel_and_wait may apply overrides in a racy way with - // the source cancellation finishing. This race is expensive and not - // really worthwhile to resolve since the source becomes dead anyway. 
- dq_state &= ~DISPATCH_QUEUE_HAS_OVERRIDE; - } + dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + dq_state &= ~DISPATCH_QUEUE_DIRTY; + dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; if (slowpath(dq_state != initial_state)) { if (_dq_state_drain_locked(dq_state)) { - DISPATCH_CLIENT_CRASH(dq, "Release of a locked queue"); + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a locked queue"); } #ifndef __LP64__ dq_state >>= 32; @@ -1423,9 +1475,6 @@ _dispatch_queue_destroy(dispatch_queue_t dq) DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, "Release of a queue with corrupt state"); } - if (slowpath(dq == _dispatch_queue_get_current())) { - DISPATCH_CLIENT_CRASH(dq, "Release of a queue by itself"); - } if (slowpath(dq->dq_items_tail)) { DISPATCH_CLIENT_CRASH(dq->dq_items_tail, "Release of a queue while items are enqueued"); @@ -1434,36 +1483,66 @@ _dispatch_queue_destroy(dispatch_queue_t dq) // trash the queue so that use after free will crash dq->dq_items_head = (void *)0x200; dq->dq_items_tail = (void *)0x200; - // poison the state with something that is suspended and is easy to spot - dq->dq_state = 0xdead000000000000; dispatch_queue_t dqsq = os_atomic_xchg2o(dq, dq_specific_q, (void *)0x200, relaxed); if (dqsq) { _dispatch_release(dqsq); } - if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - if (dq->dq_override_voucher) _voucher_release(dq->dq_override_voucher); - dq->dq_override_voucher = DISPATCH_NO_VOUCHER; + + // fastpath for queues that never got their storage retained + if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) { + // poison the state with something that is suspended and is easy to spot + dq->dq_state = 0xdead000000000000; + return; } + + // Take over freeing the memory from _dispatch_object_dealloc() + // + // As soon as we call _dispatch_queue_release_storage(), we forfeit + // the possibility for the caller of dx_dispose() to finalize the object + // so that responsibility is ours. 
+ _dispatch_object_finalize(dq); + *allow_free = false; + dq->dq_label = ""; + dq->do_targetq = NULL; + dq->do_finalizer = NULL; + dq->do_ctxt = NULL; + return _dispatch_queue_release_storage(dq); } // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol void -_dispatch_queue_dispose(dispatch_queue_t dq) +_dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free) { _dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); } - _dispatch_queue_destroy(dq); + _dispatch_queue_destroy(dq, allow_free); } -DISPATCH_NOINLINE -static void -_dispatch_queue_suspend_slow(dispatch_queue_t dq) +void +_dispatch_queue_xref_dispose(dispatch_queue_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + long state = (long)dq_state; + if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); + if (unlikely(_dq_state_is_inactive(dq_state))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); + } + DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object"); + } + os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); +} + +DISPATCH_NOINLINE +static void +_dispatch_queue_suspend_slow(dispatch_queue_t dq) { uint64_t dq_state, value, delta; @@ -1483,11 +1562,11 @@ _dispatch_queue_suspend_slow(dispatch_queue_t dq) // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation // and got to the lock first. 
- if (slowpath(os_sub_overflow(dq_state, delta, &value))) { + if (unlikely(os_sub_overflow(dq_state, delta, &value))) { os_atomic_rmw_loop_give_up(goto retry); } }); - if (slowpath(os_add_overflow(dq->dq_side_suspend_cnt, + if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); } @@ -1507,7 +1586,7 @@ _dispatch_queue_suspend(dispatch_queue_t dq) os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { value = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (slowpath(os_add_overflow(dq_state, value, &value))) { + if (unlikely(os_add_overflow(dq_state, value, &value))) { os_atomic_rmw_loop_give_up({ return _dispatch_queue_suspend_slow(dq); }); @@ -1517,7 +1596,7 @@ _dispatch_queue_suspend(dispatch_queue_t dq) if (!_dq_state_is_suspended(dq_state)) { // rdar://8181908 we need to extend the queue life for the duration // of the call to wakeup at _dispatch_queue_resume() time. - _dispatch_retain(dq); + _dispatch_retain_2(dq); } } @@ -1546,7 +1625,7 @@ _dispatch_queue_resume_slow(dispatch_queue_t dq) // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation // and got to the lock first. 
- if (slowpath(os_add_overflow(dq_state, delta, &value))) { + if (unlikely(os_add_overflow(dq_state, delta, &value))) { os_atomic_rmw_loop_give_up(goto retry); } }); @@ -1562,12 +1641,15 @@ DISPATCH_NOINLINE static void _dispatch_queue_resume_finalize_activation(dispatch_queue_t dq) { + bool allow_resume = true; // Step 2: run the activation finalizer if (dx_vtable(dq)->do_finalize_activation) { - dx_vtable(dq)->do_finalize_activation(dq); + dx_vtable(dq)->do_finalize_activation(dq, &allow_resume); } // Step 3: consume the suspend count - return dx_vtable(dq)->do_resume(dq, false); + if (allow_resume) { + return dx_vtable(dq)->do_resume(dq, false); + } } void @@ -1575,9 +1657,15 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) { // covers all suspend and inactive bits, including side suspend bit const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + uint64_t pending_barrier_width = + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | + DISPATCH_QUEUE_IN_BARRIER; + // backward compatibility: only dispatch sources can abuse // dispatch_resume() to really mean dispatch_activate() - bool resume_can_activate = (dx_type(dq) == DISPATCH_SOURCE_KEVENT_TYPE); + bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); uint64_t dq_state, value; dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); @@ -1627,42 +1715,48 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) + DISPATCH_QUEUE_NEEDS_ACTIVATION) { // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } value = dq_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; - } else if (resume_can_activate && (dq_state & suspend_bits) == + } else if (is_source && (dq_state & suspend_bits) == DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } value = dq_state - DISPATCH_QUEUE_INACTIVE - DISPATCH_QUEUE_NEEDS_ACTIVATION + 
DISPATCH_QUEUE_SUSPEND_INTERVAL; - } else { - value = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (slowpath(os_sub_overflow(dq_state, value, &value))) { - // underflow means over-resume or a suspend count transfer - // to the side count is needed - os_atomic_rmw_loop_give_up({ - if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { - goto over_resume; - } - return _dispatch_queue_resume_slow(dq); - }); - } - if (_dq_state_is_runnable(value) && - !_dq_state_drain_locked(value)) { - uint64_t full_width = value; - if (_dq_state_has_pending_barrier(value)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - value = full_width; - value &= ~DISPATCH_QUEUE_DIRTY; - value |= _dispatch_tid_self(); + } else if (unlikely(os_sub_overflow(dq_state, + DISPATCH_QUEUE_SUSPEND_INTERVAL, &value))) { + // underflow means over-resume or a suspend count transfer + // to the side count is needed + os_atomic_rmw_loop_give_up({ + if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { + goto over_resume; } - } + return _dispatch_queue_resume_slow(dq); + }); + // + // below this, value = dq_state - DISPATCH_QUEUE_SUSPEND_INTERVAL + // + } else if (!_dq_state_is_runnable(value)) { + // Out of width or still suspended. 
+ // For the former, force _dispatch_queue_non_barrier_complete + // to reconsider whether it has work to do + value |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(value)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + value |= DISPATCH_QUEUE_DIRTY; + } else if (!is_source && (_dq_state_has_pending_barrier(value) || + value + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT)) { + // if we can, acquire the full width drain lock + // and then perform a lock transfer + // + // However this is never useful for a source where there are no + // sync waiters, so never take the lock and do a plain wakeup + value &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + value |= set_owner_and_set_full_width_and_in_barrier; + } else { + // clear overrides and force a wakeup + value &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + value &= ~DISPATCH_QUEUE_MAX_QOS_MASK; } }); } @@ -1675,7 +1769,7 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) if (activate) { // if we're still in an activate codepath here we should have // { sc:>0 na:1 }, if not we've got a corrupt state - if (!fastpath(_dq_state_is_suspended(value))) { + if (unlikely(!_dq_state_is_suspended(value))) { DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); } return; @@ -1685,23 +1779,29 @@ _dispatch_queue_resume(dispatch_queue_t dq, bool activate) return; } - if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { - _dispatch_release(dq); - return _dispatch_try_lock_transfer_or_wakeup(dq); - } - - if (_dq_state_should_wakeup(value)) { + if (_dq_state_is_dirty(dq_state)) { // - // seq_cst wrt state changes that were flushed and not acted upon - os_atomic_thread_fence(acquire); - pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, - _dispatch_queue_is_thread_bound(dq)); - return dx_wakeup(dq, pp, DISPATCH_WAKEUP_CONSUME); + // dependency ordering for dq state changes that were flushed + // and not acted upon + 
os_atomic_thread_fence(dependency); + dq = os_atomic_force_dependency_on(dq, dq_state); + } + // Balancing the retain_2 done in suspend() for rdar://8181908 + dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; + if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + } else if (!_dq_state_is_runnable(value)) { + if (_dq_state_is_base_wlh(dq_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + } + return _dispatch_release_2(dq); } - return _dispatch_release_tailcall(dq); + dispatch_assert(!_dq_state_received_sync_wait(dq_state)); + dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); + return dx_wakeup(dq, _dq_state_max_qos(dq_state), flags); over_resume: - if (slowpath(_dq_state_is_inactive(dq_state))) { + if (unlikely(_dq_state_is_inactive(dq_state))) { DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); } DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); @@ -1717,19 +1817,13 @@ dispatch_queue_get_label(dispatch_queue_t dq) } qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relative_priority_ptr) +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) { - qos_class_t qos = _DISPATCH_QOS_CLASS_UNSPECIFIED; - int relative_priority = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pthread_priority_t dqp = dq->dq_priority; - if (dqp & _PTHREAD_PRIORITY_INHERIT_FLAG) dqp = 0; - qos = _pthread_qos_class_decode(dqp, &relative_priority, NULL); -#else - (void)dq; -#endif - if (relative_priority_ptr) *relative_priority_ptr = relative_priority; - return qos; + dispatch_qos_class_t qos = _dispatch_priority_qos(dq->dq_priority); + if (relpri_ptr) { + *relpri_ptr = qos ? 
_dispatch_priority_relpri(dq->dq_priority) : 0; + } + return _dispatch_qos_to_qos_class(qos); } static void @@ -1739,23 +1833,24 @@ _dispatch_queue_set_width2(void *ctxt) uint32_t tmp; dispatch_queue_t dq = _dispatch_queue_get_current(); - if (w > 0) { - tmp = (unsigned int)w; - } else switch (w) { - case 0: - tmp = 1; - break; - case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = dispatch_hw_config(physical_cpus); - break; - case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = dispatch_hw_config(active_cpus); - break; - default: - // fall through - case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - tmp = dispatch_hw_config(logical_cpus); - break; + if (w >= 0) { + tmp = w ? (unsigned int)w : 1; + } else { + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_PHYSICAL); + break; + case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + break; + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + default: + tmp = _dispatch_qos_max_parallelism(qos, 0); + break; + } } if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { tmp = DISPATCH_QUEUE_WIDTH_MAX; @@ -1763,17 +1858,18 @@ _dispatch_queue_set_width2(void *ctxt) dispatch_queue_flags_t old_dqf, new_dqf; os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { - new_dqf = old_dqf & ~DQF_WIDTH_MASK; - new_dqf |= (tmp << DQF_WIDTH_SHIFT); + new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); }); + _dispatch_queue_inherit_wlh_from_target(dq, dq->do_targetq); _dispatch_object_debug(dq, "%s", __func__); } void dispatch_queue_set_width(dispatch_queue_t dq, long width) { - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - slowpath(dx_hastypeflag(dq, QUEUE_ROOT))) { + if (unlikely(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || + dx_hastypeflag(dq, QUEUE_ROOT) || + dx_hastypeflag(dq, QUEUE_BASE))) { return; } 
@@ -1788,8 +1884,15 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width) DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } - _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, - _dispatch_queue_set_width2); + if (likely((int)width >= 0)) { + _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, + _dispatch_queue_set_width2, DISPATCH_BARRIER_TRYSYNC_SUSPEND); + } else { + // The negative width constants need to execute on the queue to + // query the queue QoS + _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, + _dispatch_queue_set_width2); + } } static void @@ -1800,13 +1903,18 @@ _dispatch_queue_legacy_set_target_queue(void *ctxt) dispatch_queue_t otq = dq->do_targetq; if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); _dispatch_bug_deprecated("Changing the target of a queue " "already targeted by other dispatch objects"); +#else + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); +#endif } _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); + _dispatch_queue_inherit_wlh_from_target(dq, tq); #if HAVE_PTHREAD_WORKQUEUE_QOS // see _dispatch_queue_class_wakeup() _dispatch_queue_sidelock_lock(dq); @@ -1828,10 +1936,9 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && dq->do_targetq); - if (slowpath(!tq)) { + if (unlikely(!tq)) { bool is_concurrent_q = (dq->dq_width > 1); - tq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - !is_concurrent_q); + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, !is_concurrent_q); } if (_dispatch_queue_try_inactive_suspend(dq)) { @@ -1839,19 +1946,33 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) return 
dx_vtable(dq)->do_resume(dq, false); } - if (dq->dq_override_voucher != DISPATCH_NO_VOUCHER) { - DISPATCH_CLIENT_CRASH(dq, "Cannot change the target of a queue or " - "source with an accounting override voucher " +#if !DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); + } +#endif + + if (unlikely(!_dispatch_queue_is_legacy(dq))) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); + } +#endif + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " "after it has been activated"); } unsigned long type = dx_type(dq); switch (type) { case DISPATCH_QUEUE_LEGACY_TYPE: +#if DISPATCH_ALLOW_NON_LEAF_RETARGET if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { _dispatch_bug_deprecated("Changing the target of a queue " "already targeted by other dispatch objects"); } +#endif break; case DISPATCH_SOURCE_KEVENT_TYPE: case DISPATCH_MACH_CHANNEL_TYPE: @@ -1859,18 +1980,14 @@ _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) _dispatch_bug_deprecated("Changing the target of a source " "after it has been activated"); break; - - case DISPATCH_QUEUE_SERIAL_TYPE: - case DISPATCH_QUEUE_CONCURRENT_TYPE: - DISPATCH_CLIENT_CRASH(type, "Cannot change the target of this queue " - "after it has been activated"); default: DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); } _dispatch_retain(tq); return _dispatch_barrier_trysync_or_async_f(dq, tq, - _dispatch_queue_legacy_set_target_queue); + _dispatch_queue_legacy_set_target_queue, + DISPATCH_BARRIER_TRYSYNC_SUSPEND); } #pragma mark - @@ -1881,7 +1998,7 @@ static struct dispatch_pthread_root_queue_context_s _dispatch_mgr_root_queue_pthread_context; static struct 
dispatch_root_queue_context_s _dispatch_mgr_root_queue_context = {{{ -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES .dgq_kworkqueue = (void*)(~0ul), #endif .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context, @@ -1893,9 +2010,9 @@ static struct dispatch_queue_s _dispatch_mgr_root_queue = { .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, .do_ctxt = &_dispatch_mgr_root_queue_context, .dq_label = "com.apple.root.libdispatch-manager", - .dq_width = DISPATCH_QUEUE_WIDTH_POOL, - .dq_override = DISPATCH_SATURATED_OVERRIDE, - .dq_override_voucher = DISPATCH_NO_VOUCHER, + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, .dq_serialnum = 3, }; #endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES @@ -1911,17 +2028,16 @@ static struct { static dispatch_once_t _dispatch_mgr_sched_pred; -// TODO: switch to "event-reflector thread" property - #if HAVE_PTHREAD_WORKQUEUE_QOS +// TODO: switch to "event-reflector thread" property // Must be kept in sync with list of qos classes in sys/qos.h static const int _dispatch_mgr_sched_qos2prio[] = { - [_DISPATCH_QOS_CLASS_MAINTENANCE] = 4, - [_DISPATCH_QOS_CLASS_BACKGROUND] = 4, - [_DISPATCH_QOS_CLASS_UTILITY] = 20, - [_DISPATCH_QOS_CLASS_DEFAULT] = 31, - [_DISPATCH_QOS_CLASS_USER_INITIATED] = 37, - [_DISPATCH_QOS_CLASS_USER_INTERACTIVE] = 47, + [QOS_CLASS_MAINTENANCE] = 4, + [QOS_CLASS_BACKGROUND] = 4, + [QOS_CLASS_UTILITY] = 20, + [QOS_CLASS_DEFAULT] = 31, + [QOS_CLASS_USER_INITIATED] = 37, + [QOS_CLASS_USER_INTERACTIVE] = 47, }; #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -1941,8 +2057,8 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); #if HAVE_PTHREAD_WORKQUEUE_QOS qos_class_t qos = qos_class_main(); - if (qos == _DISPATCH_QOS_CLASS_DEFAULT) { - qos = _DISPATCH_QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 + if (qos == 
QOS_CLASS_DEFAULT) { + qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 } if (qos) { _dispatch_mgr_sched.qos = qos; @@ -1975,8 +2091,6 @@ _dispatch_mgr_root_queue_init(void) (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, qos, 0)); } - _dispatch_mgr_q.dq_priority = - (dispatch_priority_t)_pthread_qos_class_encode(qos, 0, 0); } #endif param.sched_priority = _dispatch_mgr_sched.prio; @@ -2048,8 +2162,7 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) if (p >= prio) os_atomic_rmw_loop_give_up(return); }); #if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); + _dispatch_root_queues_init(); if (_dispatch_kevent_workqueue_enabled) { pthread_priority_t pp = 0; if (prio > _dispatch_mgr_sched.default_prio) { @@ -2083,8 +2196,7 @@ void _dispatch_kevent_workqueue_init(void) { // Initialize kevent workqueue support - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); + _dispatch_root_queues_init(); if (!_dispatch_kevent_workqueue_enabled) return; dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); qos_class_t qos = _dispatch_mgr_sched.qos; @@ -2092,7 +2204,6 @@ _dispatch_kevent_workqueue_init(void) pthread_priority_t pp = 0; if (qos) { pp = _pthread_qos_class_encode(qos, 0, 0); - _dispatch_mgr_q.dq_priority = (dispatch_priority_t)pp; } if (prio > _dispatch_mgr_sched.default_prio) { pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; @@ -2102,7 +2213,7 @@ _dispatch_kevent_workqueue_init(void) (void)dispatch_assume_zero(r); } } -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE #pragma mark - #pragma mark dispatch_pthread_root_queue @@ -2118,12 +2229,12 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, dispatch_pthread_root_queue_context_t pqc; dispatch_queue_flags_t dqf = 0; size_t dqs; - uint8_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? 
- (uint8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; + int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? + (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; dqs = roundup(dqs, _Alignof(struct dispatch_root_queue_context_s)); - dq = _dispatch_alloc(DISPATCH_VTABLE(queue_root), dqs + + dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_root), dqs + sizeof(struct dispatch_root_queue_context_s) + sizeof(struct dispatch_pthread_root_queue_context_s)); qc = (void*)dq + dqs; @@ -2138,16 +2249,15 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, } } - _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, false); + _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, 0); dq->dq_label = label; - dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - dq->dq_override = DISPATCH_SATURATED_OVERRIDE; + dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; dq->do_ctxt = qc; - dq->do_targetq = NULL; + dq->dq_priority = DISPATCH_PRIORITY_SATURATED_OVERRIDE; pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); qc->dgq_ctxt = pqc; -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES qc->dgq_kworkqueue = (void*)(~0ul); #endif _dispatch_root_queue_init_pthread_pool(qc, pool_size, true); @@ -2199,7 +2309,7 @@ dispatch_pthread_root_queue_copy_current(void) { dispatch_queue_t dq = _dispatch_queue_get_current(); if (!dq) return NULL; - while (slowpath(dq->do_targetq)) { + while (unlikely(dq->do_targetq)) { dq = dq->do_targetq; } if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || @@ -2212,7 +2322,7 @@ dispatch_pthread_root_queue_copy_current(void) #endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES void -_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) +_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq, bool *allow_free) { if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { 
DISPATCH_INTERNAL_CRASH(dq, "Global root queue disposed"); @@ -2224,17 +2334,16 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq) dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; pthread_attr_destroy(&pqc->dpq_thread_attr); - _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator); + _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); if (pqc->dpq_thread_configure) { Block_release(pqc->dpq_thread_configure); } - dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); #endif if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); } - _dispatch_queue_destroy(dq); + _dispatch_queue_destroy(dq, allow_free); } #pragma mark - @@ -2244,7 +2353,7 @@ struct dispatch_queue_specific_queue_s { DISPATCH_QUEUE_HEADER(queue_specific_queue); TAILQ_HEAD(dispatch_queue_specific_head_s, dispatch_queue_specific_s) dqsq_contexts; -} DISPATCH_QUEUE_ALIGN; +} DISPATCH_ATOMIC64_ALIGN; struct dispatch_queue_specific_s { const void *dqs_key; @@ -2255,19 +2364,19 @@ struct dispatch_queue_specific_s { DISPATCH_DECL(dispatch_queue_specific); void -_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq) +_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq, + bool *allow_free) { dispatch_queue_specific_t dqs, tmp; + dispatch_queue_t rq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) { if (dqs->dqs_destructor) { - dispatch_async_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, - dqs->dqs_destructor); + dispatch_async_f(rq, dqs->dqs_ctxt, dqs->dqs_destructor); } free(dqs); } - _dispatch_queue_destroy(dqsq->_as_dq); + _dispatch_queue_destroy(dqsq->_as_dq, allow_free); } static void @@ -2275,13 +2384,13 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) { dispatch_queue_specific_queue_t dqsq; - 
dqsq = _dispatch_alloc(DISPATCH_VTABLE(queue_specific_queue), + dqsq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_specific_queue), sizeof(struct dispatch_queue_specific_queue_s)); - _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, - DISPATCH_QUEUE_WIDTH_MAX, false); + _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, DISPATCH_QUEUE_WIDTH_MAX, + DISPATCH_QUEUE_ROLE_BASE_ANON); dqsq->do_xref_cnt = -1; dqsq->do_targetq = _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_USER_INITIATED, true); + DISPATCH_QOS_USER_INITIATED, true); dqsq->dq_label = "queue-specific"; TAILQ_INIT(&dqsq->dqsq_contexts); if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL, @@ -2302,7 +2411,7 @@ _dispatch_queue_set_specific(void *ctxt) // Destroy previous context for existing key if (dqs->dqs_destructor) { dispatch_async_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, false), dqs->dqs_ctxt, + DISPATCH_QOS_DEFAULT, false), dqs->dqs_ctxt, dqs->dqs_destructor); } if (dqsn->dqs_ctxt) { @@ -2339,7 +2448,7 @@ dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, _dispatch_queue_init_specific(dq); } _dispatch_barrier_trysync_or_async_f(dq->dq_specific_q, dqs, - _dispatch_queue_set_specific); + _dispatch_queue_set_specific, 0); } static void @@ -2360,6 +2469,18 @@ _dispatch_queue_get_specific(void *ctxt) *ctxtp = NULL; } +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) +{ + void *ctxt = NULL; + if (fastpath(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE && dq->dq_specific_q)){ + ctxt = (void *)key; + dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); + } + return ctxt; +} + DISPATCH_NOINLINE void * dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) @@ -2367,13 +2488,7 @@ dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) if (slowpath(!key)) { return NULL; } - void *ctxt = NULL; - - if (fastpath(dq->dq_specific_q)) { - ctxt = (void *)key; - 
dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); - } - return ctxt; + return _dispatch_queue_get_specific_inline(dq, key); } DISPATCH_NOINLINE @@ -2387,12 +2502,8 @@ dispatch_get_specific(const void *key) dispatch_queue_t dq = _dispatch_queue_get_current(); while (slowpath(dq)) { - if (slowpath(dq->dq_specific_q)) { - ctxt = (void *)key; - dispatch_sync_f(dq->dq_specific_q, &ctxt, - _dispatch_queue_get_specific); - if (ctxt) break; - } + ctxt = _dispatch_queue_get_specific_inline(dq, key); + if (ctxt) break; dq = dq->do_targetq; } return ctxt; @@ -2407,7 +2518,7 @@ _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); } uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - return _dq_state_drain_locked_by(dq_state, _dispatch_tid_self()); + return _dq_state_drain_locked_by_self(dq_state); } #endif @@ -2419,12 +2530,13 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { size_t offset = 0; dispatch_queue_t target = dq->do_targetq; + const char *tlabel = target && target->dq_label ? target->dq_label : ""; uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - offset += dsnprintf(&buf[offset], bufsiz - offset, + offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " "target = %s[%p], width = 0x%x, state = 0x%016llx", - target && target->dq_label ? 
target->dq_label : "", target, - dq->dq_width, (unsigned long long)dq_state); + dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width, + (unsigned long long)dq_state); if (_dq_state_is_suspended(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", _dq_state_suspend_cnt(dq_state)); @@ -2440,8 +2552,9 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) if (_dq_state_is_dirty(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); } - if (_dq_state_has_override(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", async-override"); + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + if (qos) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); } mach_port_t owner = _dq_state_drain_owner(dq_state); if (!_dispatch_queue_is_thread_bound(dq) && owner) { @@ -2487,34 +2600,40 @@ dispatch_debug_queue(dispatch_queue_t dq, const char* str) { } #endif -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION -static OSSpinLock _dispatch_stats_lock; +#if DISPATCH_PERF_MON + +#define DISPATCH_PERF_MON_BUCKETS 8 + static struct { - uint64_t time_total; - uint64_t count_total; - uint64_t thread_total; -} _dispatch_stats[65]; // ffs*/fls*() returns zero when no bits are set + uint64_t volatile time_total; + uint64_t volatile count_total; + uint64_t volatile thread_total; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; +DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; -static void -_dispatch_queue_merge_stats(uint64_t start) +void +_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) { uint64_t delta = _dispatch_absolute_time() - start; unsigned long count; - + int bucket = 0; count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - - int bucket = flsl((long)count); - - // 64-bit counters on 32-bit require a lock or a queue - 
OSSpinLockLock(&_dispatch_stats_lock); - - _dispatch_stats[bucket].time_total += delta; - _dispatch_stats[bucket].count_total += count; - _dispatch_stats[bucket].thread_total++; - - OSSpinLockUnlock(&_dispatch_stats_lock); + if (count == 0) { + bucket = 0; + if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); + } else { + bucket = MIN(DISPATCH_PERF_MON_BUCKETS - 1, + (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count)); + os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); + } + os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); + os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); + if (trace) { + _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); + } } + #endif #pragma mark - @@ -2534,8 +2653,8 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; // when we unbind, overcomitness can flip, so we need to learn // it from the defaultpri, see _dispatch_priority_compute_update - pp |= (_dispatch_get_defaultpriority() & - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + pp |= (_dispatch_get_basepri() & + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } else { // else we need to keep the one that is set in the current pri pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); @@ -2543,8 +2662,9 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { pflags |= _PTHREAD_SET_SELF_QOS_FLAG; } - if (unlikely(DISPATCH_QUEUE_DRAIN_OWNER(&_dispatch_mgr_q) == - _dispatch_tid_self())) { + uint64_t mgr_dq_state = + os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { DISPATCH_INTERNAL_CRASH(pp, "Changing the QoS while on the manager queue"); } @@ -2573,7 +2693,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, DISPATCH_NOINLINE voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t 
priority, - voucher_t v, _dispatch_thread_set_self_t flags) + voucher_t v, dispatch_thread_set_self_t flags) { voucher_t ov = DISPATCH_NO_VOUCHER; mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; @@ -2588,9 +2708,6 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, kv = _voucher_swap_and_get_mach_voucher(ov, v); } } -#if !PTHREAD_WORKQUEUE_RESETS_VOUCHER_AND_PRIORITY_ON_PARK - flags &= ~(_dispatch_thread_set_self_t)DISPATCH_THREAD_PARK; -#endif if (!(flags & DISPATCH_THREAD_PARK)) { _dispatch_set_priority_and_mach_voucher_slow(priority, kv); } @@ -2604,6 +2721,34 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, #pragma mark - #pragma mark dispatch_continuation_t +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_kind = "dc-redirect", + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_kind = "dc-mach-send-drain", + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_kind = "dc-mach-send-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_kind = "dc-mach-recv-barrier", + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, + .do_kind = "dc-mach-async-reply", + .do_invoke = _dispatch_mach_msg_async_reply_invoke), +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_kind = "dc-override-stealing", + .do_invoke = _dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_kind = "dc-override-owning", + .do_invoke = _dispatch_queue_override_invoke), +#endif +}; + static void _dispatch_force_cache_cleanup(void) { @@ -2637,7 +2782,7 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) dc = _dispatch_thread_getspecific(dispatch_cache_key); int cnt; if (!dc || (cnt = dc->dc_cache_cnt - - 
_dispatch_continuation_cache_limit) <= 0){ + _dispatch_continuation_cache_limit) <= 0) { return; } do { @@ -2648,38 +2793,11 @@ _dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) } #endif -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline void -_dispatch_continuation_slow_item_signal(dispatch_queue_t dq, - dispatch_object_t dou) -{ - dispatch_continuation_t dc = dou._dc; - pthread_priority_t pp = dq->dq_override; - - _dispatch_trace_continuation_pop(dq, dc); - if (pp > (dc->dc_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start((mach_port_t)dc->dc_data, pp); - } - _dispatch_thread_event_signal((dispatch_thread_event_t)dc->dc_other); - _dispatch_introspection_queue_item_complete(dc); -} - DISPATCH_NOINLINE static void _dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc) { - _dispatch_queue_push(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc)); -} - -DISPATCH_NOINLINE -static void -_dispatch_continuation_push_sync_slow(dispatch_queue_t dq, - dispatch_continuation_t dc) -{ - _dispatch_queue_push_inline(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc), - DISPATCH_WAKEUP_SLOW_WAITER); + dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -2734,12 +2852,16 @@ _dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { +#if OS_VOUCHER_ACTIVITY_SPI voucher = VOUCHER_CURRENT; +#endif flags |= DISPATCH_BLOCK_HAS_VOUCHER; } +#if OS_VOUCHER_ACTIVITY_SPI if (voucher == VOUCHER_CURRENT) { voucher = _voucher_get(); } +#endif if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { pri = _dispatch_priority_propagate(); flags |= DISPATCH_BLOCK_HAS_PRIORITY; @@ -2832,20 +2954,16 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) } if (atomic_flags & DBF_CANCELED) goto out; - 
pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - _dispatch_thread_set_self_t adopt_flags = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - op = _dispatch_get_priority(); + pthread_priority_t op = 0, p = 0; + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { p = dbpd->dbpd_priority; - if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - adopt_flags |= DISPATCH_PRIORITY_ENFORCE; - } } voucher_t ov, v = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { v = dbpd->dbpd_voucher; } - ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); + ov = _dispatch_set_priority_and_voucher(p, v, 0); dbpd->dbpd_thread = _dispatch_tid_self(); _dispatch_client_callout(dbpd->dbpd_block, _dispatch_Block_invoke(dbpd->dbpd_block)); @@ -2865,28 +2983,18 @@ _dispatch_block_sync_invoke(void *block) dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); dispatch_block_flags_t flags = dbpd->dbpd_flags; unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (slowpath(atomic_flags & DBF_WAITED)) { + if (unlikely(atomic_flags & DBF_WAITED)) { DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " "run more than once and waited for"); } if (atomic_flags & DBF_CANCELED) goto out; - pthread_priority_t op = DISPATCH_NO_PRIORITY, p = DISPATCH_NO_PRIORITY; - _dispatch_thread_set_self_t adopt_flags = 0; - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - op = _dispatch_get_priority(); - p = dbpd->dbpd_priority; - if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - adopt_flags |= DISPATCH_PRIORITY_ENFORCE; - } - } - voucher_t ov, v = DISPATCH_NO_VOUCHER; + voucher_t ov = DISPATCH_NO_VOUCHER; if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; + ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); } - ov = _dispatch_adopt_priority_and_set_voucher(p, v, adopt_flags); dbpd->dbpd_block(); - _dispatch_reset_priority_and_voucher(op, ov); + 
_dispatch_reset_voucher(ov, 0); out: if ((atomic_flags & DBF_PERFORM) == 0) { if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { @@ -2898,13 +3006,57 @@ _dispatch_block_sync_invoke(void *block) oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); if (oq) { // balances dispatch_{,barrier_,}sync - _os_object_release_internal(oq->_as_os_obj); + _os_object_release_internal_n(oq->_as_os_obj, 2); } } -DISPATCH_ALWAYS_INLINE static void -_dispatch_block_async_invoke2(dispatch_block_t b, bool release) +_dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq, + dispatch_qos_t qos) +{ + uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); + + // Only dispatch queues can reach this point (as opposed to sources or more + // complex objects) which allows us to handle the DIRTY bit protocol by only + // looking at the tail + dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE); + +again: + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + dispatch_assert(_dq_state_is_base_wlh(old_state)); + if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { + // Nothing to do if the QoS isn't going down + os_atomic_rmw_loop_give_up(return); + } + if (_dq_state_is_dirty(old_state)) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. 
+ // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + if (!dq->dq_items_tail) { + goto again; + } + return; + }); + } + + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state |= qos_bits; + }); + + _dispatch_deferred_items_get()->ddi_wlh_needs_update = true; + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); +} + +#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 +#define DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET 0x2 + +DISPATCH_NOINLINE +static void +_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); unsigned int atomic_flags = dbpd->dbpd_atomic_flags; @@ -2912,6 +3064,17 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " "run more than once and waited for"); } + + if (unlikely((dbpd->dbpd_flags & + DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE) && + !(invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET))) { + dispatch_queue_t dq = _dispatch_get_current_queue(); + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + if ((dispatch_wlh_t)dq == _dispatch_get_wlh() && !dq->dq_items_tail) { + _dispatch_block_async_invoke_reset_max_qos(dq, qos); + } + } + if (!slowpath(atomic_flags & DBF_CANCELED)) { dbpd->dbpd_block(); } @@ -2920,13 +3083,14 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) dispatch_group_leave(_dbpd_group(dbpd)); } } - os_mpsc_queue_t oq; - oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + + os_mpsc_queue_t oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); if (oq) { // balances dispatch_{,barrier_,group_}async - _os_object_release_internal_inline(oq->_as_os_obj); + _os_object_release_internal_n_inline(oq->_as_os_obj, 2); } - if (release) { + + if (invoke_flags & 
DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { Block_release(b); } } @@ -2934,20 +3098,35 @@ _dispatch_block_async_invoke2(dispatch_block_t b, bool release) static void _dispatch_block_async_invoke(void *block) { - _dispatch_block_async_invoke2(block, false); + _dispatch_block_async_invoke2(block, 0); } static void _dispatch_block_async_invoke_and_release(void *block) { - _dispatch_block_async_invoke2(block, true); + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); +} + +static void +_dispatch_block_async_invoke_and_release_mach_barrier(void *block) +{ + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE | + DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_block_supports_wait_and_cancel(dispatch_block_private_data_t dbpd) +{ + return dbpd && !(dbpd->dbpd_flags & + DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE); } void dispatch_block_cancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_cancel()"); } @@ -2958,7 +3137,7 @@ long dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_testcancel()"); } @@ -2969,7 +3148,7 @@ long dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { + if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_wait()"); } @@ -2996,8 +3175,8 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) // neither of us would ever release. 
Side effect: After a _wait // that times out, subsequent waits will not boost the qos of the // still-running block. - dx_wakeup(boost_oq, pp, DISPATCH_WAKEUP_OVERRIDING | - DISPATCH_WAKEUP_CONSUME); + dx_wakeup(boost_oq, _dispatch_qos_from_pp(pp), + DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2); } mach_port_t boost_th = dbpd->dbpd_thread; @@ -3061,10 +3240,13 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, // balanced in d_block_async_invoke_and_release or d_block_wait if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, oq, relaxed)) { - _os_object_retain_internal_inline(oq->_as_os_obj); + _os_object_retain_internal_n_inline(oq->_as_os_obj, 2); } - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + if (dc_flags & DISPATCH_OBJ_MACH_BARRIER) { + dispatch_assert(dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dc->dc_func = _dispatch_block_async_invoke_and_release_mach_barrier; + } else if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { dc->dc_func = _dispatch_block_async_invoke_and_release; } else { dc->dc_func = _dispatch_block_async_invoke; @@ -3092,28 +3274,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, dc->dc_flags = dc_flags; } -void -_dispatch_continuation_update_bits(dispatch_continuation_t dc, - uintptr_t dc_flags) -{ - dc->dc_flags = dc_flags; - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { - if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { - dc->dc_func = _dispatch_block_async_invoke_and_release; - } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { - dc->dc_func = _dispatch_call_block_and_release; - } - } else { - if (dc_flags & DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT) { - dc->dc_func = _dispatch_block_async_invoke; - } else if (dc_flags & DISPATCH_OBJ_BLOCK_BIT) { - dc->dc_func = _dispatch_Block_invoke(dc->dc_ctxt); - } - } -} - #endif // __BLOCKS__ - #pragma mark - #pragma mark dispatch_barrier_async @@ -3164,12 +3325,12 @@ _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dc->dc_ctxt = ctxt; dc->dc_voucher = 
DISPATCH_NO_VOUCHER; dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_queue_push(dq, dc, 0); + dx_push(dq, dc, 0); } #ifdef __BLOCKS__ void -dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) +dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; @@ -3184,7 +3345,7 @@ dispatch_barrier_async(dispatch_queue_t dq, void (^work)(void)) void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { dispatch_thread_frame_s dtf; struct dispatch_continuation_s *other_dc = dc->dc_other; @@ -3193,9 +3354,7 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc, // the "right" root queue was stuffed into dc_func dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func; dispatch_queue_t dq = dc->dc_data, rq, old_dq; - struct _dispatch_identity_s di; - - pthread_priority_t op, dp, old_dp; + dispatch_priority_t old_dbp; if (ctxt_flags) { flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; @@ -3203,44 +3362,29 @@ _dispatch_async_redirect_invoke(dispatch_continuation_t dc, } old_dq = _dispatch_get_current_queue(); if (assumed_rq) { - _dispatch_queue_set_current(assumed_rq); - _dispatch_root_queue_identity_assume(&di, 0); - } - - old_dp = _dispatch_set_defaultpriority(dq->dq_priority, &dp); - op = dq->dq_override; - if (op > (dp & _PTHREAD_PRIORITY_QOS_CLASS_MASK)) { - _dispatch_wqthread_override_start(_dispatch_tid_self(), op); - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); + old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); + _dispatch_set_basepri(dq->dq_priority); + } else { + old_dbp = _dispatch_set_basepri(dq->dq_priority); } _dispatch_thread_frame_push(&dtf, dq); _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, DISPATCH_OBJ_CONSUME_BIT, { - _dispatch_continuation_pop(other_dc, dq, flags); + _dispatch_continuation_pop(other_dc, dic, flags, dq); }); _dispatch_thread_frame_pop(&dtf); - if (assumed_rq) { - _dispatch_root_queue_identity_restore(&di); - _dispatch_queue_set_current(old_dq); - } - _dispatch_reset_defaultpriority(old_dp); + if (assumed_rq) _dispatch_queue_set_current(old_dq); + _dispatch_reset_basepri(old_dbp); rq = dq->do_targetq; while (slowpath(rq->do_targetq) && rq != old_dq) { - _dispatch_non_barrier_complete(rq); + _dispatch_queue_non_barrier_complete(rq); rq = rq->do_targetq; } - _dispatch_non_barrier_complete(dq); - - if (dtf.dtf_deferred) { - struct dispatch_object_s *dou = dtf.dtf_deferred; - return _dispatch_queue_drain_deferred_invoke(dq, flags, 0, dou); - } - - _dispatch_release_tailcall(dq); + _dispatch_queue_non_barrier_complete(dq); + _dispatch_release_tailcall(dq); // pairs with _dispatch_async_redirect_wrap } DISPATCH_ALWAYS_INLINE @@ -3257,14 +3401,14 @@ _dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou) dc->dc_other = dou._do; dc->dc_voucher = DISPATCH_NO_VOUCHER; dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain(dq); + _dispatch_retain(dq); // released in _dispatch_async_redirect_invoke return dc; } DISPATCH_NOINLINE static void _dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_object_t dou, pthread_priority_t pp) + dispatch_object_t dou, dispatch_qos_t qos) { if (!slowpath(_dispatch_object_is_redirection(dou))) { dou._dc = _dispatch_async_redirect_wrap(dq, dou); @@ -3286,7 +3430,7 @@ _dispatch_async_f_redirect(dispatch_queue_t dq, dq = dq->do_targetq; } - _dispatch_queue_push(dq, dou, pp); + 
dx_push(dq, dou, qos); } DISPATCH_ALWAYS_INLINE @@ -3299,7 +3443,8 @@ _dispatch_continuation_redirect(dispatch_queue_t dq, // by _dispatch_async_f2. // However we want to end up on the root queue matching `dc` qos, so pick up // the current override of `dq` which includes dc's overrde (and maybe more) - _dispatch_async_f_redirect(dq, dc, dq->dq_override); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + _dispatch_async_f_redirect(dq, dc, _dq_state_max_qos(dq_state)); _dispatch_introspection_queue_item_complete(dc); } @@ -3320,7 +3465,7 @@ _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) } return _dispatch_async_f_redirect(dq, dc, - _dispatch_continuation_get_override_priority(dq, dc)); + _dispatch_continuation_override_qos(dq, dc)); } DISPATCH_ALWAYS_INLINE @@ -3356,7 +3501,7 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, #ifdef __BLOCKS__ void -dispatch_async(dispatch_queue_t dq, void (^work)(void)) +dispatch_async(dispatch_queue_t dq, dispatch_block_t work) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; @@ -3405,31 +3550,66 @@ dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, #endif #pragma mark - -#pragma mark dispatch_sync / dispatch_barrier_sync recurse and invoke +#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete DISPATCH_NOINLINE static void -_dispatch_sync_function_invoke_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_queue_non_barrier_complete(dispatch_queue_t dq) { - voucher_t ov; - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, dq); - ov = _dispatch_set_priority_and_voucher(0, dq->dq_override_voucher, 0); - _dispatch_client_callout(ctxt, func); - _dispatch_perfmon_workitem_inc(); - _dispatch_reset_voucher(ov, 0); - _dispatch_thread_frame_pop(&dtf); + uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); + + // see _dispatch_queue_resume() + 
os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (unlikely(_dq_state_drain_locked(old_state))) { + // make drain_try_unlock() fail and reconsider whether there's + // enough width now for a new item + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (likely(_dq_state_is_runnable(new_state))) { + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(old_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } else { + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; + } + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= owner_self; + } else if (_dq_state_is_dirty(old_state)) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + } + }); + + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + if (_dq_state_is_dirty(old_state)) { + // + // dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_force_dependency_on(dq, old_state); + } + return _dispatch_queue_barrier_complete(dq, 0, 0); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + _dispatch_retain_2(dq); + dispatch_assert(!_dq_state_is_base_wlh(new_state)); + return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state)); + } } + DISPATCH_ALWAYS_INLINE static inline void _dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - if (slowpath(dq->dq_override_voucher != DISPATCH_NO_VOUCHER)) { - return _dispatch_sync_function_invoke_slow(dq, ctxt, func); - } dispatch_thread_frame_s dtf; _dispatch_thread_frame_push(&dtf, dq); _dispatch_client_callout(ctxt, func); @@ -3445,637 +3625,737 @@ 
_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, _dispatch_sync_function_invoke_inline(dq, ctxt, func); } -void -_dispatch_sync_recurse_invoke(void *ctxt) -{ - dispatch_continuation_t dc = ctxt; - _dispatch_sync_function_invoke(dc->dc_data, dc->dc_ctxt, dc->dc_func); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_function_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +DISPATCH_NOINLINE +static void +_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, + uintptr_t dc_flags) { - struct dispatch_continuation_s dc = { - .dc_data = dq, - .dc_func = func, - .dc_ctxt = ctxt, - }; - _dispatch_sync_f(dq->do_targetq, &dc, _dispatch_sync_recurse_invoke, pp); + bool barrier = (dc_flags & DISPATCH_OBJ_BARRIER_BIT); + do { + if (dq == stop_dq) return; + if (barrier) { + _dispatch_queue_barrier_complete(dq, 0, 0); + } else { + _dispatch_queue_non_barrier_complete(dq); + } + dq = dq->do_targetq; + barrier = (dq->dq_width == 1); + } while (unlikely(dq->do_targetq)); } DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_non_barrier_complete(dq); + _dispatch_sync_complete_recurse(dq, NULL, dc_flags); } DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - _dispatch_sync_function_recurse(dq, ctxt, func, pp); - _dispatch_non_barrier_complete(dq); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_queue_non_barrier_complete(dq); } -DISPATCH_ALWAYS_INLINE +/* + * For queues we can cheat and inline the 
unlock code, which is invalid + * for objects with a more complex state machine (sources or mach channels) + */ +DISPATCH_NOINLINE static void -_dispatch_non_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_queue_barrier_sync_invoke_and_complete(dispatch_queue_t dq, + void *ctxt, dispatch_function_t func) { - _dispatch_introspection_non_barrier_sync_begin(dq, func); - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_non_barrier_sync_f_recurse(dq, ctxt, func, pp); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) { + return _dispatch_queue_barrier_complete(dq, 0, 0); } - _dispatch_non_barrier_sync_f_invoke(dq, ctxt, func); -} -#pragma mark - -#pragma mark dispatch_barrier_sync + // Presence of any of these bits requires more work that only + // _dispatch_queue_barrier_complete() handles properly + // + // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without + // checking the role is sloppy, but is a super fast check, and neither of + // these bits should be set if the lock was never contended/discovered. 
+ const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | + DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + uint64_t old_state, new_state; -DISPATCH_NOINLINE + // similar to _dispatch_queue_drain_try_unlock + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + if (unlikely(old_state & fail_unlock_mask)) { + os_atomic_rmw_loop_give_up({ + return _dispatch_queue_barrier_complete(dq, 0, 0); + }); + } + }); + if (_dq_state_is_base_wlh(old_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + } +} + +#pragma mark - +#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake + +#define DISPATCH_SYNC_WAITER_NO_UNLOCK (~0ull) + +DISPATCH_NOINLINE static void -_dispatch_barrier_complete(dispatch_queue_t dq) +_dispatch_sync_waiter_wake(dispatch_sync_context_t dsc, + dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) { - uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + - dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + dispatch_wlh_t waiter_wlh = dsc->dc_data; - if (slowpath(dq->dq_items_tail)) { - return _dispatch_try_lock_transfer_or_wakeup(dq); + if (_dq_state_in_sync_transfer(old_state) || + _dq_state_in_sync_transfer(new_state) || + (waiter_wlh != DISPATCH_WLH_ANON)) { + _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); } - - if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { - // someone enqueued a slow item at the head - // looping may be its last chance - return _dispatch_try_lock_transfer_or_wakeup(dq); + if (waiter_wlh == DISPATCH_WLH_ANON) { + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_wqthread_override_start(dsc->dsc_waiter, + dsc->dsc_override_qos); + } + 
_dispatch_thread_event_signal(&dsc->dsc_event); } + _dispatch_introspection_queue_item_complete(dsc->_as_dc); } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_waiter_redirect_or_wake(dispatch_queue_t dq, uint64_t owned, + dispatch_object_t dou) { - _dispatch_sync_function_recurse(dq, ctxt, func, pp); - _dispatch_barrier_complete(dq); + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc; + uint64_t next_owner = 0, old_state, new_state; + dispatch_wlh_t wlh = NULL; + + _dispatch_trace_continuation_pop(dq, dsc->_as_dc); + + if (owned == DISPATCH_SYNC_WAITER_NO_UNLOCK) { + dispatch_assert(!(dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT)); + new_state = old_state = os_atomic_load2o(dq, dq_state, relaxed); + } else { + if (dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { + next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); + } + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - owned; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= next_owner; + if (_dq_state_is_base_wlh(old_state)) { + new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; + } + }); + if (_dq_state_is_base_wlh(old_state)) { + wlh = (dispatch_wlh_t)dq; + } else if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } + } + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { + dsc->dsc_override_qos = _dq_state_max_qos(old_state); + } + } + + if (unlikely(_dq_state_is_inner_queue(old_state))) { + dispatch_queue_t tq = dq->do_targetq; + if (likely(tq->dq_width == 1)) { + dsc->dc_flags = DISPATCH_OBJ_BARRIER_BIT | + DISPATCH_OBJ_SYNC_WAITER_BIT; + } else { + dsc->dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT; + } + _dispatch_introspection_queue_item_complete(dsc->_as_dc); + return _dispatch_queue_push_sync_waiter(tq, dsc, 0); + } + + return _dispatch_sync_waiter_wake(dsc, wlh, old_state, new_state); } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_queue_class_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, + uint64_t owned) { - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_barrier_complete(dq); -} + uint64_t old_state, new_state, enqueue; + dispatch_queue_t tq; -DISPATCH_ALWAYS_INLINE -static void -_dispatch_barrier_sync_f_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) -{ - _dispatch_introspection_barrier_sync_begin(dq, func); - if (slowpath(dq->do_targetq->do_targetq)) { - return _dispatch_barrier_sync_f_recurse(dq, ctxt, func, pp); + if (target == DISPATCH_QUEUE_WAKEUP_MGR) { + tq = &_dispatch_mgr_q; + enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } else if (target) { + tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? 
dq->do_targetq : target; + enqueue = DISPATCH_QUEUE_ENQUEUED; + } else { + tq = NULL; + enqueue = 0; } - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); -} - -typedef struct dispatch_barrier_sync_context_s { - struct dispatch_continuation_s dbsc_dc; - dispatch_thread_frame_s dbsc_dtf; -} *dispatch_barrier_sync_context_t; -static void -_dispatch_barrier_sync_f_slow_invoke(void *ctxt) -{ - dispatch_barrier_sync_context_t dbsc = ctxt; - dispatch_continuation_t dc = &dbsc->dbsc_dc; - dispatch_queue_t dq = dc->dc_data; - dispatch_thread_event_t event = (dispatch_thread_event_t)dc->dc_other; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state - owned, qos); + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (unlikely(_dq_state_is_suspended(old_state))) { + if (likely(_dq_state_is_base_wlh(old_state))) { + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else if (enqueue) { + if (!_dq_state_is_enqueued(old_state)) { + new_state |= enqueue; + } + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. 
+ // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + return dx_wakeup(dq, qos, flags); + }); + } else if (likely(_dq_state_is_base_wlh(old_state))) { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } + }); + old_state -= owned; + dispatch_assert(_dq_state_drain_locked_by_self(old_state)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); - dispatch_assert(dq == _dispatch_queue_get_current()); -#if DISPATCH_COCOA_COMPAT - if (slowpath(_dispatch_queue_is_thread_bound(dq))) { - dispatch_assert(_dispatch_thread_frame_get_current() == NULL); - // the block runs on the thread the queue is bound to and not - // on the calling thread, but we mean to see the calling thread - // dispatch thread frames, so we fake the link, and then undo it - _dispatch_thread_frame_set_current(&dbsc->dbsc_dtf); - // The queue is bound to a non-dispatch thread (e.g. main thread) - _dispatch_continuation_voucher_adopt(dc, DISPATCH_NO_VOUCHER, - DISPATCH_OBJ_CONSUME_BIT); - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - os_atomic_store2o(dc, dc_func, NULL, release); - _dispatch_thread_frame_set_current(NULL); + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } + + if (tq) { + if (likely((old_state ^ new_state) & enqueue)) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); + return _dispatch_queue_push_queue(tq, dq, new_state); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + // when doing sync to async handoff + // if the queue received an override we have to forecefully redrive + // the same override so that a new stealer is enqueued because + // the previous one may be gone already + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, new_state, + flags); + } #endif - _dispatch_thread_event_signal(event); // release + } + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } } DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_queue_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { - if (slowpath(!dq->do_targetq)) { - // see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - return _dispatch_sync_function_invoke(dq, ctxt, func); - } + dispatch_continuation_t dc_tmp, dc_start = NULL, dc_end = NULL; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + struct dispatch_object_s *dc = NULL; + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + size_t count = 0; - if (!pp) { - pp = _dispatch_get_priority(); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - dispatch_thread_event_s event; - _dispatch_thread_event_init(&event); - struct dispatch_barrier_sync_context_s dbsc = { - .dbsc_dc = { - .dc_data = dq, -#if DISPATCH_COCOA_COMPAT - .dc_func = func, - .dc_ctxt = ctxt, -#endif - .dc_other = &event, + dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE); + + if (dq->dq_items_tail && 
!DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + dc = _dispatch_queue_head(dq); + if (!_dispatch_object_is_sync_waiter(dc)) { + // not a slow item, needs to wake up + } else if (likely(dq->dq_width == 1) || + _dispatch_object_is_barrier(dc)) { + // rdar://problem/8290662 "barrier/writer lock transfer" + dc_start = dc_end = (dispatch_continuation_t)dc; + owned = 0; + count = 1; + dc = _dispatch_queue_next(dq, dc); + } else { + // "reader lock transfer" + // we must not wake waiters immediately because our right + // for dequeuing is granted through holding the full "barrier" width + // which a signaled work item could relinquish out from our feet + dc_start = (dispatch_continuation_t)dc; + do { + // no check on width here because concurrent queues + // do not respect width for blocked readers, the thread + // is already spent anyway + dc_end = (dispatch_continuation_t)dc; + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + count++; + dc = _dispatch_queue_next(dq, dc); + } while (dc && _dispatch_object_is_sync_waiter_non_barrier(dc)); } - }; -#if DISPATCH_COCOA_COMPAT - // It's preferred to execute synchronous blocks on the current thread - // due to thread-local side effects, etc. 
However, blocks submitted - // to the main thread MUST be run on the main thread - if (slowpath(_dispatch_queue_is_thread_bound(dq))) { - // consumed by _dispatch_barrier_sync_f_slow_invoke - // or in the DISPATCH_COCOA_COMPAT hunk below - _dispatch_continuation_voucher_set(&dbsc.dbsc_dc, dq, 0); - // save frame linkage for _dispatch_barrier_sync_f_slow_invoke - _dispatch_thread_frame_save_state(&dbsc.dbsc_dtf); - // thread bound queues cannot mutate their target queue hierarchy - // so it's fine to look now - _dispatch_introspection_barrier_sync_begin(dq, func); - } -#endif - uint32_t th_self = _dispatch_tid_self(); - struct dispatch_continuation_s dbss = { - .dc_flags = DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_func = _dispatch_barrier_sync_f_slow_invoke, - .dc_ctxt = &dbsc, - .dc_data = (void*)(uintptr_t)th_self, - .dc_priority = pp, - .dc_other = &event, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { - DISPATCH_CLIENT_CRASH(dq, "dispatch_barrier_sync called on queue " - "already owned by current thread"); + if (count) { + do { + dc_tmp = dc_start; + dc_start = dc_start->do_next; + _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc_tmp); + owned = DISPATCH_SYNC_WAITER_NO_UNLOCK; + } while (dc_tmp != dc_end); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } + return; + } + if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + target = DISPATCH_QUEUE_WAKEUP_TARGET; } - _dispatch_continuation_push_sync_slow(dq, &dbss); - _dispatch_thread_event_wait(&event); // acquire - _dispatch_thread_event_destroy(&event); - if (_dispatch_queue_received_override(dq, pp)) { - // Ensure that the root queue sees that this thread was overridden. 
- // pairs with the _dispatch_wqthread_override_start in - // _dispatch_continuation_slow_item_signal - _dispatch_set_defaultpriority_override(); - } + return _dispatch_queue_class_barrier_complete(dq, qos, flags, target,owned); +} #if DISPATCH_COCOA_COMPAT - // Queue bound to a non-dispatch thread - if (dbsc.dbsc_dc.dc_func == NULL) { - return; - } else if (dbsc.dbsc_dc.dc_voucher) { - // this almost never happens, unless a dispatch_sync() onto a thread - // bound queue went to the slow path at the same time dispatch_main() - // is called, or the queue is detached from the runloop. - _voucher_release(dbsc.dbsc_dc.dc_voucher); - } -#endif +static void +_dispatch_sync_thread_bound_invoke(void *ctxt) +{ + dispatch_sync_context_t dsc = ctxt; + dispatch_queue_t cq = _dispatch_queue_get_current(); + dispatch_queue_t orig_dq = dsc->dc_other; + dispatch_thread_frame_s dtf; + dispatch_assert(_dispatch_queue_is_thread_bound(cq)); + + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we mean to see the calling thread + // dispatch thread frames, so we fake the link, and then undo it + _dispatch_thread_frame_push_and_rebase(&dtf, orig_dq, &dsc->dsc_dtf); + _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); + _dispatch_thread_frame_pop(&dtf); - _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + // communicate back to _dispatch_sync_wait who the thread bound queue + // was so that we skip it during _dispatch_sync_complete_recurse + dsc->dc_other = cq; + dsc->dsc_func = NULL; + _dispatch_thread_event_signal(&dsc->dsc_event); // release } +#endif DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_barrier_sync_f2(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +static inline uint64_t +_dispatch_sync_wait_prepare(dispatch_queue_t dq) { - if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { - // global concurrent queues and queues bound to non-dispatch threads - // always 
fall into the slow case - return _dispatch_barrier_sync_f_slow(dq, ctxt, func, pp); - } - // - // TODO: the more correct thing to do would be to set dq_override to the qos - // of the thread that just acquired the barrier lock here. Unwinding that - // would slow down the uncontended fastpath however. - // - // The chosen tradeoff is that if an enqueue on a lower priority thread - // contends with this fastpath, this thread may receive a useless override. - // Improving this requires the override level to be part of the atomic - // dq_state - // - _dispatch_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (_dq_state_is_suspended(old_state) || + !_dq_state_is_base_wlh(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + if (!_dq_state_drain_locked(old_state) || + _dq_state_in_sync_transfer(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + }); + return new_state; } -DISPATCH_NOINLINE static void -_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp) +_dispatch_sync_waiter_compute_wlh(dispatch_queue_t dq, + dispatch_sync_context_t dsc) { - _dispatch_barrier_sync_f2(dq, ctxt, func, pp); -} + bool needs_locking = _dispatch_queue_is_legacy(dq); -DISPATCH_NOINLINE -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_barrier_sync_f2(dq, ctxt, func, 0); + if (needs_locking) { + dsc->dsc_release_storage = true; + _dispatch_queue_sidelock_lock(dq); + } + + dispatch_queue_t tq = dq->do_targetq; + uint64_t dq_state = _dispatch_sync_wait_prepare(tq); + + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_base_anon(dq_state)) { + dsc->dsc_release_storage = false; + dsc->dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(dq_state)) { + if 
(dsc->dsc_release_storage) { + _dispatch_queue_retain_storage(tq); + } + dsc->dc_data = (dispatch_wlh_t)tq; + } else { + _dispatch_sync_waiter_compute_wlh(tq, dsc); + } + if (needs_locking) _dispatch_queue_sidelock_unlock(dq); } -#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_sync_block_with_private_data(dispatch_queue_t dq, - void (^work)(void), dispatch_block_flags_t flags) +_dispatch_sync_wait(dispatch_queue_t top_dq, void *ctxt, + dispatch_function_t func, uintptr_t top_dc_flags, + dispatch_queue_t dq, uintptr_t dc_flags) { - pthread_priority_t pp = _dispatch_block_get_priority(work); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_tid tid = _dispatch_tid_self(); + dispatch_qos_t qos; + uint64_t dq_state; - flags |= _dispatch_block_get_flags(work); - if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pthread_priority_t tp = _dispatch_get_priority(); - tp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp < tp) { - pp = tp | _PTHREAD_PRIORITY_ENFORCE_FLAG; - } else if (_dispatch_block_sync_should_enforce_qos_class(flags)) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } + dq_state = _dispatch_sync_wait_prepare(dq); + if (unlikely(_dq_state_drain_locked_by(dq_state, tid))) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch_sync called on queue " + "already owned by current thread"); } - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(_dispatch_block_get_data(work), - dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain(dq); + + struct dispatch_sync_context_s dsc = { + .dc_flags = dc_flags | DISPATCH_OBJ_SYNC_WAITER_BIT, + .dc_other = top_dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = DISPATCH_NO_VOUCHER, + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_waiter = tid, + }; + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_base_anon(dq_state)) { + dsc.dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(dq_state)) { + dsc.dc_data = (dispatch_wlh_t)dq; + } else { + 
_dispatch_sync_waiter_compute_wlh(dq, &dsc); } - if (flags & DISPATCH_BLOCK_BARRIER) { - _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, pp); +#if DISPATCH_COCOA_COMPAT + // It's preferred to execute synchronous blocks on the current thread + // due to thread-local side effects, etc. However, blocks submitted + // to the main thread MUST be run on the main thread + // + // Since we don't know whether that will happen, save the frame linkage + // for the sake of _dispatch_sync_thread_bound_invoke + _dispatch_thread_frame_save_state(&dsc.dsc_dtf); + + // Since the continuation doesn't have the CONSUME bit, the voucher will be + // retained on adoption on the thread bound queue if it happens so we can + // borrow this thread's reference + dsc.dc_voucher = _voucher_get(); + dsc.dc_func = _dispatch_sync_thread_bound_invoke; + dsc.dc_ctxt = &dsc; +#endif + + if (dsc.dc_data == DISPATCH_WLH_ANON) { + dsc.dsc_override_qos_floor = dsc.dsc_override_qos = + _dispatch_get_basepri_override_qos_floor(); + qos = _dispatch_qos_from_pp(pp); + _dispatch_thread_event_init(&dsc.dsc_event); } else { - _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, pp); + qos = 0; + } + _dispatch_queue_push_sync_waiter(dq, &dsc, qos); + if (dsc.dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_wait(&dsc.dsc_event); // acquire + _dispatch_thread_event_destroy(&dsc.dsc_event); + // If _dispatch_sync_waiter_wake() gave this thread an override, + // ensure that the root queue sees it. 
+ if (dsc.dsc_override_qos > dsc.dsc_override_qos_floor) { + _dispatch_set_basepri_override_qos(dsc.dsc_override_qos); + } + } else { + _dispatch_event_loop_wait_for_ownership(&dsc); } -} - -void -dispatch_barrier_sync(dispatch_queue_t dq, void (^work)(void)) -{ - if (slowpath(_dispatch_block_has_private_data(work))) { - dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; - return _dispatch_sync_block_with_private_data(dq, work, flags); + _dispatch_introspection_sync_begin(top_dq); +#if DISPATCH_COCOA_COMPAT + if (unlikely(dsc.dsc_func == NULL)) { + // Queue bound to a non-dispatch thread, the continuation already ran + // so just unlock all the things, except for the thread bound queue + dispatch_queue_t bound_dq = dsc.dc_other; + return _dispatch_sync_complete_recurse(top_dq, bound_dq, top_dc_flags); } - dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); -} #endif + _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags); +} DISPATCH_NOINLINE -void -_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +static void +_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - // Use for mutation of queue-/source-internal state only, ignores target - // queue hierarchy! 
- if (!fastpath(_dispatch_queue_try_acquire_barrier_sync(dq))) { - return _dispatch_barrier_async_detached_f(dq, ctxt, func); + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); } - // skip the recursion because it's about the queue state only - _dispatch_barrier_sync_f_invoke(dq, ctxt, func); + _dispatch_sync_wait(dq, ctxt, func, dc_flags, dq, dc_flags); } #pragma mark - -#pragma mark dispatch_sync +#pragma mark dispatch_sync / dispatch_barrier_sync DISPATCH_NOINLINE static void -_dispatch_non_barrier_complete(dispatch_queue_t dq) +_dispatch_sync_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - uint64_t old_state, new_state; + dispatch_tid tid = _dispatch_tid_self(); + dispatch_queue_t tq = dq->do_targetq; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; - if (_dq_state_is_runnable(new_state)) { - if (!_dq_state_is_runnable(old_state)) { - // we're making a FULL -> non FULL transition - new_state |= DISPATCH_QUEUE_DIRTY; + do { + if (likely(tq->dq_width == 1)) { + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { + return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, + DISPATCH_OBJ_BARRIER_BIT); } - if (!_dq_state_drain_locked(new_state)) { - uint64_t full_width = new_state; - if (_dq_state_has_pending_barrier(new_state)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; - } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - new_state = full_width; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= _dispatch_tid_self(); - } + } else { + if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { + return _dispatch_sync_wait(dq, ctxt, func, 
dc_flags, tq, 0); } } - }); + tq = tq->do_targetq; + } while (unlikely(tq->do_targetq)); - if (_dq_state_is_in_barrier(new_state)) { - return _dispatch_try_lock_transfer_or_wakeup(dq); - } - if (!_dq_state_is_runnable(old_state)) { - _dispatch_queue_try_wakeup(dq, new_state, 0); - } + return _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags); } DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) -{ - dispatch_assert(dq->do_targetq); - if (!pp) { - pp = _dispatch_get_priority(); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; - } - dispatch_thread_event_s event; - _dispatch_thread_event_init(&event); - uint32_t th_self = _dispatch_tid_self(); - struct dispatch_continuation_s dc = { - .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, -#if DISPATCH_INTROSPECTION - .dc_func = func, - .dc_ctxt = ctxt, -#endif - .dc_data = (void*)(uintptr_t)th_self, - .dc_other = &event, - .dc_priority = pp, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_tid tid = _dispatch_tid_self(); - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by(dq_state, th_self))) { - DISPATCH_CLIENT_CRASH(dq, "dispatch_sync called on queue " - "already owned by current thread"); + // The more correct thing to do would be to merge the qos of the thread + // that just acquired the barrier lock into the queue state. + // + // However this is too expensive for the fastpath, so skip doing it. + // The chosen tradeoff is that if an enqueue on a lower priority thread + // contends with this fastpath, this thread may receive a useless override. 
+ // + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + return _dispatch_sync_f_slow(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); } - _dispatch_continuation_push_sync_slow(dq, &dc); - _dispatch_thread_event_wait(&event); // acquire - _dispatch_thread_event_destroy(&event); - if (_dispatch_queue_received_override(dq, pp)) { - // Ensure that the root queue sees that this thread was overridden. - // pairs with the _dispatch_wqthread_override_start in - // _dispatch_continuation_slow_item_signal - _dispatch_set_defaultpriority_override(); + _dispatch_introspection_sync_begin(dq); + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); } - _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + _dispatch_queue_barrier_sync_invoke_and_complete(dq, ctxt, func); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_f2(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) +DISPATCH_NOINLINE +void +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - // reserving non barrier width - // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width - // equivalent), so we have to check that this thread hasn't enqueued - // anything ahead of this call or we can break ordering - if (slowpath(dq->dq_items_tail)) { - return _dispatch_sync_f_slow(dq, ctxt, func, pp); + if (likely(dq->dq_width == 1)) { + return dispatch_barrier_sync_f(dq, ctxt, func); } - // concurrent queues do not respect width on sync - if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { - return _dispatch_sync_f_slow(dq, ctxt, func, pp); + + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if 
(unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { + return _dispatch_sync_f_slow(dq, ctxt, func, 0); + } + + _dispatch_introspection_sync_begin(dq); + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dq, ctxt, func, 0); } - _dispatch_non_barrier_sync_f_invoke_inline(dq, ctxt, func, pp); + _dispatch_sync_invoke_and_complete(dq, ctxt, func); } +#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp) +_dispatch_sync_block_with_private_data(dispatch_queue_t dq, + dispatch_block_t work, dispatch_block_flags_t flags) { - if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { - return _dispatch_sync_f2(dq, ctxt, func, pp); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + pthread_priority_t op = 0, p = 0; + + flags |= dbpd->dbpd_flags; + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } + ov = _dispatch_set_priority_and_voucher(p, v, 0); + + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq->_as_oq, relaxed)) { + _dispatch_retain_2(dq); + } + if (flags & DISPATCH_BLOCK_BARRIER) { + dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke); + } else { + dispatch_sync_f(dq, work, _dispatch_block_sync_invoke); } - return _dispatch_barrier_sync_f(dq, ctxt, func, pp); + _dispatch_reset_priority_and_voucher(op, ov); } -DISPATCH_NOINLINE void -dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work) { - if (DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width)) { - return _dispatch_sync_f2(dq, ctxt, func, 0); + if (unlikely(_dispatch_block_has_private_data(work))) { + dispatch_block_flags_t flags = 
DISPATCH_BLOCK_BARRIER; + return _dispatch_sync_block_with_private_data(dq, work, flags); } - return dispatch_barrier_sync_f(dq, ctxt, func); + dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); } -#ifdef __BLOCKS__ void -dispatch_sync(dispatch_queue_t dq, void (^work)(void)) +dispatch_sync(dispatch_queue_t dq, dispatch_block_t work) { - if (slowpath(_dispatch_block_has_private_data(work))) { + if (unlikely(_dispatch_block_has_private_data(work))) { return _dispatch_sync_block_with_private_data(dq, work, 0); } dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); } -#endif +#endif // __BLOCKS__ #pragma mark - #pragma mark dispatch_trysync -struct trysync_context { - dispatch_queue_t tc_dq; - void *tc_ctxt; - dispatch_function_t tc_func; -}; +DISPATCH_NOINLINE +static void +_dispatch_barrier_trysync_or_async_f_complete(dispatch_queue_t dq, + void *ctxt, dispatch_function_t func, uint32_t flags) +{ + dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE; + + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { + uint64_t dq_state = os_atomic_sub2o(dq, dq_state, + DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); + if (!_dq_state_is_suspended(dq_state)) { + wflags |= DISPATCH_WAKEUP_CONSUME_2; + } + } + dx_wakeup(dq, 0, wflags); +} +// Use for mutation of queue-/source-internal state only +// ignores target queue hierarchy! DISPATCH_NOINLINE -static int -_dispatch_trysync_recurse(dispatch_queue_t dq, - struct trysync_context *tc, bool barrier) +void +_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uint32_t flags) { - dispatch_queue_t tq = dq->do_targetq; + dispatch_tid tid = _dispatch_tid_self(); + uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 
1 : 0; + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, + suspend_count))) { + return _dispatch_barrier_async_detached_f(dq, ctxt, func); + } + if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { + _dispatch_retain_2(dq); // see _dispatch_queue_suspend + } + _dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags); +} - if (barrier) { - if (slowpath(!_dispatch_queue_try_acquire_barrier_sync(dq))) { - return EWOULDBLOCK; +DISPATCH_NOINLINE +static long +_dispatch_trysync_recurse(dispatch_queue_t dq, void *ctxt, + dispatch_function_t f, uintptr_t dc_flags) +{ + dispatch_tid tid = _dispatch_tid_self(); + dispatch_queue_t q, tq = dq->do_targetq; + + for (;;) { + if (likely(tq->do_targetq == NULL)) { + _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, f, dc_flags); + return true; } - } else { - // check nothing was queued by the current - // thread ahead of this call. _dispatch_queue_try_reserve_sync_width - // ignores the ENQUEUED bit which could cause it to miss a barrier_async - // made by the same thread just before. 
- if (slowpath(dq->dq_items_tail)) { - return EWOULDBLOCK; + if (unlikely(_dispatch_queue_cannot_trysync(tq))) { + for (q = dq; q != tq; q = q->do_targetq) { + _dispatch_queue_atomic_flags_set(q, DQF_CANNOT_TRYSYNC); + } + break; } - // concurrent queues do not respect width on sync - if (slowpath(!_dispatch_queue_try_reserve_sync_width(dq))) { - return EWOULDBLOCK; + if (likely(tq->dq_width == 1)) { + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { + break; + } + } else { + if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { + break; + } } + tq = tq->do_targetq; } - int rc = 0; - if (_dispatch_queue_cannot_trysync(tq)) { - _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); - rc = ENOTSUP; - } else if (tq->do_targetq) { - rc = _dispatch_trysync_recurse(tq, tc, tq->dq_width == 1); - if (rc == ENOTSUP) { - _dispatch_queue_atomic_flags_set(dq, DQF_CANNOT_TRYSYNC); - } - } else { - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, tq); - _dispatch_sync_function_invoke(tc->tc_dq, tc->tc_ctxt, tc->tc_func); - _dispatch_thread_frame_pop(&dtf); - } - if (barrier) { - _dispatch_barrier_complete(dq); - } else { - _dispatch_non_barrier_complete(dq); - } - return rc; + _dispatch_sync_complete_recurse(dq, tq, dc_flags); + return false; } DISPATCH_NOINLINE -bool +long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - if (slowpath(!dq->do_targetq)) { - _dispatch_sync_function_invoke(dq, ctxt, f); - return true; + dispatch_tid tid = _dispatch_tid_self(); + if (unlikely(!dq->do_targetq)) { + DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); } - if (slowpath(_dispatch_queue_cannot_trysync(dq))) { + if (unlikely(_dispatch_queue_cannot_trysync(dq))) { return false; } - struct trysync_context tc = { - .tc_dq = dq, - .tc_func = f, - .tc_ctxt = ctxt, - }; - return _dispatch_trysync_recurse(dq, &tc, true) == 0; + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { + 
return false; + } + return _dispatch_trysync_recurse(dq, ctxt, f, DISPATCH_OBJ_BARRIER_BIT); } DISPATCH_NOINLINE -bool +long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) { - if (slowpath(!dq->do_targetq)) { - _dispatch_sync_function_invoke(dq, ctxt, f); - return true; - } - if (slowpath(_dispatch_queue_cannot_trysync(dq))) { - return false; + if (likely(dq->dq_width == 1)) { + return _dispatch_barrier_trysync_f(dq, ctxt, f); } - struct trysync_context tc = { - .tc_dq = dq, - .tc_func = f, - .tc_ctxt = ctxt, - }; - return _dispatch_trysync_recurse(dq, &tc, dq->dq_width == 1) == 0; -} - -#pragma mark - -#pragma mark dispatch_after - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - void *ctxt, void *handler, bool block) -{ - dispatch_source_t ds; - uint64_t leeway, delta; - - if (when == DISPATCH_TIME_FOREVER) { -#if DISPATCH_DEBUG - DISPATCH_CLIENT_CRASH(0, "dispatch_after called with 'when' == infinity"); -#endif - return; + if (unlikely(!dq->do_targetq)) { + DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); } - - delta = _dispatch_timeout(when); - if (delta == 0) { - if (block) { - return dispatch_async(queue, handler); - } - return dispatch_async_f(queue, ctxt, handler); + if (unlikely(_dispatch_queue_cannot_trysync(dq))) { + return false; } - leeway = delta / 10; // - - if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; - if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; - - // this function can and should be optimized to not use a dispatch source - ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); - dispatch_assert(ds); - - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - if (block) { - _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); - } else { - _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { + return false; } - // 
reference `ds` so that it doesn't show up as a leak - dc->dc_data = ds; - _dispatch_source_set_event_handler_continuation(ds, dc); - dispatch_source_set_timer(ds, when, DISPATCH_TIME_FOREVER, leeway); - dispatch_activate(ds); -} - -DISPATCH_NOINLINE -void -dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, - dispatch_function_t func) -{ - _dispatch_after(when, queue, ctxt, func, false); -} - -#ifdef __BLOCKS__ -void -dispatch_after(dispatch_time_t when, dispatch_queue_t queue, - dispatch_block_t work) -{ - _dispatch_after(when, queue, NULL, work, true); + return _dispatch_trysync_recurse(dq, ctxt, f, 0); } -#endif #pragma mark - #pragma mark dispatch_queue_wakeup DISPATCH_NOINLINE void -_dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + return _dispatch_queue_barrier_complete(dq, qos, flags); + } if (_dispatch_queue_class_probe(dq)) { target = DISPATCH_QUEUE_WAKEUP_TARGET; } - if (target) { - return _dispatch_queue_class_wakeup(dq, pp, flags, target); - } else if (pp) { - return _dispatch_queue_class_override_drainer(dq, pp, flags); - } else if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); - } + return _dispatch_queue_class_wakeup(dq, qos, flags, target); } #if DISPATCH_COCOA_COMPAT @@ -4121,59 +4401,60 @@ _dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_ } #endif // DISPATCH_COCOA_COMPAT +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_runloop_queue_reset_max_qos(dispatch_queue_class_t dqu) +{ + uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | + DISPATCH_QUEUE_RECEIVED_OVERRIDE; + old_state = os_atomic_and_orig2o(dqu._dq, dq_state, ~clear_bits, relaxed); + return _dq_state_max_qos(old_state); +} + void 
-_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { #if DISPATCH_COCOA_COMPAT if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { // - return _dispatch_queue_wakeup(dq, pp, flags); + return _dispatch_queue_wakeup(dq, qos, flags); } + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + } if (_dispatch_queue_class_probe(dq)) { - return _dispatch_runloop_queue_poke(dq, pp, flags); + return _dispatch_runloop_queue_poke(dq, qos, flags); } - pp = _dispatch_queue_reset_override_priority(dq, true); - if (pp) { + qos = _dispatch_runloop_queue_reset_max_qos(dq); + if (qos) { mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); if (_dispatch_queue_class_probe(dq)) { - _dispatch_runloop_queue_poke(dq, pp, flags); + _dispatch_runloop_queue_poke(dq, qos, flags); } _dispatch_thread_override_end(owner, dq); return; } - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); } #else - return _dispatch_queue_wakeup(dq, pp, flags); + return _dispatch_queue_wakeup(dq, qos, flags); #endif } void -_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +_dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { #if DISPATCH_COCOA_COMPAT if (_dispatch_queue_is_thread_bound(dq)) { - return _dispatch_runloop_queue_wakeup(dq, pp, flags); + return _dispatch_runloop_queue_wakeup(dq, qos, flags); } #endif - return _dispatch_queue_wakeup(dq, pp, flags); -} - -void -_dispatch_root_queue_wakeup(dispatch_queue_t dq, - pthread_priority_t pp DISPATCH_UNUSED, - dispatch_wakeup_flags_t flags) -{ - if (flags & DISPATCH_WAKEUP_CONSUME) { - // see _dispatch_queue_push_set_head - dispatch_assert(flags & DISPATCH_WAKEUP_FLUSH); - } - 
_dispatch_global_queue_poke(dq); + return _dispatch_queue_wakeup(dq, qos, flags); } #pragma mark - @@ -4188,7 +4469,7 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq) return; } -#if TARGET_OS_MAC +#if HAVE_MACH mach_port_t mp = handle; kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); switch (kr) { @@ -4213,44 +4494,56 @@ _dispatch_runloop_queue_class_poke(dispatch_queue_t dq) DISPATCH_NOINLINE static void -_dispatch_runloop_queue_poke(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) +_dispatch_runloop_queue_poke(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { - // it's not useful to handle WAKEUP_FLUSH because mach_msg() will have - // a release barrier and that when runloop queues stop being thread bound + // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have + // a release barrier and that when runloop queues stop being thread-bound // they have a non optional wake-up to start being a "normal" queue // either in _dispatch_runloop_queue_xref_dispose, // or in _dispatch_queue_cleanup2() for the main thread. 
+ uint64_t old_state, new_state; if (dq == &_dispatch_main_q) { dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); } - _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); - if (flags & DISPATCH_WAKEUP_OVERRIDING) { - mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(goto no_change); + } + }); + + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + if (qos > dq_qos) { + mach_port_t owner = _dq_state_drain_owner(new_state); + pthread_priority_t pp = _dispatch_qos_to_pp(qos); _dispatch_thread_override_start(owner, pp, dq); - if (flags & DISPATCH_WAKEUP_WAS_OVERRIDDEN) { + if (_dq_state_max_qos(old_state) > dq_qos) { _dispatch_thread_override_end(owner, dq); } } +no_change: _dispatch_runloop_queue_class_poke(dq); - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); } } #endif DISPATCH_NOINLINE static void -_dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) +_dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor) { dispatch_root_queue_context_t qc = dq->do_ctxt; - uint32_t i = n; - int r; + int remaining = n; + int r = ENOSYS; + _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL if (qc->dgq_kworkqueue != (void*)(~0ul)) #endif @@ -4265,46 +4558,62 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, _dispatch_worker_thread4, dq, &wh, &gen_cnt); (void)dispatch_assume_zero(r); - } while (--i); + } while (--remaining); return; } #endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#if 
HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - if (!dq->dq_priority) { - r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - qc->dgq_wq_options, (int)i); - (void)dispatch_assume_zero(r); - return; - } -#endif #if HAVE_PTHREAD_WORKQUEUE_QOS - r = _pthread_workqueue_addthreads((int)i, dq->dq_priority); - (void)dispatch_assume_zero(r); + r = _pthread_workqueue_addthreads(remaining, + _dispatch_priority_to_pp(dq->dq_priority)); +#elif DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP + r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, + qc->dgq_wq_options, remaining); #endif + (void)dispatch_assume_zero(r); return; } -#endif // HAVE_PTHREAD_WORKQUEUES +#endif // DISPATCH_USE_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; if (fastpath(pqc->dpq_thread_mediator.do_vtable)) { while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { - if (!--i) { + _dispatch_root_queue_debug("signaled sleeping worker for " + "global queue: %p", dq); + if (!--remaining) { return; } } } - uint32_t j, t_count; + + bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (overcommit) { + os_atomic_add2o(qc, dgq_pending, remaining, relaxed); + } else { + if (!os_atomic_cmpxchg2o(qc, dgq_pending, 0, remaining, relaxed)) { + _dispatch_root_queue_debug("worker thread request still pending for " + "global queue: %p", dq); + return; + } + } + + int32_t can_request, t_count; // seq_cst with atomic store to tail t_count = os_atomic_load2o(qc, dgq_thread_pool_size, ordered); do { - if (!t_count) { + can_request = t_count < floor ? 0 : t_count - floor; + if (remaining > can_request) { + _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", + remaining, can_request); + os_atomic_sub2o(qc, dgq_pending, remaining - can_request, relaxed); + remaining = can_request; + } + if (remaining == 0) { _dispatch_root_queue_debug("pthread pool is full for root queue: " "%p", dq); return; } - j = i > t_count ? 
t_count : i; } while (!os_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, - t_count - j, &t_count, acquire)); + t_count - remaining, &t_count, acquire)); pthread_attr_t *attr = &pqc->dpq_thread_attr; pthread_t tid, *pthr = &tid; @@ -4314,24 +4623,25 @@ _dispatch_global_queue_poke_slow(dispatch_queue_t dq, unsigned int n) } #endif do { - _dispatch_retain(dq); + _dispatch_retain(dq); // released in _dispatch_worker_thread while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { if (r != EAGAIN) { (void)dispatch_assume_zero(r); } _dispatch_temporary_resource_shortage(); } - } while (--j); + } while (--remaining); #endif // DISPATCH_USE_PTHREAD_POOL } -static inline void -_dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) +DISPATCH_NOINLINE +void +_dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor) { if (!_dispatch_queue_class_probe(dq)) { return; } -#if HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_WORKQUEUES dispatch_root_queue_context_t qc = dq->do_ctxt; if ( #if DISPATCH_USE_PTHREAD_POOL @@ -4342,39 +4652,129 @@ _dispatch_global_queue_poke_n(dispatch_queue_t dq, unsigned int n) "global queue: %p", dq); return; } -#endif // HAVE_PTHREAD_WORKQUEUES - return _dispatch_global_queue_poke_slow(dq, n); +#endif // DISPATCH_USE_WORKQUEUES + return _dispatch_global_queue_poke_slow(dq, n, floor); } -static inline void -_dispatch_global_queue_poke(dispatch_queue_t dq) +#pragma mark - +#pragma mark dispatch_queue_drain + +void +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, dispatch_queue_t dq) { - return _dispatch_global_queue_poke_n(dq, 1); + _dispatch_continuation_pop_inline(dou, dic, flags, dq); } -DISPATCH_NOINLINE void -_dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n) +_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t ov, + dispatch_invoke_flags_t flags) { - return _dispatch_global_queue_poke_n(dq, n); + 
_dispatch_continuation_invoke_inline(dou, ov, flags); } -#pragma mark - -#pragma mark dispatch_queue_drain +DISPATCH_NOINLINE +static void +_dispatch_return_to_kernel(void) +{ + if (unlikely(_dispatch_get_wlh() == DISPATCH_WLH_ANON)) { + _dispatch_clear_return_to_kernel(); + } else { + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } +} void -_dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags) +_dispatch_poll_for_events_4launchd(void) { - _dispatch_continuation_pop_inline(dou, dq, flags); +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_get_wlh()) { + dispatch_assert(_dispatch_deferred_items_get()->ddi_wlh_servicing); + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } +#endif } -void -_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, - dispatch_invoke_flags_t flags) +#if HAVE_PTHREAD_WORKQUEUE_NARROWING +static os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_MAX]; +#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS +static uint64_t _dispatch_narrow_check_interval_cache; +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_narrow_check_interval(void) +{ +#if DISPATCH_TIME_UNIT_USES_NANOSECONDS + return 50 * NSEC_PER_MSEC; +#else + if (_dispatch_narrow_check_interval_cache == 0) { + _dispatch_narrow_check_interval_cache = + _dispatch_time_nano2mach(50 * NSEC_PER_MSEC); + } + return _dispatch_narrow_check_interval_cache; +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, + dispatch_priority_t pri) { - _dispatch_continuation_invoke_inline(dou, override_voucher, flags); + if (_dispatch_priority_qos(pri) && + !(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + dic->dic_next_narrow_check = _dispatch_approximate_time() + + _dispatch_narrow_check_interval(); + } +} + +DISPATCH_NOINLINE +static bool +_dispatch_queue_drain_should_narrow_slow(uint64_t now, + 
dispatch_invoke_context_t dic) +{ + if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_qos_t qos = _dispatch_qos_from_pp(pp); + if (unlikely(!qos || qos > countof(_dispatch_narrowing_deadlines))) { + DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); + } + size_t idx = qos - 1; // no entry needed for DISPATCH_QOS_UNSPECIFIED + os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; + uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); + + dic->dic_next_narrow_check = newval; + os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { + if (now < oldval) { + os_atomic_rmw_loop_give_up(return false); + } + }); + + if (!_pthread_workqueue_should_narrow(pp)) { + return false; + } + dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) +{ + uint64_t next_check = dic->dic_next_narrow_check; + if (unlikely(next_check)) { + uint64_t now = _dispatch_approximate_time(); + if (unlikely(next_check < now)) { + return _dispatch_queue_drain_should_narrow_slow(now, dic); + } + } + return false; } +#else +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) +#define _dispatch_queue_drain_should_narrow(dic) false +#endif /* * Drain comes in 2 flavours (serial/concurrent) and 2 modes @@ -4404,86 +4804,109 @@ _dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, * queue drain moves to the more efficient serial mode. 
*/ DISPATCH_ALWAYS_INLINE -static dispatch_queue_t -_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, - uint64_t *owned_ptr, struct dispatch_object_s **dc_out, - bool serial_drain) +static dispatch_queue_wakeup_target_t +_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) { dispatch_queue_t orig_tq = dq->do_targetq; dispatch_thread_frame_s dtf; struct dispatch_object_s *dc = NULL, *next_dc; - uint64_t owned = *owned_ptr; + uint64_t dq_state, owned = *owned_ptr; + + if (unlikely(!dq->dq_items_tail)) return NULL; _dispatch_thread_frame_push(&dtf, dq); - if (_dq_state_is_in_barrier(owned)) { + if (serial_drain || _dq_state_is_in_barrier(owned)) { // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` // but width can change while draining barrier work items, so we only // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` owned = DISPATCH_QUEUE_IN_BARRIER; + } else { + owned &= DISPATCH_QUEUE_WIDTH_MASK; } - while (dq->dq_items_tail) { - dc = _dispatch_queue_head(dq); - do { - if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(dq))) { - goto out; + dc = _dispatch_queue_head(dq); + goto first_iteration; + + for (;;) { + dc = next_dc; + if (unlikely(dic->dic_deferred)) { + goto out_with_deferred_compute_owned; + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (unlikely(!dc)) { + if (!dq->dq_items_tail) { + break; } - if (unlikely(orig_tq != dq->do_targetq)) { - goto out; + dc = _dispatch_queue_head(dq); + } + if (unlikely(serial_drain != (dq->dq_width == 1))) { + break; + } + if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + break; + } + +first_iteration: + dq_state = os_atomic_load(&dq->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + break; + } + if (unlikely(orig_tq != dq->do_targetq)) { + break; + } + + if (serial_drain || 
_dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_IN_BARRIER; } - if (unlikely(serial_drain != (dq->dq_width == 1))) { - goto out; + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_sync_waiter(dc)) { + owned = 0; + dic->dic_deferred = dc; + goto out_with_deferred; } - if (serial_drain || _dispatch_object_is_barrier(dc)) { - if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { - goto out; - } - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_slow_item(dc)) { - owned = 0; - goto out_with_deferred; - } - } else { - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // we just ran barrier work items, we have to make their - // effect visible to other sync work items on other threads - // that may start coming in after this point, hence the - // release barrier - os_atomic_and2o(dq, dq_state, ~owned, release); - owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } else if (unlikely(owned == 0)) { - if (_dispatch_object_is_slow_item(dc)) { - // sync "readers" don't observe the limit - _dispatch_queue_reserve_sync_width(dq); - } else if (!_dispatch_queue_try_acquire_async(dq)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_WIDTH_INTERVAL; - } - - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_slow_item(dc)) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_continuation_slow_item_signal(dq, dc); - continue; + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may start coming in after this point, hence the + // release barrier + os_atomic_xor2o(dq, dq_state, owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_sync_waiter(dc)) { 
+ // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; } + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } - if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_continuation_redirect(dq, dc); - continue; - } + next_dc = _dispatch_queue_next(dq, dc); + if (_dispatch_object_is_sync_waiter(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_sync_waiter_redirect_or_wake(dq, + DISPATCH_SYNC_WAITER_NO_UNLOCK, dc); + continue; } - _dispatch_continuation_pop_inline(dc, dq, flags); - _dispatch_perfmon_workitem_inc(); - if (unlikely(dtf.dtf_deferred)) { - goto out_with_deferred_compute_owned; + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_continuation_redirect(dq, dc); + continue; } - } while ((dc = next_dc)); + } + + _dispatch_continuation_pop_inline(dc, dic, flags, dq); } -out: if (owned == DISPATCH_QUEUE_IN_BARRIER) { // if we're IN_BARRIER we really own the full width too owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -4491,14 +4914,15 @@ _dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, if (dc) { owned = _dispatch_queue_adjust_owned(dq, owned, dc); } - *owned_ptr = owned; + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; + *owned_ptr |= owned; _dispatch_thread_frame_pop(&dtf); return dc ? 
dq->do_targetq : NULL; out_with_no_width: - *owned_ptr = 0; + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; _dispatch_thread_frame_pop(&dtf); - return NULL; + return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; out_with_deferred_compute_owned: if (serial_drain) { @@ -4508,41 +4932,75 @@ _dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_flags_t flags, // if we're IN_BARRIER we really own the full width too owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; } - if (next_dc) { - owned = _dispatch_queue_adjust_owned(dq, owned, next_dc); + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); } } out_with_deferred: - *owned_ptr = owned; - if (unlikely(!dc_out)) { + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; + *owned_ptr |= owned; + if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) { DISPATCH_INTERNAL_CRASH(dc, "Deferred continuation on source, mach channel or mgr"); } - *dc_out = dc; _dispatch_thread_frame_pop(&dtf); return dq->do_targetq; } DISPATCH_NOINLINE -static dispatch_queue_t +static dispatch_queue_wakeup_target_t _dispatch_queue_concurrent_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) { - return _dispatch_queue_drain(dq, flags, owned, dc_ptr, false); + return _dispatch_queue_drain(dq, dic, flags, owned, false); } DISPATCH_NOINLINE -dispatch_queue_t -_dispatch_queue_serial_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr) +dispatch_queue_wakeup_target_t +_dispatch_queue_serial_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; - return _dispatch_queue_drain(dq, flags, owned, dc_ptr, true); + return _dispatch_queue_drain(dq, dic, flags, owned, 
true); } #if DISPATCH_COCOA_COMPAT +DISPATCH_NOINLINE +static void +_dispatch_main_queue_update_priority_from_thread(void) +{ + dispatch_queue_t dq = &_dispatch_main_q; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + mach_port_t owner = _dq_state_drain_owner(dq_state); + + dispatch_priority_t main_pri = + _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); + dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); + dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); + dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); + + // the main thread QoS was adjusted by someone else, learn the new QoS + // and reinitialize _dispatch_main_q.dq_priority + dq->dq_priority = _dispatch_priority_with_override_qos(main_pri, main_qos); + + if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { + // main thread is opted out of QoS and we had an override + return _dispatch_thread_override_end(owner, dq); + } + + if (old_qos < max_qos && max_qos <= main_qos) { + // main QoS was raised, and we had an override which is now useless + return _dispatch_thread_override_end(owner, dq); + } + + if (main_qos < max_qos && max_qos <= old_qos) { + // main thread QoS was lowered, and we actually need an override + pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); + return _dispatch_thread_override_start(owner, pp, dq); + } +} + static void _dispatch_main_queue_drain(void) { @@ -4553,45 +5011,54 @@ _dispatch_main_queue_drain(void) return; } + _dispatch_perfmon_start_notrace(); if (!fastpath(_dispatch_queue_is_thread_bound(dq))) { DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" " after dispatch_main()"); } - mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); - if (slowpath(owner != _dispatch_tid_self())) { - DISPATCH_CLIENT_CRASH(owner, "_dispatch_main_queue_callback_4CF called" + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { + 
DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "_dispatch_main_queue_callback_4CF called" " from the wrong thread"); } dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); - _dispatch_perfmon_start(); // hide the frame chaining when CFRunLoop // drains the main runloop, as this should not be observable that way + _dispatch_adopt_wlh_anon(); _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + dispatch_qos_t qos = _dispatch_priority_qos(pri); voucher_t voucher = _voucher_copy(); + if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) { + _dispatch_main_queue_update_priority_from_thread(); + } + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + + dispatch_invoke_context_s dic = { }; struct dispatch_object_s *dc, *next_dc, *tail; dc = os_mpsc_capture_snapshot(dq, dq_items, &tail); do { next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); - _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); - _dispatch_perfmon_workitem_inc(); + _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); } while ((dc = next_dc)); - // runloop based queues use their port for the queue PUBLISH pattern - // so this raw call to dx_wakeup(0) is valid dx_wakeup(dq, 0, 0); _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_defaultpriority(old_dp); - _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); _dispatch_thread_frame_pop(&dtf); - _dispatch_perfmon_end(); + _dispatch_reset_wlh(); _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); } 
static bool @@ -4600,119 +5067,56 @@ _dispatch_runloop_queue_drain_one(dispatch_queue_t dq) if (!dq->dq_items_tail) { return false; } + _dispatch_perfmon_start_notrace(); dispatch_thread_frame_s dtf; - _dispatch_perfmon_start(); + bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse(); _dispatch_thread_frame_push(&dtf, dq); - pthread_priority_t old_pri = _dispatch_get_priority(); - pthread_priority_t old_dp = _dispatch_set_defaultpriority(old_pri, NULL); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); voucher_t voucher = _voucher_copy(); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + dispatch_invoke_context_s dic = { }; struct dispatch_object_s *dc, *next_dc; dc = _dispatch_queue_head(dq); next_dc = _dispatch_queue_next(dq, dc); - _dispatch_continuation_pop_inline(dc, dq, DISPATCH_INVOKE_NONE); - _dispatch_perfmon_workitem_inc(); + _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); if (!next_dc) { - // runloop based queues use their port for the queue PUBLISH pattern - // so this raw call to dx_wakeup(0) is valid dx_wakeup(dq, 0, 0); } _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_defaultpriority(old_dp); - _dispatch_reset_priority_and_voucher(old_pri, voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); _dispatch_thread_frame_pop(&dtf); - _dispatch_perfmon_end(); + if (should_reset_wlh) _dispatch_reset_wlh(); _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); return next_dc; } #endif -DISPATCH_NOINLINE -void -_dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq) -{ - dispatch_continuation_t dc_tmp, dc_start, dc_end; - struct dispatch_object_s *dc = NULL; - uint64_t dq_state, owned; - size_t count = 0; - - owned = DISPATCH_QUEUE_IN_BARRIER; - owned += dq->dq_width * 
DISPATCH_QUEUE_WIDTH_INTERVAL; -attempt_running_slow_head: - if (slowpath(dq->dq_items_tail) && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { - dc = _dispatch_queue_head(dq); - if (!_dispatch_object_is_slow_item(dc)) { - // not a slow item, needs to wake up - } else if (fastpath(dq->dq_width == 1) || - _dispatch_object_is_barrier(dc)) { - // rdar://problem/8290662 "barrier/writer lock transfer" - dc_start = dc_end = (dispatch_continuation_t)dc; - owned = 0; - count = 1; - dc = _dispatch_queue_next(dq, dc); - } else { - // "reader lock transfer" - // we must not signal semaphores immediately because our right - // for dequeuing is granted through holding the full "barrier" width - // which a signaled work item could relinquish out from our feet - dc_start = (dispatch_continuation_t)dc; - do { - // no check on width here because concurrent queues - // do not respect width for blocked readers, the thread - // is already spent anyway - dc_end = (dispatch_continuation_t)dc; - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - count++; - dc = _dispatch_queue_next(dq, dc); - } while (dc && _dispatch_object_is_slow_non_barrier(dc)); - } - - if (count) { - _dispatch_queue_drain_transfer_lock(dq, owned, dc_start); - do { - // signaled job will release the continuation - dc_tmp = dc_start; - dc_start = dc_start->do_next; - _dispatch_continuation_slow_item_signal(dq, dc_tmp); - } while (dc_tmp != dc_end); - return; - } - } - - if (dc || dx_metatype(dq) != _DISPATCH_QUEUE_TYPE) { - // the following wakeup is needed for sources - // or mach channels: when ds_pending_data is set at the same time - // as a trysync_f happens, lock transfer code above doesn't know about - // ds_pending_data or the wakeup logic, but lock transfer is useless - // for sources and mach channels in the first place. 
- owned = _dispatch_queue_adjust_owned(dq, owned, dc); - dq_state = _dispatch_queue_drain_unlock(dq, owned, NULL); - return _dispatch_queue_try_wakeup(dq, dq_state, 0); - } else if (!fastpath(_dispatch_queue_drain_try_unlock(dq, owned))) { - // someone enqueued a slow item at the head - // looping may be its last chance - goto attempt_running_slow_head; - } -} - void _dispatch_mgr_queue_drain(void) { const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; + dispatch_invoke_context_s dic = { }; dispatch_queue_t dq = &_dispatch_mgr_q; uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; if (dq->dq_items_tail) { _dispatch_perfmon_start(); - if (slowpath(_dispatch_queue_serial_drain(dq, flags, &owned, NULL))) { + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + if (slowpath(_dispatch_queue_serial_drain(dq, &dic, flags, &owned))) { DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); } _dispatch_voucher_debug("mgr queue clear", NULL); _voucher_clear(); - _dispatch_reset_defaultpriority_override(); - _dispatch_perfmon_end(); + _dispatch_reset_basepri_override(); + _dispatch_perfmon_end(perfmon_thread_manager); } #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -4727,102 +5131,43 @@ _dispatch_mgr_queue_drain(void) #pragma mark dispatch_queue_invoke void -_dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t to_unlock, - struct dispatch_object_s *dc) -{ - if (_dispatch_object_is_slow_item(dc)) { - dispatch_assert(to_unlock == 0); - _dispatch_queue_drain_transfer_lock(dq, to_unlock, dc); - _dispatch_continuation_slow_item_signal(dq, dc); - return _dispatch_release_tailcall(dq); - } - - bool should_defer_again = false, should_pend_queue = true; - uint64_t old_state, new_state; - - if (_dispatch_get_current_queue()->do_targetq) { - _dispatch_thread_frame_get_current()->dtf_deferred = dc; - should_defer_again = true; - should_pend_queue = false; - } - - if (dq->dq_width > 1) { - should_pend_queue = false; 
- } else if (should_pend_queue) { - dispatch_assert(to_unlock == - DISPATCH_QUEUE_WIDTH_INTERVAL + DISPATCH_QUEUE_IN_BARRIER); - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ - new_state = old_state; - if (_dq_state_has_waiters(old_state) || - _dq_state_is_enqueued(old_state)) { - os_atomic_rmw_loop_give_up(break); - } - new_state += DISPATCH_QUEUE_DRAIN_PENDED; - new_state -= DISPATCH_QUEUE_IN_BARRIER; - new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; - }); - should_pend_queue = (new_state & DISPATCH_QUEUE_DRAIN_PENDED); - } - - if (!should_pend_queue) { - if (to_unlock & DISPATCH_QUEUE_IN_BARRIER) { - _dispatch_try_lock_transfer_or_wakeup(dq); - _dispatch_release(dq); - } else if (to_unlock) { - uint64_t dq_state = _dispatch_queue_drain_unlock(dq, to_unlock, NULL); - _dispatch_queue_try_wakeup(dq, dq_state, DISPATCH_WAKEUP_CONSUME); - } else { - _dispatch_release(dq); - } - dq = NULL; - } - - if (!should_defer_again) { - dx_invoke(dc, flags & _DISPATCH_INVOKE_PROPAGATE_MASK); - } - - if (dq) { - uint32_t self = _dispatch_tid_self(); - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ - new_state = old_state; - if (!_dq_state_drain_pended(old_state) || - _dq_state_drain_owner(old_state) != self) { - os_atomic_rmw_loop_give_up({ - // We may have been overridden, so inform the root queue - _dispatch_set_defaultpriority_override(); - return _dispatch_release_tailcall(dq); - }); - } - new_state = DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(new_state); - }); - if (_dq_state_has_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); - } - return dx_invoke(dq, flags | DISPATCH_INVOKE_STEALING); +_dispatch_queue_drain_sync_waiter(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t owned) +{ + struct dispatch_object_s *dc = dic->dic_deferred; + dispatch_assert(_dispatch_object_is_sync_waiter(dc)); + dic->dic_deferred = NULL; + if (flags & DISPATCH_INVOKE_WLH) { + // Leave the enqueued bit in place, completion of the last sync waiter + // in the handoff chain is responsible for dequeuing + // + // We currently have a +2 to consume, but we need to keep a +1 + // for the thread request + dispatch_assert(_dq_state_is_enqueued_on_target(owned)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(owned)); + owned &= ~DISPATCH_QUEUE_ENQUEUED; + _dispatch_release_no_dispose(dq); + } else { + // The sync waiter must own a reference + _dispatch_release_2_no_dispose(dq); } + return _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc); } void -_dispatch_queue_finalize_activation(dispatch_queue_t dq) +_dispatch_queue_finalize_activation(dispatch_queue_t dq, + DISPATCH_UNUSED bool *allow_resume) { dispatch_queue_t tq = dq->do_targetq; _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); - if (dq->dq_override_voucher == DISPATCH_NO_VOUCHER) { - voucher_t v = tq->dq_override_voucher; - if (v != DISPATCH_NO_VOUCHER) { - if (v) _voucher_retain(v); - dq->dq_override_voucher = v; - } - } + _dispatch_queue_inherit_wlh_from_target(dq, tq); } DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr) +static inline dispatch_queue_wakeup_target_t +dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { dispatch_queue_t otq = dq->do_targetq; dispatch_queue_t cq = 
_dispatch_queue_get_current(); @@ -4831,18 +5176,19 @@ dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_flags_t flags, return otq; } if (dq->dq_width == 1) { - return _dispatch_queue_serial_drain(dq, flags, owned, dc_ptr); + return _dispatch_queue_serial_drain(dq, dic, flags, owned); } - return _dispatch_queue_concurrent_drain(dq, flags, owned, dc_ptr); + return _dispatch_queue_concurrent_drain(dq, dic, flags, owned); } // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol DISPATCH_NOINLINE void -_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) +_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(dq, flags, dispatch_queue_invoke2); + _dispatch_queue_class_invoke(dq, dic, flags, 0, dispatch_queue_invoke2); } #pragma mark - @@ -4851,16 +5197,16 @@ _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags) #if HAVE_PTHREAD_WORKQUEUE_QOS void _dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { dispatch_queue_t old_rq = _dispatch_queue_get_current(); dispatch_queue_t assumed_rq = dc->dc_other; + dispatch_priority_t old_dp; voucher_t ov = DISPATCH_NO_VOUCHER; dispatch_object_t dou; dou._do = dc->dc_data; - _dispatch_queue_set_current(assumed_rq); - flags |= DISPATCH_INVOKE_OVERRIDING; + old_dp = _dispatch_root_queue_identity_assume(assumed_rq); if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { flags |= DISPATCH_INVOKE_STEALING; } else { @@ -4870,49 +5216,44 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, } _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._do, flags); + dx_invoke(dou._do, dic, flags); } else { _dispatch_continuation_invoke_inline(dou, ov, flags); } }); + 
_dispatch_reset_basepri(old_dp); _dispatch_queue_set_current(old_rq); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_need_global_root_queue_push_override(dispatch_queue_t rq, - pthread_priority_t pp) +_dispatch_root_queue_push_needs_override(dispatch_queue_t rq, + dispatch_qos_t qos) { - pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; + dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority); + bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; - if (unlikely(!rqp)) return false; + if (unlikely(!rqos)) return false; - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return defaultqueue ? pp && pp != rqp : pp > rqp; + return defaultqueue ? qos && qos != rqos : qos > rqos; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_need_global_root_queue_push_override_stealer(dispatch_queue_t rq, - pthread_priority_t pp) +_dispatch_root_queue_push_queue_override_needed(dispatch_queue_t rq, + dispatch_qos_t qos) { - pthread_priority_t rqp = rq->dq_priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - bool defaultqueue = rq->dq_priority & _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG; - - if (unlikely(!rqp)) return false; - - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return defaultqueue || pp > rqp; + // for root queues, the override is the guaranteed minimum override level + return qos > _dispatch_priority_override_qos(rq->dq_priority); } DISPATCH_NOINLINE static void _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, - dispatch_object_t dou, pthread_priority_t pp) + dispatch_object_t dou, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = dou._dc; 
if (_dispatch_object_is_redirection(dc)) { @@ -4930,69 +5271,57 @@ _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, dc->dc_priority = DISPATCH_NO_PRIORITY; dc->dc_voucher = DISPATCH_NO_VOUCHER; } - - DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(rq, dc, 0, 0); + _dispatch_root_queue_push_inline(rq, dc, dc, 1); } DISPATCH_NOINLINE static void _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, - dispatch_queue_t dq, pthread_priority_t pp) + dispatch_queue_t dq, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_t rq = _dispatch_get_root_queue_for_priority(pp, overcommit); + bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); - _dispatch_retain(dq); + _dispatch_retain_2(dq); dc->dc_func = NULL; dc->dc_ctxt = dc; dc->dc_other = orig_rq; dc->dc_data = dq; dc->dc_priority = DISPATCH_NO_PRIORITY; dc->dc_voucher = DISPATCH_NO_VOUCHER; - - DISPATCH_COMPILER_CAN_ASSUME(dx_type(rq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(rq, dc, 0, 0); + _dispatch_root_queue_push_inline(rq, dc, dc, 1); } DISPATCH_NOINLINE static void -_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags, uint64_t dq_state) +_dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, + uint64_t dq_state, dispatch_wakeup_flags_t flags) { - mach_port_t owner = _dq_state_drain_owner(dq_state); - pthread_priority_t pp2; + dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state); dispatch_queue_t tq; bool locked; - if (owner) { - int rc = _dispatch_wqthread_override_start_check_owner(owner, pp, + if (_dq_state_is_base_anon(dq_state)) { + mach_port_t owner = 
_dq_state_drain_owner(dq_state); + if (owner) { + (void)_dispatch_wqthread_override_start_check_owner(owner, qos, &dq->dq_state_lock); - // EPERM means the target of the override is not a work queue thread - // and could be a thread bound queue such as the main queue. - // When that happens we must get to that queue and wake it up if we - // want the override to be appplied and take effect. - if (rc != EPERM) { goto out; } } - if (_dq_state_is_suspended(dq_state)) { - goto out; - } - tq = dq->do_targetq; - if (_dispatch_queue_has_immutable_target(dq)) { + if (likely(!_dispatch_queue_is_legacy(dq))) { locked = false; } else if (_dispatch_is_in_root_queues_array(tq)) { // avoid locking when we recognize the target queue as a global root // queue it is gross, but is a very common case. The locking isn't // needed because these target queues cannot go away. locked = false; - } else if (_dispatch_queue_sidelock_trylock(dq, pp)) { + } else if (_dispatch_queue_sidelock_trylock(dq, qos)) { // to traverse the tq chain safely we must // lock it to ensure it cannot change locked = true; @@ -5002,10 +5331,9 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // // Leading to being there, the current thread has: // 1. enqueued an object on `dq` - // 2. raised the dq_override value of `dq` - // 3. set the HAS_OVERRIDE bit and not seen an owner - // 4. tried and failed to acquire the side lock - // + // 2. raised the max_qos value, set RECEIVED_OVERRIDE on `dq` + // and didn't see an owner + // 3. tried and failed to acquire the side lock // // The side lock owner can only be one of three things: // @@ -5015,20 +5343,19 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // the eventual dispatch_resume(). // // - A dispatch_set_target_queue() call. 
The fact that we saw no `owner` - // means that the trysync it does wasn't being drained when (3) + // means that the trysync it does wasn't being drained when (2) // happened which can only be explained by one of these interleavings: // // o `dq` became idle between when the object queued in (1) ran and // the set_target_queue call and we were unlucky enough that our - // step (3) happened while this queue was idle. There is no reason + // step (2) happened while this queue was idle. There is no reason // to override anything anymore, the queue drained to completion // while we were preempted, our job is done. // - // o `dq` is queued but not draining during (1-3), then when we try - // to lock at (4) the queue is now draining a set_target_queue. - // Since we set HAS_OVERRIDE with a release barrier, the effect of - // (2) was visible to the drainer when he acquired the drain lock, - // and that guy has applied our override. Our job is done. + // o `dq` is queued but not draining during (1-2), then when we try + // to lock at (3) the queue is now draining a set_target_queue. + // This drainer must have seen the effects of (2) and that guy has + // applied our override. Our job is done. 
// // - Another instance of _dispatch_queue_class_wakeup_with_override(), // which is fine because trylock leaves a hint that we failed our @@ -5040,12 +5367,12 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, } apply_again: - if (dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { - if (_dispatch_need_global_root_queue_push_override_stealer(tq, pp)) { - _dispatch_root_queue_push_override_stealer(tq, dq, pp); + if (dx_hastypeflag(tq, QUEUE_ROOT)) { + if (_dispatch_root_queue_push_queue_override_needed(tq, qos)) { + _dispatch_root_queue_push_override_stealer(tq, dq, qos); } - } else if (_dispatch_queue_need_override(tq, pp)) { - dx_wakeup(tq, pp, DISPATCH_WAKEUP_OVERRIDING); + } else if (_dispatch_queue_need_override(tq, qos)) { + dx_wakeup(tq, qos, 0); } while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) { // rdar://problem/24081326 @@ -5054,9 +5381,9 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, // tried to acquire the side lock while we were running, and could have // had a better override than ours to apply. // - pp2 = dq->dq_override; - if (pp2 > pp) { - pp = pp2; + oqos = _dq_state_max_qos(os_atomic_load2o(dq, dq_state, relaxed)); + if (oqos > qos) { + qos = oqos; // The other instance had a better priority than ours, override // our thread, and apply the override that wasn't applied to `dq` // because of us. 
@@ -5065,264 +5392,262 @@ _dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, } out: - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); } } + + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, + uint64_t dq_state, dispatch_wakeup_flags_t flags) +{ + dispatch_assert(_dq_state_should_override(dq_state)); + + return _dispatch_queue_class_wakeup_with_override_slow(dq, dq_state, flags); +} #endif // HAVE_PTHREAD_WORKQUEUE_QOS DISPATCH_NOINLINE void -_dispatch_queue_class_override_drainer(dispatch_queue_t dq, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) +_dispatch_root_queue_push(dispatch_queue_t rq, dispatch_object_t dou, + dispatch_qos_t qos) { -#if HAVE_PTHREAD_WORKQUEUE_QOS - uint64_t dq_state, value; - - // - // Someone is trying to override the last work item of the queue. - // Do not remember this override on the queue because we know the precise - // duration the override is required for: until the current drain unlocks. 
- // - // That is why this function only tries to set HAS_OVERRIDE if we can - // still observe a drainer, and doesn't need to set the DIRTY bit - // because oq_override wasn't touched and there is no race to resolve - // - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if (!_dq_state_drain_locked(dq_state)) { - os_atomic_rmw_loop_give_up(break); +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (unlikely(ddi && ddi->ddi_can_stash)) { + dispatch_object_t old_dou = ddi->ddi_stashed_dou; + dispatch_priority_t rq_overcommit; + rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + + if (likely(!old_dou._do || rq_overcommit)) { + dispatch_queue_t old_rq = ddi->ddi_stashed_rq; + dispatch_qos_t old_qos = ddi->ddi_stashed_qos; + ddi->ddi_stashed_rq = rq; + ddi->ddi_stashed_dou = dou; + ddi->ddi_stashed_qos = qos; + _dispatch_debug("deferring item %p, rq %p, qos %d", + dou._do, rq, qos); + if (rq_overcommit) { + ddi->ddi_can_stash = false; + } + if (likely(!old_dou._do)) { + return; + } + // push the previously stashed item + qos = old_qos; + rq = old_rq; + dou = old_dou; } - value = dq_state | DISPATCH_QUEUE_HAS_OVERRIDE; - }); - if (_dq_state_drain_locked(dq_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, pp, - flags, dq_state); } -#else - (void)pp; -#endif // HAVE_PTHREAD_WORKQUEUE_QOS - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (_dispatch_root_queue_push_needs_override(rq, qos)) { + return _dispatch_root_queue_push_override(rq, dou, qos); } +#else + (void)qos; +#endif + _dispatch_root_queue_push_inline(rq, dou, dou, 1); } -#if DISPATCH_USE_KEVENT_WORKQUEUE -DISPATCH_NOINLINE -static void -_dispatch_trystash_to_deferred_items(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp, dispatch_deferred_items_t ddi) -{ - dispatch_priority_t old_pp = ddi->ddi_stashed_pp; - 
dispatch_queue_t old_dq = ddi->ddi_stashed_dq; - struct dispatch_object_s *old_dou = ddi->ddi_stashed_dou; - dispatch_priority_t rq_overcommit; - - rq_overcommit = dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - if (likely(!old_pp || rq_overcommit)) { - ddi->ddi_stashed_dq = dq; - ddi->ddi_stashed_dou = dou._do; - ddi->ddi_stashed_pp = (dispatch_priority_t)pp | rq_overcommit | - _PTHREAD_PRIORITY_PRIORITY_MASK; - if (likely(!old_pp)) { - return; - } - // push the previously stashed item - pp = old_pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dq = old_dq; - dou._do = old_dou; +void +_dispatch_root_queue_wakeup(dispatch_queue_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) +{ + if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + DISPATCH_INTERNAL_CRASH(dq->dq_priority, + "Don't try to wake up or override a root queue"); } - if (_dispatch_need_global_root_queue_push_override(dq, pp)) { - return _dispatch_root_queue_push_override(dq, dou, pp); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); } - // bit of cheating: we should really pass `pp` but we know that we are - // pushing onto a global queue at this point, and we just checked that - // `pp` doesn't matter. 
- DISPATCH_COMPILER_CAN_ASSUME(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE); - _dispatch_queue_push_inline(dq, dou, 0, 0); } -#endif DISPATCH_NOINLINE -static void -_dispatch_queue_push_slow(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp) +void +_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_qos_t qos) { - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); - _dispatch_queue_push(dq, dou, pp); + _dispatch_queue_push_inline(dq, dou, qos); } DISPATCH_NOINLINE void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp) +_dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { - _dispatch_assert_is_valid_qos_override(pp); - if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { -#if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (unlikely(ddi && !(ddi->ddi_stashed_pp & - (dispatch_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK))) { - dispatch_assert(_dispatch_root_queues_pred == DLOCK_ONCE_DONE); - return _dispatch_trystash_to_deferred_items(dq, dou, pp, ddi); + dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT); + + if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + + if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + // + // _dispatch_queue_class_barrier_complete() is about what both regular + // queues and sources needs to evaluate, but the former can have sync + // handoffs to perform which _dispatch_queue_class_barrier_complete() + // doesn't handle, only _dispatch_queue_barrier_complete() does. + // + // _dispatch_queue_wakeup() is the one for plain queues that calls + // _dispatch_queue_barrier_complete(), and this is only taken for non + // queue types. 
+ // + dispatch_assert(dx_metatype(dq) != _DISPATCH_QUEUE_TYPE); + qos = _dispatch_queue_override_qos(dq, qos); + return _dispatch_queue_class_barrier_complete(dq, qos, flags, target, + DISPATCH_QUEUE_SERIAL_DRAIN_OWNED); + } + + if (target) { + uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; + if (target == DISPATCH_QUEUE_WAKEUP_MGR) { + enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } + qos = _dispatch_queue_override_qos(dq, qos); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + if (likely(!_dq_state_is_suspended(old_state) && + !_dq_state_is_enqueued(old_state) && + (!_dq_state_drain_locked(old_state) || + (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR && + _dq_state_is_base_wlh(old_state))))) { + new_state |= enqueue; + } + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); + } + }); + + if (likely((old_state ^ new_state) & enqueue)) { + dispatch_queue_t tq; + if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { + // the rmw_loop above has no acquire barrier, as the last block + // of a queue asyncing to that queue is not an uncommon pattern + // and in that case the acquire would be completely useless + // + // so instead use depdendency ordering to read + // the targetq pointer. 
+ os_atomic_thread_fence(dependency); + tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, + (long)new_state); + } else { + tq = target; + } + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); } -#endif #if HAVE_PTHREAD_WORKQUEUE_QOS - // can't use dispatch_once_f() as it would create a frame - if (unlikely(_dispatch_root_queues_pred != DLOCK_ONCE_DONE)) { - return _dispatch_queue_push_slow(dq, dou, pp); + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, new_state, + flags); + } } - if (_dispatch_need_global_root_queue_push_override(dq, pp)) { - return _dispatch_root_queue_push_override(dq, dou, pp); + } else if (qos) { + // + // Someone is trying to override the last work item of the queue. + // + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (!_dq_state_drain_locked(old_state) || + !_dq_state_is_enqueued(old_state)) { + os_atomic_rmw_loop_give_up(goto done); + } + new_state = _dq_state_merge_qos(old_state, qos); + if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); + } + }); + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, new_state, + flags); } -#endif +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } +done: + if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) { + return _dispatch_release_2_tailcall(dq); } - _dispatch_queue_push_inline(dq, dou, pp, 0); } DISPATCH_NOINLINE static void -_dispatch_queue_class_wakeup_enqueue(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) +_dispatch_queue_push_sync_waiter(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_qos_t qos) { - dispatch_queue_t tq; + uint64_t old_state, new_state; - if (flags & (DISPATCH_WAKEUP_OVERRIDING | 
DISPATCH_WAKEUP_WAS_OVERRIDDEN)) { - // _dispatch_queue_drain_try_unlock may have reset the override while - // we were becoming the enqueuer - _dispatch_queue_reinstate_override_priority(dq, (dispatch_priority_t)pp); - } - if (!(flags & DISPATCH_WAKEUP_CONSUME)) { - _dispatch_retain(dq); - } - if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { - // try_become_enqueuer has no acquire barrier, as the last block - // of a queue asyncing to that queue is not an uncommon pattern - // and in that case the acquire is completely useless - // - // so instead use a thread fence here when we will read the targetq - // pointer because that is the only thing that really requires - // that barrier. - os_atomic_thread_fence(acquire); - tq = dq->do_targetq; - } else { - dispatch_assert(target == DISPATCH_QUEUE_WAKEUP_MGR); - tq = &_dispatch_mgr_q; + if (unlikely(dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE)) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_sync onto a network event queue"); } - return _dispatch_queue_push(tq, dq, pp); -} - -DISPATCH_NOINLINE -void -_dispatch_queue_class_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) -{ - uint64_t old_state, new_state, bits = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - _dispatch_queue_override_priority(dq, /* inout */ &pp, /* inout */ &flags); -#endif + _dispatch_trace_continuation_push(dq, dsc->_as_dc); - if (flags & DISPATCH_WAKEUP_FLUSH) { - bits = DISPATCH_QUEUE_DIRTY; - } - if (flags & DISPATCH_WAKEUP_OVERRIDING) { - // - // Setting the dirty bit here is about forcing callers of - // _dispatch_queue_drain_try_unlock() to loop again when an override - // has just been set to close the following race: - // - // Drainer (in drain_try_unlokc(): - // override_reset(); - // preempted.... - // - // Enqueuer: - // atomic_or(oq_override, override, relaxed); - // atomic_or(dq_state, HAS_OVERRIDE, release); - // - // Drainer: - // ... 
resumes - // successful drain_unlock() and leaks `oq_override` - // - bits = DISPATCH_QUEUE_DIRTY | DISPATCH_QUEUE_HAS_OVERRIDE; - } + if (unlikely(_dispatch_queue_push_update_tail(dq, dsc->_as_do))) { + // for slow waiters, we borrow the reference of the caller + // so we don't need to protect the wakeup with a temporary retain + _dispatch_queue_push_update_head(dq, dsc->_as_do); + if (unlikely(_dispatch_queue_is_thread_bound(dq))) { + return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } - if (flags & DISPATCH_WAKEUP_SLOW_WAITER) { uint64_t pending_barrier_width = (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - uint64_t xor_owner_and_set_full_width_and_in_barrier = - _dispatch_tid_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; - -#ifdef DLOCK_NOWAITERS_BIT - bits |= DLOCK_NOWAITERS_BIT; -#else - bits |= DLOCK_WAITERS_BIT; -#endif - flags ^= DISPATCH_WAKEUP_SLOW_WAITER; - dispatch_assert(!(flags & DISPATCH_WAKEUP_CONSUME)); - + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + // similar to _dispatch_queue_drain_try_unlock() os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state | bits; - if (_dq_state_drain_pended(old_state)) { - // same as DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT - // but we want to be more efficient wrt the WAITERS_BIT - new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; - new_state &= ~DISPATCH_QUEUE_DRAIN_PENDED; - } - if (unlikely(_dq_state_drain_locked(new_state))) { -#ifdef DLOCK_NOWAITERS_BIT - new_state &= ~(uint64_t)DLOCK_NOWAITERS_BIT; -#endif - } else if (unlikely(!_dq_state_is_runnable(new_state) || - !(flags & DISPATCH_WAKEUP_FLUSH))) { - // either not runnable, or was not for the first item (26700358) - // so we should not try to lock and handle overrides instead + new_state = _dq_state_merge_qos(old_state, qos); + new_state |= DISPATCH_QUEUE_DIRTY; + if 
(unlikely(_dq_state_drain_locked(old_state) || + !_dq_state_is_runnable(old_state))) { + // not runnable, so we should just handle overrides + } else if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued(old_state)) { + // 32123779 let the event thread redrive since it's out already } else if (_dq_state_has_pending_barrier(old_state) || new_state + pending_barrier_width < DISPATCH_QUEUE_WIDTH_FULL_BIT) { // see _dispatch_queue_drain_try_lock new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state ^= xor_owner_and_set_full_width_and_in_barrier; - } else { - new_state |= DISPATCH_QUEUE_ENQUEUED; + new_state |= set_owner_and_set_full_width_and_in_barrier; } }); + + if (_dq_state_is_base_wlh(old_state) && + (dsc->dsc_waiter == _dispatch_tid_self())) { + dsc->dsc_wlh_was_first = true; + } + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_try_lock_transfer_or_wakeup(dq); + return _dispatch_queue_barrier_complete(dq, qos, 0); } - } else if (bits) { - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release,{ - new_state = old_state | bits; - if (likely(_dq_state_should_wakeup(old_state))) { - new_state |= DISPATCH_QUEUE_ENQUEUED; +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, + new_state, 0); } - }); - } else { - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed,{ - new_state = old_state; - if (likely(_dq_state_should_wakeup(old_state))) { - new_state |= DISPATCH_QUEUE_ENQUEUED; - } else { - os_atomic_rmw_loop_give_up(break); + } + } else if (unlikely(qos)) { + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(return); } }); - } - - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - return 
_dispatch_queue_class_wakeup_enqueue(dq, pp, flags, target); - } - -#if HAVE_PTHREAD_WORKQUEUE_QOS - if ((flags & DISPATCH_WAKEUP_OVERRIDING) - && target == DISPATCH_QUEUE_WAKEUP_TARGET) { - return _dispatch_queue_class_wakeup_with_override(dq, pp, - flags, new_state); - } -#endif - - if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dq); + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_class_wakeup_with_override(dq, new_state, 0); + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS } } @@ -5367,7 +5692,7 @@ _dispatch_root_queue_drain_one_slow(dispatch_queue_t dq) (void)os_atomic_dec2o(qc, dgq_pending, relaxed); } if (!available) { - _dispatch_global_queue_poke(dq); + _dispatch_global_queue_poke(dq, 1, 0); } return available; } @@ -5430,50 +5755,110 @@ _dispatch_root_queue_drain_one(dispatch_queue_t dq) goto out; } // There must be a next item now. - _dispatch_wait_until(next = head->do_next); + next = os_mpsc_get_next(head, do_next); } os_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_global_queue_poke(dq); + _dispatch_global_queue_poke(dq, 1, 0); out: return head; } +#if DISPATCH_USE_KEVENT_WORKQUEUE void -_dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, - struct dispatch_object_s *dou, pthread_priority_t pp) -{ - struct _dispatch_identity_s di; +_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_t rq = ddi->ddi_stashed_rq; + dispatch_queue_t dq = ddi->ddi_stashed_dou._dq; + _dispatch_queue_set_current(rq); + dispatch_priority_t old_pri = _dispatch_set_basepri_wlh(rq->dq_priority); + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + uint64_t dq_state; + + ddi->ddi_wlh_servicing = true; + if (unlikely(_dispatch_needs_to_return_to_kernel())) { 
+ _dispatch_return_to_kernel(); + } +retry: + dispatch_assert(ddi->ddi_wlh_needs_delete); + _dispatch_trace_continuation_pop(rq, dq); + + if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { + dx_invoke(dq, &dic, flags); + if (!ddi->ddi_wlh_needs_delete) { + goto park; + } + dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 + goto park; + } + if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { + _dispatch_retain(dq); + _dispatch_trace_continuation_push(dq->do_targetq, dq); + goto retry; + } + } else { + if (_dq_state_is_suspended(dq_state)) { + dispatch_assert(!_dq_state_is_enqueued(dq_state)); + _dispatch_release_2_no_dispose(dq); + } else { + dispatch_assert(_dq_state_is_enqueued(dq_state)); + dispatch_assert(_dq_state_drain_locked(dq_state)); + _dispatch_release_no_dispose(dq); + } + } - // fake that we queued `dou` on `dq` for introspection purposes - _dispatch_trace_continuation_push(dq, dou); + _dispatch_event_loop_leave_deferred((dispatch_wlh_t)dq, dq_state); - pp = _dispatch_priority_inherit_from_root_queue(pp, dq); - _dispatch_queue_set_current(dq); - _dispatch_root_queue_identity_assume(&di, pp); +park: + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); + _dispatch_reset_basepri(old_pri); + _dispatch_reset_basepri_override(); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} + +void +_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_t rq = ddi->ddi_stashed_rq; + _dispatch_queue_set_current(rq); + dispatch_priority_t old_pri = _dispatch_set_basepri(rq->dq_priority); + + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; #if DISPATCH_COCOA_COMPAT - void *pool = 
_dispatch_last_resort_autorelease_pool_push(); + _dispatch_last_resort_autorelease_pool_push(&dic); #endif // DISPATCH_COCOA_COMPAT + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq); - _dispatch_perfmon_start(); - _dispatch_continuation_pop_inline(dou, dq, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_perfmon_workitem_inc(); - _dispatch_perfmon_end(); - + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); #if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(pool); + _dispatch_last_resort_autorelease_pool_pop(&dic); #endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_defaultpriority(di.old_pp); + _dispatch_reset_basepri(old_pri); + _dispatch_reset_basepri_override(); _dispatch_queue_set_current(NULL); _dispatch_voucher_debug("root queue clear", NULL); _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); } +#endif DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) static void -_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) +_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp) { #if DISPATCH_DEBUG dispatch_queue_t cq; @@ -5482,28 +5867,43 @@ _dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pri) } #endif _dispatch_queue_set_current(dq); - if (dq->dq_priority) pri = dq->dq_priority; - pthread_priority_t old_dp = _dispatch_set_defaultpriority(pri, NULL); -#if DISPATCH_COCOA_COMPAT - void *pool = _dispatch_last_resort_autorelease_pool_push(); -#endif // DISPATCH_COCOA_COMPAT + dispatch_priority_t pri = dq->dq_priority; + if (!pri) pri = _dispatch_priority_from_pp(pp); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_adopt_wlh_anon(); - _dispatch_perfmon_start(); struct dispatch_object_s *item; bool reset = false; + dispatch_invoke_context_s dic = { }; +#if DISPATCH_COCOA_COMPAT + 
_dispatch_last_resort_autorelease_pool_push(&dic); +#endif // DISPATCH_COCOA_COMPAT + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); + _dispatch_perfmon_start(); while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) { if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop_inline(item, dq, - DISPATCH_INVOKE_WORKER_DRAIN|DISPATCH_INVOKE_REDIRECTING_DRAIN); - _dispatch_perfmon_workitem_inc(); - reset = _dispatch_reset_defaultpriority_override(); + _dispatch_continuation_pop_inline(item, &dic, flags, dq); + reset = _dispatch_reset_basepri_override(); + if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { + break; + } + } + + // overcommit or not. worker thread + if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { + _dispatch_perfmon_end(perfmon_thread_worker_oc); + } else { + _dispatch_perfmon_end(perfmon_thread_worker_non_oc); } - _dispatch_perfmon_end(); #if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(pool); + _dispatch_last_resort_autorelease_pool_pop(&dic); #endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_defaultpriority(old_dp); + _dispatch_reset_wlh(); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); _dispatch_queue_set_current(NULL); } @@ -5518,7 +5918,7 @@ _dispatch_worker_thread4(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; _dispatch_introspection_thread_add(); - int pending = (int)os_atomic_dec2o(qc, dgq_pending, relaxed); + int pending = os_atomic_dec2o(qc, dgq_pending, relaxed); dispatch_assert(pending >= 0); _dispatch_root_queue_drain(dq, _dispatch_get_priority()); _dispatch_voucher_debug("root queue clear", NULL); @@ -5533,12 +5933,12 @@ _dispatch_worker_thread3(pthread_priority_t pp) dispatch_queue_t dq; pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; _dispatch_thread_setspecific(dispatch_priority_key, (void 
*)(uintptr_t)pp); - dq = _dispatch_get_root_queue_for_priority(pp, overcommit); + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); return _dispatch_worker_thread4(dq); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol static void @@ -5551,7 +5951,7 @@ _dispatch_worker_thread2(int priority, int options, return _dispatch_worker_thread4(dq); } -#endif // HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP +#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP #endif // HAVE_PTHREAD_WORKQUEUES #if DISPATCH_USE_PTHREAD_POOL @@ -5564,6 +5964,11 @@ _dispatch_worker_thread(void *context) dispatch_root_queue_context_t qc = dq->do_ctxt; dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + int pending = os_atomic_dec2o(qc, dgq_pending, relaxed); + if (unlikely(pending < 0)) { + DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); + } + if (pqc->dpq_observer_hooks.queue_will_execute) { _dispatch_set_pthread_root_queue_observer_hooks( &pqc->dpq_observer_hooks); @@ -5572,15 +5977,19 @@ _dispatch_worker_thread(void *context) pqc->dpq_thread_configure(); } - sigset_t mask; - int r; // workaround tweaks the kernel workqueue does for us - r = sigfillset(&mask); - (void)dispatch_assume_zero(r); - r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL); - (void)dispatch_assume_zero(r); + _dispatch_sigmask(); _dispatch_introspection_thread_add(); +#if DISPATCH_USE_INTERNAL_WORKQUEUE + bool overcommit = (qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); + bool manager = (dq == &_dispatch_mgr_root_queue); + bool monitored = !(overcommit || manager); + if (monitored) { + _dispatch_workq_worker_register(dq, qc->dgq_qos); + } +#endif + const int64_t timeout = 5ull * NSEC_PER_SEC; pthread_priority_t old_pri = _dispatch_get_priority(); do { @@ -5589,43 +5998,31 @@ 
_dispatch_worker_thread(void *context) } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, dispatch_time(0, timeout)) == 0); +#if DISPATCH_USE_INTERNAL_WORKQUEUE + if (monitored) { + _dispatch_workq_worker_unregister(dq, qc->dgq_qos); + } +#endif (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); - _dispatch_global_queue_poke(dq); - _dispatch_release(dq); - + _dispatch_global_queue_poke(dq, 1, 0); + _dispatch_release(dq); // retained in _dispatch_global_queue_poke_slow return NULL; } +#endif // DISPATCH_USE_PTHREAD_POOL -int -_dispatch_pthread_sigmask(int how, sigset_t *set, sigset_t *oset) -{ - int r; - - /* Workaround: 6269619 Not all signals can be delivered on any thread */ - - r = sigdelset(set, SIGILL); - (void)dispatch_assume_zero(r); - r = sigdelset(set, SIGTRAP); - (void)dispatch_assume_zero(r); -#if HAVE_DECL_SIGEMT - r = sigdelset(set, SIGEMT); - (void)dispatch_assume_zero(r); -#endif - r = sigdelset(set, SIGFPE); - (void)dispatch_assume_zero(r); - r = sigdelset(set, SIGBUS); - (void)dispatch_assume_zero(r); - r = sigdelset(set, SIGSEGV); - (void)dispatch_assume_zero(r); - r = sigdelset(set, SIGSYS); - (void)dispatch_assume_zero(r); - r = sigdelset(set, SIGPIPE); - (void)dispatch_assume_zero(r); +#pragma mark - +#pragma mark dispatch_network_root_queue +#if TARGET_OS_MAC - return pthread_sigmask(how, set, oset); +dispatch_queue_t +_dispatch_network_root_queue_create_4NW(const char *label, + const pthread_attr_t *attrs, dispatch_block_t configure) +{ + unsigned long flags = dispatch_pthread_root_queue_flags_pool_size(1); + return dispatch_pthread_root_queue_create(label, flags, attrs, configure); } -#endif // DISPATCH_USE_PTHREAD_POOL +#endif // TARGET_OS_MAC #pragma mark - #pragma mark dispatch_runloop_queue @@ -5643,9 +6040,10 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) return DISPATCH_BAD_INPUT; } dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; - dq = 
_dispatch_alloc(DISPATCH_VTABLE(queue_runloop), dqs); - _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, false); - dq->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT,true); + dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), dqs); + _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, + DISPATCH_QUEUE_ROLE_BASE_ANON); + dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); dq->dq_label = label ? label : "runloop-queue"; // no-copy contract _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); @@ -5658,19 +6056,19 @@ _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) { _dispatch_object_debug(dq, "%s", __func__); - pthread_priority_t pp = _dispatch_queue_reset_override_priority(dq, true); + dispatch_qos_t qos = _dispatch_runloop_queue_reset_max_qos(dq); _dispatch_queue_clear_bound_thread(dq); - dx_wakeup(dq, pp, DISPATCH_WAKEUP_FLUSH); - if (pp) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); + dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + if (qos) _dispatch_thread_override_end(DISPATCH_QUEUE_DRAIN_OWNER(dq), dq); } void -_dispatch_runloop_queue_dispose(dispatch_queue_t dq) +_dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free) { _dispatch_object_debug(dq, "%s", __func__); _dispatch_introspection_queue_dispose(dq); _dispatch_runloop_queue_handle_dispose(dq); - _dispatch_queue_destroy(dq); + _dispatch_queue_destroy(dq, allow_free); } bool @@ -5694,6 +6092,7 @@ _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) _dispatch_runloop_queue_wakeup(dq, 0, false); } +#if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { @@ -5702,6 +6101,7 @@ _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) } return _dispatch_runloop_queue_get_handle(dq); } +#endif static void _dispatch_runloop_queue_handle_init(void *ctxt) @@ -5820,12 +6220,7 @@ 
_dispatch_queue_set_mainq_drain_state(bool arg) void _dispatch_main_queue_callback_4CF( -#if TARGET_OS_MAC - mach_msg_header_t *_Null_unspecified msg -#else - void *ignored -#endif - DISPATCH_UNUSED) + void *ignored DISPATCH_UNUSED) { if (main_q_is_draining) { return; @@ -5840,6 +6235,7 @@ _dispatch_main_queue_callback_4CF( void dispatch_main(void) { + _dispatch_root_queues_init(); #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif @@ -5856,6 +6252,7 @@ dispatch_main(void) pthread_key_t dispatch_main_key; pthread_key_create(&dispatch_main_key, _dispatch_sig_thread); pthread_setspecific(dispatch_main_key, &dispatch_main_key); + _dispatch_sigmask(); #endif pthread_exit(NULL); DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); @@ -5890,52 +6287,25 @@ static void _dispatch_queue_cleanup2(void) { dispatch_queue_t dq = &_dispatch_main_q; - _dispatch_queue_clear_bound_thread(dq); + uint64_t old_state, new_state; - // - // Here is what happens when both this cleanup happens because of - // dispatch_main() being called, and a concurrent enqueuer makes the queue - // non empty. - // - // _dispatch_queue_cleanup2: - // atomic_and(dq_is_thread_bound, ~DQF_THREAD_BOUND, relaxed); - // maximal_barrier(); - // if (load(dq_items_tail, seq_cst)) { - // // do the wake up the normal serial queue way - // } else { - // // do no wake up <---- - // } - // - // enqueuer: - // store(dq_items_tail, new_tail, release); - // if (load(dq_is_thread_bound, relaxed)) { - // // do the wake up the runloop way <---- - // } else { - // // do the wake up the normal serial way - // } + // Turning the main queue from a runloop queue into an ordinary serial queue + // is a 3 steps operation: + // 1. finish taking the main queue lock the usual way + // 2. clear the THREAD_BOUND flag + // 3. 
do a handoff // - // what would be bad is to take both paths marked <---- because the queue - // wouldn't be woken up until the next time it's used (which may never - // happen) - // - // An enqueuer that speculates the load of the old value of thread_bound - // and then does the store may wake up the main queue the runloop way. - // But then, the cleanup thread will see that store because the load - // of dq_items_tail is sequentially consistent, and we have just thrown away - // our pipeline. - // - // By the time cleanup2() is out of the maximally synchronizing barrier, - // no other thread can speculate the wrong load anymore, and both cleanup2() - // and a concurrent enqueuer would treat the queue in the standard non - // thread bound way - - _dispatch_queue_atomic_flags_clear(dq, - DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC); - os_atomic_maximally_synchronizing_barrier(); - // no need to drop the override, the thread will die anyway - // the barrier above includes an acquire, so it's ok to do this raw - // call to dx_wakeup(0) - dx_wakeup(dq, 0, 0); + // If an enqueuer executes concurrently, he may do the wakeup the runloop + // way, because he still believes the queue to be thread-bound, but the + // dirty bit will force this codepath to notice the enqueue, and the usual + // lock transfer will do the proper wakeup. 
+ os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state & ~DISPATCH_QUEUE_DIRTY; + new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state += DISPATCH_QUEUE_IN_BARRIER; + }); + _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC); + _dispatch_queue_barrier_complete(dq, 0, 0); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called @@ -5944,7 +6314,7 @@ _dispatch_queue_cleanup2(void) #ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_DEFAULT, true), NULL, _dispatch_sig_thread); + DISPATCH_QOS_DEFAULT, true), NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } #endif @@ -5967,6 +6337,16 @@ _dispatch_queue_cleanup(void *ctxt) "Premature thread exit while a dispatch queue is running"); } +static void +_dispatch_wlh_cleanup(void *ctxt) +{ + // POSIX defines that destructors are only called if 'ctxt' is non-null + dispatch_queue_t wlh; + wlh = (dispatch_queue_t)((uintptr_t)ctxt & ~DISPATCH_WLH_STORAGE_REF); + _dispatch_queue_release_storage(wlh); +} + +DISPATCH_NORETURN static void _dispatch_deferred_items_cleanup(void *ctxt) { @@ -5975,6 +6355,7 @@ _dispatch_deferred_items_cleanup(void *ctxt) "Premature thread exit with unhandled deferred items"); } +DISPATCH_NORETURN static void _dispatch_frame_cleanup(void *ctxt) { @@ -5983,6 +6364,7 @@ _dispatch_frame_cleanup(void *ctxt) "Premature thread exit while a dispatch frame is active"); } +DISPATCH_NORETURN static void _dispatch_context_cleanup(void *ctxt) { diff --git a/src/queue_internal.h b/src/queue_internal.h index 1bff7b014..f70356a2c 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -44,21 +44,25 @@ #define DISPATCH_CACHELINE_ALIGN \ __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) +#define DISPATCH_CACHELINE_PAD_SIZE(type) \ + (roundup(sizeof(type), DISPATCH_CACHELINE_SIZE) - 
sizeof(type)) + #pragma mark - #pragma mark dispatch_queue_t DISPATCH_ENUM(dispatch_queue_flags, uint32_t, - DQF_NONE = 0x0000, - DQF_AUTORELEASE_ALWAYS = 0x0001, - DQF_AUTORELEASE_NEVER = 0x0002, -#define _DQF_AUTORELEASE_MASK 0x0003 - DQF_THREAD_BOUND = 0x0004, // queue is bound to a thread - DQF_BARRIER_BIT = 0x0008, // queue is a barrier on its target - DQF_TARGETED = 0x0010, // queue is targeted by another object - DQF_LABEL_NEEDS_FREE = 0x0020, // queue label was strduped; need to free it - DQF_CANNOT_TRYSYNC = 0x0040, - DQF_RELEASED = 0x0080, // xref_cnt == -1 + DQF_NONE = 0x00000000, + DQF_AUTORELEASE_ALWAYS = 0x00010000, + DQF_AUTORELEASE_NEVER = 0x00020000, +#define _DQF_AUTORELEASE_MASK 0x00030000 + DQF_THREAD_BOUND = 0x00040000, // queue is bound to a thread + DQF_BARRIER_BIT = 0x00080000, // queue is a barrier on its target + DQF_TARGETED = 0x00100000, // queue is targeted by another object + DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was strduped; need to free it + DQF_CANNOT_TRYSYNC = 0x00400000, + DQF_RELEASED = 0x00800000, // xref_cnt == -1 + DQF_LEGACY = 0x01000000, // only applies to sources // @@ -77,81 +81,71 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, // will be -p-. // // -pd - // Received EV_DELETE (from ap-), needs to free `ds_dkev`, the knote is - // gone from the kernel, but ds_dkev lives. Next state will be --d. + // Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote + // is gone from the kernel. Next state will be --d. // // -p- // Received an EV_ONESHOT event (from a--), or the delivery of an event // causing the cancellation to fail with EINPROGRESS was delivered - // (from ap-). The knote still lives, next state will be --d. + // (from ap-). The muxnote still lives, next state will be --d. // // --d - // Final state of the source, the knote is gone from the kernel and - // ds_dkev is freed. The source can safely be released. 
+ // Final state of the source, the muxnote is gone from the kernel and + // ds_refs is unregistered. The source can safely be released. // // a-d (INVALID) // apd (INVALID) // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If - // the knote is gone from the kernel, it makes no sense whatsoever to + // the muxnote is gone from the kernel, it makes no sense whatsoever to // have it armed. And generally speaking, once `d` or `p` has been set, // `a` cannot do a cleared -> set transition anymore // (see _dispatch_source_try_set_armed). // - DSF_CANCEL_WAITER = 0x0800, // synchronous waiters for cancel - DSF_CANCELED = 0x1000, // cancellation has been requested - DSF_ARMED = 0x2000, // source is armed - DSF_DEFERRED_DELETE = 0x4000, // source is pending delete - DSF_DELETED = 0x8000, // source knote is deleted + DSF_WLH_CHANGED = 0x04000000, + DSF_CANCEL_WAITER = 0x08000000, // synchronous waiters for cancel + DSF_CANCELED = 0x10000000, // cancellation has been requested + DSF_ARMED = 0x20000000, // source is armed + DSF_DEFERRED_DELETE = 0x40000000, // source is pending delete + DSF_DELETED = 0x80000000, // source muxnote is deleted #define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED) - DQF_WIDTH_MASK = 0xffff0000, -#define DQF_WIDTH_SHIFT 16 +#define DQF_FLAGS_MASK ((dispatch_queue_flags_t)0xffff0000) +#define DQF_WIDTH_MASK ((dispatch_queue_flags_t)0x0000ffff) +#define DQF_WIDTH(n) ((dispatch_queue_flags_t)(uint16_t)(n)) ); #define _DISPATCH_QUEUE_HEADER(x) \ struct os_mpsc_queue_s _as_oq[0]; \ DISPATCH_OBJECT_HEADER(x); \ _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \ - dispatch_queue_t dq_specific_q; \ - union { \ - uint32_t volatile dq_atomic_flags; \ - DISPATCH_STRUCT_LITTLE_ENDIAN_2( \ - uint16_t dq_atomic_bits, \ - uint16_t dq_width \ - ); \ - }; \ uint32_t dq_side_suspend_cnt; \ - DISPATCH_INTROSPECTION_QUEUE_HEADER; \ - dispatch_unfair_lock_s dq_sidelock - /* LP64: 32bit hole on LP64 */ + dispatch_unfair_lock_s dq_sidelock; \ + 
union { \ + dispatch_queue_t dq_specific_q; \ + struct dispatch_source_refs_s *ds_refs; \ + struct dispatch_timer_source_refs_s *ds_timer_refs; \ + struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + }; \ + DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \ + const uint16_t dq_width, \ + const uint16_t __dq_opaque \ + ); \ + DISPATCH_INTROSPECTION_QUEUE_HEADER + /* LP64: 32bit hole */ #define DISPATCH_QUEUE_HEADER(x) \ struct dispatch_queue_s _as_dq[0]; \ _DISPATCH_QUEUE_HEADER(x) -#define DISPATCH_QUEUE_ALIGN __attribute__((aligned(8))) +struct _dispatch_unpadded_queue_s { + _DISPATCH_QUEUE_HEADER(dummy); +}; -#define DISPATCH_QUEUE_WIDTH_POOL 0x7fff -#define DISPATCH_QUEUE_WIDTH_MAX 0x7ffe -#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ - ({ uint16_t _width = (width); \ - _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) +#define DISPATCH_QUEUE_CACHELINE_PAD \ + DISPATCH_CACHELINE_PAD_SIZE(struct _dispatch_unpadded_queue_s) #define DISPATCH_QUEUE_CACHELINE_PADDING \ char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] -#ifdef __LP64__ -#define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (sizeof(uint32_t) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ - + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) -#elif OS_OBJECT_HAVE_OBJC1 -#define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (11*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ - + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) -#else -#define DISPATCH_QUEUE_CACHELINE_PAD (( \ - (12*sizeof(void*) - DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE) \ - + DISPATCH_CACHELINE_SIZE) % DISPATCH_CACHELINE_SIZE) -#endif /* * dispatch queues `dq_state` demystified @@ -161,27 +155,27 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * Most Significant 32 bit Word * ---------------------------- * - * sc: suspend count (bits 63 - 57) + * sc: suspend count (bits 63 - 58) * The suspend count unsurprisingly holds the suspend count of the queue * Only 7 bits are stored inline. 
Extra counts are transfered in a side * suspend count and when that has happened, the ssc: bit is set. */ -#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0200000000000000ull -#define DISPATCH_QUEUE_SUSPEND_HALF 0x40u +#define DISPATCH_QUEUE_SUSPEND_INTERVAL 0x0400000000000000ull +#define DISPATCH_QUEUE_SUSPEND_HALF 0x20u /* - * ssc: side suspend count (bit 56) + * ssc: side suspend count (bit 57) * This bit means that the total suspend count didn't fit in the inline * suspend count, and that there are additional suspend counts stored in the * `dq_side_suspend_cnt` field. */ -#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0100000000000000ull +#define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull /* - * i: inactive bit (bit 55) + * i: inactive bit (bit 56) * This bit means that the object is inactive (see dispatch_activate) */ -#define DISPATCH_QUEUE_INACTIVE 0x0080000000000000ull +#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull /* - * na: needs activation (bit 54) + * na: needs activation (bit 55) * This bit is set if the object is created inactive. It tells * dispatch_queue_wakeup to perform various tasks at first wakeup. * @@ -189,27 +183,32 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * the object from being woken up (because _dq_state_should_wakeup will say * no), except in the dispatch_activate/dispatch_resume codepath. 
*/ -#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0040000000000000ull +#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0080000000000000ull /* * This mask covers the suspend count (sc), side suspend count bit (ssc), * inactive (i) and needs activation (na) bits */ -#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xffc0000000000000ull +#define DISPATCH_QUEUE_SUSPEND_BITS_MASK 0xff80000000000000ull /* - * ib: in barrier (bit 53) + * ib: in barrier (bit 54) * This bit is set when the queue is currently executing a barrier */ -#define DISPATCH_QUEUE_IN_BARRIER 0x0020000000000000ull +#define DISPATCH_QUEUE_IN_BARRIER 0x0040000000000000ull /* - * qf: queue full (bit 52) + * qf: queue full (bit 53) * This bit is a subtle hack that allows to check for any queue width whether * the full width of the queue is used or reserved (depending on the context) * In other words that the queue has reached or overflown its capacity. */ -#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0010000000000000ull -#define DISPATCH_QUEUE_WIDTH_FULL 0x8000ull +#define DISPATCH_QUEUE_WIDTH_FULL_BIT 0x0020000000000000ull +#define DISPATCH_QUEUE_WIDTH_FULL 0x1000ull +#define DISPATCH_QUEUE_WIDTH_POOL (DISPATCH_QUEUE_WIDTH_FULL - 1) +#define DISPATCH_QUEUE_WIDTH_MAX (DISPATCH_QUEUE_WIDTH_FULL - 2) +#define DISPATCH_QUEUE_USES_REDIRECTION(width) \ + ({ uint16_t _width = (width); \ + _width > 1 && _width < DISPATCH_QUEUE_WIDTH_POOL; }) /* - * w: width (bits 51 - 37) + * w: width (bits 52 - 41) * This encodes how many work items are in flight. Barriers hold `dq_width` * of them while they run. This is encoded as a signed offset with respect, * to full use, where the negative values represent how many available slots @@ -218,19 +217,19 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * * When this value is positive, then `wo` is always set to 1. 
*/ -#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000002000000000ull -#define DISPATCH_QUEUE_WIDTH_MASK 0x001fffe000000000ull -#define DISPATCH_QUEUE_WIDTH_SHIFT 37 +#define DISPATCH_QUEUE_WIDTH_INTERVAL 0x0000020000000000ull +#define DISPATCH_QUEUE_WIDTH_MASK 0x003ffe0000000000ull +#define DISPATCH_QUEUE_WIDTH_SHIFT 41 /* - * pb: pending barrier (bit 36) + * pb: pending barrier (bit 40) * Drainers set this bit when they couldn't run the next work item and it is * a barrier. When this bit is set, `dq_width - 1` work item slots are * reserved so that no wakeup happens until the last work item in flight * completes. */ -#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000001000000000ull +#define DISPATCH_QUEUE_PENDING_BARRIER 0x0000010000000000ull /* - * d: dirty bit (bit 35) + * d: dirty bit (bit 39) * This bit is set when a queue transitions from empty to not empty. * This bit is set before dq_items_head is set, with appropriate barriers. * Any thread looking at a queue head is responsible for unblocking any @@ -342,68 +341,70 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * * So on the async "acquire" side, there is no subtlety at all. */ -#define DISPATCH_QUEUE_DIRTY 0x0000000800000000ull +#define DISPATCH_QUEUE_DIRTY 0x0000008000000000ull /* - * qo: (bit 34) - * Set when a queue has a useful override set. - * This bit is only cleared when the final drain_try_unlock() succeeds. - * - * When the queue dq_override is touched (overrides or-ed in), usually with - * _dispatch_queue_override_priority(), then the HAS_OVERRIDE bit is set - * with a release barrier and one of these three things happen next: - * - * - the queue is enqueued, which will cause it to be drained, and the - * override to be handled by _dispatch_queue_drain_try_unlock(). - * In rare cases it could cause the queue to be queued while empty though. + * md: enqueued/draining on manager (bit 38) + * Set when enqueued and draining on the manager hierarchy. 
* - * - the DIRTY bit is also set with a release barrier, which pairs with - * the handling of these bits by _dispatch_queue_drain_try_unlock(), - * so that dq_override is reset properly. + * Unlike the ENQUEUED bit, it is kept until the queue is unlocked from its + * invoke call on the manager. This is used to prevent stealing, and + * overrides to be applied down the target queue chain. + */ +#define DISPATCH_QUEUE_ENQUEUED_ON_MGR 0x0000004000000000ull +/* + * r: queue graph role (bits 37 - 36) + * Queue role in the target queue graph * - * - the queue was suspended, and _dispatch_queue_resume() will handle the - * override as part of its wakeup sequence. + * 11: unused + * 10: WLH base + * 01: non wlh base + * 00: inner queue */ -#define DISPATCH_QUEUE_HAS_OVERRIDE 0x0000000400000000ull +#define DISPATCH_QUEUE_ROLE_MASK 0x0000003000000000ull +#define DISPATCH_QUEUE_ROLE_BASE_WLH 0x0000002000000000ull +#define DISPATCH_QUEUE_ROLE_BASE_ANON 0x0000001000000000ull +#define DISPATCH_QUEUE_ROLE_INNER 0x0000000000000000ull /* - * p: pended bit (bit 33) - * Set when a drain lock has been pended. When this bit is set, - * the drain lock is taken and ENQUEUED is never set. + * o: has override (bit 35, if role is DISPATCH_QUEUE_ROLE_BASE_ANON) + * Set when a queue has received a QOS override and needs to reset it. + * This bit is only cleared when the final drain_try_unlock() succeeds. * - * This bit marks a queue that needs further processing but was kept pended - * by an async drainer (not reenqueued) in the hope of being able to drain - * it further later. + * sw: has received sync wait (bit 35, if role DISPATCH_QUEUE_ROLE_BASE_WLH) + * Set when a queue owner has been exposed to the kernel because of + * dispatch_sync() contention. 
*/ -#define DISPATCH_QUEUE_DRAIN_PENDED 0x0000000200000000ull +#define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull +#define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull /* - * e: enqueued bit (bit 32) - * Set when a queue is enqueued on its target queue + * max_qos: max qos (bits 34 - 32) + * This is the maximum qos that has been enqueued on the queue */ -#define DISPATCH_QUEUE_ENQUEUED 0x0000000100000000ull +#define DISPATCH_QUEUE_MAX_QOS_MASK 0x0000000700000000ull +#define DISPATCH_QUEUE_MAX_QOS_SHIFT 32 /* * dl: drain lock (bits 31-0) * This is used by the normal drain to drain exlusively relative to other * drain stealers (like the QoS Override codepath). It holds the identity * (thread port) of the current drainer. + * + * st: sync transfer (bit 1 or 30) + * Set when a dispatch_sync() is transferred to + * + * e: enqueued bit (bit 0 or 31) + * Set when a queue is enqueued on its target queue */ -#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK 0x00000002ffffffffull -#ifdef DLOCK_NOWAITERS_BIT -#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ - ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_NOFAILED_TRYLOCK_BIT)) -#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ - (((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK))\ - ^ DLOCK_NOWAITERS_BIT) -#define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ - (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ - DLOCK_NOWAITERS_BIT) -#else -#define DISPATCH_QUEUE_DRAIN_OWNER_MASK \ - ((uint64_t)(DLOCK_OWNER_MASK | DLOCK_FAILED_TRYLOCK_BIT)) -#define DISPATCH_QUEUE_DRAIN_UNLOCK_PRESERVE_WAITERS_BIT(v) \ - ((v) & ~(DISPATCH_QUEUE_DRAIN_PENDED|DISPATCH_QUEUE_DRAIN_OWNER_MASK)) +#define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK) +#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) +#define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT) + #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ - (DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_HAS_OVERRIDE | \ 
- DLOCK_WAITERS_BIT) -#endif + (DISPATCH_QUEUE_ENQUEUED_ON_MGR | DISPATCH_QUEUE_ENQUEUED | \ + DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_MAX_QOS_MASK) + +#define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \ + (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \ + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER) + /* ******************************************************************************* * @@ -425,8 +426,6 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, * that right. To do so, prior to taking any decision, they also try to own * the full "barrier" width on the given queue. * - * see _dispatch_try_lock_transfer_or_wakeup - * ******************************************************************************* * * Enqueuing and wakeup rules @@ -497,12 +496,17 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL) DISPATCH_CLASS_DECL(queue); -#if !(defined(__cplusplus) && DISPATCH_INTROSPECTION) + +#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION struct dispatch_queue_s { _DISPATCH_QUEUE_HEADER(queue); DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only -} DISPATCH_QUEUE_ALIGN; -#endif // !(defined(__cplusplus) && DISPATCH_INTROSPECTION) +} DISPATCH_ATOMIC64_ALIGN; + +#if __has_feature(c_static_assert) && !DISPATCH_INTROSPECTION +_Static_assert(sizeof(struct dispatch_queue_s) <= 128, "dispatch queue size"); +#endif +#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue); @@ -520,16 +524,14 @@ typedef union { struct dispatch_source_s *_ds; struct dispatch_mach_s *_dm; struct dispatch_queue_specific_queue_s *_dqsq; - struct dispatch_timer_aggregate_s *_dta; #if USE_OBJC os_mpsc_queue_t _ojbc_oq; dispatch_queue_t _objc_dq; dispatch_source_t _objc_ds; dispatch_mach_t _objc_dm; dispatch_queue_specific_queue_t _objc_dqsq; - dispatch_timer_aggregate_t _objc_dta; #endif 
-} dispatch_queue_class_t __attribute__((__transparent_union__)); +} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION; typedef struct dispatch_thread_context_s *dispatch_thread_context_t; typedef struct dispatch_thread_context_s { @@ -546,52 +548,59 @@ typedef struct dispatch_thread_frame_s { // must be in the same order as our TSD keys! dispatch_queue_t dtf_queue; dispatch_thread_frame_t dtf_prev; - struct dispatch_object_s *dtf_deferred; } dispatch_thread_frame_s; -DISPATCH_ENUM(dispatch_queue_wakeup_target, long, - DISPATCH_QUEUE_WAKEUP_NONE = 0, - DISPATCH_QUEUE_WAKEUP_TARGET, - DISPATCH_QUEUE_WAKEUP_MGR, -); +typedef dispatch_queue_t dispatch_queue_wakeup_target_t; +#define DISPATCH_QUEUE_WAKEUP_NONE ((dispatch_queue_wakeup_target_t)0) +#define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1) +#define DISPATCH_QUEUE_WAKEUP_MGR (&_dispatch_mgr_q) +#define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1) -void _dispatch_queue_class_override_drainer(dispatch_queue_t dqu, - pthread_priority_t pp, dispatch_wakeup_flags_t flags); -void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, pthread_priority_t pp, +void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target); - -void _dispatch_queue_destroy(dispatch_queue_t dq); -void _dispatch_queue_dispose(dispatch_queue_t dq); +dispatch_priority_t _dispatch_queue_compute_priority_and_wlh( + dispatch_queue_t dq, dispatch_wlh_t *wlh_out); +void _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free); +void _dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free); +void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq); void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq); void _dispatch_queue_suspend(dispatch_queue_t dq); void _dispatch_queue_resume(dispatch_queue_t dq, bool activate); -void _dispatch_queue_finalize_activation(dispatch_queue_t dq); 
-void _dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_flags_t flags); -void _dispatch_queue_push_list_slow(dispatch_queue_t dq, unsigned int n); +void _dispatch_queue_finalize_activation(dispatch_queue_t dq, + bool *allow_resume); +void _dispatch_queue_invoke(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor); void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - pthread_priority_t pp); -void _dispatch_try_lock_transfer_or_wakeup(dispatch_queue_t dq); -void _dispatch_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, + dispatch_qos_t qos); +void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -dispatch_queue_t _dispatch_queue_serial_drain(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t *owned, - struct dispatch_object_s **dc_ptr); -void _dispatch_queue_drain_deferred_invoke(dispatch_queue_t dq, - dispatch_invoke_flags_t flags, uint64_t to_unlock, - struct dispatch_object_s *dc); -void _dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t - dqsq); -void _dispatch_root_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned); +void _dispatch_queue_drain_sync_waiter(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t owned); +void _dispatch_queue_specific_queue_dispose( + dispatch_queue_specific_queue_t dqsq, bool *allow_free); +void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_root_queue_drain_deferred_item(dispatch_queue_t dq, - struct dispatch_object_s *dou, pthread_priority_t pp); -void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq); -void 
_dispatch_main_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_qos_t qos); +#if DISPATCH_USE_KEVENT_WORKQUEUE +void _dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO); +void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO); +#endif +void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq, + bool *allow_free); +void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, +void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); -void _dispatch_runloop_queue_dispose(dispatch_queue_t dq); +void _dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free); void _dispatch_mgr_queue_drain(void); #if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES void _dispatch_mgr_priority_init(void); @@ -603,13 +612,14 @@ void _dispatch_kevent_workqueue_init(void); #else static inline void _dispatch_kevent_workqueue_init(void) {} #endif -void _dispatch_sync_recurse_invoke(void *ctxt); void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); +#define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1 void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); + dispatch_function_t func, uint32_t flags); +void _dispatch_queue_atfork_child(void); #if DISPATCH_DEBUG void dispatch_debug_queue(dispatch_queue_t dq, const char* str); @@ -622,10 +632,9 @@ size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); size_t 
_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz); -#define DISPATCH_QUEUE_QOS_COUNT 6 -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QUEUE_QOS_COUNT * 2) +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2) -// must be in lowest to highest qos order (as encoded in pthread_priority_t) +// must be in lowest to highest qos order (as encoded in dispatch_qos_t) // overcommit qos index values need bit 1 set enum { DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, @@ -643,18 +652,25 @@ enum { _DISPATCH_ROOT_QUEUE_IDX_COUNT, }; +// skip zero +// 1 - main_q +// 2 - mgr_q +// 3 - mgr_root_q +// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues +// we use 'xadd' on Intel, so the initial value == next assigned +#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 16 extern unsigned long volatile _dispatch_queue_serial_numbers; extern struct dispatch_queue_s _dispatch_root_queues[]; extern struct dispatch_queue_s _dispatch_mgr_q; void _dispatch_root_queues_init(void); -#if HAVE_PTHREAD_WORKQUEUE_QOS -extern pthread_priority_t _dispatch_background_priority; -extern pthread_priority_t _dispatch_user_initiated_priority; +#if DISPATCH_DEBUG +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ + dispatch_assert_queue(&_dispatch_mgr_q) +#else +#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif -typedef uint8_t _dispatch_qos_class_t; - #pragma mark - #pragma mark dispatch_queue_attr_t @@ -667,8 +683,7 @@ typedef enum { DISPATCH_CLASS_DECL(queue_attr); struct dispatch_queue_attr_s { OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); - _dispatch_qos_class_t dqa_qos_class; - int8_t dqa_relative_priority; + dispatch_priority_requested_t dqa_qos_and_relpri; uint16_t dqa_overcommit:2; uint16_t dqa_autorelease_frequency:2; uint16_t dqa_concurrent:1; @@ -752,7 +767,6 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); void *dc_ctxt; \ void *dc_data; \ void *dc_other -#define _DISPATCH_SIZEOF_PTR 8 #elif OS_OBJECT_HAVE_OBJC1 #define DISPATCH_CONTINUATION_HEADER(x) \ dispatch_function_t 
dc_func; \ @@ -770,7 +784,6 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); void *dc_ctxt; \ void *dc_data; \ void *dc_other -#define _DISPATCH_SIZEOF_PTR 4 #else #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ @@ -788,24 +801,23 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); void *dc_ctxt; \ void *dc_data; \ void *dc_other -#define _DISPATCH_SIZEOF_PTR 4 #endif #define _DISPATCH_CONTINUATION_PTRS 8 #if DISPATCH_HW_CONFIG_UP // UP devices don't contend on continuations so we don't need to force them to // occupy a whole cacheline (which is intended to avoid contention) #define DISPATCH_CONTINUATION_SIZE \ - (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR) + (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR) #else #define DISPATCH_CONTINUATION_SIZE ROUND_UP_TO_CACHELINE_SIZE( \ - (_DISPATCH_CONTINUATION_PTRS * _DISPATCH_SIZEOF_PTR)) + (_DISPATCH_CONTINUATION_PTRS * DISPATCH_SIZEOF_PTR)) #endif #define ROUND_UP_TO_CONTINUATION_SIZE(x) \ (((x) + (DISPATCH_CONTINUATION_SIZE - 1u)) & \ ~(DISPATCH_CONTINUATION_SIZE - 1u)) // continuation is a dispatch_sync or dispatch_barrier_sync -#define DISPATCH_OBJ_SYNC_SLOW_BIT 0x001ul +#define DISPATCH_OBJ_SYNC_WAITER_BIT 0x001ul // continuation acts as a barrier #define DISPATCH_OBJ_BARRIER_BIT 0x002ul // continuation resources are freed on run @@ -821,17 +833,35 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); #define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul // use the voucher from the continuation even if the queue has voucher set #define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul +// never set on continuations, used by mach.c only +#define DISPATCH_OBJ_MACH_BARRIER 0x1000000ul -struct dispatch_continuation_s { +typedef struct dispatch_continuation_s { struct dispatch_object_s _as_do[0]; DISPATCH_CONTINUATION_HEADER(continuation); -}; -typedef struct dispatch_continuation_s *dispatch_continuation_t; +} *dispatch_continuation_t; + +typedef struct dispatch_sync_context_s { + struct 
dispatch_object_s _as_do[0]; + struct dispatch_continuation_s _as_dc[0]; + DISPATCH_CONTINUATION_HEADER(continuation); + dispatch_function_t dsc_func; + void *dsc_ctxt; +#if DISPATCH_COCOA_COMPAT + dispatch_thread_frame_s dsc_dtf; +#endif + dispatch_thread_event_s dsc_event; + dispatch_tid dsc_waiter; + dispatch_qos_t dsc_override_qos_floor; + dispatch_qos_t dsc_override_qos; + bool dsc_wlh_was_first; + bool dsc_release_storage; +} *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { _OS_OBJECT_CLASS_HEADER(); DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation); -} *dispatch_continuation_vtable_t; +} const *dispatch_continuation_vtable_t; #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT #if TARGET_OS_EMBEDDED @@ -847,8 +877,9 @@ dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); void _dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc); -void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_queue_t dq, - dispatch_invoke_flags_t flags); +void _dispatch_continuation_pop(dispatch_object_t dou, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + dispatch_queue_t dq); void _dispatch_continuation_invoke(dispatch_object_t dou, voucher_t override_voucher, dispatch_invoke_flags_t flags); @@ -870,6 +901,7 @@ enum { DC_MACH_SEND_BARRRIER_DRAIN_TYPE, DC_MACH_SEND_BARRIER_TYPE, DC_MACH_RECV_BARRIER_TYPE, + DC_MACH_ASYNC_REPLY_TYPE, #if HAVE_PTHREAD_WORKQUEUE_QOS DC_OVERRIDE_STEALING_TYPE, DC_OVERRIDE_OWNING_TYPE, @@ -896,12 +928,12 @@ extern const struct dispatch_continuation_vtable_s void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); #if HAVE_PTHREAD_WORKQUEUE_QOS void _dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); + dispatch_invoke_context_t dic, 
dispatch_invoke_flags_t flags); #endif #define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE]) @@ -919,8 +951,14 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, mach_voucher_t kv); voucher_t _dispatch_set_priority_and_voucher_slow(pthread_priority_t pri, - voucher_t voucher, _dispatch_thread_set_self_t flags); - + voucher_t voucher, dispatch_thread_set_self_t flags); +#else +static inline void +_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, + mach_voucher_t kv) +{ + (void)pri; (void)kv; +} #endif #pragma mark - #pragma mark dispatch_apply_t @@ -931,7 +969,7 @@ struct dispatch_apply_s { dispatch_continuation_t da_dc; dispatch_thread_event_s da_event; dispatch_invoke_flags_t da_flags; - uint32_t da_thr_cnt; + int32_t da_thr_cnt; }; typedef struct dispatch_apply_s *dispatch_apply_t; @@ -940,7 +978,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t; #ifdef __BLOCKS__ -#define DISPATCH_BLOCK_API_MASK (0x80u - 1) +#define DISPATCH_BLOCK_API_MASK (0x100u - 1) #define DISPATCH_BLOCK_HAS_VOUCHER (1u << 31) #define DISPATCH_BLOCK_HAS_PRIORITY (1u << 30) @@ -987,15 +1025,13 @@ void _dispatch_block_sync_invoke(void *block); void _dispatch_continuation_init_slow(dispatch_continuation_t dc, dispatch_queue_class_t dqu, dispatch_block_flags_t flags); -void _dispatch_continuation_update_bits(dispatch_continuation_t dc, - uintptr_t dc_flags); -bool _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, +long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func); /* exported for tests in dispatch_trysync.c */ DISPATCH_EXPORT DISPATCH_NOTHROW -bool _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, +long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f); #endif /* __BLOCKS__ */ diff --git a/src/semaphore.c b/src/semaphore.c index 4d232b7eb..3fe94c6e3 100644 --- a/src/semaphore.c +++ 
b/src/semaphore.c @@ -20,53 +20,6 @@ #include "internal.h" -// semaphores are too fundamental to use the dispatch_assume*() macros -#if USE_WIN32_SEM -// rdar://problem/8428132 -static DWORD best_resolution = 1; // 1ms - -DWORD -_push_timer_resolution(DWORD ms) -{ - MMRESULT res; - static dispatch_once_t once; - - if (ms > 16) { - // only update timer resolution if smaller than default 15.6ms - // zero means not updated - return 0; - } - - // aim for the best resolution we can accomplish - dispatch_once(&once, ^{ - TIMECAPS tc; - MMRESULT res; - res = timeGetDevCaps(&tc, sizeof(tc)); - if (res == MMSYSERR_NOERROR) { - best_resolution = min(max(tc.wPeriodMin, best_resolution), - tc.wPeriodMax); - } - }); - - res = timeBeginPeriod(best_resolution); - if (res == TIMERR_NOERROR) { - return best_resolution; - } - // zero means not updated - return 0; -} - -// match ms parameter to result from _push_timer_resolution -void -_pop_timer_resolution(DWORD ms) -{ - if (ms) { - timeEndPeriod(ms); - } -} -#endif /* USE_WIN32_SEM */ - - DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); @@ -79,36 +32,9 @@ _dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; dsema->do_next = DISPATCH_OBJECT_LISTLESS; - dsema->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); + dsema->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); dsema->dsema_value = value; -#if USE_POSIX_SEM - int ret = sem_init(&dsema->dsema_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#endif -} - -static void -_dispatch_semaphore_class_dispose(dispatch_semaphore_class_t dsemau) -{ - struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - -#if USE_MACH_SEM - kern_return_t kr; - if (dsema->dsema_port) { - kr = semaphore_destroy(mach_task_self(), dsema->dsema_port); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); 
- } - dsema->dsema_port = MACH_PORT_DEAD; -#elif USE_POSIX_SEM - int ret = sem_destroy(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - if (dsema->dsema_handle) { - CloseHandle(dsema->dsema_handle); - } -#endif + _dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); } #pragma mark - @@ -126,68 +52,16 @@ dispatch_semaphore_create(long value) return DISPATCH_BAD_INPUT; } - dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore), - sizeof(struct dispatch_semaphore_s)); + dsema = (dispatch_semaphore_t)_dispatch_object_alloc( + DISPATCH_VTABLE(semaphore), sizeof(struct dispatch_semaphore_s)); _dispatch_semaphore_class_init(value, dsema); dsema->dsema_orig = value; return dsema; } -#if USE_MACH_SEM -static void -_dispatch_semaphore_create_port(semaphore_t *s4) -{ - kern_return_t kr; - semaphore_t tmp; - - if (*s4) { - return; - } - _dispatch_fork_becomes_unsafe(); - - // lazily allocate the semaphore port - - // Someday: - // 1) Switch to a doubly-linked FIFO in user-space. - // 2) User-space timers for the timeout. - // 3) Use the per-thread semaphore port. 
- - while ((kr = semaphore_create(mach_task_self(), &tmp, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { - kr = semaphore_destroy(mach_task_self(), tmp); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - } -} -#elif USE_WIN32_SEM -static void -_dispatch_semaphore_create_handle(HANDLE *s4) -{ - HANDLE tmp; - - if (*s4) { - return; - } - - // lazily allocate the semaphore port - - while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { - _dispatch_temporary_resource_shortage(); - } - - if (!os_atomic_cmpxchg(s4, 0, tmp)) { - CloseHandle(tmp); - } -} -#endif - void -_dispatch_semaphore_dispose(dispatch_object_t dou) +_dispatch_semaphore_dispose(dispatch_object_t dou, + DISPATCH_UNUSED bool *allow_free) { dispatch_semaphore_t dsema = dou._dsema; @@ -196,7 +70,7 @@ _dispatch_semaphore_dispose(dispatch_object_t dou) "Semaphore object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dsema); + _dispatch_sema4_dispose(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); } size_t @@ -210,7 +84,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dsema->dsema_port); + dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); @@ -221,18 +95,8 @@ DISPATCH_NOINLINE long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); - kern_return_t kr = semaphore_signal(dsema->dsema_port); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - int ret = sem_post(&dsema->dsema_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - 
_dispatch_semaphore_create_handle(&dsema->dsema_handle); - int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL); - dispatch_assume(ret); -#endif + _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); + _dispatch_sema4_signal(&dsema->dsema_sema, 1); return 1; } @@ -257,61 +121,12 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, { long orig; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dsema->dsema_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dsema->dsema_handle); -#endif - + _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - break; - } -#elif USE_WIN32_SEM - nsec = _dispatch_timeout(timeout); - msec = (DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { + if 
(!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) { break; } -#endif // Fall through and try to undo what the fast path did to // dsema->dsema_value case DISPATCH_TIME_NOW: @@ -319,30 +134,13 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, while (orig < 0) { if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return -1; -#endif + return _DSEMA4_TIMEOUT(); } } // Another thread called semaphore_signal(). // Fall through and drain the wakeup. case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dsema->dsema_port); - } while (kr == KERN_ABORTED); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dsema->dsema_sem); - } while (ret != 0); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dsema->dsema_handle, INFINITE); -#endif + _dispatch_sema4_wait(&dsema->dsema_sema); break; } return 0; @@ -365,7 +163,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_group_t _dispatch_group_create_with_count(long count) { - dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc( + dispatch_group_t dg = (dispatch_group_t)_dispatch_object_alloc( DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s)); _dispatch_semaphore_class_init(count, dg); if (count) { @@ -416,26 +214,10 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release) rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); if (rval) { // wake group waiters -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); - do { - kern_return_t kr = semaphore_signal(dg->dg_port); - DISPATCH_GROUP_VERIFY_KR(kr); - } while (--rval); -#elif USE_POSIX_SEM - do { - int ret = sem_post(&dg->dg_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); - } while (--rval); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); - int ret; - ret = 
ReleaseSemaphore(dg->dg_handle, rval, NULL); - dispatch_assume(ret); -#else -#error "No supported semaphore type" -#endif + _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); + _dispatch_sema4_signal(&dg->dg_sema, rval); } + uint16_t refs = needs_release ? 1 : 0; // if (head) { // async group notify blocks do { @@ -444,11 +226,9 @@ _dispatch_group_wake(dispatch_group_t dg, bool needs_release) _dispatch_continuation_async(dsn_queue, head); _dispatch_release(dsn_queue); } while ((head = next)); - _dispatch_release(dg); - } - if (needs_release) { - _dispatch_release(dg); // + refs++; } + if (refs) _dispatch_release_n(dg, refs); return 0; } @@ -466,7 +246,7 @@ dispatch_group_leave(dispatch_group_t dg) } void -_dispatch_group_dispose(dispatch_object_t dou) +_dispatch_group_dispose(dispatch_object_t dou, DISPATCH_UNUSED bool *allow_free) { dispatch_group_t dg = dou._dg; @@ -475,7 +255,7 @@ _dispatch_group_dispose(dispatch_object_t dou) "Group object deallocated while in use"); } - _dispatch_semaphore_class_dispose(dg); + _dispatch_sema4_dispose(&dg->dg_sema, _DSEMA4_POLICY_FIFO); } size_t @@ -489,7 +269,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dg->dg_port); + dg->dg_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); @@ -503,19 +283,6 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) long value; int orig_waiters; -#if USE_MACH_SEM - mach_timespec_t _timeout; - kern_return_t kr; -#elif USE_POSIX_SEM // KVV - struct timespec _timeout; - int ret; -#elif USE_WIN32_SEM // KVV - uint64_t nsec; - DWORD msec; - DWORD resolution; - DWORD wait_result; -#endif - // check before we cause another signal to be sent by incrementing // dg->dg_waiters value = os_atomic_load2o(dg, dg_value, 
ordered); // 19296565 @@ -533,48 +300,12 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) timeout = DISPATCH_TIME_FOREVER; } -#if USE_MACH_SEM - _dispatch_semaphore_create_port(&dg->dg_port); -#elif USE_WIN32_SEM - _dispatch_semaphore_create_handle(&dg->dg_handle); -#endif - + _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); switch (timeout) { default: -#if USE_MACH_SEM - do { - uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(dg->dg_port, _timeout)); - } while (kr == KERN_ABORTED); - - if (kr != KERN_OPERATION_TIMED_OUT) { - DISPATCH_GROUP_VERIFY_KR(kr); - break; - } -#elif USE_POSIX_SEM - do { - uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(&dg->dg_sem, &_timeout)); - } while (ret == -1 && errno == EINTR); - - if (!(ret == -1 && errno == ETIMEDOUT)) { - DISPATCH_SEMAPHORE_VERIFY_RET(ret); + if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) { break; } -#elif USE_WIN32_SEM - nsec = _dispatch_timeout(timeout); - msec = (DWORD)(nsec / (uint64_t)1000000); - resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dg->dg_handle, msec); - _pop_timer_resolution(resolution); - if (wait_result != WAIT_TIMEOUT) { - break; - } -#endif // Fall through and try to undo the earlier change to // dg->dg_waiters case DISPATCH_TIME_NOW: @@ -582,30 +313,13 @@ _dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) while (orig_waiters) { if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, orig_waiters - 1, &orig_waiters, relaxed)) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return 
-1; -#endif + return _DSEMA4_TIMEOUT(); } } - // Another thread called semaphore_signal(). + // Another thread is running _dispatch_group_wake() // Fall through and drain the wakeup. case DISPATCH_TIME_FOREVER: -#if USE_MACH_SEM - do { - kr = semaphore_wait(dg->dg_port); - } while (kr == KERN_ABORTED); - DISPATCH_GROUP_VERIFY_KR(kr); -#elif USE_POSIX_SEM - do { - ret = sem_wait(&dg->dg_sem); - } while (ret == -1 && errno == EINTR); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); -#elif USE_WIN32_SEM - WaitForSingleObject(dg->dg_handle, INFINITE); -#endif + _dispatch_sema4_wait(&dg->dg_sema); break; } return 0; @@ -618,12 +332,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) return 0; } if (timeout == 0) { -#if USE_MACH_SEM - return KERN_OPERATION_TIMED_OUT; -#elif USE_POSIX_SEM || USE_WIN32_SEM - errno = ETIMEDOUT; - return (-1); -#endif + return _DSEMA4_TIMEOUT(); } return _dispatch_group_wait_slow(dg, timeout); } diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index dceda6d97..f9d0983aa 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,20 +29,10 @@ struct dispatch_queue_s; -#if USE_MACH_SEM -#define DISPATCH_OS_SEMA_FIELD(base) semaphore_t base##_port -#elif USE_POSIX_SEM -#define DISPATCH_OS_SEMA_FIELD(base) sem_t base##_sem -#elif USE_WIN32_SEM -#define DISPATCH_OS_SEMA_FIELD(base) HANDLE base##_handle -#else -#error "No supported semaphore type" -#endif - #define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ DISPATCH_OBJECT_HEADER(cls); \ long volatile ns##_value; \ - DISPATCH_OS_SEMA_FIELD(ns) + _dispatch_sema4_t ns##_sema struct dispatch_semaphore_header_s { DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); @@ -70,14 +60,14 @@ typedef union { dispatch_semaphore_t _objc_dsema; dispatch_group_t _objc_dg; #endif -} dispatch_semaphore_class_t __attribute__((__transparent_union__)); +} dispatch_semaphore_class_t DISPATCH_TRANSPARENT_UNION; dispatch_group_t _dispatch_group_create_and_enter(void); -void 
_dispatch_group_dispose(dispatch_object_t dou); +void _dispatch_group_dispose(dispatch_object_t dou, bool *allow_free); size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz); -void _dispatch_semaphore_dispose(dispatch_object_t dou); +void _dispatch_semaphore_dispose(dispatch_object_t dou, bool *allow_free); size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); diff --git a/src/shims.h b/src/shims.h index db288225e..28e1c53a9 100644 --- a/src/shims.h +++ b/src/shims.h @@ -28,89 +28,17 @@ #define __DISPATCH_OS_SHIMS__ #include -#if HAVE_PTHREAD_QOS_H && __has_include() -#include -#if __has_include() -#include -#define _DISPATCH_QOS_CLASS_USER_INTERACTIVE QOS_CLASS_USER_INTERACTIVE -#define _DISPATCH_QOS_CLASS_USER_INITIATED QOS_CLASS_USER_INITIATED -#define _DISPATCH_QOS_CLASS_DEFAULT QOS_CLASS_DEFAULT -#define _DISPATCH_QOS_CLASS_UTILITY QOS_CLASS_UTILITY -#define _DISPATCH_QOS_CLASS_BACKGROUND QOS_CLASS_BACKGROUND -#define _DISPATCH_QOS_CLASS_UNSPECIFIED QOS_CLASS_UNSPECIFIED -#else // pthread/qos_private.h -typedef unsigned long pthread_priority_t; -#endif // pthread/qos_private.h -#if __has_include() -#include -#define _DISPATCH_QOS_CLASS_MAINTENANCE QOS_CLASS_MAINTENANCE -#else // sys/qos_private.h -#define _DISPATCH_QOS_CLASS_MAINTENANCE 0x05 -#endif // sys/qos_private.h -#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#endif -#ifndef _PTHREAD_PRIORITY_INHERIT_FLAG -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 -#endif -#ifndef _PTHREAD_PRIORITY_ROOTQUEUE_FLAG -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 -#endif -#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG -#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#endif -#ifndef _PTHREAD_PRIORITY_ENFORCE_FLAG -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 -#endif -#ifndef _PTHREAD_PRIORITY_OVERRIDE_FLAG -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 -#endif -#ifndef 
_PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 -#endif -#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG -#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#endif -#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG -#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 -#endif - -#else // HAVE_PTHREAD_QOS_H -typedef unsigned int qos_class_t; -typedef unsigned long pthread_priority_t; -#define QOS_MIN_RELATIVE_PRIORITY (-15) -#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) -#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 -#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) -#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 -#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#endif // HAVE_PTHREAD_QOS_H - #ifdef __linux__ #include "shims/linux_stubs.h" #endif -typedef uint32_t dispatch_priority_t; -#define DISPATCH_SATURATED_OVERRIDE ((dispatch_priority_t)UINT32_MAX) - -#ifndef _DISPATCH_QOS_CLASS_USER_INTERACTIVE -enum { - _DISPATCH_QOS_CLASS_USER_INTERACTIVE = 0x21, - _DISPATCH_QOS_CLASS_USER_INITIATED = 0x19, - _DISPATCH_QOS_CLASS_DEFAULT = 0x15, - _DISPATCH_QOS_CLASS_UTILITY = 0x11, - _DISPATCH_QOS_CLASS_BACKGROUND = 0x09, - _DISPATCH_QOS_CLASS_MAINTENANCE = 0x05, - _DISPATCH_QOS_CLASS_UNSPECIFIED = 0x00, -}; -#endif // _DISPATCH_QOS_CLASS_USER_INTERACTIVE +#ifdef __ANDROID__ +#include "shims/android_stubs.h" +#endif + +#include "shims/hw_config.h" +#include "shims/priority.h" + #if HAVE_PTHREAD_WORKQUEUES #if __has_include() #include @@ -122,6 +50,10 @@ enum { #endif #endif // HAVE_PTHREAD_WORKQUEUES +#if DISPATCH_USE_INTERNAL_WORKQUEUE +#include 
"event/workqueue_internal.h" +#endif + #if HAVE_PTHREAD_NP_H #include #endif @@ -207,6 +139,60 @@ _pthread_qos_override_end_direct(mach_port_t thread, void *resource) #define _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND 0 #endif +#if PTHREAD_WORKQUEUE_SPI_VERSION < 20160427 +static inline bool +_pthread_workqueue_should_narrow(pthread_priority_t priority) +{ + (void)priority; + return false; +} +#endif + +#if HAVE_PTHREAD_QOS_H && __has_include() && \ + defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ + DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 +#define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL +#else +#define DISPATCH_MAX_PARALLELISM_PHYSICAL 0x1 +#endif +#define DISPATCH_MAX_PARALLELISM_ACTIVE 0x2 +_Static_assert(!(DISPATCH_MAX_PARALLELISM_PHYSICAL & + DISPATCH_MAX_PARALLELISM_ACTIVE), "Overlapping parallelism flags"); + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags) +{ + uint32_t p; + int r = 0; + + if (qos) { +#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM + r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos), + flags & PTHREAD_MAX_PARALLELISM_PHYSICAL); +#endif + } + if (likely(r > 0)) { + p = (uint32_t)r; + } else { + p = (flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) ? 
+ dispatch_hw_config(physical_cpus) : + dispatch_hw_config(logical_cpus); + } + if (flags & DISPATCH_MAX_PARALLELISM_ACTIVE) { + uint32_t active_cpus = dispatch_hw_config(active_cpus); + if ((flags & DISPATCH_MAX_PARALLELISM_PHYSICAL) && + active_cpus < dispatch_hw_config(logical_cpus)) { + active_cpus /= dispatch_hw_config(logical_cpus) / + dispatch_hw_config(physical_cpus); + } + if (active_cpus < p) p = active_cpus; + } + return p; +} + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not @@ -227,12 +213,13 @@ void __builtin_trap(void); #ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" #endif +#define DISPATCH_ATOMIC64_ALIGN __attribute__((aligned(8))) + #include "shims/atomic_sfb.h" #include "shims/tsd.h" #include "shims/yield.h" #include "shims/lock.h" -#include "shims/hw_config.h" #include "shims/perfmon.h" #include "shims/getprogname.h" @@ -286,7 +273,8 @@ _dispatch_mempcpy(void *ptr, const void *data, size_t len) #define _dispatch_clear_stack(s) do { \ void *a[(s)/sizeof(void*) ? (s)/sizeof(void*) : 1]; \ a[0] = pthread_get_stackaddr_np(pthread_self()); \ - bzero((void*)&a[1], (size_t)(a[0] - (void*)&a[1])); \ + void* volatile const p = (void*)&a[1]; /* */ \ + bzero((void*)p, (size_t)(a[0] - (void*)&a[1])); \ } while (0) #else #define _dispatch_clear_stack(s) diff --git a/src/shims/android_stubs.h b/src/shims/android_stubs.h new file mode 100644 index 000000000..c8032a390 --- /dev/null +++ b/src/shims/android_stubs.h @@ -0,0 +1,23 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See http://swift.org/LICENSE.txt for license information + * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +// forward declarations for functions we are stubbing out +// in the intial android port. 
+ +#ifndef __DISPATCH__ANDROID__STUBS__INTERNAL +#define __DISPATCH__ANDROID__STUBS__INTERNAL + +#if !__has_feature(c_static_assert) +#define _Static_assert(...) +#endif + +#endif /* __DISPATCH__ANDROID__STUBS__INTERNAL */ diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 519947790..64af8b272 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -27,114 +27,50 @@ #ifndef __DISPATCH_SHIMS_ATOMIC__ #define __DISPATCH_SHIMS_ATOMIC__ -#if !__has_extension(c_atomic) || \ - !__has_extension(c_generic_selections) || \ - !__has_include() -#error libdispatch requires C11 with and generic selections +#if !__has_extension(c_atomic) || !__has_include() +#error libdispatch requires C11 with #endif #include -#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered memory_order_seq_cst +#define memory_order_dependency memory_order_acquire -#define _os_atomic_basetypeof(p) \ - typeof(*_Generic((p), \ - char*: (char*)(p), \ - volatile char*: (char*)(p), \ - signed char*: (signed char*)(p), \ - volatile signed char*: (signed char*)(p), \ - unsigned char*: (unsigned char*)(p), \ - volatile unsigned char*: (unsigned char*)(p), \ - short*: (short*)(p), \ - volatile short*: (short*)(p), \ - unsigned short*: (unsigned short*)(p), \ - volatile unsigned short*: (unsigned short*)(p), \ - int*: (int*)(p), \ - volatile int*: (int*)(p), \ - unsigned int*: (unsigned int*)(p), \ - volatile unsigned int*: (unsigned int*)(p), \ - long*: (long*)(p), \ - volatile long*: (long*)(p), \ - unsigned long*: (unsigned long*)(p), \ - volatile unsigned long*: (unsigned long*)(p), \ - long long*: (long long*)(p), \ - volatile long long*: (long long*)(p), \ - unsigned long long*: (unsigned long long*)(p), \ - volatile unsigned long long*: (unsigned long long*)(p), \ - const void**: (const void**)(p), \ - const void*volatile*: (const void**)(p), \ - default: (void**)(p))) +#define os_atomic(type) type _Atomic #define _os_atomic_c11_atomic(p) \ - _Generic((p), \ - char*: 
(_Atomic(char)*)(p), \ - volatile char*: (volatile _Atomic(char)*)(p), \ - signed char*: (_Atomic(signed char)*)(p), \ - volatile signed char*: (volatile _Atomic(signed char)*)(p), \ - unsigned char*: (_Atomic(unsigned char)*)(p), \ - volatile unsigned char*: (volatile _Atomic(unsigned char)*)(p), \ - short*: (_Atomic(short)*)(p), \ - volatile short*: (volatile _Atomic(short)*)(p), \ - unsigned short*: (_Atomic(unsigned short)*)(p), \ - volatile unsigned short*: (volatile _Atomic(unsigned short)*)(p), \ - int*: (_Atomic(int)*)(p), \ - volatile int*: (volatile _Atomic(int)*)(p), \ - unsigned int*: (_Atomic(unsigned int)*)(p), \ - volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \ - long*: (_Atomic(long)*)(p), \ - volatile long*: (volatile _Atomic(long)*)(p), \ - unsigned long*: (_Atomic(unsigned long)*)(p), \ - volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \ - long long*: (_Atomic(long long)*)(p), \ - volatile long long*: (volatile _Atomic(long long)*)(p), \ - unsigned long long*: (_Atomic(unsigned long long)*)(p), \ - volatile unsigned long long*: \ - (volatile _Atomic(unsigned long long)*)(p), \ - const void**: (_Atomic(const void*)*)(p), \ - const void*volatile*: (volatile _Atomic(const void*)*)(p), \ - default: (volatile _Atomic(void*)*)(p)) + ((typeof(*(p)) _Atomic *)(p)) -#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) -// see comment in dispatch_once.c -#define os_atomic_maximally_synchronizing_barrier() \ - atomic_thread_fence(memory_order_seq_cst) +// This removes the _Atomic and volatile qualifiers on the type of *p +#define _os_atomic_basetypeof(p) \ + typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #define os_atomic_load(p, m) \ - ({ _os_atomic_basetypeof(p) _r = \ - atomic_load_explicit(_os_atomic_c11_atomic(p), \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) #define os_atomic_store(p, v, m) \ - ({ 
_os_atomic_basetypeof(p) _v = (v); \ - atomic_store_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); }) + atomic_store_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m) #define os_atomic_xchg(p, v, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = \ - atomic_exchange_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_exchange_explicit(_os_atomic_c11_atomic(p), v, memory_order_##m) #define os_atomic_cmpxchg(p, e, v, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \ + ({ _os_atomic_basetypeof(p) _r = (e); \ atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); }) + &_r, v, memory_order_##m, memory_order_relaxed); }) #define os_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define os_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \ + ({ _os_atomic_basetypeof(p) _r = (e); _Bool _b = \ atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ - &_r, _v, memory_order_##m, \ - memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; }) + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define _os_atomic_c11_op(p, v, m, o, op) \ ({ _os_atomic_basetypeof(p) _v = (v), _r = \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ memory_order_##m); (typeof(*(p)))(_r op _v); }) #define _os_atomic_c11_op_orig(p, v, m, o, op) \ - ({ _os_atomic_basetypeof(p) _v = (v), _r = \ - atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(*(p)))_r; }) + atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \ + 
memory_order_##m) #define os_atomic_add(p, v, m) \ _os_atomic_c11_op((p), (v), m, add, +) #define os_atomic_add_orig(p, v, m) \ @@ -156,22 +92,13 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - typeof(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (os_unlikely(!_result)); \ - _result; \ - }) -#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ - os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) -#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ - ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) +#define os_atomic_force_dependency_on(p, e) (p) +#define os_atomic_load_with_dependency_on(p, e) \ + os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + +#define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) #define os_atomic_load2o(p, f, m) \ os_atomic_load(&(p)->f, m) @@ -223,28 +150,21 @@ #define os_atomic_dec_orig2o(p, f, m) \ os_atomic_sub_orig2o(p, f, 1, m) -#if defined(__x86_64__) || defined(__i386__) -#undef os_atomic_maximally_synchronizing_barrier -#ifdef __LP64__ -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); }) -#else -#ifdef __llvm__ -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ __volatile__( \ - "cpuid" \ - : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); }) -#else // gcc does not allow inline i386 asm to clobber ebx -#define os_atomic_maximally_synchronizing_barrier() \ - ({ unsigned long _clbr; __asm__ 
__volatile__( \ - "pushl %%ebx\n\t" \ - "cpuid\n\t" \ - "popl %%ebx" \ - : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); }) -#endif -#endif -#endif // defined(__x86_64__) || defined(__i386__) +#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ + bool _result = false; \ + typeof(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (os_unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ + os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) +#define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ + ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index 5f972b4fe..de074a444 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -27,43 +27,9 @@ #ifndef __DISPATCH_SHIMS_ATOMIC_SFB__ #define __DISPATCH_SHIMS_ATOMIC_SFB__ -#if __clang__ && __clang_major__ < 5 // -#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) -#endif - -// Returns UINT_MAX if all the bits in p were already set. -#define os_atomic_set_first_bit(p,m) _os_atomic_set_first_bit(p,m) - -DISPATCH_ALWAYS_INLINE -static inline unsigned int -_os_atomic_set_first_bit(volatile unsigned long *p, - unsigned int max_index) -{ - unsigned int index; - unsigned long b, mask, b_masked; - - for (;;) { - b = *p; - // ffs returns 1 + index, or 0 if none set. 
- index = (unsigned int)__builtin_ffsl((long)~b); - if (slowpath(index == 0)) { - return UINT_MAX; - } - index--; - if (slowpath(index > max_index)) { - return UINT_MAX; - } - mask = ((typeof(b))1) << index; - b_masked = b | mask; - if (__sync_bool_compare_and_swap(p, b, b_masked)) { - return index; - } - } -} - #if defined(__x86_64__) || defined(__i386__) -#undef os_atomic_set_first_bit +// Returns UINT_MAX if all the bits in p were already set. DISPATCH_ALWAYS_INLINE static inline unsigned int os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) @@ -108,7 +74,35 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) return (unsigned int)bit; } +#else + +#if __clang__ && __clang_major__ < 5 // +#define __builtin_ffs(x) __builtin_ffs((unsigned int)(x)) #endif +DISPATCH_ALWAYS_INLINE +static inline unsigned int +os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index) +{ + unsigned int index; + unsigned long b, b_masked; + + os_atomic_rmw_loop(p, b, b_masked, relaxed, { + // ffs returns 1 + index, or 0 if none set + index = (unsigned int)__builtin_ffsl((long)~b); + if (slowpath(index == 0)) { + os_atomic_rmw_loop_give_up(return UINT_MAX); + } + index--; + if (slowpath(index > max_index)) { + os_atomic_rmw_loop_give_up(return UINT_MAX); + } + b_masked = b | (1UL << index); + }); + + return index; +} + +#endif #endif // __DISPATCH_SHIMS_ATOMIC_SFB__ diff --git a/src/shims/getprogname.h b/src/shims/getprogname.h index 74aba1318..7eb19787e 100644 --- a/src/shims/getprogname.h +++ b/src/shims/getprogname.h @@ -23,11 +23,18 @@ #define __DISPATCH_SHIMS_GETPROGNAME__ #if !HAVE_GETPROGNAME + +#ifdef __ANDROID__ +extern const char *__progname; +#endif /* __ANDROID */ + static inline char * getprogname(void) { # if HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME return program_invocation_short_name; +# elif defined(__ANDROID__) + return __progname; # else # error getprogname(3) is not available on this platform # endif diff --git 
a/src/shims/hw_config.h b/src/shims/hw_config.h index cad211d21..485dad663 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -27,6 +27,22 @@ #ifndef __DISPATCH_SHIMS_HW_CONFIG__ #define __DISPATCH_SHIMS_HW_CONFIG__ +#ifdef __SIZEOF_POINTER__ +#define DISPATCH_SIZEOF_PTR __SIZEOF_POINTER__ +#elif defined(_WIN64) +#define DISPATCH_SIZEOF_PTR 8 +#elif defined(_WIN32) +#define DISPATCH_SIZEOF_PTR 4 +#elif defined(_MSC_VER) +#error "could not determine pointer size as a constant int for MSVC" +#elif defined(__LP64__) || defined(__LLP64__) +#define DISPATCH_SIZEOF_PTR 8 +#elif defined(__ILP32__) +#define DISPATCH_SIZEOF_PTR 4 +#else +#error "could not determine pointer size as a constant int" +#endif // __SIZEOF_POINTER__ + #if !TARGET_OS_WIN32 typedef enum { @@ -85,9 +101,19 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) switch (c) { case _dispatch_hw_config_logical_cpus: case _dispatch_hw_config_physical_cpus: - return sysconf(_SC_NPROCESSORS_CONF); + return (uint32_t)sysconf(_SC_NPROCESSORS_CONF); case _dispatch_hw_config_active_cpus: - return sysconf(_SC_NPROCESSORS_ONLN); + { +#ifdef __USE_GNU + // Prefer pthread_getaffinity_np because it considers + // scheduler cpu affinity. This matters if the program + // is restricted to a subset of the online cpus (eg via numactl). 
+ cpu_set_t cpuset; + if (pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) == 0) + return (uint32_t)CPU_COUNT(&cpuset); +#endif + return (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); + } } #else const char *name = NULL; diff --git a/src/shims/linux_stubs.c b/src/shims/linux_stubs.c index 07ee8bc06..4923eb0ca 100644 --- a/src/shims/linux_stubs.c +++ b/src/shims/linux_stubs.c @@ -17,7 +17,11 @@ */ #include +#ifdef __ANDROID__ +#include +#else #include +#endif /* __ANDROID__ */ #if __has_include() #include diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h index 6a70c0b11..ec684170d 100644 --- a/src/shims/linux_stubs.h +++ b/src/shims/linux_stubs.h @@ -16,8 +16,17 @@ #ifndef __DISPATCH__STUBS__INTERNAL #define __DISPATCH__STUBS__INTERNAL -// marker for hacks we have made to make progress -#define __LINUX_PORT_HDD__ 1 +#ifndef TAILQ_FOREACH_SAFE +#define TAILQ_FOREACH_SAFE(var, head, field, temp) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((temp) = TAILQ_NEXT((var), field), 1); (var) = (temp)) +#endif + +#if DISPATCH_DEBUG +#ifndef TRASHIT +#define TRASHIT(x) do { (x) = (void *)-1; } while (0) +#endif +#endif /* * Stub out defines for some mach types and related macros @@ -30,72 +39,28 @@ typedef uint32_t mach_port_t; typedef uint32_t mach_error_t; -typedef uint32_t mach_vm_size_t; - typedef uint32_t mach_msg_return_t; typedef uint32_t mach_msg_bits_t; -typedef uintptr_t mach_vm_address_t; - -typedef uint32_t dispatch_mach_msg_t; - -typedef uint32_t dispatch_mach_t; - -typedef uint32_t dispatch_mach_reason_t; - -typedef uint32_t voucher_activity_mode_t; - -typedef uint32_t voucher_activity_trace_id_t; - -typedef uint32_t voucher_activity_id_t; - -typedef uint32_t _voucher_activity_buffer_hook_t;; +typedef void *dispatch_mach_msg_t; -typedef uint32_t voucher_activity_flag_t; +typedef uint64_t firehose_activity_id_t; -typedef struct { } mach_msg_header_t; - - -typedef void (*dispatch_mach_handler_function_t)(void*, dispatch_mach_reason_t, 
- dispatch_mach_msg_t, mach_error_t); - -typedef void (*dispatch_mach_msg_destructor_t)(void*); +typedef void *mach_msg_header_t; // Print a warning when an unported code path executes. -#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); } while (0) +#define LINUX_PORT_ERROR() do { \ + printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",\ + __FILE__,__LINE__,__FUNCTION__); } while (0) /* * Stub out defines for other missing types */ -#if __linux__ -// we fall back to use kevent -#define kevent64_s kevent -#define kevent64(kq,cl,nc,el,ne,f,to) kevent(kq,cl,nc,el,ne,to) -#endif - // SIZE_T_MAX should not be hardcoded like this here. -#define SIZE_T_MAX (0x7fffffff) - -// Define to 0 the NOTE_ values that are not present on Linux. -// Revisit this...would it be better to ifdef out the uses instead?? - -// The following values are passed as part of the EVFILT_TIMER requests - -#define IGNORE_KEVENT64_EXT /* will force the kevent64_s.ext[] to not be used -> leeway ignored */ - -#define NOTE_SECONDS 0x01 -#define NOTE_USECONDS 0x02 -#define NOTE_NSECONDS 0x04 -#define NOTE_ABSOLUTE 0x08 -#define NOTE_CRITICAL 0x10 -#define NOTE_BACKGROUND 0x20 -#define NOTE_LEEWAY 0x40 - -// need to catch the following usage if it happens .. 
-// we simply return '0' as a value probably not correct - -#define NOTE_VM_PRESSURE ({LINUX_PORT_ERROR(); 0;}) +#ifndef SIZE_T_MAX +#define SIZE_T_MAX (~(size_t)0) +#endif #endif diff --git a/src/shims/lock.c b/src/shims/lock.c index 2fab69107..24af953c3 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -34,6 +34,7 @@ _Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION, "values should be the same"); +#if !HAVE_UL_UNFAIR_LOCK DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, @@ -47,6 +48,266 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, } thread_switch(_dispatch_lock_owner(value), option, timeout); } +#endif // HAVE_UL_UNFAIR_LOCK +#endif + +#pragma mark - semaphores + +#if USE_MACH_SEM +#if __has_include() +#include +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 +#else +#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0 +#endif + +#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ + DISPATCH_VERIFY_MIG(x); \ + if (unlikely((x) == KERN_INVALID_NAME)) { \ + DISPATCH_CLIENT_CRASH((x), \ + "Use-after-free of dispatch_semaphore_t or dispatch_group_t"); \ + } else if (unlikely(x)) { \ + DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ + } \ + } while (0) + +void +_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy) +{ + semaphore_t tmp = MACH_PORT_NULL; + + _dispatch_fork_becomes_unsafe(); + + // lazily allocate the semaphore port + + // Someday: + // 1) Switch to a doubly-linked FIFO in user-space. + // 2) User-space timers for the timeout. 
+ +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + if (policy == _DSEMA4_POLICY_FIFO) { + tmp = (_dispatch_sema4_t)os_get_cached_semaphore(); + if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) { + os_put_cached_semaphore((os_semaphore_t)tmp); + } + return; + } +#endif + + kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + + if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) { + kr = semaphore_destroy(mach_task_self(), tmp); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy) +{ + semaphore_t sema_port = *sema; + *sema = MACH_PORT_DEAD; +#if DISPATCH_USE_OS_SEMAPHORE_CACHE + if (policy == _DSEMA4_POLICY_FIFO) { + return os_put_cached_semaphore((os_semaphore_t)sema_port); + } +#endif + kern_return_t kr = semaphore_destroy(mach_task_self(), sema_port); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} + +void +_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + do { + kern_return_t kr = semaphore_signal(*sema); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + } while (--count); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + kern_return_t kr; + do { + kr = semaphore_wait(*sema); + } while (kr == KERN_ABORTED); + DISPATCH_SEMAPHORE_VERIFY_KR(kr); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + mach_timespec_t _timeout; + kern_return_t kr; + + do { + uint64_t nsec = _dispatch_timeout(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + kr = slowpath(semaphore_timedwait(*sema, _timeout)); + } while (kr == KERN_ABORTED); + + if (kr == KERN_OPERATION_TIMED_OUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_KR(kr); + return false; +} +#elif USE_POSIX_SEM +#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ + if (unlikely((x) == -1)) { \ + DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); 
\ + } \ + } while (0) + +void +_dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + int rc = sem_init(sema, 0, 0); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + int rc = sem_destroy(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(rc); +} + +void +_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + do { + int ret = sem_post(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + } while (--count); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + int ret = sem_wait(sema); + DISPATCH_SEMAPHORE_VERIFY_RET(ret); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + struct timespec _timeout; + int ret; + + do { + uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); + _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + ret = slowpath(sem_timedwait(sema, &_timeout)); + } while (ret == -1 && errno == EINTR); + + if (ret == -1 && errno == ETIMEDOUT) { + return true; + } + DISPATCH_SEMAPHORE_VERIFY_RET(ret); + return false; +} +#elif USE_WIN32_SEM +// rdar://problem/8428132 +static DWORD best_resolution = 1; // 1ms + +static DWORD +_push_timer_resolution(DWORD ms) +{ + MMRESULT res; + static dispatch_once_t once; + + if (ms > 16) { + // only update timer resolution if smaller than default 15.6ms + // zero means not updated + return 0; + } + + // aim for the best resolution we can accomplish + dispatch_once(&once, ^{ + TIMECAPS tc; + MMRESULT res; + res = timeGetDevCaps(&tc, sizeof(tc)); + if (res == MMSYSERR_NOERROR) { + best_resolution = min(max(tc.wPeriodMin, best_resolution), + tc.wPeriodMax); + } + }); + + res = timeBeginPeriod(best_resolution); + if (res == TIMERR_NOERROR) { + return best_resolution; + } + // zero means not updated + return 0; +} + +// match ms parameter to result from _push_timer_resolution 
+DISPATCH_ALWAYS_INLINE +static inline void +_pop_timer_resolution(DWORD ms) +{ + if (ms) timeEndPeriod(ms); +} + +void +_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED) +{ + HANDLE tmp; + + // lazily allocate the semaphore port + + while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) { + _dispatch_temporary_resource_shortage(); + } + + if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + CloseHandle(tmp); + } +} + +void +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) +{ + HANDLE sema_handle = *sema; + CloseHandle(sema_handle); + *sema = 0; +} + +void +_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) +{ + int ret = ReleaseSemaphore(*sema, count, NULL); + dispatch_assume(ret); +} + +void +_dispatch_sema4_wait(_dispatch_sema4_t *sema) +{ + WaitForSingleObject(*sema, INFINITE); +} + +bool +_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) +{ + uint64_t nsec; + DWORD msec; + DWORD resolution; + DWORD wait_result; + + nsec = _dispatch_timeout(timeout); + msec = (DWORD)(nsec / (uint64_t)1000000); + resolution = _push_timer_resolution(msec); + wait_result = WaitForSingleObject(*sema, msec); + _pop_timer_resolution(resolution); + return wait_result == WAIT_TIMEOUT; +} +#else +#error "port has to implement _dispatch_sema4_t" #endif #pragma mark - ulock wrappers @@ -56,12 +317,13 @@ static int _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) { - dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); int rc; _dlock_syscall_switch(err, rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout), case 0: return rc > 0 ?
ENOTEMPTY : 0; case ETIMEDOUT: case EFAULT: return err; + case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr, + "corruption of lock owner"); default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); ); } @@ -69,7 +331,6 @@ _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, static void _dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags) { - dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK); _dlock_syscall_switch(err, __ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0), case 0: case ENOENT: break; @@ -85,17 +346,13 @@ static int _dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, dispatch_lock_options_t flags) { - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - // - timeout = timeout < 1000 ? 1 : timeout / 1000; - _dispatch_thread_switch(val, flags, timeout); - return 0; - } int rc; _dlock_syscall_switch(err, rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout), case 0: return rc > 0 ? ENOTEMPTY : 0; case ETIMEDOUT: case EFAULT: return err; + case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr, + "corruption of lock owner"); default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); ); } @@ -103,10 +360,6 @@ _dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, static void _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) { - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - // - return; - } _dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0), case 0: case ENOENT: break; default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); @@ -117,7 +370,11 @@ _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) #pragma mark - futex wrappers #if HAVE_FUTEX #include +#ifdef __ANDROID__ +#include +#else #include +#endif /* __ANDROID__ */ DISPATCH_ALWAYS_INLINE static inline int @@ -125,7 +382,7 @@ _dispatch_futex(uint32_t *uaddr, int op, uint32_t val, const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3, int opflags) { - return syscall(SYS_futex, uaddr, op 
| opflags, val, timeout, uaddr2, val3); + return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); } static int @@ -144,7 +401,7 @@ _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) { int rc; _dlock_syscall_switch(err, - rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags), + rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); ); @@ -155,7 +412,7 @@ _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, int opflags) { _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout, + _dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout, NULL, 0, opflags), case 0: return; default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); @@ -206,63 +463,21 @@ _dispatch_wake_by_address(uint32_t volatile *address) #pragma mark - thread event -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t -_dispatch_thread_semaphore_create(void) -{ - semaphore_t s4; - kern_return_t kr; - while (unlikely(kr = semaphore_create(mach_task_self(), &s4, - SYNC_POLICY_FIFO, 0))) { - DISPATCH_VERIFY_MIG(kr); - _dispatch_temporary_resource_shortage(); - } - return s4; -} - -void -_dispatch_thread_semaphore_dispose(void *ctxt) -{ - semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt; - kern_return_t kr = semaphore_destroy(mach_task_self(), s4); - DISPATCH_VERIFY_MIG(kr); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); -} -#endif - void _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - kern_return_t kr = semaphore_signal(dte->dte_semaphore); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT _dispatch_ulock_wake(&dte->dte_value, 0); #elif HAVE_FUTEX _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG); -#elif USE_POSIX_SEM - int rc = 
sem_post(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(ret); +#else + _dispatch_sema4_signal(&dte->dte_sema, 1); #endif } void _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - kern_return_t kr; - do { - kr = semaphore_wait(dte->dte_semaphore); - } while (unlikely(kr == KERN_ABORTED)); - DISPATCH_SEMAPHORE_VERIFY_KR(kr); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX for (;;) { uint32_t value = os_atomic_load(&dte->dte_value, acquire); @@ -278,12 +493,8 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) NULL, FUTEX_PRIVATE_FLAG); #endif } -#elif USE_POSIX_SEM - int rc; - do { - rc = sem_wait(&dte->dte_sem); - } while (unlikely(rc != 0)); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_wait(&dte->dte_sema); #endif } @@ -294,30 +505,30 @@ void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, dispatch_lock_options_t flags) { - dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self; - dispatch_lock tid_old, tid_new; + dispatch_lock value_self = _dispatch_lock_value_for_self(); + dispatch_lock old_value, new_value, next = value_self; int rc; for (;;) { - os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, { - if (likely(!_dispatch_lock_is_locked(tid_old))) { - tid_new = next; + os_atomic_rmw_loop(&dul->dul_lock, old_value, new_value, acquire, { + if (likely(!_dispatch_lock_is_locked(old_value))) { + new_value = next; } else { - tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; - if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + new_value = old_value | DLOCK_WAITERS_BIT; + if (new_value == old_value) os_atomic_rmw_loop_give_up(break); } }); - if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + if (unlikely(_dispatch_lock_is_locked_by(old_value, value_self))) { DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); } - if (tid_new == next) { + if (new_value == next) { return; 
} - rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags); + rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags); if (rc == ENOTEMPTY) { - next = tid_self & ~DLOCK_NOWAITERS_BIT; + next = value_self | DLOCK_WAITERS_BIT; } else { - next = tid_self; + next = value_self; } } } @@ -334,30 +545,28 @@ void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, dispatch_lock_options_t flags) { - dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + dispatch_lock cur, value_self = _dispatch_lock_value_for_self(); uint32_t timeout = 1; while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock, - DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) { - if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) { + DLOCK_OWNER_NULL, value_self, &cur, acquire))) { + if (unlikely(_dispatch_lock_is_locked_by(cur, value_self))) { DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); } - _dispatch_thread_switch(tid_cur, flags, timeout++); + _dispatch_thread_switch(cur, flags, timeout++); } } #endif void -_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, - dispatch_lock tid_cur) +_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, dispatch_lock cur) { - dispatch_lock_owner tid_self = _dispatch_tid_self(); - if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { - DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) { + DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread"); } #if HAVE_UL_UNFAIR_LOCK - if (!(tid_cur & DLOCK_NOWAITERS_BIT)) { + if (_dispatch_lock_has_waiters(cur)) { _dispatch_unfair_lock_wake(&dul->dul_lock, 0); } #elif HAVE_FUTEX @@ -374,41 +583,38 @@ void _dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, dispatch_lock_options_t flags) { - dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new; + dispatch_lock self = _dispatch_lock_value_for_self(); + dispatch_lock old_value, new_value; uint32_t timeout =
1; for (;;) { - os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, { - if (likely(tid_old == value)) { + os_atomic_rmw_loop(&dgl->dgl_lock, old_value, new_value, acquire, { + if (likely(old_value == value)) { os_atomic_rmw_loop_give_up_with_fence(acquire, return); } -#ifdef DLOCK_NOWAITERS_BIT - tid_new = tid_old & ~DLOCK_NOWAITERS_BIT; -#else - tid_new = tid_old | DLOCK_WAITERS_BIT; -#endif - if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break); + new_value = old_value | DLOCK_WAITERS_BIT; + if (new_value == old_value) os_atomic_rmw_loop_give_up(break); }); - if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) { + if (unlikely(_dispatch_lock_is_locked_by(old_value, self))) { DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); } #if HAVE_UL_UNFAIR_LOCK - _dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags); + _dispatch_unfair_lock_wait(&dgl->dgl_lock, new_value, 0, flags); #elif HAVE_FUTEX - _dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG); + _dispatch_futex_wait(&dgl->dgl_lock, new_value, NULL, FUTEX_PRIVATE_FLAG); #else - _dispatch_thread_switch(tid_new, flags, timeout++); + _dispatch_thread_switch(new_value, flags, timeout++); #endif (void)timeout; + (void)flags; } } void -_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur) +_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock cur) { - dispatch_lock_owner tid_self = _dispatch_tid_self(); - if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) { - DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread"); + if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) { + DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread"); } #if HAVE_UL_UNFAIR_LOCK diff --git a/src/shims/lock.h b/src/shims/lock.h index 246c80738..37a3ecfc8 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -30,96 +30,95 @@ #pragma mark - platform macros DISPATCH_ENUM(dispatch_lock_options, uint32_t, - DLOCK_LOCK_NONE = 
0x00000000, - DLOCK_LOCK_DATA_CONTENTION = 0x00010000, + DLOCK_LOCK_NONE = 0x00000000, + DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); #if TARGET_OS_MAC -typedef mach_port_t dispatch_lock_owner; +typedef mach_port_t dispatch_tid; typedef uint32_t dispatch_lock; -#define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL) #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc) -#define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001) -#define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) -#define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port()) +#define DLOCK_WAITERS_BIT ((dispatch_lock)0x00000001) +#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_lock_is_locked(dispatch_lock lock_value) -{ - return (lock_value & DLOCK_OWNER_MASK) != 0; -} +#define DLOCK_OWNER_NULL ((dispatch_tid)MACH_PORT_NULL) +#define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port()) DISPATCH_ALWAYS_INLINE -static inline dispatch_lock_owner +static inline dispatch_tid _dispatch_lock_owner(dispatch_lock lock_value) { - lock_value &= DLOCK_OWNER_MASK; - if (lock_value) { - lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT; + if (lock_value & DLOCK_OWNER_MASK) { + return lock_value | DLOCK_WAITERS_BIT | DLOCK_FAILED_TRYLOCK_BIT; } - return lock_value; + return DLOCK_OWNER_NULL; } +#elif defined(__linux__) + +#include +#include +#include /* For SYS_xxx definitions */ + +typedef uint32_t dispatch_tid; +typedef uint32_t dispatch_lock; + +#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) +#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) +#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) + +#define DLOCK_OWNER_NULL ((dispatch_tid)0) +#define _dispatch_tid_self() ((dispatch_tid)(_dispatch_get_tsd_base()->tid)) + DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +static inline dispatch_tid 
+_dispatch_lock_owner(dispatch_lock lock_value) { - // equivalent to _dispatch_lock_owner(lock_value) == tid - return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0; + return lock_value & DLOCK_OWNER_MASK; } +#else +# error define _dispatch_lock encoding scheme for your platform here +#endif + DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_lock_has_waiters(dispatch_lock lock_value) +static inline dispatch_lock +_dispatch_lock_value_from_tid(dispatch_tid tid) { - bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT); - return _dispatch_lock_is_locked(lock_value) != nowaiters_bit; + return tid & DLOCK_OWNER_MASK; } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_lock_has_failed_trylock(dispatch_lock lock_value) +static inline dispatch_lock +_dispatch_lock_value_for_self(void) { - return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT); + return _dispatch_lock_value_from_tid(_dispatch_tid_self()); } -#elif defined(__linux__) -#include -#include -#include /* For SYS_xxx definitions */ - -typedef uint32_t dispatch_lock; -typedef pid_t dispatch_lock_owner; - -#define DLOCK_OWNER_NULL ((dispatch_lock_owner)0) -#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK) -#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS) -#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED) -#define _dispatch_tid_self() \ - ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid)) - DISPATCH_ALWAYS_INLINE static inline bool _dispatch_lock_is_locked(dispatch_lock lock_value) { + // equivalent to _dispatch_lock_owner(lock_value) == 0 return (lock_value & DLOCK_OWNER_MASK) != 0; } DISPATCH_ALWAYS_INLINE -static inline dispatch_lock_owner -_dispatch_lock_owner(dispatch_lock lock_value) +static inline bool +_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid) { - return (lock_value & DLOCK_OWNER_MASK); + // equivalent to _dispatch_lock_owner(lock_value) == tid + return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0; } DISPATCH_ALWAYS_INLINE static inline 
bool -_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid) +_dispatch_lock_is_locked_by_self(dispatch_lock lock_value) { - return _dispatch_lock_owner(lock_value) == tid; + // equivalent to _dispatch_lock_owner(lock_value) == tid + return ((lock_value ^ _dispatch_tid_self()) & DLOCK_OWNER_MASK) == 0; } DISPATCH_ALWAYS_INLINE @@ -133,35 +132,17 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) { - return !(lock_value & DLOCK_FAILED_TRYLOCK_BIT); + return (lock_value & DLOCK_FAILED_TRYLOCK_BIT); } -#else -# error define _dispatch_lock encoding scheme for your platform here -#endif - #if __has_include() #include +#ifdef UL_COMPARE_AND_WAIT +#define HAVE_UL_COMPARE_AND_WAIT 1 #endif - -#ifndef HAVE_UL_COMPARE_AND_WAIT -#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200) -# define HAVE_UL_COMPARE_AND_WAIT 1 -#else -# define HAVE_UL_COMPARE_AND_WAIT 0 -#endif -#endif // HAVE_UL_COMPARE_AND_WAIT - -#ifndef HAVE_UL_UNFAIR_LOCK -#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200) -# define HAVE_UL_UNFAIR_LOCK 1 -#else -# define HAVE_UL_UNFAIR_LOCK 0 +#ifdef UL_UNFAIR_LOCK +#define HAVE_UL_UNFAIR_LOCK 1 #endif -#endif // HAVE_UL_UNFAIR_LOCK - -#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX) #endif #ifndef HAVE_FUTEX @@ -172,29 +153,68 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #endif // HAVE_FUTEX +#pragma mark - semaphores + #if USE_MACH_SEM -#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) -#define DISPATCH_GROUP_VERIFY_KR(x) do { \ - if (unlikely((x) == KERN_INVALID_NAME)) { \ - DISPATCH_CLIENT_CRASH((x), "Use-after-free 
of dispatch_group_t"); \ - } else if (unlikely(x)) { \ - DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \ - } \ - } while (0) + +typedef semaphore_t _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO SYNC_POLICY_FIFO +#define _DSEMA4_POLICY_LIFO SYNC_POLICY_LIFO +#define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT + +#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL) +#define _dispatch_sema4_is_created(sema) (*(sema) != MACH_PORT_NULL) +void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy); + #elif USE_POSIX_SEM -#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \ - if (unlikely((x) == -1)) { \ - DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \ - } \ - } while (0) + +typedef sem_t _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO 0 +#define _DSEMA4_POLICY_LIFO 0 +#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy); +#define _dispatch_sema4_is_created(sema) ((void)sema, 1) +#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy) + +#elif USE_WIN32_SEM + +typedef HANDLE _dispatch_sema4_t; +#define _DSEMA4_POLICY_FIFO 0 +#define _DSEMA4_POLICY_LIFO 0 +#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) + +#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0) +#define _dispatch_sema4_is_created(sema) (*(sema) != 0) +void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy); + +#else +#error "port has to implement _dispatch_sema4_t" #endif +void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy); +void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count); +void _dispatch_sema4_wait(_dispatch_sema4_t *sema); +bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sema4_create(_dispatch_sema4_t *sema, int policy) +{ + if (!_dispatch_sema4_is_created(sema)) { + _dispatch_sema4_create_slow(sema, policy); 
+ } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy) +{ + if (_dispatch_sema4_is_created(sema)) { + _dispatch_sema4_dispose_slow(sema, policy); + } +} + #pragma mark - compare and wait DISPATCH_NOT_TAIL_CALLED @@ -222,53 +242,16 @@ void _dispatch_wake_by_address(uint32_t volatile *address); * This locking primitive has no notion of ownership */ typedef struct dispatch_thread_event_s { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - union { - semaphore_t dte_semaphore; - uint32_t dte_value; - }; -#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX // 1 means signalled but not waited on yet // UINT32_MAX means waited on, but not signalled yet // 0 is the initial and final state uint32_t dte_value; -#elif USE_POSIX_SEM - sem_t dte_sem; #else -# error define dispatch_thread_event_s for your platform + _dispatch_sema4_t dte_sema; #endif } dispatch_thread_event_s, *dispatch_thread_event_t; -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK -semaphore_t _dispatch_thread_semaphore_create(void); -void _dispatch_thread_semaphore_dispose(void *); - -DISPATCH_ALWAYS_INLINE -static inline semaphore_t -_dispatch_get_thread_semaphore(void) -{ - semaphore_t sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - if (unlikely(!sema)) { - return _dispatch_thread_semaphore_create(); - } - _dispatch_thread_setspecific(dispatch_sema4_key, NULL); - return sema; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_put_thread_semaphore(semaphore_t sema) -{ - semaphore_t old_sema = (semaphore_t)(uintptr_t) - _dispatch_thread_getspecific(dispatch_sema4_key); - _dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema); - if (unlikely(old_sema)) { - return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema); - } -} -#endif - DISPATCH_NOT_TAIL_CALLED void _dispatch_thread_event_wait_slow(dispatch_thread_event_t); void 
_dispatch_thread_event_signal_slow(dispatch_thread_event_t); @@ -277,17 +260,10 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_init(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - dte->dte_semaphore = _dispatch_get_thread_semaphore(); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX dte->dte_value = 0; -#elif USE_POSIX_SEM - int rc = sem_init(&dte->dte_sem, 0, 0); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO); #endif } @@ -295,12 +271,6 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_signal(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_thread_event_signal_slow(dte); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { // 0 -> 1 transition doesn't need a signal @@ -308,7 +278,7 @@ _dispatch_thread_event_signal(dispatch_thread_event_t dte) // waiters do the validation return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_signal_slow(dte); @@ -319,19 +289,13 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_wait(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_thread_event_wait_slow(dte); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX if (os_atomic_dec(&dte->dte_value, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return // for any other value, go to the slowpath which checks it's not corrupt return; } -#elif USE_POSIX_SEM +#else // fallthrough #endif _dispatch_thread_event_wait_slow(dte); @@ -341,18 +305,11 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_event_destroy(dispatch_thread_event_t dte) { -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - if 
(DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) { - _dispatch_put_thread_semaphore(dte->dte_semaphore); - return; - } -#endif #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX // nothing to do dispatch_assert(dte->dte_value == 0); -#elif USE_POSIX_SEM - int rc = sem_destroy(&dte->dte_sem); - DISPATCH_SEMAPHORE_VERIFY_RET(rc); +#else + _dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO); #endif } @@ -372,9 +329,9 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l) { - dispatch_lock tid_self = _dispatch_tid_self(); + dispatch_lock value_self = _dispatch_lock_value_for_self(); if (likely(os_atomic_cmpxchg(&l->dul_lock, - DLOCK_OWNER_NULL, tid_self, acquire))) { + DLOCK_OWNER_NULL, value_self, acquire))) { return; } return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE); @@ -382,54 +339,42 @@ _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, - dispatch_lock_owner *owner) +_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, dispatch_tid *owner) { - dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self(); + dispatch_lock value_self = _dispatch_lock_value_for_self(); + dispatch_lock old_value, new_value; - os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, { - if (likely(!_dispatch_lock_is_locked(tid_old))) { - tid_new = tid_self; + os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, acquire, { + if (likely(!_dispatch_lock_is_locked(old_value))) { + new_value = value_self; } else { -#ifdef DLOCK_NOFAILED_TRYLOCK_BIT - tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT; -#else - tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT; -#endif + new_value = old_value | DLOCK_FAILED_TRYLOCK_BIT; } }); - if (owner) *owner = _dispatch_lock_owner(tid_new); - return !_dispatch_lock_is_locked(tid_old); + if (owner) *owner = _dispatch_lock_owner(new_value); + return !_dispatch_lock_is_locked(old_value); } DISPATCH_ALWAYS_INLINE 
static inline bool _dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l) { - dispatch_lock tid_old, tid_new; + dispatch_lock old_value, new_value; - os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, { -#ifdef DLOCK_NOFAILED_TRYLOCK_BIT - if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) { - tid_new = DLOCK_OWNER_NULL; + os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, release, { + if (unlikely(old_value & DLOCK_FAILED_TRYLOCK_BIT)) { + new_value = old_value ^ DLOCK_FAILED_TRYLOCK_BIT; } else { - tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT; + new_value = DLOCK_OWNER_NULL; } -#else - if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) { - tid_new = DLOCK_OWNER_NULL; - } else { - tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT; - } -#endif }); - if (unlikely(tid_new)) { + if (unlikely(new_value)) { // unlock failed, renew the lock, which needs an acquire barrier os_atomic_thread_fence(acquire); return false; } - if (unlikely(_dispatch_lock_has_waiters(tid_old))) { - _dispatch_unfair_lock_unlock_slow(l, tid_old); + if (unlikely(_dispatch_lock_has_waiters(old_value))) { + _dispatch_unfair_lock_unlock_slow(l, old_value); } return true; } @@ -438,18 +383,18 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l) { - dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); + dispatch_lock cur, value_self = _dispatch_lock_value_for_self(); #if HAVE_FUTEX if (likely(os_atomic_cmpxchgv(&l->dul_lock, - tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) { + value_self, DLOCK_OWNER_NULL, &cur, release))) { return false; } #else - tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release); - if (likely(tid_cur == tid_self)) return false; + cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release); + if (likely(cur == value_self)) return false; #endif - _dispatch_unfair_lock_unlock_slow(l, tid_cur); - return _dispatch_lock_has_failed_trylock(tid_cur); + _dispatch_unfair_lock_unlock_slow(l, 
cur); + return _dispatch_lock_has_failed_trylock(cur); } DISPATCH_ALWAYS_INLINE @@ -492,9 +437,8 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_gate_tryenter(dispatch_gate_t l) { - dispatch_lock tid_self = _dispatch_tid_self(); - return likely(os_atomic_cmpxchg(&l->dgl_lock, - DLOCK_GATE_UNLOCKED, tid_self, acquire)); + return os_atomic_cmpxchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, + _dispatch_lock_value_for_self(), acquire); } #define _dispatch_gate_wait(l, flags) \ @@ -504,36 +448,39 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_gate_broadcast(dispatch_gate_t l) { - dispatch_lock tid_cur, tid_self = _dispatch_tid_self(); - tid_cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release); - if (likely(tid_cur == tid_self)) return; - _dispatch_gate_broadcast_slow(l, tid_cur); + dispatch_lock cur, value_self = _dispatch_lock_value_for_self(); + cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release); + if (likely(cur == value_self)) return; + _dispatch_gate_broadcast_slow(l, cur); } DISPATCH_ALWAYS_INLINE static inline bool _dispatch_once_gate_tryenter(dispatch_once_gate_t l) { - dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self(); - return likely(os_atomic_cmpxchg(&l->dgo_once, - DLOCK_ONCE_UNLOCKED, tid_self, acquire)); + return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED, + (dispatch_once_t)_dispatch_lock_value_for_self(), acquire); } #define _dispatch_once_gate_wait(l) \ _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \ DLOCK_LOCK_NONE) +DISPATCH_ALWAYS_INLINE +static inline dispatch_once_t +_dispatch_once_xchg_done(dispatch_once_t *pred) +{ + return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_once_gate_broadcast(dispatch_once_gate_t l) { - dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self(); - // see once.c for explanation about this trick - os_atomic_maximally_synchronizing_barrier(); - // above assumed to 
contain release barrier - tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed); - if (likely(tid_cur == tid_self)) return; - _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur); + dispatch_lock value_self = _dispatch_lock_value_for_self(); + dispatch_once_t cur = _dispatch_once_xchg_done(&l->dgo_once); + if (likely(cur == (dispatch_once_t)value_self)) return; + _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)cur); } #endif // __DISPATCH_SHIMS_LOCK__ diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index 8af33ead9..be9327baf 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -27,26 +27,22 @@ #ifndef __DISPATCH_SHIMS_PERFMON__ #define __DISPATCH_SHIMS_PERFMON__ -#if DISPATCH_PERF_MON && !DISPATCH_INTROSPECTION - -#if defined (USE_APPLE_TSD_OPTIMIZATIONS) && defined(SIMULATE_5491082) && \ - (defined(__i386__) || defined(__x86_64__)) -#ifdef __LP64__ -#define _dispatch_perfmon_workitem_inc() asm("incq %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_perfmon_workitem_dec() asm("decq %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#else -#define _dispatch_perfmon_workitem_inc() asm("incl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") -#define _dispatch_perfmon_workitem_dec() asm("decl %%gs:%0" : "+m" \ - (*(void **)(dispatch_bcounter_key * sizeof(void *) + \ - _PTHREAD_TSD_OFFSET)) :: "cc") +#if DISPATCH_PERF_MON +#if DISPATCH_INTROSPECTION +#error invalid configuration #endif -#else /* !USE_APPLE_TSD_OPTIMIZATIONS */ + +typedef enum { + perfmon_thread_no_trace = 0, + perfmon_thread_event_no_steal, // 1) Event threads that couldn't steal + perfmon_thread_event_steal, // 2) Event threads failing to steal very late + perfmon_thread_worker_non_oc, // 3) Non overcommit threads finding + // nothing on the root queues + 
perfmon_thread_worker_oc, // 4) Overcommit thread finding nothing to do + perfmon_thread_manager, +} perfmon_thread_type; + +DISPATCH_ALWAYS_INLINE static inline void _dispatch_perfmon_workitem_inc(void) { @@ -54,6 +50,8 @@ _dispatch_perfmon_workitem_inc(void) cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)++cnt); } + +DISPATCH_ALWAYS_INLINE static inline void _dispatch_perfmon_workitem_dec(void) { @@ -61,18 +59,40 @@ _dispatch_perfmon_workitem_dec(void) cnt = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); _dispatch_thread_setspecific(dispatch_bcounter_key, (void *)--cnt); } -#endif /* USE_APPLE_TSD_OPTIMIZATIONS */ +#define DISPATCH_PERF_MON_ARGS_PROTO , uint64_t perfmon_start +#define DISPATCH_PERF_MON_ARGS , perfmon_start +#define DISPATCH_PERF_MON_VAR uint64_t perfmon_start; +#define DISPATCH_PERF_MON_VAR_INIT uint64_t perfmon_start = 0; + +#define _dispatch_perfmon_start_impl(trace) ({ \ + if (trace) _dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start); \ + perfmon_start = _dispatch_absolute_time(); \ + }) #define _dispatch_perfmon_start() \ - uint64_t start = _dispatch_absolute_time() -#define _dispatch_perfmon_end() \ - _dispatch_queue_merge_stats(start) + DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(true) +#define _dispatch_perfmon_start_notrace() \ + DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(false) +#define _dispatch_perfmon_end(thread_type) \ + _dispatch_queue_merge_stats(perfmon_start, true, thread_type) +#define _dispatch_perfmon_end_notrace() \ + _dispatch_queue_merge_stats(perfmon_start, false, perfmon_thread_no_trace) + +void _dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type); + #else +#define DISPATCH_PERF_MON_ARGS_PROTO +#define DISPATCH_PERF_MON_ARGS +#define DISPATCH_PERF_MON_VAR +#define DISPATCH_PERF_MON_VAR_INIT #define _dispatch_perfmon_workitem_inc() #define 
_dispatch_perfmon_workitem_dec() +#define _dispatch_perfmon_start_impl(trace) #define _dispatch_perfmon_start() -#define _dispatch_perfmon_end() +#define _dispatch_perfmon_end(thread_type) +#define _dispatch_perfmon_start_notrace() +#define _dispatch_perfmon_end_notrace() #endif // DISPATCH_PERF_MON diff --git a/src/shims/priority.h b/src/shims/priority.h new file mode 100644 index 000000000..3e85ff54c --- /dev/null +++ b/src/shims/priority.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2008-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_SHIMS_PRIORITY__ +#define __DISPATCH_SHIMS_PRIORITY__ + +#if HAVE_PTHREAD_QOS_H && __has_include() +#include +#include +#ifndef _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#endif +#ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#endif +#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#endif +#ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#endif +#ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#endif +#else // HAVE_PTHREAD_QOS_H +OS_ENUM(qos_class, unsigned int, + QOS_CLASS_USER_INTERACTIVE = 0x21, + QOS_CLASS_USER_INITIATED = 0x19, + QOS_CLASS_DEFAULT = 0x15, + QOS_CLASS_UTILITY = 0x11, + QOS_CLASS_BACKGROUND = 0x09, + QOS_CLASS_MAINTENANCE = 0x05, + QOS_CLASS_UNSPECIFIED = 0x00, +); +typedef unsigned long pthread_priority_t; +#define QOS_MIN_RELATIVE_PRIORITY (-15) +#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) +#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 + +#endif // HAVE_PTHREAD_QOS_H + +typedef uint32_t dispatch_qos_t; +typedef uint32_t dispatch_priority_t; +typedef uint32_t dispatch_priority_t; +typedef uint16_t dispatch_priority_requested_t; + +#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0) +#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1) +#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2) +#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3) 
+#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4) +#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5) +#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6) +#define DISPATCH_QOS_MAX DISPATCH_QOS_USER_INTERACTIVE +#define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) + +#define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) +#define DISPATCH_PRIORITY_RELPRI_SHIFT 0 +#define DISPATCH_PRIORITY_QOS_MASK ((dispatch_priority_t)0x0000ff00) +#define DISPATCH_PRIORITY_QOS_SHIFT 8 +#define DISPATCH_PRIORITY_REQUESTED_MASK ((dispatch_priority_t)0x0000ffff) +#define DISPATCH_PRIORITY_OVERRIDE_MASK ((dispatch_priority_t)0x00ff0000) +#define DISPATCH_PRIORITY_OVERRIDE_SHIFT 16 +#define DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) + +#define DISPATCH_PRIORITY_SATURATED_OVERRIDE ((dispatch_priority_t)0x000f0000) + +#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG +#define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | \ + DISPATCH_PRIORITY_FLAG_MANAGER) + +// not passed to pthread +#define DISPATCH_PRIORITY_FLAG_INHERIT ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG +#define DISPATCH_PRIORITY_FLAG_ENFORCE ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG +#define DISPATCH_PRIORITY_FLAG_ROOTQUEUE ((dispatch_priority_t)0x20000000) // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG + +#pragma mark dispatch_qos + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_qos_class(qos_class_t cls) +{ + switch ((unsigned int)cls) { + case QOS_CLASS_USER_INTERACTIVE: return DISPATCH_QOS_USER_INTERACTIVE; + case QOS_CLASS_USER_INITIATED: 
return DISPATCH_QOS_USER_INITIATED; + case QOS_CLASS_DEFAULT: return DISPATCH_QOS_DEFAULT; + case QOS_CLASS_UTILITY: return DISPATCH_QOS_UTILITY; + case QOS_CLASS_BACKGROUND: return DISPATCH_QOS_BACKGROUND; + case QOS_CLASS_MAINTENANCE: return DISPATCH_QOS_MAINTENANCE; + default: return DISPATCH_QOS_UNSPECIFIED; + } +} + +DISPATCH_ALWAYS_INLINE +static inline qos_class_t +_dispatch_qos_to_qos_class(dispatch_qos_t qos) +{ + switch (qos) { + case DISPATCH_QOS_USER_INTERACTIVE: return QOS_CLASS_USER_INTERACTIVE; + case DISPATCH_QOS_USER_INITIATED: return QOS_CLASS_USER_INITIATED; + case DISPATCH_QOS_DEFAULT: return QOS_CLASS_DEFAULT; + case DISPATCH_QOS_UTILITY: return QOS_CLASS_UTILITY; + case DISPATCH_QOS_BACKGROUND: return QOS_CLASS_BACKGROUND; + case DISPATCH_QOS_MAINTENANCE: return (qos_class_t)QOS_CLASS_MAINTENANCE; + default: return QOS_CLASS_UNSPECIFIED; + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_queue_priority(long priority) +{ + switch (priority) { + case DISPATCH_QUEUE_PRIORITY_BACKGROUND: return DISPATCH_QOS_BACKGROUND; + case DISPATCH_QUEUE_PRIORITY_NON_INTERACTIVE: return DISPATCH_QOS_UTILITY; + case DISPATCH_QUEUE_PRIORITY_LOW: return DISPATCH_QOS_UTILITY; + case DISPATCH_QUEUE_PRIORITY_DEFAULT: return DISPATCH_QOS_DEFAULT; + case DISPATCH_QUEUE_PRIORITY_HIGH: return DISPATCH_QOS_USER_INITIATED; + default: return _dispatch_qos_from_qos_class((qos_class_t)priority); + } +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_pp(pthread_priority_t pp) +{ + pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; + pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + return (dispatch_qos_t)__builtin_ffs((int)pp); +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_qos_to_pp(dispatch_qos_t qos) +{ + pthread_priority_t pp; + pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT); + return pp | _PTHREAD_PRIORITY_PRIORITY_MASK; +} + +// including maintenance +DISPATCH_ALWAYS_INLINE +static inline 
bool +_dispatch_qos_is_background(dispatch_qos_t qos) +{ + return qos && qos <= DISPATCH_QOS_BACKGROUND; +} + +#pragma mark dispatch_priority + +#define _dispatch_priority_make(qos, relpri) \ + (qos ? ((((qos) << DISPATCH_PRIORITY_QOS_SHIFT) & DISPATCH_PRIORITY_QOS_MASK) | \ + ((dispatch_priority_t)(relpri - 1) & DISPATCH_PRIORITY_RELPRI_MASK)) : 0) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_with_override_qos(dispatch_priority_t pri, + dispatch_qos_t oqos) +{ + pri &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + pri |= oqos << DISPATCH_PRIORITY_OVERRIDE_SHIFT; + return pri; +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_priority_relpri(dispatch_priority_t dbp) +{ + if (dbp & DISPATCH_PRIORITY_QOS_MASK) { + return (int8_t)(dbp & DISPATCH_PRIORITY_RELPRI_MASK) + 1; + } + return 0; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_priority_qos(dispatch_priority_t dbp) +{ + dbp &= DISPATCH_PRIORITY_QOS_MASK; + return dbp >> DISPATCH_PRIORITY_QOS_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_priority_override_qos(dispatch_priority_t dbp) +{ + dbp &= DISPATCH_PRIORITY_OVERRIDE_MASK; + return dbp >> DISPATCH_PRIORITY_OVERRIDE_SHIFT; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_priority_t +_dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) +{ + dispatch_assert(!(pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG)); + + dispatch_priority_t dbp; + if (keep_flags) { + dbp = pp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | + DISPATCH_PRIORITY_RELPRI_MASK); + } else { + dbp = pp & DISPATCH_PRIORITY_RELPRI_MASK; + } + + dbp |= _dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT; + return dbp; +} +#define _dispatch_priority_from_pp(pp) \ + _dispatch_priority_from_pp_impl(pp, true) +#define _dispatch_priority_from_pp_strip_flags(pp) \ + _dispatch_priority_from_pp_impl(pp, false) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t 
+_dispatch_priority_to_pp_impl(dispatch_priority_t dbp, bool keep_flags) +{ + pthread_priority_t pp; + if (keep_flags) { + pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | + DISPATCH_PRIORITY_RELPRI_MASK); + } else { + pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; + } + dispatch_qos_t qos = _dispatch_priority_qos(dbp); + if (qos) { + pp |= (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); + } + return pp; +} +#define _dispatch_priority_to_pp(pp) \ + _dispatch_priority_to_pp_impl(pp, true) +#define _dispatch_priority_to_pp_strip_flags(pp) \ + _dispatch_priority_to_pp_impl(pp, false) + +#endif // __DISPATCH_SHIMS_PRIORITY__ diff --git a/src/shims/time.h b/src/shims/time.h index 7b297711c..0b8e92617 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -40,9 +40,21 @@ sleep(unsigned int seconds) } #endif -uint64_t _dispatch_get_nanoseconds(void); +typedef enum { + DISPATCH_CLOCK_WALL, + DISPATCH_CLOCK_MACH, +#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_MACH + 1) +} dispatch_clock_t; + +void _dispatch_time_init(void); #if defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME +#define DISPATCH_TIME_UNIT_USES_NANOSECONDS 1 +#else +#define DISPATCH_TIME_UNIT_USES_NANOSECONDS 0 +#endif + +#if DISPATCH_TIME_UNIT_USES_NANOSECONDS // x86 currently implements mach time in nanoseconds // this is NOT likely to change DISPATCH_ALWAYS_INLINE @@ -59,83 +71,122 @@ _dispatch_time_nano2mach(uint64_t nsec) return nsec; } #else -typedef struct _dispatch_host_time_data_s { - dispatch_once_t pred; - long double frac; - bool ratio_1_to_1; -} _dispatch_host_time_data_s; -extern _dispatch_host_time_data_s _dispatch_host_time_data; -void _dispatch_get_host_time_init(void *context); - +#define DISPATCH_USE_HOST_TIME 1 +extern uint64_t (*_dispatch_host_time_mach2nano)(uint64_t machtime); +extern uint64_t (*_dispatch_host_time_nano2mach)(uint64_t nsec); static inline uint64_t _dispatch_time_mach2nano(uint64_t machtime) { - _dispatch_host_time_data_s *const data = 
&_dispatch_host_time_data; - dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - - if (!machtime || slowpath(data->ratio_1_to_1)) { - return machtime; - } - if (machtime >= INT64_MAX) { - return INT64_MAX; - } - long double big_tmp = ((long double)machtime * data->frac) + .5; - if (slowpath(big_tmp >= INT64_MAX)) { - return INT64_MAX; - } - return (uint64_t)big_tmp; + return _dispatch_host_time_mach2nano(machtime); } static inline uint64_t _dispatch_time_nano2mach(uint64_t nsec) { - _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; - dispatch_once_f(&data->pred, NULL, _dispatch_get_host_time_init); - - if (!nsec || slowpath(data->ratio_1_to_1)) { - return nsec; - } - if (nsec >= INT64_MAX) { - return INT64_MAX; - } - long double big_tmp = ((long double)nsec / data->frac) + .5; - if (slowpath(big_tmp >= INT64_MAX)) { - return INT64_MAX; - } - return (uint64_t)big_tmp; + return _dispatch_host_time_nano2mach(nsec); } +#endif // DISPATCH_USE_HOST_TIME + +/* XXXRW: Some kind of overflow detection needed? */ +#define _dispatch_timespec_to_nano(ts) \ + ((uint64_t)(ts).tv_sec * NSEC_PER_SEC + (uint64_t)(ts).tv_nsec) +#define _dispatch_timeval_to_nano(tv) \ + ((uint64_t)(tv).tv_sec * NSEC_PER_SEC + \ + (uint64_t)(tv).tv_usec * NSEC_PER_USEC) + +static inline uint64_t +_dispatch_get_nanoseconds(void) +{ + dispatch_static_assert(sizeof(NSEC_PER_SEC) == 8); + dispatch_static_assert(sizeof(USEC_PER_SEC) == 8); + +#if TARGET_OS_MAC + return clock_gettime_nsec_np(CLOCK_REALTIME); +#elif HAVE_DECL_CLOCK_REALTIME + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif TARGET_OS_WIN32 + // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
+ FILETIME ft; + ULARGE_INTEGER li; + GetSystemTimeAsFileTime(&ft); + li.LowPart = ft.dwLowDateTime; + li.HighPart = ft.dwHighDateTime; + return li.QuadPart * 100ull; +#else + struct timeval tv; + dispatch_assert_zero(gettimeofday(&tv, NULL)); + return _dispatch_timeval_to_nano(tv); #endif +} static inline uint64_t _dispatch_absolute_time(void) { #if HAVE_MACH_ABSOLUTE_TIME return mach_absolute_time(); +#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif HAVE_DECL_CLOCK_MONOTONIC + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); + return _dispatch_timespec_to_nano(ts); #elif TARGET_OS_WIN32 LARGE_INTEGER now; return QueryPerformanceCounter(&now) ? now.QuadPart : 0; #else - struct timespec ts; - int ret; +#error platform needs to implement _dispatch_absolute_time() +#endif +} -#if HAVE_DECL_CLOCK_UPTIME - ret = clock_gettime(CLOCK_UPTIME, &ts); -#elif HAVE_DECL_CLOCK_MONOTONIC - ret = clock_gettime(CLOCK_MONOTONIC, &ts); +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_approximate_time(void) +{ +#if HAVE_MACH_APPROXIMATE_TIME + return mach_approximate_time(); +#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_REALTIME_COARSE, &ts)); + return _dispatch_timespec_to_nano(ts); #else -#error "clock_gettime: no supported absolute time clock" + return _dispatch_absolute_time(); #endif - (void)dispatch_assume_zero(ret); +} - /* XXXRW: Some kind of overflow detection needed? 
*/ - return (ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec); -#endif // HAVE_MACH_ABSOLUTE_TIME +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_now(dispatch_clock_t clock) +{ + switch (clock) { + case DISPATCH_CLOCK_MACH: + return _dispatch_absolute_time(); + case DISPATCH_CLOCK_WALL: + return _dispatch_get_nanoseconds(); + } + __builtin_unreachable(); } +typedef struct { + uint64_t nows[DISPATCH_CLOCK_COUNT]; +} dispatch_clock_now_cache_s, *dispatch_clock_now_cache_t; + +DISPATCH_ALWAYS_INLINE static inline uint64_t -_dispatch_approximate_time(void) +_dispatch_time_now_cached(dispatch_clock_t clock, + dispatch_clock_now_cache_t cache) { - return _dispatch_absolute_time(); + if (likely(cache->nows[clock])) { + return cache->nows[clock]; + } + return cache->nows[clock] = _dispatch_time_now(clock); } #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 2e3ece8b0..c119e4f01 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -59,6 +59,19 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif #if DISPATCH_USE_DIRECT_TSD +#ifndef __TSD_THREAD_QOS_CLASS +#define __TSD_THREAD_QOS_CLASS 4 +#endif +#ifndef __TSD_RETURN_TO_KERNEL +#define __TSD_RETURN_TO_KERNEL 5 +#endif +#ifndef __TSD_MACH_SPECIAL_REPLY +#define __TSD_MACH_SPECIAL_REPLY 8 +#endif + +static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; + // dispatch_queue_key & dispatch_frame_key need to be contiguous // in that order, and queue_key to be an even number static const unsigned long dispatch_queue_key = __PTK_LIBDISPATCH_KEY0; @@ -67,21 +80,13 @@ static const unsigned long dispatch_cache_key = __PTK_LIBDISPATCH_KEY2; static const unsigned long dispatch_context_key = __PTK_LIBDISPATCH_KEY3; static const unsigned long dispatch_pthread_root_queue_observer_hooks_key = __PTK_LIBDISPATCH_KEY4; -static const unsigned long dispatch_defaultpriority_key 
=__PTK_LIBDISPATCH_KEY5; +static const unsigned long dispatch_basepri_key = __PTK_LIBDISPATCH_KEY5; #if DISPATCH_INTROSPECTION static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; #elif DISPATCH_PERF_MON static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif -static const unsigned long dispatch_sema4_key = __PTK_LIBDISPATCH_KEY7; - -#ifndef __TSD_THREAD_QOS_CLASS -#define __TSD_THREAD_QOS_CLASS 4 -#endif -#ifndef __TSD_THREAD_VOUCHER -#define __TSD_THREAD_VOUCHER 6 -#endif -static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; +static const unsigned long dispatch_wlh_key = __PTK_LIBDISPATCH_KEY7; static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; @@ -108,16 +113,15 @@ struct dispatch_tsd { void *dispatch_cache_key; void *dispatch_context_key; void *dispatch_pthread_root_queue_observer_hooks_key; - void *dispatch_defaultpriority_key; + void *dispatch_basepri_key; #if DISPATCH_INTROSPECTION void *dispatch_introspection_key; #elif DISPATCH_PERF_MON void *dispatch_bcounter_key; -#endif -#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK - void *dispatch_sema4_key; #endif void *dispatch_priority_key; + void *dispatch_r2k_key; + void *dispatch_wlh_key; void *dispatch_voucher_key; void *dispatch_deferred_items_key; }; @@ -160,19 +164,20 @@ _dispatch_get_tsd_base(void) _dispatch_thread_setspecific(k2,(p)[1]) ) #else +extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_r2k_key; extern pthread_key_t dispatch_queue_key; extern pthread_key_t dispatch_frame_key; extern pthread_key_t dispatch_cache_key; extern pthread_key_t dispatch_context_key; extern pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; -extern pthread_key_t dispatch_defaultpriority_key; +extern pthread_key_t dispatch_basepri_key; #if DISPATCH_INTROSPECTION extern pthread_key_t dispatch_introspection_key; #elif 
DISPATCH_PERF_MON extern pthread_key_t dispatch_bcounter_key; #endif -extern pthread_key_t dispatch_sema4_key; -extern pthread_key_t dispatch_priority_key; +extern pthread_key_t dispatch_wlh_key; extern pthread_key_t dispatch_voucher_key; extern pthread_key_t dispatch_deferred_items_key; @@ -308,6 +313,11 @@ _dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, #define _dispatch_set_thread_mig_reply_port(p) ( \ _dispatch_thread_setspecific(_PTHREAD_TSD_SLOT_MIG_REPLY, \ (void*)(uintptr_t)(p))) +#define _dispatch_get_thread_special_reply_port() ((mach_port_t)(uintptr_t) \ + _dispatch_thread_getspecific(__TSD_MACH_SPECIAL_REPLY)) +#define _dispatch_set_thread_special_reply_port(p) ( \ + _dispatch_thread_setspecific(__TSD_MACH_SPECIAL_REPLY, \ + (void*)(uintptr_t)(p))) #endif DISPATCH_TSD_INLINE DISPATCH_CONST diff --git a/src/shims/yield.h b/src/shims/yield.h index 1850aeeed..67f8679ac 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -31,31 +31,40 @@ #pragma mark _dispatch_wait_until #if DISPATCH_HW_CONFIG_UP -#define _dispatch_wait_until(c) do { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ int _spins = 0; \ - while (!fastpath(c)) { \ + for (;;) { \ + if (likely(_c = (c))) break; \ _spins++; \ _dispatch_preemption_yield(_spins); \ - } } while (0) + } \ + _c; }) #elif TARGET_OS_EMBEDDED // #ifndef DISPATCH_WAIT_SPINS #define DISPATCH_WAIT_SPINS 1024 #endif -#define _dispatch_wait_until(c) do { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ int _spins = -(DISPATCH_WAIT_SPINS); \ - while (!fastpath(c)) { \ + for (;;) { \ + if (likely(_c = (c))) break; \ if (slowpath(_spins++ >= 0)) { \ _dispatch_preemption_yield(_spins); \ } else { \ dispatch_hardware_pause(); \ } \ - } } while (0) + } \ + _c; }) #else -#define _dispatch_wait_until(c) do { \ - while (!fastpath(c)) { \ +#define _dispatch_wait_until(c) ({ \ + typeof(c) _c; \ + for (;;) { \ + if (likely(_c = (c))) break; \ dispatch_hardware_pause(); \ - } } while (0) 
+ } \ + _c; }) #endif #pragma mark - diff --git a/src/source.c b/src/source.c index a5a2c94a2..6f504787d 100644 --- a/src/source.c +++ b/src/source.c @@ -19,235 +19,78 @@ */ #include "internal.h" -#if HAVE_MACH -#include "protocol.h" -#include "protocolServer.h" -#endif -#include - -#define DKEV_DISPOSE_IMMEDIATE_DELETE 0x1 -#define DKEV_UNREGISTER_DISCONNECTED 0x2 -#define DKEV_UNREGISTER_REPLY_REMOVE 0x4 -#define DKEV_UNREGISTER_WAKEUP 0x8 static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); -static void _dispatch_source_merge_kevent(dispatch_source_t ds, - const _dispatch_kevent_qos_s *ke); -static bool _dispatch_kevent_register(dispatch_kevent_t *dkp, - pthread_priority_t pp, uint32_t *flgp); -static long _dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - unsigned int options); -static long _dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags); -static void _dispatch_kevent_drain(_dispatch_kevent_qos_s *ke); -static void _dispatch_kevent_merge(_dispatch_kevent_qos_s *ke); -static void _dispatch_timers_kevent(_dispatch_kevent_qos_s *ke); -static void _dispatch_timers_unregister(dispatch_source_t ds, - dispatch_kevent_t dk); -static void _dispatch_timers_update(dispatch_source_t ds); -static void _dispatch_timer_aggregates_check(void); -static void _dispatch_timer_aggregates_register(dispatch_source_t ds); -static void _dispatch_timer_aggregates_update(dispatch_source_t ds, - unsigned int tidx); -static void _dispatch_timer_aggregates_unregister(dispatch_source_t ds, - unsigned int tidx); +static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); + +#define DISPATCH_TIMERS_UNREGISTER 0x1 +#define DISPATCH_TIMERS_RETAIN_2 0x2 +static void _dispatch_timers_update(dispatch_unote_t du, uint32_t flags); +static void _dispatch_timers_unregister(dispatch_timer_source_refs_t dt); + +static void _dispatch_source_timer_configure(dispatch_source_t ds); static inline unsigned 
long _dispatch_source_timer_data( - dispatch_source_refs_t dr, unsigned long prev); -static void _dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke); -static long _dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke); -static void _dispatch_memorypressure_init(void); -#if HAVE_MACH -static void _dispatch_mach_host_calendar_change_register(void); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static void _dispatch_mach_recv_msg_buf_init(void); -static kern_return_t _dispatch_kevent_machport_resume(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags); -#endif -static kern_return_t _dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags); -static void _dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke); -static mach_msg_size_t _dispatch_kevent_mach_msg_size( - _dispatch_kevent_qos_s *ke); -#else -static inline void _dispatch_mach_host_calendar_change_register(void) {} -static inline void _dispatch_mach_recv_msg_buf_init(void) {} -#endif -static const char * _evfiltstr(short filt); -#if DISPATCH_DEBUG -static void dispatch_kevent_debug(const char *verb, - const _dispatch_kevent_qos_s *kev, int i, int n, - const char *function, unsigned int line); -static void _dispatch_kevent_debugger(void *context); -#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ - dispatch_assert(_dispatch_queue_get_current() == &_dispatch_mgr_q) -#else -static inline void -dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, - int i, int n, const char *function, unsigned int line) -{ - (void)verb; (void)kev; (void)i; (void)n; (void)function; (void)line; -} -#define DISPATCH_ASSERT_ON_MANAGER_QUEUE() -#endif -#define _dispatch_kevent_debug(verb, _kev) \ - dispatch_kevent_debug(verb, _kev, 0, 1, __FUNCTION__, __LINE__) -#define _dispatch_kevent_debug_n(verb, _kev, i, n) \ - dispatch_kevent_debug(verb, _kev, i, n, __FUNCTION__, __LINE__) -#ifndef DISPATCH_MGR_QUEUE_DEBUG -#define DISPATCH_MGR_QUEUE_DEBUG 0 
-#endif -#if DISPATCH_MGR_QUEUE_DEBUG -#define _dispatch_kevent_mgr_debug _dispatch_kevent_debug -#else -static inline void -_dispatch_kevent_mgr_debug(_dispatch_kevent_qos_s* kev DISPATCH_UNUSED) {} -#endif + dispatch_source_t ds, dispatch_unote_t du); #pragma mark - #pragma mark dispatch_source_t dispatch_source_t -dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, +dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask, dispatch_queue_t dq) { - // ensure _dispatch_evfilt_machport_direct_enabled is initialized - _dispatch_root_queues_init(); - const _dispatch_kevent_qos_s *proto_kev = &type->ke; + dispatch_source_refs_t dr; dispatch_source_t ds; - dispatch_kevent_t dk; - // input validation - if (type == NULL || (mask & ~type->mask)) { + dr = dux_create(dst, handle, mask)._dr; + if (unlikely(!dr)) { return DISPATCH_BAD_INPUT; } - if (type->mask && !mask) { - // expect a non-zero mask when the type declares one ... except - switch (type->ke.filter) { - case DISPATCH_EVFILT_TIMER: - break; // timers don't need masks -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: - break; // type->init forces the only acceptable mask -#endif - case DISPATCH_EVFILT_MACH_NOTIFICATION: - break; // type->init handles zero mask as a legacy case - default: - // otherwise reject as invalid input - return DISPATCH_BAD_INPUT; - } - } - - switch (type->ke.filter) { - case EVFILT_SIGNAL: - if (handle >= NSIG) { - return DISPATCH_BAD_INPUT; - } - break; - case EVFILT_FS: -#if DISPATCH_USE_VM_PRESSURE - case EVFILT_VM: -#endif -#if DISPATCH_USE_MEMORYSTATUS - case EVFILT_MEMORYSTATUS: -#endif - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - if (handle) { - return DISPATCH_BAD_INPUT; - } - break; - case DISPATCH_EVFILT_TIMER: - if ((handle == 0) != (type->ke.ident == 0)) { - return DISPATCH_BAD_INPUT; - } - break; - default: - break; - } - ds = _dispatch_alloc(DISPATCH_VTABLE(source), + ds = 
_dispatch_object_alloc(DISPATCH_VTABLE(source), sizeof(struct dispatch_source_s)); // Initialize as a queue first, then override some settings below. - _dispatch_queue_init(ds->_as_dq, DQF_NONE, 1, true); + _dispatch_queue_init(ds->_as_dq, DQF_LEGACY, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER); ds->dq_label = "source"; ds->do_ref_cnt++; // the reference the manager queue holds - - switch (type->ke.filter) { - case DISPATCH_EVFILT_CUSTOM_OR: - dk = DISPATCH_KEV_CUSTOM_OR; - break; - case DISPATCH_EVFILT_CUSTOM_ADD: - dk = DISPATCH_KEV_CUSTOM_ADD; - break; - default: - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = *proto_kev; - dk->dk_kevent.ident = handle; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags |= (uint32_t)mask; - dk->dk_kevent.udata = (_dispatch_kevent_qos_udata_t)dk; - TAILQ_INIT(&dk->dk_sources); - - ds->ds_pending_data_mask = dk->dk_kevent.fflags; - ds->ds_ident_hack = (uintptr_t)dk->dk_kevent.ident; - if (EV_UDATA_SPECIFIC & proto_kev->flags) { - dk->dk_kevent.flags |= EV_DISPATCH; - ds->ds_is_direct_kevent = true; - ds->ds_needs_rearm = true; - } - break; - } - ds->ds_dkev = dk; - - if ((EV_DISPATCH|EV_ONESHOT) & proto_kev->flags) { - ds->ds_needs_rearm = true; - } else if (!(EV_CLEAR & proto_kev->flags)) { - // we cheat and use EV_CLEAR to mean a "flag thingy" - ds->ds_is_adder = true; - } - // Some sources require special processing - if (type->init != NULL) { - type->init(ds, type, handle, mask, dq); - } - dispatch_assert(!(ds->ds_is_level && ds->ds_is_adder)); - if (!ds->ds_is_custom_source && (dk->dk_kevent.flags & EV_VANISHED)) { - // see _dispatch_source_merge_kevent - dispatch_assert(!(dk->dk_kevent.flags & EV_ONESHOT)); - dispatch_assert(dk->dk_kevent.flags & EV_DISPATCH); - dispatch_assert(dk->dk_kevent.flags & EV_UDATA_SPECIFIC); - } - - if (fastpath(!ds->ds_refs)) { - ds->ds_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_source_refs_s)); - } - 
ds->ds_refs->dr_source_wref = _dispatch_ptr2wref(ds); + ds->ds_refs = dr; + dr->du_owner_wref = _dispatch_ptr2wref(ds); if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); + dq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); } else { - _dispatch_retain(dq); + _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; + if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_INTERVAL)) { + _dispatch_source_set_interval(ds, handle); + } _dispatch_object_debug(ds, "%s", __func__); return ds; } void -_dispatch_source_dispose(dispatch_source_t ds) +_dispatch_source_dispose(dispatch_source_t ds, bool *allow_free) { _dispatch_object_debug(ds, "%s", __func__); _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER); - free(ds->ds_refs); - _dispatch_queue_destroy(ds->_as_dq); + _dispatch_unote_dispose(ds->ds_refs); + ds->ds_refs = NULL; + _dispatch_queue_destroy(ds->_as_dq, allow_free); } void _dispatch_source_xref_dispose(dispatch_source_t ds) { - dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(!(dqf & (DQF_LEGACY|DSF_CANCELED)))) { + DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " + "cancelled, but has a mandatory cancel handler"); + } + dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } long @@ -259,78 +102,121 @@ dispatch_source_testcancel(dispatch_source_t ds) unsigned long dispatch_source_get_mask(dispatch_source_t ds) { - unsigned long mask = ds->ds_pending_data_mask; - if (ds->ds_vmpressure_override) { - mask = NOTE_VM_PRESSURE; + dispatch_source_refs_t dr = ds->ds_refs; + if (ds->dq_atomic_flags & DSF_CANCELED) { + return 0; + } +#if DISPATCH_USE_MEMORYSTATUS + if (dr->du_vmpressure_override) { + return NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorypressure_override) { - mask = 
NOTE_MEMORYSTATUS_PRESSURE_WARN; + if (dr->du_memorypressure_override) { + return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif - return mask; +#endif // DISPATCH_USE_MEMORYSTATUS + return dr->du_fflags; } uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { - unsigned int handle = (unsigned int)ds->ds_ident_hack; + dispatch_source_refs_t dr = ds->ds_refs; #if TARGET_IPHONE_SIMULATOR - if (ds->ds_memorypressure_override) { - handle = 0; + if (dr->du_memorypressure_override) { + return 0; } #endif - return handle; + return dr->du_ident; } unsigned long dispatch_source_get_data(dispatch_source_t ds) { - unsigned long data = ds->ds_data; - if (ds->ds_vmpressure_override) { - data = NOTE_VM_PRESSURE; +#if DISPATCH_USE_MEMORYSTATUS + dispatch_source_refs_t dr = ds->ds_refs; + if (dr->du_vmpressure_override) { + return NOTE_VM_PRESSURE; } #if TARGET_IPHONE_SIMULATOR - else if (ds->ds_memorypressure_override) { - data = NOTE_MEMORYSTATUS_PRESSURE_WARN; + if (dr->du_memorypressure_override) { + return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif - return data; +#endif // DISPATCH_USE_MEMORYSTATUS + uint64_t value = os_atomic_load2o(ds, ds_data, relaxed); + return (unsigned long)( + ds->ds_refs->du_data_action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET + ? 
DISPATCH_SOURCE_GET_DATA(value) : value); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_merge_data2(dispatch_source_t ds, - pthread_priority_t pp, unsigned long val) -{ - _dispatch_kevent_qos_s kev = { - .fflags = (typeof(kev.fflags))val, - .data = (typeof(kev.data))val, -#if DISPATCH_USE_KEVENT_QOS - .qos = (_dispatch_kevent_priority_t)pp, -#endif - }; -#if !DISPATCH_USE_KEVENT_QOS - (void)pp; -#endif - - dispatch_assert(ds->ds_dkev == DISPATCH_KEV_CUSTOM_OR || - ds->ds_dkev == DISPATCH_KEV_CUSTOM_ADD); - _dispatch_kevent_debug("synthetic data", &kev); - _dispatch_source_merge_kevent(ds, &kev); +size_t +dispatch_source_get_extended_data(dispatch_source_t ds, + dispatch_source_extended_data_t edata, size_t size) +{ + size_t target_size = MIN(size, + sizeof(struct dispatch_source_extended_data_s)); + if (size > 0) { + unsigned long data, status = 0; + if (ds->ds_refs->du_data_action + == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + uint64_t combined = os_atomic_load(&ds->ds_data, relaxed); + data = DISPATCH_SOURCE_GET_DATA(combined); + status = DISPATCH_SOURCE_GET_STATUS(combined); + } else { + data = dispatch_source_get_data(ds); + } + if (size >= offsetof(struct dispatch_source_extended_data_s, data) + + sizeof(edata->data)) { + edata->data = data; + } + if (size >= offsetof(struct dispatch_source_extended_data_s, status) + + sizeof(edata->status)) { + edata->status = status; + } + if (size > sizeof(struct dispatch_source_extended_data_s)) { + memset( + (char *)edata + sizeof(struct dispatch_source_extended_data_s), + 0, size - sizeof(struct dispatch_source_extended_data_s)); + } + } + return target_size; } +DISPATCH_NOINLINE void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, + unsigned long val) { - _dispatch_source_merge_data2(ds, 0, val); + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + int filter = 
ds->ds_refs->du_filter; + + if (unlikely(dqf & (DSF_CANCELED | DSF_DELETED))) { + return; + } + + switch (filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + os_atomic_add2o(ds, ds_pending_data, val, relaxed); + break; + case DISPATCH_EVFILT_CUSTOM_OR: + os_atomic_or2o(ds, ds_pending_data, val, relaxed); + break; + case DISPATCH_EVFILT_CUSTOM_REPLACE: + os_atomic_store2o(ds, ds_pending_data, val, relaxed); + break; + default: + DISPATCH_CLIENT_CRASH(filter, "Invalid source type"); + } + + dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY); } void -_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, - unsigned long val) +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - _dispatch_source_merge_data2(ds, pp, val); + _dispatch_source_merge_data(ds, 0, val); } #pragma mark - @@ -450,6 +336,10 @@ _dispatch_source_set_handler(dispatch_source_t ds, long kind, _dispatch_source_handler_replace(ds, kind, dc); return dx_vtable(ds)->do_resume(ds, false); } + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source " + "after it has been activated"); + } _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); if (kind == DS_REGISTN_HANDLER) { _dispatch_bug_deprecated("Setting registration handler after " @@ -457,7 +347,7 @@ _dispatch_source_set_handler(dispatch_source_t ds, long kind, } dc->dc_data = (void *)kind; _dispatch_barrier_trysync_or_async_f(ds->_as_dq, dc, - _dispatch_source_set_handler_slow); + _dispatch_source_set_handler_slow, 0); } #ifdef __BLOCKS__ @@ -480,27 +370,40 @@ dispatch_source_set_event_handler_f(dispatch_source_t ds, _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); } -void -_dispatch_source_set_event_handler_continuation(dispatch_source_t ds, - dispatch_continuation_t dc) +#ifdef __BLOCKS__ +DISPATCH_NOINLINE +static void +_dispatch_source_set_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) { - 
_dispatch_trace_continuation_push(ds->_as_dq, dc); - _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); + dispatch_continuation_t dc; + dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); + _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } -#ifdef __BLOCKS__ void dispatch_source_set_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); - _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " + "this source"); + } + return _dispatch_source_set_cancel_handler(ds, handler); } -#endif /* __BLOCKS__ */ void -dispatch_source_set_cancel_handler_f(dispatch_source_t ds, +dispatch_source_set_mandatory_cancel_handler(dispatch_source_t ds, + dispatch_block_t handler) +{ + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); + return _dispatch_source_set_cancel_handler(ds, handler); +} +#endif /* __BLOCKS__ */ + +DISPATCH_NOINLINE +static void +_dispatch_source_set_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { dispatch_continuation_t dc; @@ -508,6 +411,25 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t ds, _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); } +void +dispatch_source_set_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " + "this source"); + } + return _dispatch_source_set_cancel_handler_f(ds, handler); +} + +void +dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t ds, + dispatch_function_t handler) +{ + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); + return _dispatch_source_set_cancel_handler_f(ds, handler); +} + #ifdef __BLOCKS__ void 
dispatch_source_set_registration_handler(dispatch_source_t ds, @@ -545,7 +467,7 @@ _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc, cq, flags); + _dispatch_continuation_pop(dc, NULL, flags, cq); } static void @@ -555,7 +477,6 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_continuation_t dc; dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER); - ds->ds_pending_data_mask = 0; ds->ds_pending_data = 0; ds->ds_data = 0; _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); @@ -569,104 +490,87 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { dc->dc_ctxt = ds->do_ctxt; } - _dispatch_continuation_pop(dc, cq, flags); + _dispatch_continuation_pop(dc, NULL, flags, cq); } static void _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, dispatch_invoke_flags_t flags) { - unsigned long prev; - dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); - prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); - if (ds->ds_is_level) { + uint64_t prev; + + if (dr->du_is_timer && !(dr->du_fflags & DISPATCH_TIMER_AFTER)) { + prev = _dispatch_source_timer_data(ds, dr); + } else { + prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); + } + if (dr->du_data_action == DISPATCH_UNOTE_ACTION_DATA_SET) { ds->ds_data = ~prev; - } else if (ds->ds_is_timer && ds_timer(dr).target && prev) { - ds->ds_data = _dispatch_source_timer_data(dr, prev); } else { ds->ds_data = prev; } - if (!dispatch_assume(prev) || !dc) { + if (!dispatch_assume(prev != 0) || !dc) { return; } - _dispatch_continuation_pop(dc, cq, flags); - if (ds->ds_is_timer && (ds_timer(dr).flags & DISPATCH_TIMER_AFTER)) { + _dispatch_continuation_pop(dc, NULL, flags, cq); + if 
(dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER)) { _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); dispatch_release(ds); // dispatch_after sources are one-shot } } +DISPATCH_NOINLINE static void -_dispatch_source_kevent_unregister(dispatch_source_t ds) +_dispatch_source_refs_finalize_unregistration(dispatch_source_t ds) +{ + dispatch_queue_flags_t dqf; + dispatch_source_refs_t dr = ds->ds_refs; + + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, + DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); + if (dqf & DSF_CANCEL_WAITER) { + _dispatch_wake_by_address(&ds->dq_atomic_flags); + } + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); + _dispatch_release_tailcall(ds); // the retain is done at creation time +} + +void +_dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) { _dispatch_object_debug(ds, "%s", __func__); - uint32_t flags = (uint32_t)ds->ds_pending_data_mask; - dispatch_kevent_t dk = ds->ds_dkev; dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (ds->ds_is_custom_source) { - ds->ds_dkev = NULL; - goto done; - } + dispatch_source_refs_t dr = ds->ds_refs; - if (ds->ds_is_direct_kevent && - ((dqf & DSF_DELETED) || !(ds->ds_is_installed))) { - dk->dk_kevent.flags |= EV_DELETE; // already deleted - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - } - if (dk->dk_kevent.filter == DISPATCH_EVFILT_TIMER) { - ds->ds_dkev = NULL; - if (ds->ds_is_installed) { - _dispatch_timers_unregister(ds, dk); + if (dr->du_is_timer) { + // Because of the optimization to unregister fired oneshot timers + // from the target queue, we can't trust _dispatch_unote_registered() + // to tell the truth, it may not have happened yet + if (dqf & DSF_ARMED) { + _dispatch_timers_unregister(ds->ds_timer_refs); + _dispatch_release_2(ds); } - } else if (!ds->ds_is_direct_kevent) { - ds->ds_dkev = NULL; - dispatch_assert((bool)ds->ds_is_installed); - 
TAILQ_REMOVE(&dk->dk_sources, ds->ds_refs, dr_list); - _dispatch_kevent_unregister(dk, flags, 0); + dr->du_ident = DISPATCH_TIMER_IDENT_CANCELED; } else { - unsigned int dkev_dispose_options = 0; - if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { - dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } else if (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { - if (!ds->ds_is_direct_kevent) { - dkev_dispose_options |= DKEV_DISPOSE_IMMEDIATE_DELETE; - } + if (_dispatch_unote_needs_rearm(dr) && !(dqf & DSF_ARMED)) { + options |= DU_UNREGISTER_IMMEDIATE_DELETE; } - long r = _dispatch_kevent_unregister(dk, flags, dkev_dispose_options); - if (r == EINPROGRESS) { + if (!_dispatch_unote_unregister(dr, options)) { _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", - ds, dk); + ds, dr); _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); return; // deferred unregistration -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - } else if (r == ENOENT) { - _dispatch_debug("kevent-source[%p]: ENOENT delete kevent[%p]", - ds, dk); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); - return; // potential concurrent EV_DELETE delivery rdar://22047283 -#endif - } else { - dispatch_assume_zero(r); } - ds->ds_dkev = NULL; - _TAILQ_TRASH_ENTRY(ds->ds_refs, dr_list); - } -done: - dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, - DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); - if (dqf & DSF_CANCEL_WAITER) { - _dispatch_wake_by_address(&ds->dq_atomic_flags); } + ds->ds_is_installed = true; - ds->ds_needs_rearm = false; // re-arm is pointless and bad now - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dk); - _dispatch_release(ds); // the retain is done at creation time + _dispatch_source_refs_finalize_unregistration(ds); } DISPATCH_ALWAYS_INLINE -static bool +static inline bool _dispatch_source_tryarm(dispatch_source_t ds) { dispatch_queue_flags_t oqf, nqf; @@ -680,58 +584,56 @@ 
_dispatch_source_tryarm(dispatch_source_t ds) }); } -static bool -_dispatch_source_kevent_resume(dispatch_source_t ds, uint32_t new_flags) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_refs_resume(dispatch_source_t ds) { - switch (ds->ds_dkev->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - _dispatch_timers_update(ds); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); + dispatch_source_refs_t dr = ds->ds_refs; + if (dr->du_is_timer) { + _dispatch_timers_update(dr, 0); return true; -#if HAVE_MACH - case EVFILT_MACHPORT: - if ((ds->ds_pending_data_mask & DISPATCH_MACH_RECV_MESSAGE) && - !ds->ds_is_direct_kevent) { - new_flags |= DISPATCH_MACH_RECV_MESSAGE; // emulate EV_DISPATCH - } - break; -#endif } if (unlikely(!_dispatch_source_tryarm(ds))) { return false; } - if (unlikely(_dispatch_kevent_resume(ds->ds_dkev, new_flags, 0))) { - _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DELETED, - DSF_ARMED); - return false; - } - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + _dispatch_unote_resume(dr); + _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dr); return true; } -static void -_dispatch_source_kevent_register(dispatch_source_t ds, pthread_priority_t pp) +void +_dispatch_source_refs_register(dispatch_source_t ds, dispatch_wlh_t wlh, + dispatch_priority_t pri) { - dispatch_assert_zero((bool)ds->ds_is_installed); - switch (ds->ds_dkev->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - _dispatch_timers_update(ds); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, ds->ds_dkev); + dispatch_source_refs_t dr = ds->ds_refs; + dispatch_priority_t kbp; + + dispatch_assert(!ds->ds_is_installed); + + if (dr->du_is_timer) { + dispatch_queue_t dq = ds->_as_dq; + kbp = _dispatch_queue_compute_priority_and_wlh(dq, NULL); + // aggressively coalesce 
background/maintenance QoS timers + // + if (_dispatch_qos_is_background(_dispatch_priority_qos(kbp))) { + if (dr->du_fflags & DISPATCH_TIMER_STRICT) { + _dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, ds); + } else { + dr->du_fflags |= DISPATCH_TIMER_BACKGROUND; + dr->du_ident = _dispatch_source_timer_idx(dr); + } + } + _dispatch_timers_update(dr, 0); return; } - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&ds->ds_dkev, pp, &flags); - TAILQ_INSERT_TAIL(&ds->ds_dkev->dk_sources, ds->ds_refs, dr_list); - ds->ds_is_installed = true; - if (do_resume || ds->ds_needs_rearm) { - if (unlikely(!_dispatch_source_kevent_resume(ds, flags))) { - _dispatch_source_kevent_unregister(ds); - } + + if (unlikely(!_dispatch_source_tryarm(ds) || + !_dispatch_unote_register(dr, wlh, pri))) { + // Do the parts of dispatch_source_refs_unregister() that + // are required after this partial initialization. + _dispatch_source_refs_finalize_unregistration(ds); } else { - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); + _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr); } _dispatch_object_debug(ds, "%s", __func__); } @@ -747,65 +649,34 @@ _dispatch_source_set_event_handler_context(void *ctxt) } } -static pthread_priority_t -_dispatch_source_compute_kevent_priority(dispatch_source_t ds) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, + dispatch_priority_t pri) { - pthread_priority_t p = ds->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - dispatch_queue_t tq = ds->do_targetq; - pthread_priority_t tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - - while (unlikely(tq->do_targetq)) { - if (unlikely(tq == &_dispatch_mgr_q)) { - return _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } - if (unlikely(_dispatch_queue_is_thread_bound(tq))) { - // thread bound hierarchies are weird, we need to install - // from the context of the thread this hierarchy is bound to - return 0; - } - if 
(unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { - // this queue may not be activated yet, so the queue graph may not - // have stabilized yet - _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); - return 0; - } - if (unlikely(!_dispatch_queue_has_immutable_target(tq))) { - if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { - // we're not allowed to dereference tq->do_targetq - _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, ds); - return 0; - } - } - if (!(tq->dq_priority & _PTHREAD_PRIORITY_INHERIT_FLAG)) { - if (p < tqp) p = tqp; - } - tq = tq->do_targetq; - tqp = tq->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - } - - if (unlikely(!tqp)) { - // pthread root queues opt out of QoS - return 0; - } - return _dispatch_priority_inherit_from_root_queue(p, tq); + _dispatch_source_refs_register(ds, wlh, pri); + ds->ds_is_installed = true; } void -_dispatch_source_finalize_activation(dispatch_source_t ds) +_dispatch_source_finalize_activation(dispatch_source_t ds, bool *allow_resume) { dispatch_continuation_t dc; + dispatch_source_refs_t dr = ds->ds_refs; + dispatch_priority_t pri; + dispatch_wlh_t wlh; - if (unlikely(ds->ds_is_direct_kevent && + if (unlikely(dr->du_is_direct && (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) { - return _dispatch_source_kevent_unregister(ds); + return _dispatch_source_refs_unregister(ds, 0); } - dc = _dispatch_source_get_event_handler(ds->ds_refs); + dc = _dispatch_source_get_event_handler(dr); if (dc) { if (_dispatch_object_is_barrier(dc)) { _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT); } - ds->dq_priority = dc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority); if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { _dispatch_barrier_async_detached_f(ds->_as_dq, ds, _dispatch_source_set_event_handler_context); @@ -813,28 +684,43 @@ _dispatch_source_finalize_activation(dispatch_source_t ds) } // call "super" - 
_dispatch_queue_finalize_activation(ds->_as_dq); + _dispatch_queue_finalize_activation(ds->_as_dq, allow_resume); - if (ds->ds_is_direct_kevent && !ds->ds_is_installed) { - pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); - if (pp) _dispatch_source_kevent_register(ds, pp); + if (dr->du_is_direct && !ds->ds_is_installed) { + dispatch_queue_t dq = ds->_as_dq; + pri = _dispatch_queue_compute_priority_and_wlh(dq, &wlh); + if (pri) _dispatch_source_install(ds, wlh, pri); } } DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) +static inline dispatch_queue_wakeup_target_t +_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { dispatch_source_t ds = dou._ds; - dispatch_queue_t retq = NULL; + dispatch_queue_wakeup_target_t retq = DISPATCH_QUEUE_WAKEUP_NONE; dispatch_queue_t dq = _dispatch_queue_get_current(); + dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_flags_t dqf; + + if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && + _dispatch_unote_wlh_changed(dr, _dispatch_get_wlh())) { + dqf = _dispatch_queue_atomic_flags_set_orig(ds->_as_dq, + DSF_WLH_CHANGED); + if (!(dqf & DSF_WLH_CHANGED)) { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after source was activated"); + } + } if (_dispatch_queue_class_probe(ds)) { // Intentionally always drain even when on the manager queue // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. - retq = _dispatch_queue_serial_drain(ds->_as_dq, flags, owned, NULL); + dispatch_with_disabled_narrowing(dic, { + retq = _dispatch_queue_serial_drain(ds->_as_dq, dic, flags, owned); + }); } // This function performs all source actions. 
Each action is responsible @@ -844,19 +730,32 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // The order of tests here in invoke and in wakeup should be consistent. - dispatch_source_refs_t dr = ds->ds_refs; dispatch_queue_t dkq = &_dispatch_mgr_q; + bool prevent_starvation = false; - if (ds->ds_is_direct_kevent) { + if (dr->du_is_direct) { dkq = ds->do_targetq; } + if (dr->du_is_timer && + os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) { + // timer has to be configured on the kevent queue + if (dq != dkq) { + return dkq; + } + _dispatch_source_timer_configure(ds); + } + } + if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. if (dq != dkq) { return dkq; } - _dispatch_source_kevent_register(ds, _dispatch_get_defaultpriority()); + _dispatch_source_install(ds, _dispatch_get_wlh(), + _dispatch_get_basepri()); } if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { @@ -874,22 +773,20 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, _dispatch_source_registration_callout(ds, dq, flags); } - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - bool prevent_starvation = false; - - if ((dqf & DSF_DEFERRED_DELETE) && - ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) { unregister_event: // DSF_DELETE: Pending source kevent unregistration has been completed // !DSF_ARMED: event was delivered and can safely be unregistered if (dq != dkq) { return dkq; } - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, DU_UNREGISTER_IMMEDIATE_DELETE); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } - if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + 
os_atomic_load2o(ds, ds_pending_data, relaxed)) { // The source has pending data to deliver via the event handler callback // on the target queue. Some sources need to be rearmed on the kevent // queue after event delivery. @@ -901,12 +798,13 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // re-queue to give other things already queued on the target queue // a chance to run. // - // however, if the source is directly targetting an overcommit root + // however, if the source is directly targeting an overcommit root // queue, this would requeue the source and ask for a new overcommit // thread right away. prevent_starvation = dq->do_targetq || - !(dq->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - if (prevent_starvation && ds->ds_pending_data) { + !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + if (prevent_starvation && + os_atomic_load2o(ds, ds_pending_data, relaxed)) { retq = ds->do_targetq; } } else { @@ -921,17 +819,21 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. if (!(dqf & DSF_DELETED)) { - if (dq != dkq) { + if (dr->du_is_timer && !(dqf & DSF_ARMED)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue + } else if (dq != dkq) { return dkq; } - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, 0); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (unlikely(dqf & DSF_DEFERRED_DELETE)) { if (!(dqf & DSF_ARMED)) { goto unregister_event; } // we need to wait for the EV_DELETE - return retq; + return retq ? 
retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; } } if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) || @@ -945,7 +847,8 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, prevent_starvation = false; } - if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { + if (_dispatch_unote_needs_rearm(dr) && + !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { // The source needs to be rearmed on the kevent queue. if (dq != dkq) { return dkq; @@ -954,20 +857,29 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, // no need for resume when we can directly unregister the kevent goto unregister_event; } - if (prevent_starvation) { + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { + // do not try to rearm the kevent if the source is suspended + // from the source handler + return ds->do_targetq; + } + if (prevent_starvation && dr->du_wlh == DISPATCH_WLH_ANON) { // keep the old behavior to force re-enqueue to our target queue - // for the rearm. It is inefficient though and we should - // improve this . + // for the rearm. // // if the handler didn't run, or this is a pending delete // or our target queue is a global queue, then starvation is // not a concern and we can rearm right away. 
return ds->do_targetq; } - if (unlikely(!_dispatch_source_kevent_resume(ds, 0))) { - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (unlikely(!_dispatch_source_refs_resume(ds))) { goto unregister_event; } + if (!prevent_starvation && _dispatch_wlh_should_poll_unote(dr)) { + // try to redrive the drain from under the lock for sources + // targeting an overcommit root queue to avoid parking + // when the next event has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } } return retq; @@ -975,13 +887,15 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, DISPATCH_NOINLINE void -_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags) +_dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) { - _dispatch_queue_class_invoke(ds->_as_dq, flags, _dispatch_source_invoke2); + _dispatch_queue_class_invoke(ds, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_source_invoke2); } void -_dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, +_dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { // This function determines whether the source needs to be invoked. @@ -993,21 +907,26 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); bool deferred_delete = (dqf & DSF_DEFERRED_DELETE); - if (ds->ds_is_direct_kevent) { + if (dr->du_is_direct) { dkq = DISPATCH_QUEUE_WAKEUP_TARGET; } - if (!ds->ds_is_installed) { + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && dr->du_is_timer && + os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { + // timer has to be configured on the kevent queue + tq = dkq; + } else if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. 
tq = dkq; } else if (_dispatch_source_get_registration_handler(dr)) { // The registration handler needs to be delivered to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } else if (deferred_delete && ((dqf & DSF_DELETED) || !(dqf & DSF_ARMED))) { + } else if (deferred_delete && !(dqf & DSF_ARMED)) { // Pending source kevent unregistration has been completed // or EV_ONESHOT event can be acknowledged tq = dkq; - } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && ds->ds_pending_data) { + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + os_atomic_load2o(ds, ds_pending_data, relaxed)) { // The source has pending data to deliver to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) { @@ -1015,13 +934,21 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. if (!(dqf & DSF_DELETED)) { - tq = dkq; + if (dr->du_is_timer && !(dqf & DSF_ARMED)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else { + tq = dkq; + } } else if (_dispatch_source_get_event_handler(dr) || _dispatch_source_get_cancel_handler(dr) || _dispatch_source_get_registration_handler(dr)) { tq = DISPATCH_QUEUE_WAKEUP_TARGET; } - } else if (ds->ds_needs_rearm && !(dqf & DSF_ARMED)) { + } else if (_dispatch_unote_needs_rearm(dr) && + !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { // The source needs to be rearmed on the kevent queue. 
tq = dkq; } @@ -1029,13 +956,12 @@ _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, tq = DISPATCH_QUEUE_WAKEUP_TARGET; } - if (tq) { - return _dispatch_queue_class_wakeup(ds->_as_dq, pp, flags, tq); - } else if (pp) { - return _dispatch_queue_class_override_drainer(ds->_as_dq, pp, flags); - } else if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(ds); + if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) && + ds->do_targetq == &_dispatch_mgr_q) { + tq = DISPATCH_QUEUE_WAKEUP_MGR; } + + return _dispatch_queue_class_wakeup(ds->_as_dq, qos, flags, tq); } void @@ -1046,13 +972,13 @@ dispatch_source_cancel(dispatch_source_t ds) // could potentially invoke the source, do the cancellation, // unregister the source, and deallocate it. We would // need to therefore retain/release before setting the bit - _dispatch_retain(ds); + _dispatch_retain_2(ds); dispatch_queue_t q = ds->_as_dq; if (_dispatch_queue_atomic_flags_set_orig(q, DSF_CANCELED) & DSF_CANCELED) { - _dispatch_release_tailcall(ds); + _dispatch_release_2_tailcall(ds); } else { - dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2); } } @@ -1060,9 +986,9 @@ void dispatch_source_cancel_and_wait(dispatch_source_t ds) { dispatch_queue_flags_t old_dqf, dqf, new_dqf; - pthread_priority_t pp; + dispatch_source_refs_t dr = ds->ds_refs; - if (unlikely(_dispatch_source_get_cancel_handler(ds->ds_refs))) { + if (unlikely(_dispatch_source_get_cancel_handler(dr))) { DISPATCH_CLIENT_CRASH(ds, "Source has a cancel handler"); } @@ -1074,7 +1000,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { // just add DSF_CANCELED - } else if ((old_dqf & DSF_DEFERRED_DELETE) || !ds->ds_is_direct_kevent){ + } else if ((old_dqf & DSF_DEFERRED_DELETE) || !dr->du_is_direct) { new_dqf |= DSF_CANCEL_WAITER; } }); @@ -1087,13 +1013,12 @@ 
dispatch_source_cancel_and_wait(dispatch_source_t ds) return; } if (dqf & DSF_CANCEL_WAITER) { - goto override; + goto wakeup; } // simplified version of _dispatch_queue_drain_try_lock // that also sets the DIRTY bit on failure to lock - dispatch_lock_owner tid_self = _dispatch_tid_self(); - uint64_t xor_owner_and_set_full_width = tid_self | + uint64_t set_owner_and_set_full_width = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; uint64_t old_state, new_state; @@ -1102,7 +1027,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) if (likely(_dq_state_is_runnable(old_state) && !_dq_state_drain_locked(old_state))) { new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state ^= xor_owner_and_set_full_width; + new_state |= set_owner_and_set_full_width; } else if (old_dqf & DSF_CANCELED) { os_atomic_rmw_loop_give_up(break); } else { @@ -1126,27 +1051,28 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) // same thing _dispatch_source_invoke2() does when handling cancellation dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) { - _dispatch_source_kevent_unregister(ds); + _dispatch_source_refs_unregister(ds, 0); dqf = _dispatch_queue_atomic_flags(ds->_as_dq); if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) { _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); } } - _dispatch_try_lock_transfer_or_wakeup(ds->_as_dq); - } else if (unlikely(_dq_state_drain_locked_by(old_state, tid_self))) { + dx_wakeup(ds, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE); + } else if (unlikely(_dq_state_drain_locked_by_self(old_state))) { DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait " "called from a source handler"); } else { -override: - pp = _dispatch_get_priority() & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (pp) dx_wakeup(ds, pp, DISPATCH_WAKEUP_OVERRIDING); + dispatch_qos_t qos; +wakeup: + qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + dx_wakeup(ds, qos, 
DISPATCH_WAKEUP_MAKE_DIRTY); dispatch_activate(ds); } dqf = _dispatch_queue_atomic_flags(ds->_as_dq); while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { - if (!os_atomic_cmpxchgvw2o(ds, dq_atomic_flags, + if (!os_atomic_cmpxchgv2o(ds, dq_atomic_flags, dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { continue; } @@ -1157,46 +1083,44 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } } -static void -_dispatch_source_merge_kevent(dispatch_source_t ds, - const _dispatch_kevent_qos_s *ke) +void +_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, + uintptr_t status, pthread_priority_t pp) { - _dispatch_object_debug(ds, "%s", __func__); - dispatch_wakeup_flags_t flags = 0; + dispatch_source_refs_t dr = du._dr; + dispatch_source_t ds = _dispatch_source_from_refs(dr); + dispatch_wakeup_flags_t wflags = 0; dispatch_queue_flags_t dqf; - pthread_priority_t pp = 0; - if (ds->ds_needs_rearm || (ke->flags & (EV_DELETE | EV_ONESHOT))) { + if (_dispatch_unote_needs_rearm(dr) || (flags & (EV_DELETE | EV_ONESHOT))) { // once we modify the queue atomic flags below, it will allow concurrent // threads running _dispatch_source_invoke2 to dispose of the source, - // so we can't safely borrow the reference we get from the knote udata + // so we can't safely borrow the reference we get from the muxnote udata // anymore, and need our own - flags = DISPATCH_WAKEUP_CONSUME; - _dispatch_retain(ds); // rdar://20382435 + wflags = DISPATCH_WAKEUP_CONSUME_2; + _dispatch_retain_2(ds); // rdar://20382435 } - if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && - !(ke->flags & EV_DELETE)) { + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + !(flags & EV_DELETE)) { dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, DSF_DEFERRED_DELETE, DSF_ARMED); - if (ke->flags & EV_VANISHED) { - _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), + if (flags & EV_VANISHED) { + 
_dispatch_bug_kevent_client("kevent", dr->du_type->dst_kind, "monitored resource vanished before the source " "cancel handler was invoked", 0); } _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds, - (ke->flags & EV_VANISHED) ? "vanished" : - "deferred delete oneshot", (void*)ke->udata); - } else if ((ke->flags & EV_DELETE) || (ke->flags & EV_ONESHOT)) { - dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, - DSF_DELETED, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: delete kevent[%p]", - ds, (void*)ke->udata); - if (ke->flags & EV_DELETE) goto done; - } else if (ds->ds_needs_rearm) { + (flags & EV_VANISHED) ? "vanished" : + "deferred delete oneshot", dr); + } else if (flags & (EV_DELETE | EV_ONESHOT)) { + _dispatch_source_refs_unregister(ds, DU_UNREGISTER_ALREADY_DELETED); + _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", ds, dr); + if (flags & EV_DELETE) goto done; + dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + } else if (_dispatch_unote_needs_rearm(dr)) { dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p] ", - ds, (void*)ke->udata); + _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); } else { dqf = _dispatch_queue_atomic_flags(ds->_as_dq); } @@ -1204,16 +1128,10 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, if (dqf & (DSF_CANCELED | DQF_RELEASED)) { goto done; // rdar://20204025 } -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && - dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE) { - DISPATCH_INTERNAL_CRASH(ke->flags,"Unexpected kevent for mach channel"); - } -#endif - unsigned long data; - if ((ke->flags & EV_UDATA_SPECIFIC) && (ke->flags & EV_ONESHOT) && - (ke->flags & EV_VANISHED)) { + dispatch_unote_action_t action = dr->du_data_action; + if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && + (flags & EV_VANISHED)) { // if the resource behind the ident vanished, the event handler can't // do anything useful anymore, so do not 
try to call it at all // @@ -1223,497 +1141,63 @@ _dispatch_source_merge_kevent(dispatch_source_t ds, // if we get both bits it was a real EV_VANISHED delivery os_atomic_store2o(ds, ds_pending_data, 0, relaxed); #if HAVE_MACH - } else if (ke->filter == EVFILT_MACHPORT) { - data = DISPATCH_MACH_RECV_MESSAGE; + } else if (dr->du_filter == EVFILT_MACHPORT) { os_atomic_store2o(ds, ds_pending_data, data, relaxed); #endif - } else if (ds->ds_is_level) { - // ke->data is signed and "negative available data" makes no sense - // zero bytes happens when EV_EOF is set - dispatch_assert(ke->data >= 0l); - data = ~(unsigned long)ke->data; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_SET) { os_atomic_store2o(ds, ds_pending_data, data, relaxed); - } else if (ds->ds_is_adder) { - data = (unsigned long)ke->data; + } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) { os_atomic_add2o(ds, ds_pending_data, data, relaxed); - } else if (ke->fflags & ds->ds_pending_data_mask) { - data = ke->fflags & ds->ds_pending_data_mask; + } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR) { os_atomic_or2o(ds, ds_pending_data, data, relaxed); + } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + // We combine the data and status into a single 64-bit value. 
+ uint64_t odata, ndata; + uint64_t value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status); + os_atomic_rmw_loop2o(ds, ds_pending_data, odata, ndata, relaxed, { + ndata = DISPATCH_SOURCE_GET_DATA(odata) | value; + }); + } else if (data) { + DISPATCH_INTERNAL_CRASH(action, "Unexpected source action value"); } + _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, dr); done: -#if DISPATCH_USE_KEVENT_QOS - pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; -#endif - dx_wakeup(ds, pp, flags | DISPATCH_WAKEUP_FLUSH); + _dispatch_object_debug(ds, "%s", __func__); + dx_wakeup(ds, _dispatch_qos_from_pp(pp), wflags | DISPATCH_WAKEUP_MAKE_DIRTY); } #pragma mark - -#pragma mark dispatch_kevent_t - -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void _dispatch_kevent_guard(dispatch_kevent_t dk); -static void _dispatch_kevent_unguard(dispatch_kevent_t dk); -#else -static inline void _dispatch_kevent_guard(dispatch_kevent_t dk) { (void)dk; } -static inline void _dispatch_kevent_unguard(dispatch_kevent_t dk) { (void)dk; } -#endif - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC -static struct dispatch_kevent_s _dispatch_kevent_data_or = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_OR, - .flags = EV_CLEAR, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_or.dk_sources), -}; -static struct dispatch_kevent_s _dispatch_kevent_data_add = { - .dk_kevent = { - .filter = DISPATCH_EVFILT_CUSTOM_ADD, - }, - .dk_sources = TAILQ_HEAD_INITIALIZER(_dispatch_kevent_data_add.dk_sources), -}; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC - -#define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) - -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, dispatch_kevent_s) _dispatch_sources[DSL_HASH_SIZE]; - -static void -_dispatch_kevent_init() -{ - unsigned int i; - for (i = 0; i < DSL_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_sources[i]); - } - -#if !DISPATCH_USE_EV_UDATA_SPECIFIC - TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_or, dk_list); - 
TAILQ_INSERT_TAIL(&_dispatch_sources[0], - &_dispatch_kevent_data_add, dk_list); - _dispatch_kevent_data_or.dk_kevent.udata = - (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_or; - _dispatch_kevent_data_add.dk_kevent.udata = - (_dispatch_kevent_qos_udata_t)&_dispatch_kevent_data_add; -#endif // !DISPATCH_USE_EV_UDATA_SPECIFIC -} - -static inline uintptr_t -_dispatch_kevent_hash(uint64_t ident, short filter) -{ - uint64_t value; -#if HAVE_MACH - value = (filter == EVFILT_MACHPORT || - filter == DISPATCH_EVFILT_MACH_NOTIFICATION ? - MACH_PORT_INDEX(ident) : ident); -#else - value = ident; - (void)filter; -#endif - return DSL_HASH((uintptr_t)value); -} - -static dispatch_kevent_t -_dispatch_kevent_find(uint64_t ident, short filter) -{ - uintptr_t hash = _dispatch_kevent_hash(ident, filter); - dispatch_kevent_t dki; - - TAILQ_FOREACH(dki, &_dispatch_sources[hash], dk_list) { - if (dki->dk_kevent.ident == ident && dki->dk_kevent.filter == filter) { - break; - } - } - return dki; -} - -static void -_dispatch_kevent_insert(dispatch_kevent_t dk) -{ - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) return; - _dispatch_kevent_guard(dk); - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_INSERT_TAIL(&_dispatch_sources[hash], dk, dk_list); -} - -// Find existing kevents, and merge any new flags if necessary -static bool -_dispatch_kevent_register(dispatch_kevent_t *dkp, pthread_priority_t pp, - uint32_t *flgp) -{ - dispatch_kevent_t dk = NULL, ds_dkev = *dkp; - uint32_t new_flags; - bool do_resume = false; - - if (!(ds_dkev->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk = _dispatch_kevent_find(ds_dkev->dk_kevent.ident, - ds_dkev->dk_kevent.filter); - } - if (dk) { - // If an existing dispatch kevent is found, check to see if new flags - // need to be added to the existing kevent - new_flags = ~dk->dk_kevent.fflags & ds_dkev->dk_kevent.fflags; - dk->dk_kevent.fflags |= ds_dkev->dk_kevent.fflags; - free(ds_dkev); - *dkp = dk; - 
do_resume = new_flags; - } else { - dk = ds_dkev; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (!_dispatch_kevent_workqueue_enabled) { - // do nothing - } else if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk->dk_kevent.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } else { - pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - if (!pp) pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - _dispatch_assert_is_valid_qos_class(pp); - dk->dk_kevent.qos = (_dispatch_kevent_priority_t)pp; - } -#else - (void)pp; -#endif - _dispatch_kevent_insert(dk); - new_flags = dk->dk_kevent.fflags; - do_resume = true; - } - // Re-register the kevent with the kernel if new flags were added - // by the dispatch kevent - if (do_resume) { - dk->dk_kevent.flags |= EV_ADD; - } - *flgp = new_flags; - return do_resume; -} - -static long -_dispatch_kevent_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) -{ - long r; - bool oneshot; - if (dk->dk_kevent.flags & EV_DELETE) { - return 0; - } - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - // these types not registered with kevent - return 0; -#if HAVE_MACH - case DISPATCH_EVFILT_MACH_NOTIFICATION: - return _dispatch_kevent_mach_notify_resume(dk, new_flags, del_flags); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - case EVFILT_MACHPORT: - if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - return _dispatch_kevent_machport_resume(dk, new_flags, del_flags); - } - // fall through -#endif -#endif // HAVE_MACH - default: - // oneshot dk may be freed by the time we return from - // _dispatch_kq_immediate_update if the event was delivered (and then - // unregistered) concurrently. 
- oneshot = (dk->dk_kevent.flags & EV_ONESHOT); - r = _dispatch_kq_immediate_update(&dk->dk_kevent); - if (r && (dk->dk_kevent.flags & EV_ADD) && - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - } else if (!oneshot && (dk->dk_kevent.flags & EV_DISPATCH)) { - // we can safely skip doing this for ONESHOT events because - // the next kq update we will do is _dispatch_kevent_dispose() - // which also clears EV_ADD. - dk->dk_kevent.flags &= ~(EV_ADD|EV_VANISHED); - } - return r; - } - (void)new_flags; (void)del_flags; -} - -static long -_dispatch_kevent_dispose(dispatch_kevent_t dk, unsigned int options) -{ - long r = 0; - switch (dk->dk_kevent.filter) { - case DISPATCH_EVFILT_TIMER: - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { - free(dk); - } else { - // these sources live on statically allocated lists - } - return r; - } - if (!(dk->dk_kevent.flags & EV_DELETE)) { - dk->dk_kevent.flags |= EV_DELETE; - dk->dk_kevent.flags &= ~(EV_ADD|EV_ENABLE|EV_VANISHED); - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags |= EV_ENABLE; - } - switch (dk->dk_kevent.filter) { -#if HAVE_MACH - case DISPATCH_EVFILT_MACH_NOTIFICATION: - r = _dispatch_kevent_mach_notify_resume(dk, 0,dk->dk_kevent.fflags); - break; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - case EVFILT_MACHPORT: - if (!(dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - r = _dispatch_kevent_machport_resume(dk,0,dk->dk_kevent.fflags); - break; - } - // fall through -#endif -#endif - default: - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - _dispatch_kq_deferred_update(&dk->dk_kevent); - } else { - r = _dispatch_kq_immediate_update(&dk->dk_kevent); - } - break; - } - if (options & DKEV_DISPOSE_IMMEDIATE_DELETE) { - dk->dk_kevent.flags &= ~EV_ENABLE; - } - } - if (dk->dk_kevent.flags & EV_UDATA_SPECIFIC) { - bool deferred_delete = (r == 
EINPROGRESS); -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - if (r == ENOENT) deferred_delete = true; -#endif - if (deferred_delete) { - // deferred EV_DELETE or concurrent concurrent EV_DELETE delivery - dk->dk_kevent.flags &= ~EV_DELETE; - dk->dk_kevent.flags |= EV_ENABLE; - return r; - } - } else { - uintptr_t hash = _dispatch_kevent_hash(dk->dk_kevent.ident, - dk->dk_kevent.filter); - TAILQ_REMOVE(&_dispatch_sources[hash], dk, dk_list); - } - _dispatch_kevent_unguard(dk); - free(dk); - return r; -} - -static long -_dispatch_kevent_unregister(dispatch_kevent_t dk, uint32_t flg, - unsigned int options) -{ - dispatch_source_refs_t dri; - uint32_t del_flags, fflags = 0; - long r = 0; - - if (TAILQ_EMPTY(&dk->dk_sources) || - (dk->dk_kevent.flags & EV_UDATA_SPECIFIC)) { - r = _dispatch_kevent_dispose(dk, options); - } else { - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - uint32_t mask = (uint32_t)dsi->ds_pending_data_mask; - fflags |= mask; - } - del_flags = flg & ~fflags; - if (del_flags) { - dk->dk_kevent.flags |= EV_ADD; - dk->dk_kevent.fflags &= ~del_flags; - r = _dispatch_kevent_resume(dk, 0, del_flags); - } - } - return r; -} - -DISPATCH_NOINLINE -static void -_dispatch_kevent_proc_exit(_dispatch_kevent_qos_s *ke) -{ - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie - // . As a workaround, we simulate an exit event for - // any EVFILT_PROC with an invalid pid . 
- _dispatch_kevent_qos_s fake; - fake = *ke; - fake.flags &= ~EV_ERROR; - fake.flags |= EV_ONESHOT; - fake.fflags = NOTE_EXIT; - fake.data = 0; - _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); - _dispatch_kevent_merge(&fake); -} - -DISPATCH_NOINLINE -static void -_dispatch_kevent_error(_dispatch_kevent_qos_s *ke) -{ - _dispatch_kevent_qos_s *kev = NULL; - - if (ke->flags & EV_DELETE) { - if (ke->flags & EV_UDATA_SPECIFIC) { - if (ke->data == EINPROGRESS) { - // deferred EV_DELETE - return; - } -#if DISPATCH_KEVENT_TREAT_ENOENT_AS_EINPROGRESS - if (ke->data == ENOENT) { - // deferred EV_DELETE - return; - } -#endif - } - // for EV_DELETE if the update was deferred we may have reclaimed - // our dispatch_kevent_t, and it is unsafe to dereference it now. - } else if (ke->udata) { - kev = &((dispatch_kevent_t)ke->udata)->dk_kevent; - ke->flags |= kev->flags; - } - -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && - (ke->flags & EV_ADD) && _dispatch_evfilt_machport_direct_enabled && - kev && (kev->fflags & MACH_RCV_MSG)) { - DISPATCH_INTERNAL_CRASH(ke->ident, - "Missing EVFILT_MACHPORT support for ports"); - } -#endif - - if (ke->data) { - // log the unexpected error - _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), - !ke->udata ? NULL : - ke->flags & EV_DELETE ? "delete" : - ke->flags & EV_ADD ? "add" : - ke->flags & EV_ENABLE ? 
"enable" : "monitor", - (int)ke->data); - } -} - -static void -_dispatch_kevent_drain(_dispatch_kevent_qos_s *ke) -{ -#if DISPATCH_DEBUG - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kevent_debugger); -#endif - if (ke->filter == EVFILT_USER) { - _dispatch_kevent_mgr_debug(ke); - return; - } - if (slowpath(ke->flags & EV_ERROR)) { - if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - _dispatch_debug("kevent[0x%llx]: ESRCH from EVFILT_PROC: " - "generating fake NOTE_EXIT", (unsigned long long)ke->udata); - return _dispatch_kevent_proc_exit(ke); - } - _dispatch_debug("kevent[0x%llx]: handling error", - (unsigned long long)ke->udata); - return _dispatch_kevent_error(ke); - } - if (ke->filter == EVFILT_TIMER) { - _dispatch_debug("kevent[0x%llx]: handling timer", - (unsigned long long)ke->udata); - return _dispatch_timers_kevent(ke); - } -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT) { - _dispatch_debug("kevent[0x%llx]: handling mach port", - (unsigned long long)ke->udata); - return _dispatch_mach_kevent_merge(ke); - } -#endif - return _dispatch_kevent_merge(ke); -} - -DISPATCH_NOINLINE -static void -_dispatch_kevent_merge(_dispatch_kevent_qos_s *ke) -{ - dispatch_kevent_t dk = (void*)ke->udata; - dispatch_source_refs_t dri, dr_next; - - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { - _dispatch_source_merge_kevent(_dispatch_source_from_refs(dri), ke); - } -} - -#if DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD -static void -_dispatch_kevent_guard(dispatch_kevent_t dk) -{ - guardid_t guard; - const unsigned int guard_flags = GUARD_CLOSE; - int r, fd_flags = 0; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard = &dk->dk_kevent; - r = change_fdguard_np((int)dk->dk_kevent.ident, NULL, 0, - &guard, guard_flags, &fd_flags); - if (slowpath(r == -1)) { - int err = errno; - if (err != EPERM) { - (void)dispatch_assume_zero(err); - } - return; - } - dk->dk_kevent.ext[0] = guard_flags; - 
dk->dk_kevent.ext[1] = fd_flags; - break; - } -} - -static void -_dispatch_kevent_unguard(dispatch_kevent_t dk) -{ - guardid_t guard; - unsigned int guard_flags; - int r, fd_flags; - switch (dk->dk_kevent.filter) { - case EVFILT_READ: - case EVFILT_WRITE: - case EVFILT_VNODE: - guard_flags = (unsigned int)dk->dk_kevent.ext[0]; - if (!guard_flags) { - return; - } - guard = &dk->dk_kevent; - fd_flags = (int)dk->dk_kevent.ext[1]; - r = change_fdguard_np((int)dk->dk_kevent.ident, &guard, - guard_flags, NULL, 0, &fd_flags); - if (slowpath(r == -1)) { - (void)dispatch_assume_zero(errno); - return; - } - dk->dk_kevent.ext[0] = 0; - break; - } -} -#endif // DISPATCH_USE_GUARDED_FD_CHANGE_FDGUARD - -#pragma mark - -#pragma mark dispatch_source_timer +#pragma mark dispatch_source_timer #if DISPATCH_USE_DTRACE -static dispatch_source_refs_t +static dispatch_timer_source_refs_t _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; #define _dispatch_trace_next_timer_set(x, q) \ _dispatch_trace_next_timer[(q)] = (x) #define _dispatch_trace_next_timer_program(d, q) \ _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) -#define _dispatch_trace_next_timer_wake(q) \ - _dispatch_trace_timer_wake(_dispatch_trace_next_timer[(q)]) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_mgr_trace_timers_wakes(void) +{ + uint32_t qos; + + if (_dispatch_timers_will_wake) { + if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { + for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { + if (_dispatch_timers_will_wake & (1 << qos)) { + _dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]); + } + } + } + _dispatch_timers_will_wake = 0; + } +} #else #define _dispatch_trace_next_timer_set(x, q) #define _dispatch_trace_next_timer_program(d, q) -#define _dispatch_trace_next_timer_wake(q) +#define _dispatch_mgr_trace_timers_wakes() #endif #define _dispatch_source_timer_telemetry_enabled() false @@ -1721,118 +1205,59 @@ static dispatch_source_refs_t DISPATCH_NOINLINE static void 
_dispatch_source_timer_telemetry_slow(dispatch_source_t ds, - uintptr_t ident, struct dispatch_timer_source_s *values) + dispatch_clock_t clock, struct dispatch_timer_source_s *values) { if (_dispatch_trace_timer_configure_enabled()) { - _dispatch_trace_timer_configure(ds, ident, values); + _dispatch_trace_timer_configure(ds, clock, values); } } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_timer_telemetry(dispatch_source_t ds, uintptr_t ident, +_dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock, struct dispatch_timer_source_s *values) { if (_dispatch_trace_timer_configure_enabled() || _dispatch_source_timer_telemetry_enabled()) { - _dispatch_source_timer_telemetry_slow(ds, ident, values); + _dispatch_source_timer_telemetry_slow(ds, clock, values); asm(""); // prevent tailcall } } -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_source_timer_now(uint64_t nows[], unsigned int tidx) -{ - unsigned int tk = DISPATCH_TIMER_KIND(tidx); - if (nows && fastpath(nows[tk] != 0)) { - return nows[tk]; - } - uint64_t now; - switch (tk) { - case DISPATCH_TIMER_KIND_MACH: - now = _dispatch_absolute_time(); - break; - case DISPATCH_TIMER_KIND_WALL: - now = _dispatch_get_nanoseconds(); - break; - } - if (nows) { - nows[tk] = now; - } - return now; -} - -static inline unsigned long -_dispatch_source_timer_data(dispatch_source_refs_t dr, unsigned long prev) -{ - // calculate the number of intervals since last fire - unsigned long data, missed; - uint64_t now; - now = _dispatch_source_timer_now(NULL, _dispatch_source_timer_idx(dr)); - missed = (unsigned long)((now - ds_timer(dr).last_fire) / - ds_timer(dr).interval); - // correct for missed intervals already delivered last time - data = prev - ds_timer(dr).missed + missed; - ds_timer(dr).missed = missed; - return data; -} - -struct dispatch_set_timer_params { - dispatch_source_t ds; - uintptr_t 
ident; - struct dispatch_timer_source_s values; -}; - +DISPATCH_NOINLINE static void -_dispatch_source_set_timer3(void *context) +_dispatch_source_timer_configure(dispatch_source_t ds) { - // Called on the _dispatch_mgr_q - struct dispatch_set_timer_params *params = context; - dispatch_source_t ds = params->ds; - ds->ds_ident_hack = params->ident; - ds_timer(ds->ds_refs) = params->values; - // Clear any pending data that might have accumulated on - // older timer params - ds->ds_pending_data = 0; - // Re-arm in case we got disarmed because of pending set_timer suspension - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, ds->ds_dkev); - dispatch_resume(ds); - // Must happen after resume to avoid getting disarmed due to suspension - _dispatch_timers_update(ds); - dispatch_release(ds); - if (params->values.flags & DISPATCH_TIMER_WALL_CLOCK) { - _dispatch_mach_host_calendar_change_register(); - } - free(params); -} + dispatch_timer_source_refs_t dt = ds->ds_timer_refs; + dispatch_timer_config_t dtc; -static void -_dispatch_source_set_timer2(void *context) -{ - // Called on the source queue - struct dispatch_set_timer_params *params = context; - dispatch_suspend(params->ds); - _dispatch_barrier_async_detached_f(&_dispatch_mgr_q, params, - _dispatch_source_set_timer3); + dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency); + if (dtc->dtc_clock == DISPATCH_CLOCK_MACH) { + dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; + } else { + dt->du_fflags &= ~(uint32_t)DISPATCH_TIMER_CLOCK_MACH; + } + dt->dt_timer = dtc->dtc_timer; + free(dtc); + if (ds->ds_is_installed) { + // Clear any pending data that might have accumulated on + // older timer params + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); + _dispatch_timers_update(dt, 0); + } } -DISPATCH_NOINLINE -static struct dispatch_set_timer_params * -_dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, +static 
dispatch_timer_config_t +_dispatch_source_timer_config_create(dispatch_time_t start, uint64_t interval, uint64_t leeway) { - struct dispatch_set_timer_params *params; - params = _dispatch_calloc(1ul, sizeof(struct dispatch_set_timer_params)); - params->ds = ds; - params->values.flags = ds_timer(ds->ds_refs).flags; - - if (interval == 0) { - // we use zero internally to mean disabled + dispatch_timer_config_t dtc; + dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s)); + if (unlikely(interval == 0)) { + if (start != DISPATCH_TIME_FOREVER) { + _dispatch_bug_deprecated("Setting timer interval to 0 requests " + "a 1ns timer, did you mean FOREVER (a one-shot timer)?"); + } interval = 1; } else if ((int64_t)interval < 0) { // 6866347 - make sure nanoseconds won't overflow @@ -1850,7 +1275,7 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, if ((int64_t)start < 0) { // wall clock start = (dispatch_time_t)-((int64_t)start); - params->values.flags |= DISPATCH_TIMER_WALL_CLOCK; + dtc->dtc_clock = DISPATCH_CLOCK_WALL; } else { // absolute clock interval = _dispatch_time_nano2mach(interval); @@ -1862,64 +1287,50 @@ _dispatch_source_timer_params(dispatch_source_t ds, dispatch_time_t start, interval = 1; } leeway = _dispatch_time_nano2mach(leeway); - params->values.flags &= ~(unsigned long)DISPATCH_TIMER_WALL_CLOCK; - } - params->ident = DISPATCH_TIMER_IDENT(params->values.flags); - params->values.target = start; - params->values.deadline = (start < UINT64_MAX - leeway) ? - start + leeway : UINT64_MAX; - params->values.interval = interval; - params->values.leeway = (interval == INT64_MAX || leeway < interval / 2) ? 
- leeway : interval / 2; - return params; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, - uint64_t interval, uint64_t leeway, bool source_sync) -{ - if (slowpath(!ds->ds_is_timer) || - slowpath(ds_timer(ds->ds_refs).flags & DISPATCH_TIMER_INTERVAL)) { - DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); + dtc->dtc_clock = DISPATCH_CLOCK_MACH; + } + if (interval < INT64_MAX && leeway > interval / 2) { + leeway = interval / 2; } - struct dispatch_set_timer_params *params; - params = _dispatch_source_timer_params(ds, start, interval, leeway); - - _dispatch_source_timer_telemetry(ds, params->ident, ¶ms->values); - // Suspend the source so that it doesn't fire with pending changes - // The use of suspend/resume requires the external retain/release - dispatch_retain(ds); - if (source_sync) { - return _dispatch_barrier_trysync_or_async_f(ds->_as_dq, params, - _dispatch_source_set_timer2); + dtc->dtc_timer.target = start; + dtc->dtc_timer.interval = interval; + if (start + leeway < INT64_MAX) { + dtc->dtc_timer.deadline = start + leeway; } else { - return _dispatch_source_set_timer2(params); + dtc->dtc_timer.deadline = INT64_MAX; } + return dtc; } +DISPATCH_NOINLINE void dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, uint64_t interval, uint64_t leeway) { - _dispatch_source_set_timer(ds, start, interval, leeway, true); -} + dispatch_timer_source_refs_t dt = ds->ds_timer_refs; + dispatch_timer_config_t dtc; -void -_dispatch_source_set_runloop_timer_4CF(dispatch_source_t ds, - dispatch_time_t start, uint64_t interval, uint64_t leeway) -{ - // Don't serialize through the source queue for CF timers - _dispatch_source_set_timer(ds, start, interval, leeway, false); + if (unlikely(!dt->du_is_timer || (dt->du_fflags&DISPATCH_TIMER_INTERVAL))) { + DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); + } + + dtc = 
_dispatch_source_timer_config_create(start, interval, leeway); + _dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer); + dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release); + if (dtc) free(dtc); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } -void +static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) { - dispatch_source_refs_t dr = ds->ds_refs; - #define NSEC_PER_FRAME (NSEC_PER_SEC/60) - const bool animation = ds_timer(dr).flags & DISPATCH_INTERVAL_UI_ANIMATION; +#define NSEC_PER_FRAME (NSEC_PER_SEC/60) +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull + + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; + const bool animation = dr->du_fflags & DISPATCH_INTERVAL_UI_ANIMATION; if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : FOREVER_NSEC/NSEC_PER_MSEC))) { interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; @@ -1928,1071 +1339,994 @@ _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) } interval = _dispatch_time_nano2mach(interval); uint64_t target = _dispatch_absolute_time() + interval; - target = (target / interval) * interval; + target -= (target % interval); const uint64_t leeway = animation ? 
_dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; - ds_timer(dr).target = target; - ds_timer(dr).deadline = target + leeway; - ds_timer(dr).interval = interval; - ds_timer(dr).leeway = leeway; - _dispatch_source_timer_telemetry(ds, ds->ds_ident_hack, &ds_timer(dr)); + dr->dt_timer.target = target; + dr->dt_timer.deadline = target + leeway; + dr->dt_timer.interval = interval; + _dispatch_source_timer_telemetry(ds, DISPATCH_CLOCK_MACH, &dr->dt_timer); } #pragma mark - -#pragma mark dispatch_timers +#pragma mark dispatch_after -#define DISPATCH_TIMER_STRUCT(refs) \ - uint64_t target, deadline; \ - TAILQ_HEAD(, refs) dt_sources - -typedef struct dispatch_timer_s { - DISPATCH_TIMER_STRUCT(dispatch_timer_source_refs_s); -} *dispatch_timer_t; - -#define DISPATCH_TIMER_INITIALIZER(tidx) \ - [tidx] = { \ - .target = UINT64_MAX, \ - .deadline = UINT64_MAX, \ - .dt_sources = TAILQ_HEAD_INITIALIZER( \ - _dispatch_timer[tidx].dt_sources), \ - } -#define DISPATCH_TIMER_INIT(kind, qos) \ - DISPATCH_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) - -struct dispatch_timer_s _dispatch_timer[] = { - DISPATCH_TIMER_INIT(WALL, NORMAL), - DISPATCH_TIMER_INIT(WALL, CRITICAL), - DISPATCH_TIMER_INIT(WALL, BACKGROUND), - DISPATCH_TIMER_INIT(MACH, NORMAL), - DISPATCH_TIMER_INIT(MACH, CRITICAL), - DISPATCH_TIMER_INIT(MACH, BACKGROUND), -}; -#define DISPATCH_TIMER_COUNT \ - ((sizeof(_dispatch_timer) / sizeof(_dispatch_timer[0]))) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + void *ctxt, void *handler, bool block) +{ + dispatch_timer_source_refs_t dt; + dispatch_source_t ds; + uint64_t leeway, delta; -#if __linux__ -#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ - (void*)&_dispatch_kevent_timer[tidx] -#else -#define DISPATCH_KEVENT_TIMER_UDATA(tidx) \ - (uintptr_t)&_dispatch_kevent_timer[tidx] + if (when == DISPATCH_TIME_FOREVER) { +#if DISPATCH_DEBUG + DISPATCH_CLIENT_CRASH(0, 
"dispatch_after called with 'when' == infinity"); #endif -#ifdef __LP64__ -#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ - .udata = DISPATCH_KEVENT_TIMER_UDATA(tidx) -#else // __LP64__ -// dynamic initialization in _dispatch_timers_init() -#define DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx) \ - .udata = 0 -#endif // __LP64__ -#define DISPATCH_KEVENT_TIMER_INITIALIZER(tidx) \ - [tidx] = { \ - .dk_kevent = { \ - .ident = tidx, \ - .filter = DISPATCH_EVFILT_TIMER, \ - DISPATCH_KEVENT_TIMER_UDATA_INITIALIZER(tidx), \ - }, \ - .dk_sources = TAILQ_HEAD_INITIALIZER( \ - _dispatch_kevent_timer[tidx].dk_sources), \ - } -#define DISPATCH_KEVENT_TIMER_INIT(kind, qos) \ - DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos)) - -struct dispatch_kevent_s _dispatch_kevent_timer[] = { - DISPATCH_KEVENT_TIMER_INIT(WALL, NORMAL), - DISPATCH_KEVENT_TIMER_INIT(WALL, CRITICAL), - DISPATCH_KEVENT_TIMER_INIT(WALL, BACKGROUND), - DISPATCH_KEVENT_TIMER_INIT(MACH, NORMAL), - DISPATCH_KEVENT_TIMER_INIT(MACH, CRITICAL), - DISPATCH_KEVENT_TIMER_INIT(MACH, BACKGROUND), - DISPATCH_KEVENT_TIMER_INITIALIZER(DISPATCH_TIMER_INDEX_DISARM), -}; -#define DISPATCH_KEVENT_TIMER_COUNT \ - ((sizeof(_dispatch_kevent_timer) / sizeof(_dispatch_kevent_timer[0]))) - -#define DISPATCH_KEVENT_TIMEOUT_IDENT_MASK (~0ull << 8) -#define DISPATCH_KEVENT_TIMEOUT_INITIALIZER(tidx, note) \ - [tidx] = { \ - .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK|(tidx), \ - .filter = EVFILT_TIMER, \ - .flags = EV_ONESHOT, \ - .fflags = NOTE_ABSOLUTE|NOTE_NSECONDS|NOTE_LEEWAY|(note), \ - } -#define DISPATCH_KEVENT_TIMEOUT_INIT(kind, qos, note) \ - DISPATCH_KEVENT_TIMEOUT_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_TIMER_KIND_##kind, DISPATCH_TIMER_QOS_##qos), note) - -_dispatch_kevent_qos_s _dispatch_kevent_timeout[] = { - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, NORMAL, NOTE_MACH_CONTINUOUS_TIME), - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, CRITICAL, 
NOTE_MACH_CONTINUOUS_TIME | NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(WALL, BACKGROUND, NOTE_MACH_CONTINUOUS_TIME | NOTE_BACKGROUND), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, NORMAL, 0), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, CRITICAL, NOTE_CRITICAL), - DISPATCH_KEVENT_TIMEOUT_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), -}; -#define DISPATCH_KEVENT_TIMEOUT_COUNT \ - ((sizeof(_dispatch_kevent_timeout) / sizeof(_dispatch_kevent_timeout[0]))) -static_assert(DISPATCH_KEVENT_TIMEOUT_COUNT == DISPATCH_TIMER_INDEX_COUNT - 1, - "should have a kevent for everything but disarm (ddt assumes this)"); + return; + } -#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ - [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC + delta = _dispatch_timeout(when); + if (delta == 0) { + if (block) { + return dispatch_async(queue, handler); + } + return dispatch_async_f(queue, ctxt, handler); + } + leeway = delta / 10; // -static const uint64_t _dispatch_kevent_coalescing_window[] = { - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), -}; + if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; + if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; -#define _dispatch_timers_insert(tidx, dra, dr, dr_list, dta, dt, dt_list) ({ \ - typeof(dr) dri = NULL; typeof(dt) dti; \ - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ - TAILQ_FOREACH(dri, &dra[tidx].dk_sources, dr_list) { \ - if (ds_timer(dr).target < ds_timer(dri).target) { \ - break; \ - } \ - } \ - TAILQ_FOREACH(dti, &dta[tidx].dt_sources, dt_list) { \ - if (ds_timer(dt).deadline < ds_timer(dti).deadline) { \ - break; \ - } \ - } \ - if (dti) { \ - TAILQ_INSERT_BEFORE(dti, dt, dt_list); \ - } else { \ - TAILQ_INSERT_TAIL(&dta[tidx].dt_sources, dt, dt_list); \ - } \ - } \ - if (dri) { \ - TAILQ_INSERT_BEFORE(dri, dr, dr_list); \ - } else { \ - TAILQ_INSERT_TAIL(&dra[tidx].dk_sources, dr, dr_list); \ - } \ - }) - 
-#define _dispatch_timers_remove(tidx, dk, dra, dr, dr_list, dta, dt, dt_list) \ - ({ \ - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { \ - TAILQ_REMOVE(&dta[tidx].dt_sources, dt, dt_list); \ - } \ - TAILQ_REMOVE(dk ? &(*(dk)).dk_sources : &dra[tidx].dk_sources, dr, \ - dr_list); }) - -#define _dispatch_timers_check(dra, dta) ({ \ - unsigned int timerm = _dispatch_timers_mask; \ - bool update = false; \ - unsigned int tidx; \ - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { \ - if (!(timerm & (1 << tidx))){ \ - continue; \ - } \ - dispatch_timer_source_refs_t dr = (dispatch_timer_source_refs_t) \ - TAILQ_FIRST(&dra[tidx].dk_sources); \ - dispatch_timer_source_refs_t dt = (dispatch_timer_source_refs_t) \ - TAILQ_FIRST(&dta[tidx].dt_sources); \ - uint64_t target = dr ? ds_timer(dr).target : UINT64_MAX; \ - uint64_t deadline = dr ? ds_timer(dt).deadline : UINT64_MAX; \ - if (target != dta[tidx].target) { \ - dta[tidx].target = target; \ - update = true; \ - } \ - if (deadline != dta[tidx].deadline) { \ - dta[tidx].deadline = deadline; \ - update = true; \ - } \ - } \ - update; }) - -static bool _dispatch_timers_reconfigure, _dispatch_timer_expired; -static unsigned int _dispatch_timers_mask; -static bool _dispatch_timers_force_max_leeway; + // this function can and should be optimized to not use a dispatch source + ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); + dt = ds->ds_timer_refs; -static void -_dispatch_timers_init(void) -{ -#ifndef __LP64__ - unsigned int tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - _dispatch_kevent_timer[tidx].dk_kevent.udata = - DISPATCH_KEVENT_TIMER_UDATA(tidx); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + if (block) { + _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); + } else { + _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); } -#endif // __LP64__ - if (slowpath(getenv("LIBDISPATCH_TIMERS_FORCE_MAX_LEEWAY"))) { - _dispatch_timers_force_max_leeway = 
true; + // reference `ds` so that it doesn't show up as a leak + dc->dc_data = ds; + _dispatch_trace_continuation_push(ds->_as_dq, dc); + os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed); + + if ((int64_t)when < 0) { + // wall clock + when = (dispatch_time_t)-((int64_t)when); + } else { + // absolute clock + dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; + leeway = _dispatch_time_nano2mach(leeway); } + dt->dt_timer.target = when; + dt->dt_timer.interval = UINT64_MAX; + dt->dt_timer.deadline = when + leeway; + dispatch_activate(ds); } -static inline void -_dispatch_timers_unregister(dispatch_source_t ds, dispatch_kevent_t dk) +DISPATCH_NOINLINE +void +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *ctxt, + dispatch_function_t func) { - dispatch_source_refs_t dr = ds->ds_refs; - unsigned int tidx = (unsigned int)dk->dk_kevent.ident; - - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_unregister(ds, tidx); - } - _dispatch_timers_remove(tidx, dk, _dispatch_kevent_timer, dr, dr_list, - _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_timers_reconfigure = true; - _dispatch_timers_mask |= 1 << tidx; - } + _dispatch_after(when, queue, ctxt, func, false); } -// Updates the ordered list of timers based on next fire date for changes to ds. -// Should only be called from the context of _dispatch_mgr_q. 
-static void -_dispatch_timers_update(dispatch_source_t ds) +#ifdef __BLOCKS__ +void +dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + dispatch_block_t work) { - dispatch_kevent_t dk = ds->ds_dkev; - dispatch_source_refs_t dr = ds->ds_refs; - unsigned int tidx; - - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + _dispatch_after(when, queue, NULL, work, true); +} +#endif - // Do not reschedule timers unregistered with _dispatch_kevent_unregister() - if (slowpath(!dk)) { - return; - } - // Move timers that are disabled, suspended or have missed intervals to the - // disarmed list, rearm after resume resp. source invoke will reenable them - if (!ds_timer(dr).target || DISPATCH_QUEUE_IS_SUSPENDED(ds) || - ds->ds_pending_data) { - tidx = DISPATCH_TIMER_INDEX_DISARM; - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, - ds->ds_dkev); - } else { - tidx = _dispatch_source_timer_idx(dr); - } - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_register(ds); - } - if (slowpath(!ds->ds_is_installed)) { - ds->ds_is_installed = true; - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, - ds->ds_dkev); - } - _dispatch_object_debug(ds, "%s", __func__); - ds->ds_dkev = NULL; - free(dk); - } else { - _dispatch_timers_unregister(ds, dk); - } - if (tidx != DISPATCH_TIMER_INDEX_DISARM) { - _dispatch_timers_reconfigure = true; - _dispatch_timers_mask |= 1 << tidx; - } - if (dk != &_dispatch_kevent_timer[tidx]){ - ds->ds_dkev = &_dispatch_kevent_timer[tidx]; - } - _dispatch_timers_insert(tidx, _dispatch_kevent_timer, dr, dr_list, - _dispatch_timer, (dispatch_timer_source_refs_t)dr, dt_list); - if (slowpath(ds_timer_aggregate(ds))) { - _dispatch_timer_aggregates_update(ds, tidx); - } +#pragma mark - +#pragma mark dispatch_timers + +/* + * The dispatch_timer_heap_t structure is a 
double min-heap of timers, + * interleaving the by-target min-heap in the even slots, and the by-deadline + * in the odd ones. + * + * The min element of these is held inline in the dispatch_timer_heap_t + * structure, and further entries are held in segments. + * + * dth_segments is the number of allocated segments. + * + * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers + * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1)) + * + * Segment n (dth_segments - 1) is the last segment and points its final n + * entries to previous segments. Its address is held in the `dth_heap` field. + * + * segment n [ regular timer pointers | n-1 | k | 0 ] + * | | | + * segment n-1 <---------------------------' | | + * segment k <--------------------------------' | + * segment 0 <------------------------------------' + */ +#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u + +/* + * There are two min-heaps stored interleaved in a single array, + * even indices are for the by-target min-heap, and odd indices for + * the by-deadline one. 
+ */ +#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1) +#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK) +#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \ + (((idx) & ~DTH_HEAP_ID_MASK) | (heap_id)) + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_capacity(uint32_t segments) +{ + if (segments == 0) return 2; + uint32_t seg_no = segments - 1; + // for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY, + // 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no + return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no; } -static inline void -_dispatch_timers_run2(uint64_t nows[], unsigned int tidx) +DISPATCH_NOINLINE +static void +_dispatch_timer_heap_grow(dispatch_timer_heap_t dth) { - dispatch_source_refs_t dr; - dispatch_source_t ds; - uint64_t now, missed; + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = dth->dth_segments++; + void **heap, **heap_prev = dth->dth_heap; - now = _dispatch_source_timer_now(nows, tidx); - while ((dr = TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources))) { - ds = _dispatch_source_from_refs(dr); - // We may find timers on the wrong list due to a pending update from - // dispatch_source_set_timer. Force an update of the list in that case. - if (tidx != ds->ds_ident_hack) { - _dispatch_timers_update(ds); - continue; - } - if (!ds_timer(dr).target) { - // No configured timers on the list - break; - } - if (ds_timer(dr).target > now) { - // Done running timers for now. - break; - } - // Remove timers that are suspended or have missed intervals from the - // list, rearm after resume resp. source invoke will reenable them - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || ds->ds_pending_data) { - _dispatch_timers_update(ds); - continue; - } - // Calculate number of missed intervals. 
- missed = (now - ds_timer(dr).target) / ds_timer(dr).interval; - if (++missed > INT_MAX) { - missed = INT_MAX; - } - if (ds_timer(dr).interval < INT64_MAX) { - ds_timer(dr).target += missed * ds_timer(dr).interval; - ds_timer(dr).deadline = ds_timer(dr).target + ds_timer(dr).leeway; - } else { - ds_timer(dr).target = UINT64_MAX; - ds_timer(dr).deadline = UINT64_MAX; - } - _dispatch_timers_update(ds); - ds_timer(dr).last_fire = now; - - unsigned long data; - data = os_atomic_add2o(ds, ds_pending_data, - (unsigned long)missed, relaxed); - _dispatch_trace_timer_fire(dr, data, (unsigned long)missed); - dx_wakeup(ds, 0, DISPATCH_WAKEUP_FLUSH); - if (ds_timer(dr).flags & DISPATCH_TIMER_AFTER) { - _dispatch_source_kevent_unregister(ds); - } + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + } + heap = _dispatch_calloc(seg_capacity, sizeof(void *)); + if (seg_no > 1) { + uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap[seg_capacity - prev_seg_no], + &heap_prev[prev_seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); } + if (seg_no > 0) { + heap[seg_capacity - seg_no] = heap_prev; + } + dth->dth_heap = heap; } DISPATCH_NOINLINE static void -_dispatch_timers_run(uint64_t nows[]) +_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth) { - unsigned int tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (!TAILQ_EMPTY(&_dispatch_kevent_timer[tidx].dk_sources)) { - _dispatch_timers_run2(nows, tidx); - } + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = --dth->dth_segments; + void **heap = dth->dth_heap, **heap_prev = NULL; + + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + heap_prev = heap[seg_capacity - seg_no]; + } + if (seg_no > 1) { + uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap_prev[prev_seg_capacity - prev_seg_no], + &heap[seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); } + dth->dth_heap 
= heap_prev; + free(heap); } -static inline unsigned int -_dispatch_timers_get_delay(uint64_t nows[], struct dispatch_timer_s timer[], - uint64_t *delay, uint64_t *leeway, int qos, int kind) -{ - unsigned int tidx, ridx = DISPATCH_TIMER_COUNT; - uint64_t tmp, delta = UINT64_MAX, dldelta = UINT64_MAX; - - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (qos >= 0 && qos != DISPATCH_TIMER_QOS(tidx)){ - continue; - } - if (kind >= 0 && kind != DISPATCH_TIMER_KIND(tidx)){ - continue; - } - uint64_t target = timer[tidx].target; - if (target == UINT64_MAX) { - continue; - } - uint64_t deadline = timer[tidx].deadline; - if (qos >= 0) { - // Timer pre-coalescing - uint64_t window = _dispatch_kevent_coalescing_window[qos]; - uint64_t latest = deadline > window ? deadline - window : 0; - dispatch_source_refs_t dri; - TAILQ_FOREACH(dri, &_dispatch_kevent_timer[tidx].dk_sources, - dr_list) { - tmp = ds_timer(dri).target; - if (tmp > latest) break; - target = tmp; - } - } - uint64_t now = _dispatch_source_timer_now(nows, tidx); - if (target <= now) { - delta = 0; - break; - } - tmp = target - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < INT64_MAX && tmp < delta) { - ridx = tidx; - delta = tmp; - } - dispatch_assert(target <= deadline); - tmp = deadline - now; - if (DISPATCH_TIMER_KIND(tidx) != DISPATCH_TIMER_KIND_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < INT64_MAX && tmp < dldelta) { - dldelta = tmp; - } +DISPATCH_ALWAYS_INLINE +static inline dispatch_timer_source_refs_t * +_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx) +{ + uint32_t seg_no, segments = dth->dth_segments; + void **segment; + + if (idx < DTH_ID_COUNT) { + return &dth->dth_min[idx]; + } + idx -= DTH_ID_COUNT; + + // Derive the segment number from the index. Naming + // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are: + // 0: 0 .. (C - 1) + // 1: C .. 
2 * C - 1 + // k: 2^(k-1) * C .. 2^k * C - 1 + // so `k` can be derived from the first bit set in `idx` + seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) - + __builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1))); + if (seg_no + 1 == segments) { + segment = dth->dth_heap; + } else { + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + seg_capacity <<= (segments - 2); + segment = dth->dth_heap[seg_capacity - seg_no - 1]; + } + if (seg_no) { + idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1); } - *delay = delta; - *leeway = delta && delta < UINT64_MAX ? dldelta - delta : UINT64_MAX; - return ridx; + return (dispatch_timer_source_refs_t *)(segment + idx); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_timer_heap_set(dispatch_timer_source_refs_t *slot, + dispatch_timer_source_refs_t dt, uint32_t idx) +{ + *slot = dt; + dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx; +} -#ifdef __linux__ -// in linux we map the _dispatch_kevent_qos_s to struct kevent instead -// of struct kevent64. We loose the kevent.ext[] members and the time -// out is based on relavite msec based time vs. absolute nsec based time. -// For now we make the adjustments right here until the solution -// to either extend libkqueue with a proper kevent64 API or removing kevent -// all together and move to a lower API (e.g. epoll or kernel_module. -// Also leeway is ignored. 
+DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_parent(uint32_t idx) +{ + uint32_t heap_id = DTH_HEAP_ID(idx); + idx = (idx - DTH_ID_COUNT) / 2; // go to the parent + return DTH_IDX_FOR_HEAP_ID(idx, heap_id); +} -static void -_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_left_child(uint32_t idx) { - // call to update nows[] - _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); - // adjust nsec based delay to msec based and ignore leeway - delay /= 1000000L; - if ((int64_t)(delay) <= 0) { - delay = 1; // if value <= 0 the dispatch will stop - } - ke->data = (int64_t)delay; + uint32_t heap_id = DTH_HEAP_ID(idx); + // 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id + return 2 * idx + DTH_ID_COUNT - heap_id; } -#else -static void -_dispatch_kevent_timer_set_delay(_dispatch_kevent_qos_s *ke, uint64_t delay, - uint64_t leeway, uint64_t nows[]) +#if DISPATCH_HAVE_TIMER_COALESCING +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count) { - delay += _dispatch_source_timer_now(nows, DISPATCH_TIMER_KIND_WALL); - if (slowpath(_dispatch_timers_force_max_leeway)) { - ke->data = (int64_t)(delay + leeway); - ke->ext[1] = 0; - } else { - ke->data = (int64_t)delay; - ke->ext[1] = leeway; - } + uint32_t heap_id = DTH_HEAP_ID(idx); + + idx -= heap_id; + if (unlikely(idx + DTH_ID_COUNT == count)) { + // reaching `count` doesn't mean we're done, but there is a weird + // corner case if the last item of the heap is a left child: + // + // /\ + // / \ + // / __\ + // /__/ + // ^ + // + // The formula below would return the sibling of `idx` which is + // out of bounds. 
Fortunately, the correct answer is the same + // as for idx's parent + idx = _dispatch_timer_heap_parent(idx); + } + + // + // When considering the index in a non interleaved, 1-based array + // representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1) + // for a given idx in our dual-heaps, that index is in one of two forms: + // + // (a) 1xxxx011111 or (b) 111111111 + // d i 0 d 0 + // + // The first bit set is the row of the binary tree node (0-based). + // The following digits from most to least significant represent the path + // to that node, where `0` is a left turn and `1` a right turn. + // + // For example 0b0101 (5) is a node on row 2 accessed going left then right: + // + // row 0 1 + // / . + // row 1 2 3 + // . \ . . + // row 2 4 5 6 7 + // : : : : : : : : + // + // Skipping a sub-tree in walk order means going to the sibling of the last + // node reached after we turned left. If the node was of the form (a), + // this node is 1xxxx1, which for the above example is 0b0011 (3). + // If the node was of the form (b) then we never took a left, meaning + // we reached the last element in traversal order. + // + + // + // we want to find + // - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1) + // - which is offset by log_2(DTH_ID_COUNT) from the position of the least + // significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2. + // - which in turn is the same as the position of the least significant 1 in + // ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // + dispatch_static_assert(powerof2(DTH_ID_COUNT)); + idx += DTH_ID_COUNT + DTH_ID_COUNT - 1; + idx >>= __builtin_ctz(~idx); + + // + // `idx` is now either: + // - 0 if it was the (b) case above, in which case the walk is done + // - 1xxxx0 as the position in a 0 based array representation of a non + // interleaved heap, so we just have to compute the interleaved index. + // + return likely(idx) ? 
DTH_ID_COUNT * idx + heap_id : UINT32_MAX; } -#endif // __linux__ -static bool -_dispatch_timers_program2(uint64_t nows[], _dispatch_kevent_qos_s *ke, - unsigned int tidx) +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count) { - bool poll; - uint64_t delay, leeway; - - _dispatch_timers_get_delay(nows, _dispatch_timer, &delay, &leeway, - (int)DISPATCH_TIMER_QOS(tidx), (int)DISPATCH_TIMER_KIND(tidx)); - poll = (delay == 0); - if (poll || delay == UINT64_MAX) { - _dispatch_trace_next_timer_set(NULL, DISPATCH_TIMER_QOS(tidx)); - if (!ke->data) { - return poll; - } - ke->data = 0; - ke->flags |= EV_DELETE; - ke->flags &= ~(EV_ADD|EV_ENABLE); - } else { - _dispatch_trace_next_timer_set( - TAILQ_FIRST(&_dispatch_kevent_timer[tidx].dk_sources), DISPATCH_TIMER_QOS(tidx)); - _dispatch_trace_next_timer_program(delay, DISPATCH_TIMER_QOS(tidx)); - _dispatch_kevent_timer_set_delay(ke, delay, leeway, nows); - ke->flags |= EV_ADD|EV_ENABLE; - ke->flags &= ~EV_DELETE; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } -#endif + // + // Goes to the next element in heap walk order, which is the prefix ordered + // walk of the tree. + // + // From a given node, the next item to return is the left child if it + // exists, else the first right sibling we find by walking our parent chain, + // which is exactly what _dispatch_timer_heap_walk_skip() returns. 
+ // + uint32_t lchild = _dispatch_timer_heap_left_child(idx); + if (lchild < count) { + return lchild; } - _dispatch_kq_deferred_update(ke); - return poll; + return _dispatch_timer_heap_walk_skip(idx, count); } DISPATCH_NOINLINE -static bool -_dispatch_timers_program(uint64_t nows[]) -{ - bool poll = false; - unsigned int tidx, timerm = _dispatch_timers_mask; - for (tidx = 0; tidx < DISPATCH_KEVENT_TIMEOUT_COUNT; tidx++) { - if (!(timerm & 1 << tidx)){ - continue; +static uint64_t +_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit) +{ + dispatch_timer_source_refs_t dri; + uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID); + uint32_t count = dth->dth_count; + uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target; + + while (idx < count) { + dri = *_dispatch_timer_heap_get_slot(dth, idx); + tmp = dri->dt_timer.target; + if (tmp > limit) { + // skip subtree since none of the targets below can be before limit + idx = _dispatch_timer_heap_walk_skip(idx, count); + } else { + target = tmp; + idx = _dispatch_timer_heap_walk_next(idx, count); } - poll |= _dispatch_timers_program2(nows, &_dispatch_kevent_timeout[tidx], - tidx); } - return poll; + return target; } +#endif // DISPATCH_HAVE_TIMER_COALESCING DISPATCH_NOINLINE -static bool -_dispatch_timers_configure(void) -{ - _dispatch_timer_aggregates_check(); - // Find out if there is a new target/deadline on the timer lists - return _dispatch_timers_check(_dispatch_kevent_timer, _dispatch_timer); -} - -#if HAVE_MACH static void -_dispatch_timers_calendar_change(void) -{ - unsigned int qos; - - // calendar change may have gone past the wallclock deadline - _dispatch_timer_expired = true; - for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - _dispatch_timers_mask |= - 1 << DISPATCH_TIMER_INDEX(DISPATCH_TIMER_KIND_WALL, qos); +_dispatch_timer_heap_resift(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt, uint32_t idx) +{ + 
dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID])); + dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID])); +#define dth_cmp(hid, dt1, op, dt2) \ + (((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid]) + + dispatch_timer_source_refs_t *pslot, pdt; + dispatch_timer_source_refs_t *cslot, cdt; + dispatch_timer_source_refs_t *rslot, rdt; + uint32_t cidx, dth_count = dth->dth_count; + dispatch_timer_source_refs_t *slot; + int heap_id = DTH_HEAP_ID(idx); + bool sifted_up = false; + + // try to sift up + + slot = _dispatch_timer_heap_get_slot(dth, idx); + while (idx >= DTH_ID_COUNT) { + uint32_t pidx = _dispatch_timer_heap_parent(idx); + pslot = _dispatch_timer_heap_get_slot(dth, pidx); + pdt = *pslot; + if (dth_cmp(heap_id, pdt, <=, dt)) { + break; + } + _dispatch_timer_heap_set(slot, pdt, idx); + slot = pslot; + idx = pidx; + sifted_up = true; + } + if (sifted_up) { + goto done; } -} -#endif -static void -_dispatch_timers_kevent(_dispatch_kevent_qos_s *ke) -{ - dispatch_assert(ke->data > 0); - dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == - DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); - unsigned int tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; - dispatch_assert(tidx < DISPATCH_KEVENT_TIMEOUT_COUNT); - dispatch_assert(_dispatch_kevent_timeout[tidx].data != 0); - _dispatch_kevent_timeout[tidx].data = 0; // kevent deleted via EV_ONESHOT - _dispatch_timer_expired = true; - _dispatch_timers_mask |= 1 << tidx; - _dispatch_trace_next_timer_wake(DISPATCH_TIMER_QOS(tidx)); -} + // try to sift down -static inline bool -_dispatch_mgr_timers(void) -{ - uint64_t nows[DISPATCH_TIMER_KIND_COUNT] = {}; - bool expired = slowpath(_dispatch_timer_expired); - if (expired) { - _dispatch_timers_run(nows); - } - bool reconfigure = 
slowpath(_dispatch_timers_reconfigure); - if (reconfigure || expired) { - if (reconfigure) { - reconfigure = _dispatch_timers_configure(); - _dispatch_timers_reconfigure = false; + while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) { + uint32_t ridx = cidx + DTH_ID_COUNT; + cslot = _dispatch_timer_heap_get_slot(dth, cidx); + cdt = *cslot; + if (ridx < dth_count) { + rslot = _dispatch_timer_heap_get_slot(dth, ridx); + rdt = *rslot; + if (dth_cmp(heap_id, cdt, >, rdt)) { + cidx = ridx; + cdt = rdt; + cslot = rslot; + } } - if (reconfigure || expired) { - expired = _dispatch_timer_expired = _dispatch_timers_program(nows); - expired = expired || _dispatch_mgr_q.dq_items_tail; + if (dth_cmp(heap_id, dt, <=, cdt)) { + break; } - _dispatch_timers_mask = 0; + _dispatch_timer_heap_set(slot, cdt, idx); + slot = cslot; + idx = cidx; } - return expired; -} -#pragma mark - -#pragma mark dispatch_timer_aggregate - -typedef struct { - TAILQ_HEAD(, dispatch_timer_source_aggregate_refs_s) dk_sources; -} dispatch_timer_aggregate_refs_s; - -typedef struct dispatch_timer_aggregate_s { - DISPATCH_QUEUE_HEADER(queue); - TAILQ_ENTRY(dispatch_timer_aggregate_s) dta_list; - dispatch_timer_aggregate_refs_s - dta_kevent_timer[DISPATCH_KEVENT_TIMER_COUNT]; - struct { - DISPATCH_TIMER_STRUCT(dispatch_timer_source_aggregate_refs_s); - } dta_timer[DISPATCH_TIMER_COUNT]; - struct dispatch_timer_s dta_timer_data[DISPATCH_TIMER_COUNT]; - unsigned int dta_refcount; -} DISPATCH_QUEUE_ALIGN dispatch_timer_aggregate_s; - -typedef TAILQ_HEAD(, dispatch_timer_aggregate_s) dispatch_timer_aggregates_s; -static dispatch_timer_aggregates_s _dispatch_timer_aggregates = - TAILQ_HEAD_INITIALIZER(_dispatch_timer_aggregates); - -dispatch_timer_aggregate_t -dispatch_timer_aggregate_create(void) -{ - unsigned int tidx; - dispatch_timer_aggregate_t dta = _dispatch_alloc(DISPATCH_VTABLE(queue), - sizeof(struct dispatch_timer_aggregate_s)); - _dispatch_queue_init(dta->_as_dq, DQF_NONE, - 
DISPATCH_QUEUE_WIDTH_MAX, false); - dta->do_targetq = _dispatch_get_root_queue( - _DISPATCH_QOS_CLASS_USER_INITIATED, true); - //FIXME: aggregates need custom vtable - //dta->dq_label = "timer-aggregate"; - for (tidx = 0; tidx < DISPATCH_KEVENT_TIMER_COUNT; tidx++) { - TAILQ_INIT(&dta->dta_kevent_timer[tidx].dk_sources); - } - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - TAILQ_INIT(&dta->dta_timer[tidx].dt_sources); - dta->dta_timer[tidx].target = UINT64_MAX; - dta->dta_timer[tidx].deadline = UINT64_MAX; - dta->dta_timer_data[tidx].target = UINT64_MAX; - dta->dta_timer_data[tidx].deadline = UINT64_MAX; - } - return (dispatch_timer_aggregate_t)_dispatch_introspection_queue_create( - dta->_as_dq); +done: + _dispatch_timer_heap_set(slot, dt, idx); +#undef dth_cmp } -typedef struct dispatch_timer_delay_s { - dispatch_timer_t timer; - uint64_t delay, leeway; -} *dispatch_timer_delay_t; - +DISPATCH_ALWAYS_INLINE static void -_dispatch_timer_aggregate_get_delay(void *ctxt) +_dispatch_timer_heap_insert(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) { - dispatch_timer_delay_t dtd = ctxt; - struct { uint64_t nows[DISPATCH_TIMER_KIND_COUNT]; } dtn = {}; - _dispatch_timers_get_delay(dtn.nows, dtd->timer, &dtd->delay, &dtd->leeway, - -1, -1); -} + uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT; -uint64_t -dispatch_timer_aggregate_get_delay(dispatch_timer_aggregate_t dta, - uint64_t *leeway_ptr) -{ - struct dispatch_timer_delay_s dtd = { - .timer = dta->dta_timer_data, - }; - dispatch_sync_f(dta->_as_dq, &dtd, _dispatch_timer_aggregate_get_delay); - if (leeway_ptr) { - *leeway_ptr = dtd.leeway; + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==, + DTH_INVALID_ID, "deadline idx"); + + if (idx == 0) { + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID; + 
dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt; + return; } - return dtd.delay; -} -static void -_dispatch_timer_aggregate_update(void *ctxt) -{ - dispatch_timer_aggregate_t dta = (void*)_dispatch_queue_get_current(); - dispatch_timer_t dtau = ctxt; - unsigned int tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - dta->dta_timer_data[tidx].target = dtau[tidx].target; - dta->dta_timer_data[tidx].deadline = dtau[tidx].deadline; + if (unlikely(idx + DTH_ID_COUNT > + _dispatch_timer_heap_capacity(dth->dth_segments))) { + _dispatch_timer_heap_grow(dth); } - free(dtau); + _dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID); + _dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID); } DISPATCH_NOINLINE static void -_dispatch_timer_aggregates_configure(void) +_dispatch_timer_heap_remove(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) { - dispatch_timer_aggregate_t dta; - dispatch_timer_t dtau; - TAILQ_FOREACH(dta, &_dispatch_timer_aggregates, dta_list) { - if (!_dispatch_timers_check(dta->dta_kevent_timer, dta->dta_timer)) { - continue; + uint32_t idx = (dth->dth_count -= DTH_ID_COUNT); + + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); + + if (idx == 0) { + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt, + "target slot"); + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt, + "deadline slot"); + dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL; + goto clear_heap_entry; + } + + for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) { + dispatch_timer_source_refs_t *slot, last_dt; + slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id); + last_dt = *slot; *slot = NULL; + if (last_dt != dt) { + uint32_t removed_idx = dt->dt_heap_entry[heap_id]; + _dispatch_timer_heap_resift(dth, last_dt, removed_idx); } - dtau = 
_dispatch_calloc(DISPATCH_TIMER_COUNT, sizeof(*dtau)); - memcpy(dtau, dta->dta_timer, sizeof(dta->dta_timer)); - _dispatch_barrier_async_detached_f(dta->_as_dq, dtau, - _dispatch_timer_aggregate_update); } + if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) { + _dispatch_timer_heap_shrink(dth); + } + +clear_heap_entry: + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; } +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_timer_aggregates_check(void) +_dispatch_timer_heap_update(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) { - if (fastpath(TAILQ_EMPTY(&_dispatch_timer_aggregates))) { - return; - } - _dispatch_timer_aggregates_configure(); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); + + + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]); + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]); } -static void -_dispatch_timer_aggregates_register(dispatch_source_t ds) +DISPATCH_ALWAYS_INLINE +static bool +_dispatch_timer_heap_has_new_min(dispatch_timer_heap_t dth, + uint32_t count, uint32_t mask) { - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - if (!dta->dta_refcount++) { - TAILQ_INSERT_TAIL(&_dispatch_timer_aggregates, dta, dta_list); + dispatch_timer_source_refs_t dt; + bool changed = false; + uint64_t tmp; + uint32_t tidx; + + for (tidx = 0; tidx < count; tidx++) { + if (!(mask & (1u << tidx))) { + continue; + } + + dt = dth[tidx].dth_min[DTH_TARGET_ID]; + tmp = dt ? dt->dt_timer.target : UINT64_MAX; + if (dth[tidx].dth_target != tmp) { + dth[tidx].dth_target = tmp; + changed = true; + } + dt = dth[tidx].dth_min[DTH_DEADLINE_ID]; + tmp = dt ? 
dt->dt_timer.deadline : UINT64_MAX; + if (dth[tidx].dth_deadline != tmp) { + dth[tidx].dth_deadline = tmp; + changed = true; + } } + return changed; } -DISPATCH_NOINLINE -static void -_dispatch_timer_aggregates_update(dispatch_source_t ds, unsigned int tidx) +static inline void +_dispatch_timers_unregister(dispatch_timer_source_refs_t dt) { - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - dispatch_timer_source_aggregate_refs_t dr; - dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; - _dispatch_timers_insert(tidx, dta->dta_kevent_timer, dr, dra_list, - dta->dta_timer, dr, dta_list); + uint32_t tidx = dt->du_ident; + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; + + _dispatch_timer_heap_remove(heap, dt); + _dispatch_timers_reconfigure = true; + _dispatch_timers_processing_mask |= 1 << tidx; + dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON); + dt->du_wlh = NULL; } -DISPATCH_NOINLINE -static void -_dispatch_timer_aggregates_unregister(dispatch_source_t ds, unsigned int tidx) +static inline void +_dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx) { - dispatch_timer_aggregate_t dta = ds_timer_aggregate(ds); - dispatch_timer_source_aggregate_refs_t dr; - dr = (dispatch_timer_source_aggregate_refs_t)ds->ds_refs; - _dispatch_timers_remove(tidx, (dispatch_timer_aggregate_refs_s*)NULL, - dta->dta_kevent_timer, dr, dra_list, dta->dta_timer, dr, dta_list); - if (!--dta->dta_refcount) { - TAILQ_REMOVE(&_dispatch_timer_aggregates, dta, dta_list); + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; + if (_dispatch_unote_registered(dt)) { + DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx"); + _dispatch_timer_heap_update(heap, dt); + } else { + dt->du_ident = tidx; + _dispatch_timer_heap_insert(heap, dt); } + _dispatch_timers_reconfigure = true; + _dispatch_timers_processing_mask |= 1 << tidx; + dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON); + dt->du_wlh = 
DISPATCH_WLH_ANON; } -#pragma mark - -#pragma mark dispatch_kqueue - -static int _dispatch_kq; - -#if DISPATCH_DEBUG_QOS && DISPATCH_USE_KEVENT_WORKQUEUE -#define _dispatch_kevent_assert_valid_qos(ke) ({ \ - if (_dispatch_kevent_workqueue_enabled) { \ - const _dispatch_kevent_qos_s *_ke = (ke); \ - if (_ke->flags & (EV_ADD|EV_ENABLE)) { \ - _dispatch_assert_is_valid_qos_class(\ - (pthread_priority_t)_ke->qos); \ - dispatch_assert(_ke->qos); \ - } \ - } \ - }) -#else -#define _dispatch_kevent_assert_valid_qos(ke) ((void)ke) -#endif - +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_timer_tryarm(dispatch_source_t ds) +{ + dispatch_queue_flags_t oqf, nqf; + return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + if (oqf & (DSF_CANCELED | DQF_RELEASED)) { + // do not install a cancelled timer + os_atomic_rmw_loop_give_up(break); + } + nqf = oqf | DSF_ARMED; + }); +} +// Updates the ordered list of timers based on next fire date for changes to ds. +// Should only be called from the context of _dispatch_mgr_q. 
static void -_dispatch_kq_init(void *context DISPATCH_UNUSED) +_dispatch_timers_update(dispatch_unote_t du, uint32_t flags) { - _dispatch_fork_becomes_unsafe(); -#if DISPATCH_USE_KEVENT_WORKQUEUE - _dispatch_kevent_workqueue_init(); - if (_dispatch_kevent_workqueue_enabled) { - int r; - const _dispatch_kevent_qos_s kev[] = { - [0] = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, - }, - [1] = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }, - }; - _dispatch_kq = -1; -retry: - r = kevent_qos(-1, kev, 2, NULL, 0, NULL, NULL, - KEVENT_FLAG_WORKQ|KEVENT_FLAG_IMMEDIATE); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - default: - DISPATCH_CLIENT_CRASH(err, - "Failed to initalize workqueue kevent"); - break; - } - } + dispatch_timer_source_refs_t dr = du._dt; + dispatch_source_t ds = _dispatch_source_from_refs(dr); + const char *verb = "updated"; + bool will_register, disarm = false; + + DISPATCH_ASSERT_ON_MANAGER_QUEUE(); + + if (unlikely(dr->du_ident == DISPATCH_TIMER_IDENT_CANCELED)) { + dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0); return; } -#endif // DISPATCH_USE_KEVENT_WORKQUEUE -#if DISPATCH_USE_MGR_THREAD - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, - }; - _dispatch_fork_becomes_unsafe(); -#if DISPATCH_USE_GUARDED_FD - guardid_t guard = (uintptr_t)&kev; - _dispatch_kq = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); -#else - _dispatch_kq = kqueue(); -#endif - if (_dispatch_kq == -1) { - int err = errno; - switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH(err, "kqueue() failure: " - "kernel is out of memory"); - break; 
- default: - DISPATCH_INTERNAL_CRASH(err, "kqueue() failure"); - break; + // Unregister timers that are unconfigured, disabled, suspended or have + // missed intervals. Rearm after dispatch_set_timer(), resume or source + // invoke will reenable them + will_register = !(flags & DISPATCH_TIMERS_UNREGISTER) && + dr->dt_timer.target < INT64_MAX && + !os_atomic_load2o(ds, ds_pending_data, relaxed) && + !DISPATCH_QUEUE_IS_SUSPENDED(ds) && + !os_atomic_load2o(dr, dt_pending_config, relaxed); + if (likely(!_dispatch_unote_registered(dr))) { + dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0); + if (unlikely(!will_register || !_dispatch_source_timer_tryarm(ds))) { + return; } + verb = "armed"; + } else if (unlikely(!will_register)) { + disarm = true; + verb = "disarmed"; } - (void)dispatch_assume_zero(kevent_qos(_dispatch_kq, &kev, 1, NULL, 0, NULL, - NULL, 0)); - _dispatch_queue_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); -#endif // DISPATCH_USE_MGR_THREAD -} -DISPATCH_NOINLINE -static long -_dispatch_kq_update(const _dispatch_kevent_qos_s *ke, int n) -{ - int i, r; - _dispatch_kevent_qos_s kev_error[n]; - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_kq_init); - - for (i = 0; i < n; i++) { - if (ke[i].filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug_n("updating", ke + i, i, n); - } + // The heap owns a +2 on dispatch sources it references + // + // _dispatch_timers_run2() also sometimes passes DISPATCH_TIMERS_RETAIN_2 + // when it wants to take over this +2 at the same time we are unregistering + // the timer from the heap. + // + // Compute our refcount balance according to these rules, if our balance + // would become negative we retain the source upfront, if it is positive, we + // get rid of the extraneous refcounts after we're done touching the source. + int refs = will_register ? 
-2 : 0; + if (_dispatch_unote_registered(dr) && !(flags & DISPATCH_TIMERS_RETAIN_2)) { + refs += 2; + } + if (refs < 0) { + dispatch_assert(refs == -2); + _dispatch_retain_2(ds); } - unsigned int flags = KEVENT_FLAG_ERROR_EVENTS; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - flags |= KEVENT_FLAG_WORKQ; + uint32_t tidx = _dispatch_source_timer_idx(dr); + if (unlikely(_dispatch_unote_registered(dr) && + (!will_register || dr->du_ident != tidx))) { + _dispatch_timers_unregister(dr); + } + if (likely(will_register)) { + _dispatch_timers_register(dr, tidx); } -#endif -retry: - r = kevent_qos(_dispatch_kq, ke, n, kev_error, n, NULL, NULL, flags); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; - } - return err; + if (disarm) { + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); } - for (i = 0, n = r; i < n; i++) { - if (kev_error[i].flags & EV_ERROR) { - _dispatch_kevent_debug("returned error", &kev_error[i]); - _dispatch_kevent_drain(&kev_error[i]); - r = (int)kev_error[i].data; - } else { - _dispatch_kevent_mgr_debug(&kev_error[i]); - r = 0; - } + _dispatch_debug("kevent-source[%p]: %s timer[%p]", ds, verb, dr); + _dispatch_object_debug(ds, "%s", __func__); + if (refs > 0) { + dispatch_assert(refs == 2); + _dispatch_release_2_tailcall(ds); } - return r; } +#define DISPATCH_TIMER_MISSED_MARKER 1ul + DISPATCH_ALWAYS_INLINE -static void -_dispatch_kq_update_all(const _dispatch_kevent_qos_s *kev, int n) +static inline unsigned long +_dispatch_source_timer_compute_missed(dispatch_timer_source_refs_t dt, + uint64_t now, unsigned long prev) { - (void)_dispatch_kq_update(kev, n); + uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval; + if (++missed + prev > LONG_MAX) { + missed = LONG_MAX - prev; + } + if (dt->dt_timer.interval 
< INT64_MAX) { + uint64_t push_by = missed * dt->dt_timer.interval; + dt->dt_timer.target += push_by; + dt->dt_timer.deadline += push_by; + } else { + dt->dt_timer.target = UINT64_MAX; + dt->dt_timer.deadline = UINT64_MAX; + } + prev += missed; + return prev; } DISPATCH_ALWAYS_INLINE -static long -_dispatch_kq_update_one(const _dispatch_kevent_qos_s *kev) +static inline unsigned long +_dispatch_source_timer_data(dispatch_source_t ds, dispatch_unote_t du) { - return _dispatch_kq_update(kev, 1); -} + dispatch_timer_source_refs_t dr = du._dt; + unsigned long data, prev, clear_prev = 0; -static inline bool -_dispatch_kevent_maps_to_same_knote(const _dispatch_kevent_qos_s *e1, - const _dispatch_kevent_qos_s *e2) -{ - return e1->filter == e2->filter && - e1->ident == e2->ident && - e1->udata == e2->udata; + os_atomic_rmw_loop2o(ds, ds_pending_data, prev, clear_prev, relaxed, { + data = prev >> 1; + if (unlikely(prev & DISPATCH_TIMER_MISSED_MARKER)) { + os_atomic_rmw_loop_give_up(goto handle_missed_intervals); + } + }); + return data; + +handle_missed_intervals: + // The timer may be in _dispatch_source_invoke2() already for other + // reasons such as running the registration handler when ds_pending_data + // is changed by _dispatch_timers_run2() without holding the drain lock. + // + // We hence need dependency ordering to pair with the release barrier + // done by _dispatch_timers_run2() when setting the MISSED_MARKER bit. + os_atomic_thread_fence(dependency); + dr = os_atomic_force_dependency_on(dr, data); + + uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); + if (now >= dr->dt_timer.target) { + OS_COMPILER_CAN_ASSUME(dr->dt_timer.interval < INT64_MAX); + data = _dispatch_source_timer_compute_missed(dr, now, data); + } + + // When we see the MISSED_MARKER the manager has given up on this timer + // and expects the handler to call "resume". 
+ // + // However, it may not have reflected this into the atomic flags yet + // so make sure _dispatch_source_invoke2() sees the timer is disarmed + // + // The subsequent _dispatch_source_refs_resume() will enqueue the source + // on the manager and make the changes to `ds_timer` above visible. + _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); + os_atomic_store2o(ds, ds_pending_data, 0, relaxed); + return data; } -static inline int -_dispatch_deferred_event_find_slot(dispatch_deferred_items_t ddi, - const _dispatch_kevent_qos_s *ke) +static inline void +_dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx) { - _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; - int i; - - for (i = 0; i < ddi->ddi_nevents; i++) { - if (_dispatch_kevent_maps_to_same_knote(&events[i], ke)) { + dispatch_timer_source_refs_t dr; + dispatch_source_t ds; + uint64_t data, pending_data; + uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + + while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) { + DISPATCH_TIMER_ASSERT(dr->du_filter, ==, DISPATCH_EVFILT_TIMER, + "invalid filter"); + DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx"); + DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target"); + ds = _dispatch_source_from_refs(dr); + if (dr->dt_timer.target > now) { + // Done running timers for now. 
break; } - } - return i; -} - -static void -_dispatch_kq_deferred_update(const _dispatch_kevent_qos_s *ke) -{ - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - int slot; - - _dispatch_kevent_assert_valid_qos(ke); - if (ddi) { - if (unlikely(ddi->ddi_nevents == ddi->ddi_maxevents)) { - _dispatch_deferred_items_set(NULL); - _dispatch_kq_update_all(ddi->ddi_eventlist, ddi->ddi_nevents); - ddi->ddi_nevents = 0; - _dispatch_deferred_items_set(ddi); - } - if (ke->filter != EVFILT_USER || DISPATCH_MGR_QUEUE_DEBUG) { - _dispatch_kevent_debug("deferred", ke); + if (dr->du_fflags & DISPATCH_TIMER_AFTER) { + _dispatch_trace_timer_fire(dr, 1, 1); + _dispatch_source_merge_evt(dr, EV_ONESHOT, 1, 0, 0); + _dispatch_debug("kevent-source[%p]: fired after timer[%p]", ds, dr); + _dispatch_object_debug(ds, "%s", __func__); + continue; } - bool needs_enable = false; - slot = _dispatch_deferred_event_find_slot(ddi, ke); - if (slot == ddi->ddi_nevents) { - ddi->ddi_nevents++; - } else if (ke->flags & EV_DELETE) { - // when deleting and an enable is pending, - // we must merge EV_ENABLE to do an immediate deletion - needs_enable = (ddi->ddi_eventlist[slot].flags & EV_ENABLE); + + data = os_atomic_load2o(ds, ds_pending_data, relaxed); + if (unlikely(data)) { + // the release barrier is required to make the changes + // to `ds_timer` visible to _dispatch_source_timer_data() + if (os_atomic_cmpxchg2o(ds, ds_pending_data, data, + data | DISPATCH_TIMER_MISSED_MARKER, release)) { + _dispatch_timers_update(dr, DISPATCH_TIMERS_UNREGISTER); + continue; + } } - ddi->ddi_eventlist[slot] = *ke; - if (needs_enable) { - ddi->ddi_eventlist[slot].flags |= EV_ENABLE; + + data = _dispatch_source_timer_compute_missed(dr, now, 0); + _dispatch_timers_update(dr, DISPATCH_TIMERS_RETAIN_2); + pending_data = data << 1; + if (!_dispatch_unote_registered(dr) && dr->dt_timer.target < INT64_MAX){ + // if we unregistered because of suspension we have to fake we + // missed events. 
+ pending_data |= DISPATCH_TIMER_MISSED_MARKER; + os_atomic_store2o(ds, ds_pending_data, pending_data, release); + } else { + os_atomic_store2o(ds, ds_pending_data, pending_data, relaxed); } - } else { - _dispatch_kq_update_one(ke); + _dispatch_trace_timer_fire(dr, data, data); + _dispatch_debug("kevent-source[%p]: fired timer[%p]", ds, dr); + _dispatch_object_debug(ds, "%s", __func__); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2); } } -static long -_dispatch_kq_immediate_update(_dispatch_kevent_qos_s *ke) +DISPATCH_NOINLINE +static void +_dispatch_timers_run(dispatch_clock_now_cache_t nows) { - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - int slot, last; - - _dispatch_kevent_assert_valid_qos(ke); - if (ddi) { - _dispatch_kevent_qos_s *events = ddi->ddi_eventlist; - slot = _dispatch_deferred_event_find_slot(ddi, ke); - if (slot < ddi->ddi_nevents) { - // when deleting and an enable is pending, - // we must merge EV_ENABLE to do an immediate deletion - if ((ke->flags & EV_DELETE) && (events[slot].flags & EV_ENABLE)) { - ke->flags |= EV_ENABLE; - } - last = --ddi->ddi_nevents; - if (slot != last) { - events[slot] = events[last]; - } + uint32_t tidx; + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (_dispatch_timers_heap[tidx].dth_count) { + _dispatch_timers_run2(nows, tidx); } } - return _dispatch_kq_update_one(ke); } -#pragma mark - -#pragma mark dispatch_mgr +#if DISPATCH_HAVE_TIMER_COALESCING +#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ + [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC -DISPATCH_NOINLINE -static void -_dispatch_mgr_queue_poke(dispatch_queue_t dq DISPATCH_UNUSED, - pthread_priority_t pp DISPATCH_UNUSED) +static const uint64_t _dispatch_kevent_coalescing_window[] = { + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), 
+#endif +}; +#endif // DISPATCH_HAVE_TIMER_COALESCING + +static inline dispatch_timer_delay_s +_dispatch_timers_get_delay(dispatch_timer_heap_t dth, dispatch_clock_t clock, + uint32_t qos, dispatch_clock_now_cache_t nows) { - static const _dispatch_kevent_qos_s kev = { - .ident = 1, - .filter = EVFILT_USER, - .fflags = NOTE_TRIGGER, - }; + uint64_t target = dth->dth_target, deadline = dth->dth_deadline; + uint64_t delta = INT64_MAX, dldelta = INT64_MAX; + dispatch_timer_delay_s rc; -#if DISPATCH_DEBUG && DISPATCH_MGR_QUEUE_DEBUG - _dispatch_debug("waking up the dispatch manager queue: %p", dq); + dispatch_assert(target <= deadline); + if (delta == 0 || target >= INT64_MAX) { + goto done; + } + + if (qos < DISPATCH_TIMER_QOS_COUNT && dth->dth_count > 2) { +#if DISPATCH_HAVE_TIMER_COALESCING + // Timer pre-coalescing + // When we have several timers with this target/deadline bracket: + // + // Target window Deadline + // V <-------V + // t1: [...........|.................] + // t2: [......|.......] + // t3: [..|..........] + // t4: | [.............] + // ^ + // Optimal Target + // + // Coalescing works better if the Target is delayed to "Optimal", by + // picking the latest target that isn't too close to the deadline. 
+ uint64_t window = _dispatch_kevent_coalescing_window[qos]; + if (target + window < deadline) { + uint64_t latest = deadline - window; + target = _dispatch_timer_heap_max_target_before(dth, latest); + } #endif - _dispatch_kq_deferred_update(&kev); -} + } -void -_dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags) -{ - if (flags & DISPATCH_WAKEUP_FLUSH) { - os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + uint64_t now = _dispatch_time_now_cached(clock, nows); + if (target <= now) { + delta = 0; + dldelta = 0; + goto done; } - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { - return; + uint64_t tmp = target - now; + if (clock != DISPATCH_CLOCK_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < delta) { + delta = tmp; } - if (!_dispatch_queue_class_probe(&_dispatch_mgr_q)) { - return; + tmp = deadline - now; + if (clock != DISPATCH_CLOCK_WALL) { + tmp = _dispatch_time_mach2nano(tmp); + } + if (tmp < dldelta) { + dldelta = tmp; } - _dispatch_mgr_queue_poke(dq, pp); +done: + rc.delay = delta; + rc.leeway = delta < INT64_MAX ? 
dldelta - delta : INT64_MAX; + return rc; } -DISPATCH_NOINLINE -static void -_dispatch_event_init(void) +static bool +_dispatch_timers_program2(dispatch_clock_now_cache_t nows, uint32_t tidx) { - _dispatch_kevent_init(); - _dispatch_timers_init(); -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - _dispatch_mach_recv_msg_buf_init(); -#endif - _dispatch_memorypressure_init(); - _voucher_activity_debug_channel_init(); + uint32_t qos = DISPATCH_TIMER_QOS(tidx); + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; + dispatch_timer_delay_s range; + + range = _dispatch_timers_get_delay(heap, clock, qos, nows); + if (range.delay == 0 || range.delay >= INT64_MAX) { + _dispatch_trace_next_timer_set(NULL, qos); + if (heap->dth_flags & DTH_ARMED) { + _dispatch_event_loop_timer_delete(tidx); + } + return range.delay == 0; + } + + _dispatch_trace_next_timer_set(heap->dth_min[DTH_TARGET_ID], qos); + _dispatch_trace_next_timer_program(range.delay, qos); + _dispatch_event_loop_timer_arm(tidx, range, nows); + return false; } -#if DISPATCH_USE_MGR_THREAD DISPATCH_NOINLINE -static void -_dispatch_mgr_init(void) +static bool +_dispatch_timers_program(dispatch_clock_now_cache_t nows) { - uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - _dispatch_queue_set_current(&_dispatch_mgr_q); - if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, - DISPATCH_INVOKE_STEALING, NULL) != owned) { - DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); + bool poll = false; + uint32_t tidx, timerm = _dispatch_timers_processing_mask; + + for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { + if (timerm & (1 << tidx)) { + poll |= _dispatch_timers_program2(nows, tidx); + } } - _dispatch_mgr_priority_init(); - _dispatch_event_init(); + return poll; } DISPATCH_NOINLINE static bool -_dispatch_mgr_wait_for_event(dispatch_deferred_items_t ddi, bool poll) +_dispatch_timers_configure(void) { - int r; - 
dispatch_assert((size_t)ddi->ddi_maxevents < countof(ddi->ddi_eventlist)); - -retry: - r = kevent_qos(_dispatch_kq, ddi->ddi_eventlist, ddi->ddi_nevents, - ddi->ddi_eventlist + ddi->ddi_maxevents, 1, NULL, NULL, - poll ? KEVENT_FLAG_IMMEDIATE : KEVENT_FLAG_NONE); - if (slowpath(r == -1)) { - int err = errno; - switch (err) { - case EINTR: - goto retry; - case EBADF: - DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); - break; - default: - (void)dispatch_assume_zero(err); - break; + // Find out if there is a new target/deadline on the timer lists + return _dispatch_timer_heap_has_new_min(_dispatch_timers_heap, + countof(_dispatch_timers_heap), _dispatch_timers_processing_mask); +} + +static inline bool +_dispatch_mgr_timers(void) +{ + dispatch_clock_now_cache_s nows = { }; + bool expired = _dispatch_timers_expired; + if (unlikely(expired)) { + _dispatch_timers_run(&nows); + } + _dispatch_mgr_trace_timers_wakes(); + bool reconfigure = _dispatch_timers_reconfigure; + if (unlikely(reconfigure || expired)) { + if (reconfigure) { + reconfigure = _dispatch_timers_configure(); + _dispatch_timers_reconfigure = false; + } + if (reconfigure || expired) { + expired = _dispatch_timers_expired = _dispatch_timers_program(&nows); + } + _dispatch_timers_processing_mask = 0; + } + return expired; +} + +#pragma mark - +#pragma mark dispatch_mgr + +void +_dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + DISPATCH_UNUSED dispatch_qos_t qos) +{ + uint64_t dq_state; + _dispatch_trace_continuation_push(dq, dou._do); + if (unlikely(_dispatch_queue_push_update_tail(dq, dou._do))) { + _dispatch_queue_push_update_head(dq, dou._do); + dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + if (!_dq_state_drain_locked_by_self(dq_state)) { + _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); } } - ddi->ddi_nevents = 0; - return r > 0; } +DISPATCH_NORETURN +void +_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_queue_t dq, + 
DISPATCH_UNUSED dispatch_qos_t qos, + DISPATCH_UNUSED dispatch_wakeup_flags_t flags) +{ + DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager"); +} + +#if DISPATCH_USE_MGR_THREAD DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_mgr_invoke(void) { - dispatch_deferred_items_s ddi; +#if DISPATCH_EVENT_BACKEND_KEVENT + dispatch_kevent_s evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +#endif + dispatch_deferred_items_s ddi = { +#if DISPATCH_EVENT_BACKEND_KEVENT + .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + .ddi_eventlist = evbuf, +#endif + }; bool poll; - ddi.ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; - ddi.ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - ddi.ddi_nevents = 0; - ddi.ddi_maxevents = 1; - _dispatch_deferred_items_set(&ddi); - for (;;) { _dispatch_mgr_queue_drain(); poll = _dispatch_mgr_timers(); poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - if (_dispatch_mgr_wait_for_event(&ddi, poll)) { - _dispatch_kevent_qos_s *ke = ddi.ddi_eventlist + ddi.ddi_maxevents; - _dispatch_kevent_debug("received", ke); - _dispatch_kevent_drain(ke); - } + _dispatch_event_loop_drain(poll ? 
KEVENT_FLAG_IMMEDIATE : 0); } } #endif // DISPATCH_USE_MGR_THREAD @@ -3000,6 +2334,7 @@ _dispatch_mgr_invoke(void) DISPATCH_NORETURN void _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, + dispatch_invoke_context_t dic DISPATCH_UNUSED, dispatch_invoke_flags_t flags DISPATCH_UNUSED) { #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -3009,7 +2344,9 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, } #endif #if DISPATCH_USE_MGR_THREAD - _dispatch_mgr_init(); + _dispatch_queue_set_current(&_dispatch_mgr_q); + _dispatch_mgr_priority_init(); + _dispatch_queue_mgr_lock(&_dispatch_mgr_q); // never returns, so burn bridges behind us & clear stack 2k ahead _dispatch_clear_stack(2048); _dispatch_mgr_invoke(); @@ -3018,18 +2355,19 @@ _dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, #if DISPATCH_USE_KEVENT_WORKQUEUE -#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((pthread_priority_t)(~0ul)) +#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((dispatch_priority_t)~0u) + +_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) +static inline dispatch_priority_t +_dispatch_wlh_worker_thread_init(dispatch_wlh_t wlh, + dispatch_deferred_items_t ddi) { - uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - - ddi->ddi_magic = DISPATCH_DEFERRED_ITEMS_MAGIC; - ddi->ddi_nevents = 0; - ddi->ddi_maxevents = countof(ddi->ddi_eventlist); - ddi->ddi_stashed_pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + dispatch_assert(wlh); + dispatch_priority_t old_dbp; pthread_priority_t pp = _dispatch_get_priority(); if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { @@ -3040,10 +2378,20 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind pp &= 
_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; - pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + if (wlh == DISPATCH_WLH_ANON) { + pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } else { + // pthread sets the flag when it is an event delivery thread + // so we need to explicitly clear it + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } _dispatch_thread_setspecific(dispatch_priority_key, - (void *)(uintptr_t)pp); - ddi->ddi_stashed_pp = 0; + (void *)(uintptr_t)pp); + if (wlh != DISPATCH_WLH_ANON) { + _dispatch_debug("wlh[%p]: handling events", wlh); + } else { + ddi->ddi_can_stash = true; + } return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER; } @@ -3070,3766 +2418,136 @@ _dispatch_kevent_worker_thread_init(dispatch_deferred_items_t ddi) _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); // ensure kevents registered from this thread are registered at manager QoS - pthread_priority_t old_dp = _dispatch_set_defaultpriority( - (pthread_priority_t)_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, NULL); + old_dbp = _dispatch_set_basepri(DISPATCH_PRIORITY_FLAG_MANAGER); _dispatch_queue_set_current(&_dispatch_mgr_q); - if (_dispatch_queue_drain_try_lock(&_dispatch_mgr_q, - DISPATCH_INVOKE_STEALING, NULL) != owned) { - DISPATCH_INTERNAL_CRASH(0, "Locking the manager should not fail"); - } - static int event_thread_init; - if (!event_thread_init) { - event_thread_init = 1; - _dispatch_event_init(); - } - return old_dp; + _dispatch_queue_mgr_lock(&_dispatch_mgr_q); + return old_dbp; } DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_kevent_worker_thread_reset(pthread_priority_t old_dp) +_dispatch_wlh_worker_thread_reset(dispatch_priority_t old_dbp) { - dispatch_queue_t dq = &_dispatch_mgr_q; - uint64_t orig_dq_state; - - _dispatch_queue_drain_unlock(dq, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED, - &orig_dq_state); - _dispatch_reset_defaultpriority(old_dp); + bool needs_poll = 
_dispatch_queue_mgr_unlock(&_dispatch_mgr_q); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); _dispatch_queue_set_current(NULL); - return _dq_state_is_dirty(orig_dq_state); + return needs_poll; } -DISPATCH_NOINLINE -void -_dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, int *nevents) +DISPATCH_ALWAYS_INLINE +static void +_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, + int *nevents) { _dispatch_introspection_thread_add(); + DISPATCH_PERF_MON_VAR_INIT - if (!events && !nevents) { - // events for worker thread request have already been delivered earlier - return; - } - - _dispatch_kevent_qos_s *ke = *events; - int n = *nevents; - if (!dispatch_assume(n) || !dispatch_assume(*events)) return; - - dispatch_deferred_items_s ddi; - pthread_priority_t old_dp = _dispatch_kevent_worker_thread_init(&ddi); + dispatch_deferred_items_s ddi = { + .ddi_eventlist = events, + }; + dispatch_priority_t old_dbp; - _dispatch_deferred_items_set(&ddi); - for (int i = 0; i < n; i++) { - _dispatch_kevent_debug("received", ke); - _dispatch_kevent_drain(ke++); + old_dbp = _dispatch_wlh_worker_thread_init(wlh, &ddi); + if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + _dispatch_perfmon_start_impl(true); + } else { + dispatch_assert(wlh == DISPATCH_WLH_ANON); + wlh = DISPATCH_WLH_ANON; } + _dispatch_deferred_items_set(&ddi); + _dispatch_event_loop_merge(events, *nevents); - if (old_dp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { + if (old_dbp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { _dispatch_mgr_queue_drain(); bool poll = _dispatch_mgr_timers(); - if (_dispatch_kevent_worker_thread_reset(old_dp)) { + if (_dispatch_wlh_worker_thread_reset(old_dbp)) { poll = true; } - if (poll) _dispatch_mgr_queue_poke(&_dispatch_mgr_q, 0); - } - _dispatch_deferred_items_set(NULL); - - if (ddi.ddi_stashed_pp & _PTHREAD_PRIORITY_PRIORITY_MASK) { - *nevents = 0; - if (ddi.ddi_nevents) { - _dispatch_kq_update_all(ddi.ddi_eventlist, 
ddi.ddi_nevents); - } - ddi.ddi_stashed_pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return _dispatch_root_queue_drain_deferred_item(ddi.ddi_stashed_dq, - ddi.ddi_stashed_dou, ddi.ddi_stashed_pp); -#ifndef WORKQ_KEVENT_EVENT_BUFFER_LEN - } else if (ddi.ddi_nevents > *nevents) { - *nevents = 0; - _dispatch_kq_update_all(ddi.ddi_eventlist, ddi.ddi_nevents); -#endif - } else { - *nevents = ddi.ddi_nevents; - dispatch_static_assert(__builtin_types_compatible_p(typeof(**events), - typeof(*ddi.ddi_eventlist))); - memcpy(*events, ddi.ddi_eventlist, - (size_t)ddi.ddi_nevents * sizeof(*ddi.ddi_eventlist)); - } -} -#endif // DISPATCH_USE_KEVENT_WORKQUEUE - -#pragma mark - -#pragma mark dispatch_memorypressure - -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_MEMORYPRESSURE -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ - DISPATCH_MEMORYPRESSURE_NORMAL | \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) -#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL) -#elif DISPATCH_USE_VM_PRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_TYPE DISPATCH_SOURCE_TYPE_VM -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK DISPATCH_VM_PRESSURE -#endif - -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE -static dispatch_source_t _dispatch_memorypressure_source; - -static void -_dispatch_memorypressure_handler(void *context DISPATCH_UNUSED) -{ -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE - unsigned long memorypressure; - memorypressure = dispatch_source_get_data(_dispatch_memorypressure_source); - - if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { - _dispatch_memory_warn = false; - _dispatch_continuation_cache_limit = 
DISPATCH_CONTINUATION_CACHE_LIMIT; -#if VOUCHER_USE_MACH_VOUCHER - if (_firehose_task_buffer) { - firehose_buffer_clear_bank_flags(_firehose_task_buffer, - FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); - } -#endif - } - if (memorypressure & DISPATCH_MEMORYPRESSURE_WARN) { - _dispatch_memory_warn = true; - _dispatch_continuation_cache_limit = - DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN; -#if VOUCHER_USE_MACH_VOUCHER - if (_firehose_task_buffer) { - firehose_buffer_set_bank_flags(_firehose_task_buffer, - FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY); + if (poll) _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); + } else if (ddi.ddi_stashed_dou._do) { + _dispatch_debug("wlh[%p]: draining deferred item %p", wlh, + ddi.ddi_stashed_dou._do); + if (wlh == DISPATCH_WLH_ANON) { + dispatch_assert(ddi.ddi_nevents == 0); + _dispatch_deferred_items_set(NULL); + _dispatch_root_queue_drain_deferred_item(&ddi + DISPATCH_PERF_MON_ARGS); + } else { + _dispatch_root_queue_drain_deferred_wlh(&ddi + DISPATCH_PERF_MON_ARGS); } -#endif } - if (memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK) { - malloc_memory_event_handler(memorypressure & DISPATCH_MEMORYPRESSURE_MALLOC_MASK); - } -#elif DISPATCH_USE_VM_PRESSURE_SOURCE - // we must have gotten DISPATCH_VM_PRESSURE - malloc_zone_pressure_relief(0,0); -#endif -} - -static void -_dispatch_memorypressure_init(void) -{ - _dispatch_memorypressure_source = dispatch_source_create( - DISPATCH_MEMORYPRESSURE_SOURCE_TYPE, 0, - DISPATCH_MEMORYPRESSURE_SOURCE_MASK, - _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true)); - dispatch_source_set_event_handler_f(_dispatch_memorypressure_source, - _dispatch_memorypressure_handler); - dispatch_activate(_dispatch_memorypressure_source); -} -#else -static inline void _dispatch_memorypressure_init(void) {} -#endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE || DISPATCH_USE_VM_PRESSURE_SOURCE - -#pragma mark - -#pragma mark dispatch_mach - -#if HAVE_MACH - -#if DISPATCH_DEBUG && 
DISPATCH_MACHPORT_DEBUG -#define _dispatch_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_debug_machport(name) ((void)(name)) -#endif - -// Flags for all notifications that are registered/unregistered when a -// send-possible notification is requested/delivered -#define _DISPATCH_MACH_SP_FLAGS (DISPATCH_MACH_SEND_POSSIBLE| \ - DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_DELETED) -#define _DISPATCH_MACH_RECV_FLAGS (DISPATCH_MACH_RECV_MESSAGE| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) -#define _DISPATCH_MACH_RECV_DIRECT_FLAGS ( \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT| \ - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE) - -#define _DISPATCH_IS_POWER_OF_TWO(v) (!(v & (v - 1)) && v) -#define _DISPATCH_HASH(x, y) (_DISPATCH_IS_POWER_OF_TWO(y) ? \ - (MACH_PORT_INDEX(x) & ((y) - 1)) : (MACH_PORT_INDEX(x) % (y))) - -#define _DISPATCH_MACHPORT_HASH_SIZE 32 -#define _DISPATCH_MACHPORT_HASH(x) \ - _DISPATCH_HASH((x), _DISPATCH_MACHPORT_HASH_SIZE) - -#ifndef MACH_RCV_VOUCHER -#define MACH_RCV_VOUCHER 0x00000800 -#endif -#define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX -#define DISPATCH_MACH_RCV_OPTIONS ( \ - MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ - MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)) | \ - MACH_RCV_VOUCHER - -#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->dk_kevent.ext[0]) - -static void _dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr); -static void _dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr); -static void _dispatch_source_merge_mach_msg(dispatch_source_t ds, - dispatch_source_refs_t dr, dispatch_kevent_t dk, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, - mach_msg_size_t siz); -static kern_return_t _dispatch_mach_notify_update(dispatch_kevent_t dk, - uint32_t new_flags, uint32_t del_flags, uint32_t mask, - 
mach_msg_id_t notify_msgid, mach_port_mscount_t notify_sync); -static void _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr); -static void _dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options); -static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); -static void _dispatch_mach_msg_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, _dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr, mach_msg_size_t siz); -static void _dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, - const _dispatch_kevent_qos_s *ke); -static inline mach_msg_option_t _dispatch_mach_checkin_options(void); - -static const size_t _dispatch_mach_recv_msg_size = - DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE; -static const size_t dispatch_mach_trailer_size = - sizeof(dispatch_mach_trailer_t); -static mach_port_t _dispatch_mach_notify_port; -static dispatch_source_t _dispatch_mach_notify_source; - -static inline void* -_dispatch_kevent_mach_msg_buf(_dispatch_kevent_qos_s *ke) -{ - return (void*)ke->ext[0]; -} - -static inline mach_msg_size_t -_dispatch_kevent_mach_msg_size(_dispatch_kevent_qos_s *ke) -{ - // buffer size in the successful receive case, but message size (like - // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. 
- return (mach_msg_size_t)ke->ext[1]; -} - -static void -_dispatch_source_type_mach_recv_direct_init(dispatch_source_t ds, - dispatch_source_type_t type DISPATCH_UNUSED, - uintptr_t handle DISPATCH_UNUSED, - unsigned long mask DISPATCH_UNUSED, - dispatch_queue_t q DISPATCH_UNUSED) -{ - ds->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (_dispatch_evfilt_machport_direct_enabled) return; - ds->ds_dkev->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - ds->ds_dkev->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - ds->ds_is_direct_kevent = false; -#endif -} - -static const -struct dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { - .ke = { - .filter = EVFILT_MACHPORT, - .flags = EV_VANISHED|EV_DISPATCH|EV_UDATA_SPECIFIC, - .fflags = DISPATCH_MACH_RCV_OPTIONS, - }, - .init = _dispatch_source_type_mach_recv_direct_init, -}; - -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static mach_port_t _dispatch_mach_portset, _dispatch_mach_recv_portset; -static _dispatch_kevent_qos_s _dispatch_mach_recv_kevent = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RCV_OPTIONS, -}; -static void -_dispatch_mach_recv_msg_buf_init(void) -{ - if (_dispatch_evfilt_machport_direct_enabled) return; - mach_vm_size_t vm_size = mach_vm_round_page( - _dispatch_mach_recv_msg_size + dispatch_mach_trailer_size); - mach_vm_address_t vm_addr = vm_page_size; - kern_return_t kr; - - while (slowpath(kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, - VM_FLAGS_ANYWHERE))) { - if (kr != KERN_NO_SPACE) { - DISPATCH_CLIENT_CRASH(kr, - "Could not allocate mach msg receive buffer"); - } - _dispatch_temporary_resource_shortage(); - vm_addr = vm_page_size; + _dispatch_deferred_items_set(NULL); + if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER && + !ddi.ddi_stashed_dou._do) { + _dispatch_perfmon_end(perfmon_thread_event_no_steal); } - 
_dispatch_mach_recv_kevent.ext[0] = (uintptr_t)vm_addr; - _dispatch_mach_recv_kevent.ext[1] = vm_size; + _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); + *nevents = ddi.ddi_nevents; } -#endif DISPATCH_NOINLINE -static void -_dispatch_source_merge_mach_msg_direct(dispatch_source_t ds, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr) +void +_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) { - dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); - dispatch_queue_t cq = _dispatch_queue_get_current(); - - // see firehose_client_push_notify_async - _dispatch_queue_set_current(ds->_as_dq); - dc->dc_func(hdr); - _dispatch_queue_set_current(cq); - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); + if (!events && !nevents) { + // events for worker thread request have already been delivered earlier + return; } + if (!dispatch_assume(*nevents && *events)) return; + _dispatch_adopt_wlh_anon(); + _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); + _dispatch_reset_wlh(); } -dispatch_source_t -_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc) -{ - dispatch_source_t ds; - ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, - recvp, 0, &_dispatch_mgr_q); - os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], - (dispatch_continuation_t)dc, relaxed); - return ds; -} - -static void -_dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) -{ - kern_return_t kr; -#if HAVE_MACH_PORT_CONSTRUCT - mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; -#ifdef __LP64__ - const mach_port_context_t guard = 0xfeed09071f1ca7edull; -#else - const mach_port_context_t guard = 0xff1ca7edull; -#endif - kr = mach_port_construct(mach_task_self(), &opts, guard, - &_dispatch_mach_notify_port); -#else - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_mach_notify_port); 
-#endif - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_construct() failed: cannot create receive right"); - } - - static const struct dispatch_continuation_s dc = { - .dc_func = (void*)_dispatch_mach_notify_source_invoke, - }; - _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( - _dispatch_mach_notify_port, &dc); - dispatch_assert(_dispatch_mach_notify_source); - dispatch_activate(_dispatch_mach_notify_source); -} -static mach_port_t -_dispatch_get_mach_notify_port(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); - return _dispatch_mach_notify_port; -} +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#pragma mark - +#pragma mark dispatch_source_debug -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -static void -_dispatch_mach_recv_portset_init(void *context DISPATCH_UNUSED) +static size_t +_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_mach_recv_portset); - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_allocate() failed: cannot create port set"); - } - _dispatch_kevent_qos_s *ke = &_dispatch_mach_recv_kevent; - dispatch_assert(_dispatch_kevent_mach_msg_buf(ke)); - dispatch_assert(dispatch_mach_trailer_size == - REQUESTED_TRAILER_SIZE_NATIVE(MACH_RCV_TRAILER_ELEMENTS( - DISPATCH_MACH_RCV_TRAILER))); - ke->ident = _dispatch_mach_recv_portset; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - ke->qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } -#endif - _dispatch_kq_immediate_update(&_dispatch_mach_recv_kevent); + dispatch_queue_t target = ds->do_targetq; + dispatch_source_refs_t dr = ds->ds_refs; + return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%x, " + "mask = 0x%x, pending_data = 0x%llx, registered = %d, " + "armed = %d, deleted = 
%d%s, canceled = %d, ", + target && target->dq_label ? target->dq_label : "", target, + dr->du_ident, dr->du_fflags, (unsigned long long)ds->ds_pending_data, + ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), + (bool)(ds->dq_atomic_flags & DSF_DELETED), + (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "", + (bool)(ds->dq_atomic_flags & DSF_CANCELED)); } -static mach_port_t -_dispatch_get_mach_recv_portset(void) +static size_t +_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_recv_portset_init); - return _dispatch_mach_recv_portset; + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; + return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" + ", interval = 0x%llx, flags = 0x%x }, ", + (unsigned long long)dr->dt_timer.target, + (unsigned long long)dr->dt_timer.deadline, + (unsigned long long)dr->dt_timer.interval, dr->du_fflags); } -static void -_dispatch_mach_portset_init(void *context DISPATCH_UNUSED) +size_t +_dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz) { - _dispatch_kevent_qos_s kev = { - .filter = EVFILT_MACHPORT, - .flags = EV_ADD, - }; -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } -#endif - - kern_return_t kr; - - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, - &_dispatch_mach_portset); - DISPATCH_VERIFY_MIG(kr); - if (slowpath(kr)) { - DISPATCH_CLIENT_CRASH(kr, - "mach_port_allocate() failed: cannot create port set"); + dispatch_source_refs_t dr = ds->ds_refs; + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dx_kind(ds), ds); + offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); + offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); + if (dr->du_is_timer) { + offset += 
_dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); } - kev.ident = _dispatch_mach_portset; - _dispatch_kq_immediate_update(&kev); + offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " + "filter = %s }", dr, dr->du_is_direct ? " (direct)" : "", + dr->du_type->dst_kind); + return offset; } - -static mach_port_t -_dispatch_get_mach_portset(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_portset_init); - return _dispatch_mach_portset; -} - -static kern_return_t -_dispatch_mach_portset_update(dispatch_kevent_t dk, mach_port_t mps) -{ - mach_port_t mp = (mach_port_t)dk->dk_kevent.ident; - kern_return_t kr; - - _dispatch_debug_machport(mp); - kr = mach_port_move_member(mach_task_self(), mp, mps); - if (slowpath(kr)) { - DISPATCH_VERIFY_MIG(kr); - switch (kr) { - case KERN_INVALID_RIGHT: - if (mps) { - _dispatch_bug_mach_client("_dispatch_kevent_machport_enable: " - "mach_port_move_member() failed ", kr); - break; - } - //fall through - case KERN_INVALID_NAME: -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach receive right 0x%x destroyed " - "prematurely", mp); -#endif - break; - default: - (void)dispatch_assume_zero(kr); - break; - } - } - return mps ? 
kr : 0; -} - -static kern_return_t -_dispatch_kevent_machport_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) -{ - kern_return_t kr = 0; - dispatch_assert_zero(new_flags & del_flags); - if ((new_flags & _DISPATCH_MACH_RECV_FLAGS) || - (del_flags & _DISPATCH_MACH_RECV_FLAGS)) { - mach_port_t mps; - if (new_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - mps = _dispatch_get_mach_recv_portset(); - } else if ((new_flags & DISPATCH_MACH_RECV_MESSAGE) || - ((del_flags & _DISPATCH_MACH_RECV_DIRECT_FLAGS) && - (dk->dk_kevent.fflags & DISPATCH_MACH_RECV_MESSAGE))) { - mps = _dispatch_get_mach_portset(); - } else { - mps = MACH_PORT_NULL; - } - kr = _dispatch_mach_portset_update(dk, mps); - } - return kr; -} -#endif // DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - -static kern_return_t -_dispatch_kevent_mach_notify_resume(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags) -{ - kern_return_t kr = 0; - dispatch_assert_zero(new_flags & del_flags); - if ((new_flags & _DISPATCH_MACH_SP_FLAGS) || - (del_flags & _DISPATCH_MACH_SP_FLAGS)) { - // Requesting a (delayed) non-sync send-possible notification - // registers for both immediate dead-name notification and delayed-arm - // send-possible notification for the port. - // The send-possible notification is armed when a mach_msg() with the - // the MACH_SEND_NOTIFY to the port times out. - // If send-possible is unavailable, fall back to immediate dead-name - // registration rdar://problem/2527840&9008724 - kr = _dispatch_mach_notify_update(dk, new_flags, del_flags, - _DISPATCH_MACH_SP_FLAGS, MACH_NOTIFY_SEND_POSSIBLE, - MACH_NOTIFY_SEND_POSSIBLE == MACH_NOTIFY_DEAD_NAME ? 
1 : 0); - } - return kr; -} - -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK -DISPATCH_NOINLINE -static void -_dispatch_kevent_machport_drain(_dispatch_kevent_qos_s *ke) -{ - mach_port_t name = (mach_port_name_t)ke->data; - dispatch_kevent_t dk; - - _dispatch_debug_machport(name); - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - if (!dispatch_assume(dk)) { - return; - } - _dispatch_mach_portset_update(dk, MACH_PORT_NULL); // emulate EV_DISPATCH - - _dispatch_kevent_qos_s kev = { - .ident = name, - .filter = EVFILT_MACHPORT, - .flags = EV_ADD|EV_ENABLE|EV_DISPATCH, - .fflags = DISPATCH_MACH_RECV_MESSAGE, - .udata = (uintptr_t)dk, - }; - _dispatch_kevent_debug("synthetic", &kev); - _dispatch_kevent_merge(&kev); -} -#endif - -DISPATCH_NOINLINE -static void -_dispatch_kevent_mach_msg_drain(_dispatch_kevent_qos_s *ke) -{ - mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); - mach_msg_size_t siz; - mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - - if (!fastpath(hdr)) { - DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); - } - if (fastpath(!kr)) { - _dispatch_kevent_mach_msg_recv(ke, hdr); - goto out; - } else if (kr != MACH_RCV_TOO_LARGE) { - goto out; - } else if (!ke->data) { - DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); - } - if (slowpath(ke->ext[1] > (UINT_MAX - dispatch_mach_trailer_size))) { - DISPATCH_INTERNAL_CRASH(ke->ext[1], - "EVFILT_MACHPORT with overlarge message"); - } - siz = _dispatch_kevent_mach_msg_size(ke) + dispatch_mach_trailer_size; - hdr = malloc(siz); - if (!dispatch_assume(hdr)) { - // Kernel will discard message too large to fit - hdr = NULL; - siz = 0; - } - mach_port_t name = (mach_port_name_t)ke->data; - const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (fastpath(!kr)) { - _dispatch_kevent_mach_msg_recv(ke, hdr); - goto out; - } else if 
(kr == MACH_RCV_TOO_LARGE) { - _dispatch_log("BUG in libdispatch client: " - "_dispatch_kevent_mach_msg_drain: dropped message too " - "large to fit in memory: id = 0x%x, size = %u", - hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); - kr = MACH_MSG_SUCCESS; - } - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); - } -out: - if (slowpath(kr)) { - _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " - "message reception failed", kr); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_kevent_merge(_dispatch_kevent_qos_s *ke) -{ - if (unlikely(!(ke->flags & EV_UDATA_SPECIFIC))) { -#if DISPATCH_EVFILT_MACHPORT_PORTSET_FALLBACK - if (ke->ident == _dispatch_mach_recv_portset) { - _dispatch_kevent_mach_msg_drain(ke); - return _dispatch_kq_deferred_update(&_dispatch_mach_recv_kevent); - } else if (ke->ident == _dispatch_mach_portset) { - return _dispatch_kevent_machport_drain(ke); - } -#endif - return _dispatch_kevent_error(ke); - } - - dispatch_kevent_t dk = (dispatch_kevent_t)ke->udata; - dispatch_source_refs_t dr = TAILQ_FIRST(&dk->dk_sources); - bool is_reply = (dk->dk_kevent.flags & EV_ONESHOT); - dispatch_source_t ds = _dispatch_source_from_refs(dr); - - if (_dispatch_kevent_mach_msg_size(ke)) { - _dispatch_kevent_mach_msg_drain(ke); - if (is_reply) { - // _dispatch_kevent_mach_msg_drain() should have deleted this event - dispatch_assert(ke->flags & EV_DELETE); - return; - } - - if (!(ds->dq_atomic_flags & DSF_CANCELED)) { - // re-arm the mach channel - ke->fflags = DISPATCH_MACH_RCV_OPTIONS; - ke->data = 0; - ke->ext[0] = 0; - ke->ext[1] = 0; - return _dispatch_kq_deferred_update(ke); - } - } else if (is_reply) { - DISPATCH_INTERNAL_CRASH(ke->flags, "Unexpected EVFILT_MACHPORT event"); - } - if (unlikely((ke->flags & EV_VANISHED) && - (dx_type(ds) == DISPATCH_MACH_CHANNEL_TYPE))) { - DISPATCH_CLIENT_CRASH(ke->flags, - "Unexpected EV_VANISHED (do not destroy random mach ports)"); - } - return _dispatch_kevent_merge(ke); -} - -static void 
-_dispatch_kevent_mach_msg_recv(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr) -{ - dispatch_source_refs_t dri; - dispatch_kevent_t dk; - mach_port_t name = hdr->msgh_local_port; - mach_msg_size_t siz = hdr->msgh_size + dispatch_mach_trailer_size; - - if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - - dispatch_mach_trailer_size)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received overlarge message"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - if (!dispatch_assume(name)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with MACH_PORT_NULL port"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - _dispatch_debug_machport(name); - if (ke->flags & EV_UDATA_SPECIFIC) { - dk = (void*)ke->udata; - } else { - dk = _dispatch_kevent_find(name, EVFILT_MACHPORT); - } - if (!dispatch_assume(dk)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with unknown kevent"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - TAILQ_FOREACH(dri, &dk->dk_sources, dr_list) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - if (dsi->ds_pending_data_mask & _DISPATCH_MACH_RECV_DIRECT_FLAGS) { - return _dispatch_source_merge_mach_msg(dsi, dri, dk, ke, hdr, siz); - } - } - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with no listeners"); - return _dispatch_kevent_mach_msg_destroy(ke, hdr); -} - -static void -_dispatch_kevent_mach_msg_destroy(_dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr) -{ - if (hdr) { - mach_msg_destroy(hdr); - if (hdr != _dispatch_kevent_mach_msg_buf(ke)) { - free(hdr); - } - } -} - -static void -_dispatch_source_merge_mach_msg(dispatch_source_t ds, dispatch_source_refs_t dr, - dispatch_kevent_t dk, _dispatch_kevent_qos_s *ke, - mach_msg_header_t *hdr, mach_msg_size_t siz) -{ - if (dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE) { - return _dispatch_source_merge_mach_msg_direct(ds, ke, hdr); - } - 
dispatch_mach_reply_refs_t dmr = NULL; - if (dk->dk_kevent.flags & EV_ONESHOT) { - dmr = (dispatch_mach_reply_refs_t)dr; - } - return _dispatch_mach_msg_recv((dispatch_mach_t)ds, dmr, ke, hdr, siz); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notify_merge(mach_port_t name, uint32_t flag, bool final) -{ - dispatch_source_refs_t dri, dr_next; - dispatch_kevent_t dk; - bool unreg; - - dk = _dispatch_kevent_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); - if (!dk) { - return; - } - - // Update notification registration state. - dk->dk_kevent.data &= ~_DISPATCH_MACH_SP_FLAGS; - _dispatch_kevent_qos_s kev = { - .ident = name, - .filter = DISPATCH_EVFILT_MACH_NOTIFICATION, - .flags = EV_ADD|EV_ENABLE, - .fflags = flag, - .udata = (uintptr_t)dk, - }; - if (final) { - // This can never happen again - unreg = true; - } else { - // Re-register for notification before delivery - unreg = _dispatch_kevent_resume(dk, flag, 0); - } - DISPATCH_MACH_NOTIFICATION_ARMED(dk) = 0; - TAILQ_FOREACH_SAFE(dri, &dk->dk_sources, dr_list, dr_next) { - dispatch_source_t dsi = _dispatch_source_from_refs(dri); - if (dx_type(dsi) == DISPATCH_MACH_CHANNEL_TYPE) { - dispatch_mach_t dm = (dispatch_mach_t)dsi; - _dispatch_mach_merge_notification_kevent(dm, &kev); - if (unreg && dm->dm_dkev) { - _dispatch_mach_notification_kevent_unregister(dm); - } - } else { - _dispatch_source_merge_kevent(dsi, &kev); - if (unreg) { - _dispatch_source_kevent_unregister(dsi); - } - } - if (!dr_next || DISPATCH_MACH_NOTIFICATION_ARMED(dk)) { - // current merge is last in list (dk might have been freed) - // or it re-armed the notification - return; - } - } -} - -static kern_return_t -_dispatch_mach_notify_update(dispatch_kevent_t dk, uint32_t new_flags, - uint32_t del_flags, uint32_t mask, mach_msg_id_t notify_msgid, - mach_port_mscount_t notify_sync) -{ - mach_port_t previous, port = (mach_port_t)dk->dk_kevent.ident; - typeof(dk->dk_kevent.data) prev = dk->dk_kevent.data; - kern_return_t kr, krr = 0; - - // 
Update notification registration state. - dk->dk_kevent.data |= (new_flags | dk->dk_kevent.fflags) & mask; - dk->dk_kevent.data &= ~(del_flags & mask); - - _dispatch_debug_machport(port); - if ((dk->dk_kevent.data & mask) && !(prev & mask)) { - _dispatch_debug("machport[0x%08x]: registering for send-possible " - "notification", port); - previous = MACH_PORT_NULL; - krr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, _dispatch_get_mach_notify_port(), - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(krr); - - switch(krr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - // Suppress errors & clear registration state - dk->dk_kevent.data &= ~mask; - break; - default: - // Else, we don't expect any errors from mach. Log any errors - if (dispatch_assume_zero(krr)) { - // log the error & clear registration state - dk->dk_kevent.data &= ~mask; - } else if (dispatch_assume_zero(previous)) { - // Another subsystem has beat libdispatch to requesting the - // specified Mach notification on this port. We should - // technically cache the previous port and message it when the - // kernel messages our port. Or we can just say screw those - // subsystems and deallocate the previous port. 
- // They should adopt libdispatch :-P - kr = mach_port_deallocate(mach_task_self(), previous); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - previous = MACH_PORT_NULL; - } - } - } else if (!(dk->dk_kevent.data & mask) && (prev & mask)) { - _dispatch_debug("machport[0x%08x]: unregistering for send-possible " - "notification", port); - previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), port, - notify_msgid, notify_sync, MACH_PORT_NULL, - MACH_MSG_TYPE_MOVE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - - switch (kr) { - case KERN_INVALID_NAME: - case KERN_INVALID_RIGHT: - case KERN_INVALID_ARGUMENT: - break; - default: - if (dispatch_assume_zero(kr)) { - // log the error - } - } - } else { - return 0; - } - if (slowpath(previous)) { - // the kernel has not consumed the send-once right yet - (void)dispatch_assume_zero( - _dispatch_send_consume_send_once_right(previous)); - } - return krr; -} - -static void -_dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) -{ - static int notify_type = HOST_NOTIFY_CALENDAR_SET; - kern_return_t kr; - _dispatch_debug("registering for calendar-change notification"); -retry: - kr = host_request_notification(_dispatch_get_mach_host_port(), - notify_type, _dispatch_get_mach_notify_port()); - // Fallback when missing support for newer _SET variant, fires strictly more. 
- if (kr == KERN_INVALID_ARGUMENT && - notify_type != HOST_NOTIFY_CALENDAR_CHANGE){ - notify_type = HOST_NOTIFY_CALENDAR_CHANGE; - goto retry; - } - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -} - -static void -_dispatch_mach_host_calendar_change_register(void) -{ - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); -} - -static void -_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) -{ - mig_reply_error_t reply; - dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union - __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); - dispatch_assert(sizeof(mig_reply_error_t) < _dispatch_mach_recv_msg_size); - boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); - if (!success && reply.RetCode == MIG_BAD_ID && - (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || - hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { - _dispatch_debug("calendar-change notification"); - _dispatch_timers_calendar_change(); - _dispatch_mach_host_notify_update(NULL); - success = TRUE; - reply.RetCode = KERN_SUCCESS; - } - if (dispatch_assume(success) && reply.RetCode != MIG_NO_REPLY) { - (void)dispatch_assume_zero(reply.RetCode); - } - if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { - mach_msg_destroy(hdr); - } -} - -kern_return_t -_dispatch_mach_notify_port_deleted(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ -#if DISPATCH_DEBUG - _dispatch_log("Corruption: Mach send/send-once/dead-name right 0x%x " - "deleted prematurely", name); -#endif - - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_DELETED, true); - - return KERN_SUCCESS; -} - -kern_return_t -_dispatch_mach_notify_dead_name(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - kern_return_t kr; - - _dispatch_debug("machport[0x%08x]: dead-name notification", name); - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, 
DISPATCH_MACH_SEND_DEAD, true); - - // the act of receiving a dead name notification allocates a dead-name - // right that must be deallocated - kr = mach_port_deallocate(mach_task_self(), name); - DISPATCH_VERIFY_MIG(kr); - //(void)dispatch_assume_zero(kr); - - return KERN_SUCCESS; -} - -kern_return_t -_dispatch_mach_notify_send_possible(mach_port_t notify DISPATCH_UNUSED, - mach_port_name_t name) -{ - _dispatch_debug("machport[0x%08x]: send-possible notification", name); - _dispatch_debug_machport(name); - _dispatch_mach_notify_merge(name, DISPATCH_MACH_SEND_POSSIBLE, false); - - return KERN_SUCCESS; -} - -#pragma mark - -#pragma mark dispatch_mach_t - -#define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 -#define DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 -#define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 -#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 -#define DISPATCH_MACH_OPTIONS_MASK 0xffff - -#define DM_SEND_STATUS_SUCCESS 0x1 -#define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 - -DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, - DM_SEND_INVOKE_NONE = 0x0, - DM_SEND_INVOKE_FLUSH = 0x1, - DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, - DM_SEND_INVOKE_CANCEL = 0x4, - DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, - DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, -); -#define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ - ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) - -static inline pthread_priority_t _dispatch_mach_priority_propagate( - mach_msg_option_t options); -static mach_port_t _dispatch_mach_msg_get_remote_port(dispatch_object_t dou); -static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); -static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, - mach_port_t local_port, mach_port_t remote_port); -static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port); -static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( - dispatch_object_t dou, 
dispatch_mach_reply_refs_t dmr); -static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, - dispatch_object_t dou); -static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( - dispatch_mach_msg_t dmsg); -static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, - pthread_priority_t pp); - -static dispatch_mach_t -_dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler, bool handler_is_block) -{ - dispatch_mach_t dm; - dispatch_mach_refs_t dr; - - dm = _dispatch_alloc(DISPATCH_VTABLE(mach), - sizeof(struct dispatch_mach_s)); - _dispatch_queue_init(dm->_as_dq, DQF_NONE, 1, true); - - dm->dq_label = label; - dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds - - dr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_refs_s)); - dr->dr_source_wref = _dispatch_ptr2wref(dm); - dr->dm_handler_func = handler; - dr->dm_handler_ctxt = context; - dm->ds_refs = dr; - dm->dm_handler_is_block = handler_is_block; - - dm->dm_refs = _dispatch_calloc(1ul, - sizeof(struct dispatch_mach_send_refs_s)); - dm->dm_refs->dr_source_wref = _dispatch_ptr2wref(dm); - dm->dm_refs->dm_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; - TAILQ_INIT(&dm->dm_refs->dm_replies); - - if (slowpath(!q)) { - q = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, true); - } else { - _dispatch_retain(q); - } - dm->do_targetq = q; - _dispatch_object_debug(dm, "%s", __func__); - return dm; -} - -dispatch_mach_t -dispatch_mach_create(const char *label, dispatch_queue_t q, - dispatch_mach_handler_t handler) -{ - dispatch_block_t bb = _dispatch_Block_copy((void*)handler); - return _dispatch_mach_create(label, q, bb, - (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true); -} - -dispatch_mach_t -dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler) -{ - return _dispatch_mach_create(label, q, context, handler, false); 
-} - -void -_dispatch_mach_dispose(dispatch_mach_t dm) -{ - _dispatch_object_debug(dm, "%s", __func__); - dispatch_mach_refs_t dr = dm->ds_refs; - if (dm->dm_handler_is_block && dr->dm_handler_ctxt) { - Block_release(dr->dm_handler_ctxt); - } - free(dr); - free(dm->dm_refs); - _dispatch_queue_destroy(dm->_as_dq); -} - -void -dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, - mach_port_t send, dispatch_mach_msg_t checkin) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_kevent_t dk; - uint32_t disconnect_cnt; - dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; - - dm->ds_is_direct_kevent = (bool)_dispatch_evfilt_machport_direct_enabled; - if (MACH_PORT_VALID(receive)) { - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = type->ke; - dk->dk_kevent.ident = receive; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_VANISHED; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - dm->ds_dkev = dk; - dm->ds_pending_data_mask = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - dm->ds_needs_rearm = dm->ds_is_direct_kevent; - if (!dm->ds_is_direct_kevent) { - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT; - dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - } - _dispatch_retain(dm); // the reference the manager queue holds - } - dr->dm_send = send; - if (MACH_PORT_VALID(send)) { - if (checkin) { - dispatch_retain(checkin); - checkin->dmsg_options = _dispatch_mach_checkin_options(); - dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); - } - dr->dm_checkin = checkin; - } - // monitor message reply ports - dm->ds_pending_data_mask |= DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == - DISPATCH_MACH_NEVER_INSTALLED); - disconnect_cnt = os_atomic_dec2o(dr, dm_disconnect_cnt, release); - if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { - DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); - } - 
_dispatch_object_debug(dm, "%s", __func__); - return dispatch_activate(dm); -} - -// assumes low bit of mach port names is always set -#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u - -static inline void -_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - dmr->dmr_reply &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; -} - -static inline bool -_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - mach_port_t reply_port = dmr->dmr_reply; - return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; -} - -static inline mach_port_t -_dispatch_mach_reply_get_reply_port(dispatch_mach_reply_refs_t dmr) -{ - mach_port_t reply_port = dmr->dmr_reply; - return reply_port ? (reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; -} - -static inline bool -_dispatch_mach_reply_tryremove(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr) -{ - bool removed; - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - } - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - return removed; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options) -{ - dispatch_mach_msg_t dmsgr = NULL; - bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); - if (options & DKEV_UNREGISTER_REPLY_REMOVE) { - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); - } - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - } - if (disconnected) { - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); - } else if (dmr->dmr_voucher) { - 
_voucher_release(dmr->dmr_voucher); - dmr->dmr_voucher = NULL; - } - _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", - _dispatch_mach_reply_get_reply_port(dmr), - disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); - if (dmsgr) { - return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - } - dispatch_assert(!(options & DKEV_UNREGISTER_WAKEUP)); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, unsigned int options) -{ - dispatch_mach_msg_t dmsgr = NULL; - bool replies_empty = false; - bool disconnected = (options & DKEV_UNREGISTER_DISCONNECTED); - if (options & DKEV_UNREGISTER_REPLY_REMOVE) { - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); - } - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - replies_empty = TAILQ_EMPTY(&dm->dm_refs->dm_replies); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - } - if (disconnected) { - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr); - } else if (dmr->dmr_voucher) { - _voucher_release(dmr->dmr_voucher); - dmr->dmr_voucher = NULL; - } - uint32_t flags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dispatch_kevent_t dk = dmr->dmr_dkev; - _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", - (mach_port_t)dk->dk_kevent.ident, - disconnected ? 
" (disconnected)" : "", dmr->dmr_ctxt); - if (!dm->ds_is_direct_kevent) { - dmr->dmr_dkev = NULL; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dmr, dr_list); - _dispatch_kevent_unregister(dk, flags, 0); - } else { - long r = _dispatch_kevent_unregister(dk, flags, options); - if (r == EINPROGRESS) { - _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", - (mach_port_t)dk->dk_kevent.ident, dk); - dispatch_assert(options == DKEV_UNREGISTER_DISCONNECTED); - // dmr must be put back so that the event delivery finds it, the - // replies lock is held by the caller. - TAILQ_INSERT_HEAD(&dm->dm_refs->dm_replies, dmr, dmr_list); - if (dmsgr) { - dmr->dmr_voucher = dmsgr->dmsg_voucher; - dmsgr->dmsg_voucher = NULL; - dispatch_release(dmsgr); - } - return; // deferred unregistration - } - dispatch_assume_zero(r); - dmr->dmr_dkev = NULL; - _TAILQ_TRASH_ENTRY(dmr, dr_list); - } - free(dmr); - if (dmsgr) { - return _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - } - if ((options & DKEV_UNREGISTER_WAKEUP) && replies_empty && - (dm->dm_refs->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - dx_wakeup(dm, 0, DISPATCH_WAKEUP_FLUSH); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_waiter_register(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, - dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) -{ - dmr->dr_source_wref = _dispatch_ptr2wref(dm); - dmr->dmr_dkev = NULL; - dmr->dmr_reply = reply_port; - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_mach_reply_mark_reply_port_owned(dmr); - } else { - if (dmsg->dmsg_voucher) { - dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); - } - dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; - // make reply context visible to leaks rdar://11777199 - dmr->dmr_ctxt = dmsg->do_ctxt; - } - - _dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", - reply_port, dmsg->do_ctxt); - 
_dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, - dispatch_mach_msg_t dmsg) -{ - dispatch_kevent_t dk; - dispatch_mach_reply_refs_t dmr; - dispatch_source_type_t type = &_dispatch_source_type_mach_recv_direct; - pthread_priority_t mp, pp; - - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = type->ke; - dk->dk_kevent.ident = reply_port; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE|EV_ONESHOT; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - if (!dm->ds_is_direct_kevent) { - dk->dk_kevent.fflags = DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE; - dk->dk_kevent.flags &= ~(EV_UDATA_SPECIFIC|EV_VANISHED); - } - - dmr = _dispatch_calloc(1ul, sizeof(struct dispatch_mach_reply_refs_s)); - dmr->dr_source_wref = _dispatch_ptr2wref(dm); - dmr->dmr_dkev = dk; - dmr->dmr_reply = reply_port; - if (dmsg->dmsg_voucher) { - dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); - } - dmr->dmr_priority = (dispatch_priority_t)dmsg->dmsg_priority; - // make reply context visible to leaks rdar://11777199 - dmr->dmr_ctxt = dmsg->do_ctxt; - - pp = dm->dq_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp && dm->ds_is_direct_kevent) { - mp = dmsg->dmsg_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (pp < mp) pp = mp; - pp |= dm->dq_priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - } else { - pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; - } - - _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", - reply_port, dmsg->do_ctxt); - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dmr->dmr_dkev, pp, &flags); - 
TAILQ_INSERT_TAIL(&dmr->dmr_dkev->dk_sources, (dispatch_source_refs_t)dmr, - dr_list); - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_refs->dm_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - if (do_resume && _dispatch_kevent_resume(dmr->dmr_dkev, flags, 0)) { - return _dispatch_mach_reply_kevent_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED|DKEV_UNREGISTER_REPLY_REMOVE); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) -{ - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - dispatch_kevent_t dk = dm->dm_dkev; - dm->dm_dkev = NULL; - TAILQ_REMOVE(&dk->dk_sources, (dispatch_source_refs_t)dm->dm_refs, - dr_list); - dm->ds_pending_data_mask &= ~(unsigned long) - (DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD); - _dispatch_kevent_unregister(dk, - DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD, 0); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) -{ - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - dispatch_kevent_t dk; - - dk = _dispatch_calloc(1ul, sizeof(struct dispatch_kevent_s)); - dk->dk_kevent = _dispatch_source_type_mach_send.ke; - dk->dk_kevent.ident = send; - dk->dk_kevent.flags |= EV_ADD|EV_ENABLE; - dk->dk_kevent.fflags = DISPATCH_MACH_SEND_POSSIBLE|DISPATCH_MACH_SEND_DEAD; - dk->dk_kevent.udata = (uintptr_t)dk; - TAILQ_INIT(&dk->dk_sources); - - dm->ds_pending_data_mask |= dk->dk_kevent.fflags; - - uint32_t flags; - bool do_resume = _dispatch_kevent_register(&dk, - _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, &flags); - TAILQ_INSERT_TAIL(&dk->dk_sources, - (dispatch_source_refs_t)dm->dm_refs, dr_list); - dm->dm_dkev = dk; - if (do_resume && _dispatch_kevent_resume(dm->dm_dkev, flags, 0)) { - 
_dispatch_mach_notification_kevent_unregister(dm); - } -} - -static mach_port_t -_dispatch_get_thread_reply_port(void) -{ - mach_port_t reply_port, mrp = _dispatch_get_thread_mig_reply_port(); - if (mrp) { - reply_port = mrp; - _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", - reply_port); - } else { - reply_port = mach_reply_port(); - _dispatch_set_thread_mig_reply_port(reply_port); - _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", - reply_port); - } - _dispatch_debug_machport(reply_port); - return reply_port; -} - -static void -_dispatch_clear_thread_reply_port(mach_port_t reply_port) -{ - mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); - if (reply_port != mrp) { - if (mrp) { - _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " - "port (found 0x%08x)", reply_port, mrp); - } - return; - } - _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", - reply_port); -} - -static void -_dispatch_set_thread_reply_port(mach_port_t reply_port) -{ - _dispatch_debug_machport(reply_port); - mach_port_t mrp = _dispatch_get_thread_mig_reply_port(); - if (mrp) { - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - _dispatch_debug("machport[0x%08x]: deallocated sync reply port " - "(found 0x%08x)", reply_port, mrp); - } else { - _dispatch_set_thread_mig_reply_port(reply_port); - _dispatch_debug("machport[0x%08x]: restored thread sync reply port", - reply_port); - } -} - -static inline mach_port_t -_dispatch_mach_msg_get_remote_port(dispatch_object_t dou) -{ - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t remote = hdr->msgh_remote_port; - return remote; -} - -static inline mach_port_t -_dispatch_mach_msg_get_reply_port(dispatch_object_t dou) -{ - mach_msg_header_t *hdr = 
_dispatch_mach_msg_get_msg(dou._dmsg); - mach_port_t local = hdr->msgh_local_port; - if (!MACH_PORT_VALID(local) || MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) != - MACH_MSG_TYPE_MAKE_SEND_ONCE) return MACH_PORT_NULL; - return local; -} - -static inline void -_dispatch_mach_msg_set_reason(dispatch_mach_msg_t dmsg, mach_error_t err, - unsigned long reason) -{ - dispatch_assert_zero(reason & ~(unsigned long)code_emask); - dmsg->dmsg_error = ((err || !reason) ? err : - err_local|err_sub(0x3e0)|(mach_error_t)reason); -} - -static inline unsigned long -_dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) -{ - mach_error_t err = dmsg->dmsg_error; - - dmsg->dmsg_error = 0; - if ((err & system_emask) == err_local && err_get_sub(err) == 0x3e0) { - *err_ptr = 0; - return err_get_code(err); - } - *err_ptr = err; - return err ? DISPATCH_MACH_MESSAGE_SEND_FAILED : DISPATCH_MACH_MESSAGE_SENT; -} - -static void -_dispatch_mach_msg_recv(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, - _dispatch_kevent_qos_s *ke, mach_msg_header_t *hdr, mach_msg_size_t siz) -{ - _dispatch_debug_machport(hdr->msgh_remote_port); - _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", - hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); - bool canceled = (dm->dq_atomic_flags & DSF_CANCELED); - if (!dmr && canceled) { - // message received after cancellation, _dispatch_mach_kevent_merge is - // responsible for mach channel source state (e.g. 
deferred deletion) - return _dispatch_kevent_mach_msg_destroy(ke, hdr); - } - dispatch_mach_msg_t dmsg; - voucher_t voucher; - pthread_priority_t priority; - void *ctxt = NULL; - if (dmr) { - _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher - voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - priority = dmr->dmr_priority; - ctxt = dmr->dmr_ctxt; - unsigned int options = DKEV_DISPOSE_IMMEDIATE_DELETE; - options |= DKEV_UNREGISTER_REPLY_REMOVE; - options |= DKEV_UNREGISTER_WAKEUP; - if (canceled) options |= DKEV_UNREGISTER_DISCONNECTED; - _dispatch_mach_reply_kevent_unregister(dm, dmr, options); - ke->flags |= EV_DELETE; // remember that unregister deleted the event - if (canceled) return; - } else { - voucher = voucher_create_with_mach_msg(hdr); - priority = _voucher_get_priority(voucher); - } - dispatch_mach_msg_destructor_t destructor; - destructor = (hdr == _dispatch_kevent_mach_msg_buf(ke)) ? - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : - DISPATCH_MACH_MSG_DESTRUCTOR_FREE; - dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); - if (hdr == _dispatch_kevent_mach_msg_buf(ke)) { - _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr, (uint64_t)dmsg->dmsg_buf); - } - dmsg->dmsg_voucher = voucher; - dmsg->dmsg_priority = priority; - dmsg->do_ctxt = ctxt; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_MESSAGE_RECEIVED); - _dispatch_voucher_debug("mach-msg[%p] create", voucher, dmsg); - _dispatch_voucher_ktrace_dmsg_push(dmsg); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_mach_msg_t -_dispatch_mach_msg_reply_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port) -{ - if (slowpath(!MACH_PORT_VALID(reply_port))) { - DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); - } - void *ctxt = dmr->dmr_ctxt; - mach_msg_header_t *hdr, *hdr2 = NULL; - void *hdr_copyout_addr; - mach_msg_size_t siz, 
msgsiz = 0; - mach_msg_return_t kr; - mach_msg_option_t options; - siz = mach_vm_round_page(_dispatch_mach_recv_msg_size + - dispatch_mach_trailer_size); - hdr = alloca(siz); - for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); - p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); -retry: - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG %s", reply_port, - (options & MACH_RCV_TIMEOUT) ? "poll" : "wait"); - kr = mach_msg(hdr, options, 0, siz, reply_port, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - hdr_copyout_addr = hdr; - _dispatch_debug_machport(reply_port); - _dispatch_debug("machport[0x%08x]: MACH_RCV_MSG (size %u, opts 0x%x) " - "returned: %s - 0x%x", reply_port, siz, options, - mach_error_string(kr), kr); - switch (kr) { - case MACH_RCV_TOO_LARGE: - if (!fastpath(hdr->msgh_size <= UINT_MAX - - dispatch_mach_trailer_size)) { - DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); - } - if (options & MACH_RCV_LARGE) { - msgsiz = hdr->msgh_size + dispatch_mach_trailer_size; - hdr2 = malloc(msgsiz); - if (dispatch_assume(hdr2)) { - hdr = hdr2; - siz = msgsiz; - } - options |= MACH_RCV_TIMEOUT; - options &= ~MACH_RCV_LARGE; - goto retry; - } - _dispatch_log("BUG in libdispatch client: " - "dispatch_mach_send_and_wait_for_reply: dropped message too " - "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, - hdr->msgh_size); - break; - case MACH_RCV_INVALID_NAME: // rdar://problem/21963848 - case MACH_RCV_PORT_CHANGED: // rdar://problem/21885327 - case MACH_RCV_PORT_DIED: - // channel was disconnected/canceled and reply port destroyed - _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " - "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); - goto out; - case MACH_MSG_SUCCESS: - if (hdr->msgh_remote_port) { - 
_dispatch_debug_machport(hdr->msgh_remote_port); - } - _dispatch_debug("machport[0x%08x]: received msg id 0x%x, size = %u, " - "reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, - hdr->msgh_size, hdr->msgh_remote_port); - siz = hdr->msgh_size + dispatch_mach_trailer_size; - if (hdr2 && siz < msgsiz) { - void *shrink = realloc(hdr2, msgsiz); - if (shrink) hdr = hdr2 = shrink; - } - break; - default: - dispatch_assume_zero(kr); - break; - } - _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); - hdr->msgh_local_port = MACH_PORT_NULL; - if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { - if (!kr) mach_msg_destroy(hdr); - goto out; - } - dispatch_mach_msg_t dmsg; - dispatch_mach_msg_destructor_t destructor = (!hdr2) ? - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT : - DISPATCH_MACH_MSG_DESTRUCTOR_FREE; - dmsg = dispatch_mach_msg_create(hdr, siz, destructor, NULL); - if (!hdr2 || hdr != hdr_copyout_addr) { - _dispatch_ktrace2(DISPATCH_MACH_MSG_hdr_move, (uint64_t)hdr_copyout_addr, (uint64_t)_dispatch_mach_msg_get_msg(dmsg)); - } - dmsg->do_ctxt = ctxt; - return dmsg; -out: - free(hdr2); - return NULL; -} - -static inline void -_dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port) -{ - bool removed = _dispatch_mach_reply_tryremove(dm, dmr); - if (!MACH_PORT_VALID(local_port) || !removed) { - // port moved/destroyed during receive, or reply waiter was never - // registered or already removed (disconnected) - return; - } - mach_port_t reply_port = _dispatch_mach_reply_get_reply_port(dmr); - _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", - reply_port, dmr->dmr_ctxt); - if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { - _dispatch_set_thread_reply_port(reply_port); - if (local_port != reply_port) { - DISPATCH_CLIENT_CRASH(local_port, - "Reply received on unexpected port"); - } - return; - } - mach_msg_header_t *hdr; - dispatch_mach_msg_t dmsg; - dmsg = 
dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - hdr->msgh_local_port = local_port; - dmsg->dmsg_voucher = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - dmsg->dmsg_priority = dmr->dmr_priority; - dmsg->do_ctxt = dmr->dmr_ctxt; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -static inline void -_dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, - mach_port_t remote_port) -{ - mach_msg_header_t *hdr; - dispatch_mach_msg_t dmsg; - dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - if (local_port) hdr->msgh_local_port = local_port; - if (remote_port) hdr->msgh_remote_port = remote_port; - _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); - _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? - local_port : remote_port, local_port ? "receive" : "send"); - return _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); -} - -static inline dispatch_mach_msg_t -_dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, - dispatch_mach_reply_refs_t dmr) -{ - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply : - _dispatch_mach_reply_get_reply_port(dmr); - voucher_t v; - - if (!reply_port) { - if (!dmsg) { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - if (v) _voucher_release(v); - } - return NULL; - } - - if (dmsg) { - v = dmsg->dmsg_voucher; - if (v) _voucher_retain(v); - } else { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - } - - if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && - (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || - (dmr && !dmr->dmr_dkev && - _dispatch_mach_reply_is_reply_port_owned(dmr))) { - if (v) _voucher_release(v); - // deallocate owned reply port to break _dispatch_mach_msg_reply_recv - // out of waiting in mach_msg(MACH_RCV_MSG) - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); - return NULL; - } - - mach_msg_header_t *hdr; - dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), - DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - dmsgr->dmsg_voucher = v; - hdr->msgh_local_port = reply_port; - if (dmsg) { - dmsgr->dmsg_priority = dmsg->dmsg_priority; - dmsgr->do_ctxt = dmsg->do_ctxt; - } else { - dmsgr->dmsg_priority = dmr->dmr_priority; - dmsgr->do_ctxt = dmr->dmr_ctxt; - } - _dispatch_mach_msg_set_reason(dmsgr, 0, DISPATCH_MACH_DISCONNECTED); - _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", - hdr->msgh_local_port, dmsgr->do_ctxt); - return dmsgr; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) -{ - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - mach_msg_option_t msg_opts = dmsg->dmsg_options; - _dispatch_debug("machport[0x%08x]: not sent msg id 0x%x, ctxt %p, " - "msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x", - msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - msg_opts, 
msg->msgh_voucher_port, dmsg->dmsg_reply); - unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? - 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - _dispatch_mach_msg_set_reason(dmsg, 0, reason); - _dispatch_queue_push(dm->_as_dq, dmsg, dmsg->dmsg_priority); - if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); -} - -DISPATCH_NOINLINE -static uint32_t -_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_mach_reply_refs_t dmr, pthread_priority_t pp, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; - voucher_t voucher = dmsg->dmsg_voucher; - mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; - uint32_t send_status = 0; - bool clear_voucher = false, kvoucher_move_send = false; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE); - mach_port_t reply_port = dmsg->dmsg_reply; - if (!is_reply) { - dr->dm_needs_mgr = 0; - if (unlikely(dr->dm_checkin && dmsg != dr->dm_checkin)) { - // send initial checkin message - if (dm->dm_dkev && slowpath(_dispatch_queue_get_current() != - &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - dr->dm_needs_mgr = 1; - goto out; - } - if (unlikely(!_dispatch_mach_msg_send(dm, - dr->dm_checkin, NULL, pp, DM_SEND_INVOKE_NONE))) { - goto out; - } - dr->dm_checkin = NULL; - } - } - mach_msg_return_t kr = 0; - mach_msg_option_t opts = 0, msg_opts = dmsg->dmsg_options; - if (!(msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY)) { - mach_msg_priority_t msg_priority = MACH_MSG_PRIORITY_UNSPECIFIED; - opts = MACH_SEND_MSG | (msg_opts & ~DISPATCH_MACH_OPTIONS_MASK); - if (!is_reply) { - if (dmsg != dr->dm_checkin) { - msg->msgh_remote_port = dr->dm_send; - } - if (_dispatch_queue_get_current() == 
&_dispatch_mgr_q) { - if (slowpath(!dm->dm_dkev)) { - _dispatch_mach_notification_kevent_register(dm, - msg->msgh_remote_port); - } - if (fastpath(dm->dm_dkev)) { - if (DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) { - goto out; - } - opts |= MACH_SEND_NOTIFY; - } - } - opts |= MACH_SEND_TIMEOUT; - if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { - ipc_kvoucher = _voucher_create_mach_voucher_with_priority( - voucher, dmsg->dmsg_priority); - } - _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (ipc_kvoucher) { - kvoucher_move_send = true; - clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, - ipc_kvoucher, kvoucher_move_send); - } else { - clear_voucher = _voucher_mach_msg_set(msg, voucher); - } - if (pp && _dispatch_evfilt_machport_direct_enabled) { - opts |= MACH_SEND_OVERRIDE; - msg_priority = (mach_msg_priority_t)pp; - } - } - _dispatch_debug_machport(msg->msgh_remote_port); - if (reply_port) _dispatch_debug_machport(reply_port); - if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_clear_thread_reply_port(reply_port); - } - _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, - msg_opts); - } - kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, - msg_priority); - _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " - "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " - "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, - opts, msg_opts, msg->msgh_voucher_port, reply_port, - mach_error_string(kr), kr); - if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { - _dispatch_mach_reply_waiter_unregister(dm, dmr, - DKEV_UNREGISTER_REPLY_REMOVE); - } - if (clear_voucher) { - if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { - DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); - } - mach_voucher_t kv; - kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); - if (kvoucher_move_send) 
ipc_kvoucher = kv; - } - } - if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { - if (opts & MACH_SEND_NOTIFY) { - _dispatch_debug("machport[0x%08x]: send-possible notification " - "armed", (mach_port_t)dm->dm_dkev->dk_kevent.ident); - DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) = 1; - } else { - // send kevent must be installed on the manager queue - dr->dm_needs_mgr = 1; - } - if (ipc_kvoucher) { - _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); - voucher_t ipc_voucher; - ipc_voucher = _voucher_create_with_priority_and_mach_voucher( - voucher, dmsg->dmsg_priority, ipc_kvoucher); - _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", - ipc_voucher, dmsg, voucher); - if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = ipc_voucher; - } - goto out; - } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { - _voucher_dealloc_mach_voucher(ipc_kvoucher); - } - if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && - !(dm->ds_dkev && dm->ds_dkev->dk_kevent.ident == reply_port)) { - if (!dm->ds_is_direct_kevent && - _dispatch_queue_get_current() != &_dispatch_mgr_q) { - // reply receive kevent must be installed on the manager queue - dr->dm_needs_mgr = 1; - dmsg->dmsg_options = msg_opts | DISPATCH_MACH_REGISTER_FOR_REPLY; - goto out; - } - _dispatch_mach_reply_kevent_register(dm, reply_port, dmsg); - } - if (unlikely(!is_reply && dmsg == dr->dm_checkin && dm->dm_dkev)) { - _dispatch_mach_notification_kevent_unregister(dm); - } - if (slowpath(kr)) { - // Send failed, so reply was never registered - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL); - } - _dispatch_mach_msg_set_reason(dmsg, kr, 0); - if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && - (msg_opts & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT)) { - // Return sent message synchronously - send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; - } else { - _dispatch_queue_push(dm->_as_dq, dmsg, 
dmsg->dmsg_priority); - } - if (dmsgr) _dispatch_queue_push(dm->_as_dq, dmsgr, dmsgr->dmsg_priority); - send_status |= DM_SEND_STATUS_SUCCESS; -out: - return send_status; -} - -#pragma mark - -#pragma mark dispatch_mach_send_refs_t - -static void _dispatch_mach_cancel(dispatch_mach_t dm); -static void _dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, - pthread_priority_t pp); - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dm_state_get_override(uint64_t dm_state) -{ - dm_state &= DISPATCH_MACH_STATE_OVERRIDE_MASK; - return (pthread_priority_t)(dm_state >> 32); -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dm_state_override_from_priority(pthread_priority_t pp) -{ - uint64_t pp_state = pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK; - return pp_state << 32; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dm_state_needs_override(uint64_t dm_state, uint64_t pp_state) -{ - return (pp_state > (dm_state & DISPATCH_MACH_STATE_OVERRIDE_MASK)); -} - -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dm_state_merge_override(uint64_t dm_state, uint64_t pp_state) -{ - if (_dm_state_needs_override(dm_state, pp_state)) { - dm_state &= ~DISPATCH_MACH_STATE_OVERRIDE_MASK; - dm_state |= pp_state; - dm_state |= DISPATCH_MACH_STATE_DIRTY; - dm_state |= DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - } - return dm_state; -} - -#define _dispatch_mach_send_push_update_tail(dr, tail) \ - os_mpsc_push_update_tail(dr, dm, tail, do_next) -#define _dispatch_mach_send_push_update_head(dr, head) \ - os_mpsc_push_update_head(dr, dm, head) -#define _dispatch_mach_send_get_head(dr) \ - os_mpsc_get_head(dr, dm) -#define _dispatch_mach_send_unpop_head(dr, dc, dc_next) \ - os_mpsc_undo_pop_head(dr, dm, dc, dc_next, do_next) -#define _dispatch_mach_send_pop_head(dr, head) \ - os_mpsc_pop_head(dr, dm, head, do_next) - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dr, - dispatch_object_t dou) -{ - if 
(_dispatch_mach_send_push_update_tail(dr, dou._do)) { - _dispatch_mach_send_push_update_head(dr, dou._do); - return true; - } - return false; -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_mach_reply_refs_t dmr; - dispatch_mach_msg_t dmsg; - struct dispatch_object_s *dc = NULL, *next_dc = NULL; - pthread_priority_t pp = _dm_state_get_override(dr->dm_state); - uint64_t old_state, new_state; - uint32_t send_status; - bool needs_mgr, disconnecting, returning_send_result = false; - -again: - needs_mgr = false; disconnecting = false; - while (dr->dm_tail) { - dc = _dispatch_mach_send_get_head(dr); - do { - dispatch_mach_send_invoke_flags_t sf = send_flags; - // Only request immediate send result for the first message - send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; - next_dc = _dispatch_mach_send_pop_head(dr, dc); - if (_dispatch_object_has_type(dc, - DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { - if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { - goto partial_drain; - } - _dispatch_continuation_pop(dc, dm->_as_dq, flags); - continue; - } - if (_dispatch_object_is_slow_item(dc)) { - dmsg = ((dispatch_continuation_t)dc)->dc_data; - dmr = ((dispatch_continuation_t)dc)->dc_other; - } else if (_dispatch_object_has_vtable(dc)) { - dmsg = (dispatch_mach_msg_t)dc; - dmr = NULL; - } else { - if ((dm->dm_dkev || !dm->ds_is_direct_kevent) && - (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { - // send kevent must be uninstalled on the manager queue - needs_mgr = true; - goto partial_drain; - } - if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { - disconnecting = true; - goto partial_drain; - } - continue; - } - _dispatch_voucher_ktrace_dmsg_pop(dmsg); - if (unlikely(dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - _dispatch_mach_msg_not_sent(dm, dmsg); - 
continue; - } - send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, pp, sf); - if (unlikely(!send_status)) { - goto partial_drain; - } - if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { - returning_send_result = true; - } - } while ((dc = next_dc)); - } - - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - if (old_state & DISPATCH_MACH_STATE_DIRTY) { - new_state = old_state; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } else { - // unlock - new_state = 0; - } - }); - goto out; - -partial_drain: - // if this is not a complete drain, we must undo some things - _dispatch_mach_send_unpop_head(dr, dc, next_dc); - - if (_dispatch_object_has_type(dc, - DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = old_state; - new_state |= DISPATCH_MACH_STATE_DIRTY; - new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; - new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; - }); - } else { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = old_state; - if (old_state & (DISPATCH_MACH_STATE_DIRTY | - DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } else { - new_state |= DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_UNLOCK_MASK; - } - }); - } - -out: - if (old_state & DISPATCH_MACH_STATE_RECEIVED_OVERRIDE) { - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_defaultpriority_override(); - } - - if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { - os_atomic_thread_fence(acquire); - pp = _dm_state_get_override(new_state); - goto again; - } - - if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - pp = _dm_state_get_override(new_state); - _dispatch_mach_send_barrier_drain_push(dm, pp); - } else { - if (needs_mgr) { - pp = _dm_state_get_override(new_state); - } else { - pp = 0; - } - if (!disconnecting) dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); - } - return returning_send_result; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_invoke(dispatch_mach_t dm, - dispatch_invoke_flags_t flags, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_lock_owner tid_self = _dispatch_tid_self(); - uint64_t old_state, new_state; - pthread_priority_t pp_floor; - - uint64_t canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK; - uint64_t canlock_state = 0; - - if (send_flags & DM_SEND_INVOKE_NEEDS_BARRIER) { - canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; - canlock_state = DISPATCH_MACH_STATE_PENDING_BARRIER; - } else if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { - canlock_mask |= DISPATCH_MACH_STATE_PENDING_BARRIER; - } - - if (flags & DISPATCH_INVOKE_MANAGER_DRAIN) { - pp_floor = 0; - } else { - // _dispatch_queue_class_invoke will have applied the queue override - // (if any) before we get here. Else use the default base priority - // as an estimation of the priority we already asked for. 
- pp_floor = dm->_as_dq->dq_override; - if (!pp_floor) { - pp_floor = _dispatch_get_defaultpriority(); - pp_floor &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - } - } - -retry: - os_atomic_rmw_loop2o(dm->dm_refs, dm_state, old_state, new_state, acquire, { - new_state = old_state; - if (unlikely((old_state & canlock_mask) != canlock_state)) { - if (!(send_flags & DM_SEND_INVOKE_FLUSH)) { - os_atomic_rmw_loop_give_up(break); - } - new_state |= DISPATCH_MACH_STATE_DIRTY; - } else { - if (likely(pp_floor)) { - pthread_priority_t pp = _dm_state_get_override(old_state); - if (unlikely(pp > pp_floor)) { - os_atomic_rmw_loop_give_up({ - _dispatch_wqthread_override_start(tid_self, pp); - // Ensure that the root queue sees - // that this thread was overridden. - _dispatch_set_defaultpriority_override(); - pp_floor = pp; - goto retry; - }); - } - } - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - - if (unlikely((old_state & canlock_mask) != canlock_state)) { - return; - } - if (send_flags & DM_SEND_INVOKE_CANCEL) { - _dispatch_mach_cancel(dm); - } - _dispatch_mach_send_drain(dm, flags, send_flags); -} - -DISPATCH_NOINLINE -void -_dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) -{ - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - dispatch_thread_frame_s dtf; - - DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); - DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); - // hide the mach channel (see _dispatch_mach_barrier_invoke comment) - _dispatch_thread_frame_stash(&dtf); - _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ - _dispatch_mach_send_invoke(dm, flags, - DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); - }); - 
_dispatch_thread_frame_unstash(&dtf); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_barrier_drain_push(dispatch_mach_t dm, - pthread_priority_t pp) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - - dc->do_vtable = DC_VTABLE(MACH_SEND_BARRRIER_DRAIN); - dc->dc_func = NULL; - dc->dc_ctxt = NULL; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - return _dispatch_queue_push(dm->_as_dq, dc, pp); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, - pthread_priority_t pp) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - uint64_t pp_state, old_state, new_state, state_flags = 0; - dispatch_lock_owner owner; - bool wakeup; - - // when pushing a send barrier that destroys - // the last reference to this channel, and the send queue is already - // draining on another thread, the send barrier may run as soon as - // _dispatch_mach_send_push_inline() returns. - _dispatch_retain(dm); - pp_state = _dm_state_override_from_priority(pp); - - wakeup = _dispatch_mach_send_push_inline(dr, dc); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; - if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { - state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; - } - } - - if (state_flags) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - }); - } else { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, relaxed, { - new_state = _dm_state_merge_override(old_state, pp_state); - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(break); - } - }); - } - - pp = _dm_state_get_override(new_state); - owner = _dispatch_lock_owner((dispatch_lock)old_state); - if (owner) { - if (_dm_state_needs_override(old_state, pp_state)) { - _dispatch_wqthread_override_start_check_owner(owner, pp, - &dr->dm_state_lock.dul_lock); - } - return 
_dispatch_release_tailcall(dm); - } - - dispatch_wakeup_flags_t wflags = 0; - if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { - _dispatch_mach_send_barrier_drain_push(dm, pp); - } else if (wakeup || dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED)) { - wflags = DISPATCH_WAKEUP_FLUSH | DISPATCH_WAKEUP_CONSUME; - } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - wflags = DISPATCH_WAKEUP_OVERRIDING | DISPATCH_WAKEUP_CONSUME; - } - if (wflags) { - return dx_wakeup(dm, pp, wflags); - } - return _dispatch_release_tailcall(dm); -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, - dispatch_object_t dou, pthread_priority_t pp, - dispatch_mach_send_invoke_flags_t send_flags) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_lock_owner tid_self = _dispatch_tid_self(); - uint64_t pp_state, old_state, new_state, canlock_mask, state_flags = 0; - dispatch_lock_owner owner; - - pp_state = _dm_state_override_from_priority(pp); - bool wakeup = _dispatch_mach_send_push_inline(dr, dou); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; - } - - if (unlikely(dr->dm_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, release, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - }); - dx_wakeup(dm, pp, DISPATCH_WAKEUP_FLUSH); - return false; - } - - canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | - DISPATCH_MACH_STATE_PENDING_BARRIER; - if (state_flags) { - os_atomic_rmw_loop2o(dr, dm_state, old_state, new_state, seq_cst, { - new_state = _dm_state_merge_override(old_state, pp_state); - new_state |= state_flags; - if (likely((old_state & canlock_mask) == 0)) { - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - } else { - os_atomic_rmw_loop2o(dr, 
dm_state, old_state, new_state, acquire, { - new_state = _dm_state_merge_override(old_state, pp_state); - if (new_state == old_state) { - os_atomic_rmw_loop_give_up(return false); - } - if (likely((old_state & canlock_mask) == 0)) { - new_state |= tid_self; - new_state &= ~DISPATCH_MACH_STATE_DIRTY; - new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; - new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; - } - }); - } - - owner = _dispatch_lock_owner((dispatch_lock)old_state); - if (owner) { - if (_dm_state_needs_override(old_state, pp_state)) { - _dispatch_wqthread_override_start_check_owner(owner, pp, - &dr->dm_state_lock.dul_lock); - } - return false; - } - - if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - dx_wakeup(dm, pp, DISPATCH_WAKEUP_OVERRIDING); - return false; - } - - // Ensure our message is still at the head of the queue and has not already - // been dequeued by another thread that raced us to the send queue lock. - // A plain load of the head and comparison against our object pointer is - // sufficient. 
- if (unlikely(!(wakeup && dou._do == dr->dm_head))) { - // Don't request immediate send result for messages we don't own - send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; - } - return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); -} - -static void -_dispatch_mach_merge_notification_kevent(dispatch_mach_t dm, - const _dispatch_kevent_qos_s *ke) -{ - if (!(ke->fflags & dm->ds_pending_data_mask)) { - return; - } - _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, - DM_SEND_INVOKE_FLUSH); -} - -#pragma mark - -#pragma mark dispatch_mach_t - -static inline mach_msg_option_t -_dispatch_mach_checkin_options(void) -{ - mach_msg_option_t options = 0; -#if DISPATCH_USE_CHECKIN_NOIMPORTANCE - options = MACH_SEND_NOIMPORTANCE; // -#endif - return options; -} - - -static inline mach_msg_option_t -_dispatch_mach_send_options(void) -{ - mach_msg_option_t options = 0; - return options; -} - -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_mach_priority_propagate(mach_msg_option_t options) -{ -#if DISPATCH_USE_NOIMPORTANCE_QOS - if (options & MACH_SEND_NOIMPORTANCE) return 0; -#else - (void)options; -#endif - return _dispatch_priority_propagate(); -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - dispatch_continuation_t dc_wait, mach_msg_option_t options) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { - DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); - } - dispatch_retain(dmsg); - pthread_priority_t priority = _dispatch_mach_priority_propagate(options); - options |= _dispatch_mach_send_options(); - dmsg->dmsg_options = options; - mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); - dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); - bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE); - dmsg->dmsg_priority = priority; - 
dmsg->dmsg_voucher = _voucher_copy(); - _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); - - uint32_t send_status; - bool returning_send_result = false; - dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; - if (options & DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT) { - send_flags = DM_SEND_INVOKE_IMMEDIATE_SEND; - } - if (is_reply && !dmsg->dmsg_reply && !dr->dm_disconnect_cnt && - !(dm->dq_atomic_flags & DSF_CANCELED)) { - // replies are sent to a send-once right and don't need the send queue - dispatch_assert(!dc_wait); - send_status = _dispatch_mach_msg_send(dm, dmsg, NULL, 0, send_flags); - dispatch_assert(send_status); - returning_send_result = !!(send_status & - DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT); - } else { - _dispatch_voucher_ktrace_dmsg_push(dmsg); - priority &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - dispatch_object_t dou = { ._dmsg = dmsg }; - if (dc_wait) dou._dc = dc_wait; - returning_send_result = _dispatch_mach_send_push_and_trydrain(dm, dou, - priority, send_flags); - } - if (returning_send_result) { - _dispatch_voucher_debug("mach-msg[%p] clear", dmsg->dmsg_voucher, dmsg); - if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = NULL; - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dispatch_release(dmsg); - } - return returning_send_result; -} - -DISPATCH_NOINLINE -void -dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options) -{ - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); - dispatch_assert(!returned_send_result); -} - -DISPATCH_NOINLINE -void -dispatch_mach_send_with_result(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, - mach_msg_option_t options, dispatch_mach_send_flags_t send_flags, - dispatch_mach_reason_t *send_result, mach_error_t *send_error) -{ - if (unlikely(send_flags != 
DISPATCH_MACH_SEND_DEFAULT)) { - DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); - } - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; - bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); - unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; - mach_error_t err = 0; - if (returned_send_result) { - reason = _dispatch_mach_msg_get_reason(dmsg, &err); - } - *send_result = reason; - *send_error = err; -} - -static inline -dispatch_mach_msg_t -_dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, mach_msg_option_t options, - bool *returned_send_result) -{ - mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); - if (!reply_port) { - // use per-thread mach reply port - reply_port = _dispatch_get_thread_reply_port(); - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); - dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == - MACH_MSG_TYPE_MAKE_SEND_ONCE); - hdr->msgh_local_port = reply_port; - options |= DISPATCH_MACH_OWNED_REPLY_PORT; - } - - dispatch_mach_reply_refs_t dmr; -#if DISPATCH_DEBUG - dmr = _dispatch_calloc(1, sizeof(*dmr)); -#else - struct dispatch_mach_reply_refs_s dmr_buf = { }; - dmr = &dmr_buf; -#endif - struct dispatch_continuation_s dc_wait = { - .dc_flags = DISPATCH_OBJ_SYNC_SLOW_BIT, - .dc_data = dmsg, - .dc_other = dmr, - .dc_priority = DISPATCH_NO_PRIORITY, - .dc_voucher = DISPATCH_NO_VOUCHER, - }; - dmr->dmr_ctxt = dmsg->do_ctxt; - *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); - if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_clear_thread_reply_port(reply_port); - } - dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port); -#if DISPATCH_DEBUG - free(dmr); -#endif - return dmsg; -} - -DISPATCH_NOINLINE -dispatch_mach_msg_t -dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, - 
dispatch_mach_msg_t dmsg, mach_msg_option_t options) -{ - bool returned_send_result; - dispatch_mach_msg_t reply; - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_WAIT_FOR_REPLY; - reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, - &returned_send_result); - dispatch_assert(!returned_send_result); - return reply; -} - -DISPATCH_NOINLINE -dispatch_mach_msg_t -dispatch_mach_send_with_result_and_wait_for_reply(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, mach_msg_option_t options, - dispatch_mach_send_flags_t send_flags, - dispatch_mach_reason_t *send_result, mach_error_t *send_error) -{ - if (unlikely(send_flags != DISPATCH_MACH_SEND_DEFAULT)) { - DISPATCH_CLIENT_CRASH(send_flags, "Invalid send flags"); - } - bool returned_send_result; - dispatch_mach_msg_t reply; - dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); - options &= ~DISPATCH_MACH_OPTIONS_MASK; - options |= DISPATCH_MACH_WAIT_FOR_REPLY; - options |= DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT; - reply = _dispatch_mach_send_and_wait_for_reply(dm, dmsg, options, - &returned_send_result); - unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; - mach_error_t err = 0; - if (returned_send_result) { - reason = _dispatch_mach_msg_get_reason(dmsg, &err); - } - *send_result = reason; - *send_error = err; - return reply; -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_disconnect(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - bool disconnected; - if (dm->dm_dkev) { - _dispatch_mach_notification_kevent_unregister(dm); - } - if (MACH_PORT_VALID(dr->dm_send)) { - _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dr->dm_send); - } - dr->dm_send = MACH_PORT_NULL; - if (dr->dm_checkin) { - _dispatch_mach_msg_not_sent(dm, dr->dm_checkin); - dr->dm_checkin = NULL; - } - _dispatch_unfair_lock_lock(&dm->dm_refs->dm_replies_lock); - dispatch_mach_reply_refs_t dmr, tmp; - 
TAILQ_FOREACH_SAFE(dmr, &dm->dm_refs->dm_replies, dmr_list, tmp) { - TAILQ_REMOVE(&dm->dm_refs->dm_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - if (dmr->dmr_dkev) { - _dispatch_mach_reply_kevent_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED); - } else { - _dispatch_mach_reply_waiter_unregister(dm, dmr, - DKEV_UNREGISTER_DISCONNECTED); - } - } - disconnected = TAILQ_EMPTY(&dm->dm_refs->dm_replies); - _dispatch_unfair_lock_unlock(&dm->dm_refs->dm_replies_lock); - return disconnected; -} - -static void -_dispatch_mach_cancel(dispatch_mach_t dm) -{ - _dispatch_object_debug(dm, "%s", __func__); - if (!_dispatch_mach_disconnect(dm)) return; - if (dm->ds_dkev) { - mach_port_t local_port = (mach_port_t)dm->ds_dkev->dk_kevent.ident; - _dispatch_source_kevent_unregister(dm->_as_ds); - if ((dm->dq_atomic_flags & DSF_STATE_MASK) == DSF_DELETED) { - _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); - } - } else { - _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, - DSF_ARMED | DSF_DEFERRED_DELETE); - } -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) -{ - if (!_dispatch_mach_disconnect(dm)) return false; - dispatch_mach_send_refs_t dr = dm->dm_refs; - dr->dm_checkin = dou._dc->dc_data; - dr->dm_send = (mach_port_t)dou._dc->dc_other; - _dispatch_continuation_free(dou._dc); - (void)os_atomic_dec2o(dr, dm_disconnect_cnt, relaxed); - _dispatch_object_debug(dm, "%s", __func__); - _dispatch_release(dm); // - return true; -} - -DISPATCH_NOINLINE -void -dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, - dispatch_mach_msg_t checkin) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - (void)os_atomic_inc2o(dr, dm_disconnect_cnt, relaxed); - if (MACH_PORT_VALID(send) && checkin) { - dispatch_retain(checkin); - checkin->dmsg_options = _dispatch_mach_checkin_options(); - dr->dm_checkin_port = _dispatch_mach_msg_get_remote_port(checkin); - } else { - 
checkin = NULL; - dr->dm_checkin_port = MACH_PORT_NULL; - } - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; - // actually called manually in _dispatch_mach_send_drain - dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; - dc->dc_ctxt = dc; - dc->dc_data = checkin; - dc->dc_other = (void*)(uintptr_t)send; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain(dm); // - return _dispatch_mach_send_push(dm, dc, 0); -} - -DISPATCH_NOINLINE -mach_port_t -dispatch_mach_get_checkin_port(dispatch_mach_t dm) -{ - dispatch_mach_send_refs_t dr = dm->dm_refs; - if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { - return MACH_PORT_DEAD; - } - return dr->dm_checkin_port; -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_connect_invoke(dispatch_mach_t dm) -{ - dispatch_mach_refs_t dr = dm->ds_refs; - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CONNECTED, NULL, 0, dr->dm_handler_func); - dm->dm_connect_handler_called = 1; -} - -DISPATCH_NOINLINE -void -_dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, - dispatch_invoke_flags_t flags) -{ - dispatch_thread_frame_s dtf; - dispatch_mach_refs_t dr; - dispatch_mach_t dm; - mach_error_t err; - unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); - _dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| - DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; - - // hide mach channel - dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); - dr = dm->ds_refs; - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - _dispatch_voucher_ktrace_dmsg_pop(dmsg); - _dispatch_voucher_debug("mach-msg[%p] adopt", dmsg->dmsg_voucher, dmsg); - (void)_dispatch_adopt_priority_and_set_voucher(dmsg->dmsg_priority, - dmsg->dmsg_voucher, adopt_flags); - dmsg->dmsg_voucher = NULL; - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - 
} - _dispatch_client_callout4(dr->dm_handler_ctxt, reason, dmsg, err, - dr->dm_handler_func); - }); - _dispatch_thread_frame_unstash(&dtf); - _dispatch_introspection_queue_item_complete(dmsg); - dispatch_release(dmsg); -} - -DISPATCH_NOINLINE -void -_dispatch_mach_barrier_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags) -{ - dispatch_thread_frame_s dtf; - dispatch_mach_t dm = dc->dc_other; - dispatch_mach_refs_t dr; - uintptr_t dc_flags = (uintptr_t)dc->dc_data; - unsigned long type = dc_type(dc); - - // hide mach channel from clients - if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { - // on the send queue, the mach channel isn't the current queue - // its target queue is the current one already - _dispatch_thread_frame_stash(&dtf); - } - dr = dm->ds_refs; - DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); - _dispatch_continuation_pop_forwarded(dc, dm->dq_override_voucher, dc_flags,{ - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, - dr->dm_handler_func); - }); - }); - if (type == DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER)) { - _dispatch_thread_frame_unstash(&dtf); - } -} - -DISPATCH_NOINLINE -void -dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - pthread_priority_t pp; - - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); - return _dispatch_mach_send_push(dm, dc, pp); -} - 
-DISPATCH_NOINLINE -void -dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - pthread_priority_t pp; - - _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_SEND_BARRIER); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - pp = _dispatch_continuation_get_override_priority(dm->_as_dq, dc); - return _dispatch_mach_send_push(dm, dc, pp); -} - -DISPATCH_NOINLINE -void -dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, - dispatch_function_t func) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); - return _dispatch_continuation_async(dm->_as_dq, dc); -} - -DISPATCH_NOINLINE -void -dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - - _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); - dc->dc_data = (void *)dc->dc_flags; - dc->dc_other = dm; - dc->do_vtable = DC_VTABLE(MACH_RECV_BARRIER); - return _dispatch_continuation_async(dm->_as_dq, dc); -} - -DISPATCH_NOINLINE -static void -_dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) -{ - dispatch_mach_refs_t dr = dm->ds_refs; - - dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } - _dispatch_client_callout4(dr->dm_handler_ctxt, - DISPATCH_MACH_CANCELED, NULL, 0, dr->dm_handler_func); - }); - dm->dm_cancel_handler_called = 1; - _dispatch_release(dm); // the retain 
is done at creation time -} - -DISPATCH_NOINLINE -void -dispatch_mach_cancel(dispatch_mach_t dm) -{ - dispatch_source_cancel(dm->_as_ds); -} - -static void -_dispatch_mach_install(dispatch_mach_t dm, pthread_priority_t pp) -{ - uint32_t disconnect_cnt; - - if (dm->ds_dkev) { - _dispatch_source_kevent_register(dm->_as_ds, pp); - } - if (dm->ds_is_direct_kevent) { - pp &= (~_PTHREAD_PRIORITY_FLAGS_MASK | - _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG | - _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular sources or queues, the DEFAULTQUEUE flag - // is used so that the priority of that channel doesn't act as a floor - // QoS for incoming messages (26761457) - dm->dq_priority = (dispatch_priority_t)pp; - } - dm->ds_is_installed = true; - if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_refs, dm_disconnect_cnt, - DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { - DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); - } -} - -void -_dispatch_mach_finalize_activation(dispatch_mach_t dm) -{ - if (dm->ds_is_direct_kevent && !dm->ds_is_installed) { - dispatch_source_t ds = dm->_as_ds; - pthread_priority_t pp = _dispatch_source_compute_kevent_priority(ds); - if (pp) _dispatch_mach_install(dm, pp); - } - - // call "super" - _dispatch_queue_finalize_activation(dm->_as_dq); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_mach_invoke2(dispatch_object_t dou, dispatch_invoke_flags_t flags, - uint64_t *owned, struct dispatch_object_s **dc_ptr DISPATCH_UNUSED) -{ - dispatch_mach_t dm = dou._dm; - dispatch_queue_t retq = NULL; - dispatch_queue_t dq = _dispatch_queue_get_current(); - - // This function performs all mach channel actions. Each action is - // responsible for verifying that it takes place on the appropriate queue. 
- // If the current queue is not the correct queue for this action, the - // correct queue will be returned and the invoke will be re-driven on that - // queue. - - // The order of tests here in invoke and in wakeup should be consistent. - - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_queue_t dkq = &_dispatch_mgr_q; - - if (dm->ds_is_direct_kevent) { - dkq = dm->do_targetq; - } - - if (slowpath(!dm->ds_is_installed)) { - // The channel needs to be installed on the kevent queue. - if (dq != dkq) { - return dkq; - } - _dispatch_mach_install(dm, _dispatch_get_defaultpriority()); - } - - if (_dispatch_queue_class_probe(dm)) { - if (dq == dm->do_targetq) { - retq = _dispatch_queue_serial_drain(dm->_as_dq, flags, owned, NULL); - } else { - retq = dm->do_targetq; - } - } - - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - - if (dr->dm_tail) { - bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && - (dm->dm_dkev || !dm->ds_is_direct_kevent)); - if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || - (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { - // The channel has pending messages to send. - if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { - return retq ? retq : &_dispatch_mgr_q; - } - dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; - if (dq != &_dispatch_mgr_q) { - send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; - } - _dispatch_mach_send_invoke(dm, flags, send_flags); - } - } else if (dqf & DSF_CANCELED) { - // The channel has been cancelled and needs to be uninstalled from the - // manager queue. After uninstallation, the cancellation handler needs - // to be delivered to the target queue. - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - return retq; - } - if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { - if (dq != &_dispatch_mgr_q) { - return retq ? 
retq : &_dispatch_mgr_q; - } - _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - if (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { - // waiting for the delivery of a deferred delete event - // or deletion didn't happen because send_invoke couldn't - // acquire the send lock - return retq; - } - } - if (!dm->dm_cancel_handler_called) { - if (dq != dm->do_targetq) { - return retq ? retq : dm->do_targetq; - } - _dispatch_mach_cancel_invoke(dm, flags); - } - } - - return retq; -} - -DISPATCH_NOINLINE -void -_dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) -{ - _dispatch_queue_class_invoke(dm, flags, _dispatch_mach_invoke2); -} - -void -_dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, - dispatch_wakeup_flags_t flags) -{ - // This function determines whether the mach channel needs to be invoked. - // The order of tests here in probe and in invoke should be consistent. - - dispatch_mach_send_refs_t dr = dm->dm_refs; - dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; - dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - - if (dm->ds_is_direct_kevent) { - dkq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - - if (!dm->ds_is_installed) { - // The channel needs to be installed on the kevent queue. 
- tq = dkq; - goto done; - } - - if (_dispatch_queue_class_probe(dm)) { - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - goto done; - } - - if (_dispatch_lock_is_locked(dr->dm_state_lock.dul_lock)) { - // Sending and uninstallation below require the send lock, the channel - // will be woken up when the lock is dropped - _dispatch_queue_reinstate_override_priority(dm, (dispatch_priority_t)pp); - goto done; - } - - if (dr->dm_tail) { - bool requires_mgr = dr->dm_needs_mgr || (dr->dm_disconnect_cnt && - (dm->dm_dkev || !dm->ds_is_direct_kevent)); - if (!(dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev)) || - (dqf & DSF_CANCELED) || dr->dm_disconnect_cnt) { - if (unlikely(requires_mgr)) { - tq = DISPATCH_QUEUE_WAKEUP_MGR; - } else { - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - } else { - // can happen when we can't send because the port is full - // but we should not lose the override - _dispatch_queue_reinstate_override_priority(dm, - (dispatch_priority_t)pp); - } - } else if (dqf & DSF_CANCELED) { - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - } else if ((dqf & DSF_STATE_MASK) != DSF_DELETED) { - // The channel needs to be uninstalled from the manager queue - tq = DISPATCH_QUEUE_WAKEUP_MGR; - } else if (!dm->dm_cancel_handler_called) { - // the cancellation handler needs to be delivered to the target - // queue. 
- tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } - } - -done: - if (tq) { - return _dispatch_queue_class_wakeup(dm->_as_dq, pp, flags, tq); - } else if (pp) { - return _dispatch_queue_class_override_drainer(dm->_as_dq, pp, flags); - } else if (flags & DISPATCH_WAKEUP_CONSUME) { - return _dispatch_release_tailcall(dm); - } -} - -#pragma mark - -#pragma mark dispatch_mach_msg_t - -dispatch_mach_msg_t -dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, - dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) -{ - if (slowpath(size < sizeof(mach_msg_header_t)) || - slowpath(destructor && !msg)) { - DISPATCH_CLIENT_CRASH(size, "Empty message"); - } - dispatch_mach_msg_t dmsg = _dispatch_alloc(DISPATCH_VTABLE(mach_msg), - sizeof(struct dispatch_mach_msg_s) + - (destructor ? 0 : size - sizeof(dmsg->dmsg_msg))); - if (destructor) { - dmsg->dmsg_msg = msg; - } else if (msg) { - memcpy(dmsg->dmsg_buf, msg, size); - } - dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dmsg->do_targetq = _dispatch_get_root_queue(_DISPATCH_QOS_CLASS_DEFAULT, - false); - dmsg->dmsg_destructor = destructor; - dmsg->dmsg_size = size; - if (msg_ptr) { - *msg_ptr = _dispatch_mach_msg_get_msg(dmsg); - } - return dmsg; -} - -void -_dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg) -{ - if (dmsg->dmsg_voucher) { - _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = NULL; - } - switch (dmsg->dmsg_destructor) { - case DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT: - break; - case DISPATCH_MACH_MSG_DESTRUCTOR_FREE: - free(dmsg->dmsg_msg); - break; - case DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE: { - mach_vm_size_t vm_size = dmsg->dmsg_size; - mach_vm_address_t vm_addr = (uintptr_t)dmsg->dmsg_msg; - (void)dispatch_assume_zero(mach_vm_deallocate(mach_task_self(), - vm_addr, vm_size)); - break; - }} -} - -static inline mach_msg_header_t* -_dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg) -{ - return dmsg->dmsg_destructor ? 
dmsg->dmsg_msg : - (mach_msg_header_t*)dmsg->dmsg_buf; -} - -mach_msg_header_t* -dispatch_mach_msg_get_msg(dispatch_mach_msg_t dmsg, size_t *size_ptr) -{ - if (size_ptr) { - *size_ptr = dmsg->dmsg_size; - } - return _dispatch_mach_msg_get_msg(dmsg); -} - -size_t -_dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(dmsg), dmsg); - offset += dsnprintf(&buf[offset], bufsiz - offset, "xrefcnt = 0x%x, " - "refcnt = 0x%x, ", dmsg->do_xref_cnt + 1, dmsg->do_ref_cnt + 1); - offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " - "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); - mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); - if (hdr->msgh_id) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "id 0x%x, ", - hdr->msgh_id); - } - if (hdr->msgh_size) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "size %u, ", - hdr->msgh_size); - } - if (hdr->msgh_bits) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "bits msgh_bits), - MACH_MSGH_BITS_REMOTE(hdr->msgh_bits)); - if (MACH_MSGH_BITS_OTHER(hdr->msgh_bits)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", o 0x%x", - MACH_MSGH_BITS_OTHER(hdr->msgh_bits)); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, ">, "); - } - if (hdr->msgh_local_port && hdr->msgh_remote_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x, " - "remote 0x%x", hdr->msgh_local_port, hdr->msgh_remote_port); - } else if (hdr->msgh_local_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "local 0x%x", - hdr->msgh_local_port); - } else if (hdr->msgh_remote_port) { - offset += dsnprintf(&buf[offset], bufsiz - offset, "remote 0x%x", - hdr->msgh_remote_port); - } else { - offset += dsnprintf(&buf[offset], bufsiz - offset, "no ports"); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, " } }"); - return offset; -} - -#pragma mark - 
-#pragma mark dispatch_mig_server - -mach_msg_return_t -dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, - dispatch_mig_callback_t callback) -{ - mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT - | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) - | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; - mach_msg_options_t tmp_options; - mig_reply_error_t *bufTemp, *bufRequest, *bufReply; - mach_msg_return_t kr = 0; - uint64_t assertion_token = 0; - unsigned int cnt = 1000; // do not stall out serial queues - boolean_t demux_success; - bool received = false; - size_t rcv_size = maxmsgsz + MAX_TRAILER_SIZE; - - bufRequest = alloca(rcv_size); - bufRequest->RetCode = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); - p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - - bufReply = alloca(rcv_size); - bufReply->Head.msgh_size = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); - p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } - -#if DISPATCH_DEBUG - options |= MACH_RCV_LARGE; // rdar://problem/8422992 -#endif - tmp_options = options; - // XXX FIXME -- change this to not starve out the target queue - for (;;) { - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { - options &= ~MACH_RCV_MSG; - tmp_options &= ~MACH_RCV_MSG; - - if (!(tmp_options & MACH_SEND_MSG)) { - goto out; - } - } - kr = mach_msg(&bufReply->Head, tmp_options, bufReply->Head.msgh_size, - (mach_msg_size_t)rcv_size, (mach_port_t)ds->ds_ident_hack, 0,0); - - tmp_options = options; - - if (slowpath(kr)) { - switch (kr) { - case MACH_SEND_INVALID_DEST: - case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } - break; - case MACH_RCV_TIMED_OUT: - // Don't 
return an error if a message was sent this time or - // a message was successfully received previously - // rdar://problems/7363620&7791738 - if(bufReply->Head.msgh_remote_port || received) { - kr = MACH_MSG_SUCCESS; - } - break; - case MACH_RCV_INVALID_NAME: - break; -#if DISPATCH_DEBUG - case MACH_RCV_TOO_LARGE: - // receive messages that are too large and log their id and size - // rdar://problem/8422992 - tmp_options &= ~MACH_RCV_LARGE; - size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; - void *large_buf = malloc(large_size); - if (large_buf) { - rcv_size = large_size; - bufReply = large_buf; - } - if (!mach_msg(&bufReply->Head, tmp_options, 0, - (mach_msg_size_t)rcv_size, - (mach_port_t)ds->ds_ident_hack, 0, 0)) { - _dispatch_log("BUG in libdispatch client: " - "dispatch_mig_server received message larger than " - "requested size %zd: id = 0x%x, size = %d", - maxmsgsz, bufReply->Head.msgh_id, - bufReply->Head.msgh_size); - } - if (large_buf) { - free(large_buf); - } - // fall through -#endif - default: - _dispatch_bug_mach_client( - "dispatch_mig_server: mach_msg() failed", kr); - break; - } - goto out; - } - - if (!(tmp_options & MACH_RCV_MSG)) { - goto out; - } - - if (assertion_token) { -#if DISPATCH_USE_IMPORTANCE_ASSERTION - int r = proc_importance_assertion_complete(assertion_token); - (void)dispatch_assume_zero(r); -#endif - assertion_token = 0; - } - received = true; - - bufTemp = bufRequest; - bufRequest = bufReply; - bufReply = bufTemp; - -#if DISPATCH_USE_IMPORTANCE_ASSERTION -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wdeprecated-declarations" - int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, - NULL, &assertion_token); - if (r && slowpath(r != EIO)) { - (void)dispatch_assume_zero(r); - } -#pragma clang diagnostic pop -#endif - _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); - demux_success = callback(&bufRequest->Head, &bufReply->Head); - - if (!demux_success) { - // 
destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode - // is present - if (slowpath(bufReply->RetCode)) { - if (bufReply->RetCode == MIG_NO_REPLY) { - continue; - } - - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } - } - - if (bufReply->Head.msgh_remote_port) { - tmp_options |= MACH_SEND_MSG; - if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != - MACH_MSG_TYPE_MOVE_SEND_ONCE) { - tmp_options |= MACH_SEND_TIMEOUT; - } - } - } - -out: - if (assertion_token) { -#if DISPATCH_USE_IMPORTANCE_ASSERTION - int r = proc_importance_assertion_complete(assertion_token); - (void)dispatch_assume_zero(r); -#endif - } - - return kr; -} - -#endif /* HAVE_MACH */ - -#pragma mark - -#pragma mark dispatch_source_debug - -DISPATCH_NOINLINE -static const char * -_evfiltstr(short filt) -{ - switch (filt) { -#define _evfilt2(f) case (f): return #f - _evfilt2(EVFILT_READ); - _evfilt2(EVFILT_WRITE); - _evfilt2(EVFILT_AIO); - _evfilt2(EVFILT_VNODE); - _evfilt2(EVFILT_PROC); - _evfilt2(EVFILT_SIGNAL); - _evfilt2(EVFILT_TIMER); -#if HAVE_MACH - _evfilt2(EVFILT_MACHPORT); - _evfilt2(DISPATCH_EVFILT_MACH_NOTIFICATION); -#endif - _evfilt2(EVFILT_FS); - _evfilt2(EVFILT_USER); -#ifdef EVFILT_VM - _evfilt2(EVFILT_VM); -#endif -#ifdef EVFILT_SOCK - _evfilt2(EVFILT_SOCK); -#endif -#ifdef EVFILT_MEMORYSTATUS - _evfilt2(EVFILT_MEMORYSTATUS); -#endif - - _evfilt2(DISPATCH_EVFILT_TIMER); - _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); - _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); - default: - return "EVFILT_missing"; - } -} - -#if DISPATCH_DEBUG -static const char * -_evflagstr2(uint16_t *flagsp) -{ -#define _evflag2(f) \ - if ((*flagsp & (f)) == (f) && (f)) { \ - *flagsp &= ~(f); \ - return #f "|"; \ - } - _evflag2(EV_ADD); - 
_evflag2(EV_DELETE); - _evflag2(EV_ENABLE); - _evflag2(EV_DISABLE); - _evflag2(EV_ONESHOT); - _evflag2(EV_CLEAR); - _evflag2(EV_RECEIPT); - _evflag2(EV_DISPATCH); - _evflag2(EV_UDATA_SPECIFIC); -#ifdef EV_POLL - _evflag2(EV_POLL); -#endif -#ifdef EV_OOBAND - _evflag2(EV_OOBAND); -#endif - _evflag2(EV_ERROR); - _evflag2(EV_EOF); - _evflag2(EV_VANISHED); - *flagsp = 0; - return "EV_UNKNOWN "; -} - -DISPATCH_NOINLINE -static const char * -_evflagstr(uint16_t flags, char *str, size_t strsize) -{ - str[0] = 0; - while (flags) { - strlcat(str, _evflagstr2(&flags), strsize); - } - size_t sz = strlen(str); - if (sz) str[sz-1] = 0; - return str; -} -#endif - -static size_t -_dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - dispatch_queue_t target = ds->do_targetq; - return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%lx, " - "mask = 0x%lx, pending_data = 0x%lx, registered = %d, " - "armed = %d, deleted = %d%s, canceled = %d, ", - target && target->dq_label ? target->dq_label : "", target, - ds->ds_ident_hack, ds->ds_pending_data_mask, ds->ds_pending_data, - ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), - (bool)(ds->dq_atomic_flags & DSF_DELETED), - (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? 
" (pending)" : "", - (bool)(ds->dq_atomic_flags & DSF_CANCELED)); -} - -static size_t -_dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - dispatch_source_refs_t dr = ds->ds_refs; - return dsnprintf(buf, bufsiz, "timer = { target = 0x%llx, deadline = 0x%llx" - ", last_fire = 0x%llx, interval = 0x%llx, flags = 0x%lx }, ", - (unsigned long long)ds_timer(dr).target, - (unsigned long long)ds_timer(dr).deadline, - (unsigned long long)ds_timer(dr).last_fire, - (unsigned long long)ds_timer(dr).interval, ds_timer(dr).flags); -} - -size_t -_dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(ds), ds); - offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); - offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); - if (ds->ds_is_timer) { - offset += _dispatch_timer_debug_attr(ds, &buf[offset], bufsiz - offset); - } - const char *filter; - if (!ds->ds_dkev) { - filter = "????"; - } else if (ds->ds_is_custom_source) { - filter = _evfiltstr((int16_t)(uintptr_t)ds->ds_dkev); - } else { - filter = _evfiltstr(ds->ds_dkev->dk_kevent.filter); - } - offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " - "filter = %s }", ds->ds_dkev, ds->ds_is_direct_kevent ? " (direct)" - : "", filter); - return offset; -} - -#if HAVE_MACH -static size_t -_dispatch_mach_debug_attr(dispatch_mach_t dm, char* buf, size_t bufsiz) -{ - dispatch_queue_t target = dm->do_targetq; - return dsnprintf(buf, bufsiz, "target = %s[%p], receive = 0x%x, " - "send = 0x%x, send-possible = 0x%x%s, checkin = 0x%x%s, " - "send state = %016llx, disconnected = %d, canceled = %d ", - target && target->dq_label ? 
target->dq_label : "", target, - dm->ds_dkev ?(mach_port_t)dm->ds_dkev->dk_kevent.ident:0, - dm->dm_refs->dm_send, - dm->dm_dkev ?(mach_port_t)dm->dm_dkev->dk_kevent.ident:0, - dm->dm_dkev && DISPATCH_MACH_NOTIFICATION_ARMED(dm->dm_dkev) ? - " (armed)" : "", dm->dm_refs->dm_checkin_port, - dm->dm_refs->dm_checkin ? " (pending)" : "", - dm->dm_refs->dm_state, dm->dm_refs->dm_disconnect_cnt, - (bool)(dm->dq_atomic_flags & DSF_CANCELED)); -} - -size_t -_dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) -{ - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label : - dx_kind(dm), dm); - offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); - offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); - offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); - return offset; -} -#endif // HAVE_MACH - -#if DISPATCH_DEBUG -DISPATCH_NOINLINE -static void -dispatch_kevent_debug(const char *verb, const _dispatch_kevent_qos_s *kev, - int i, int n, const char *function, unsigned int line) -{ - char flagstr[256]; - char i_n[31]; - - if (n > 1) { - snprintf(i_n, sizeof(i_n), "%d/%d ", i + 1, n); - } else { - i_n[0] = '\0'; - } -#if DISPATCH_USE_KEVENT_QOS - _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " - "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " - "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, - _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, - kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], - function, line); -#else - _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " - "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " - "ext[0] = 0x%llx, ext[1] = 0x%llx }: %s #%u", verb, kev, i_n, - 
kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, -#ifndef IGNORE_KEVENT64_EXT - kev->ext[0], kev->ext[1], -#else - 0ull, 0ull, -#endif - function, line); -#endif -} - -static void -_dispatch_kevent_debugger2(void *context) -{ - struct sockaddr sa; - socklen_t sa_len = sizeof(sa); - int c, fd = (int)(long)context; - unsigned int i; - dispatch_kevent_t dk; - dispatch_source_t ds; - dispatch_source_refs_t dr; - FILE *debug_stream; - - c = accept(fd, &sa, &sa_len); - if (c == -1) { - if (errno != EAGAIN) { - (void)dispatch_assume_zero(errno); - } - return; - } -#if 0 - int r = fcntl(c, F_SETFL, 0); // disable non-blocking IO - if (r == -1) { - (void)dispatch_assume_zero(errno); - } -#endif - debug_stream = fdopen(c, "a"); - if (!dispatch_assume(debug_stream)) { - close(c); - return; - } - - fprintf(debug_stream, "HTTP/1.0 200 OK\r\n"); - fprintf(debug_stream, "Content-type: text/html\r\n"); - fprintf(debug_stream, "Pragma: nocache\r\n"); - fprintf(debug_stream, "\r\n"); - fprintf(debug_stream, "\n"); - fprintf(debug_stream, "PID %u\n", getpid()); - fprintf(debug_stream, "\n
        \n"); - - for (i = 0; i < DSL_HASH_SIZE; i++) { - if (TAILQ_EMPTY(&_dispatch_sources[i])) { - continue; - } - TAILQ_FOREACH(dk, &_dispatch_sources[i], dk_list) { - fprintf(debug_stream, "\t
      • DK %p ident %lu filter %s flags " - "0x%hx fflags 0x%x data 0x%lx udata %p\n", - dk, (unsigned long)dk->dk_kevent.ident, - _evfiltstr(dk->dk_kevent.filter), dk->dk_kevent.flags, - dk->dk_kevent.fflags, (unsigned long)dk->dk_kevent.data, - (void*)dk->dk_kevent.udata); - fprintf(debug_stream, "\t\t
          \n"); - TAILQ_FOREACH(dr, &dk->dk_sources, dr_list) { - ds = _dispatch_source_from_refs(dr); - fprintf(debug_stream, "\t\t\t
        • DS %p refcnt 0x%x state " - "0x%llx data 0x%lx mask 0x%lx flags 0x%x
        • \n", - ds, ds->do_ref_cnt + 1, ds->dq_state, - ds->ds_pending_data, ds->ds_pending_data_mask, - ds->dq_atomic_flags); - if (_dq_state_is_enqueued(ds->dq_state)) { - dispatch_queue_t dq = ds->do_targetq; - fprintf(debug_stream, "\t\t
          DQ: %p refcnt 0x%x state " - "0x%llx label: %s\n", dq, dq->do_ref_cnt + 1, - dq->dq_state, dq->dq_label ?: ""); - } - } - fprintf(debug_stream, "\t\t
        \n"); - fprintf(debug_stream, "\t
      • \n"); - } - } - fprintf(debug_stream, "
      \n\n\n"); - fflush(debug_stream); - fclose(debug_stream); -} - -static void -_dispatch_kevent_debugger2_cancel(void *context) -{ - int ret, fd = (int)(long)context; - - ret = close(fd); - if (ret != -1) { - (void)dispatch_assume_zero(errno); - } -} - -static void -_dispatch_kevent_debugger(void *context DISPATCH_UNUSED) -{ - union { - struct sockaddr_in sa_in; - struct sockaddr sa; - } sa_u = { - .sa_in = { - .sin_family = AF_INET, - .sin_addr = { htonl(INADDR_LOOPBACK), }, - }, - }; - dispatch_source_t ds; - const char *valstr; - int val, r, fd, sock_opt = 1; - socklen_t slen = sizeof(sa_u); - -#ifndef __linux__ - if (issetugid()) { - return; - } -#endif - valstr = getenv("LIBDISPATCH_DEBUGGER"); - if (!valstr) { - return; - } - val = atoi(valstr); - if (val == 2) { - sa_u.sa_in.sin_addr.s_addr = 0; - } - fd = socket(PF_INET, SOCK_STREAM, 0); - if (fd == -1) { - (void)dispatch_assume_zero(errno); - return; - } - r = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&sock_opt, - (socklen_t) sizeof sock_opt); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#if 0 - r = fcntl(fd, F_SETFL, O_NONBLOCK); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } -#endif - r = bind(fd, &sa_u.sa, sizeof(sa_u)); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = listen(fd, SOMAXCONN); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - r = getsockname(fd, &sa_u.sa, &slen); - if (r == -1) { - (void)dispatch_assume_zero(errno); - goto out_bad; - } - - ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, - &_dispatch_mgr_q); - if (dispatch_assume(ds)) { - _dispatch_log("LIBDISPATCH: debug port: %hu", - (in_port_t)ntohs(sa_u.sa_in.sin_port)); - - /* ownership of fd transfers to ds */ - dispatch_set_context(ds, (void *)(long)fd); - dispatch_source_set_event_handler_f(ds, _dispatch_kevent_debugger2); - dispatch_source_set_cancel_handler_f(ds, - 
_dispatch_kevent_debugger2_cancel); - dispatch_resume(ds); - - return; - } -out_bad: - close(fd); -} - -#if HAVE_MACH - -#ifndef MACH_PORT_TYPE_SPREQUEST -#define MACH_PORT_TYPE_SPREQUEST 0x40000000 -#endif - -DISPATCH_NOINLINE -void -dispatch_debug_machport(mach_port_t name, const char* str) -{ - mach_port_type_t type; - mach_msg_bits_t ns = 0, nr = 0, nso = 0, nd = 0; - unsigned int dnreqs = 0, dnrsiz; - kern_return_t kr = mach_port_type(mach_task_self(), name, &type); - if (kr) { - _dispatch_log("machport[0x%08x] = { error(0x%x) \"%s\" }: %s", name, - kr, mach_error_string(kr), str); - return; - } - if (type & MACH_PORT_TYPE_SEND) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_SEND, &ns)); - } - if (type & MACH_PORT_TYPE_SEND_ONCE) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_SEND_ONCE, &nso)); - } - if (type & MACH_PORT_TYPE_DEAD_NAME) { - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_DEAD_NAME, &nd)); - } - if (type & (MACH_PORT_TYPE_RECEIVE|MACH_PORT_TYPE_SEND)) { - kr = mach_port_dnrequest_info(mach_task_self(), name, &dnrsiz, &dnreqs); - if (kr != KERN_INVALID_RIGHT) (void)dispatch_assume_zero(kr); - } - if (type & MACH_PORT_TYPE_RECEIVE) { - mach_port_status_t status = { .mps_pset = 0, }; - mach_msg_type_number_t cnt = MACH_PORT_RECEIVE_STATUS_COUNT; - (void)dispatch_assume_zero(mach_port_get_refs(mach_task_self(), name, - MACH_PORT_RIGHT_RECEIVE, &nr)); - (void)dispatch_assume_zero(mach_port_get_attributes(mach_task_self(), - name, MACH_PORT_RECEIVE_STATUS, (void*)&status, &cnt)); - _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " - "dnreqs(%03u) spreq(%s) nsreq(%s) pdreq(%s) srights(%s) " - "sorights(%03u) qlim(%03u) msgcount(%03u) mkscount(%03u) " - "seqno(%03u) }: %s", name, nr, ns, nso, nd, dnreqs, - type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", - status.mps_nsrequest ? "Y":"N", status.mps_pdrequest ? 
"Y":"N", - status.mps_srights ? "Y":"N", status.mps_sorights, - status.mps_qlimit, status.mps_msgcount, status.mps_mscount, - status.mps_seqno, str); - } else if (type & (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE| - MACH_PORT_TYPE_DEAD_NAME)) { - _dispatch_log("machport[0x%08x] = { R(%03u) S(%03u) SO(%03u) D(%03u) " - "dnreqs(%03u) spreq(%s) }: %s", name, nr, ns, nso, nd, dnreqs, - type & MACH_PORT_TYPE_SPREQUEST ? "Y":"N", str); - } else { - _dispatch_log("machport[0x%08x] = { type(0x%08x) }: %s", name, type, - str); - } -} - -#endif // HAVE_MACH - -#endif // DISPATCH_DEBUG diff --git a/src/source_internal.h b/src/source_internal.h index 41b6d11a0..55b81e787 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -32,168 +32,45 @@ #include // for HeaderDoc #endif -#define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 4) -#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 4) - -#if HAVE_MACH -// NOTE: dispatch_source_mach_send_flags_t and dispatch_source_mach_recv_flags_t -// bit values must not overlap as they share the same kevent fflags ! - -/*! - * @enum dispatch_source_mach_send_flags_t - * - * @constant DISPATCH_MACH_SEND_DELETED - * Port-deleted notification. Disabled for source registration. - */ -enum { - DISPATCH_MACH_SEND_DELETED = 0x4, -}; -/*! - * @enum dispatch_source_mach_recv_flags_t - * - * @constant DISPATCH_MACH_RECV_MESSAGE - * Receive right has pending messages - * - * @constant DISPATCH_MACH_RECV_MESSAGE_DIRECT - * Receive messages from receive right directly via kevent64() - * - * @constant DISPATCH_MACH_RECV_NO_SENDERS - * Receive right has no more senders. 
TODO - */ -enum { - DISPATCH_MACH_RECV_MESSAGE = 0x2, - DISPATCH_MACH_RECV_MESSAGE_DIRECT = 0x10, - DISPATCH_MACH_RECV_MESSAGE_DIRECT_ONCE = 0x20, - DISPATCH_MACH_RECV_NO_SENDERS = 0x40, -}; -#endif // HAVE_MACH - enum { /* DISPATCH_TIMER_STRICT 0x1 */ /* DISPATCH_TIMER_BACKGROUND = 0x2, */ - DISPATCH_TIMER_WALL_CLOCK = 0x4, + DISPATCH_TIMER_CLOCK_MACH = 0x4, DISPATCH_TIMER_INTERVAL = 0x8, - DISPATCH_TIMER_WITH_AGGREGATE = 0x10, + DISPATCH_TIMER_AFTER = 0x10, /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ - DISPATCH_TIMER_AFTER = 0x40, }; -#define DISPATCH_TIMER_QOS_NORMAL 0u -#define DISPATCH_TIMER_QOS_CRITICAL 1u -#define DISPATCH_TIMER_QOS_BACKGROUND 2u -#define DISPATCH_TIMER_QOS_COUNT (DISPATCH_TIMER_QOS_BACKGROUND + 1) -#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 0x3ul) - -#define DISPATCH_TIMER_KIND_WALL 0u -#define DISPATCH_TIMER_KIND_MACH 1u -#define DISPATCH_TIMER_KIND_COUNT (DISPATCH_TIMER_KIND_MACH + 1) -#define DISPATCH_TIMER_KIND(tidx) ((uintptr_t)(tidx) & 0x1ul) - -#define DISPATCH_TIMER_INDEX(kind, qos) ((qos) << 1 | (kind)) -#define DISPATCH_TIMER_INDEX_DISARM \ - DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) -#define DISPATCH_TIMER_INDEX_COUNT (DISPATCH_TIMER_INDEX_DISARM + 1) -#define DISPATCH_TIMER_IDENT(flags) ({ unsigned long f = (flags); \ - DISPATCH_TIMER_INDEX(f & DISPATCH_TIMER_WALL_CLOCK ? \ - DISPATCH_TIMER_KIND_WALL : DISPATCH_TIMER_KIND_MACH, \ - f & DISPATCH_TIMER_STRICT ? DISPATCH_TIMER_QOS_CRITICAL : \ - f & DISPATCH_TIMER_BACKGROUND ? 
DISPATCH_TIMER_QOS_BACKGROUND : \ - DISPATCH_TIMER_QOS_NORMAL); }) - -struct dispatch_kevent_s { - TAILQ_ENTRY(dispatch_kevent_s) dk_list; - TAILQ_HEAD(, dispatch_source_refs_s) dk_sources; - _dispatch_kevent_qos_s dk_kevent; -}; - -typedef struct dispatch_kevent_s *dispatch_kevent_t; - -typedef typeof(((dispatch_kevent_t)NULL)->dk_kevent.udata) _dispatch_kevent_qos_udata_t; - -#define DISPATCH_KEV_CUSTOM_ADD ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_ADD) -#define DISPATCH_KEV_CUSTOM_OR ((dispatch_kevent_t)DISPATCH_EVFILT_CUSTOM_OR) - -struct dispatch_source_type_s { - _dispatch_kevent_qos_s ke; - uint64_t mask; - void (*init)(dispatch_source_t ds, dispatch_source_type_t type, - uintptr_t handle, unsigned long mask, dispatch_queue_t q); -}; - -struct dispatch_timer_source_s { - uint64_t target; - uint64_t deadline; - uint64_t last_fire; - uint64_t interval; - uint64_t leeway; - unsigned long flags; // dispatch_timer_flags_t - unsigned long missed; -}; - -enum { - DS_EVENT_HANDLER = 0, - DS_CANCEL_HANDLER, - DS_REGISTN_HANDLER, -}; - -// Source state which may contain references to the source object -// Separately allocated so that 'leaks' can see sources -typedef struct dispatch_source_refs_s { - TAILQ_ENTRY(dispatch_source_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_source_t - dispatch_continuation_t volatile ds_handler[3]; -} *dispatch_source_refs_t; - -typedef struct dispatch_timer_source_refs_s { - struct dispatch_source_refs_s _ds_refs; - struct dispatch_timer_source_s _ds_timer; - TAILQ_ENTRY(dispatch_timer_source_refs_s) dt_list; -} *dispatch_timer_source_refs_t; - -typedef struct dispatch_timer_source_aggregate_refs_s { - struct dispatch_timer_source_refs_s _dsa_refs; - TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dra_list; - TAILQ_ENTRY(dispatch_timer_source_aggregate_refs_s) dta_list; -} *dispatch_timer_source_aggregate_refs_t; - -#define _dispatch_ptr2wref(ptr) (~(uintptr_t)(ptr)) -#define _dispatch_wref2ptr(ref) 
((void*)~(ref)) -#define _dispatch_source_from_refs(dr) \ - ((dispatch_source_t)_dispatch_wref2ptr((dr)->dr_source_wref)) -#define ds_timer(dr) \ - (((dispatch_timer_source_refs_t)(dr))->_ds_timer) -#define ds_timer_aggregate(ds) \ - ((dispatch_timer_aggregate_t)((ds)->dq_specific_q)) - DISPATCH_ALWAYS_INLINE static inline unsigned int -_dispatch_source_timer_idx(dispatch_source_refs_t dr) +_dispatch_source_timer_idx(dispatch_unote_t du) { - return DISPATCH_TIMER_IDENT(ds_timer(dr).flags); + uint32_t clock, qos = 0, fflags = du._dt->du_fflags; + + dispatch_assert(DISPATCH_CLOCK_MACH == 1); + dispatch_assert(DISPATCH_CLOCK_WALL == 0); + clock = (fflags & DISPATCH_TIMER_CLOCK_MACH) / DISPATCH_TIMER_CLOCK_MACH; + +#if DISPATCH_HAVE_TIMER_QOS + dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL); + dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND); + qos = fflags & (DISPATCH_TIMER_STRICT | DISPATCH_TIMER_BACKGROUND); + // flags are normalized so this should never happen + dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); +#endif + + return DISPATCH_TIMER_INDEX(clock, qos); } #define _DISPATCH_SOURCE_HEADER(refs) \ DISPATCH_QUEUE_HEADER(refs); \ - /* LP64: fills 32bit hole in QUEUE_HEADER */ \ unsigned int \ - ds_is_level:1, \ - ds_is_adder:1, \ ds_is_installed:1, \ - ds_is_direct_kevent:1, \ - ds_is_custom_source:1, \ - ds_needs_rearm:1, \ - ds_is_timer:1, \ - ds_vmpressure_override:1, \ - ds_memorypressure_override:1, \ - dm_handler_is_block:1, \ + dm_needs_mgr:1, \ dm_connect_handler_called:1, \ - dm_cancel_handler_called:1; \ - dispatch_kevent_t ds_dkev; \ - dispatch_##refs##_refs_t ds_refs; \ - unsigned long ds_pending_data_mask; + dm_uninstalled:1, \ + dm_cancel_handler_called:1, \ + dm_is_xpc:1 #define DISPATCH_SOURCE_HEADER(refs) \ struct dispatch_source_s _as_ds[0]; \ @@ -202,150 +79,53 @@ _dispatch_source_timer_idx(dispatch_source_refs_t dr) DISPATCH_CLASS_DECL_BARE(source); 
_OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object); -#if DISPATCH_PURE_C +#ifndef __cplusplus struct dispatch_source_s { _DISPATCH_SOURCE_HEADER(source); - unsigned long ds_ident_hack; - unsigned long ds_data; - unsigned long ds_pending_data; -} DISPATCH_QUEUE_ALIGN; -#endif - -#if HAVE_MACH -// Mach channel state which may contain references to the channel object -// layout must match dispatch_source_refs_s -struct dispatch_mach_refs_s { - TAILQ_ENTRY(dispatch_mach_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_mach_handler_function_t dm_handler_func; - void *dm_handler_ctxt; -}; -typedef struct dispatch_mach_refs_s *dispatch_mach_refs_t; - -struct dispatch_mach_reply_refs_s { - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_kevent_t dmr_dkev; - void *dmr_ctxt; - mach_port_t dmr_reply; - dispatch_priority_t dmr_priority; - voucher_t dmr_voucher; - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; -}; -typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; - -#define _DISPATCH_MACH_STATE_UNUSED_MASK_2 0xff00000000000000ull -#define DISPATCH_MACH_STATE_OVERRIDE_MASK 0x00ffff0000000000ull -#define _DISPATCH_MACH_STATE_UNUSED_MASK_1 0x000000f000000000ull -#define DISPATCH_MACH_STATE_DIRTY 0x0000000800000000ull -#define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000400000000ull -#define _DISPATCH_MACH_STATE_UNUSED_MASK_0 0x0000000200000000ull -#define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000000100000000ull -#define DISPATCH_MACH_STATE_UNLOCK_MASK 0x00000000ffffffffull + uint64_t ds_data DISPATCH_ATOMIC64_ALIGN; + uint64_t ds_pending_data DISPATCH_ATOMIC64_ALIGN; +} DISPATCH_ATOMIC64_ALIGN; -struct dispatch_mach_send_refs_s { - TAILQ_ENTRY(dispatch_mach_send_refs_s) dr_list; - uintptr_t dr_source_wref; // "weak" backref to dispatch_mach_t - dispatch_mach_msg_t dm_checkin; - TAILQ_HEAD(, 
dispatch_mach_reply_refs_s) dm_replies; - dispatch_unfair_lock_s dm_replies_lock; -#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) -#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) -#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) - uint32_t volatile dm_disconnect_cnt; - union { - uint64_t volatile dm_state; - DISPATCH_STRUCT_LITTLE_ENDIAN_2( - dispatch_unfair_lock_s dm_state_lock, - uint32_t dm_state_bits - ); - }; - unsigned int dm_needs_mgr:1; - struct dispatch_object_s *volatile dm_tail; - struct dispatch_object_s *volatile dm_head; - mach_port_t dm_send, dm_checkin_port; -}; -typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; +// Extracts source data from the ds_data field +#define DISPATCH_SOURCE_GET_DATA(d) ((d) & 0xFFFFFFFF) -DISPATCH_CLASS_DECL(mach); -#if DISPATCH_PURE_C -struct dispatch_mach_s { - DISPATCH_SOURCE_HEADER(mach); - dispatch_kevent_t dm_dkev; - dispatch_mach_send_refs_t dm_refs; -} DISPATCH_QUEUE_ALIGN; -#endif +// Extracts status from the ds_data field +#define DISPATCH_SOURCE_GET_STATUS(d) ((d) >> 32) -DISPATCH_CLASS_DECL(mach_msg); -struct dispatch_mach_msg_s { - DISPATCH_OBJECT_HEADER(mach_msg); - union { - mach_msg_option_t dmsg_options; - mach_error_t dmsg_error; - }; - mach_port_t dmsg_reply; - pthread_priority_t dmsg_priority; - voucher_t dmsg_voucher; - dispatch_mach_msg_destructor_t dmsg_destructor; - size_t dmsg_size; - union { - mach_msg_header_t *dmsg_msg; - char dmsg_buf[0]; - }; -}; -#endif // HAVE_MACH +// Combine data and status for the ds_data field +#define DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status) \ + ((((uint64_t)(status)) << 32) | (data)) -extern const struct dispatch_source_type_s _dispatch_source_type_after; +#endif // __cplusplus -#if TARGET_OS_EMBEDDED -#define DSL_HASH_SIZE 64u // must be a power of two -#else -#define DSL_HASH_SIZE 256u // must be a power of two -#endif - -dispatch_source_t 
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc); +void _dispatch_source_refs_register(dispatch_source_t ds, + dispatch_wlh_t wlh, dispatch_priority_t bp); +void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options); void _dispatch_source_xref_dispose(dispatch_source_t ds); -void _dispatch_source_dispose(dispatch_source_t ds); -void _dispatch_source_finalize_activation(dispatch_source_t ds); -void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_flags_t flags); -void _dispatch_source_wakeup(dispatch_source_t ds, pthread_priority_t pp, +void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); +void _dispatch_source_finalize_activation(dispatch_source_t ds, + bool *allow_resume); +void _dispatch_source_invoke(dispatch_source_t ds, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, + uintptr_t data, uintptr_t status, pthread_priority_t pp); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); -void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); -void _dispatch_source_set_event_handler_continuation(dispatch_source_t ds, - dispatch_continuation_t dc); + DISPATCH_EXPORT // for firehose server void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, unsigned long val); -#if HAVE_MACH -void _dispatch_mach_dispose(dispatch_mach_t dm); -void _dispatch_mach_finalize_activation(dispatch_mach_t dm); -void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags); -void _dispatch_mach_wakeup(dispatch_mach_t dm, pthread_priority_t pp, +void _dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou, + dispatch_qos_t qos); +void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, 
dispatch_wakeup_flags_t flags); -size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); - -void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg); -void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, +void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, - size_t bufsiz); - -void _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); -void _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, - dispatch_invoke_flags_t flags); -#endif // HAVE_MACH - -void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, pthread_priority_t pp, - dispatch_wakeup_flags_t flags); -void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_flags_t flags); #if DISPATCH_USE_KEVENT_WORKQUEUE -void _dispatch_kevent_worker_thread(_dispatch_kevent_qos_s **events, +void _dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents); -#endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Block.swift b/src/swift/Block.swift index c1266cea1..d4cae3c60 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -37,24 +37,19 @@ public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { @available(OSX 10.10, iOS 8.0, *) public class DispatchWorkItem { internal var _block: _DispatchBlock - internal var _group: DispatchGroup? - public init(group: DispatchGroup? 
= nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @convention(block) () -> ()) { + public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue), qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } - // Used by DispatchQueue.synchronously to provide a @noescape path through + // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. - internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: @noescape () -> ()) { + internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock) } public func perform() { - if let g = _group { - g.enter() - defer { g.leave() } - } _block() } @@ -63,14 +58,19 @@ public class DispatchWorkItem { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_block_wait(_block, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_block_wait(_block, wallTimeout.rawValue) == 0 ? 
.success : .timedOut } - public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute: @convention(block) () -> Void) { + public func notify( + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + queue: DispatchQueue, + execute: @escaping @convention(block) () -> ()) + { if qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: execute) dispatch_block_notify(_block, queue.__wrapped, item._block) @@ -92,17 +92,6 @@ public class DispatchWorkItem { } } -@available(OSX 10.10, iOS 8.0, *) -public extension DispatchWorkItem { - @available(*, deprecated, renamed: "DispatchWorkItem.wait(self:wallTimeout:)") - public func wait(timeout: DispatchWallTime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } - } -} - /// The dispatch_block_t typealias is different from usual closures in that it /// uses @convention(block). 
This is to avoid unnecessary bridging between /// C blocks and Swift closures, which interferes with dispatch APIs that depend @@ -111,4 +100,4 @@ internal typealias _DispatchBlock = @convention(block) () -> Void internal typealias dispatch_block_t = @convention(block) () -> Void @_silgen_name("_swift_dispatch_block_create_noescape") -internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: @noescape () -> ()) -> _DispatchBlock +internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: () -> ()) -> _DispatchBlock diff --git a/src/swift/Data.swift b/src/swift/Data.swift index 0d21e27c0..1e7350463 100644 --- a/src/swift/Data.swift +++ b/src/swift/Data.swift @@ -19,7 +19,6 @@ public struct DispatchData : RandomAccessCollection { public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty()) -#if false /* FIXME: dragging in _TMBO (Objective-C) */ public enum Deallocator { /// Use `free` case free @@ -28,9 +27,15 @@ public struct DispatchData : RandomAccessCollection { case unmap /// A custom deallocator - case custom(DispatchQueue?, @convention(block) () -> Void) - - private var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { + // FIXME: Want @convention(block) here to minimize the overhead of + // doing the conversion (once per custom enum instance instead + // of once per call to DispatchData.init using the enum instance). + // However, adding the annotation here results in Data.o containing + // a reference to _TMBO (opaque metadata for Builtin.UnknownObject) + // which is only made available on platforms with Objective-C. 
+ case custom(DispatchQueue?, () -> Void) + + fileprivate var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { switch self { case .free: return (nil, _dispatch_data_destructor_free()) case .unmap: return (nil, _dispatch_data_destructor_munmap()) @@ -38,55 +43,94 @@ public struct DispatchData : RandomAccessCollection { } } } -#endif - internal var __wrapped: dispatch_data_t + + internal var __wrapped: __DispatchData /// Initialize a `Data` with copied memory content. /// /// - parameter bytes: A pointer to the memory. It will be copied. - /// - parameter count: The number of bytes to copy. + @available(swift, deprecated: 4, message: "Use init(bytes: UnsafeRawBufferPointer) instead") public init(bytes buffer: UnsafeBufferPointer) { - __wrapped = dispatch_data_create( - buffer.baseAddress!, buffer.count, nil, _dispatch_data_destructor_default()) + let d = buffer.baseAddress == nil ? _swift_dispatch_data_empty() + : dispatch_data_create(buffer.baseAddress!, buffer.count, nil, + _dispatch_data_destructor_default()) + self.init(data: d) + } + + /// Initialize a `Data` with copied memory content. + /// + /// - parameter bytes: A pointer to the memory. It will be copied. + /// - parameter count: The number of bytes to copy. + public init(bytes buffer: UnsafeRawBufferPointer) { + let d = buffer.baseAddress == nil ? _swift_dispatch_data_empty() + : dispatch_data_create(buffer.baseAddress!, buffer.count, nil, + _dispatch_data_destructor_default()) + self.init(data: d) } -#if false /* FIXME: dragging in _TMBO (Objective-C) */ + /// Initialize a `Data` without copying the bytes. /// - /// - parameter bytes: A pointer to the bytes. - /// - parameter count: The size of the bytes. + /// - parameter bytes: A buffer pointer containing the data. /// - parameter deallocator: Specifies the mechanism to free the indicated buffer. 
+ @available(swift, deprecated: 4, message: "Use init(bytesNoCopy: UnsafeRawBufferPointer, deallocater: Deallocator) instead") public init(bytesNoCopy bytes: UnsafeBufferPointer, deallocator: Deallocator = .free) { let (q, b) = deallocator._deallocator + let d = bytes.baseAddress == nil ? _swift_dispatch_data_empty() + : dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + self.init(data: d) + } - __wrapped = dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + /// Initialize a `Data` without copying the bytes. + /// + /// - parameter bytes: A pointer to the bytes. + /// - parameter count: The size of the bytes. + /// - parameter deallocator: Specifies the mechanism to free the indicated buffer. + public init(bytesNoCopy bytes: UnsafeRawBufferPointer, deallocator: Deallocator = .free) { + let (q, b) = deallocator._deallocator + let d = bytes.baseAddress == nil ? _swift_dispatch_data_empty() + : dispatch_data_create(bytes.baseAddress!, bytes.count, q?.__wrapped, b) + self.init(data: d) } -#endif + internal init(data: dispatch_data_t) { - __wrapped = data + __wrapped = __DispatchData(data: data, owned: true) + } + + internal init(borrowedData: dispatch_data_t) { + __wrapped = __DispatchData(data: borrowedData, owned: false) } public var count: Int { - return CDispatch.dispatch_data_get_size(__wrapped) + return CDispatch.dispatch_data_get_size(__wrapped.__wrapped) } public func withUnsafeBytes( - body: @noescape (UnsafePointer) throws -> Result) rethrows -> Result + body: (UnsafePointer) throws -> Result) rethrows -> Result { - var ptr: UnsafePointer? = nil - var size = 0; - let data = CDispatch.dispatch_data_create_map(__wrapped, &ptr, &size) + var ptr: UnsafeRawPointer? 
= nil + var size = 0 + let data = CDispatch.dispatch_data_create_map(__wrapped.__wrapped, &ptr, &size) + let contentPtr = ptr!.bindMemory( + to: ContentType.self, capacity: size / MemoryLayout.stride) defer { _fixLifetime(data) } - return try body(UnsafePointer(ptr!)) + return try body(contentPtr) } public func enumerateBytes( - block: @noescape (buffer: UnsafeBufferPointer, byteIndex: Int, stop: inout Bool) -> Void) + block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) { - _swift_dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in - let bp = UnsafeBufferPointer(start: UnsafePointer(ptr), count: size) - var stop = false - block(buffer: bp, byteIndex: offset, stop: &stop) - return !stop + // we know that capturing block in the closure being created/passed to dispatch_data_apply + // does not cause block to escape because dispatch_data_apply does not allow its + // block argument to escape. Therefore, the usage of withoutActuallyEscaping to + // bypass the Swift type system is safe. + withoutActuallyEscaping(block) { escapableBlock in + _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (_, offset: Int, ptr: UnsafeRawPointer, size: Int) in + let bytePtr = ptr.bindMemory(to: UInt8.self, capacity: size) + let bp = UnsafeBufferPointer(start: bytePtr, count: size) + var stop = false + escapableBlock(bp, offset, &stop) + return !stop + } } } @@ -94,33 +138,53 @@ public struct DispatchData : RandomAccessCollection { /// /// - parameter bytes: A pointer to the bytes to copy in to the data. /// - parameter count: The number of bytes to copy. + @available(swift, deprecated: 4, message: "Use append(_: UnsafeRawBufferPointer) instead") public mutating func append(_ bytes: UnsafePointer, count: Int) { let data = dispatch_data_create(bytes, count, nil, _dispatch_data_destructor_default()) self.append(DispatchData(data: data)) } + /// Append bytes to the data. 
+ /// + /// - parameter bytes: A pointer to the bytes to copy in to the data. + /// - parameter count: The number of bytes to copy. + public mutating func append(_ bytes: UnsafeRawBufferPointer) { + // Nil base address does nothing. + guard bytes.baseAddress != nil else { return } + let data = dispatch_data_create(bytes.baseAddress!, bytes.count, nil, _dispatch_data_destructor_default()) + self.append(DispatchData(data: data)) + } + /// Append data to the data. /// /// - parameter data: The data to append to this data. public mutating func append(_ other: DispatchData) { - let data = CDispatch.dispatch_data_create_concat(__wrapped, other.__wrapped) - __wrapped = data + let data = CDispatch.dispatch_data_create_concat(__wrapped.__wrapped, other.__wrapped.__wrapped) + __wrapped = __DispatchData(data: data, owned: true) } /// Append a buffer of bytes to the data. /// /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`. public mutating func append(_ buffer : UnsafeBufferPointer) { - self.append(UnsafePointer(buffer.baseAddress!), count: buffer.count * sizeof(SourceType.self)) + let count = buffer.count * MemoryLayout.stride; + buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: count) { + self.append($0, count: count) + } } - private func _copyBytesHelper(to pointer: UnsafeMutablePointer, from range: CountableRange) { + private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: CountableRange) { var copiedCount = 0 - _ = CDispatch.dispatch_data_apply(__wrapped) { (data: dispatch_data_t, offset: Int, ptr: UnsafePointer, size: Int) in - let limit = Swift.min((range.endIndex - range.startIndex) - copiedCount, size) - memcpy(pointer + copiedCount, ptr, limit) - copiedCount += limit - return copiedCount < (range.endIndex - range.startIndex) + if range.isEmpty { return } + let rangeSize = range.count + _ = CDispatch.dispatch_data_apply(__wrapped.__wrapped) { (data: dispatch_data_t, 
offset: Int, ptr: UnsafeRawPointer, size: Int) in + if offset >= range.endIndex { return false } // This region is after endIndex + let copyOffset = range.startIndex > offset ? range.startIndex - offset : 0 // offset of first byte, in this region + if copyOffset >= size { return true } // This region is before startIndex + let count = Swift.min(rangeSize - copiedCount, size - copyOffset) + memcpy(pointer + copiedCount, ptr + copyOffset, count) + copiedCount += count + return copiedCount < rangeSize } } @@ -129,22 +193,46 @@ public struct DispatchData : RandomAccessCollection { /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. /// - parameter count: The number of bytes to copy. /// - warning: This method does not verify that the contents at pointer have enough space to hold `count` bytes. + @available(swift, deprecated: 4, message: "Use copyBytes(to: UnsafeMutableRawBufferPointer, count: Int) instead") public func copyBytes(to pointer: UnsafeMutablePointer, count: Int) { _copyBytesHelper(to: pointer, from: 0..) instead") public func copyBytes(to pointer: UnsafeMutablePointer, from range: CountableRange) { _copyBytesHelper(to: pointer, from: range) } + /// Copy a subset of the contents of the data to a pointer. + /// + /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. The buffer must be large + /// enough to hold `count` bytes. + /// - parameter range: The range in the `Data` to copy. + public func copyBytes(to pointer: UnsafeMutableRawBufferPointer, from range: CountableRange) { + assert(range.count <= pointer.count, "Buffer too small to copy \(range.count) bytes") + guard pointer.baseAddress != nil else { return } + _copyBytesHelper(to: pointer.baseAddress!, from: range) + } + /// Copy the contents of the data into a buffer. /// - /// This function copies the bytes in `range` from the data into the buffer. 
If the count of the `range` is greater than `sizeof(DestinationType) * buffer.count` then the first N bytes will be copied into the buffer. + /// This function copies the bytes in `range` from the data into the buffer. If the count of the `range` is greater than `MemoryLayout.stride * buffer.count` then the first N bytes will be copied into the buffer. /// - precondition: The range must be within the bounds of the data. Otherwise `fatalError` is called. /// - parameter buffer: A buffer to copy the data into. /// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied. @@ -162,30 +250,28 @@ public struct DispatchData : RandomAccessCollection { precondition(r.endIndex >= 0) precondition(r.endIndex <= cnt, "The range is outside the bounds of the data") - copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * sizeof(DestinationType.self), r.count)) + copyRange = r.startIndex..<(r.startIndex + Swift.min(buffer.count * MemoryLayout.stride, r.count)) } else { - copyRange = 0...stride, cnt) } guard !copyRange.isEmpty else { return 0 } - let pointer : UnsafeMutablePointer = UnsafeMutablePointer(buffer.baseAddress!) - _copyBytesHelper(to: pointer, from: copyRange) + _copyBytesHelper(to: buffer.baseAddress!, from: copyRange) return copyRange.count } /// Sets or returns the byte at the specified index. public subscript(index: Index) -> UInt8 { var offset = 0 - let subdata = CDispatch.dispatch_data_copy_region(__wrapped, index, &offset) + let subdata = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, index, &offset) - var ptr: UnsafePointer? = nil + var ptr: UnsafeRawPointer? = nil var size = 0 let map = CDispatch.dispatch_data_create_map(subdata, &ptr, &size) defer { _fixLifetime(map) } - let pptr = UnsafePointer(ptr!) 
- return pptr[index - offset] + return ptr!.load(fromByteOffset: index - offset, as: UInt8.self) } public subscript(bounds: Range) -> RandomAccessSlice { @@ -197,13 +283,13 @@ public struct DispatchData : RandomAccessCollection { /// - parameter range: The range to copy. public func subdata(in range: CountableRange) -> DispatchData { let subrange = CDispatch.dispatch_data_create_subrange( - __wrapped, range.startIndex, range.endIndex - range.startIndex) + __wrapped.__wrapped, range.startIndex, range.endIndex - range.startIndex) return DispatchData(data: subrange) } public func region(location: Int) -> (data: DispatchData, offset: Int) { var offset: Int = 0 - let data = CDispatch.dispatch_data_copy_region(__wrapped, location, &offset) + let data = CDispatch.dispatch_data_copy_region(__wrapped.__wrapped, location, &offset) return (DispatchData(data: data), offset) } @@ -233,37 +319,33 @@ public struct DispatchData : RandomAccessCollection { public struct DispatchDataIterator : IteratorProtocol, Sequence { - /// Create an iterator over the given DisaptchData + /// Create an iterator over the given DispatchData public init(_data: DispatchData) { - var ptr: UnsafePointer? + var ptr: UnsafeRawPointer? self._count = 0 - self._data = CDispatch.dispatch_data_create_map(_data.__wrapped, &ptr, &self._count) - self._ptr = UnsafePointer(ptr!) + self._data = __DispatchData(data: CDispatch.dispatch_data_create_map(_data.__wrapped.__wrapped, &ptr, &self._count), owned: true) + self._ptr = ptr self._position = _data.startIndex + + // The only time we expect a 'nil' pointer is when the data is empty. + assert(self._ptr != nil || self._count == self._position) } /// Advance to the next element and return it, or `nil` if no next /// element exists. - /// - /// - Precondition: No preceding call to `self.next()` has returned `nil`. public mutating func next() -> DispatchData._Element? 
{ if _position == _count { return nil } - let element = _ptr[_position]; + let element = _ptr.load(fromByteOffset: _position, as: UInt8.self) _position = _position + 1 return element } - internal let _data: dispatch_data_t - internal var _ptr: UnsafePointer + internal let _data: __DispatchData + internal var _ptr: UnsafeRawPointer! internal var _count: Int internal var _position: DispatchData.Index } -typealias _swift_data_applier = @convention(block) @noescape (dispatch_data_t, Int, UnsafePointer, Int) -> Bool - -@_silgen_name("_swift_dispatch_data_apply") -internal func _swift_dispatch_data_apply(_ data: dispatch_data_t, _ block: _swift_data_applier) - @_silgen_name("_swift_dispatch_data_empty") internal func _swift_dispatch_data_empty() -> dispatch_data_t diff --git a/src/swift/Dispatch.apinotes b/src/swift/Dispatch.apinotes index 6e804515a..d40bb68a4 100644 --- a/src/swift/Dispatch.apinotes +++ b/src/swift/Dispatch.apinotes @@ -91,6 +91,8 @@ Protocols: SwiftName: DispatchSourceUserDataOr - Name: OS_dispatch_source_data_add SwiftName: DispatchSourceUserDataAdd +- Name: OS_dispatch_source_data_replace + SwiftName: DispatchSourceUserDataReplace - Name: OS_dispatch_source_vnode SwiftName: DispatchSourceFileSystemObject - Name: OS_dispatch_source_write diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift index 2b9cb2164..ec73acbb7 100644 --- a/src/swift/Dispatch.swift +++ b/src/swift/Dispatch.swift @@ -59,11 +59,6 @@ public struct DispatchQoS : Equatable { @available(OSX 10.10, iOS 8.0, *) public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0) - @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "DispatchQoS.default") - @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "DispatchQoS.default") - @available(*, deprecated, renamed: "DispatchQoS.default") - public static let defaultQoS = DispatchQoS.default - @available(OSX 10.10, iOS 8.0, *) public static let userInitiated = DispatchQoS(qosClass: 
.userInitiated, relativePriority: 0) @@ -82,11 +77,6 @@ public struct DispatchQoS : Equatable { @available(OSX 10.10, iOS 8.0, *) case `default` - @available(OSX, introduced: 10.10, deprecated: 10.10, renamed: "QoSClass.default") - @available(iOS, introduced: 8.0, deprecated: 8.0, renamed: "QoSClass.default") - @available(*, deprecated, renamed: "QoSClass.default") - static let defaultQoS = QoSClass.default - @available(OSX 10.10, iOS 8.0, *) case userInitiated @@ -95,9 +85,11 @@ public struct DispatchQoS : Equatable { case unspecified + // _OSQoSClass is internal on Linux, so this initialiser has to + // remain as an internal init. @available(OSX 10.10, iOS 8.0, *) - internal init?(qosClass: _OSQoSClass) { - switch qosClass { + internal init?(rawValue: _OSQoSClass) { + switch rawValue { case .QOS_CLASS_BACKGROUND: self = .background case .QOS_CLASS_UTILITY: self = .utility case .QOS_CLASS_DEFAULT: self = .default @@ -135,14 +127,14 @@ public func ==(a: DispatchQoS, b: DispatchQoS) -> Bool { public enum DispatchTimeoutResult { static let KERN_OPERATION_TIMED_OUT:Int = 49 - case Success - case TimedOut + case success + case timedOut } /// dispatch_group public extension DispatchGroup { - public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @convention(block) () -> ()) { + public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @escaping @convention(block) () -> ()) { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block) @@ -161,21 +153,11 @@ public extension DispatchGroup { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? 
.Success : .TimedOut + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout timeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut - } -} - -public extension DispatchGroup { - @available(*, deprecated, renamed: "DispatchGroup.wait(self:wallTimeout:)") - public func wait(walltime timeout: DispatchWallTime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } + return dispatch_group_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } } @@ -192,20 +174,10 @@ public extension DispatchSemaphore { } public func wait(timeout: DispatchTime) -> DispatchTimeoutResult { - return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .Success : .TimedOut + return dispatch_semaphore_wait(self.__wrapped, timeout.rawValue) == 0 ? .success : .timedOut } public func wait(wallTimeout: DispatchWallTime) -> DispatchTimeoutResult { - return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? .Success : .TimedOut - } -} - -public extension DispatchSemaphore { - @available(*, deprecated, renamed: "DispatchSemaphore.wait(self:wallTimeout:)") - public func wait(walltime timeout: DispatchWalltime) -> Int { - switch wait(wallTimeout: timeout) { - case .Success: return 0 - case .TimedOut: return DispatchTimeoutResult.KERN_OPERATION_TIMED_OUT - } + return dispatch_semaphore_wait(self.__wrapped, wallTimeout.rawValue) == 0 ? 
.success : .timedOut } } diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index 1e5ec74f7..9c667d570 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -26,6 +26,7 @@ @protocol OS_dispatch_source_timer; @protocol OS_dispatch_source_data_add; @protocol OS_dispatch_source_data_or; +@protocol OS_dispatch_source_data_replace; @protocol OS_dispatch_source_vnode; @protocol OS_dispatch_source_write; @@ -44,6 +45,7 @@ static void _dispatch_overlay_constructor() { class_addProtocol(source, @protocol(OS_dispatch_source_timer)); class_addProtocol(source, @protocol(OS_dispatch_source_data_add)); class_addProtocol(source, @protocol(OS_dispatch_source_data_or)); + class_addProtocol(source, @protocol(OS_dispatch_source_data_replace)); class_addProtocol(source, @protocol(OS_dispatch_source_vnode)); class_addProtocol(source, @protocol(OS_dispatch_source_write)); } @@ -51,12 +53,41 @@ static void _dispatch_overlay_constructor() { #endif /* USE_OBJC */ -#if 0 /* FIXME -- adding directory to include path may need build-script plumbing to do properly... */ -#include "swift/Runtime/Config.h" + +// Replicate the SWIFT_CC(swift) calling convention macro from +// swift/include/swift/Runtime/Config.h because it is +// quite awkward to include Config.h and its recursive includes +// in dispatch. This define must be manually kept in synch +#define SWIFT_CC(CC) SWIFT_CC_##CC +#if SWIFT_USE_SWIFTCALL +#define SWIFT_CC_swift __attribute__((swiftcall)) #else -#define SWIFT_CC(x) /* FIXME!! 
*/ +#define SWIFT_CC_swift +#endif + +extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void); +extern "C" void _swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)); +extern "C" dispatch_queue_t _swift_dispatch_get_main_queue(void); +extern "C" dispatch_data_t _swift_dispatch_data_empty(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_default(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_free(void); +extern "C" dispatch_block_t _swift_dispatch_data_destructor_munmap(void); +extern "C" dispatch_block_t _swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block); +extern "C" dispatch_block_t _swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block); +extern "C" void _swift_dispatch_block_cancel(dispatch_block_t block); +extern "C" long _swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); +extern "C" void _swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block); +extern "C" long _swift_dispatch_block_testcancel(dispatch_block_t block); +extern "C" void _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); +extern "C" void _swift_dispatch_release(dispatch_object_t obj); +extern "C" void _swift_dispatch_retain(dispatch_object_t obj); +#if !USE_OBJC +extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); #endif + SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void) { @@ -135,12 +166,6 @@ _swift_dispatch_block_testcancel(dispatch_block_t block) { return 
dispatch_block_testcancel(block); } -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" bool -_swift_dispatch_data_apply(dispatch_data_t data, bool (^applier)(dispatch_data_t, size_t, const void *, size_t)) { - return dispatch_data_apply(data, applier); -} - SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block) { @@ -165,13 +190,14 @@ _swift_dispatch_release(dispatch_object_t obj) { dispatch_release(obj); } -// DISPATCH_RUNTIME_STDLIB_INTERFACE -// extern "C" dispatch_queue_t -// _swift_apply_current_root_queue() { -// return DISPATCH_APPLY_CURRENT_ROOT_QUEUE; -// } +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +extern "C" void +_swift_dispatch_retain(dispatch_object_t obj) { + dispatch_retain(obj); +} #define SOURCE(t) \ + extern "C" dispatch_source_type_t _swift_dispatch_source_type_##t(void); \ SWIFT_CC(swift) \ DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t \ _swift_dispatch_source_type_##t(void) { \ @@ -180,6 +206,7 @@ _swift_dispatch_release(dispatch_object_t obj) { SOURCE(DATA_ADD) SOURCE(DATA_OR) +SOURCE(DATA_REPLACE) #if HAVE_MACH SOURCE(MACH_SEND) SOURCE(MACH_RECV) @@ -196,12 +223,25 @@ SOURCE(VNODE) #endif SOURCE(WRITE) -// See comment in CFFuntime.c explaining why objc_retainAutoreleasedReturnValue is needed. -extern "C" void swift_release(void *); +#if !USE_OBJC + +// For CF functions with 'Get' semantics, the compiler currently assumes that +// the result is autoreleased and must be retained. It does so on all platforms +// by emitting a call to objc_retainAutoreleasedReturnValue. On Darwin, this is +// implemented by the ObjC runtime. On non-ObjC platforms, there is no runtime, +// and therefore we have to stub it out here ourselves. The compiler will +// eventually call swift_release to balance the retain below. This is a +// workaround until the compiler no longer emits this callout on non-ObjC +// platforms. 
+extern "C" void swift_retain(void *); + +SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { if (obj) { - swift_release(obj); + swift_retain(obj); return obj; } else return NULL; } + +#endif // !USE_OBJC diff --git a/src/swift/IO.swift b/src/swift/IO.swift index 6e6b6692e..d26f64160 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -34,15 +34,15 @@ public extension DispatchIO { public static let strictInterval = IntervalFlags(rawValue: 1) } - public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData, error: Int32) -> Void) { + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in - handler(data: DispatchData(data: data), error: error) + handler(DispatchData(borrowedData: data), error) } } - public class func write(fromFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: (data: DispatchData?, error: Int32) -> Void) { - dispatch_write(fromFileDescriptor, data.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in - handler(data: data.flatMap { DispatchData(data: $0) }, error: error) + public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) { + dispatch_write(toFileDescriptor, data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data.flatMap { DispatchData(borrowedData: $0) }, error) } } @@ -50,18 +50,31 @@ public extension DispatchIO { type: StreamType, fileDescriptor: Int32, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) 
-> Void) { self.init(__type: type.rawValue, fd: fileDescriptor, queue: queue, handler: cleanupHandler) } + @available(swift, obsoleted: 4) public convenience init( type: StreamType, path: UnsafePointer, oflag: Int32, mode: mode_t, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) -> Void) + { + self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler) + } + + @available(swift, introduced: 4) + public convenience init?( + type: StreamType, + path: UnsafePointer, + oflag: Int32, + mode: mode_t, + queue: DispatchQueue, + cleanupHandler: @escaping (_ error: Int32) -> Void) { self.init(__type: type.rawValue, path: path, oflag: oflag, mode: mode, queue: queue, handler: cleanupHandler) } @@ -70,60 +83,28 @@ public extension DispatchIO { type: StreamType, io: DispatchIO, queue: DispatchQueue, - cleanupHandler: (error: Int32) -> Void) + cleanupHandler: @escaping (_ error: Int32) -> Void) { self.init(__type: type.rawValue, io: io, queue: queue, handler: cleanupHandler) } - public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { + public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - ioHandler(done: done, data: data.flatMap { DispatchData(data: $0) }, error: error) + ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) } } - public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: (done: Bool, data: DispatchData?, error: Int32) -> Void) { - dispatch_io_write(self.__wrapped, offset, data.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - ioHandler(done: done, data: data.flatMap { 
DispatchData(data: $0) }, error: error) + public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { + dispatch_io_write(self.__wrapped, offset, data.__wrapped.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in + ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) } } public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) { - dispatch_io_set_interval(self.__wrapped, interval.rawValue, flags.rawValue) + dispatch_io_set_interval(self.__wrapped, UInt64(interval.rawValue), flags.rawValue) } public func close(flags: CloseFlags = []) { dispatch_io_close(self.__wrapped, flags.rawValue) } } - -extension DispatchIO { - @available(*, deprecated, renamed: "DispatchIO.read(fromFileDescriptor:maxLength:runningHandlerOn:handler:)") - public class func read(fd: Int32, length: Int, queue: DispatchQueue, handler: (DispatchData, Int32) -> Void) { - DispatchIO.read(fromFileDescriptor: fd, maxLength: length, runningHandlerOn: queue, handler: handler) - } - - @available(*, deprecated, renamed: "DispatchIO.write(fromFileDescriptor:data:runningHandlerOn:handler:)") - public class func write(fd: Int32, data: DispatchData, queue: DispatchQueue, handler: (DispatchData?, Int32) -> Void) { - DispatchIO.write(fromFileDescriptor: fd, data: data, runningHandlerOn: queue, handler: handler) - } - - @available(*, deprecated, renamed: "DispatchIO.barrier(self:execute:)") - public func withBarrier(barrier work: () -> ()) { - barrier(execute: work) - } - - @available(*, deprecated, renamed: "DispatchIO.setLimit(self:highWater:)") - public func setHighWater(highWater: Int) { - setLimit(highWater: highWater) - } - - @available(*, deprecated, renamed: "DispatchIO.setLimit(self:lowWater:)") - public func setLowWater(lowWater: Int) { - setLimit(lowWater: lowWater) - } - - @available(*, deprecated, renamed: 
"DispatchIO.setInterval(self:interval:flags:)") - public func setInterval(interval: UInt64, flags: IntervalFlags) { - setInterval(interval: .nanoseconds(Int(interval)), flags: flags) - } -} diff --git a/src/swift/Private.swift b/src/swift/Private.swift index e38f72861..df6a7b336 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -14,62 +14,62 @@ import CDispatch -@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)") public func dispatch_queue_create(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?) -> DispatchQueue { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.init(label:attributes:target:)") +@available(*, unavailable, renamed:"DispatchQueue.init(label:qos:attributes:autoreleaseFrequency:target:)") public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ attr: dispatch_queue_attr_t?, _ queue: DispatchQueue?) 
-> DispatchQueue { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") -public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:path:oflag:mode:queue:cleanupHandler:)") -public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create_with_path(_ type: UInt, _ path: UnsafePointer, _ oflag: Int32, _ mode: mode_t, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.init(type:io:queue:cleanupHandler:)") -public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: (Int32) -> Void) -> DispatchIO +public func dispatch_io_create_with_io(_ type: UInt, _ io: DispatchIO, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } @available(*, unavailable, renamed:"DispatchIO.read(fileDescriptor:length:queue:handler:)") -public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: (dispatch_data_t, Int32) -> Void) +public func dispatch_read(_ fd: Int32, _ length: Int, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.read(self:offset:length:queue:ioHandler:)") -func dispatch_io_read(_ channel: DispatchIO, _ offset: off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +func dispatch_io_read(_ channel: DispatchIO, _ offset: 
off_t, _ length: Int, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.write(self:offset:data:queue:ioHandler:)") -func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: (Bool, dispatch_data_t?, Int32) -> Void) +func dispatch_io_write(_ channel: DispatchIO, _ offset: off_t, _ data: dispatch_data_t, _ queue: DispatchQueue, _ io_handler: @escaping (Bool, dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchIO.write(fileDescriptor:data:queue:handler:)") -func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: (dispatch_data_t?, Int32) -> Void) +func dispatch_write(_ fd: Int32, _ data: dispatch_data_t, _ queue: DispatchQueue, _ handler: @escaping (dispatch_data_t?, Int32) -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchData.init(bytes:)") -public func dispatch_data_create(_ buffer: UnsafePointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t +public func dispatch_data_create(_ buffer: UnsafeRawPointer, _ size: Int, _ queue: DispatchQueue?, _ destructor: (() -> Void)?) -> dispatch_data_t { fatalError() } @@ -81,7 +81,7 @@ public func dispatch_data_get_size(_ data: dispatch_data_t) -> Int } @available(*, unavailable, renamed:"DispatchData.withUnsafeBytes(self:body:)") -public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?>?, _ size_ptr: UnsafeMutablePointer?) -> dispatch_data_t +public func dispatch_data_create_map(_ data: dispatch_data_t, _ buffer_ptr: UnsafeMutablePointer?, _ size_ptr: UnsafeMutablePointer?) 
-> dispatch_data_t { fatalError() } @@ -99,7 +99,7 @@ public func dispatch_data_create_subrange(_ data: dispatch_data_t, _ offset: Int } @available(*, unavailable, renamed:"DispatchData.enumerateBytes(self:block:)") -public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: (dispatch_data_t, Int, UnsafePointer, Int) -> Bool) -> Bool +public func dispatch_data_apply(_ data: dispatch_data_t, _ applier: @escaping (dispatch_data_t, Int, UnsafeRawPointer, Int) -> Bool) -> Bool { fatalError() } @@ -110,14 +110,14 @@ public func dispatch_data_copy_region(_ data: dispatch_data_t, _ location: Int, fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") -public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.async(self:group:qos:flags:execute:)") +public func dispatch_group_async(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @available(*, unavailable, renamed: "DispatchGroup.notify(self:qos:flags:queue:execute:)") -public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: () -> Void) +public func dispatch_group_notify(_ group: DispatchGroup, _ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @@ -140,14 +140,14 @@ public func dispatch_io_set_interval(_ channel: DispatchIO, _ interval: UInt64, fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.apply(attributes:iterations:execute:)") -public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: @noescape (Int) -> Void) +@available(*, unavailable, renamed:"DispatchQueue.concurrentPerform(iterations:execute:)") +public func dispatch_apply(_ iterations: Int, _ queue: DispatchQueue, _ block: (Int) -> Void) { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:execute:)") -public func dispatch_async(_ 
queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.async(self:execute:)") +public func dispatch_async(_ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } @@ -158,25 +158,25 @@ public func dispatch_get_global_queue(_ identifier: Int, _ flags: UInt) -> Dispa fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.main") -public func dispatch_get_main_queue() -> DispatchQueue +@available(*, unavailable, renamed: "getter:DispatchQueue.main()") +public func dispatch_get_main_queue() -> DispatchQueue { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.initiallyInactive") +@available(*, unavailable, renamed:"DispatchQueue.Attributes.initiallyInactive") public func dispatch_queue_attr_make_initially_inactive(_ attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.autoreleaseWorkItem") +@available(*, unavailable, renamed:"DispatchQueue.AutoreleaseFrequency.workItem") public func dispatch_queue_attr_make_with_autorelease_frequency(_ attr: dispatch_queue_attr_t?, _ frequency: dispatch_autorelease_frequency_t) -> dispatch_queue_attr_t { fatalError() } -@available(*, unavailable, renamed:"DispatchQueueAttributes.qosUserInitiated") +@available(*, unavailable, renamed:"DispatchQoS") public func dispatch_queue_attr_make_with_qos_class(_ attr: dispatch_queue_attr_t?, _ qos_class: dispatch_qos_class_t, _ relative_priority: Int32) -> dispatch_queue_attr_t { fatalError() @@ -194,38 +194,38 @@ public func dispatch_queue_get_qos_class(_ queue: DispatchQueue, _ relative_prio fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.after(self:when:execute:)") -public func dispatch_after(_ when: dispatch_time_t, _ queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.asyncAfter(self:deadline:qos:flags:execute:)") +public func dispatch_after(_ when: dispatch_time_t, 
_ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.asynchronously(self:group:qos:flags:execute:)") -public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.async(self:group:qos:flags:execute:)") +public func dispatch_barrier_async(_ queue: DispatchQueue, _ block: @escaping () -> Void) { fatalError() } -@available(*, unavailable, renamed:"DispatchQueue.synchronously(self:flags:execute:)") -public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: @noescape () -> Void) +@available(*, unavailable, renamed:"DispatchQueue.sync(self:flags:execute:)") +public func dispatch_barrier_sync(_ queue: DispatchQueue, _ block: () -> Void) { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.setSpecific(self:key:value:)") -public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafePointer, _ context: UnsafeMutablePointer?, _ destructor: (@convention(c) (UnsafeMutablePointer?) -> Void)?) +public func dispatch_queue_set_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer, _ context: UnsafeMutableRawPointer?, _ destructor: (@convention(c) (UnsafeMutableRawPointer?) -> Void)?) { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.getSpecific(self:key:)") -public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafePointer) -> UnsafeMutablePointer? +public func dispatch_queue_get_specific(_ queue: DispatchQueue, _ key: UnsafeRawPointer) -> UnsafeMutableRawPointer? { fatalError() } @available(*, unavailable, renamed:"DispatchQueue.getSpecific(key:)") -public func dispatch_get_specific(_ key: UnsafePointer) -> UnsafeMutablePointer? +public func dispatch_get_specific(_ key: UnsafeRawPointer) -> UnsafeMutableRawPointer? 
{ fatalError() } @@ -338,22 +338,22 @@ public func dispatch_walltime(_ when: UnsafePointer?, _ delta: Int64) fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUserInitiated") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.high") public var DISPATCH_QUEUE_PRIORITY_HIGH: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosDefault") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.default") public var DISPATCH_QUEUE_PRIORITY_DEFAULT: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosUtility") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.low") public var DISPATCH_QUEUE_PRIORITY_LOW: Int { fatalError() } -@available(*, unavailable, renamed: "DispatchQueue.GlobalAttributes.qosBackground") +@available(*, unavailable, renamed: "DispatchQueue.GlobalQueuePriority.background") public var DISPATCH_QUEUE_PRIORITY_BACKGROUND: Int { fatalError() } diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 5a45fdcd1..bff1bc323 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -14,82 +14,6 @@ import CDispatch -public struct DispatchQueueAttributes : OptionSet { - public let rawValue: UInt64 - public init(rawValue: UInt64) { self.rawValue = rawValue } - - public static let serial = DispatchQueueAttributes(rawValue: 0<<0) - public static let concurrent = DispatchQueueAttributes(rawValue: 1<<1) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let initiallyInactive = DispatchQueueAttributes(rawValue: 1<<2) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let autoreleaseInherit = DispatchQueueAttributes(rawValue: 1<<3) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) - public static let autoreleaseWorkItem = DispatchQueueAttributes(rawValue: 1<<4) - - @available(OSX 10.12, iOS 10.0, tvOS 10.0, 
watchOS 3.0, *) - public static let autoreleaseNever = DispatchQueueAttributes(rawValue: 1<<5) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInteractive = DispatchQueueAttributes(rawValue: 1<<6) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInitiated = DispatchQueueAttributes(rawValue: 1<<7) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosDefault = DispatchQueueAttributes(rawValue: 1<<8) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUtility = DispatchQueueAttributes(rawValue: 1<<9) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosBackground = DispatchQueueAttributes(rawValue: 1<<10) - - @available(*, deprecated, message: ".noQoS has no effect, it should not be used") - public static let noQoS = DispatchQueueAttributes(rawValue: 1<<11) - - private var attr: dispatch_queue_attr_t? { - var attr: dispatch_queue_attr_t? - - if self.contains(.concurrent) { - attr = _swift_dispatch_queue_concurrent() - } - if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { - if self.contains(.initiallyInactive) { - attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) - } - if self.contains(.autoreleaseWorkItem) { - // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) - } else if self.contains(.autoreleaseInherit) { - // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) - } else if self.contains(.autoreleaseNever) { - // DISPATCH_AUTORELEASE_FREQUENCY_NEVER - attr = CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) - } - } - if #available(OSX 10.10, iOS 8.0, *) { - if self.contains(.qosUserInteractive) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue, 0) - } else 
if self.contains(.qosUserInitiated) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue, 0) - } else if self.contains(.qosDefault) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_DEFAULT.rawValue, 0) - } else if self.contains(.qosUtility) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_UTILITY.rawValue, 0) - } else if self.contains(.qosBackground) { - attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, _OSQoSClass.QOS_CLASS_BACKGROUND.rawValue, 0) - } - } - return attr - } -} - - public final class DispatchSpecificKey { public init() {} } @@ -100,66 +24,86 @@ internal class _DispatchSpecificValue { } public extension DispatchQueue { - - public struct GlobalAttributes : OptionSet { + public struct Attributes : OptionSet { public let rawValue: UInt64 public init(rawValue: UInt64) { self.rawValue = rawValue } - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInteractive = GlobalAttributes(rawValue: 1<<0) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosUserInitiated = GlobalAttributes(rawValue: 1<<1) - - @available(OSX 10.10, iOS 8.0, *) - public static let qosDefault = GlobalAttributes(rawValue: 1<<2) + public static let concurrent = Attributes(rawValue: 1<<1) - @available(OSX 10.10, iOS 8.0, *) - public static let qosUtility = GlobalAttributes(rawValue: 1<<3) + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + public static let initiallyInactive = Attributes(rawValue: 1<<2) - @available(OSX 10.10, iOS 8.0, *) - public static let qosBackground = GlobalAttributes(rawValue: 1<<4) + fileprivate func _attr() -> dispatch_queue_attr_t? { + var attr: dispatch_queue_attr_t? = nil - // Avoid using our own deprecated constants here by declaring - // non-deprecated constants and then basing the public ones on those. 
- internal static let _priorityHigh = GlobalAttributes(rawValue: 1<<5) - internal static let _priorityDefault = GlobalAttributes(rawValue: 1<<6) - internal static let _priorityLow = GlobalAttributes(rawValue: 1<<7) - internal static let _priorityBackground = GlobalAttributes(rawValue: 1<<8) + if self.contains(.concurrent) { + attr = _swift_dispatch_queue_concurrent() + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if self.contains(.initiallyInactive) { + attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) + } + } + return attr + } + } + public enum GlobalQueuePriority { @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityHigh = _priorityHigh + case high @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityDefault = _priorityDefault + case `default` @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityLow = _priorityLow + case low @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") @available(*, deprecated: 8.0, message: "Use qos attributes instead") - public static let priorityBackground = _priorityBackground + case background internal var _translatedValue: Int { - if #available(OSX 10.10, iOS 8.0, *) { - if self.contains(.qosUserInteractive) { return Int(_OSQoSClass.QOS_CLASS_USER_INTERACTIVE.rawValue) } - else if self.contains(.qosUserInitiated) { return Int(_OSQoSClass.QOS_CLASS_USER_INITIATED.rawValue) } - else if self.contains(.qosDefault) { return Int(_OSQoSClass.QOS_CLASS_DEFAULT.rawValue) } - else if self.contains(.qosUtility) { return Int(_OSQoSClass.QOS_CLASS_UTILITY.rawValue) } - else { return Int(_OSQoSClass.QOS_CLASS_BACKGROUND.rawValue) } + 
switch self { + case .high: return 2 // DISPATCH_QUEUE_PRIORITY_HIGH + case .default: return 0 // DISPATCH_QUEUE_PRIORITY_DEFAULT + case .low: return -2 // DISPATCH_QUEUE_PRIORITY_LOW + case .background: return Int(Int16.min) // DISPATCH_QUEUE_PRIORITY_BACKGROUND } - if self.contains(._priorityHigh) { return 2 } // DISPATCH_QUEUE_PRIORITY_HIGH - else if self.contains(._priorityDefault) { return 0 } // DISPATCH_QUEUE_PRIORITY_DEFAULT - else if self.contains(._priorityLow) { return -2 } // // DISPATCH_QUEUE_PRIORITY_LOW - else if self.contains(._priorityBackground) { return Int(Int16.min) } // // DISPATCH_QUEUE_PRIORITY_BACKGROUND - return 0 } } - public class func concurrentPerform(iterations: Int, execute work: @noescape (Int) -> Void) { + public enum AutoreleaseFrequency { + case inherit + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + case workItem + + @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + case never + + internal func _attr(attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t? 
{ + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + switch self { + case .inherit: + // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(0)) + case .workItem: + // DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(1)) + case .never: + // DISPATCH_AUTORELEASE_FREQUENCY_NEVER + return CDispatch.dispatch_queue_attr_make_with_autorelease_frequency(attr, dispatch_autorelease_frequency_t(2)) + } + } else { + return attr + } + } + } + + public class func concurrentPerform(iterations: Int, execute work: (Int) -> Void) { _swift_dispatch_apply_current(iterations, work) } @@ -167,9 +111,15 @@ public extension DispatchQueue { return DispatchQueue(queue: _swift_dispatch_get_main_queue()) } - public class func global(attributes: GlobalAttributes = []) -> DispatchQueue { - // SubOptimal? Should we be caching these global DispatchQueue objects? - return DispatchQueue(queue:dispatch_get_global_queue(attributes._translatedValue, 0)) + @available(OSX, deprecated: 10.10, message: "") + @available(*, deprecated: 8.0, message: "") + public class func global(priority: GlobalQueuePriority) -> DispatchQueue { + return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(priority._translatedValue, 0)) + } + + @available(OSX 10.10, iOS 8.0, *) + public class func global(qos: DispatchQoS.QoSClass = .default) -> DispatchQueue { + return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(Int(qos.rawValue.rawValue), 0)) } public class func getSpecific(key: DispatchSpecificKey) -> T? { @@ -185,13 +135,23 @@ public extension DispatchQueue { public convenience init( label: String, - attributes: DispatchQueueAttributes = .serial, + qos: DispatchQoS = .unspecified, + attributes: Attributes = [], + autoreleaseFrequency: AutoreleaseFrequency = .inherit, target: DispatchQueue? 
= nil) { + var attr = attributes._attr() + if autoreleaseFrequency != .inherit { + attr = autoreleaseFrequency._attr(attr: attr) + } + if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified { + attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority)) + } + if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { - self.init(__label: label, attr: attributes.attr, queue: target) + self.init(__label: label, attr: attr, queue: target) } else { - self.init(__label: label, attr: attributes.attr) + self.init(__label: label, attr: attr) if let tq = target { self.setTarget(queue: tq) } } } @@ -202,59 +162,67 @@ public extension DispatchQueue { @available(OSX 10.10, iOS 8.0, *) public func sync(execute workItem: DispatchWorkItem) { - dispatch_sync(self.__wrapped, workItem._block) + CDispatch.dispatch_sync(self.__wrapped, workItem._block) } @available(OSX 10.10, iOS 8.0, *) public func async(execute workItem: DispatchWorkItem) { - // _swift_dispatch_{group,}_async preserves the @convention(block) - // for work item blocks. - if let g = workItem._group { - dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) - } else { - dispatch_async(self.__wrapped, workItem._block) - } + CDispatch.dispatch_async(self.__wrapped, workItem._block) } - public func async(group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { - if group == nil && qos == .unspecified && flags.isEmpty { + @available(OSX 10.10, iOS 8.0, *) + public func async(group: DispatchGroup, execute workItem: DispatchWorkItem) { + CDispatch.dispatch_group_async(group.__wrapped, self.__wrapped, workItem._block) + } + + public func async( + group: DispatchGroup? 
= nil, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { + if group == nil && qos == .unspecified { // Fast-path route for the most common API usage - dispatch_async(self.__wrapped, work) - return + if flags.isEmpty { + CDispatch.dispatch_async(self.__wrapped, work) + return + } else if flags == .barrier { + CDispatch.dispatch_barrier_async(self.__wrapped, work) + return + } } + var block: @convention(block) () -> Void = work if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work) - if let g = group { - dispatch_group_async(g.__wrapped, self.__wrapped, workItem._block) - } else { - dispatch_async(self.__wrapped, workItem._block) - } + block = workItem._block + } + + if let g = group { + CDispatch.dispatch_group_async(g.__wrapped, self.__wrapped, block) } else { - if let g = group { - dispatch_group_async(g.__wrapped, self.__wrapped, work) - } else { - dispatch_async(self.__wrapped, work) - } + CDispatch.dispatch_async(self.__wrapped, block) } } - private func _syncBarrier(block: @noescape () -> ()) { - dispatch_barrier_sync(self.__wrapped, block) + private func _syncBarrier(block: () -> ()) { + CDispatch.dispatch_barrier_sync(self.__wrapped, block) } private func _syncHelper( - fn: (@noescape () -> ()) -> (), - execute work: @noescape () throws -> T, + fn: (() -> ()) -> (), + execute work: () throws -> T, rescue: ((Swift.Error) throws -> (T))) rethrows -> T { var result: T? var error: Swift.Error? 
- fn { - do { - result = try work() - } catch let e { - error = e + withoutActuallyEscaping(work) { _work in + fn { + do { + result = try _work() + } catch let e { + error = e + } } } if let e = error { @@ -266,10 +234,10 @@ public extension DispatchQueue { @available(OSX 10.10, iOS 8.0, *) private func _syncHelper( - fn: (DispatchWorkItem) -> (), + fn: (DispatchWorkItem) -> (), flags: DispatchWorkItemFlags, - execute work: @noescape () throws -> T, - rescue: ((Swift.Error) throws -> (T))) rethrows -> T + execute work: () throws -> T, + rescue: @escaping ((Swift.Error) throws -> (T))) rethrows -> T { var result: T? var error: Swift.Error? @@ -277,7 +245,7 @@ public extension DispatchQueue { do { result = try work() } catch let e { - error = e + error = e } }) fn(workItem) @@ -288,11 +256,11 @@ public extension DispatchQueue { } } - public func sync(execute work: @noescape () throws -> T) rethrows -> T { + public func sync(execute work: () throws -> T) rethrows -> T { return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) } - public func sync(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { + public func sync(flags: DispatchWorkItemFlags, execute work: () throws -> T) rethrows -> T { if flags == .barrier { return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 }) } else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty { @@ -302,38 +270,48 @@ public extension DispatchQueue { } } - public func after(when: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + public func asyncAfter( + deadline: DispatchTime, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) - 
dispatch_after(when.rawValue, self.__wrapped, item._block) + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, item._block) } else { - dispatch_after(when.rawValue, self.__wrapped, work) + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, work) } } - @available(OSX 10.10, iOS 8.0, *) - public func after(when: DispatchTime, execute: DispatchWorkItem) { - dispatch_after(when.rawValue, self.__wrapped, execute._block) - } - - public func after(walltime when: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { + public func asyncAfter( + wallDeadline: DispatchWallTime, + qos: DispatchQoS = .unspecified, + flags: DispatchWorkItemFlags = [], + execute work: @escaping @convention(block) () -> Void) + { if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) - dispatch_after(when.rawValue, self.__wrapped, item._block) + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, item._block) } else { - dispatch_after(when.rawValue, self.__wrapped, work) + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, work) } } @available(OSX 10.10, iOS 8.0, *) - public func after(walltime when: DispatchWallTime, execute: DispatchWorkItem) { - dispatch_after(when.rawValue, self.__wrapped, execute._block) + public func asyncAfter(deadline: DispatchTime, execute: DispatchWorkItem) { + CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, execute._block) + } + + @available(OSX 10.10, iOS 8.0, *) + public func asyncAfter(wallDeadline: DispatchWallTime, execute: DispatchWorkItem) { + CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, execute._block) } @available(OSX 10.10, iOS 8.0, *) public var qos: DispatchQoS { var relPri: Int32 = 0 - let cls = DispatchQoS.QoSClass(qosClass: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! 
+ let cls = DispatchQoS.QoSClass(rawValue: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! return DispatchQoS(qosClass: cls, relativePriority: Int(relPri)) } @@ -348,61 +326,24 @@ public extension DispatchQueue { return nil } - public func setSpecific(key: DispatchSpecificKey, value: T) { - let v = _DispatchSpecificValue(value: value) + public func setSpecific(key: DispatchSpecificKey, value: T?) { let k = Unmanaged.passUnretained(key).toOpaque() - let p = Unmanaged.passRetained(v).toOpaque() + let v = value.flatMap { _DispatchSpecificValue(value: $0) } + let p = v.flatMap { Unmanaged.passRetained($0).toOpaque() } dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) } -} - -extension DispatchQueue { - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute work: @noescape () -> ()) { - sync(execute: work) - } - - @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.sync(self:execute:)") - @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.sync(self:execute:)") - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute workItem: DispatchWorkItem) { - sync(execute: workItem) - } - - @available(OSX, introduced: 10.10, deprecated: 10.12, renamed: "DispatchQueue.async(self:execute:)") - @available(iOS, introduced: 8.0, deprecated: 10.0, renamed: "DispatchQueue.async(self:execute:)") - @available(*, deprecated, renamed: "DispatchQueue.async(self:execute:)") - public func asynchronously(execute workItem: DispatchWorkItem) { - async(execute: workItem) - } - @available(*, deprecated, renamed: "DispatchQueue.async(self:group:qos:flags:execute:)") - public func asynchronously(group: DispatchGroup? 
= nil, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @convention(block) () -> Void) { - async(group: group, qos: qos, flags: flags, execute: work) - } - - @available(*, deprecated, renamed: "DispatchQueue.sync(self:execute:)") - public func synchronously(execute work: @noescape () throws -> T) rethrows -> T { - return try sync(execute: work) - } + #if os(Android) + @_silgen_name("_dispatch_install_thread_detach_callback") + private static func _dispatch_install_thread_detach_callback(_ cb: @escaping @convention(c) () -> Void) - @available(*, deprecated, renamed: "DispatchQueue.sync(self:flags:execute:)") - public func synchronously(flags: DispatchWorkItemFlags, execute work: @noescape () throws -> T) rethrows -> T { - return try sync(flags: flags, execute: work) - } - - @available(*, deprecated, renamed: "DispatchQueue.concurrentPerform(iterations:execute:)") - public func apply(applier iterations: Int, execute block: @noescape (Int) -> Void) { - DispatchQueue.concurrentPerform(iterations: iterations, execute: block) - } - - @available(*, deprecated, renamed: "DispatchQueue.setTarget(self:queue:)") - public func setTargetQueue(queue: DispatchQueue) { - self.setTarget(queue: queue) + public static func setThreadDetachCallback(_ cb: @escaping @convention(c) () -> Void) { + _dispatch_install_thread_detach_callback(cb) } + #endif } -private func _destructDispatchSpecificValue(ptr: UnsafeMutablePointer?) { +private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) 
{ if let p = ptr { Unmanaged.fromOpaque(p).release() } @@ -414,8 +355,5 @@ internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t @_silgen_name("_swift_dispatch_get_main_queue") internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t -@_silgen_name("_swift_dispatch_apply_current_root_queue") -internal func _swift_dispatch_apply_current_root_queue() -> dispatch_queue_t - @_silgen_name("_swift_dispatch_apply_current") -internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) @noescape (Int) -> Void) +internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index 2830f010e..421a6e9bb 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -12,10 +12,10 @@ import CDispatch -public extension DispatchSourceType { +public extension DispatchSourceProtocol { public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -29,7 +29,7 @@ public extension DispatchSourceType { } public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_cancel_handler((self as! 
DispatchSource).__wrapped, item._block) } else { @@ -43,7 +43,7 @@ public extension DispatchSourceType { } public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler where qos != .unspecified || !flags.isEmpty { + if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -112,7 +112,7 @@ public extension DispatchSource { } #endif -#if !os(Linux) +#if !os(Linux) && !os(Android) public struct ProcessEvent : OptionSet, RawRepresentable { public let rawValue: UInt public init(rawValue: UInt) { self.rawValue = rawValue } @@ -150,66 +150,71 @@ public extension DispatchSource { } #if HAVE_MACH - public class func machSend(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { + public class func makeMachSendSource(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceMachSend } #endif #if HAVE_MACH - public class func machReceive(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive { + public class func makeMachReceiveSource(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive { let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped) return DispatchSource(source) as DispatchSourceMachReceive } #endif #if HAVE_MACH - public class func memoryPressure(eventMask: MemoryPressureEvent, queue: DispatchQueue? 
= nil) -> DispatchSourceMemoryPressure { + public class func makeMemoryPressureSource(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure { let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue.__wrapped) return DispatchSourceMemoryPressure(source) } #endif -#if !os(Linux) - public class func process(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { +#if !os(Linux) && !os(Android) + public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? = nil) -> DispatchSourceProcess { let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceProcess } #endif - public class func read(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { + public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceRead } - public class func signal(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { + public class func makeSignalSource(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceSignal } - public class func timer(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { + public class func makeTimerSource(flags: TimerFlags = [], queue: DispatchQueue? 
= nil) -> DispatchSourceTimer { let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceTimer } - public class func userDataAdd(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { + public class func makeUserDataAddSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataAdd { let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataAdd } - public class func userDataOr(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { + public class func makeUserDataOrSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataOr } + + public class func makeUserDataReplaceSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataReplace { + let source = dispatch_source_create(_swift_dispatch_source_type_data_replace(), 0, 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceUserDataReplace + } -#if !os(Linux) - public class func fileSystemObject(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { +#if !os(Linux) && !os(Android) + public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceFileSystemObject } #endif - public class func write(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { + public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? 
= nil) -> DispatchSourceWrite { let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceWrite } @@ -255,7 +260,7 @@ public extension DispatchSourceMemoryPressure { } #endif -#if !os(Linux) +#if !os(Linux) && !os(Android) public extension DispatchSourceProcess { public var handle: pid_t { return pid_t(dispatch_source_get_handle(self as! DispatchSource)) @@ -274,64 +279,346 @@ public extension DispatchSourceProcess { #endif public extension DispatchSourceTimer { + /// + /// Sets the deadline and leeway for a timer event that fires once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared and the next timer event will occur at `deadline`. + /// + /// Delivery of the timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// - note: Delivery of the timer event does not cancel the timer source. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter leeway: the leeway for the timer. 
+ /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, ~0, UInt64(leeway.rawValue)) } + /// + /// Sets the deadline and leeway for a timer event that fires once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared and the next timer event will occur at `wallDeadline`. + /// + /// Delivery of the timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// - note: Delivery of the timer event does not cancel the timer source. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { dispatch_source_set_timer((self as! 
DispatchSource).__wrapped, wallDeadline.rawValue, ~0, UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `interval` units of + /// time thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter interval: the interval for the timer. + /// - parameter leeway: the leeway for the timer. 
+ /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `interval` seconds + /// thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. 
+ /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter interval: the interval for the timer in seconds. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(deadline:repeating:leeway:)") public func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `interval` units of + /// time thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. 
If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter interval: the interval for the timer. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.rawValue, UInt64(leeway.rawValue)) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } + /// + /// Sets the deadline, interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `interval` seconds + /// thereafter until the timer source is canceled. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. 
+ /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `wallDeadline + N * interval`, the upper + /// limit is the smaller of `leeway` and `interval/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter interval: the interval for the timer in seconds. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, deprecated: 4, renamed: "schedule(wallDeadline:repeating:leeway:)") public func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - dispatch_source_set_timer((self as! 
DispatchSource).__wrapped, wallDeadline.rawValue, UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) - } -} - -public extension DispatchSourceTimer { - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:deadline:leeway:)") - public func setTimer(start: DispatchTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleOneshot(deadline: start, leeway: leeway) - } - - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleOneshot(self:wallDeadline:leeway:)") - public func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleOneshot(wallDeadline: start, leeway: leeway) + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") - public func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(deadline: start, interval: interval, leeway: leeway) + /// + /// Sets the deadline, repeat interval and leeway for a timer event. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `repeating` units of + /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`, + /// or is defaulted, the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. 
+ /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the first timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire + /// only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(deadline: DispatchTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, deadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:deadline:interval:leeway:)") - public func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(deadline: start, interval: interval, leeway: leeway) + /// + /// Sets the deadline, repeat interval and leeway for a timer event. 
+ /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `deadline` and every `repeating` seconds + /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`, + /// the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `deadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `deadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter deadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on Mach absolute + /// time. + /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer + /// should fire only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(deadline: DispatchTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! 
DispatchSource).__wrapped, deadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") - public func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) + /// + /// Sets the deadline, repeat interval and leeway for a timer event. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` units of + /// time thereafter until the timer source is canceled. If the value of `repeating` is `.never`, + /// or is defaulted, the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption and + /// system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. For the subsequent timer fires at `wallDeadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. 
+ /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter repeating: the repeat interval for the timer, or `.never` if the timer should fire + /// only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(wallDeadline: DispatchWallTime, repeating interval: DispatchTimeInterval = .never, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval == .never ? ~0 : UInt64(interval.rawValue), UInt64(leeway.rawValue)) } - @available(*, deprecated, renamed: "DispatchSourceTimer.scheduleRepeating(self:wallDeadline:interval:leeway:)") - public func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { - scheduleRepeating(wallDeadline: start, interval: interval, leeway: leeway) + /// + /// Sets the deadline, repeat interval and leeway for a timer event that fires at least once. + /// + /// Once this function returns, any pending source data accumulated for the previous timer values + /// has been cleared. The next timer event will occur at `wallDeadline` and every `repeating` seconds + /// thereafter until the timer source is canceled. If the value of `repeating` is `.infinity`, + /// the timer fires only once. + /// + /// Delivery of a timer event may be delayed by the system in order to improve power consumption + /// and system performance. The upper limit to the allowable delay may be configured with the `leeway` + /// argument; the lower limit is under the control of the system. + /// + /// For the initial timer fire at `wallDeadline`, the upper limit to the allowable delay is set to + /// `leeway`. 
For the subsequent timer fires at `wallDeadline + N * repeating`, the upper + /// limit is the smaller of `leeway` and `repeating/2`. + /// + /// The lower limit to the allowable delay may vary with process state such as visibility of the + /// application UI. If the timer source was created with flags `TimerFlags.strict`, the system + /// will make a best effort to strictly observe the provided `leeway` value, even if it is smaller + /// than the current lower limit. Note that a minimal amount of delay is to be expected even if + /// this flag is specified. + /// + /// Calling this method has no effect if the timer source has already been canceled. + /// + /// - parameter wallDeadline: the time at which the timer event will be delivered, subject to the + /// leeway and other considerations described above. The deadline is based on + /// `gettimeofday(3)`. + /// - parameter repeating: the repeat interval for the timer in seconds, or `.infinity` if the timer + /// should fire only once. + /// - parameter leeway: the leeway for the timer. + /// + @available(swift, introduced: 4) + public func schedule(wallDeadline: DispatchWallTime, repeating interval: Double, leeway: DispatchTimeInterval = .nanoseconds(0)) { + dispatch_source_set_timer((self as! DispatchSource).__wrapped, wallDeadline.rawValue, interval.isInfinite ? ~0 : UInt64(interval * Double(NSEC_PER_SEC)), UInt64(leeway.rawValue)) } } -#if !os(Linux) +#if !os(Linux) && !os(Android) public extension DispatchSourceFileSystemObject { public var handle: Int32 { return Int32(dispatch_source_get_handle((self as! DispatchSource).__wrapped)) @@ -350,39 +637,37 @@ public extension DispatchSourceFileSystemObject { #endif public extension DispatchSourceUserDataAdd { - /// @function mergeData - /// - /// @abstract - /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or - /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its - /// target queue. 
+ /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_ADD` + /// and submits its event handler block to its target queue. /// - /// @param value - /// The value to coalesce with the pending data using a logical OR or an ADD - /// as specified by the dispatch source type. A value of zero has no effect - /// and will not result in the submission of the event handler block. - public func mergeData(value: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, value) + /// - parameter data: the value to add to the current pending data. A value of zero + /// has no effect and will not result in the submission of the event handler block. + public func add(data: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) } } public extension DispatchSourceUserDataOr { -#if false /*FIXME: clashes with UserDataAdd?? */ - /// @function mergeData + /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_OR` and + /// submits its event handler block to its target queue. /// - /// @abstract - /// Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or - /// DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its - /// target queue. + /// - parameter data: The value to OR into the current pending data. A value of zero + /// has no effect and will not result in the submission of the event handler block. + public func or(data: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) + } +} + +public extension DispatchSourceUserDataReplace { + /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_REPLACE` + /// and submits its event handler block to its target queue. /// - /// @param value - /// The value to coalesce with the pending data using a logical OR or an ADD - /// as specified by the dispatch source type. A value of zero has no effect - /// and will not result in the submission of the event handler block. 
- public func mergeData(value: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, value) + /// - parameter data: The value that will replace the current pending data. + /// A value of zero will be stored but will not result in the submission of the event + /// handler block. + public func replace(data: UInt) { + dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) } -#endif } @_silgen_name("_swift_dispatch_source_type_DATA_ADD") @@ -391,6 +676,9 @@ internal func _swift_dispatch_source_type_data_add() -> dispatch_source_type_t @_silgen_name("_swift_dispatch_source_type_DATA_OR") internal func _swift_dispatch_source_type_data_or() -> dispatch_source_type_t +@_silgen_name("_swift_dispatch_source_type_DATA_REPLACE") +internal func _swift_dispatch_source_type_data_replace() -> dispatch_source_type_t + #if HAVE_MACH @_silgen_name("_swift_dispatch_source_type_MACH_SEND") internal func _swift_dispatch_source_type_mach_send() -> dispatch_source_type_t @@ -402,7 +690,7 @@ internal func _swift_dispatch_source_type_mach_recv() -> dispatch_source_type_t internal func _swift_dispatch_source_type_memorypressure() -> dispatch_source_type_t #endif -#if !os(Linux) +#if !os(Linux) && !os(Android) @_silgen_name("_swift_dispatch_source_type_PROC") internal func _swift_dispatch_source_type_proc() -> dispatch_source_type_t #endif @@ -416,7 +704,7 @@ internal func _swift_dispatch_source_type_signal() -> dispatch_source_type_t @_silgen_name("_swift_dispatch_source_type_TIMER") internal func _swift_dispatch_source_type_timer() -> dispatch_source_type_t -#if !os(Linux) +#if !os(Linux) && !os(Android) @_silgen_name("_swift_dispatch_source_type_VNODE") internal func _swift_dispatch_source_type_vnode() -> dispatch_source_type_t #endif diff --git a/src/swift/Time.swift b/src/swift/Time.swift index 76a6979eb..d7d49c96b 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -16,7 +16,15 @@ import CDispatch -public struct DispatchTime { +public 
struct DispatchTime : Comparable { +#if HAVE_MACH + private static let timebaseInfo: mach_timebase_info_data_t = { + var info = mach_timebase_info_data_t(numer: 1, denom: 1) + mach_timebase_info(&info) + return info + }() +#endif + public let rawValue: dispatch_time_t public static func now() -> DispatchTime { @@ -26,12 +34,60 @@ public struct DispatchTime { public static let distantFuture = DispatchTime(rawValue: ~0) - private init(rawValue: dispatch_time_t) { + fileprivate init(rawValue: dispatch_time_t) { self.rawValue = rawValue } + + /// Creates a `DispatchTime` relative to the system clock that + /// ticks since boot. + /// + /// - Parameters: + /// - uptimeNanoseconds: The number of nanoseconds since boot, excluding + /// time the system spent asleep + /// - Returns: A new `DispatchTime` + /// - Discussion: This clock is the same as the value returned by + /// `mach_absolute_time` when converted into nanoseconds. + /// On some platforms, the nanosecond value is rounded up to a + /// multiple of the Mach timebase, using the conversion factors + /// returned by `mach_timebase_info()`. The nanosecond equivalent + /// of the rounded result can be obtained by reading the + /// `uptimeNanoseconds` property. + /// Note that `DispatchTime(uptimeNanoseconds: 0)` is + /// equivalent to `DispatchTime.now()`, that is, its value + /// represents the number of nanoseconds since boot (excluding + /// system sleep time), not zero nanoseconds since boot. 
+ public init(uptimeNanoseconds: UInt64) { + var rawValue = uptimeNanoseconds +#if HAVE_MACH + if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) { + rawValue = (rawValue * UInt64(DispatchTime.timebaseInfo.denom) + + UInt64(DispatchTime.timebaseInfo.numer - 1)) / UInt64(DispatchTime.timebaseInfo.numer) + } +#endif + self.rawValue = dispatch_time_t(rawValue) + } + + public var uptimeNanoseconds: UInt64 { + var result = self.rawValue +#if HAVE_MACH + if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) { + result = result * UInt64(DispatchTime.timebaseInfo.numer) / UInt64(DispatchTime.timebaseInfo.denom) + } +#endif + return result + } +} + +public func <(a: DispatchTime, b: DispatchTime) -> Bool { + if a.rawValue == ~0 || b.rawValue == ~0 { return false } + return a.rawValue < b.rawValue } -public struct DispatchWallTime { +public func ==(a: DispatchTime, b: DispatchTime) -> Bool { + return a.rawValue == b.rawValue +} + +public struct DispatchWallTime : Comparable { public let rawValue: dispatch_time_t public static func now() -> DispatchWallTime { @@ -40,71 +96,101 @@ public struct DispatchWallTime { public static let distantFuture = DispatchWallTime(rawValue: ~0) - private init(rawValue: dispatch_time_t) { + fileprivate init(rawValue: dispatch_time_t) { self.rawValue = rawValue } - public init(time: timespec) { - var t = time + public init(timespec: timespec) { + var t = timespec self.rawValue = CDispatch.dispatch_walltime(&t, 0) } } -@available(*, deprecated, renamed: "DispatchWallTime") -public typealias DispatchWalltime = DispatchWallTime +public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + if b.rawValue == ~0 { + return a.rawValue != ~0 + } else if a.rawValue == ~0 { + return false + } + return -Int64(bitPattern: a.rawValue) < -Int64(bitPattern: b.rawValue) +} + +public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + return a.rawValue == b.rawValue +} public enum DispatchTimeInterval { case 
seconds(Int) case milliseconds(Int) case microseconds(Int) case nanoseconds(Int) + @_downgrade_exhaustivity_check + case never - internal var rawValue: UInt64 { + internal var rawValue: Int64 { switch self { - case .seconds(let s): return UInt64(s) * NSEC_PER_SEC - case .milliseconds(let ms): return UInt64(ms) * NSEC_PER_MSEC - case .microseconds(let us): return UInt64(us) * NSEC_PER_USEC - case .nanoseconds(let ns): return UInt64(ns) + case .seconds(let s): return Int64(s) * Int64(NSEC_PER_SEC) + case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC) + case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC) + case .nanoseconds(let ns): return Int64(ns) + case .never: return Int64.max + } + } + + public static func ==(lhs: DispatchTimeInterval, rhs: DispatchTimeInterval) -> Bool { + switch (lhs, rhs) { + case (.never, .never): return true + case (.never, _): return false + case (_, .never): return false + default: return lhs.rawValue == rhs.rawValue } } } public func +(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue)) + let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue) return DispatchTime(rawValue: t) } public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTime { - let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue)) + let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue) return DispatchTime(rawValue: t) } public func +(time: DispatchTime, seconds: Double) -> DispatchTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC))) + let interval = seconds * Double(NSEC_PER_SEC) + let t = CDispatch.dispatch_time(time.rawValue, + interval.isInfinite || interval.isNaN ? 
Int64.max : Int64(interval)) return DispatchTime(rawValue: t) } public func -(time: DispatchTime, seconds: Double) -> DispatchTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC))) + let interval = -seconds * Double(NSEC_PER_SEC) + let t = CDispatch.dispatch_time(time.rawValue, + interval.isInfinite || interval.isNaN ? Int64.min : Int64(interval)) return DispatchTime(rawValue: t) } public func +(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(interval.rawValue)) + let t = CDispatch.dispatch_time(time.rawValue, interval.rawValue) return DispatchWallTime(rawValue: t) } public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> DispatchWallTime { - let t = CDispatch.dispatch_time(time.rawValue, -Int64(interval.rawValue)) + let t = CDispatch.dispatch_time(time.rawValue, -interval.rawValue) return DispatchWallTime(rawValue: t) } public func +(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(seconds * Double(NSEC_PER_SEC))) + let interval = seconds * Double(NSEC_PER_SEC) + let t = CDispatch.dispatch_time(time.rawValue, + interval.isInfinite || interval.isNaN ? Int64.max : Int64(interval)) return DispatchWallTime(rawValue: t) } public func -(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { - let t = CDispatch.dispatch_time(time.rawValue, Int64(-seconds * Double(NSEC_PER_SEC))) + let interval = -seconds * Double(NSEC_PER_SEC) + let t = CDispatch.dispatch_time(time.rawValue, + interval.isInfinite || interval.isNaN ? 
Int64.min : Int64(interval)) return DispatchWallTime(rawValue: t) } diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index d38bb9358..5a551dfba 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -15,6 +15,10 @@ import CDispatch // This file contains declarations that are provided by the // importer via Dispatch.apinote when the platform has Objective-C support +public func dispatchMain() -> Never { + CDispatch.dispatch_main() +} + public class DispatchObject { internal func wrapped() -> dispatch_object_t { @@ -59,7 +63,7 @@ public class DispatchGroup : DispatchObject { } public func leave() { - dispatch_group_enter(__wrapped) + dispatch_group_leave(__wrapped) } } @@ -87,29 +91,25 @@ public class DispatchIO : DispatchObject { } internal init(__type: UInt, fd: Int32, queue: DispatchQueue, - handler: (error: Int32) -> Void) { + handler: @escaping (_ error: Int32) -> Void) { __wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler) } internal init(__type: UInt, path: UnsafePointer, oflag: Int32, - mode: mode_t, queue: DispatchQueue, handler: (error: Int32) -> Void) { + mode: mode_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { __wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler) } internal init(__type: UInt, io: DispatchIO, - queue: DispatchQueue, handler: (error: Int32) -> Void) { + queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { __wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler) } - internal init(queue:dispatch_queue_t) { - __wrapped = queue - } - deinit { _swift_dispatch_release(wrapped()) } - public func barrier(execute: () -> ()) { + public func barrier(execute: @escaping () -> ()) { dispatch_io_barrier(self.__wrapped, execute) } @@ -149,16 +149,16 @@ public class DispatchQueue : DispatchObject { _swift_dispatch_release(wrapped()) } - public func sync(execute workItem: @noescape ()->()) { + public 
func sync(execute workItem: ()->()) { dispatch_sync(self.__wrapped, workItem) } } public class DispatchSource : DispatchObject, - DispatchSourceType, DispatchSourceRead, + DispatchSourceProtocol, DispatchSourceRead, DispatchSourceSignal, DispatchSourceTimer, DispatchSourceUserDataAdd, DispatchSourceUserDataOr, - DispatchSourceWrite { + DispatchSourceUserDataReplace, DispatchSourceWrite { internal let __wrapped:dispatch_source_t final internal override func wrapped() -> dispatch_object_t { @@ -180,15 +180,34 @@ extension DispatchSource : DispatchSourceMachSend, } #endif -#if !os(Linux) +#if !os(Linux) && !os(Android) extension DispatchSource : DispatchSourceProcess, DispatchSourceFileSystemObject { } #endif +internal class __DispatchData : DispatchObject { + internal let __wrapped:dispatch_data_t + + final internal override func wrapped() -> dispatch_object_t { + return unsafeBitCast(__wrapped, to: dispatch_object_t.self) + } + + internal init(data:dispatch_data_t, owned:Bool) { + __wrapped = data + if !owned { + _swift_dispatch_retain(unsafeBitCast(data, to: dispatch_object_t.self)) + } + } + + deinit { + _swift_dispatch_release(wrapped()) + } +} + public typealias DispatchSourceHandler = @convention(block) () -> Void -public protocol DispatchSourceType { +public protocol DispatchSourceProtocol { func setEventHandler(qos: DispatchQoS, flags: DispatchWorkItemFlags, handler: DispatchSourceHandler?) func setEventHandler(handler: DispatchWorkItem) @@ -216,18 +235,20 @@ public protocol DispatchSourceType { var isCancelled: Bool { get } } -public protocol DispatchSourceUserDataAdd : DispatchSourceType { - func mergeData(value: UInt) +public protocol DispatchSourceUserDataAdd : DispatchSourceProtocol { + func add(data: UInt) } -public protocol DispatchSourceUserDataOr { -#if false /*FIXME: clashes with UserDataAdd?? 
*/ - func mergeData(value: UInt) -#endif +public protocol DispatchSourceUserDataOr : DispatchSourceProtocol { + func or(data: UInt) +} + +public protocol DispatchSourceUserDataReplace : DispatchSourceProtocol { + func replace(data: UInt) } #if HAVE_MACH -public protocol DispatchSourceMachSend : DispatchSourceType { +public protocol DispatchSourceMachSend : DispatchSourceProtocol { public var handle: mach_port_t { get } public var data: DispatchSource.MachSendEvent { get } @@ -237,21 +258,21 @@ public protocol DispatchSourceMachSend : DispatchSourceType { #endif #if HAVE_MACH -public protocol DispatchSourceMachReceive : DispatchSourceType { +public protocol DispatchSourceMachReceive : DispatchSourceProtocol { var handle: mach_port_t { get } } #endif #if HAVE_MACH -public protocol DispatchSourceMemoryPressure : DispatchSourceType { +public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol { public var data: DispatchSource.MemoryPressureEvent { get } public var mask: DispatchSource.MemoryPressureEvent { get } } #endif -#if !os(Linux) -public protocol DispatchSourceProcess : DispatchSourceType { +#if !os(Linux) && !os(Android) +public protocol DispatchSourceProcess : DispatchSourceProtocol { var handle: pid_t { get } var data: DispatchSource.ProcessEvent { get } @@ -260,28 +281,28 @@ public protocol DispatchSourceProcess : DispatchSourceType { } #endif -public protocol DispatchSourceRead : DispatchSourceType { +public protocol DispatchSourceRead : DispatchSourceProtocol { } -public protocol DispatchSourceSignal : DispatchSourceType { +public protocol DispatchSourceSignal : DispatchSourceProtocol { } -public protocol DispatchSourceTimer : DispatchSourceType { - func setTimer(start: DispatchTime, leeway: DispatchTimeInterval) +public protocol DispatchSourceTimer : DispatchSourceProtocol { + func scheduleOneshot(deadline: DispatchTime, leeway: DispatchTimeInterval) - func setTimer(walltime start: DispatchWallTime, leeway: DispatchTimeInterval) + func 
scheduleOneshot(wallDeadline: DispatchWallTime, leeway: DispatchTimeInterval) - func setTimer(start: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) + func scheduleRepeating(deadline: DispatchTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) - func setTimer(start: DispatchTime, interval: Double, leeway: DispatchTimeInterval) + func scheduleRepeating(deadline: DispatchTime, interval: Double, leeway: DispatchTimeInterval) - func setTimer(walltime start: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) + func scheduleRepeating(wallDeadline: DispatchWallTime, interval: DispatchTimeInterval, leeway: DispatchTimeInterval) - func setTimer(walltime start: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval) + func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval) } -#if !os(Linux) -public protocol DispatchSourceFileSystemObject : DispatchSourceType { +#if !os(Linux) && !os(Android) +public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol { var handle: Int32 { get } var data: DispatchSource.FileSystemEvent { get } @@ -290,7 +311,7 @@ public protocol DispatchSourceFileSystemObject : DispatchSourceType { } #endif -public protocol DispatchSourceWrite : DispatchSourceType { +public protocol DispatchSourceWrite : DispatchSourceProtocol { } @@ -307,9 +328,9 @@ internal enum _OSQoSClass : UInt32 { case 0x21: self = .QOS_CLASS_USER_INTERACTIVE case 0x19: self = .QOS_CLASS_USER_INITIATED case 0x15: self = .QOS_CLASS_DEFAULT - case 0x11: self = QOS_CLASS_UTILITY - case 0x09: self = QOS_CLASS_BACKGROUND - case 0x00: self = QOS_CLASS_UNSPECIFIED + case 0x11: self = .QOS_CLASS_UTILITY + case 0x09: self = .QOS_CLASS_BACKGROUND + case 0x00: self = .QOS_CLASS_UNSPECIFIED default: return nil } } @@ -317,3 +338,6 @@ internal enum _OSQoSClass : UInt32 { @_silgen_name("_swift_dispatch_release") internal func _swift_dispatch_release(_ obj: 
dispatch_object_t) -> Void + +@_silgen_name("_swift_dispatch_retain") +internal func _swift_dispatch_retain(_ obj: dispatch_object_t) -> Void diff --git a/src/time.c b/src/time.c index 6d008319b..5b0bab0bf 100644 --- a/src/time.c +++ b/src/time.c @@ -20,52 +20,74 @@ #include "internal.h" -uint64_t -_dispatch_get_nanoseconds(void) +#if DISPATCH_USE_HOST_TIME +typedef struct _dispatch_host_time_data_s { + long double frac; + bool ratio_1_to_1; +} _dispatch_host_time_data_s; + +DISPATCH_CACHELINE_ALIGN +static _dispatch_host_time_data_s _dispatch_host_time_data; + +uint64_t (*_dispatch_host_time_mach2nano)(uint64_t machtime); +uint64_t (*_dispatch_host_time_nano2mach)(uint64_t nsec); + +static uint64_t +_dispatch_mach_host_time_mach2nano(uint64_t machtime) { -#if !TARGET_OS_WIN32 - struct timeval now; - int r = gettimeofday(&now, NULL); - dispatch_assert_zero(r); - dispatch_assert(sizeof(NSEC_PER_SEC) == 8); - dispatch_assert(sizeof(NSEC_PER_USEC) == 8); - return (uint64_t)now.tv_sec * NSEC_PER_SEC + - (uint64_t)now.tv_usec * NSEC_PER_USEC; -#else /* TARGET_OS_WIN32 */ - // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
- FILETIME ft; - ULARGE_INTEGER li; - GetSystemTimeAsFileTime(&ft); - li.LowPart = ft.dwLowDateTime; - li.HighPart = ft.dwHighDateTime; - return li.QuadPart * 100ull; -#endif /* TARGET_OS_WIN32 */ + _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; + + if (unlikely(!machtime || data->ratio_1_to_1)) { + return machtime; + } + if (machtime >= INT64_MAX) { + return INT64_MAX; + } + long double big_tmp = ((long double)machtime * data->frac) + .5L; + if (unlikely(big_tmp >= INT64_MAX)) { + return INT64_MAX; + } + return (uint64_t)big_tmp; } -#if !(defined(__i386__) || defined(__x86_64__) || !HAVE_MACH_ABSOLUTE_TIME) \ - || TARGET_OS_WIN32 -DISPATCH_CACHELINE_ALIGN _dispatch_host_time_data_s _dispatch_host_time_data = { - .ratio_1_to_1 = true, -}; +static uint64_t +_dispatch_mach_host_time_nano2mach(uint64_t nsec) +{ + _dispatch_host_time_data_s *const data = &_dispatch_host_time_data; + + if (unlikely(!nsec || data->ratio_1_to_1)) { + return nsec; + } + if (nsec >= INT64_MAX) { + return INT64_MAX; + } + long double big_tmp = ((long double)nsec / data->frac) + .5L; + if (unlikely(big_tmp >= INT64_MAX)) { + return INT64_MAX; + } + return (uint64_t)big_tmp; +} + +static void +_dispatch_host_time_init(mach_timebase_info_data_t *tbi) +{ + _dispatch_host_time_data.frac = tbi->numer; + _dispatch_host_time_data.frac /= tbi->denom; + _dispatch_host_time_data.ratio_1_to_1 = (tbi->numer == tbi->denom); + _dispatch_host_time_mach2nano = _dispatch_mach_host_time_mach2nano; + _dispatch_host_time_nano2mach = _dispatch_mach_host_time_nano2mach; +} +#endif // DISPATCH_USE_HOST_TIME void -_dispatch_get_host_time_init(void *context DISPATCH_UNUSED) +_dispatch_time_init(void) { -#if !TARGET_OS_WIN32 +#if DISPATCH_USE_HOST_TIME mach_timebase_info_data_t tbi; (void)dispatch_assume_zero(mach_timebase_info(&tbi)); - _dispatch_host_time_data.frac = tbi.numer; - _dispatch_host_time_data.frac /= tbi.denom; - _dispatch_host_time_data.ratio_1_to_1 = (tbi.numer == tbi.denom); 
-#else - LARGE_INTEGER freq; - dispatch_assume(QueryPerformanceFrequency(&freq)); - _dispatch_host_time_data.frac = (long double)NSEC_PER_SEC / - (long double)freq.QuadPart; - _dispatch_host_time_data.ratio_1_to_1 = (freq.QuadPart == 1); -#endif /* TARGET_OS_WIN32 */ + _dispatch_host_time_init(&tbi); +#endif // DISPATCH_USE_HOST_TIME } -#endif dispatch_time_t dispatch_time(dispatch_time_t inval, int64_t delta) @@ -115,7 +137,7 @@ dispatch_walltime(const struct timespec *inval, int64_t delta) { int64_t nsec; if (inval) { - nsec = inval->tv_sec * 1000000000ll + inval->tv_nsec; + nsec = (int64_t)_dispatch_timespec_to_nano(*inval); } else { nsec = (int64_t)_dispatch_get_nanoseconds(); } diff --git a/src/trace.h b/src/trace.h index d73ff3fb3..c670f60b7 100644 --- a/src/trace.h +++ b/src/trace.h @@ -29,14 +29,6 @@ #if DISPATCH_PURE_C -#if DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION -typedef struct dispatch_trace_timer_params_s { - int64_t deadline, interval, leeway; -} *dispatch_trace_timer_params_t; - -#include "provider.h" -#endif // DISPATCH_USE_DTRACE || DISPATCH_USE_DTRACE_INTROSPECTION - #if DISPATCH_USE_DTRACE_INTROSPECTION #define _dispatch_trace_callout(_c, _f, _dcc) do { \ if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ @@ -108,7 +100,7 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) } else { \ _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ - if (_dc->dc_flags & DISPATCH_OBJ_SYNC_SLOW_BIT) { \ + if (_dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { \ _kind = "semaphore"; \ _func = (dispatch_function_t)dispatch_semaphore_signal; \ } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \ @@ -131,8 +123,8 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) #if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, pthread_priority_t pp, 
unsigned int n) +_dispatch_trace_root_queue_push_list(dispatch_queue_t dq, + dispatch_object_t _head, dispatch_object_t _tail, int n) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _head._do; @@ -141,20 +133,20 @@ _dispatch_trace_queue_push_list(dispatch_queue_t dq, dispatch_object_t _head, } while (dou != _tail._do && (dou = dou->do_next)); } _dispatch_introspection_queue_push_list(dq, _head, _tail); - _dispatch_queue_push_list(dq, _head, _tail, pp, n); + _dispatch_root_queue_push_inline(dq, _head, _tail, n); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - pthread_priority_t pp, dispatch_wakeup_flags_t flags) + dispatch_qos_t qos) { if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { struct dispatch_object_s *dou = _tail._do; _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); } _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_inline(dq, _tail, pp, flags); + _dispatch_queue_push_inline(dq, _tail, qos); } DISPATCH_ALWAYS_INLINE @@ -168,7 +160,7 @@ _dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) _dispatch_introspection_queue_push(dq, _tail); } -#define _dispatch_queue_push_list _dispatch_trace_queue_push_list +#define _dispatch_root_queue_push_inline _dispatch_trace_root_queue_push_list #define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline DISPATCH_ALWAYS_INLINE @@ -189,7 +181,7 @@ _dispatch_trace_continuation_pop(dispatch_queue_t dq, dispatch_object_t dou) #if DISPATCH_USE_DTRACE static inline dispatch_function_t -_dispatch_trace_timer_function(dispatch_source_refs_t dr) +_dispatch_trace_timer_function(dispatch_timer_source_refs_t dr) { dispatch_continuation_t dc; dc = os_atomic_load(&dr->ds_handler[DS_EVENT_HANDLER], relaxed); @@ -198,12 +190,12 @@ _dispatch_trace_timer_function(dispatch_source_refs_t dr) DISPATCH_ALWAYS_INLINE static inline dispatch_trace_timer_params_t 
-_dispatch_trace_timer_params(uintptr_t ident, +_dispatch_trace_timer_params(dispatch_clock_t clock, struct dispatch_timer_source_s *values, uint64_t deadline, dispatch_trace_timer_params_t params) { - #define _dispatch_trace_time2nano3(t) (DISPATCH_TIMER_KIND(ident) \ - == DISPATCH_TIMER_KIND_MACH ? _dispatch_time_mach2nano(t) : (t)) + #define _dispatch_trace_time2nano3(t) \ + (clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t)) #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \ (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);}) #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \ @@ -212,14 +204,13 @@ _dispatch_trace_timer_params(uintptr_t ident, if (deadline) { params->deadline = (int64_t)deadline; } else { - uint64_t now = (DISPATCH_TIMER_KIND(ident) == - DISPATCH_TIMER_KIND_MACH ? _dispatch_absolute_time() : - _dispatch_get_nanoseconds()); + uint64_t now = _dispatch_time_now(clock); params->deadline = _dispatch_trace_time2nano2(values->target, values->target < now ? 
0 : values->target - now); } + uint64_t leeway = values->deadline - values->target; params->interval = _dispatch_trace_time2nano(values->interval); - params->leeway = _dispatch_trace_time2nano(values->leeway); + params->leeway = _dispatch_trace_time2nano(leeway); return params; } @@ -232,33 +223,34 @@ _dispatch_trace_timer_configure_enabled(void) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_configure(dispatch_source_t ds, uintptr_t ident, +_dispatch_trace_timer_configure(dispatch_source_t ds, dispatch_clock_t clock, struct dispatch_timer_source_s *values) { + dispatch_timer_source_refs_t dr = ds->ds_timer_refs; struct dispatch_trace_timer_params_s params; - DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(ds->ds_refs), - _dispatch_trace_timer_params(ident, values, 0, - ¶ms)); + DISPATCH_TIMER_CONFIGURE(ds, _dispatch_trace_timer_function(dr), + _dispatch_trace_timer_params(clock, values, 0, ¶ms)); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_program(dispatch_source_refs_t dr, uint64_t deadline) +_dispatch_trace_timer_program(dispatch_timer_source_refs_t dr, uint64_t deadline) { if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) { if (deadline && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(dr->du_ident); struct dispatch_trace_timer_params_s params; DISPATCH_TIMER_PROGRAM(ds, _dispatch_trace_timer_function(dr), - _dispatch_trace_timer_params(ds->ds_ident_hack, - &ds_timer(dr), deadline, ¶ms)); + _dispatch_trace_timer_params(clock, &dr->dt_timer, + deadline, ¶ms)); } } } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_wake(dispatch_source_refs_t dr) +_dispatch_trace_timer_wake(dispatch_timer_source_refs_t dr) { if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { if (dr) { @@ -270,8 +262,8 @@ _dispatch_trace_timer_wake(dispatch_source_refs_t dr) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_timer_fire(dispatch_source_refs_t dr, 
unsigned long data, - unsigned long missed) +_dispatch_trace_timer_fire(dispatch_timer_source_refs_t dr, uint64_t data, + uint64_t missed) { if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { if (!(data - missed) && dr) { @@ -284,8 +276,8 @@ _dispatch_trace_timer_fire(dispatch_source_refs_t dr, unsigned long data, #else #define _dispatch_trace_timer_configure_enabled() false -#define _dispatch_trace_timer_configure(ds, ident, values) \ - do { (void)(ds); (void)(ident); (void)(values); } while(0) +#define _dispatch_trace_timer_configure(ds, clock, values) \ + do { (void)(ds); (void)(clock); (void)(values); } while(0) #define _dispatch_trace_timer_program(dr, deadline) \ do { (void)(dr); (void)(deadline); } while(0) #define _dispatch_trace_timer_wake(dr) \ diff --git a/src/voucher.c b/src/voucher.c index 94a293427..458e2f0a4 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -85,6 +85,7 @@ voucher_create(voucher_recipe_t recipe) if (extra) { memcpy(_voucher_extra_recipes(voucher), recipe->vr_data, extra); } + _voucher_trace(CREATE, voucher, MACH_PORT_NULL, 0); return voucher; } #endif @@ -165,24 +166,69 @@ _voucher_thread_cleanup(void *voucher) _voucher_release(voucher); } +#pragma mark - +#pragma mark voucher_hash + DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, voucher_s) _vouchers[VL_HASH_SIZE]; -#define _vouchers_head(kv) (&_vouchers[VL_HASH((kv))]) -static dispatch_unfair_lock_s _vouchers_lock; -#define _vouchers_lock_lock() _dispatch_unfair_lock_lock(&_vouchers_lock) -#define _vouchers_lock_unlock() _dispatch_unfair_lock_unlock(&_vouchers_lock) +static voucher_hash_head_s _voucher_hash[VL_HASH_SIZE]; + +#define _voucher_hash_head(kv) (&_voucher_hash[VL_HASH((kv))]) +static dispatch_unfair_lock_s _voucher_hash_lock; +#define _voucher_hash_lock_lock() \ + _dispatch_unfair_lock_lock(&_voucher_hash_lock) +#define _voucher_hash_lock_unlock() \ + _dispatch_unfair_lock_unlock(&_voucher_hash_lock) + +DISPATCH_ALWAYS_INLINE +static inline void 
+_voucher_hash_head_init(voucher_hash_head_s *head) +{ + _voucher_hash_set_next(&head->vhh_first, VOUCHER_NULL); + _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &head->vhh_first); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_enqueue(mach_voucher_t kv, voucher_t v) +{ + // same as TAILQ_INSERT_TAIL + voucher_hash_head_s *head = _voucher_hash_head(kv); + uintptr_t prev_ptr = head->vhh_last_ptr; + _voucher_hash_set_next(&v->v_list.vhe_next, VOUCHER_NULL); + v->v_list.vhe_prev_ptr = prev_ptr; + _voucher_hash_store_to_prev_ptr(prev_ptr, v); + _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &v->v_list.vhe_next); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_remove(mach_voucher_t kv, voucher_t v) +{ + // same as TAILQ_REMOVE + voucher_hash_head_s *head = _voucher_hash_head(kv); + voucher_t next = _voucher_hash_get_next(v->v_list.vhe_next); + uintptr_t prev_ptr = v->v_list.vhe_prev_ptr; + if (next) { + next->v_list.vhe_prev_ptr = prev_ptr; + } else { + head->vhh_last_ptr = prev_ptr; + } + _voucher_hash_store_to_prev_ptr(prev_ptr, next); + _voucher_hash_mark_not_enqueued(v); +} static voucher_t _voucher_find_and_retain(mach_voucher_t kv) { - voucher_t v; if (!kv) return NULL; - _vouchers_lock_lock(); - TAILQ_FOREACH(v, _vouchers_head(kv), v_list) { + _voucher_hash_lock_lock(); + voucher_hash_head_s *head = _voucher_hash_head(kv); + voucher_t v = _voucher_hash_get_next(head->vhh_first); + while (v) { if (v->v_ipc_kvoucher == kv) { int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); - if (slowpath(xref_cnt < 0)) { + if (unlikely(xref_cnt < 0)) { _dispatch_voucher_debug("over-release", v); _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } @@ -192,8 +238,9 @@ _voucher_find_and_retain(mach_voucher_t kv) } break; } + v = _voucher_hash_get_next(v->v_list.vhe_next); } - _vouchers_lock_unlock(); + _voucher_hash_lock_unlock(); return v; } @@ -202,35 +249,35 @@ 
_voucher_insert(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; if (!kv) return; - _vouchers_lock_lock(); - if (slowpath(_TAILQ_IS_ENQUEUED(v, v_list))) { + _voucher_hash_lock_lock(); + if (unlikely(_voucher_hash_is_enqueued(v))) { _dispatch_voucher_debug("corruption", v); - DISPATCH_CLIENT_CRASH(v->v_list.tqe_prev, "Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } - TAILQ_INSERT_TAIL(_vouchers_head(kv), v, v_list); - _vouchers_lock_unlock(); + _voucher_hash_enqueue(kv, v); + _voucher_hash_lock_unlock(); } static void _voucher_remove(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; - if (!_TAILQ_IS_ENQUEUED(v, v_list)) return; - _vouchers_lock_lock(); - if (slowpath(!kv)) { + if (!_voucher_hash_is_enqueued(v)) return; + _voucher_hash_lock_lock(); + if (unlikely(!kv)) { _dispatch_voucher_debug("corruption", v); DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } // check for resurrection race with _voucher_find_and_retain - if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0 && - _TAILQ_IS_ENQUEUED(v, v_list)) { - TAILQ_REMOVE(_vouchers_head(kv), v, v_list); - _TAILQ_MARK_NOT_ENQUEUED(v, v_list); - v->v_list.tqe_next = (void*)~0ull; + if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0) { + if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(kv, v); } - _vouchers_lock_unlock(); + _voucher_hash_lock_unlock(); } +#pragma mark - +#pragma mark mach_voucher_t + void _voucher_dealloc_mach_voucher(mach_voucher_t kv) { @@ -313,18 +360,11 @@ voucher_replace_default_voucher(void) #define _voucher_mach_recipe_size(payload_size) \ (sizeof(mach_voucher_attr_recipe_data_t) + (payload_size)) -#if VOUCHER_USE_MACH_VOUCHER_PRIORITY #define _voucher_mach_recipe_alloca(v) ((mach_voucher_attr_recipe_t)alloca(\ _voucher_mach_recipe_size(0) + \ _voucher_mach_recipe_size(sizeof(ipc_pthread_priority_value_t)) + \ _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ _voucher_extra_size(v))) -#else -#define _voucher_mach_recipe_alloca(v) 
((mach_voucher_attr_recipe_t)alloca(\ - _voucher_mach_recipe_size(0) + \ - _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ - _voucher_extra_size(v))) -#endif DISPATCH_ALWAYS_INLINE static inline mach_voucher_attr_recipe_size_t @@ -345,7 +385,6 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, }; size += _voucher_mach_recipe_size(0); -#if VOUCHER_USE_MACH_VOUCHER_PRIORITY if (pp) { ipc_pthread_priority_value_t value = (ipc_pthread_priority_value_t)pp; *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ @@ -356,7 +395,6 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, mvar_buf = _dispatch_memappend(mvar_buf, &value); size += _voucher_mach_recipe_size(sizeof(value)); } -#endif // VOUCHER_USE_MACH_VOUCHER_PRIORITY if ((v && v->v_activity) || pp) { _voucher_mach_udata_s *udata_buf; @@ -419,7 +457,7 @@ _voucher_get_mach_voucher(voucher_t voucher) size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority); kr = _voucher_create_mach_voucher(mvar, size, &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; } if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, @@ -453,7 +491,7 @@ _voucher_create_mach_voucher_with_priority(voucher_t voucher, size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority); kr = _voucher_create_mach_voucher(mvar, size, &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; } _dispatch_kvoucher_debug("create with priority from voucher[%p]", kv, @@ -471,29 +509,6 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) mach_voucher_attr_recipe_size_t kvr_size = 0; mach_voucher_attr_content_size_t udata_sz = 0; _voucher_mach_udata_s *udata = NULL; -#if !VOUCHER_USE_BANK_AUTOREDEEM - mach_voucher_t rkv; - const mach_voucher_attr_recipe_data_t redeem_recipe[] = { - [0] = { - .key = MACH_VOUCHER_ATTR_KEY_ALL, 
- .command = MACH_VOUCHER_ATTR_COPY, - .previous_voucher = kv, - }, - [1] = { - .key = MACH_VOUCHER_ATTR_KEY_BANK, - .command = MACH_VOUCHER_ATTR_REDEEM, - }, - }; - kr = _voucher_create_mach_voucher(redeem_recipe, sizeof(redeem_recipe), - &rkv); - if (!dispatch_assume_zero(kr)) { - _voucher_dealloc_mach_voucher(kv); - _dispatch_kvoucher_debug("redeemed from 0x%08x", rkv, kv); - kv = rkv; - } else { - _dispatch_voucher_debug_machport(kv); - } -#endif voucher_t v = _voucher_find_and_retain(kv); if (v) { _dispatch_voucher_debug("kvoucher[0x%08x] found", v, kv); @@ -548,15 +563,12 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) .key = MACH_VOUCHER_ATTR_KEY_USER_DATA, .command = MACH_VOUCHER_ATTR_REMOVE, }, -#if VOUCHER_USE_MACH_VOUCHER_PRIORITY [2] = { .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, .command = MACH_VOUCHER_ATTR_REMOVE, }, -#endif }; mach_voucher_attr_recipe_size_t size = sizeof(remove_userdata_recipe); - kr = _voucher_create_mach_voucher(remove_userdata_recipe, size, &nkv); if (!dispatch_assume_zero(kr)) { _dispatch_voucher_debug("kvoucher[0x%08x] udata removal " @@ -574,6 +586,7 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) } } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); _voucher_insert(v); _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); return v; @@ -608,6 +621,7 @@ _voucher_create_with_priority_and_mach_voucher(voucher_t ov, "voucher[%p]", v, kv, ov); _dispatch_voucher_debug_machport(kv); } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); return v; } @@ -635,7 +649,7 @@ _voucher_create_without_importance(voucher_t ov) }; kr = _voucher_create_mach_voucher(importance_remove_recipe, sizeof(importance_remove_recipe), &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { if (ov->v_ipc_kvoucher) return NULL; kv = MACH_VOUCHER_NULL; } @@ -665,6 +679,7 @@ _voucher_create_without_importance(voucher_t ov) 
_dispatch_voucher_debug("kvoucher[0x%08x] create without importance " "from voucher[%p]", v, kv, ov); } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); return v; } @@ -684,7 +699,7 @@ _voucher_create_accounting_voucher(voucher_t ov) }; kr = _voucher_create_mach_voucher(&accounting_copy_recipe, sizeof(accounting_copy_recipe), &kv); - if (dispatch_assume_zero(kr) || !kv){ + if (dispatch_assume_zero(kr) || !kv) { return NULL; } voucher_t v = _voucher_find_and_retain(kv); @@ -700,6 +715,7 @@ _voucher_create_accounting_voucher(voucher_t ov) v->v_kvbase = _voucher_retain(ov); _voucher_dealloc_mach_voucher(kv); // borrow base reference } + _voucher_trace(CREATE, v, kv, v->v_activity); _voucher_insert(v); _dispatch_voucher_debug("kvoucher[0x%08x] create accounting voucher " "from voucher[%p]", v, kv, ov); @@ -757,18 +773,19 @@ _voucher_xref_dispose(voucher_t voucher) { _dispatch_voucher_debug("xref_dispose", voucher); _voucher_remove(voucher); - return _os_object_release_internal_inline((_os_object_t)voucher); + return _os_object_release_internal_n_inline((_os_object_t)voucher, 1); } void _voucher_dispose(voucher_t voucher) { + _voucher_trace(DISPOSE, voucher); _dispatch_voucher_debug("dispose", voucher); - if (slowpath(_TAILQ_IS_ENQUEUED(voucher, v_list))) { + if (slowpath(_voucher_hash_is_enqueued(voucher))) { _dispatch_voucher_debug("corruption", voucher); - DISPATCH_CLIENT_CRASH(voucher->v_list.tqe_prev, "Voucher corruption"); + DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } - voucher->v_list.tqe_next = DISPATCH_OBJECT_LISTLESS; + _voucher_hash_mark_not_enqueued(voucher); if (voucher->v_ipc_kvoucher) { if (voucher->v_ipc_kvoucher != voucher->v_kvoucher) { _voucher_dealloc_mach_voucher(voucher->v_ipc_kvoucher); @@ -806,10 +823,9 @@ _voucher_activity_debug_channel_init(void) { dispatch_mach_handler_function_t handler = NULL; - if (_voucher_libtrace_hooks && _voucher_libtrace_hooks->vah_version >= 2) { + if (_voucher_libtrace_hooks) { handler = 
_voucher_libtrace_hooks->vah_debug_channel_handler; } - if (!handler) return; dispatch_mach_t dm; @@ -824,6 +840,7 @@ _voucher_activity_debug_channel_init(void) if (dbgp) { dm = dispatch_mach_create_f("com.apple.debug-channel", DISPATCH_TARGET_QUEUE_DEFAULT, NULL, handler); + dm->dm_recv_refs->du_can_be_wlh = false; // 29906118 dispatch_mach_connect(dm, dbgp, MACH_PORT_NULL, NULL); // will force the DISPATCH_MACH_CONNECTED event dispatch_mach_send_barrier_f(dm, NULL, @@ -989,6 +1006,9 @@ _voucher_libkernel_init(void) void voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) { + if (hooks->vah_version < 3) { + DISPATCH_CLIENT_CRASH(hooks->vah_version, "unsupported vah_version"); + } if (!os_atomic_cmpxchg(&_voucher_libtrace_hooks, NULL, hooks, relaxed)) { DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, @@ -1002,7 +1022,7 @@ _voucher_init(void) _voucher_libkernel_init(); unsigned int i; for (i = 0; i < VL_HASH_SIZE; i++) { - TAILQ_INIT(&_vouchers[i]); + _voucher_hash_head_init(&_voucher_hash[i]); } } @@ -1051,6 +1071,12 @@ _voucher_activity_id_allocate(firehose_activity_flags_t flags) return FIREHOSE_ACTIVITY_ID_MAKE(aid, flags); } +firehose_activity_id_t +voucher_activity_id_allocate(firehose_activity_flags_t flags) +{ + return _voucher_activity_id_allocate(flags); +} + #define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \ firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \ stream, pub, priv, privbuf) @@ -1071,7 +1097,13 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) info_size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 1, &p_uniqinfo, PROC_PIDUNIQIDENTIFIERINFO_SIZE); if (slowpath(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) { - DISPATCH_INTERNAL_CRASH(info_size, "Unable to get the unique pid"); + if (info_size == 0) { + DISPATCH_INTERNAL_CRASH(errno, + "Unable to get the unique pid (error)"); + } else { + DISPATCH_INTERNAL_CRASH(info_size, + "Unable to get the unique pid (size)"); + } } 
_voucher_unique_pid = p_uniqinfo.p_uniqueid; @@ -1094,6 +1126,13 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) // firehose_buffer_create always consumes the send-right _firehose_task_buffer = firehose_buffer_create(logd_port, _voucher_unique_pid, flags); + if (_voucher_libtrace_hooks->vah_version >= 4 && + _voucher_libtrace_hooks->vah_metadata_init) { + firehose_buffer_t fb = _firehose_task_buffer; + size_t meta_sz = FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE; + void *meta = (void *)((uintptr_t)(&fb->fb_header + 1) - meta_sz); + _voucher_libtrace_hooks->vah_metadata_init(meta, meta_sz); + } } } @@ -1126,30 +1165,23 @@ voucher_activity_get_metadata_buffer(size_t *length) } voucher_t -voucher_activity_create(firehose_tracepoint_id_t trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location) -{ - return voucher_activity_create_with_location(&trace_id, base, flags, location); -} - -voucher_t -voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, - voucher_t base, firehose_activity_flags_t flags, uint64_t location) +voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, + const void *pubdata, size_t publen) { firehose_activity_id_t va_id = 0, current_id = 0, parent_id = 0; firehose_tracepoint_id_u ftid = { .ftid_value = *trace_id }; - uint16_t pubsize = sizeof(va_id) + sizeof(location); uint64_t creator_id = 0; + uint16_t pubsize; voucher_t ov = _voucher_get(); voucher_t v; + if (os_add_overflow(sizeof(va_id), publen, &pubsize) || pubsize > 128) { + DISPATCH_CLIENT_CRASH(pubsize, "Absurd publen"); + } if (base == VOUCHER_CURRENT) { base = ov; } - if (_voucher_activity_disabled()) { - *trace_id = 0; - return base ? 
_voucher_retain(base) : VOUCHER_NULL; - } FIREHOSE_TRACE_ID_CLEAR_FLAG(ftid, base, has_unique_pid); if (ov && (current_id = ov->v_activity)) { @@ -1179,6 +1211,10 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, v->v_activity_creator = _voucher_unique_pid; v->v_parent_activity = parent_id; + if (_voucher_activity_disabled()) { + goto done; + } + static const firehose_stream_t streams[2] = { firehose_stream_metadata, firehose_stream_persist, @@ -1202,13 +1238,24 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, pubptr = _dispatch_memappend(pubptr, &parent_id); } pubptr = _dispatch_memappend(pubptr, &va_id); - pubptr = _dispatch_memappend(pubptr, &location); + pubptr = _dispatch_mempcpy(pubptr, pubdata, publen); _voucher_activity_tracepoint_flush(ft, ftid); } +done: *trace_id = ftid.ftid_value; + _voucher_trace(CREATE, v, v->v_kvoucher, va_id); return v; } +voucher_t +voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, + voucher_t base, firehose_activity_flags_t flags, uint64_t loc) +{ + return voucher_activity_create_with_data(trace_id, base, flags, + &loc, sizeof(loc)); +} + +#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, firehose_activity_id_t new_id) @@ -1245,6 +1292,7 @@ _voucher_activity_swap(firehose_activity_id_t old_id, if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id); _voucher_activity_tracepoint_flush(ft, ftid); } +#endif firehose_activity_id_t voucher_get_activity_id_and_creator(voucher_t v, uint64_t *creator_pid, @@ -1276,22 +1324,22 @@ voucher_activity_flush(firehose_stream_t stream) firehose_buffer_stream_flush(_firehose_task_buffer, stream); } -DISPATCH_ALWAYS_INLINE -static inline firehose_tracepoint_id_t -_voucher_activity_trace(firehose_stream_t stream, - firehose_tracepoint_id_u ftid, uint64_t stamp, - const void *pubdata, size_t publen, - const void *privdata, size_t privlen) +DISPATCH_NOINLINE 
+firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t stamp, + const struct iovec *iov, size_t publen, size_t privlen) { + firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); const size_t _firehose_chunk_payload_size = - sizeof(((struct firehose_buffer_chunk_s *)0)->fbc_data); + sizeof(((struct firehose_chunk_s *)0)->fc_data); if (_voucher_activity_disabled()) return 0; firehose_tracepoint_t ft; firehose_activity_id_t va_id = 0; - firehose_buffer_chunk_t fbc; + firehose_chunk_t fc; uint8_t *privptr, *pubptr; size_t pubsize = publen; voucher_t ov = _voucher_get(); @@ -1331,38 +1379,52 @@ _voucher_activity_trace(firehose_stream_t stream, pubptr = _dispatch_memappend(pubptr, &creator_pid); } if (privlen) { - fbc = firehose_buffer_chunk_for_address(ft); + fc = firehose_buffer_chunk_for_address(ft); struct firehose_buffer_range_s range = { - .fbr_offset = (uint16_t)(privptr - fbc->fbc_start), + .fbr_offset = (uint16_t)(privptr - fc->fc_start), .fbr_length = (uint16_t)privlen, }; pubptr = _dispatch_memappend(pubptr, &range); - _dispatch_mempcpy(privptr, privdata, privlen); } - _dispatch_mempcpy(pubptr, pubdata, publen); + while (publen > 0) { + pubptr = _dispatch_mempcpy(pubptr, iov->iov_base, iov->iov_len); + if (unlikely(os_sub_overflow(publen, iov->iov_len, &publen))) { + DISPATCH_CLIENT_CRASH(0, "Invalid arguments"); + } + iov++; + } + while (privlen > 0) { + privptr = _dispatch_mempcpy(privptr, iov->iov_base, iov->iov_len); + if (unlikely(os_sub_overflow(privlen, iov->iov_len, &privlen))) { + DISPATCH_CLIENT_CRASH(0, "Invalid arguments"); + } + iov++; + } _voucher_activity_tracepoint_flush(ft, ftid); return ftid.ftid_value; } firehose_tracepoint_id_t voucher_activity_trace(firehose_stream_t stream, - firehose_tracepoint_id_t trace_id, uint64_t timestamp, + firehose_tracepoint_id_t trace_id, uint64_t stamp, const void 
*pubdata, size_t publen) { - firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; - return _voucher_activity_trace(stream, ftid, timestamp, pubdata, publen, - NULL, 0); + struct iovec iov = { (void *)pubdata, publen }; + return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0); } firehose_tracepoint_id_t voucher_activity_trace_with_private_strings(firehose_stream_t stream, - firehose_tracepoint_id_t trace_id, uint64_t timestamp, + firehose_tracepoint_id_t trace_id, uint64_t stamp, const void *pubdata, size_t publen, const void *privdata, size_t privlen) { - firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; - return _voucher_activity_trace(stream, ftid, timestamp, - pubdata, publen, privdata, privlen); + struct iovec iov[2] = { + { (void *)pubdata, publen }, + { (void *)privdata, privlen }, + }; + return voucher_activity_trace_v(stream, trace_id, stamp, + iov, publen, privlen); } #pragma mark - @@ -1374,7 +1436,7 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) size_t offset = 0; #define bufprintf(...) 
\ offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__) - bufprintf("voucher[%p] = { xrefcnt = 0x%x, refcnt = 0x%x", v, + bufprintf("voucher[%p] = { xref = %d, ref = %d", v, v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1); if (v->v_kvbase) { @@ -1410,7 +1472,7 @@ voucher_create(voucher_recipe_t recipe) (void)recipe; return NULL; } -#endif +#endif // VOUCHER_ENABLE_RECIPE_OBJECTS voucher_t voucher_adopt(voucher_t voucher) @@ -1495,12 +1557,14 @@ _voucher_create_accounting_voucher(voucher_t voucher) return NULL; } +#if HAVE_MACH voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { (void)msg; return NULL; } +#endif #if VOUCHER_ENABLE_GET_MACH_VOUCHER mach_voucher_t @@ -1509,7 +1573,7 @@ voucher_get_mach_voucher(voucher_t voucher) (void)voucher; return 0; } -#endif +#endif // VOUCHER_ENABLE_GET_MACH_VOUCHER void _voucher_xref_dispose(voucher_t voucher) @@ -1543,7 +1607,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf (void)persona_info; return -1; } -#endif +#endif // VOUCHER_EXPORT_PERSONA_SPI void _voucher_activity_debug_channel_init(void) @@ -1560,6 +1624,7 @@ _voucher_init(void) { } +#if OS_VOUCHER_ACTIVITY_SPI void* voucher_activity_get_metadata_buffer(size_t *length) { @@ -1620,6 +1685,16 @@ voucher_activity_trace_with_private_strings(firehose_stream_t stream, return 0; } +firehose_tracepoint_id_t +voucher_activity_trace_v(firehose_stream_t stream, + firehose_tracepoint_id_t trace_id, uint64_t timestamp, + const struct iovec *iov, size_t publen, size_t privlen) +{ + (void)stream; (void)trace_id; (void)timestamp; + (void)iov; (void)publen; (void)privlen; + return 0; +} + void voucher_activity_flush(firehose_stream_t stream) { @@ -1631,6 +1706,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) { (void)hooks; } +#endif // OS_VOUCHER_ACTIVITY_SPI size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz) diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 
3aa1a6579..772c8c434 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -54,7 +54,7 @@ OS_OBJECT_DECL_CLASS(voucher_recipe); * @result * The newly created voucher object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_create(voucher_recipe_t recipe); @@ -78,7 +78,7 @@ voucher_create(voucher_recipe_t recipe); * @result * A mach voucher port. */ -__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0) +API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW mach_voucher_t voucher_get_mach_voucher(voucher_t voucher); @@ -90,8 +90,10 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); void _voucher_activity_debug_channel_init(void); +#if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, firehose_activity_id_t new_id); +#endif void _voucher_xref_dispose(voucher_t voucher); void _voucher_dispose(voucher_t voucher); size_t _voucher_debug(voucher_t v, char* buf, size_t bufsiz); @@ -121,9 +123,7 @@ void voucher_release(voucher_t voucher); #define DISPATCH_VOUCHER_ACTIVITY_DEBUG 1 #endif -#if VOUCHER_USE_MACH_VOUCHER_PRIORITY #include -#endif typedef uint32_t _voucher_magic_t; typedef uint32_t _voucher_priority_t; @@ -158,7 +158,10 @@ typedef struct voucher_s { struct voucher_vtable_s *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); - TAILQ_ENTRY(voucher_s) v_list; + struct voucher_hash_entry_s { + uintptr_t vhe_next; + uintptr_t vhe_prev_ptr; + } v_list; mach_voucher_t v_kvoucher, v_ipc_kvoucher; // if equal, only one reference voucher_t v_kvbase; // if non-NULL, v_kvoucher is a borrowed reference firehose_activity_id_t v_activity; @@ -172,6 +175,54 @@ typedef struct voucher_s { #endif } voucher_s; +typedef struct voucher_hash_head_s { + uintptr_t vhh_first; + uintptr_t vhh_last_ptr; +} 
voucher_hash_head_s; + +DISPATCH_ALWAYS_INLINE +static inline bool +_voucher_hash_is_enqueued(const struct voucher_s *v) +{ + return v->v_list.vhe_prev_ptr != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_mark_not_enqueued(struct voucher_s *v) +{ + v->v_list.vhe_prev_ptr = 0; + v->v_list.vhe_next = (uintptr_t)DISPATCH_OBJECT_LISTLESS; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_set_next(uintptr_t *next, struct voucher_s *v) +{ + *next = ~(uintptr_t)v; +} + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_hash_get_next(uintptr_t next) +{ + return (voucher_t)~next; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_set_prev_ptr(uintptr_t *prev_ptr, uintptr_t *addr) +{ + *prev_ptr = ~(uintptr_t)addr; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v) +{ + *(uintptr_t *)~prev_ptr = ~(uintptr_t)v; +} + #if VOUCHER_ENABLE_RECIPE_OBJECTS #define _voucher_extra_size(v) ((v)->v_recipe_extra_size) #define _voucher_extra_recipes(v) ((char*)(v) + (v)->v_recipe_extra_offset) @@ -204,48 +255,54 @@ typedef struct voucher_recipe_s { _dispatch_debug("voucher[%p]: " msg, v, ##__VA_ARGS__) #define _dispatch_kvoucher_debug(msg, kv, ...) \ _dispatch_debug("kvoucher[0x%08x]: " msg, kv, ##__VA_ARGS__) -#if DISPATCH_MACHPORT_DEBUG -#define _dispatch_voucher_debug_machport(name) \ - dispatch_debug_machport((name), __func__) -#else -#define _dispatch_voucher_debug_machport(name) ((void)(name)) -#endif +#define _dispatch_voucher_debug_machport(name) _dispatch_debug_machport(name) #else #define _dispatch_voucher_debug(msg, v, ...) #define _dispatch_kvoucher_debug(msg, kv, ...) #define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif -#if DISPATCH_PURE_C +#if DISPATCH_USE_DTRACE +#define _voucher_trace(how, ...) ({ \ + if (unlikely(VOUCHER_##how##_ENABLED())) { \ + VOUCHER_##how(__VA_ARGS__); \ + } \ + }) +#else +#define _voucher_trace(how, ...) 
((void)0) +#endif + +#ifndef DISPATCH_VOUCHER_OBJC_DEBUG +#if DISPATCH_INTROSPECTION || DISPATCH_DEBUG +#define DISPATCH_VOUCHER_OBJC_DEBUG 1 +#else +#define DISPATCH_VOUCHER_OBJC_DEBUG 0 +#endif +#endif // DISPATCH_VOUCHER_OBJC_DEBUG DISPATCH_ALWAYS_INLINE -static inline voucher_t -_voucher_retain(voucher_t voucher) +static inline struct voucher_s * +_voucher_retain_inline(struct voucher_s *voucher) { -#if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RETAIN, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); if (unlikely(xref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Voucher resurrection"); } -#else - os_retain(voucher); - _dispatch_voucher_debug("retain -> %d", voucher, - voucher->os_obj_xref_cnt + 1); -#endif // DISPATCH_DEBUG return voucher; } DISPATCH_ALWAYS_INLINE static inline void -_voucher_release(voucher_t voucher) +_voucher_release_inline(struct voucher_s *voucher) { -#if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RELEASE, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (likely(xref_cnt >= 0)) { return; @@ -254,10 +311,31 @@ _voucher_release(voucher_t voucher) _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } return _os_object_xref_dispose((_os_object_t)voucher); +} + +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline voucher_t +_voucher_retain(voucher_t voucher) +{ +#if DISPATCH_VOUCHER_OBJC_DEBUG + os_retain(voucher); #else - _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); - return os_release(voucher); -#endif // DISPATCH_DEBUG + 
_voucher_retain_inline(voucher); +#endif // DISPATCH_VOUCHER_OBJC_DEBUG + return voucher; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_voucher_release(voucher_t voucher) +{ +#if DISPATCH_VOUCHER_OBJC_DEBUG + os_release(voucher); +#else + _voucher_release_inline(voucher); +#endif // DISPATCH_VOUCHER_OBJC_DEBUG } DISPATCH_ALWAYS_INLINE @@ -268,13 +346,13 @@ _voucher_release_no_dispose(voucher_t voucher) // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + _voucher_trace(RELEASE, voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (likely(xref_cnt >= 0)) { return; } _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); #else - _dispatch_voucher_debug("release -> %d", voucher, voucher->os_obj_xref_cnt); return os_release(voucher); #endif // DISPATCH_DEBUG } @@ -317,13 +395,17 @@ static inline mach_voucher_t _voucher_swap_and_get_mach_voucher(voucher_t ov, voucher_t voucher) { if (ov == voucher) return VOUCHER_NO_MACH_VOUCHER; - _dispatch_voucher_debug("swap from voucher[%p]", voucher, ov); + if (ov) _voucher_trace(ORPHAN, ov); _dispatch_thread_setspecific(dispatch_voucher_key, voucher); + if (voucher) _voucher_trace(ADOPT, voucher); + _dispatch_voucher_debug("swap from voucher[%p]", voucher, ov); mach_voucher_t kv = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; mach_voucher_t okv = ov ? ov->v_kvoucher : MACH_VOUCHER_NULL; +#if OS_VOUCHER_ACTIVITY_GENERATE_SWAPS firehose_activity_id_t aid = voucher ? voucher->v_activity : 0; firehose_activity_id_t oaid = ov ? ov->v_activity : 0; if (aid != oaid) _voucher_activity_swap(aid, oaid); +#endif return (kv != okv) ? 
kv : VOUCHER_NO_MACH_VOUCHER; } @@ -498,21 +580,13 @@ _dispatch_continuation_voucher_set(dispatch_continuation_t dc, { voucher_t v = NULL; + (void)dqu; // _dispatch_continuation_voucher_set is never called for blocks with // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set. // only _dispatch_continuation_init_slow handles this bit. dispatch_assert(!(flags & DISPATCH_BLOCK_HAS_VOUCHER)); - if (dqu._oq->oq_override_voucher != DISPATCH_NO_VOUCHER) { - // if the queue has an override voucher, we should not capture anything - // - // if the continuation is enqueued before the queue is activated, then - // this optimization fails and we do capture whatever is current - // - // _dispatch_continuation_voucher_adopt() would do the right thing - // but using DISPATCH_NO_VOUCHER here is more efficient. - v = DISPATCH_NO_VOUCHER; - } else if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { + if (!(flags & DISPATCH_BLOCK_NO_VOUCHER)) { v = _voucher_copy(); } dc->dc_voucher = v; @@ -528,7 +602,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, uintptr_t dc_flags) { voucher_t v = dc->dc_voucher; - _dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); + dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME); if (consume) { @@ -540,7 +614,7 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) { if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) { - if (consume) _voucher_release(v); + if (consume && v) _voucher_release(v); consume = 0; v = ov; } diff --git a/tools/voucher_trace.d b/tools/voucher_trace.d new file mode 100755 index 000000000..890198e66 --- /dev/null +++ b/tools/voucher_trace.d @@ -0,0 +1,78 @@ +#!/usr/sbin/dtrace -s + +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * Usage: voucher_trace.d -p [pid] + * traced process must have been executed with + * DYLD_LIBRARY_PATH=/usr/lib/system/introspection or with + * DYLD_IMAGE_SUFFIX=_profile or DYLD_IMAGE_SUFFIX=_debug + */ + +#pragma D option quiet +#pragma D option zdefs +#pragma D option bufsize=16m + +BEGIN { + printf("Starting to trace voucher operations...\n"); +} + +voucher$target:libdispatch*.dylib::create +{ + printf("ALLOC voucher 0x%p, thread %#llx, ref 1, port %#x, aid %#llx", arg0, tid, arg1, arg2); + ustack(10); + printf("\n") +} + +voucher$target:libdispatch*.dylib::dispose +{ + printf("FREE voucher 0x%p, thread %#llx, ref 0", arg0, tid); + ustack(10); + printf("\n") +} + +voucher$target:libdispatch*.dylib::retain +{ + printf("RETAIN voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1); + ustack(10); + printf("\n") +} + +voucher$target:libdispatch*.dylib::release +{ + printf("RELEASE voucher 0x%p, thread %#llx, ref %d", arg0, tid, arg1); + ustack(10); + printf("\n") +} + +voucher$target:libdispatch*.dylib::adopt +{ + printf("ADOPT voucher 0x%p, thread %#llx", arg0, tid); + ustack(10); + printf("\n") +} + +voucher$target:libdispatch*.dylib::orphan +{ + printf("ORPHAN voucher 0x%p, thread %#llx", arg0, tid); + ustack(10); + printf("\n") +} diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig 
b/xcodeconfig/libdispatch-dyld-stub.xcconfig index aabda625b..dd1814db9 100644 --- a/xcodeconfig/libdispatch-dyld-stub.xcconfig +++ b/xcodeconfig/libdispatch-dyld-stub.xcconfig @@ -18,11 +18,11 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -OTHER_LDFLAGS = -BUILD_VARIANTS = normal -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 DISPATCH_VARIANT_DYLD_STUB=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 PRODUCT_NAME = libdispatch_dyld_stub INSTALL_PATH = /usr/local/lib/dyld_stub -EXCLUDED_SOURCE_FILE_NAMES = * -INCLUDED_SOURCE_FILE_NAMES = voucher.c // it's minimal with DISPATCH_VARIANT_DYLD_STUB +BUILD_VARIANTS = normal +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) +OTHER_LDFLAGS = VERSIONING_SYSTEM = +EXCLUDED_SOURCE_FILE_NAMES = * +INCLUDED_SOURCE_FILE_NAMES = voucher.c // minimal with DISPATCH_VARIANT_DYLD_STUB diff --git a/xcodeconfig/libdispatch-mp-static.xcconfig b/xcodeconfig/libdispatch-mp-static.xcconfig index 1f0eddc4c..af3715f1e 100644 --- a/xcodeconfig/libdispatch-mp-static.xcconfig +++ b/xcodeconfig/libdispatch-mp-static.xcconfig @@ -18,13 +18,12 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -OTHER_LDFLAGS = -BUILD_VARIANTS = normal debug -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 -PRODUCT_NAME = libdispatch -INSTALL_PATH = /usr/local/lib/system - // skip simulator SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos +PRODUCT_NAME = libdispatch +INSTALL_PATH = /usr/local/lib/system +BUILD_VARIANTS = normal debug +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(STATICLIB_PREPROCESSOR_DEFINITIONS) +OTHER_LDFLAGS = SKIP_INSTALL[sdk=*simulator*] = YES EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch-resolved.xcconfig b/xcodeconfig/libdispatch-resolved.xcconfig index a42add8ef..2f2e273e1 100644 --- a/xcodeconfig/libdispatch-resolved.xcconfig +++ b/xcodeconfig/libdispatch-resolved.xcconfig @@ 
-23,3 +23,4 @@ PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT) OTHER_LDFLAGS = SKIP_INSTALL = YES VERSIONING_SYSTEM = +EXCLUDED_SOURCE_FILE_NAMES = * diff --git a/xcodeconfig/libdispatch-resolver_iphoneos.order b/xcodeconfig/libdispatch-resolver_iphoneos.order deleted file mode 100644 index eea98459d..000000000 --- a/xcodeconfig/libdispatch-resolver_iphoneos.order +++ /dev/null @@ -1,20 +0,0 @@ -# -# Copyright (c) 2013 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - diff --git a/xcodeconfig/libdispatch-up-static.xcconfig b/xcodeconfig/libdispatch-up-static.xcconfig index 0ece6354e..170c5b356 100644 --- a/xcodeconfig/libdispatch-up-static.xcconfig +++ b/xcodeconfig/libdispatch-up-static.xcconfig @@ -18,8 +18,11 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -OTHER_LDFLAGS = +// skip simulator +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos +PRODUCT_NAME = libdispatch_up BUILD_VARIANTS = normal +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_HW_CONFIG_UP=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) +OTHER_LDFLAGS = SKIP_INSTALL = YES -EXCLUDED_SOURCE_FILE_NAMES = * -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) USE_OBJC=0 DISPATCH_USE_DTRACE=0 +EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index 65dfd04f9..d8a5113a2 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -19,8 +19,9 @@ # __dispatch_data_destructor_vm_deallocate __dispatch_data_destructor_munmap -__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus __dispatch_queue_attrs __dispatch_queue_attr_concurrent +__dispatch_source_type_memorypressure __dispatch_source_type_memorystatus _dispatch_assert_queue$V2 _dispatch_assert_queue _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target +_dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index d5b08d6dd..643e1d38b 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -40,6 +40,7 @@ ONLY_ACTIVE_ARCH = NO CLANG_LINK_OBJC_RUNTIME = NO GCC_C_LANGUAGE_STANDARD = gnu11 CLANG_CXX_LANGUAGE_STANDARD = gnu++11 +ENABLE_STRICT_OBJC_MSGSEND = YES GCC_ENABLE_CPP_EXCEPTIONS = NO GCC_STRICT_ALIASING = YES GCC_SYMBOLS_PRIVATE_EXTERN = YES @@ -49,24 +50,40 @@ 
GCC_WARN_64_TO_32_BIT_CONVERSION = YES GCC_WARN_ABOUT_RETURN_TYPE = YES GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES GCC_WARN_ABOUT_MISSING_NEWLINE = YES -GCC_WARN_UNUSED_VARIABLE = YES -GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES +GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES GCC_WARN_SIGN_COMPARE = YES +GCC_WARN_STRICT_SELECTOR_MATCH = YES +GCC_WARN_UNDECLARED_SELECTOR = YES GCC_WARN_UNINITIALIZED_AUTOS = YES +GCC_WARN_UNKNOWN_PRAGMAS = YES +GCC_WARN_UNUSED_FUNCTION = YES +GCC_WARN_UNUSED_LABEL = YES +GCC_WARN_UNUSED_PARAMETER = YES +GCC_WARN_UNUSED_VARIABLE = YES +CLANG_WARN_ASSIGN_ENUM = YES +CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES +CLANG_WARN_DOCUMENTATION_COMMENTS = YES +CLANG_WARN__DUPLICATE_METHOD_MATCH = YES CLANG_WARN_EMPTY_BODY = YES CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES +CLANG_WARN_INFINITE_RECURSION = YES +CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES +CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES -CLANG_WARN_DOCUMENTATION_COMMENTS = YES +CLANG_WARN_SUSPICIOUS_MOVE = YES +CLANG_WARN_UNREACHABLE_CODE = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) GCC_NO_COMMON_BLOCKS = YES -WARNING_CFLAGS = -Wall -Wextra -Waggregate-return -Wfloat-equal -Wpacked -Wmissing-declarations -Wstrict-overflow=4 -Wstrict-aliasing=2 -Wno-packed -Wno-unknown-warning-option -OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) +STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 +WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion 
-Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunguarded-availability -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS) +NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla +OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS) OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer -OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 +OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1 OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1 GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) diff --git a/xcodeconfig/libdispatch_iphoneos.order b/xcodeconfig/libdispatch_iphoneos.order deleted file mode 100644 index eea98459d..000000000 --- a/xcodeconfig/libdispatch_iphoneos.order +++ /dev/null @@ -1,20 +0,0 @@ -# -# Copyright (c) 2013 Apple Inc. All rights reserved. -# -# @APPLE_APACHE_LICENSE_HEADER_START@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# @APPLE_APACHE_LICENSE_HEADER_END@ -# - diff --git a/xcodeconfig/libfirehose.xcconfig b/xcodeconfig/libfirehose.xcconfig index 07a8b9ac1..4c711994c 100644 --- a/xcodeconfig/libfirehose.xcconfig +++ b/xcodeconfig/libfirehose.xcconfig @@ -18,18 +18,17 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -OTHER_MIGFLAGS = -novouchers -OTHER_LDFLAGS = SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/ +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 +OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os STRIP_INSTALLED_PRODUCT = NO COPY_PHASE_STRIP = NO SEPARATE_STRIP = NO -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 - VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL) COPY_HEADERS_RUN_UNIFDEF = YES diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig index f6b2a99f6..c572f80e7 100644 --- a/xcodeconfig/libfirehose_kernel.xcconfig +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -20,16 +20,14 @@ #include "libfirehose.xcconfig" -OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed -// LLVM_LTO = YES +SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/kernel/ +GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0 +OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed +// LLVM_LTO = YES 
PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/kernel/os -SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos - HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/usr/local/include/os $(SDKROOT)/usr/local/include/firehose -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0 - COPY_HEADERS_RUN_UNIFDEF = YES COPY_HEADERS_UNIFDEF_FLAGS = -DKERNEL=1 -DOS_FIREHOSE_SPI=1 -DOS_VOUCHER_ACTIVITY_SPI_TYPES=1 -UOS_VOUCHER_ACTIVITY_SPI diff --git a/xcodescripts/install-manpages.sh b/xcodescripts/install-manpages.sh index d9e28af6c..db13163e8 100755 --- a/xcodescripts/install-manpages.sh +++ b/xcodescripts/install-manpages.sh @@ -64,7 +64,7 @@ for m in dispatch_group_enter dispatch_group_leave dispatch_group_wait \ ln -f dispatch_group_create.3 ${m}.3 done -for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume \ +for m in dispatch_retain dispatch_release dispatch_suspend dispatch_resume dispatch_activate \ dispatch_get_context dispatch_set_context dispatch_set_finalizer_f; do ln -f dispatch_object.3 ${m}.3 done From c1107b04a843025de1e611f38d906313606c296a Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Thu, 4 Oct 2018 22:31:25 +0000 Subject: [PATCH 10/18] libdispatch-1008.200.78 Imported from libdispatch-1008.200.78.tar.gz --- CMakeLists.txt | 62 +- INSTALL.md | 30 +- PATCHES | 21 + cmake/config.h.in | 6 - cmake/modules/DispatchCompilerWarnings.cmake | 74 + cmake/modules/DispatchSanitization.cmake | 44 + cmake/modules/SwiftSupport.cmake | 1 + config/config.h | 6 - configure.ac | 3 + dispatch/CMakeLists.txt | 7 +- dispatch/base.h | 15 +- dispatch/block.h | 15 +- dispatch/dispatch.h | 4 +- dispatch/object.h | 26 +- dispatch/queue.h | 278 +- dispatch/source.h | 14 +- dispatch/time.h | 20 +- libdispatch.xcodeproj/project.pbxproj | 473 
+- man/dispatch_after.3 | 6 + man/dispatch_io_read.3 | 4 +- man/dispatch_semaphore_create.3 | 3 +- man/dispatch_source_create.3 | 5 +- man/dispatch_time.3 | 23 +- os/firehose_buffer_private.h | 16 +- os/firehose_server_private.h | 15 + os/linux_base.h | 5 +- os/voucher_activity_private.h | 34 +- os/voucher_private.h | 88 +- private/introspection_private.h | 100 +- private/layout_private.h | 29 +- private/mach_private.h | 106 +- private/private.h | 26 +- private/queue_private.h | 176 +- private/source_private.h | 50 +- private/time_private.h | 87 + private/workloop_private.h | 441 + src/CMakeLists.txt | 13 +- src/Makefile.am | 2 +- src/allocator.c | 152 +- src/allocator_internal.h | 16 +- src/apply.c | 128 +- src/benchmark.c | 10 +- src/block.cpp | 23 +- src/data.c | 33 +- src/data.m | 2 +- src/data_internal.h | 8 +- src/event/event.c | 1085 +- src/event/event_config.h | 17 +- src/event/event_epoll.c | 265 +- src/event/event_internal.h | 420 +- src/event/event_kevent.c | 932 +- src/event/workqueue.c | 38 +- src/event/workqueue_internal.h | 8 +- src/firehose/firehose.defs | 31 +- src/firehose/firehose_buffer.c | 703 +- src/firehose/firehose_buffer_internal.h | 81 +- src/firehose/firehose_inline_internal.h | 224 +- src/firehose/firehose_reply.defs | 9 +- src/firehose/firehose_server.c | 376 +- src/firehose/firehose_server_internal.h | 21 +- src/init.c | 1003 +- src/inline_internal.h | 1019 +- src/internal.h | 515 +- src/introspection.c | 564 +- src/introspection_internal.h | 144 +- src/io.c | 94 +- src/io_internal.h | 12 +- src/libdispatch.codes | 25 +- src/libdispatch.plist | 99 + src/mach.c | 1584 +-- src/mach_internal.h | 32 +- src/object.c | 116 +- src/object.m | 34 +- src/object_internal.h | 337 +- src/once.c | 84 +- src/queue.c | 10392 +++++++++-------- src/queue_internal.h | 761 +- src/semaphore.c | 339 +- src/semaphore_internal.h | 79 +- src/shims.h | 23 +- src/shims/atomic.h | 2 +- src/shims/atomic_sfb.h | 4 +- src/shims/hw_config.h | 7 + src/shims/lock.c | 
99 +- src/shims/lock.h | 266 +- src/shims/perfmon.h | 2 +- src/shims/priority.h | 154 +- src/shims/target.h | 61 + src/shims/time.h | 118 +- src/shims/tsd.h | 2 +- src/shims/yield.h | 43 +- src/source.c | 2219 +--- src/source_internal.h | 98 +- src/swift/Data.swift | 1 + src/swift/Time.swift | 108 +- src/time.c | 72 +- src/trace.h | 298 +- src/transform.c | 46 +- src/voucher.c | 270 +- src/voucher_internal.h | 35 +- tools/firehose_trace.lua | 83 + xcodeconfig/libdispatch-resolved.xcconfig | 2 +- xcodeconfig/libdispatch-up-static.xcconfig | 28 - xcodeconfig/libdispatch.clean | 46 + xcodeconfig/libdispatch.dirty | 153 + xcodeconfig/libdispatch.order | 15 +- xcodeconfig/libdispatch.xcconfig | 16 +- xcodescripts/check-order.sh | 71 + xcodescripts/mig-headers.sh | 5 + 109 files changed, 16743 insertions(+), 11747 deletions(-) create mode 100644 cmake/modules/DispatchCompilerWarnings.cmake create mode 100644 cmake/modules/DispatchSanitization.cmake create mode 100644 private/time_private.h create mode 100644 private/workloop_private.h create mode 100644 src/libdispatch.plist create mode 100644 src/shims/target.h create mode 100755 tools/firehose_trace.lua delete mode 100644 xcodeconfig/libdispatch-up-static.xcconfig create mode 100644 xcodeconfig/libdispatch.clean create mode 100644 xcodeconfig/libdispatch.dirty create mode 100644 xcodescripts/check-order.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index f6b078e25..ef736629d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,11 +25,40 @@ include(GNUInstallDirs) set(WITH_BLOCKS_RUNTIME "" CACHE PATH "Path to blocks runtime") include(DispatchAppleOptions) +include(DispatchSanitization) + +include(DispatchCompilerWarnings) +dispatch_common_warnings() option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) -# TODO(compnerd) swift options +option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) 
+if(ENABLE_SWIFT) + if(NOT CMAKE_SWIFT_COMPILER) + message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift") + endif() + + get_filename_component(SWIFT_TOOLCHAIN ${CMAKE_SWIFT_COMPILER} DIRECTORY) + get_filename_component(SWIFT_TOOLCHAIN ${SWIFT_TOOLCHAIN} DIRECTORY) + + string(TOLOWER ${CMAKE_SYSTEM_NAME} SWIFT_OS) + set(SWIFT_RUNTIME_LIBDIR ${SWIFT_TOOLCHAIN}/lib/swift/${SWIFT_OS}/${CMAKE_SYSTEM_PROCESSOR}) + + add_library(swiftCore + SHARED IMPORTED GLOBAL) + set_target_properties(swiftCore + PROPERTIES + IMPORTED_LOCATION + ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftCore${CMAKE_SHARED_LIBRARY_SUFFIX}) + + add_library(swiftSwiftOnoneSupport + SHARED IMPORTED GLOBAL) + set_target_properties(swiftSwiftOnoneSupport + PROPERTIES + IMPORTED_LOCATION + ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftSwiftOnoneSupport${CMAKE_SHARED_LIBRARY_SUFFIX}) +endif() option(BUILD_SHARED_LIBS "build shared libraries" ON) @@ -59,8 +88,7 @@ if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES) set(HAVE_PTHREAD_WORKQUEUES 0) else() check_include_files(pthread/workqueue_private.h HAVE_PTHREAD_WORKQUEUE_PRIVATE_H) - check_include_files(pthread_workqueue.h HAVE_PTHREAD_WORKQUEUE_H) - if(HAVE_PTHREAD_WORKQUEUE_PRIVATE_H AND HAVE_PTHREAD_WORKQUEUE_H) + if(HAVE_PTHREAD_WORKQUEUE_PRIVATE_H) set(HAVE_PTHREAD_WORKQUEUES 1) set(DISPATCH_USE_INTERNAL_WORKQUEUE 0) else() @@ -111,7 +139,6 @@ check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT) check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) -check_function_exists(pthread_workqueue_setdispatch_np HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) @@ -152,7 +179,6 @@ check_include_files("stdint.h" HAVE_STDINT_H) check_include_files("stdlib.h" HAVE_STDLIB_H) 
check_include_files("string.h" HAVE_STRING_H) check_include_files("strings.h" HAVE_STRINGS_H) -check_include_files("sys/cdefs.h" HAVE_SYS_CDEFS_H) check_include_files("sys/guarded.h" HAVE_SYS_GUARDED_H) check_include_files("sys/stat.h" HAVE_SYS_STAT_H) check_include_files("sys/types.h" HAVE_SYS_TYPES_H) @@ -198,21 +224,19 @@ if(leaks_EXECUTABLE) endif() if(CMAKE_SYSTEM_NAME STREQUAL Darwin) - add_custom_command(OUTPUT - "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - "${CMAKE_SOURCE_DIR}/private/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") + add_custom_target(module-map-symlinks + ALL + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/private/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") else() - add_custom_command(OUTPUT - "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - "${CMAKE_SOURCE_DIR}/private/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") + add_custom_target(module-map-symlinks + ALL + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/dispatch/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/private/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") endif() configure_file("${CMAKE_SOURCE_DIR}/cmake/config.h.in" 
"${CMAKE_BINARY_DIR}/config/config_ac.h") diff --git a/INSTALL.md b/INSTALL.md index 9940c2cf7..a426bcf30 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -129,9 +129,21 @@ libdispatch for /usr/lib/system on OS X El Capitan: Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with clang and blocks support: - sh autogen.sh - ./configure CC=clang --with-blocks-runtime=/usr/local/lib - make check + ``` + cmake -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DWITH_BLOCKS_RUNTIME=/usr/local/lib + ninja + ninja test + ``` + +### Building for android + +Note that this assumes that you are building on Linux. It requires that you +have the android NDK available. It has been tested against API Level 21. + + ``` + cmake -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_SYSTEM_NAME=Android -DCMAKE_SYSTEM_VERSION=21 -DCMAKE_ANDROID_NDK= + ninja + ``` ### Building and installing for Linux @@ -140,7 +152,7 @@ on Ubuntu; currently supported versions are 14.04, 15.10 and 16.04. 1. The first thing to do is install required packages: - `sudo apt-get install autoconf libtool pkg-config clang systemtap-sdt-dev libbsd-dev linux-libc-dev` + `sudo apt-get install cmake ninja-build clang systemtap-sdt-dev libbsd-dev linux-libc-dev` Note: compiling libdispatch requires clang 3.8 or better and the gold linker. If the default clang on your Ubuntu version is @@ -148,11 +160,11 @@ too old, see http://apt.llvm.org/ to install a newer version. On older Ubuntu releases, you may need to install binutils-gold to get the gold linker. -2. Build (as in the general instructions above) +2. 
Build ``` - sh autogen.sh - ./configure - make - make install + cmake -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ + ninja + ninja install ``` + diff --git a/PATCHES b/PATCHES index c3d28b330..47db8f3a3 100644 --- a/PATCHES +++ b/PATCHES @@ -353,3 +353,24 @@ github commits starting with 29bdc2f from [8947dcf] APPLIED rdar://33531111 [5ad9208] APPLIED rdar://33531111 [698d085] APPLIED rdar://33531111 +[ce1ce45] APPLIED rdar://35017478 +[291f34d] APPLIED rdar://35017478 +[666df60] APPLIED rdar://35017478 +[80dd736] APPLIED rdar://35017478 +[0fd5a69] APPLIED rdar://35017478 +[0e35ed9] APPLIED rdar://35017478 +[70ce56b] APPLIED rdar://35017478 +[40fc1f3] APPLIED rdar://35017478 +[9ec74ed] APPLIED rdar://35017478 +[7f330ed] APPLIED rdar://35017478 +[947b51c] APPLIED rdar://35017478 +[295f676] APPLIED rdar://35017478 +[48196a2] APPLIED rdar://35017478 +[a28fc2b] APPLIED rdar://35017478 +[791ce5d] APPLIED rdar://35017478 +[0d0a998] APPLIED rdar://35017478 +[29329b5] APPLIED rdar://35017478 +[141403a] APPLIED rdar://35017478 +[b7f1beb] APPLIED rdar://35017478 +[7ef9cde] APPLIED rdar://35017478 +[12c9ca8] APPLIED rdar://35017478 diff --git a/cmake/config.h.in b/cmake/config.h.in index 6696e9863..00e2bbcdd 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -152,9 +152,6 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_PTHREAD_WORKQUEUE_PRIVATE_H -/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ -#cmakedefine HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - /* Define to 1 if you have the header file. */ #cmakedefine01 HAVE_STDINT_H @@ -176,9 +173,6 @@ /* Define to 1 if you have the `sysconf' function. */ #cmakedefine01 HAVE_SYSCONF -/* Define to 1 if you have the header file. */ -#cmakedefine01 HAVE_SYS_CDEFS_H - /* Define to 1 if you have the header file. 
*/ #cmakedefine HAVE_SYS_GUARDED_H diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake new file mode 100644 index 000000000..dcc074e29 --- /dev/null +++ b/cmake/modules/DispatchCompilerWarnings.cmake @@ -0,0 +1,74 @@ + +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + # TODO: someone needs to provide the msvc equivalent warning flags + macro(dispatch_common_warnings) + endmacro() +else() + macro(dispatch_common_warnings) + add_compile_options(-Werror) + add_compile_options(-Wall) + add_compile_options(-Wextra) + add_compile_options(-Wmissing-prototypes) + add_compile_options(-Wdocumentation) + add_compile_options(-Wunreachable-code) + add_compile_options(-Wshadow) + add_compile_options(-Wconversion) + add_compile_options(-Wconstant-conversion) + add_compile_options(-Wint-conversion) + add_compile_options(-Wbool-conversion) + add_compile_options(-Wenum-conversion) + add_compile_options(-Wassign-enum) + add_compile_options(-Wshorten-64-to-32) + add_compile_options(-Wnewline-eof) + add_compile_options(-Wdeprecated-declarations) + add_compile_options(-Wsign-conversion) + add_compile_options(-Winfinite-recursion) + add_compile_options(-Warray-bounds-pointer-arithmetic) + add_compile_options(-Watomic-properties) + add_compile_options(-Wcomma) + add_compile_options(-Wconditional-uninitialized) + add_compile_options(-Wcovered-switch-default) + add_compile_options(-Wdate-time) + add_compile_options(-Wdeprecated) + add_compile_options(-Wdouble-promotion) + add_compile_options(-Wduplicate-enum) + add_compile_options(-Wexpansion-to-defined) + add_compile_options(-Wfloat-equal) + add_compile_options(-Widiomatic-parentheses) + add_compile_options(-Wnullable-to-nonnull-conversion) + add_compile_options(-Wobjc-interface-ivars) + add_compile_options(-Wover-aligned) + add_compile_options(-Wpacked) + add_compile_options(-Wpointer-arith) + add_compile_options(-Wselector) + add_compile_options(-Wstatic-in-inline) + 
add_compile_options(-Wsuper-class-method-mismatch) + add_compile_options(-Wswitch-enum) + add_compile_options(-Wunguarded-availability) + add_compile_options(-Wunused) + + add_compile_options(-Wno-unknown-warning-option) + add_compile_options(-Wno-trigraphs) + add_compile_options(-Wno-four-char-constants) + add_compile_options(-Wno-disabled-macro-expansion) + add_compile_options(-Wno-pedantic) + add_compile_options(-Wno-bad-function-cast) + add_compile_options(-Wno-c++-compat) + add_compile_options(-Wno-c++98-compat) + add_compile_options(-Wno-c++98-compat-pedantic) + add_compile_options(-Wno-cast-align) + add_compile_options(-Wno-cast-qual) + add_compile_options(-Wno-documentation-unknown-command) + add_compile_options(-Wno-format-nonliteral) + add_compile_options(-Wno-missing-variable-declarations) + add_compile_options(-Wno-old-style-cast) + add_compile_options(-Wno-padded) + add_compile_options(-Wno-reserved-id-macro) + add_compile_options(-Wno-shift-sign-overflow) + add_compile_options(-Wno-undef) + add_compile_options(-Wno-unreachable-code-aggressive) + add_compile_options(-Wno-unused-macros) + add_compile_options(-Wno-used-but-marked-unused) + add_compile_options(-Wno-vla) + endmacro() +endif() diff --git a/cmake/modules/DispatchSanitization.cmake b/cmake/modules/DispatchSanitization.cmake new file mode 100644 index 000000000..a0641f524 --- /dev/null +++ b/cmake/modules/DispatchSanitization.cmake @@ -0,0 +1,44 @@ + +set(DISPATCH_USE_SANITIZER "" CACHE STRING + "Define the sanitizer used to build binaries and tests.") + +if(CMAKE_SYSTEM_NAME STREQUAL Darwin AND DISPATCH_USE_SANITIZER) + message(FATAL_ERROR "building libdispatch with sanitization is not supported on Darwin") +endif() + +if(DISPATCH_USE_SANITIZER) + # TODO(compnerd) ensure that the compiler supports these options before adding + # them. At the moment, assume that this will just be used with a GNU + # compatible driver and that the options are spelt correctly in light of that. 
+ add_compile_options("-fno-omit-frame-pointer") + if(CMAKE_BUILD_TYPE MATCHES "Debug") + add_compile_options("-O1") + elseif(NOT CMAKE_BUILD_TYPE MATCHES "Debug" AND + NOT CMAKE_BUILD_TYPE MATCHES "RelWithDebInfo") + add_compile_options("-gline-tables-only") + endif() + + if(LLVM_USE_SANITIZER STREQUAL "Address") + add_compile_options("-fsanitize=address") + elseif(DISPATCH_USE_SANITIZER MATCHES "Memory(WithOrigins)?") + add_compile_options("-fsanitize=memory") + if(DISPATCH_USE_SANITIZER STREQUAL "MemoryWithOrigins") + add_compile_options("-fsanitize-memory-track-origins") + endif() + elseif(DISPATCH_USE_SANITIZER STREQUAL "Undefined") + add_compile_options("-fsanitize=undefined") + add_compile_options("-fno-sanitize=vptr,function") + add_compile_options("-fno-sanitize-recover=all") + elseif(DISPATCH_USE_SANITIZER STREQUAL "Thread") + add_compile_options("-fsanitize=thread") + elseif(DISPATCH_USE_SANITIZER STREQUAL "Address;Undefined" OR + DISPATCH_USE_SANITIZER STREQUAL "Undefined;Address") + add_compile_options("-fsanitize=address,undefined") + add_compile_options("-fno-sanitize=vptr,function") + add_compile_options("-fno-sanitize-recover=all") + elseif(DISPATCH_USE_SANITIZER STREQUAL "Leaks") + add_compile_options("-fsanitize=leak") + else() + message(FATAL_ERROR "unsupported value of DISPATCH_USE_SANITIZER: ${DISPATCH_USE_SANITIZER}") + endif() +endif() diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 196593999..64b7b36e9 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -57,6 +57,7 @@ function(add_swift_library library) ${module_directory}/${ASL_MODULE_NAME}.swiftdoc DEPENDS ${ASL_SOURCES} + ${CMAKE_SWIFT_COMPILER} COMMAND ${CMAKE_COMMAND} -E make_directory ${module_directory} COMMAND diff --git a/config/config.h b/config/config.h index 91d7cfe8e..a24187ec3 100644 --- a/config/config.h +++ b/config/config.h @@ -106,9 +106,6 @@ /* Define to 1 if you have the `mach_approximate_time' 
function. */ #define HAVE_MACH_APPROXIMATE_TIME 1 -/* Define to 1 if you have the `mach_port_construct' function. */ -#define HAVE_MACH_PORT_CONSTRUCT 1 - /* Define to 1 if you have the `malloc_create_zone' function. */ #define HAVE_MALLOC_CREATE_ZONE 1 @@ -148,9 +145,6 @@ /* Define to 1 if you have the header file. */ #define HAVE_PTHREAD_WORKQUEUE_PRIVATE_H 1 -/* Define to 1 if you have the `pthread_workqueue_setdispatch_np' function. */ -#define HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 - /* Define to 1 if you have the header file. */ #define HAVE_STDINT_H 1 diff --git a/configure.ac b/configure.ac index 8f38f0829..fad99ec23 100644 --- a/configure.ac +++ b/configure.ac @@ -190,6 +190,9 @@ AC_ARG_WITH([swift-toolchain], armv7l*) target_cpu="armv7" ;; + armv6l*) + target_cpu="armv6" + ;; *) esac ;; diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt index dbfb866a8..b50b1ba15 100644 --- a/dispatch/CMakeLists.txt +++ b/dispatch/CMakeLists.txt @@ -14,11 +14,12 @@ install(FILES source.h time.h DESTINATION - ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch/) + ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch) if(ENABLE_SWIFT) + get_filename_component(MODULE_MAP module.modulemap REALPATH) install(FILES - module.modulemap + ${MODULE_MAP} DESTINATION - ${CMAKE_INSTALL_FULL_INCLUEDIR}/dispatch/) + ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch) endif() diff --git a/dispatch/base.h b/dispatch/base.h index 4c82b010c..c2bea82ea 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -120,15 +120,20 @@ #endif #endif -#if TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) && \ - defined(__cplusplus) +#if TARGET_OS_WIN32 +#ifdef __cplusplus +#ifdef __DISPATCH_BUILDING_DISPATCH__ #define DISPATCH_EXPORT extern "C" extern __declspec(dllexport) -#elif TARGET_OS_WIN32 && defined(__DISPATCH_BUILDING_DISPATCH__) +#else #define DISPATCH_EXPORT extern __declspec(dllexport) -#elif TARGET_OS_WIN32 && defined(__cplusplus) +#endif // __DISPATCH_BUILDING_DISPATCH__ +#else // __cplusplus +#ifdef 
__DISPATCH_BUILDING_DISPATCH__ #define DISPATCH_EXPORT extern "C" extern __declspec(dllimport) -#elif TARGET_OS_WIN32 +#else #define DISPATCH_EXPORT extern __declspec(dllimport) +#endif // __DISPATCH_BUILDING_DISPATCH__ +#endif // __cplusplus #elif __GNUC__ #define DISPATCH_EXPORT extern __attribute__((visibility("default"))) #else diff --git a/dispatch/block.h b/dispatch/block.h index cbdcb5eff..d60cb2c18 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -48,13 +48,14 @@ __BEGIN_DECLS * * @const DISPATCH_BLOCK_DETACHED * Flag indicating that a dispatch block object should execute disassociated - * from current execution context attributes such as QOS class, os_activity_t - * and properties of the current IPC request (if any). If invoked directly, the - * block object will remove these attributes from the calling thread for the - * duration of the block body (before applying attributes assigned to the block - * object, if any). If submitted to a queue, the block object will be executed - * with the attributes of the queue (or any attributes specifically assigned to - * the block object). + * from current execution context attributes such as os_activity_t + * and properties of the current IPC request (if any). With regard to QoS class, + * the behavior is the same as for DISPATCH_BLOCK_NO_QOS. If invoked directly, + * the block object will remove the other attributes from the calling thread for + * the duration of the block body (before applying attributes assigned to the + * block object, if any). If submitted to a queue, the block object will be + * executed with the attributes of the queue (or any attributes specifically + * assigned to the block object). 
* * @const DISPATCH_BLOCK_ASSIGN_CURRENT * Flag indicating that a dispatch block object should be assigned the execution diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 2d45b8356..79a4c6078 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -48,10 +48,9 @@ #endif #endif -#define DISPATCH_API_VERSION 20170124 +#define DISPATCH_API_VERSION 20180109 #ifndef __DISPATCH_BUILDING_DISPATCH__ - #ifndef __DISPATCH_INDIRECT__ #define __DISPATCH_INDIRECT__ #endif @@ -70,7 +69,6 @@ #include #undef __DISPATCH_INDIRECT__ - #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ #endif diff --git a/dispatch/object.h b/dispatch/object.h index 3ff36c2d3..653c122e0 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -52,13 +52,16 @@ OS_OBJECT_DECL_CLASS(dispatch_object); #if OS_OBJECT_SWIFT3 #define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, dispatch_object) +#define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, base) #else // OS_OBJECT_SWIFT3 #define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object) +#define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SUBCLASS(name, base) DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -_dispatch_object_validate(dispatch_object_t object) { - void *isa = *(void* volatile*)(OS_OBJECT_BRIDGE void*)object; +_dispatch_object_validate(dispatch_object_t object) +{ + void *isa = *(void *volatile*)(OS_OBJECT_BRIDGE void*)object; (void)isa; } #endif // OS_OBJECT_SWIFT3 @@ -79,31 +82,29 @@ typedef struct dispatch_object_s { } *dispatch_object_t; #define DISPATCH_DECL(name) \ typedef struct name##_s : public dispatch_object_s {} *name##_t -#define DISPATCH_GLOBAL_OBJECT(type, object) (&(object)) +#define DISPATCH_DECL_SUBCLASS(name, base) \ + typedef struct name##_s : public base##_s {} *name##_t +#define DISPATCH_GLOBAL_OBJECT(type, object) (static_cast(&(object))) #define DISPATCH_RETURNS_RETAINED #else /* Plain C */ +#ifndef 
__DISPATCH_BUILDING_DISPATCH__ typedef union { struct _os_object_s *_os_obj; struct dispatch_object_s *_do; - struct dispatch_continuation_s *_dc; struct dispatch_queue_s *_dq; struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; - struct dispatch_source_attr_s *_dsa; struct dispatch_semaphore_s *_dsema; struct dispatch_data_s *_ddata; struct dispatch_io_s *_dchannel; - struct dispatch_operation_s *_doperation; - struct dispatch_disk_s *_ddisk; } dispatch_object_t DISPATCH_TRANSPARENT_UNION; -/*! @parseOnly */ +#endif // !__DISPATCH_BUILDING_DISPATCH__ #define DISPATCH_DECL(name) typedef struct name##_s *name##_t -/*! @parseOnly */ -#define DISPATCH_GLOBAL_OBJECT(t, x) (&(x)) -/*! @parseOnly */ +#define DISPATCH_DECL_SUBCLASS(name, base) typedef base##_t name##_t +#define DISPATCH_GLOBAL_OBJECT(type, object) ((type)&(object)) #define DISPATCH_RETURNS_RETAINED #endif @@ -122,12 +123,9 @@ typedef union { #define DISPATCH_DATA_DECL(name) OS_OBJECT_DECL_SWIFT(name) #endif // DISPATCH_DATA_DECL #elif !TARGET_OS_WIN32 -/*! @parseOnly */ #define DISPATCH_SOURCE_DECL(name) \ DISPATCH_DECL(name); -/*! @parseOnly */ #define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) -/*! @parseOnly */ #define DISPATCH_SOURCE_TYPE_DECL(name) \ DISPATCH_EXPORT const struct dispatch_source_type_s \ _dispatch_source_type_##name diff --git a/dispatch/queue.h b/dispatch/queue.h index 8dab75f9d..7c4a0f49d 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -53,25 +53,151 @@ DISPATCH_ASSUME_NONNULL_BEGIN * @typedef dispatch_queue_t * * @abstract - * Dispatch queues invoke blocks submitted to them serially in FIFO order. A - * queue will only invoke one block at a time, but independent queues may each - * invoke their blocks concurrently with respect to each other. + * Dispatch queues invoke workitems submitted to them. 
* * @discussion - * Dispatch queues are lightweight objects to which blocks may be submitted. - * The system manages a pool of threads which process dispatch queues and - * invoke blocks submitted to them. + * Dispatch queues come in many flavors, the most common one being the dispatch + * serial queue (See dispatch_queue_serial_t). + * + * The system manages a pool of threads which process dispatch queues and invoke + * workitems submitted to them. * * Conceptually a dispatch queue may have its own thread of execution, and * interaction between queues is highly asynchronous. * * Dispatch queues are reference counted via calls to dispatch_retain() and - * dispatch_release(). Pending blocks submitted to a queue also hold a + * dispatch_release(). Pending workitems submitted to a queue also hold a * reference to the queue until they have finished. Once all references to a * queue have been released, the queue will be deallocated by the system. */ DISPATCH_DECL(dispatch_queue); +/*! + * @typedef dispatch_queue_global_t + * + * @abstract + * Dispatch global concurrent queues are an abstraction around the system thread + * pool which invokes workitems that are submitted to dispatch queues. + * + * @discussion + * Dispatch global concurrent queues provide buckets of priorities on top of the + * thread pool the system manages. The system will decide how many threads + * to allocate to this pool depending on demand and system load. In particular, + * the system tries to maintain a good level of concurrency for this resource, + * and will create new threads when too many existing worker threads block in + * system calls. + * + * The global concurrent queues are a shared resource and as such it is the + * responsiblity of every user of this resource to not submit an unbounded + * amount of work to this pool, especially work that may block, as this can + * cause the system to spawn very large numbers of threads (aka. thread + * explosion). 
+ * + * Work items submitted to the global concurrent queues have no ordering + * guarantee with respect to the order of submission, and workitems submitted + * to these queues may be invoked concurrently. + * + * Dispatch global concurrent queues are well-known global objects that are + * returned by dispatch_get_global_queue(). These objects cannot be modified. + * Calls to dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., + * will have no effect when used with queues of this type. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_queue_global_s *dispatch_queue_global_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_queue_global, dispatch_queue); +#endif + +/*! + * @typedef dispatch_queue_serial_t + * + * @abstract + * Dispatch serial queues invoke workitems submitted to them serially in FIFO + * order. + * + * @discussion + * Dispatch serial queues are lightweight objects to which workitems may be + * submitted to be invoked in FIFO order. A serial queue will only invoke one + * workitem at a time, but independent serial queues may each invoke their work + * items concurrently with respect to each other. + * + * Serial queues can target each other (See dispatch_set_target_queue()). The + * serial queue at the bottom of a queue hierarchy provides an exclusion + * context: at most one workitem submitted to any of the queues in such + * a hiearchy will run at any given time. + * + * Such hierarchies provide a natural construct to organize an application + * subsystem around. + * + * Serial queues are created by passing a dispatch queue attribute derived from + * DISPATCH_QUEUE_SERIAL to dispatch_queue_create_with_target(). + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_lane_s *dispatch_queue_serial_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_queue_serial, dispatch_queue); +#endif + +/*! 
+ * @typedef dispatch_queue_main_t + * + * @abstract + * The type of the default queue that is bound to the main thread. + * + * @discussion + * The main queue is a serial queue (See dispatch_queue_serial_t) which is bound + * to the main thread of an application. + * + * In order to invoke workitems submitted to the main queue, the application + * must call dispatch_main(), NSApplicationMain(), or use a CFRunLoop on the + * main thread. + * + * The main queue is a well known global object that is made automatically on + * behalf of the main thread during process initialization and is returned by + * dispatch_get_main_queue(). This object cannot be modified. Calls to + * dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will + * have no effect when used on the main queue. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_queue_static_s *dispatch_queue_main_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_queue_main, dispatch_queue_serial); +#endif + +/*! + * @typedef dispatch_queue_concurrent_t + * + * @abstract + * Dispatch concurrent queues invoke workitems submitted to them concurrently, + * and admit a notion of barrier workitems. + * + * @discussion + * Dispatch concurrent queues are lightweight objects to which regular and + * barrier workitems may be submited. Barrier workitems are invoked in + * exclusion of any other kind of workitem in FIFO order. + * + * Regular workitems can be invoked concurrently for the same concurrent queue, + * in any order. However, regular workitems will not be invoked before any + * barrier workitem submited ahead of them has been invoked. + * + * In other words, if a serial queue is equivalent to a mutex in the Dispatch + * world, a concurrent queue is equivalent to a reader-writer lock, where + * regular items are readers and barriers are writers. 
+ * + * Concurrent queues are created by passing a dispatch queue attribute derived + * from DISPATCH_QUEUE_CONCURRENT to dispatch_queue_create_with_target(). + * + * Caveat: + * Dispatch concurrent queues at this time do not implement priority inversion + * avoidance when lower priority regular workitems (readers) are being invoked + * and are preventing a higher priority barrier (writer) from being invoked. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_lane_s *dispatch_queue_concurrent_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_queue_concurrent, dispatch_queue); +#endif + __BEGIN_DECLS /*! @@ -137,8 +263,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_f(dispatch_queue_t queue, - void *_Nullable context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); /*! * @function dispatch_sync @@ -147,8 +272,12 @@ dispatch_async_f(dispatch_queue_t queue, * Submits a block for synchronous execution on a dispatch queue. * * @discussion - * Submits a block to a dispatch queue like dispatch_async(), however - * dispatch_sync() will not return until the block has finished. + * Submits a workitem to a dispatch queue like dispatch_async(), however + * dispatch_sync() will not return until the workitem has finished. + * + * Work items submitted to a queue with dispatch_sync() do not observe certain + * queue attributes of that queue when invoked (such as autorelease frequency + * and QOS class). * * Calls to dispatch_sync() targeting the current queue will result * in dead-lock. Use of dispatch_sync() is also subject to the same @@ -159,8 +288,10 @@ dispatch_async_f(dispatch_queue_t queue, * calls to this function are synchronous, the dispatch_sync() "borrows" the * reference of the caller. * - * As an optimization, dispatch_sync() invokes the block on the current - * thread when possible. 
+ * As an optimization, dispatch_sync() invokes the workitem on the thread which + * submitted the workitem, except when the passed queue is the main queue or + * a queue targetting it (See dispatch_queue_main_t, + * dispatch_set_target_queue()). * * @param queue * The target dispatch queue to which the block is submitted. @@ -203,18 +334,19 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_sync_f(dispatch_queue_t queue, - void *_Nullable context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); -#if !defined(__APPLE__) || TARGET_OS_WATCH || TARGET_OS_TV || \ +#if defined(__APPLE__) && \ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \ - __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_7_0) || \ + __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_7_0) || \ (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ - __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_9) -#define DISPATCH_APPLY_AUTO_AVAILABLE 1 -#else + __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_9) #define DISPATCH_APPLY_AUTO_AVAILABLE 0 +#define DISPATCH_APPLY_QUEUE_ARG_NULLABILITY _Nonnull +#else +#define DISPATCH_APPLY_AUTO_AVAILABLE 1 +#define DISPATCH_APPLY_QUEUE_ARG_NULLABILITY _Nullable #endif /*! 
@@ -270,7 +402,8 @@ dispatch_sync_f(dispatch_queue_t queue, API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_apply(size_t iterations, dispatch_queue_t queue, +dispatch_apply(size_t iterations, + dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, DISPATCH_NOESCAPE void (^block)(size_t)); #endif @@ -304,9 +437,9 @@ dispatch_apply(size_t iterations, dispatch_queue_t queue, API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void -dispatch_apply_f(size_t iterations, dispatch_queue_t queue, - void *_Nullable context, - void (*work)(void *_Nullable, size_t)); +dispatch_apply_f(size_t iterations, + dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, + void *_Nullable context, void (*work)(void *_Nullable, size_t)); /*! * @function dispatch_get_current_queue @@ -343,7 +476,12 @@ dispatch_queue_t dispatch_get_current_queue(void); API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; +DISPATCH_EXPORT +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +struct dispatch_queue_static_s _dispatch_main_q; +#else +struct dispatch_queue_s _dispatch_main_q; +#endif /*! * @function dispatch_get_main_queue @@ -356,15 +494,24 @@ DISPATCH_EXPORT struct dispatch_queue_s _dispatch_main_q; * call dispatch_main(), NSApplicationMain(), or use a CFRunLoop on the main * thread. * + * The main queue is meant to be used in application context to interact with + * the main thread and the main runloop. + * + * Because the main queue doesn't behave entirely like a regular serial queue, + * it may have unwanted side-effects when used in processes that are not UI apps + * (daemons). For such processes, the main queue should be avoided. + * + * @see dispatch_queue_main_t + * * @result * Returns the main queue. This queue is created automatically on behalf of * the main thread before main() is called. 
*/ DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW -dispatch_queue_t +dispatch_queue_main_t dispatch_get_main_queue(void) { - return DISPATCH_GLOBAL_OBJECT(dispatch_queue_t, _dispatch_main_q); + return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q); } /*! @@ -420,9 +567,7 @@ typedef unsigned int dispatch_qos_class_t; * class. * * @discussion - * The well-known global concurrent queues may not be modified. Calls to - * dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will - * have no effect when used with queues returned by this function. + * See dispatch_queue_global_t. * * @param identifier * A quality of service class defined in qos_class_t or a priority defined in @@ -453,7 +598,7 @@ typedef unsigned int dispatch_qos_class_t; */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_t +dispatch_queue_global_t dispatch_get_global_queue(long identifier, unsigned long flags); /*! @@ -467,7 +612,11 @@ DISPATCH_DECL(dispatch_queue_attr); /*! * @const DISPATCH_QUEUE_SERIAL * - * @discussion A dispatch queue that invokes blocks serially in FIFO order. + * @discussion + * An attribute that can be used to create a dispatch queue that invokes blocks + * serially in FIFO order. + * + * See dispatch_queue_serial_t. */ #define DISPATCH_QUEUE_SERIAL NULL @@ -475,8 +624,10 @@ DISPATCH_DECL(dispatch_queue_attr); * @const DISPATCH_QUEUE_SERIAL_INACTIVE * * @discussion - * A dispatch queue that invokes blocks serially in FIFO order, and that is - * created initially inactive. See dispatch_queue_attr_make_initially_inactive(). + * An attribute that can be used to create a dispatch queue that invokes blocks + * serially in FIFO order, and that is initially inactive. + * + * See dispatch_queue_attr_make_initially_inactive(). 
*/ #define DISPATCH_QUEUE_SERIAL_INACTIVE \ dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_SERIAL) @@ -484,8 +635,12 @@ DISPATCH_DECL(dispatch_queue_attr); /*! * @const DISPATCH_QUEUE_CONCURRENT * - * @discussion A dispatch queue that may invoke blocks concurrently and supports - * barrier blocks submitted with the dispatch barrier API. + * @discussion + * An attribute that can be used to create a dispatch queue that may invoke + * blocks concurrently and supports barrier blocks submitted with the dispatch + * barrier API. + * + * See dispatch_queue_concurrent_t. */ #define DISPATCH_QUEUE_CONCURRENT \ DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \ @@ -498,9 +653,11 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; * @const DISPATCH_QUEUE_CONCURRENT_INACTIVE * * @discussion - * A dispatch queue that may invoke blocks concurrently and supports barrier - * blocks submitted with the dispatch barrier API, and that is created initially - * inactive. See dispatch_queue_attr_make_initially_inactive(). + * An attribute that can be used to create a dispatch queue that may invoke + * blocks concurrently and supports barrier blocks submitted with the dispatch + * barrier API, and that is initially inactive. + * + * See dispatch_queue_attr_make_initially_inactive(). */ #define DISPATCH_QUEUE_CONCURRENT_INACTIVE \ dispatch_queue_attr_make_initially_inactive(DISPATCH_QUEUE_CONCURRENT) @@ -668,6 +825,10 @@ dispatch_queue_attr_make_with_autorelease_frequency( * queue = dispatch_queue_create("com.example.myqueue", attr); * * + * The QOS class and relative priority set this way on a queue have no effect on + * blocks that are submitted synchronously to a queue (via dispatch_sync(), + * dispatch_barrier_sync()). + * * @param attr * A queue attribute value to be combined with the QOS class, or NULL. * @@ -725,9 +886,9 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, * reader-writer schemes. 
* * When a dispatch queue is no longer needed, it should be released with - * dispatch_release(). Note that any pending blocks submitted to a queue will - * hold a reference to that queue. Therefore a queue will not be deallocated - * until all pending blocks have finished. + * dispatch_release(). Note that any pending blocks submitted asynchronously to + * a queue will hold a reference to that queue. Therefore a queue will not be + * deallocated until all pending blocks have finished. * * When using a dispatch queue attribute @a attr specifying a QoS class (derived * from the result of dispatch_queue_attr_make_with_qos_class()), passing the @@ -763,8 +924,8 @@ DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t dispatch_queue_create_with_target(const char *_Nullable label, - dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) - DISPATCH_ALIAS_V2(dispatch_queue_create_with_target); + dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) + DISPATCH_ALIAS_V2(dispatch_queue_create_with_target); /*! * @function dispatch_queue_create @@ -783,9 +944,9 @@ dispatch_queue_create_with_target(const char *_Nullable label, * reader-writer schemes. * * When a dispatch queue is no longer needed, it should be released with - * dispatch_release(). Note that any pending blocks submitted to a queue will - * hold a reference to that queue. Therefore a queue will not be deallocated - * until all pending blocks have finished. + * dispatch_release(). Note that any pending blocks submitted asynchronously to + * a queue will hold a reference to that queue. Therefore a queue will not be + * deallocated until all pending blocks have finished. 
* * Passing the result of the dispatch_queue_attr_make_with_qos_class() function * to the attr parameter of this function allows a quality of service class and @@ -993,9 +1154,8 @@ dispatch_main(void); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW void -dispatch_after(dispatch_time_t when, - dispatch_queue_t queue, - dispatch_block_t block); +dispatch_after(dispatch_time_t when, dispatch_queue_t queue, + dispatch_block_t block); #endif /*! @@ -1026,10 +1186,8 @@ dispatch_after(dispatch_time_t when, API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW void -dispatch_after_f(dispatch_time_t when, - dispatch_queue_t queue, - void *_Nullable context, - dispatch_function_t work); +dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); /*! * @functiongroup Dispatch Barrier API @@ -1108,8 +1266,7 @@ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_async_f(dispatch_queue_t queue, - void *_Nullable context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); /*! * @function dispatch_barrier_sync @@ -1168,8 +1325,7 @@ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_barrier_sync_f(dispatch_queue_t queue, - void *_Nullable context, - dispatch_function_t work); + void *_Nullable context, dispatch_function_t work); /*! * @functiongroup Dispatch queue-specific contexts @@ -1211,7 +1367,7 @@ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, - void *_Nullable context, dispatch_function_t _Nullable destructor); + void *_Nullable context, dispatch_function_t _Nullable destructor); /*! 
* @function dispatch_queue_get_specific @@ -1321,7 +1477,7 @@ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue(dispatch_queue_t queue) - DISPATCH_ALIAS_V2(dispatch_assert_queue); + DISPATCH_ALIAS_V2(dispatch_assert_queue); /*! * @function dispatch_assert_queue_barrier @@ -1370,7 +1526,7 @@ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 void dispatch_assert_queue_not(dispatch_queue_t queue) - DISPATCH_ALIAS_V2(dispatch_assert_queue_not); + DISPATCH_ALIAS_V2(dispatch_assert_queue_not); #ifdef NDEBUG #define dispatch_assert_queue_debug(q) ((void)(0 && (q))) diff --git a/dispatch/source.h b/dispatch/source.h index 6992d4226..61a33bb6c 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -548,6 +548,7 @@ dispatch_source_testcancel(dispatch_source_t source); * * DISPATCH_SOURCE_TYPE_DATA_ADD: n/a * DISPATCH_SOURCE_TYPE_DATA_OR: n/a + * DISPATCH_SOURCE_TYPE_DATA_REPLACE: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: mach port (mach_port_t) * DISPATCH_SOURCE_TYPE_MACH_RECV: mach port (mach_port_t) * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE n/a @@ -579,6 +580,7 @@ dispatch_source_get_handle(dispatch_source_t source); * * DISPATCH_SOURCE_TYPE_DATA_ADD: n/a * DISPATCH_SOURCE_TYPE_DATA_OR: n/a + * DISPATCH_SOURCE_TYPE_DATA_REPLACE: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t @@ -615,6 +617,7 @@ dispatch_source_get_mask(dispatch_source_t source); * * DISPATCH_SOURCE_TYPE_DATA_ADD: application defined data * DISPATCH_SOURCE_TYPE_DATA_OR: application defined data + * DISPATCH_SOURCE_TYPE_DATA_REPLACE: application defined data * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t @@ -637,9 
+640,9 @@ dispatch_source_get_data(dispatch_source_t source); * @function dispatch_source_merge_data * * @abstract - * Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD or - * DISPATCH_SOURCE_TYPE_DATA_OR and submits its event handler block to its - * target queue. + * Merges data into a dispatch source of type DISPATCH_SOURCE_TYPE_DATA_ADD, + * DISPATCH_SOURCE_TYPE_DATA_OR or DISPATCH_SOURCE_TYPE_DATA_REPLACE, + * and submits its event handler block to its target queue. * * @param source * The result of passing NULL in this parameter is undefined. @@ -684,8 +687,9 @@ dispatch_source_merge_data(dispatch_source_t source, unsigned long value); * * The 'start' argument also determines which clock will be used for the timer: * If 'start' is DISPATCH_TIME_NOW or was created with dispatch_time(3), the - * timer is based on mach_absolute_time(). If 'start' was created with - * dispatch_walltime(3), the timer is based on gettimeofday(3). + * timer is based on up time (which is obtained from mach_absolute_time() on + * Apple platforms). If 'start' was created with dispatch_walltime(3), the + * timer is based on gettimeofday(3). * * Calling this function has no effect if the timer source has already been * canceled. diff --git a/dispatch/time.h b/dispatch/time.h index ce99f2700..02dd27f6e 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -66,6 +66,10 @@ struct timespec; */ typedef uint64_t dispatch_time_t; +enum { + DISPATCH_WALLTIME_NOW DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = ~1ull, +}; + #define DISPATCH_TIME_NOW (0ull) #define DISPATCH_TIME_FOREVER (~0ull) @@ -73,15 +77,19 @@ typedef uint64_t dispatch_time_t; * @function dispatch_time * * @abstract - * Create dispatch_time_t relative to the default clock or modify an existing - * dispatch_time_t. + * Create a dispatch_time_t relative to the current value of the default or + * wall time clock, or modify an existing dispatch_time_t. 
* * @discussion - * On Mac OS X the default clock is based on mach_absolute_time(). + * On Apple platforms, the default clock is based on mach_absolute_time(). * * @param when - * An optional dispatch_time_t to add nanoseconds to. If zero is passed, then - * dispatch_time() will use the result of mach_absolute_time(). + * An optional dispatch_time_t to add nanoseconds to. If DISPATCH_TIME_NOW is + * passed, then dispatch_time() will use the default clock (which is based on + * mach_absolute_time() on Apple platforms). If DISPATCH_WALLTIME_NOW is used, + * dispatch_time() will use the value returned by gettimeofday(3). + * dispatch_time(DISPATCH_WALLTIME_NOW, delta) is equivalent to + * dispatch_walltime(NULL, delta). * * @param delta * Nanoseconds to add. @@ -106,6 +114,8 @@ dispatch_time(dispatch_time_t when, int64_t delta); * @param when * A struct timespec to add time to. If NULL is passed, then * dispatch_walltime() will use the result of gettimeofday(3). + * dispatch_walltime(NULL, delta) returns the same value as + * dispatch_time(DISPATCH_WALLTIME_NOW, delta). * * @param delta * Nanoseconds to add. 
diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index e7134e709..e6e9be8ee 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -51,12 +51,24 @@ name = libdispatch_tests_legacy; productName = libdispatch_tests; }; + 9BEBA56F20127D3300E6FD0D /* libdispatch_tools_Sim */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 9BEBA57620127D3300E6FD0D /* Build configuration list for PBXAggregateTarget "libdispatch_tools_Sim" */; + buildPhases = ( + ); + dependencies = ( + 9BEBA57820127D4400E6FD0D /* PBXTargetDependency */, + ); + name = libdispatch_tools_Sim; + productName = libdispatch_tools_Sim; + }; C927F35A10FD7F0600C5AB8B /* libdispatch_tools */ = { isa = PBXAggregateTarget; buildConfigurationList = C927F35E10FD7F0B00C5AB8B /* Build configuration list for PBXAggregateTarget "libdispatch_tools" */; buildPhases = ( ); dependencies = ( + 9B2A11A32032494E0060E7D4 /* PBXTargetDependency */, C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */, ); name = libdispatch_tools; @@ -69,9 +81,7 @@ 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; - 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; - 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; 
fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; @@ -84,34 +94,34 @@ 6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; + 6E29394D1FB9527F00FDAC90 /* libdispatch.plist in Copy Ariadne Plist */ = {isa = PBXBuildFile; fileRef = 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */; }; 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; 6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; - 6E4BACC41D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; - 6E4BACC61D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE 
/* mach.c */; }; 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; - 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; - 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; + 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; + 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 
6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; 6E9955CF1C3B218E0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956011C3B21980071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; 6E9956021C3B21990071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956031C3B219A0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; 6E9956041C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; 6E9956051C3B219B0071D40C /* venture.c in Sources */ = {isa = 
PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; @@ -125,29 +135,20 @@ 6EA962971D48622600759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA962981D48622700759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; - 6EA9629A1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; - 6EA9629C1D48622A00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; - 6EA962A21D48625200759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; - 6EA962A41D48625300759D53 /* event_kevent.c 
in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; - 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; - 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; - 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B431BBD898600C35F4D 
/* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B441BBD898700C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; @@ -165,7 +166,6 @@ 6ED64B581BBD8A3E00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6ED64B591BBD8A3F00C35F4D /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EDF10B81BBB488A007F14BF /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6EF0B26D1BA8C527007FA4F6 /* firehose_server_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6EF0B2711BA8C540007FA4F6 /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; 6EF0B2781BA8C56E007FA4F6 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Client, ); }; }; @@ -174,9 +174,7 @@ 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAAD1C8899E9001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 
6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; - 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; - 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; @@ -200,6 +198,8 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + B68358901FA77F5B00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; C00B0DF41C5AEBBE000330B3 /* init.c 
in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; @@ -244,17 +244,14 @@ C90144661C73A9F6002638FC /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; C913AC0F143BD34800B78976 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; - C93D6166143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6167143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C9C5F80E143C1771006DC718 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; - E417A38412A472C4004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E420867016027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; E420867116027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; E420867216027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; - E420867316027AE500EEE210 /* data.m in Sources */ = {isa = PBXBuildFile; 
fileRef = E420866F16027AE500EEE210 /* data.m */; }; E421E5F91716ADA10090DC9B /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = E421E5F81716ADA10090DC9B /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; }; E422A0D512A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E422A0D612A557B5005E5BDB /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; @@ -264,26 +261,20 @@ E43A72501AF85BBC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; - E43A72861AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; - E43A72881AF85BE900BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44A8E6B1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; E44A8E6C1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; 
E44A8E6D1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; E44A8E6F1805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; E44EBE3E1251659900645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; - E44EBE5412517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - E44EBE5512517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; E44EBE5612517EBE00645D88 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; E44EBE5712517EBE00645D88 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* 
init.c */; }; E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; @@ -312,20 +303,6 @@ E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; - E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; - E46DBC4214EE10C80001F9F6 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; - E46DBC4314EE10C80001F9F6 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; - E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; - E46DBC4514EE10C80001F9F6 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; - E46DBC4614EE10C80001F9F6 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - E46DBC4714EE10C80001F9F6 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; - E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - E46DBC4914EE10C80001F9F6 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; - E46DBC4A14EE10C80001F9F6 /* 
time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; - E46DBC4B14EE10C80001F9F6 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; - E46DBC4C14EE10C80001F9F6 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; @@ -363,7 +340,6 @@ E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; - E49F2424125D3C970057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; E49F24AC125D57FA0057C971 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -435,16 +411,6 @@ 
E4D76A9418E325D200B1F98B /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; - E4EC11AE12514302000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; - E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; - E4EC11B012514302000DDBD1 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; - E4EC11B112514302000DDBD1 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - E4EC11B212514302000DDBD1 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; - E4EC11B312514302000DDBD1 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - E4EC11B412514302000DDBD1 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; - E4EC11B512514302000DDBD1 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; - E4EC11B712514302000DDBD1 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; - E4EC11B812514302000DDBD1 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; E4EC121A12514715000DDBD1 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; E4EC121B12514715000DDBD1 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; E4EC121C12514715000DDBD1 /* 
once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; @@ -459,8 +425,8 @@ E4ECBAA615253D17002C313C /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E4FC3264145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3265145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; - E4FC3266145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; E4FC3267145F46C9002FBDDB /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + F7DC045B2060BBBE00C90737 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; FC1832A6109923C7003403D5 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; FC1832A7109923C7003403D5 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; @@ -534,6 +500,27 @@ remoteGlobalIDString = 92F3FECA1BEC69E500025962; remoteInfo = darwintests; }; + 9B2A11A22032494E0060E7D4 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = 9BECABC71E944C0400ED341E; + remoteInfo = "queue-tip"; + }; + 9B2A11A92032494E0060E7D4 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; + proxyType = 2; + remoteGlobalIDString = 9BECABC81E944C0400ED341E; + remoteInfo = "queue-tip"; + }; + 9BEBA57720127D4400E6FD0D /* 
PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = FCFA5A9F10D1AE050074F59A; + remoteInfo = ddt; + }; C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -569,20 +556,6 @@ remoteGlobalIDString = D2AAC045055464E500DB518D; remoteInfo = libdispatch; }; - E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = E46DBC1A14EE10C80001F9F6; - remoteInfo = libdispatch_static; - }; - E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = E4EC118F12514302000DDBD1; - remoteInfo = "libdispatch up resolved"; - }; E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -595,7 +568,7 @@ containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; remoteGlobalIDString = E49BB6CE1E70748100868613; - remoteInfo = "libdispatch alt resolved"; + remoteInfo = "libdispatch armv81 resolved"; }; E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; @@ -607,6 +580,17 @@ /* End PBXContainerItemProxy section */ /* Begin PBXCopyFilesBuildPhase section */ + 6E2939471FB9522D00FDAC90 /* Copy Ariadne Plist */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 8; + dstPath = /AppleInternal/Library/Ariadne/Plists; + dstSubfolderSpec = 0; + files = ( + 6E29394D1FB9527F00FDAC90 /* libdispatch.plist in Copy Ariadne Plist */, + ); + name = "Copy Ariadne Plist"; + runOnlyForDeploymentPostprocessing = 1; + }; 6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */ = 
{ isa = PBXCopyFilesBuildPhase; buildActionMask = 8; @@ -636,6 +620,8 @@ 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = ""; }; 6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_server_internal.h; sourceTree = ""; }; 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = firehose_server.c; sourceTree = ""; }; + 6E2464E21F5E67E20031ADD9 /* check-order.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "check-order.sh"; sourceTree = ""; }; + 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = libdispatch.plist; sourceTree = ""; }; 6E326A8F1C2245C4002A6505 /* dispatch_transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_transform.c; sourceTree = ""; }; 6E326AB11C224830002A6505 /* dispatch_cascade.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cascade.c; sourceTree = ""; }; 6E326AB31C224870002A6505 /* dispatch_qos.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_qos.c; sourceTree = ""; }; @@ -658,13 +644,16 @@ 6E326B161C239431002A6505 /* dispatch_timer_short.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_short.c; sourceTree = ""; }; 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_timer_timeout.c; sourceTree = ""; }; 6E326B441C239B61002A6505 /* dispatch_priority.c */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.c.c; path = dispatch_priority.c; sourceTree = ""; }; + 6E49BF2420E34B43002624FC /* libdispatch.clean */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.clean; sourceTree = ""; }; + 6E49BF2920E34B44002624FC /* libdispatch.dirty */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.dirty; sourceTree = ""; }; 6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = ""; }; 6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = ""; }; 6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = ""; }; + 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workqueue_internal.h; sourceTree = ""; }; + 6E5662E41F8C2E5B00BC2474 /* workqueue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = workqueue.c; sourceTree = ""; }; 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = ""; }; 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_internal.h; sourceTree = ""; }; 6E5ACCBD1D3C6719007DA2B4 /* event.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event.c; sourceTree = ""; }; - 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_trysync.c; sourceTree = ""; }; 6E67D8D31C16C20B00FC98AC /* 
dispatch_apply.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_apply.c; sourceTree = ""; }; 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_cf_main.c; sourceTree = ""; }; 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_debug.c; sourceTree = ""; }; @@ -673,6 +662,7 @@ 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_overcommit.c; sourceTree = ""; }; 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_pingpong.c; sourceTree = ""; }; 6E67D9171C17BA7200FC98AC /* nsoperation.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = nsoperation.m; sourceTree = ""; }; + 6E70181C1F4EB51B0077C1DC /* workloop_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop_private.h; sourceTree = ""; }; 6E8E4E6D1C1A35EE0004F5CC /* dispatch_select.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_select.c; sourceTree = ""; }; 6E8E4E6E1C1A35EE0004F5CC /* test_lib.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = test_lib.c; sourceTree = ""; }; 6E8E4E6F1C1A35EE0004F5CC /* test_lib.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_lib.h; sourceTree = ""; }; @@ -739,8 +729,11 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; + B683588A1FA77F4900AA0D58 /* time_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = time_private.h; sourceTree = ""; }; + B68358911FA77FFD00AA0D58 /* dispatch_time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_time.c; sourceTree = ""; }; B69878521F06F8790088F94F /* dispatch_signals.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_signals.c; sourceTree = ""; }; B6AC73FD1EB10973009FB2F2 /* perf_thread_request.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = perf_thread_request.c; sourceTree = ""; }; B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_create.c; sourceTree = ""; }; @@ -782,13 +775,11 @@ E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; - E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; - E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-up-static.xcconfig"; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = ""; }; - E49BB6F21E70748100868613 /* libdispatch_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_alt.a; sourceTree = BUILT_PRODUCTS_DIR; }; + E49BB6F21E70748100868613 /* libdispatch_armv81.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_armv81.a; sourceTree = BUILT_PRODUCTS_DIR; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E49F251D125D630A0057C971 /* install-manpages.sh */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = ""; }; E49F251E125D631D0057C971 /* mig-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "mig-headers.sh"; sourceTree = ""; }; @@ -806,10 +797,10 @@ E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; - E4EC11C312514302000DDBD1 /* libdispatch_up.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_up.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4ECBAA415253C25002C313C /* mach_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_private.h; sourceTree = ""; }; E4FC3263145F46C9002FBDDB /* object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = object.m; sourceTree = ""; }; + F7DC045A2060BBBE00C90737 /* target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = target.h; sourceTree = ""; }; FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; FC1832A2109923C7003403D5 /* perfmon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perfmon.h; 
sourceTree = ""; }; FC1832A3109923C7003403D5 /* time.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = ""; }; @@ -863,6 +854,7 @@ 92F3FEC91BEC687200025962 /* Darwin Tests */, C6A0FF2B0290797F04C91782 /* Documentation */, 1AB674ADFE9D54B511CA2CBB /* Products */, + 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */, C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */, ); @@ -897,6 +889,7 @@ 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, + 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */, FC7BED950E8361E600161930 /* protocol.defs */, E43570B8126E93380097AB9F /* provider.d */, 6E5ACCAF1D3BF2A0007DA2B4 /* event */, @@ -913,9 +906,7 @@ E4B515D6164B2DA300E003AF /* libdispatch.dylib */, E49F24DF125D57FA0057C971 /* libdispatch.dylib */, E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, - E4EC11C312514302000DDBD1 /* libdispatch_up.a */, - E49BB6F21E70748100868613 /* libdispatch_alt.a */, - E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */, + E49BB6F21E70748100868613 /* libdispatch_armv81.a */, C01866BD1C5973210040FC07 /* libdispatch.a */, C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, @@ -940,6 +931,7 @@ children = ( 6EA793881D458A5800929B1B /* event_config.h */, 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */, + 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */, ); path = event; sourceTree = ""; @@ -950,6 +942,7 @@ 6E5ACCBD1D3C6719007DA2B4 /* event.c */, 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */, 6EA7937D1D456D1300929B1B /* event_epoll.c */, + 6E5662E41F8C2E5B00BC2474 /* workqueue.c */, ); path = event; sourceTree = ""; @@ -1036,9 +1029,9 @@ 6E326B151C239431002A6505 /* dispatch_timer_set_time.c */, 6E326B161C239431002A6505 /* dispatch_timer_short.c */, 6E326B171C239431002A6505 /* dispatch_timer_timeout.c */, + 
B68358911FA77FFD00AA0D58 /* dispatch_time.c */, 6E326AE61C2392E8002A6505 /* dispatch_timer.c */, 6E326A8F1C2245C4002A6505 /* dispatch_transform.c */, - 6E62B0531C55806200D2C7C0 /* dispatch_trysync.c */, 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */, 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, B68330BC1EBCF6080003E71C /* dispatch_wl.c */, @@ -1057,6 +1050,14 @@ path = tests; sourceTree = ""; }; + 9B6A42E11FE098430000D146 /* Products */ = { + isa = PBXGroup; + children = ( + 9B2A11AA2032494E0060E7D4 /* queue-tip */, + ); + name = Products; + sourceTree = ""; + }; C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( @@ -1096,7 +1097,6 @@ E43D93F11097917E004F6A62 /* libdispatch.xcconfig */, E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, - E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */, C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */, C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, @@ -1104,6 +1104,8 @@ 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, E448727914C6215D00BB45C2 /* libdispatch.order */, + 6E49BF2420E34B43002624FC /* libdispatch.clean */, + 6E49BF2920E34B44002624FC /* libdispatch.dirty */, E421E5FD1716BEA70090DC9B /* libdispatch.interposable */, ); path = xcodeconfig; @@ -1133,6 +1135,7 @@ E49F259C125D664F0057C971 /* xcodescripts */ = { isa = PBXGroup; children = ( + 6E2464E21F5E67E20031ADD9 /* check-order.sh */, E49F251D125D630A0057C971 /* install-manpages.sh */, E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */, E421E5FB1716B8730090DC9B /* install-dtrace.sh */, @@ -1168,6 +1171,7 @@ FC1832A0109923B3003403D5 /* shims */ = { isa = PBXGroup; children = ( + F7DC045A2060BBBE00C90737 /* target.h */, 96929D820F3EA1020041FF5D /* atomic.h */, E463024F1761603C00E11F4C /* atomic_sfb.h */, 
E4BA743913A8911B0095BDF1 /* getprogname.h */, @@ -1211,8 +1215,10 @@ C913AC0E143BD34800B78976 /* data_private.h */, E48AF55916E70FD9004105FF /* io_private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, + 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, FCEF047F0F5661960067401F /* source_private.h */, E4ECBAA415253C25002C313C /* mach_private.h */, + B683588A1FA77F4900AA0D58 /* time_private.h */, C90144641C73A845002638FC /* module.modulemap */, 961B99350F3E83980006BC96 /* benchmark.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, @@ -1301,8 +1307,10 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, C90144661C73A9F6002638FC /* module.modulemap in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, + F7DC045B2060BBBE00C90737 /* target.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, FC7BED9E0E8361E600161930 /* internal.h in Headers */, + 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, @@ -1329,6 +1337,8 @@ 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, + B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */, + 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */, E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, E48AF55A16E70FD9004105FF /* io_private.h in Headers */, E4ECBAA515253C25002C313C /* mach_private.h in Headers */, @@ -1344,6 +1354,7 @@ E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, + 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* 
inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1354,6 +1365,7 @@ E49F24B1125D57FA0057C971 /* group.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, + 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */, E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */, E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in Headers */, @@ -1381,6 +1393,7 @@ 6ED64B521BBD8A2100C35F4D /* firehose_buffer_internal.h in Headers */, E48EC97D1835BADD00EAC4F1 /* yield.h in Headers */, 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */, + B68358901FA77F5B00AA0D58 /* time_private.h in Headers */, E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */, @@ -1405,6 +1418,7 @@ E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, E44F9DAF16544026001DCD38 /* internal.h in Headers */, E421E5F91716ADA10090DC9B /* introspection.h in Headers */, + 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */, E44F9DB216544032001DCD38 /* object_internal.h in Headers */, E44F9DB316544037001DCD38 /* queue_internal.h in Headers */, 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */, @@ -1532,17 +1546,17 @@ E4EB4A2B14C3720B00AA0FA9 /* Install Headers */, E482F1C512DBAA110030614D /* Postprocess Headers */, 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */, + 6E2464DD1F5E67900031ADD9 /* Validate symbol ordering */, + 6E2939471FB9522D00FDAC90 /* Copy Ariadne Plist */, ); buildRules = ( ); dependencies = ( 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, - E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */, E49BB6F81E7074C100868613 /* PBXTargetDependency */, E4B515DB164B317700E003AF /* PBXTargetDependency */, C01866C21C597AEA0040FC07 /* 
PBXTargetDependency */, - E437F0D614F7441F00F0B997 /* PBXTargetDependency */, C00B0E141C5AEED6000330B3 /* PBXTargetDependency */, ); name = libdispatch; @@ -1550,24 +1564,9 @@ productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; - E46DBC1A14EE10C80001F9F6 /* libdispatch up static */ = { - isa = PBXNativeTarget; - buildConfigurationList = E46DBC5414EE10C80001F9F6 /* Build configuration list for PBXNativeTarget "libdispatch up static" */; - buildPhases = ( - E46DBC3E14EE10C80001F9F6 /* Sources */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "libdispatch up static"; - productName = libdispatch; - productReference = E46DBC5714EE10C80001F9F6 /* libdispatch_up.a */; - productType = "com.apple.product-type.library.static"; - }; - E49BB6CE1E70748100868613 /* libdispatch alt resolved */ = { + E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */ = { isa = PBXNativeTarget; - buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */; + buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */; buildPhases = ( E49BB6CF1E70748100868613 /* Mig Headers */, E49BB6D01E70748100868613 /* Sources */, @@ -1577,9 +1576,9 @@ ); dependencies = ( ); - name = "libdispatch alt resolved"; + name = "libdispatch armv81 resolved"; productName = libdispatch; - productReference = E49BB6F21E70748100868613 /* libdispatch_alt.a */; + productReference = E49BB6F21E70748100868613 /* libdispatch_armv81.a */; productType = "com.apple.product-type.library.static"; }; E49F24A9125D57FA0057C971 /* libdispatch no resolver */ = { @@ -1619,23 +1618,6 @@ productReference = E4B515D6164B2DA300E003AF /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; - E4EC118F12514302000DDBD1 /* libdispatch up resolved */ = { - isa = PBXNativeTarget; - 
buildConfigurationList = E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */; - buildPhases = ( - E4EC12141251461A000DDBD1 /* Mig Headers */, - E4EC11AC12514302000DDBD1 /* Sources */, - E4EC121212514613000DDBD1 /* Symlink normal variant */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "libdispatch up resolved"; - productName = libdispatch; - productReference = E4EC11C312514302000DDBD1 /* libdispatch_up.a */; - productType = "com.apple.product-type.library.static"; - }; E4EC121612514715000DDBD1 /* libdispatch mp resolved */ = { isa = PBXNativeTarget; buildConfigurationList = E4EC122612514715000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch mp resolved" */; @@ -1687,6 +1669,10 @@ CreatedOnToolsVersion = 7.1; ProvisioningStyle = Manual; }; + 9BEBA56F20127D3300E6FD0D = { + CreatedOnToolsVersion = 9.3; + ProvisioningStyle = Automatic; + }; C00B0DF01C5AEBBE000330B3 = { ProvisioningStyle = Manual; }; @@ -1699,18 +1685,12 @@ D2AAC045055464E500DB518D = { ProvisioningStyle = Manual; }; - E46DBC1A14EE10C80001F9F6 = { - ProvisioningStyle = Manual; - }; E49F24A9125D57FA0057C971 = { ProvisioningStyle = Manual; }; E4B51595164B2DA300E003AF = { ProvisioningStyle = Manual; }; - E4EC118F12514302000DDBD1 = { - ProvisioningStyle = Manual; - }; E4EC121612514715000DDBD1 = { ProvisioningStyle = Manual; }; @@ -1737,21 +1717,24 @@ ProductGroup = 4552536F19B1384900B88766 /* Products */; ProjectRef = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; }, + { + ProductGroup = 9B6A42E11FE098430000D146 /* Products */; + ProjectRef = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; + }, ); projectRoot = ""; targets = ( D2AAC045055464E500DB518D /* libdispatch */, E49F24A9125D57FA0057C971 /* libdispatch no resolver */, E4EC121612514715000DDBD1 /* libdispatch mp resolved */, - E4EC118F12514302000DDBD1 /* libdispatch up resolved */, - E49BB6CE1E70748100868613 /* libdispatch alt resolved */, + 
E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */, E4B51595164B2DA300E003AF /* libdispatch introspection */, - E46DBC1A14EE10C80001F9F6 /* libdispatch up static */, C01866A41C5973210040FC07 /* libdispatch mp static */, C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, + 9BEBA56F20127D3300E6FD0D /* libdispatch_tools_Sim */, 4552540A19B1389700B88766 /* libdispatch_tests */, 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */, 92F3FECA1BEC69E500025962 /* darwintests */, @@ -1790,6 +1773,13 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; + 9B2A11AA2032494E0060E7D4 /* queue-tip */ = { + isa = PBXReferenceProxy; + fileType = "compiled.mach-o.executable"; + path = "queue-tip"; + remoteRef = 9B2A11A92032494E0060E7D4 /* PBXContainerItemProxy */; + sourceTree = BUILT_PRODUCTS_DIR; + }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -1816,6 +1806,24 @@ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + 6E2464DD1F5E67900031ADD9 /* Validate symbol ordering */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodeconfig/libdispatch.order", + "$(SRCROOT)/xcodeconfig/libdispatch.dirty", + "$(SRCROOT)/xcodeconfig/libdispatch.clean", + ); + name = "Validate symbol ordering"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\""; + showEnvVarsInLog = 0; + }; C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -2009,47 +2017,6 @@ shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\""; showEnvVarsInLog = 0; }; - E4EC121212514613000DDBD1 /* Symlink normal variant */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - ); - name = "Symlink normal variant"; - outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(PRODUCT_NAME)_normal.a", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = "/bin/bash -e"; - shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; - showEnvVarsInLog = 0; - }; - E4EC12141251461A000DDBD1 /* Mig Headers */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( - ); - inputPaths = ( - "$(SRCROOT)/src/protocol.defs", - "$(SRCROOT)/src/firehose/firehose.defs", - "$(SRCROOT)/src/firehose/firehose_reply.defs", - "$(SRCROOT)/xcodescripts/mig-headers.sh", - ); - name = "Mig Headers"; - outputPaths = ( - "$(DERIVED_FILE_DIR)/protocol.h", - "$(DERIVED_FILE_DIR)/protocolServer.h", - "$(DERIVED_FILE_DIR)/firehose.h", - "$(DERIVED_FILE_DIR)/firehoseServer.h", - "$(DERIVED_FILE_DIR)/firehose_reply.h", - "$(DERIVED_FILE_DIR)/firehose_replyServer.h", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = "/bin/bash -e"; - shellScript = ". 
\"${SCRIPT_INPUT_FILE_3}\""; - showEnvVarsInLog = 0; - }; E4EC121712514715000DDBD1 /* Mig Headers */ = { isa = PBXShellScriptBuildPhase; buildActionMask = 2147483647; @@ -2215,39 +2182,6 @@ ); runOnlyForDeploymentPostprocessing = 0; }; - E46DBC3E14EE10C80001F9F6 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - E46DBC4014EE10C80001F9F6 /* protocol.defs in Sources */, - 6EBEC7E71BBDD30F009B1596 /* firehose.defs in Sources */, - 6EBEC7EA1BBDD326009B1596 /* firehose_reply.defs in Sources */, - E46DBC4114EE10C80001F9F6 /* resolver.c in Sources */, - E46DBC4214EE10C80001F9F6 /* init.c in Sources */, - E46DBC4714EE10C80001F9F6 /* object.c in Sources */, - E43A72881AF85BE900BAA921 /* block.cpp in Sources */, - 6EF2CAB11C8899EC001ABE83 /* lock.c in Sources */, - E46DBC4414EE10C80001F9F6 /* semaphore.c in Sources */, - E46DBC4514EE10C80001F9F6 /* once.c in Sources */, - E46DBC4314EE10C80001F9F6 /* queue.c in Sources */, - E46DBC4614EE10C80001F9F6 /* apply.c in Sources */, - E46DBC4914EE10C80001F9F6 /* source.c in Sources */, - 6E4BACC61D48A42300B562AE /* mach.c in Sources */, - 6EA9629C1D48622A00759D53 /* event.c in Sources */, - 6EA962A41D48625300759D53 /* event_kevent.c in Sources */, - 6E4BACFA1D49A04900B562AE /* event_epoll.c in Sources */, - E44A8E701805C3E0009FFDB6 /* voucher.c in Sources */, - 6EE664271BE2FD5C00ED7B1C /* firehose_buffer.c in Sources */, - E46DBC4C14EE10C80001F9F6 /* io.c in Sources */, - E46DBC4B14EE10C80001F9F6 /* data.c in Sources */, - E46DBC4D14EE10C80001F9F6 /* transform.c in Sources */, - E46DBC4A14EE10C80001F9F6 /* time.c in Sources */, - 2BBF5A67154B64F5002B20F9 /* allocator.c in Sources */, - E46DBC4814EE10C80001F9F6 /* benchmark.c in Sources */, - 6E9956011C3B21980071D40C /* venture.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; E49BB6D01E70748100868613 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2357,42 +2291,6 @@ ); 
runOnlyForDeploymentPostprocessing = 0; }; - E4EC11AC12514302000DDBD1 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - E417A38412A472C4004D659D /* provider.d in Sources */, - E44EBE5412517EBE00645D88 /* protocol.defs in Sources */, - 6EBEC7E61BBDD30D009B1596 /* firehose.defs in Sources */, - 6EBEC7E91BBDD325009B1596 /* firehose_reply.defs in Sources */, - E49F2424125D3C970057C971 /* resolver.c in Sources */, - E44EBE5512517EBE00645D88 /* init.c in Sources */, - E4EC11B212514302000DDBD1 /* object.c in Sources */, - E4FC3266145F46C9002FBDDB /* object.m in Sources */, - E43A72861AF85BCC00BAA921 /* block.cpp in Sources */, - 6EF2CAAF1C8899EB001ABE83 /* lock.c in Sources */, - E4EC11AF12514302000DDBD1 /* semaphore.c in Sources */, - E4EC11B012514302000DDBD1 /* once.c in Sources */, - E4EC11AE12514302000DDBD1 /* queue.c in Sources */, - E4EC11B112514302000DDBD1 /* apply.c in Sources */, - E4EC11B412514302000DDBD1 /* source.c in Sources */, - 6E4BACC41D48A42200B562AE /* mach.c in Sources */, - 6EA9629A1D48622900759D53 /* event.c in Sources */, - 6EA962A21D48625200759D53 /* event_kevent.c in Sources */, - 6E4BACF81D49A04800B562AE /* event_epoll.c in Sources */, - E44A8E6E1805C3E0009FFDB6 /* voucher.c in Sources */, - 6ED64B421BBD898500C35F4D /* firehose_buffer.c in Sources */, - E4EC11B812514302000DDBD1 /* io.c in Sources */, - E4EC11B712514302000DDBD1 /* data.c in Sources */, - E420867316027AE500EEE210 /* data.m in Sources */, - C93D6166143E190F00EB9023 /* transform.c in Sources */, - E4EC11B512514302000DDBD1 /* time.c in Sources */, - 2BBF5A65154B64F5002B20F9 /* allocator.c in Sources */, - E4EC11B312514302000DDBD1 /* benchmark.c in Sources */, - 6E9956031C3B219A0071D40C /* venture.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; E4EC121812514715000DDBD1 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2452,6 +2350,16 @@ target = 92F3FECA1BEC69E500025962 /* darwintests */; 
targetProxy = 92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */; }; + 9B2A11A32032494E0060E7D4 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = "queue-tip"; + targetProxy = 9B2A11A22032494E0060E7D4 /* PBXContainerItemProxy */; + }; + 9BEBA57820127D4400E6FD0D /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = ddt; + targetProxy = 9BEBA57720127D4400E6FD0D /* PBXContainerItemProxy */; + }; C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; @@ -2472,16 +2380,6 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; - E437F0D614F7441F00F0B997 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = E46DBC1A14EE10C80001F9F6 /* libdispatch up static */; - targetProxy = E437F0D514F7441F00F0B997 /* PBXContainerItemProxy */; - }; - E47D6ECB125FEB9D0070D91C /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = E4EC118F12514302000DDBD1 /* libdispatch up resolved */; - targetProxy = E47D6ECA125FEB9D0070D91C /* PBXContainerItemProxy */; - }; E47D6ECD125FEBA10070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; @@ -2489,7 +2387,7 @@ }; E49BB6F81E7074C100868613 /* PBXTargetDependency */ = { isa = PBXTargetDependency; - target = E49BB6CE1E70748100868613 /* libdispatch alt resolved */; + target = E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */; targetProxy = E49BB6F71E7074C100868613 /* PBXContainerItemProxy */; }; E4B515DB164B317700E003AF /* PBXTargetDependency */ = { @@ -2608,56 +2506,58 @@ }; name = Debug; }; - C00B0E081C5AEBBE000330B3 /* Release */ = { + 9BEBA57020127D3300E6FD0D /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { + CODE_SIGN_STYLE = Automatic; 
+ PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; }; - C00B0E091C5AEBBE000330B3 /* Debug */ = { + 9BEBA57120127D3300E6FD0D /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { + CODE_SIGN_STYLE = Automatic; + PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Debug; }; - C01866BB1C5973210040FC07 /* Release */ = { + C00B0E081C5AEBBE000330B3 /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { }; name = Release; }; - C01866BC1C5973210040FC07 /* Debug */ = { + C00B0E091C5AEBBE000330B3 /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; + baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; buildSettings = { }; name = Debug; }; - C927F35B10FD7F0600C5AB8B /* Release */ = { + C01866BB1C5973210040FC07 /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Release; }; - C927F35C10FD7F0600C5AB8B /* Debug */ = { + C01866BC1C5973210040FC07 /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; buildSettings = { }; name = Debug; }; - E46DBC5514EE10C80001F9F6 /* Release */ = { + C927F35B10FD7F0600C5AB8B /* Release */ = { isa = XCBuildConfiguration; - baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* libdispatch-up-static.xcconfig */; buildSettings = { }; name = Release; }; - E46DBC5614EE10C80001F9F6 /* Debug */ = { + C927F35C10FD7F0600C5AB8B /* Debug */ = { isa = XCBuildConfiguration; - baseConfigurationReference = E46DBC5814EE11BC0001F9F6 /* 
libdispatch-up-static.xcconfig */; buildSettings = { }; name = Debug; @@ -2666,7 +2566,7 @@ isa = XCBuildConfiguration; baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; buildSettings = { - DISPATCH_RESOLVED_VARIANT = alt; + DISPATCH_RESOLVED_VARIANT = armv81; }; name = Release; }; @@ -2674,7 +2574,7 @@ isa = XCBuildConfiguration; baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; buildSettings = { - DISPATCH_RESOLVED_VARIANT = alt; + DISPATCH_RESOLVED_VARIANT = armv81; }; name = Debug; }; @@ -2727,22 +2627,6 @@ }; name = Debug; }; - E4EC11BD12514302000DDBD1 /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; - buildSettings = { - DISPATCH_RESOLVED_VARIANT = up; - }; - name = Release; - }; - E4EC11BE12514302000DDBD1 /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; - buildSettings = { - DISPATCH_RESOLVED_VARIANT = up; - }; - name = Debug; - }; E4EC122712514715000DDBD1 /* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; @@ -2843,6 +2727,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 9BEBA57620127D3300E6FD0D /* Build configuration list for PBXAggregateTarget "libdispatch_tools_Sim" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 9BEBA57020127D3300E6FD0D /* Release */, + 9BEBA57120127D3300E6FD0D /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -2870,16 +2763,7 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - E46DBC5414EE10C80001F9F6 /* Build 
configuration list for PBXNativeTarget "libdispatch up static" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - E46DBC5514EE10C80001F9F6 /* Release */, - E46DBC5614EE10C80001F9F6 /* Debug */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; - E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch alt resolved" */ = { + E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( E49BB6F01E70748100868613 /* Release */, @@ -2906,15 +2790,6 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - E4EC11BC12514302000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch up resolved" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - E4EC11BD12514302000DDBD1 /* Release */, - E4EC11BE12514302000DDBD1 /* Debug */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; E4EC122612514715000DDBD1 /* Build configuration list for PBXNativeTarget "libdispatch mp resolved" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 index 4c55214da..db34af0e3 100644 --- a/man/dispatch_after.3 +++ b/man/dispatch_after.3 @@ -31,6 +31,12 @@ parameter is a value created by .Fn dispatch_time or .Fn dispatch_walltime . +Submission of the block may be delayed by the system in order to improve power consumption and system performance. +The system applies a leeway (see +.Xr dispatch_source_set_timer 3 ) +that is equal to one tenth of the interval between +.Fa when +and the time at which the function is called, with the leeway capped to at least one millisecond and at most one minute. .Pp For a more detailed description about submitting blocks to queues, see .Xr dispatch_async 3 . 
diff --git a/man/dispatch_io_read.3 b/man/dispatch_io_read.3 index 51c3b1c3e..26a11e894 100644 --- a/man/dispatch_io_read.3 +++ b/man/dispatch_io_read.3 @@ -20,7 +20,7 @@ .Fo dispatch_io_write .Fa "dispatch_io_t channel" .Fa "off_t offset" -.Fa "dispatch_data_t dispatch" +.Fa "dispatch_data_t data" .Fa "dispatch_queue_t queue" .Fa "void (^handler)(bool done, dispatch_data_t data, int error)" .Fc @@ -132,7 +132,7 @@ flag set, the associated I/O operation is complete and that handler block will not be run again. If an unrecoverable error occurs while performing the I/O operation, the handler block will be submitted with the .Va done -flag set and the appriate POSIX error code in the +flag set and the appropriate POSIX error code in the .Va error parameter. An invocation of a handler block with the .Va done diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index da263658a..c0aa45171 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -36,7 +36,8 @@ parameter is creatable with the .Xr dispatch_time 3 or .Xr dispatch_walltime 3 -functions. +functions. If the timeout is reached without a signal being received, the semaphore +is re-incremented before the function returns. .Pp The .Fn dispatch_semaphore_signal diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index b4e9a7ad8..313b6e723 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -515,8 +515,9 @@ is .Vt DISPATCH_TIME_NOW or was created with .Xr dispatch_time 3 , -the timer is based on -.Fn mach_absolute_time . +the timer is based on up time (which is obtained from +.Fn mach_absolute_time +on Apple platforms). 
If .Fa start was created with diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 685898de0..2536e0e9f 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -9,6 +9,7 @@ .Sh SYNOPSIS .Fd #include .Vt static const dispatch_time_t DISPATCH_TIME_NOW = 0ull ; +.Vt static const dispatch_time_t DISPATCH_WALLTIME_NOW = ~1ull ; .Vt static const dispatch_time_t DISPATCH_TIME_FOREVER = ~0ull ; .Ft dispatch_time_t .Fo dispatch_time @@ -29,7 +30,8 @@ with dispatch functions that need timeouts or operate on a schedule. The .Fa dispatch_time_t type is a semi-opaque integer, with only the special values -.Vt DISPATCH_TIME_NOW +.Vt DISPATCH_TIME_NOW , +.Vt DISPATCH_WALLTIME_NOW and .Vt DISPATCH_TIME_FOREVER being externally defined. All other values are represented using an internal @@ -43,13 +45,16 @@ function returns a milestone relative to an existing milestone after adding nanoseconds. If the .Fa base -parameter maps internally to a wall clock, then the returned value is -relative to the wall clock. +parameter maps internally to a wall clock or is +.Vt DISPATCH_WALLTIME_NOW , +then the returned value is relative to the wall clock. Otherwise, if .Fa base is .Vt DISPATCH_TIME_NOW , -then the current time of the default host clock is used. +then the current time of the default host clock is used. On Apple platforms, +the value of the default host clock is obtained from +.Vt mach_absolute_time() . .Pp The .Fn dispatch_walltime @@ -59,6 +64,9 @@ using the wall clock, as specified by the optional parameter. If .Fa base is NULL, then the current time of the wall clock is used. +.Vt dispatch_walltime(NULL, offset) +is equivalent to +.Vt dispatch_time(DISPATCH_WALLTIME_NOW, offset) . .Sh EDGE CONDITIONS The .Fn dispatch_time @@ -81,11 +89,16 @@ parameter is ignored. Underflow causes the smallest representable value to be returned for a given clock. 
.Sh EXAMPLES -Create a milestone two seconds in the future: +Create a milestone two seconds in the future, relative to the default clock: .Bd -literal -offset indent milestone = dispatch_time(DISPATCH_TIME_NOW, 2 * NSEC_PER_SEC); .Ed .Pp +Create a milestone two seconds in the future, in wall clock time: +.Bd -literal -offset indent +milestone = dispatch_time(DISPATCH_WALLTIME_NOW, 2 * NSEC_PER_SEC); +.Ed +.Pp Create a milestone for use as an infinite timeout: .Bd -literal -offset indent milestone = DISPATCH_TIME_FOREVER; diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index d131d6dc4..a633bf408 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -31,7 +31,7 @@ #include #endif -#define OS_FIREHOSE_SPI_VERSION 20170222 +#define OS_FIREHOSE_SPI_VERSION 20180226 /*! * @group Firehose SPI @@ -40,7 +40,8 @@ */ #define FIREHOSE_BUFFER_LIBTRACE_HEADER_SIZE 2048ul -#define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT 16 +#define FIREHOSE_BUFFER_KERNEL_MIN_CHUNK_COUNT 16 +#define FIREHOSE_BUFFER_KERNEL_MAX_CHUNK_COUNT 64 typedef struct firehose_buffer_range_s { uint16_t fbr_offset; // offset from the start of the buffer @@ -56,6 +57,14 @@ extern void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); extern void __firehose_critical_region_enter(void); extern void __firehose_critical_region_leave(void); extern void __firehose_allocate(vm_offset_t *addr, vm_size_t size); +extern uint8_t __firehose_buffer_kernel_chunk_count; +extern uint8_t __firehose_num_kernel_io_pages; + +#define FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT FIREHOSE_BUFFER_KERNEL_MIN_CHUNK_COUNT +#define FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES 8 + +#define FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT __firehose_buffer_kernel_chunk_count +#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT (__firehose_buffer_kernel_chunk_count - 1) // the first chunk is the header // exported for the kernel firehose_tracepoint_t @@ -72,6 +81,9 @@ __firehose_buffer_create(size_t 
*size); void __firehose_merge_updates(firehose_push_reply_t update); +int +__firehose_kernel_configuration_valid(uint8_t chunk_count, uint8_t io_pages); + #else #define __firehose_critical_region_enter() diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index fc352da1c..221ecb38e 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -356,6 +356,21 @@ OS_NOTHROW void firehose_server_cancel(void); +/*! + * @function firehose_server_set_logging_prefs + * + * @abstract + * Publishes a new preferences buffer. + * + * @description + * The server will take ownership of this buffer and will + * call munmap() on the previous one that was stored. + */ +OS_NOTHROW +void +firehose_server_set_logging_prefs(void *pointer, size_t length, + os_block_t block); + /*! * @typedef firehose_server_queue_t * diff --git a/os/linux_base.h b/os/linux_base.h index c8b9cad7c..58b497148 100644 --- a/os/linux_base.h +++ b/os/linux_base.h @@ -13,9 +13,12 @@ #ifndef __OS_LINUX_BASE__ #define __OS_LINUX_BASE__ +#if __has_include() +#include +#endif #include -#if HAVE_SYS_CDEFS_H +#if __has_include() #include #endif diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index 8ce0ef583..ed7a53153 100644 --- a/os/voucher_activity_private.h +++ b/os/voucher_activity_private.h @@ -271,15 +271,14 @@ voucher_activity_trace_v(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, uint64_t timestamp, const struct iovec *iov, size_t publen, size_t privlen); +#define VOUCHER_ACTIVITY_TRACE_FLAG_UNRELIABLE 0x01 -API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_trace_v", - macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) -OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 OS_NONNULL6 +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 firehose_tracepoint_id_t -voucher_activity_trace_with_private_strings(firehose_stream_t stream, 
+voucher_activity_trace_v_2(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, uint64_t timestamp, - const void *pubdata, size_t publen, - const void *privdata, size_t privlen); + const struct iovec *iov, size_t publen, size_t privlen, uint32_t flags); typedef const struct voucher_activity_hooks_s { #define VOUCHER_ACTIVITY_HOOKS_VERSION 5 @@ -320,9 +319,30 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks); */ API_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL -void* +void * voucher_activity_get_metadata_buffer(size_t *length); +/*! + * @function voucher_activity_get_logging_preferences + * + * @abstract + * Return address and length of vm_map()ed configuration data for the logging + * subsystem. + * + * @discussion + * The data must be deallocated with vm_deallocate(). + * + * @param length + * Pointer to size_t variable, filled with length of preferences buffer. + * + * @result + * Address of preferences buffer, returns NULL on error. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(3.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL +void * +voucher_activity_get_logging_preferences(size_t *length); + /*! * @function voucher_get_activity_id_4dyld * diff --git a/os/voucher_private.h b/os/voucher_private.h index aecbbc9ff..164aa3c5b 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -199,22 +199,9 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * voucher adopted on the calling thread. If the block object is submitted to a * queue, this replaces the default behavior of associating the submitted block * instance with the voucher adopted at the time of submission. - * This flag is ignored if a specific voucher object is assigned with the - * dispatch_block_create_with_voucher* functions, and is equivalent to passing - * the NULL voucher to these functions. 
- * - * @const DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE - * Flag indicating that this dispatch block object should try to reset the - * recorded maximum QoS of all currently enqueued items on a serial dispatch - * queue at the base of a queue hierarchy. - * - * This is only works if the queue becomes empty by dequeuing the block in - * question, and then allows that block to enqueue more work on this hierarchy - * without perpetuating QoS overrides resulting from items previously executed - * on the hierarchy. - * - * A dispatch block object created with this flag set cannot be used with - * dispatch_block_wait() or dispatch_block_cancel(). + * This flag is ignored if used with the dispatch_block_create_with_voucher*() + * functions. + * */ #define DISPATCH_BLOCK_NO_VOUCHER (0x40ul) @@ -238,9 +225,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * on with dispatch_block_wait() or observed with dispatch_block_notify(). * * The returned dispatch block will be executed with the specified voucher - * adopted for the duration of the block body. If the NULL voucher is passed, - * the block will be executed with the voucher adopted on the calling thread, or - * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided. + * adopted for the duration of the block body. * * If the returned dispatch block object is submitted to a dispatch queue, the * submitted block instance will be associated with the QOS class current at the @@ -265,11 +250,11 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * @param flags * Configuration flags for the block object. * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t - * results in NULL being returned. + * results in NULL being returned. The DISPATCH_BLOCK_NO_VOUCHER flag is + * ignored. * * @param voucher - * A voucher object or NULL. Passing NULL is equivalent to specifying the - * DISPATCH_BLOCK_NO_VOUCHER flag. + * A voucher object or NULL. 
* * @param block * The block to create the dispatch block object from. @@ -305,9 +290,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * on with dispatch_block_wait() or observed with dispatch_block_notify(). * * The returned dispatch block will be executed with the specified voucher - * adopted for the duration of the block body. If the NULL voucher is passed, - * the block will be executed with the voucher adopted on the calling thread, or - * with no voucher if the DISPATCH_BLOCK_DETACHED flag was also provided. + * adopted for the duration of the block body. * * If invoked directly, the returned dispatch block object will be executed with * the assigned QOS class as long as that does not result in a lower QOS class @@ -330,11 +313,11 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * @param flags * Configuration flags for the block object. * Passing a value that is not a bitwise OR of flags from dispatch_block_flags_t - * results in NULL being returned. + * results in NULL being returned. The DISPATCH_BLOCK_NO_VOUCHER and + * DISPATCH_BLOCK_NO_QOS flags are ignored. * * @param voucher - * A voucher object or NULL. Passing NULL is equivalent to specifying the - * DISPATCH_BLOCK_NO_VOUCHER flag. + * A voucher object or NULL. * * @param qos_class * A QOS class value: @@ -419,6 +402,55 @@ OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); +/*! + * @function voucher_kvoucher_debug + * + * @abstract + * Writes a human-readable representation of a voucher to a memory buffer. + * + * @discussion + * The formatted representation of the voucher is written starting at a given + * offset in the buffer. If the remaining space in the buffer is too small, the + * output is truncated. Nothing is written before buf[offset] or at or beyond + * buf[bufsize]. + * + * @param task + * The task port for the task that owns the voucher port. 
+ * + * @param voucher + * The voucher port name. + * + * @param buf + * The buffer to which the formatted representation of the voucher should be + * written. + * + * @param bufsiz + * The size of the buffer. + * + * @param offset + * The offset of the first byte in the buffer to be used for output. + * + * @param prefix + * A string to be written at the start of each line of formatted output. + * Typically used to generate leading whitespace for indentation. Use NULL if + * no prefix is required. + * + * @param max_hex_data + * The maximum number of bytes of hex data to be formatted for voucher content + * that is not of type MACH_VOUCHER_ATTR_KEY_ATM, MACH_VOUCHER_ATTR_KEY_BANK + * or MACH_VOUCHER_ATTR_KEY_IMPORTANCE. + * + * @result + * The offset of the first byte in the buffer following the formatted voucher + * representation. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +size_t +voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, + size_t bufsiz, size_t offset, char * _Nullable prefix, + size_t max_hex_data); + /*! * @group Voucher Persona SPI * SPI intended for clients that need to interact with personas. diff --git a/private/introspection_private.h b/private/introspection_private.h index 972c68857..137ea97ab 100644 --- a/private/introspection_private.h +++ b/private/introspection_private.h @@ -134,7 +134,6 @@ typedef struct dispatch_object_s *dispatch_object_t; * @field source_size * Size of dispatch_introspection_source_s structure. 
*/ - API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT const struct dispatch_introspection_versions_s { unsigned long introspection_version; @@ -389,7 +388,8 @@ typedef struct dispatch_introspection_source_s { unsigned long enqueued:1, handler_is_block:1, timer:1, - after:1; + after:1, + is_xpc:1; } dispatch_introspection_source_s; typedef dispatch_introspection_source_s *dispatch_introspection_source_t; @@ -425,12 +425,12 @@ typedef dispatch_introspection_queue_thread_s * Types of items enqueued on a dispatch queue. */ enum dispatch_introspection_queue_item_type { - dispatch_introspection_queue_item_type_none = 0x0, - dispatch_introspection_queue_item_type_block = 0x11, - dispatch_introspection_queue_item_type_function = 0x12, - dispatch_introspection_queue_item_type_object = 0x100, - dispatch_introspection_queue_item_type_queue = 0x101, - dispatch_introspection_queue_item_type_source = 0102, + dispatch_introspection_queue_item_type_none = 0x0, + dispatch_introspection_queue_item_type_block = 0x11, + dispatch_introspection_queue_item_type_function = 0x12, + dispatch_introspection_queue_item_type_object = 0x100, + dispatch_introspection_queue_item_type_queue = 0x101, + dispatch_introspection_queue_item_type_source = 0x42, }; /*! @@ -531,20 +531,93 @@ typedef void (*dispatch_introspection_hook_queue_item_dequeue_t)( typedef void (*dispatch_introspection_hook_queue_item_complete_t)( dispatch_continuation_t object); +/*! + * @enum dispatch_introspection_runtime_event + * + * @abstract + * Types for major events the dispatch runtime goes through as sent by + * the runtime_event hook. + * + * @const dispatch_introspection_runtime_event_worker_event_delivery + * A worker thread was unparked to deliver some kernel events. + * There may be an unpark event if the thread will pick up a queue to drain. + * There always is a worker_park event when the thread is returned to the pool. 
+ * `ptr` is the queue for which events are being delivered, or NULL (for generic
+ * events).
+ * `value` is the number of events delivered.
+ *
+ * @const dispatch_introspection_runtime_event_worker_unpark
+ * A worker thread just unparked (sent from the context of the thread).
+ * `ptr` is the queue for which the thread unparked.
+ * `value` is 0.
+ *
+ * @const dispatch_introspection_runtime_event_worker_request
+ * `ptr` is set to the queue on behalf of which the thread request is made.
+ * `value` is the number of threads requested.
+ *
+ * @const dispatch_introspection_runtime_event_worker_park
+ * A worker thread is about to park (sent from the context of the thread).
+ * `ptr` and `value` are 0.
+ *
+ * @const dispatch_introspection_runtime_event_sync_wait
+ * A caller of dispatch_sync or dispatch_async_and_wait hit contention.
+ * `ptr` is the queue that caused the initial contention.
+ * `value` is 0.
+ *
+ * @const dispatch_introspection_runtime_event_async_sync_handoff
+ * @const dispatch_introspection_runtime_event_sync_sync_handoff
+ * @const dispatch_introspection_runtime_event_sync_async_handoff
+ *
+ * A queue is being handed off from a thread to another due to respectively:
+ * - async/sync contention
+ * - sync/sync contention
+ * - sync/async contention
+ *
+ * `ptr` is set to dispatch_queue_t which is handed off to the next thread.
+ * `value` is 0.
+ */
+#ifndef __DISPATCH_BUILDING_DISPATCH__
+enum dispatch_introspection_runtime_event {
+	dispatch_introspection_runtime_event_worker_event_delivery = 1,
+	dispatch_introspection_runtime_event_worker_unpark = 2,
+	dispatch_introspection_runtime_event_worker_request = 3,
+	dispatch_introspection_runtime_event_worker_park = 4,
+
+	dispatch_introspection_runtime_event_sync_wait = 10,
+	dispatch_introspection_runtime_event_async_sync_handoff = 11,
+	dispatch_introspection_runtime_event_sync_sync_handoff = 12,
+	dispatch_introspection_runtime_event_sync_async_handoff = 13,
+};
+#endif
+
+/*!
+ * @typedef dispatch_introspection_hook_runtime_event_t + * + * @abstract + * A function pointer called for various runtime events. + * + * @discussion + * The actual payloads are discussed in the documentation of the + * dispatch_introspection_runtime_event enum. + */ +typedef void (*dispatch_introspection_hook_runtime_event_t)( + enum dispatch_introspection_runtime_event event, + void *ptr, unsigned long long value); + /*! * @typedef dispatch_introspection_hooks_s * * @abstract * A structure of function pointer hooks into libdispatch. */ - typedef struct dispatch_introspection_hooks_s { dispatch_introspection_hook_queue_create_t queue_create; dispatch_introspection_hook_queue_dispose_t queue_dispose; dispatch_introspection_hook_queue_item_enqueue_t queue_item_enqueue; dispatch_introspection_hook_queue_item_dequeue_t queue_item_dequeue; dispatch_introspection_hook_queue_item_complete_t queue_item_complete; - void *_reserved[5]; + dispatch_introspection_hook_runtime_event_t runtime_event; + void *_reserved[4]; } dispatch_introspection_hooks_s; typedef dispatch_introspection_hooks_s *dispatch_introspection_hooks_t; @@ -715,7 +788,6 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t queue, * The structure is copied on input and filled with the previously installed * hooks on output. */ - API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT void dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); @@ -740,7 +812,6 @@ dispatch_introspection_hooks_install(dispatch_introspection_hooks_t hooks); * As a convenience, the 'enable' pointer may itself be NULL to indicate that * all hook callouts should be enabled. */ - extern void dispatch_introspection_hook_callouts_enable( dispatch_introspection_hooks_t enable); @@ -751,7 +822,6 @@ dispatch_introspection_hook_callouts_enable( * @abstract * Callout to queue creation hook that a debugger can break on. 
*/ - extern void dispatch_introspection_hook_callout_queue_create( dispatch_introspection_queue_t queue_info); @@ -762,7 +832,6 @@ dispatch_introspection_hook_callout_queue_create( * @abstract * Callout to queue destruction hook that a debugger can break on. */ - extern void dispatch_introspection_hook_callout_queue_dispose( dispatch_introspection_queue_t queue_info); @@ -773,7 +842,6 @@ dispatch_introspection_hook_callout_queue_dispose( * @abstract * Callout to queue enqueue hook that a debugger can break on. */ - extern void dispatch_introspection_hook_callout_queue_item_enqueue( dispatch_queue_t queue, dispatch_introspection_queue_item_t item); @@ -784,7 +852,6 @@ dispatch_introspection_hook_callout_queue_item_enqueue( * @abstract * Callout to queue dequeue hook that a debugger can break on. */ - extern void dispatch_introspection_hook_callout_queue_item_dequeue( dispatch_queue_t queue, dispatch_introspection_queue_item_t item); @@ -795,7 +862,6 @@ dispatch_introspection_hook_callout_queue_item_dequeue( * @abstract * Callout to queue item complete hook that a debugger can break on. */ - extern void dispatch_introspection_hook_callout_queue_item_complete( dispatch_continuation_t object); diff --git a/private/layout_private.h b/private/layout_private.h index 0c0cd942d..d85e94a53 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -54,7 +54,6 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { #endif #if DISPATCH_LAYOUT_SPI - /*! 
* @group Data Structure Layout SPI * SPI intended for CoreSymbolication only @@ -67,8 +66,36 @@ DISPATCH_EXPORT const struct dispatch_tsd_indexes_s { const uint16_t dti_queue_index; const uint16_t dti_voucher_index; const uint16_t dti_qos_class_index; + /* version 3 */ + const uint16_t dti_continuation_cache_index; } dispatch_tsd_indexes; +#if TARGET_OS_MAC + +#include + +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT const struct dispatch_allocator_layout_s { + const uint16_t dal_version; + /* version 1 */ + /* Pointer to the allocator metadata address, points to NULL if unused */ + void **const dal_allocator_zone; + /* Magical "isa" for allocations that are on freelists */ + void *const *const dal_deferred_free_isa; + /* Size of allocations made in the magazine */ + const uint16_t dal_allocation_size; + /* fields used by the enumerator */ + const uint16_t dal_magazine_size; + const uint16_t dal_first_allocation_offset; + const uint16_t dal_allocation_isa_offset; + /* Enumerates allocated continuations */ + kern_return_t (*dal_enumerator)(task_t remote_task, + const struct dispatch_allocator_layout_s *remote_allocator_layout, + vm_address_t zone_address, memory_reader_t reader, + void (^recorder)(vm_address_t dc_address, void *dc_mem, + size_t size, bool *stop)); +} dispatch_allocator_layout; +#endif // TARGET_OS_MAC #endif // DISPATCH_LAYOUT_SPI __END_DECLS diff --git a/private/mach_private.h b/private/mach_private.h index bc5322332..e56f6d5c7 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -66,6 +66,9 @@ DISPATCH_DECL(dispatch_mach); * * @const DISPATCH_MACH_MESSAGE_RECEIVED * A message was received, it is passed in the message parameter. + * It is the responsibility of the client of this API to handle this and consume + * or dispose of the rights in the message (for example by calling + * mach_msg_destroy()). 
* * @const DISPATCH_MACH_MESSAGE_SENT * A message was sent, it is passed in the message parameter (so that associated @@ -115,15 +118,15 @@ DISPATCH_DECL(dispatch_mach); * once during the lifetime of the channel. This event is sent only for XPC * channels (i.e. channels that were created by calling * dispatch_mach_create_4libxpc()) and only if the - * dmxh_enable_sigterm_notification function in the XPC hooks structure is not - * set or it returned true when it was called at channel activation time. + * dmxh_enable_sigterm_notification function in the XPC hooks structure returned + * true when it was called at channel activation time. * * @const DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED * The channel has been disconnected by a call to dispatch_mach_reconnect() or * dispatch_mach_cancel(), an empty message is passed in the message parameter * (so that associated port rights can be disposed of). The message header will * contain a local port with the receive right previously allocated to receive - * an asynchronous reply to a message previously sent to the channel. Used + * an asynchronous reply to a message previously sent to the channel. Used * only if the channel is disconnected while waiting for a reply to a message * sent with dispatch_mach_send_with_result_and_async_reply_4libxpc(). */ @@ -432,6 +435,82 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_cancel(dispatch_mach_t channel); +/*! + * @function dispatch_mach_mig_demux + * + * @abstract + * Handles an incoming DISPATCH_MACH_MESSAGE_RECEIVED event through a series of + * MIG subsystem demultiplexers. + * + * @discussion + * This function can be used with a static array of MIG subsystems to try. + * If it returns true, then the dispatch mach message has been consumed as per + * usual MIG rules. + * + * If it returns false, then the mach message has not been touched, and + * consuming or disposing of the rights in the message is mandatory. 
+ *
+ * It is hence possible to write a manual demuxer this way:
+ *
+ * <code>
+ * if (!dispatch_mach_mig_demux(context, subsystems, count, message)) {
+ *     mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(message, NULL);
+ *     switch (hdr->msgh_id) {
+ *     case ...: // manual consumption of messages
+ *         ...
+ *         break;
+ *     default:
+ *         mach_msg_destroy(hdr); // no one claimed the message, destroy it
+ *     }
+ * }
+ * </code>
+ *
+ * @param context
+ * An optional context that the MIG routines can query with
+ * dispatch_mach_mig_demux_get_context() as MIG doesn't support contexts.
+ *
+ * @param subsystems
+ * An array of mig_subsystem structs for all the demuxers to try.
+ * These are exposed by MIG in the Server header of the generated interface.
+ *
+ * @param count
+ * The number of entries in the subsystems array.
+ *
+ * @param msg
+ * The dispatch mach message to process.
+ *
+ * @returns
+ * Whether or not the dispatch mach message has been consumed.
+ * If false is returned, then it is the responsibility of the caller to consume
+ * or dispose of the received message rights.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW
+bool
+dispatch_mach_mig_demux(void *_Nullable context,
+		const struct mig_subsystem *_Nonnull const subsystems[_Nonnull],
+		size_t count, dispatch_mach_msg_t msg);
+
+/*!
+ * @function dispatch_mach_mig_demux_get_context
+ *
+ * @abstract
+ * Returns the context passed to dispatch_mach_mig_demux() from the context of
+ * a MIG routine implementation.
+ *
+ * @discussion
+ * Calling this function from another context than a MIG routine called from the
+ * context of dispatch_mach_mig_demux() is invalid and will cause
+ * your process to be terminated.
+ *
+ * @returns
+ * The context passed to the outer call to dispatch_mach_mig_demux().
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void *_Nullable +dispatch_mach_mig_demux_get_context(void); + /*! * @function dispatch_mach_send * Asynchronously send a message encapsulated in a dispatch mach message object @@ -813,6 +892,7 @@ typedef void (*_Nonnull dispatch_mach_async_reply_callback_t)(void *context, API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) typedef const struct dispatch_mach_xpc_hooks_s { +#define DISPATCH_MACH_XPC_MIN_HOOKS_VERSION 3 #define DISPATCH_MACH_XPC_HOOKS_VERSION 3 unsigned long version; @@ -838,12 +918,12 @@ typedef const struct dispatch_mach_xpc_hooks_s { * Gets the queue to which a reply to a message sent using * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be * delivered. The msg_context argument is the value of the do_ctxt field - * of the outgoing message, as returned by dispatch_get_context(). If this - * function returns NULL, the reply will be delivered to the channel queue. - * This function should not make any assumptions about the thread on which - * it is called and, since it may be called more than once per message, it - * should execute as quickly as possible and not attempt to synchronize with - * other code. + * of the outgoing message, as returned by dispatch_get_context(). + * + * This function should return a consistent result until an event is + * received for this message. This function must return NULL if + * dispatch_mach_send_with_result_and_async_reply_4libxpc() wasn't used to + * send the message, and non NULL otherwise. */ dispatch_queue_t _Nullable (*_Nonnull dmxh_msg_context_reply_queue)( void *_Nonnull msg_context); @@ -870,12 +950,10 @@ typedef const struct dispatch_mach_xpc_hooks_s { * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be * delivered to the channel's event handler when a SIGTERM is received. 
*/ - bool (* _Nullable dmxh_enable_sigterm_notification)( + bool (*_Nonnull dmxh_enable_sigterm_notification)( void *_Nullable context); } *dispatch_mach_xpc_hooks_t; -#define DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(hooks) ((hooks)->version >= 2) - /*! * @function dispatch_mach_hooks_install_4libxpc * @@ -907,7 +985,7 @@ dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks); * for each message received and for each message that was successfully sent, * that failed to be sent, or was not sent; as well as when a barrier block * has completed, or when channel connection, reconnection or cancellation has - * taken effect. However, the handler will not be called for messages that + * taken effect. However, the handler will not be called for messages that * were passed to the XPC hooks dmxh_direct_message_handler function if that * function returned true. * @@ -951,7 +1029,7 @@ dispatch_mach_create_4libxpc(const char *_Nullable label, * dmxh_msg_context_reply_queue function in the dispatch_mach_xpc_hooks_s * structure, which is called with a single argument whose value is the * do_ctxt field of the message argument to this function. The reply message is - * delivered to the dmxh_async_reply_handler hook function instead of being + * delivered to the dmxh_async_reply_handler hook function instead of being * passed to the channel event handler. 
* * If the dmxh_msg_context_reply_queue function is not implemented or returns diff --git a/private/private.h b/private/private.h index ed9f876cc..df4aba51e 100644 --- a/private/private.h +++ b/private/private.h @@ -28,7 +28,12 @@ #define __DISPATCH_PRIVATE__ #ifdef __APPLE__ +#include +#include #include +#include +#elif defined(__linux__) +#include #endif #if TARGET_OS_MAC @@ -39,9 +44,6 @@ #if HAVE_UNISTD_H #include #endif -#if HAVE_SYS_CDEFS_H -#include -#endif #include #if TARGET_OS_MAC #include @@ -56,6 +58,7 @@ #include #include +#include #include #if DISPATCH_MACH_SPI #include @@ -63,13 +66,13 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ - #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20170124 // Keep in sync with +#if DISPATCH_API_VERSION != 20180109 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif @@ -208,7 +211,7 @@ _dispatch_main_queue_callback_4CF(void *_Null_unspecified msg); API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_t +dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); @@ -218,15 +221,10 @@ DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); -#ifdef __BLOCKS__ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_queue_t -_dispatch_network_root_queue_create_4NW(const char *_Nullable label, - const pthread_attr_t *_Nullable attrs, - dispatch_block_t _Nullable configure); -#endif +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_source_will_reenable_kevent_4NW(dispatch_source_t source); #endif 
 API_AVAILABLE(macos(10.9), ios(7.0))
diff --git a/private/queue_private.h b/private/queue_private.h
index 2b50eb891..4b915fbf2 100644
--- a/private/queue_private.h
+++ b/private/queue_private.h
@@ -47,6 +47,155 @@ enum {
 	DISPATCH_QUEUE_OVERCOMMIT = 0x2ull,
 };
 
+
+/*!
+ * @function dispatch_set_qos_class
+ *
+ * @abstract
+ * Sets the QOS class on a dispatch queue, source or mach channel.
+ *
+ * @discussion
+ * This is equivalent to using dispatch_queue_attr_make_with_qos_class()
+ * when creating a dispatch queue, but is available on additional dispatch
+ * object types.
+ *
+ * When configured in this manner, the specified QOS class will be used over
+ * the assigned QOS of workitems submitted asynchronously to this object,
+ * unless the workitem has been created with ENFORCE semantics
+ * (see DISPATCH_BLOCK_ENFORCE_QOS_CLASS).
+ *
+ * Calling this function will supersede any prior calls to
+ * dispatch_set_qos_class() or dispatch_set_qos_class_floor().
+ *
+ * @param object
+ * A dispatch queue, source or mach channel to configure.
+ * The object must be inactive, and can't be a workloop.
+ *
+ * Passing another object type or an object that has been activated is undefined
+ * and will cause the process to be terminated.
+ *
+ * @param qos_class
+ * A QOS class value:
+ * - QOS_CLASS_USER_INTERACTIVE
+ * - QOS_CLASS_USER_INITIATED
+ * - QOS_CLASS_DEFAULT
+ * - QOS_CLASS_UTILITY
+ * - QOS_CLASS_BACKGROUND
+ * Passing any other value is undefined.
+ *
+ * @param relative_priority
+ * A relative priority within the QOS class. This value is a negative
+ * offset from the maximum supported scheduler priority for the given class.
+ * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY
+ * is undefined.
+ */
+API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0))
+DISPATCH_EXPORT DISPATCH_NOTHROW
+void
+dispatch_set_qos_class(dispatch_object_t object,
+		dispatch_qos_class_t qos_class, int relative_priority);
+
+/*!
+ * @function dispatch_set_qos_class_floor + * + * @abstract + * Sets the QOS class floor on a dispatch queue, source, workloop or mach + * channel. + * + * @discussion + * The QOS class of workitems submitted to this object asynchronously will be + * elevated to at least the specified QOS class floor. + * Unlike dispatch_set_qos_class(), the QOS of the workitem will be used if + * higher than the floor even when the workitem has been created without + * "ENFORCE" semantics. + * + * Setting the QOS class floor is equivalent to the QOS effects of configuring + * a target queue whose QOS class has been set with dispatch_set_qos_class(). + * + * Calling this function will supersede any prior calls to + * dispatch_set_qos_class() or dispatch_set_qos_class_floor(). + * + * @param object + * A dispatch queue, workloop, source or mach channel to configure. + * The object must be inactive. + * + * Passing another object type or an object that has been activated is undefined + * and will cause the process to be terminated. + * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * Passing any other value is undefined. + * + * @param relative_priority + * A relative priority within the QOS class. This value is a negative + * offset from the maximum supported scheduler priority for the given class. + * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY + * is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_set_qos_class_floor(dispatch_object_t object, + dispatch_qos_class_t qos_class, int relative_priority); + +/*! + * @function dispatch_set_qos_class_fallback + * + * @abstract + * Sets the fallback QOS class on a dispatch queue, source, workloop or mach + * channel. 
+ * + * @discussion + * Workitems submitted asynchronously to this object that don't have an assigned + * QOS class will use the specified QOS class as a fallback. This interface + * doesn't support relative priority. + * + * Workitems without an assigned QOS are: + * - workitems submitted from the context of a thread opted-out of QOS, + * - workitems created with the DISPATCH_BLOCK_DETACHED or + * DISPATCH_BLOCK_NO_QOS_CLASS flags, + * - XPC messages sent with xpc_connection_send_notification(), + * - XPC connection and dispatch source handlers. + * + * Calling both dispatch_set_qos_class_fallback() and dispatch_set_qos_class() + * on an object will only apply the effect of dispatch_set_qos_class(). + * + * A QOS class fallback must always be at least as high as the current QOS + * floor for the dispatch queue hierarchy, else it is ignored. + * + * When no QOS fallback has been explicitly specified: + * - queues on hierarchies without a QOS class or QOS class floor have + * a fallback of QOS_CLASS_DEFAULT, + * - queues on hierarchies with a QOS class or QOS class floor configured will + * also use that QOS class as a fallback. + * + * @param object + * A dispatch queue, workloop, source or mach channel to configure. + * The object must be inactive. + * + * Passing another object type or an object that has been activated is undefined + * and will cause the process to be terminated. + * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * Passing any other value is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_set_qos_class_fallback(dispatch_object_t object, + dispatch_qos_class_t qos_class); + #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) /*! 
@@ -63,7 +212,7 @@ enum { * * It is recommended to not specify a target queue at all when using this * attribute and to use dispatch_queue_attr_make_with_qos_class() to select the - * appropriate QoS class instead. + * appropriate QOS class instead. * * Queues created with this attribute cannot change target after having been * activated. See dispatch_set_target_queue() and dispatch_activate(). @@ -129,7 +278,8 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void -dispatch_queue_set_label_nocopy(dispatch_queue_t queue, const char *label); +dispatch_queue_set_label_nocopy(dispatch_queue_t queue, + const char * _Nullable label); /*! * @function dispatch_queue_set_width @@ -167,7 +317,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_queue_set_width(dispatch_queue_t dq, long width); -#ifdef __BLOCKS__ +#if defined(__BLOCKS__) && defined(__APPLE__) /*! * @function dispatch_pthread_root_queue_create * @@ -222,13 +372,13 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -API_AVAILABLE(macos(10.9), ios(6.0)) +API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_t +dispatch_queue_global_t dispatch_pthread_root_queue_create(const char *_Nullable label, - unsigned long flags, const pthread_attr_t *_Nullable attr, - dispatch_block_t _Nullable configure); + unsigned long flags, const pthread_attr_t *_Nullable attr, + dispatch_block_t _Nullable configure); /*! * @function dispatch_pthread_root_queue_flags_pool_size @@ -258,8 +408,6 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) (unsigned long)pool_size); } -#endif /* __BLOCKS__ */ - /*! 
* @function dispatch_pthread_root_queue_copy_current * @@ -272,8 +420,9 @@ dispatch_pthread_root_queue_flags_pool_size(uint8_t pool_size) * A new reference to a pthread root queue object or NULL. */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW -dispatch_queue_t _Nullable +dispatch_queue_global_t _Nullable dispatch_pthread_root_queue_copy_current(void); /*! @@ -287,6 +436,8 @@ dispatch_pthread_root_queue_copy_current(void); */ #define DISPATCH_APPLY_CURRENT_ROOT_QUEUE ((dispatch_queue_t _Nonnull)0) +#endif /* defined(__BLOCKS__) && defined(__APPLE__) */ + /*! * @function dispatch_async_enforce_qos_class_f * @@ -321,14 +472,13 @@ API_AVAILABLE(macos(10.11), ios(9.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); - + void *_Nullable context, dispatch_function_t work); #ifdef __ANDROID__ /*! * @function _dispatch_install_thread_detach_callback * - * @param callback + * @param cb * Function to be called before each worker thread exits to detach JVM. * * Hook to be able to detach threads from the Java JVM before they exit. diff --git a/private/source_private.h b/private/source_private.h index ad22e6a6a..56e9213e1 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -56,6 +56,19 @@ __BEGIN_DECLS * * The handle is the interval value in milliseconds or frames. * The mask specifies which flags from dispatch_source_timer_flags_t to apply. + * + * Starting with macOS 10.14, iOS 12, dispatch_source_set_timer() + * can be used on such sources, and its arguments are used as follow: + * - start: + * must be DISPATCH_TIME_NOW or DISPATCH_TIME_FOREVER. 
+ * DISPATCH_TIME_NOW will enable the timer, and align its phase, and + * DISPATCH_TIME_FOREVER will disable the timer as usual.* + * - interval: + * its unit is in milliseconds by default, or frames if the source + * was created with the DISPATCH_INTERVAL_UI_ANIMATION flag. + * - leeway: + * per-thousands of the interval (valid values range from 0 to 1000). + * If ~0ull is passed, the default leeway for the interval is used instead. */ #define DISPATCH_SOURCE_TYPE_INTERVAL (&_dispatch_source_type_interval) API_AVAILABLE(macos(10.9), ios(7.0)) @@ -240,6 +253,41 @@ enum { DISPATCH_VFS_DESIREDDISK = 0x4000, }; +/*! + * @enum dispatch_clockid_t + * + * @discussion + * These values can be used with DISPATCH_SOURCE_TYPE_TIMER as a "handle" + * to anchor the timer to a given clock which allows for various optimizations. + * + * Note that using an explicit clock will make the dispatch source "strict" + * like dispatch_source_set_mandatory_cancel_handler() does. + * + * @constant DISPATCH_CLOCKID_UPTIME + * A monotonic clock that doesn't tick while the machine is asleep. + * Equivalent to the CLOCK_UPTIME clock ID on BSD systems. + * + * @constant DISPATCH_CLOCKID_MONOTONIC + * A monotonic clock that ticks while the machine sleeps. + * Equivalent to POSIX CLOCK_MONOTONIC. + * (Note that on Linux, CLOCK_MONOTONIC isn't conformant and doesn't tick while + * sleeping, hence on Linux this is the same clock as CLOCK_BOOTTIME). + * + * @constant DISPATCH_CLOCKID_WALLTIME + * A clock equivalent to the wall clock time, as returned by gettimeofday(). + * Equivalent to POSIX CLOCK_REALTIME. + * + * @constant DISPATCH_CLOCKID_REALTIME + * An alias for DISPATCH_CLOCKID_WALLTIME to match the POSIX clock of the + * same name. 
+ */ +DISPATCH_ENUM(dispatch_clockid, uintptr_t, + DISPATCH_CLOCKID_UPTIME DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 1, + DISPATCH_CLOCKID_MONOTONIC DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 2, + DISPATCH_CLOCKID_WALLTIME DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 3, + DISPATCH_CLOCKID_REALTIME DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 3, +); + /*! * @enum dispatch_source_timer_flags_t * @@ -356,7 +404,7 @@ enum { DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x10, DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x20, - + DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0xf0000000, }; diff --git a/private/time_private.h b/private/time_private.h new file mode 100644 index 000000000..ae341e6d6 --- /dev/null +++ b/private/time_private.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 20017 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases. 
Any applications relying on + * these interfaces WILL break. + */ + +#ifndef __DISPATCH_TIME_PRIVATE__ +#define __DISPATCH_TIME_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/* + * @constant DISPATCH_MONOTONICTIME_NOW + * A dispatch_time_t value that corresponds to the current value of the + * platform's monotonic clock. On Apple platforms, this clock is based on + * mach_continuous_time(). Use this value with the dispatch_time() function to + * derive a time value for a timer in monotonic time (i.e. a timer that + * continues to tick while the system is asleep). For example: + * + * dispatch_time_t t = dispatch_time(DISPATCH_MONOTONICTIME_NOW,5*NSEC_PER_SEC); + * dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, + * 0, 0, q); + * dispatch_source_set_event_handler(ds, ^{ ... }); + * dispatch_source_set_timer(ds, t, 10 * NSEC_PER_SEC, 0); + * dispatch_activate(ds); + */ +enum { + DISPATCH_MONOTONICTIME_NOW DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = (1ull << 63) +}; + +#ifdef __APPLE__ + +// Helper macros for up time, monotonic time and wall time. 
+#define _dispatch_uptime_after_nsec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t)) +#define _dispatch_uptime_after_usec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_uptime_after_msec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_uptime_after_sec(t) \ + dispatch_time(DISPATCH_TIME_NOW, (t) * NSEC_PER_SEC) + +#define _dispatch_monotonictime_after_nsec(t) \ + dispatch_time(DISPATCH_MONOTONICTIME_NOW, (t)) +#define _dispatch_monotonictime_after_usec(t) \ + dispatch_time(DISPATCH_MONOTONICTIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_monotonictime_after_msec(t) \ + dispatch_time(DISPATCH_MONOTONICTIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_monotonictime_after_sec(t) \ + dispatch_time(DISPATCH_MONOTONICTIME_NOW, (t) * NSEC_PER_SEC) + +#define _dispatch_walltime_after_nsec(t) \ + dispatch_time(DISPATCH_WALLTIME_NOW, (t)) +#define _dispatch_walltime_after_usec(t) \ + dispatch_time(DISPATCH_WALLTIME_NOW, (t) * NSEC_PER_USEC) +#define _dispatch_walltime_after_msec(t) \ + dispatch_time(DISPATCH_WALLTIME_NOW, (t) * NSEC_PER_MSEC) +#define _dispatch_walltime_after_sec(t) \ + dispatch_time(DISPATCH_WALLTIME_NOW, (t) * NSEC_PER_SEC) + +#endif // __APPLE__ + +#endif + diff --git a/private/workloop_private.h b/private/workloop_private.h new file mode 100644 index 000000000..01273217e --- /dev/null +++ b/private/workloop_private.h @@ -0,0 +1,441 @@ +/* + * Copyright (c) 2017-2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __DISPATCH_WORKLOOP_PRIVATE__ +#define __DISPATCH_WORKLOOP_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +/******************************************************************************\ + * + * THIS FILE IS AN IN-PROGRESS INTERFACE THAT IS SUBJECT TO CHANGE + * PLEASE REACH-OUT TO gcd@group.apple.com BEFORE ADOPTING ANY INTERFACE + * +\******************************************************************************/ + +DISPATCH_ASSUME_NONNULL_BEGIN + +__BEGIN_DECLS + +/*! + * @typedef dispatch_workloop_t + * + * @abstract + * Dispatch workloops invoke workitems submitted to them in priority order. + * + * @discussion + * A dispatch workloop is a flavor of dispatch_queue_t that is a priority + * ordered queue (using the QOS class of the submitted workitems as the + * ordering). + * + * Between each workitem invocation, the workloop will evaluate whether higher + * priority workitems have since been submitted and execute these first. + * + * Serial queues targeting a workloop maintain FIFO execution of their + * workitems. However, the workloop may reorder workitems submitted to + * independent serial queues targeting it with respect to each other, + * based on their priorities. + * + * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed + * to all APIs accepting a dispatch queue, except for functions from the + * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop + * objects. 
Functions from the dispatch_sync() family on queues targeting + * a workloop are still permitted but discouraged for performance reasons. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_workloop_s *dispatch_workloop_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); +#endif + +/*! + * @function dispatch_workloop_create + * + * @abstract + * Creates a new dispatch workloop to which workitems may be submitted. + * + * @param label + * A string label to attach to the workloop. + * + * @result + * The newly created dispatch workloop. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_workloop_t +dispatch_workloop_create(const char *_Nullable label); + +/*! + * @function dispatch_workloop_create_inactive + * + * @abstract + * Creates a new inactive dispatch workloop that can be setup and then + * activated. + * + * @discussion + * Creating an inactive workloop allows for it to receive further configuration + * before it is activated, and workitems can be submitted to it. + * + * Submitting workitems to an inactive workloop is undefined and will cause the + * process to be terminated. + * + * @param label + * A string label to attach to the workloop. + * + * @result + * The newly created dispatch workloop. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_workloop_t +dispatch_workloop_create_inactive(const char *_Nullable label); + +/*! + * @function dispatch_workloop_set_autorelease_frequency + * + * @abstract + * Sets the autorelease frequency of the workloop. + * + * @discussion + * See dispatch_queue_attr_make_with_autorelease_frequency(). + * The default policy for a workloop is + * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM. 
+ * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + * + * @param frequency + * The requested autorelease frequency. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, + dispatch_autorelease_frequency_t frequency); + +DISPATCH_ENUM(dispatch_workloop_param_flags, uint64_t, + DISPATCH_WORKLOOP_NONE DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x0, + DISPATCH_WORKLOOP_FIXED_PRIORITY DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x1, +); + +/*! + * @function dispatch_workloop_set_qos_class_floor + * + * @abstract + * Sets the QOS class floor of a workloop. + * + * @discussion + * See dispatch_set_qos_class_floor(). + * + * This function is strictly equivalent to dispatch_set_qos_class_floor() but + * allows to pass extra flags. + * + * Using both dispatch_workloop_set_scheduler_priority() and + * dispatch_set_qos_class_floor() or dispatch_workloop_set_qos_class_floor() + * is undefined and will cause the process to be terminated. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_qos_class_floor(dispatch_workloop_t workloop, + qos_class_t qos, int relpri, dispatch_workloop_param_flags_t flags); + +/*! + * @function dispatch_workloop_set_scheduler_priority + * + * @abstract + * Sets the scheduler priority for a dispatch workloop. 
+ * + * @discussion + * This sets the scheduler priority of the threads that the runtime will bring + * up to service this workloop. + * + * QOS propagation still functions on these workloops, but its effect on the + * priority of the thread brought up to service this workloop is ignored. + * + * Using both dispatch_workloop_set_scheduler_priority() and + * dispatch_set_qos_class_floor() or dispatch_workloop_set_qos_class_floor() + * is undefined and will cause the process to be terminated. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_scheduler_priority(dispatch_workloop_t workloop, + int priority, dispatch_workloop_param_flags_t flags); + +/*! + * @function dispatch_workloop_set_cpupercent + * + * @abstract + * Sets the cpu percent and refill attributes for a dispatch workloop. + * + * @discussion + * This should only be used if the workloop was also setup with the + * DISPATCH_WORKLOOP_FIXED_PRIORITY flag as a safe guard against + * busy loops that could starve the rest of the system forever. + * + * If DISPATCH_WORKLOOP_FIXED_PRIORITY wasn't passed, using this function is + * undefined and will cause the process to be terminated. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_cpupercent(dispatch_workloop_t workloop, uint8_t percent, + uint32_t refillms); + +/*! 
+ * @function dispatch_workloop_is_current() + * + * @abstract + * Returns whether the current thread has been made by the runtime to service + * this workloop. + * + * @discussion + * Note that when using dispatch_async_and_wait(workloop, ^{ ... }) + * then workloop will be seen as the "current" one by the submitted + * workitem, but that is not the case when using dispatch_sync() on a queue + * targeting the workloop. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +bool +dispatch_workloop_is_current(dispatch_workloop_t workloop); + +/*! + * @function dispatch_workloop_copy_current() + * + * @abstract + * Returns a copy of the workloop that is being serviced on the calling thread + * if any. + * + * @discussion + * If the thread is not a workqueue thread, or is not servicing a dispatch + * workloop, then NULL is returned. + * + * This returns a retained object that must be released with dispatch_release(). + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_NOTHROW +dispatch_workloop_t _Nullable +dispatch_workloop_copy_current(void); + +// Equivalent to dispatch_workloop_set_qos_class_floor(workloop, qos, 0, flags) +API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_qos_class_floor", + macos(10.14,10.14), ios(12.0,12.0), tvos(12.0,12.0), watchos(5.0,5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_qos_class(dispatch_workloop_t workloop, + qos_class_t qos, dispatch_workloop_param_flags_t flags); + +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +bool +_dispatch_workloop_should_yield_4NW(void); + +/*! + * @function dispatch_async_and_wait + * + * @abstract + * Submits a block for synchronous execution on a dispatch queue. 
+ * + * @discussion + * Submits a workitem to a dispatch queue like dispatch_async(), however + * dispatch_async_and_wait() will not return until the workitem has finished. + * + * Like functions of the dispatch_sync family, dispatch_async_and_wait() is + * subject to dead-lock (See dispatch_sync() for details). + * + * However, dispatch_async_and_wait() differs from functions of the + * dispatch_sync family in two fundamental ways: how it respects queue + * attributes and how it chooses the execution context invoking the workitem. + * + * Differences with dispatch_sync() + * + * Work items submitted to a queue with dispatch_async_and_wait() observe all + * queue attributes of that queue when invoked (including autorelease frequency + * or QOS class). + * + * When the runtime has brought up a thread to invoke the asynchronous workitems + * already submitted to the specified queue, that servicing thread will also be + * used to execute synchronous work submitted to the queue with + * dispatch_async_and_wait(). + * + * However, if the runtime has not brought up a thread to service the specified + * queue (because it has no workitems enqueued, or only synchronous workitems), + * then dispatch_async_and_wait() will invoke the workitem on the calling thread, + * similar to the behaviour of functions in the dispatch_sync family. + * + * As an exception, if the queue the work is submitted to doesn't target + * a global concurrent queue (for example because it targets the main queue), + * then the workitem will never be invoked by the thread calling + * dispatch_async_and_wait(). + * + * In other words, dispatch_async_and_wait() is similar to submitting + * a dispatch_block_create()d workitem to a queue and then waiting on it, as + * shown in the code example below. 
However, dispatch_async_and_wait() is + * significantly more efficient when a new thread is not required to execute + * the workitem (as it will use the stack of the submitting thread instead of + * requiring heap allocations). + * + * + * dispatch_block_t b = dispatch_block_create(0, block); + * dispatch_async(queue, b); + * dispatch_block_wait(b, DISPATCH_TIME_FOREVER); + * Block_release(b); + * + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to be invoked on the target dispatch queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_async_and_wait(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); +#endif + +/*! + * @function dispatch_async_and_wait_f + * + * @abstract + * Submits a function for synchronous execution on a dispatch queue. + * + * @discussion + * See dispatch_async_and_wait() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_async_and_wait_f(). + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_async_and_wait_f(dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); + +/*! 
+ * @function dispatch_barrier_async_and_wait + * + * @abstract + * Submits a block for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks + * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT + * queues). + * + * @param queue + * The target dispatch queue to which the block is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param work + * The application-defined block to invoke on the target queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_async_and_wait(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); +#endif + +/*! + * @function dispatch_barrier_async_and_wait_f + * + * @abstract + * Submits a function for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but + * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT + * queues). + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_barrier_async_and_wait_f(). + * The result of passing NULL in this parameter is undefined. 
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); + +__END_DECLS + +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 2ec2691fc..4d4bb2e55 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -69,7 +69,7 @@ if(HAVE_OBJC) data.m object.m) endif() -if(CMAKE_SWIFT_COMPILER) +if(ENABLE_SWIFT) set(swift_optimization_flags) if(CMAKE_BUILD_TYPE MATCHES Release) set(swift_optimization_flags -O) @@ -99,6 +99,8 @@ if(CMAKE_SWIFT_COMPILER) SWIFT_FLAGS -I ${CMAKE_SOURCE_DIR} ${swift_optimization_flags}) + add_dependencies(swiftDispatch + module-map-symlinks) target_sources(dispatch PRIVATE swift/DispatchStubs.cc @@ -200,5 +202,12 @@ add_custom_command(TARGET dispatch POST_BUILD install(TARGETS dispatch DESTINATION - "${CMAKE_INSTALL_FULL_LIBDIR}") + "${CMAKE_INSTALL_LIBDIR}") +if(ENABLE_SWIFT) + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc + DESTINATION + "${CMAKE_INSTALL_LIBDIR}/swift/${SWIFT_OS}/${CMAKE_SYSTEM_PROCESSOR}") +endif() diff --git a/src/Makefile.am b/src/Makefile.am index 8beaf1e85..58dcead4b 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -188,7 +188,7 @@ SWIFT_GEN_FILES= \ $(abs_builddir)/swift/Dispatch.swiftdoc \ $(SWIFT_OBJ_FILES) -swiftmoddir=${prefix}/lib/swift/${OS_STRING}/${host_cpu} +swiftmoddir=${prefix}/lib/swift/${OS_STRING}/${target_cpu} swiftmod_HEADERS=\ $(abs_builddir)/swift/Dispatch.swiftmodule \ $(abs_builddir)/swift/Dispatch.swiftdoc diff --git a/src/allocator.c b/src/allocator.c index e6ea77217..66284090f 100644 --- a/src/allocator.c +++ b/src/allocator.c @@ -41,7 +41,7 @@ // // If something goes wrong here, the symptom would be a NULL dereference // in alloc_continuation_from_heap or _magazine when 
derefing the magazine ptr. -static dispatch_heap_t _dispatch_main_heap; +DISPATCH_GLOBAL(dispatch_heap_t _dispatch_main_heap); DISPATCH_ALWAYS_INLINE static void @@ -112,11 +112,11 @@ get_cont_and_indices_for_bitmap_and_index(bitmap_t *bitmap, unsigned int bindex = mindex % BITMAPS_PER_SUPERMAP; unsigned int sindex = mindex / BITMAPS_PER_SUPERMAP; dispatch_assert(&m->maps[sindex][bindex] == bitmap); - if (fastpath(continuation_out)) { + if (likely(continuation_out)) { *continuation_out = continuation_address(m, sindex, bindex, index); } - if (fastpath(supermap_out)) *supermap_out = supermap_address(m, sindex); - if (fastpath(bitmap_index_out)) *bitmap_index_out = bindex; + if (likely(supermap_out)) *supermap_out = supermap_address(m, sindex); + if (likely(bitmap_index_out)) *bitmap_index_out = bindex; } DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST @@ -144,14 +144,14 @@ get_maps_and_indices_for_continuation(dispatch_continuation_t c, padded_continuation *p = (padded_continuation *)c; struct dispatch_magazine_s *m = magazine_for_continuation(c); #if PACK_FIRST_PAGE_WITH_CONTINUATIONS - if (fastpath(continuation_is_in_first_page(c))) { + if (likely(continuation_is_in_first_page(c))) { cindex = (unsigned int)(p - m->fp_conts); index = cindex % CONTINUATIONS_PER_BITMAP; mindex = cindex / CONTINUATIONS_PER_BITMAP; - if (fastpath(supermap_out)) *supermap_out = NULL; - if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; - if (fastpath(bitmap_out)) *bitmap_out = &m->fp_maps[mindex]; - if (fastpath(index_out)) *index_out = index; + if (likely(supermap_out)) *supermap_out = NULL; + if (likely(bitmap_index_out)) *bitmap_index_out = mindex; + if (likely(bitmap_out)) *bitmap_out = &m->fp_maps[mindex]; + if (likely(index_out)) *index_out = index; return; } #endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS @@ -159,10 +159,10 @@ get_maps_and_indices_for_continuation(dispatch_continuation_t c, sindex = cindex / (BITMAPS_PER_SUPERMAP * CONTINUATIONS_PER_BITMAP); mindex = (cindex / 
CONTINUATIONS_PER_BITMAP) % BITMAPS_PER_SUPERMAP; index = cindex % CONTINUATIONS_PER_BITMAP; - if (fastpath(supermap_out)) *supermap_out = &m->supermaps[sindex]; - if (fastpath(bitmap_index_out)) *bitmap_index_out = mindex; - if (fastpath(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex]; - if (fastpath(index_out)) *index_out = index; + if (likely(supermap_out)) *supermap_out = &m->supermaps[sindex]; + if (likely(bitmap_index_out)) *bitmap_index_out = mindex; + if (likely(bitmap_out)) *bitmap_out = &m->maps[sindex][mindex]; + if (likely(index_out)) *index_out = index; } // Base address of page, or NULL if this page shouldn't be madvise()d @@ -170,17 +170,17 @@ DISPATCH_ALWAYS_INLINE_NDEBUG DISPATCH_CONST static void * madvisable_page_base_for_continuation(dispatch_continuation_t c) { - if (fastpath(continuation_is_in_first_page(c))) { + if (likely(continuation_is_in_first_page(c))) { return NULL; } void *page_base = (void *)((uintptr_t)c & ~(uintptr_t)DISPATCH_ALLOCATOR_PAGE_MASK); #if DISPATCH_DEBUG struct dispatch_magazine_s *m = magazine_for_continuation(c); - if (slowpath(page_base < (void *)&m->conts)) { + if (unlikely(page_base < (void *)&m->conts)) { DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too low"); } - if (slowpath(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] + if (unlikely(page_base > (void *)&m->conts[SUPERMAPS_PER_MAGAZINE-1] [BITMAPS_PER_SUPERMAP-1][CONTINUATIONS_PER_BITMAP-1])) { DISPATCH_INTERNAL_CRASH(page_base, "madvisable continuation too high"); } @@ -254,7 +254,7 @@ bitmap_clear_bit(volatile bitmap_t *bitmap, unsigned int index, bitmap_t b; if (exclusively == CLEAR_EXCLUSIVELY) { - if (slowpath((*bitmap & mask) == 0)) { + if (unlikely((*bitmap & mask) == 0)) { DISPATCH_CLIENT_CRASH(*bitmap, "Corruption: failed to clear bit exclusively"); } @@ -299,12 +299,12 @@ alloc_continuation_from_first_page(struct dispatch_magazine_s *magazine) // TODO: unroll if this is hot? 
for (i = 0; i < FULL_BITMAPS_IN_FIRST_PAGE; i++) { index = bitmap_set_first_unset_bit(&magazine->fp_maps[i]); - if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + if (likely(index != NO_BITS_WERE_UNSET)) goto found; } if (REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE) { index = bitmap_set_first_unset_bit_upto_index(&magazine->fp_maps[i], REMAINDERED_CONTINUATIONS_IN_FIRST_PAGE - 1); - if (fastpath(index != NO_BITS_WERE_UNSET)) goto found; + if (likely(index != NO_BITS_WERE_UNSET)) goto found; } return NULL; @@ -348,7 +348,7 @@ _dispatch_alloc_try_create_heap(dispatch_heap_t *heap_ptr) mach_vm_size_t vm_size = MAGAZINES_PER_HEAP * BYTES_PER_MAGAZINE; mach_vm_offset_t vm_mask = ~MAGAZINE_MASK; mach_vm_address_t vm_addr = vm_page_size; - while (slowpath(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + while (unlikely(kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, vm_mask, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH), MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT))) { @@ -443,7 +443,7 @@ _dispatch_alloc_continuation_from_heap(dispatch_heap_t heap) #if PACK_FIRST_PAGE_WITH_CONTINUATIONS // First try the continuations in the first page for this CPU cont = alloc_continuation_from_first_page(&(heap[cpu_number])); - if (fastpath(cont)) { + if (likely(cont)) { return cont; } #endif @@ -460,11 +460,11 @@ _dispatch_alloc_continuation_from_heap_slow(void) dispatch_continuation_t cont; for (;;) { - if (!fastpath(*heap)) { + if (unlikely(!*heap)) { _dispatch_alloc_try_create_heap(heap); } cont = _dispatch_alloc_continuation_from_heap(*heap); - if (fastpath(cont)) { + if (likely(cont)) { return cont; } // If we have tuned our parameters right, 99.999% of apps should @@ -489,16 +489,16 @@ _dispatch_alloc_continuation_alloc(void) { dispatch_continuation_t cont; - if (fastpath(_dispatch_main_heap)) { + if (likely(_dispatch_main_heap)) { // Start looking in the same page where we found a continuation // last time. 
bitmap_t *last = last_found_page(); - if (fastpath(last)) { + if (likely(last)) { unsigned int i; for (i = 0; i < BITMAPS_PER_PAGE; i++) { bitmap_t *cur = last + i; unsigned int index = bitmap_set_first_unset_bit(cur); - if (fastpath(index != NO_BITS_WERE_UNSET)) { + if (likely(index != NO_BITS_WERE_UNSET)) { bitmap_t *supermap; unsigned int bindex; get_cont_and_indices_for_bitmap_and_index(cur, @@ -511,7 +511,7 @@ _dispatch_alloc_continuation_alloc(void) } cont = _dispatch_alloc_continuation_from_heap(_dispatch_main_heap); - if (fastpath(cont)) { + if (likely(cont)) { return cont; } } @@ -579,14 +579,15 @@ _dispatch_alloc_continuation_free(dispatch_continuation_t c) bitmap_t *b, *s; unsigned int b_idx, idx; + c->dc_flags = 0; get_maps_and_indices_for_continuation(c, &s, &b_idx, &b, &idx); bool bitmap_now_empty = bitmap_clear_bit(b, idx, CLEAR_EXCLUSIVELY); - if (slowpath(s)) { + if (unlikely(s)) { (void)bitmap_clear_bit(s, b_idx, CLEAR_NONEXCLUSIVELY); } // We only try to madvise(2) pages outside of the first page. // (Allocations in the first page do not have a supermap entry.) - if (slowpath(bitmap_now_empty) && slowpath(s)) { + if (unlikely(bitmap_now_empty && s)) { return _dispatch_alloc_maybe_madvise_page(c); } } @@ -594,60 +595,90 @@ _dispatch_alloc_continuation_free(dispatch_continuation_t c) #pragma mark - #pragma mark dispatch_alloc_init -#if DISPATCH_DEBUG +#if DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG static void _dispatch_alloc_init(void) { // Double-check our math. These are all compile time checks and don't // generate code. 
- dispatch_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP); - dispatch_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP); - dispatch_assert(sizeof(struct dispatch_magazine_header_s) == + dispatch_static_assert(sizeof(bitmap_t) == BYTES_PER_BITMAP); + dispatch_static_assert(sizeof(bitmap_t) == BYTES_PER_SUPERMAP); + dispatch_static_assert(sizeof(struct dispatch_magazine_header_s) == SIZEOF_HEADER); - dispatch_assert(sizeof(struct dispatch_continuation_s) <= + dispatch_static_assert(sizeof(struct dispatch_continuation_s) <= DISPATCH_CONTINUATION_SIZE); // Magazines should be the right size, so they pack neatly into an array of // heaps. - dispatch_assert(sizeof(struct dispatch_magazine_s) == BYTES_PER_MAGAZINE); + dispatch_static_assert(sizeof(struct dispatch_magazine_s) == + BYTES_PER_MAGAZINE); // The header and maps sizes should match what we computed. - dispatch_assert(SIZEOF_HEADER == + dispatch_static_assert(SIZEOF_HEADER == sizeof(((struct dispatch_magazine_s *)0x0)->header)); - dispatch_assert(SIZEOF_MAPS == + dispatch_static_assert(SIZEOF_MAPS == sizeof(((struct dispatch_magazine_s *)0x0)->maps)); // The main array of continuations should start at the second page, // self-aligned. - dispatch_assert(offsetof(struct dispatch_magazine_s, conts) % + dispatch_static_assert(offsetof(struct dispatch_magazine_s, conts) % (CONTINUATIONS_PER_BITMAP * DISPATCH_CONTINUATION_SIZE) == 0); - dispatch_assert(offsetof(struct dispatch_magazine_s, conts) == + dispatch_static_assert(offsetof(struct dispatch_magazine_s, conts) == DISPATCH_ALLOCATOR_PAGE_SIZE); #if PACK_FIRST_PAGE_WITH_CONTINUATIONS // The continuations in the first page should actually fit within the first // page. 
- dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) < + dispatch_static_assert(offsetof(struct dispatch_magazine_s, fp_conts) < DISPATCH_ALLOCATOR_PAGE_SIZE); - dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) % + dispatch_static_assert(offsetof(struct dispatch_magazine_s, fp_conts) % DISPATCH_CONTINUATION_SIZE == 0); - dispatch_assert(offsetof(struct dispatch_magazine_s, fp_conts) + + dispatch_static_assert(offsetof(struct dispatch_magazine_s, fp_conts) + sizeof(((struct dispatch_magazine_s *)0x0)->fp_conts) == - DISPATCH_ALLOCATOR_PAGE_SIZE); + DISPATCH_ALLOCATOR_PAGE_SIZE); #endif // PACK_FIRST_PAGE_WITH_CONTINUATIONS // Make sure our alignment will be correct: that is, that we are correctly // aligning to both. - dispatch_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == + dispatch_static_assert(ROUND_UP_TO_BITMAP_ALIGNMENT(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)); - dispatch_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == + dispatch_static_assert(ROUND_UP_TO_CONTINUATION_SIZE(ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)) == ROUND_UP_TO_BITMAP_ALIGNMENT_AND_CONTINUATION_SIZE(1)); } -#elif (DISPATCH_ALLOCATOR && DISPATCH_CONTINUATION_MALLOC) \ - || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) -static inline void _dispatch_alloc_init(void) {} -#endif +#endif // DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG + +kern_return_t +_dispatch_allocator_enumerate(task_t remote_task, + const struct dispatch_allocator_layout_s *remote_dal, + vm_address_t zone_address, memory_reader_t reader, + void (^recorder)(vm_address_t, void *, size_t, bool *stop)) +{ + const size_t heap_size = remote_dal->dal_magazine_size; + const size_t dc_size = remote_dal->dal_allocation_size; + const size_t dc_flags_offset = remote_dal->dal_allocation_isa_offset; + bool stop = false; + 
void *heap; + + while (zone_address) { + // FIXME: improve this by not faulting everything and driving it through + // the bitmap. + kern_return_t kr = reader(remote_task, zone_address, heap_size, &heap); + size_t offs = remote_dal->dal_first_allocation_offset; + if (kr) return kr; + while (offs < heap_size) { + void *isa = *(void **)(heap + offs + dc_flags_offset); + if (isa && isa != remote_dal->dal_deferred_free_isa) { + recorder(zone_address + offs, heap + offs, dc_size, &stop); + if (stop) return KERN_SUCCESS; + } + offs += dc_size; + } + zone_address = (vm_address_t)((dispatch_heap_t)heap)->header.dh_next; + } + + return KERN_SUCCESS; +} #endif // DISPATCH_ALLOCATOR @@ -677,8 +708,8 @@ static dispatch_continuation_t _dispatch_malloc_continuation_alloc(void) { dispatch_continuation_t dc; - while (!(dc = fastpath(calloc(1, - ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)))))) { + size_t alloc_size = ROUND_UP_TO_CACHELINE_SIZE(sizeof(*dc)); + while (unlikely(!(dc = calloc(1, alloc_size)))) { _dispatch_temporary_resource_shortage(); } return dc; @@ -696,12 +727,10 @@ _dispatch_malloc_continuation_free(dispatch_continuation_t c) #if DISPATCH_ALLOCATOR #if DISPATCH_CONTINUATION_MALLOC -#if DISPATCH_USE_NANOZONE -extern boolean_t malloc_engaged_nano(void); -#else +#if !DISPATCH_USE_NANOZONE #define malloc_engaged_nano() false -#endif // DISPATCH_USE_NANOZONE -static int _dispatch_use_dispatch_alloc; +#endif // !DISPATCH_USE_NANOZONE +DISPATCH_STATIC_GLOBAL(bool _dispatch_use_dispatch_alloc); #else #define _dispatch_use_dispatch_alloc 1 #endif // DISPATCH_CONTINUATION_MALLOC @@ -709,6 +738,9 @@ static int _dispatch_use_dispatch_alloc; #if (DISPATCH_ALLOCATOR && (DISPATCH_CONTINUATION_MALLOC || DISPATCH_DEBUG)) \ || (DISPATCH_CONTINUATION_MALLOC && DISPATCH_USE_MALLOCZONE) + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_continuation_alloc_init_pred); + static void _dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED) { @@ -729,11 +761,11 @@ 
_dispatch_continuation_alloc_init(void *ctxt DISPATCH_UNUSED) #endif // DISPATCH_ALLOCATOR } -static void -_dispatch_continuation_alloc_once() +static inline void +_dispatch_continuation_alloc_once(void) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_continuation_alloc_init); + dispatch_once_f(&_dispatch_continuation_alloc_init_pred, + NULL, _dispatch_continuation_alloc_init); } #else static inline void _dispatch_continuation_alloc_once(void) {} diff --git a/src/allocator_internal.h b/src/allocator_internal.h index abe4a1d43..5f8c2f068 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -28,7 +28,7 @@ #define __DISPATCH_ALLOCATOR_INTERNAL__ #ifndef DISPATCH_ALLOCATOR -#if TARGET_OS_MAC && (defined(__LP64__) || TARGET_OS_EMBEDDED) +#if TARGET_OS_MAC && (defined(__LP64__) || TARGET_OS_IPHONE) #define DISPATCH_ALLOCATOR 1 #endif #endif @@ -72,7 +72,7 @@ #define MAGAZINES_PER_HEAP (NUM_CPU) // Do you care about compaction or performance? -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE #define PACK_FIRST_PAGE_WITH_CONTINUATIONS 1 #else #define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0 @@ -88,7 +88,7 @@ #define DISPATCH_ALLOCATOR_PAGE_MASK PAGE_MAX_MASK -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE #define PAGES_PER_MAGAZINE 64 #else #define PAGES_PER_MAGAZINE 512 @@ -281,6 +281,16 @@ struct dispatch_magazine_s { #define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) #endif + +kern_return_t _dispatch_allocator_enumerate(task_t remote_task, + const struct dispatch_allocator_layout_s *remote_allocator_layout, + vm_address_t zone_address, memory_reader_t reader, + void (^recorder)(vm_address_t, void *, size_t , bool *stop)); + #endif // DISPATCH_ALLOCATOR +#if DISPATCH_ALLOCATOR +extern dispatch_heap_t _dispatch_main_heap; +#endif + #endif // __DISPATCH_ALLOCATOR_INTERNAL__ diff --git a/src/apply.c b/src/apply.c index 6f44cf90b..5f93e6693 100644 --- a/src/apply.c +++ b/src/apply.c @@ -28,9 +28,8 @@ static char const * 
const _dispatch_apply_key = "apply"; DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_invoke2(void *ctxt, long invoke_flags) +_dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) { - dispatch_apply_t da = (dispatch_apply_t)ctxt; size_t const iter = da->da_iterations; size_t idx, done = 0; @@ -40,7 +39,6 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) // da_dc is only safe to access once the 'index lock' has been acquired dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; void *const da_ctxt = da->da_dc->dc_ctxt; - dispatch_queue_t dq = da->da_dc->dc_data; _dispatch_perfmon_workitem_dec(); // this unit executes many items @@ -54,6 +52,7 @@ _dispatch_apply_invoke2(void *ctxt, long invoke_flags) dispatch_thread_frame_s dtf; dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { + dispatch_queue_t dq = da->da_dc->dc_data; _dispatch_thread_frame_push(&dtf, dq); old_dbp = _dispatch_set_basepri(dq->dq_priority); } @@ -156,11 +155,12 @@ _dispatch_apply_serial(void *ctxt) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, +_dispatch_apply_f(dispatch_queue_global_t dq, dispatch_apply_t da, dispatch_function_t func) { int32_t i = 0; dispatch_continuation_t head = NULL, tail = NULL; + pthread_priority_t pp = _dispatch_get_priority(); // The current thread does not need a continuation int32_t continuation_cnt = da->da_thr_cnt - 1; @@ -169,9 +169,11 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, for (i = 0; i < continuation_cnt; i++) { dispatch_continuation_t next = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uintptr_t dc_flags = DC_FLAG_CONSUME; - _dispatch_continuation_init_f(next, dq, da, func, 0, 0, dc_flags); + _dispatch_continuation_init_f(next, dq, da, func, + DISPATCH_BLOCK_HAS_PRIORITY, dc_flags); + next->dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG; next->do_next = head; head = 
next; @@ -182,28 +184,65 @@ _dispatch_apply_f2(dispatch_queue_t dq, dispatch_apply_t da, _dispatch_thread_event_init(&da->da_event); // FIXME: dq may not be the right queue for the priority of `head` + _dispatch_trace_item_push_list(dq, head, tail); _dispatch_root_queue_push_inline(dq, head, tail, continuation_cnt); // Call the first element directly _dispatch_apply_invoke_and_wait(da); } +DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT +static inline int32_t +_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width) +{ + uint64_t old_state, new_state; + int32_t width; + + if (unlikely(dq->dq_width == 1)) { + return 0; + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + width = (int32_t)_dq_state_available_width(old_state); + if (unlikely(!width)) { + os_atomic_rmw_loop_give_up(return 0); + } + if (width > da_width) { + width = da_width; + } + new_state = old_state + (uint64_t)width * DISPATCH_QUEUE_WIDTH_INTERVAL; + }); + return width; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_relinquish_width(dispatch_queue_t top_dq, + dispatch_queue_t stop_dq, int32_t da_width) +{ + uint64_t delta = (uint64_t)da_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + dispatch_queue_t dq = top_dq; + + while (dq != stop_dq) { + os_atomic_sub2o(dq, dq_state, delta, relaxed); + dq = dq->do_targetq; + } +} + DISPATCH_NOINLINE static void _dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; int32_t da_width = da->da_thr_cnt - 1; - dispatch_queue_t dq = da->da_dc->dc_data, rq = dq, tq; + dispatch_queue_t top_dq = da->da_dc->dc_data, dq = top_dq; do { - int32_t width = _dispatch_queue_try_reserve_apply_width(rq, da_width); + int32_t width = _dispatch_queue_try_reserve_apply_width(dq, da_width); if (unlikely(da_width > width)) { int32_t excess = da_width - width; - for (tq = dq; tq != rq; tq = tq->do_targetq) { - _dispatch_queue_relinquish_width(tq, excess); - } - da_width -= excess; + 
_dispatch_queue_relinquish_width(top_dq, dq, excess); + da_width = width; if (unlikely(!da_width)) { return _dispatch_apply_serial(da); } @@ -215,19 +254,17 @@ _dispatch_apply_redirect(void *ctxt) // this continuation. da->da_flags = _dispatch_queue_autorelease_frequency(dq); } - rq = rq->do_targetq; - } while (unlikely(rq->do_targetq)); - _dispatch_apply_f2(rq, da, _dispatch_apply_redirect_invoke); - do { - _dispatch_queue_relinquish_width(dq, da_width); dq = dq->do_targetq; } while (unlikely(dq->do_targetq)); + + _dispatch_apply_f(upcast(dq)._dgq, da, _dispatch_apply_redirect_invoke); + _dispatch_queue_relinquish_width(top_dq, dq, da_width); } #define DISPATCH_APPLY_MAX UINT16_MAX // must be < sqrt(SIZE_MAX) DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t +static inline dispatch_queue_global_t _dispatch_apply_root_queue(dispatch_queue_t dq) { if (dq) { @@ -235,8 +272,8 @@ _dispatch_apply_root_queue(dispatch_queue_t dq) dq = dq->do_targetq; } // if the current root queue is a pthread root queue, select it - if (!_dispatch_priority_qos(dq->dq_priority)) { - return dq; + if (!_dispatch_is_in_root_queues_array(dq)) { + return upcast(dq)._dgq; } } @@ -247,7 +284,7 @@ _dispatch_apply_root_queue(dispatch_queue_t dq) DISPATCH_NOINLINE void -dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, +dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, void (*func)(void *, size_t)) { if (unlikely(iterations == 0)) { @@ -257,11 +294,15 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? 
dtctxt->dtc_apply_nesting : 0; dispatch_queue_t old_dq = _dispatch_queue_get_current(); + dispatch_queue_t dq; - if (likely(dq == DISPATCH_APPLY_AUTO)) { - dq = _dispatch_apply_root_queue(old_dq); + if (likely(_dq == DISPATCH_APPLY_AUTO)) { + dq = _dispatch_apply_root_queue(old_dq)->_as_dq; + } else { + dq = _dq; // silence clang Nullability complaints } - dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority); + dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority) ?: + _dispatch_priority_fallback_qos(dq->dq_priority); if (unlikely(dq->do_targetq)) { // if the queue passed-in is not a root queue, use the current QoS // since the caller participates in the work anyway @@ -294,6 +335,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, #if DISPATCH_INTROSPECTION da->da_dc = _dispatch_continuation_alloc(); *da->da_dc = dc; + da->da_dc->dc_flags = DC_FLAG_ALLOCATED; #else da->da_dc = &dc; #endif @@ -312,7 +354,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t dq, void *ctxt, dispatch_thread_frame_s dtf; _dispatch_thread_frame_push(&dtf, dq); - _dispatch_apply_f2(dq, da, _dispatch_apply_invoke); + _dispatch_apply_f(upcast(dq)._dgq, da, _dispatch_apply_invoke); _dispatch_thread_frame_pop(&dtf); } @@ -324,39 +366,3 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } #endif - -#if 0 -#ifdef __BLOCKS__ -void -dispatch_stride(size_t offset, size_t stride, size_t iterations, - dispatch_queue_t dq, void (^work)(size_t)) -{ - dispatch_stride_f(offset, stride, iterations, dq, work, - (dispatch_apply_function_t)_dispatch_Block_invoke(work)); -} -#endif - -DISPATCH_NOINLINE -void -dispatch_stride_f(size_t offset, size_t stride, size_t iterations, - dispatch_queue_t dq, void *ctxt, void (*func)(void *, size_t)) -{ - if (stride == 0) { - stride = 1; - } - dispatch_apply(iterations / stride, queue, ^(size_t idx) { - size_t i = idx * stride + offset; - 
size_t stop = i + stride; - do { - func(ctxt, i++); - } while (i < stop); - }); - - dispatch_sync(queue, ^{ - size_t i; - for (i = iterations - (iterations % stride); i < iterations; i++) { - func(ctxt, i + offset); - } - }); -} -#endif diff --git a/src/benchmark.c b/src/benchmark.c index 49a4faa44..b47504386 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -53,12 +53,12 @@ _dispatch_benchmark_init(void *context) dispatch_assert_zero(kr); #endif - start = _dispatch_absolute_time(); + start = _dispatch_uptime(); do { i++; f(c); } while (i < cnt); - delta = _dispatch_absolute_time() - start; + delta = _dispatch_uptime() - start; lcost = delta; #if HAVE_MACH_ABSOLUTE_TIME @@ -102,16 +102,16 @@ dispatch_benchmark_f(size_t count, register void *ctxt, dispatch_once_f(&pred, &bdata, _dispatch_benchmark_init); - if (slowpath(count == 0)) { + if (unlikely(count == 0)) { return 0; } - start = _dispatch_absolute_time(); + start = _dispatch_uptime(); do { i++; func(ctxt); } while (i < count); - delta = _dispatch_absolute_time() - start; + delta = _dispatch_uptime() - start; conversion = delta; #if HAVE_MACH_ABSOLUTE_TIME diff --git a/src/block.cpp b/src/block.cpp index 2a6f00799..9e9c2246e 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -28,9 +28,7 @@ #error Must build without C++ exceptions #endif -extern "C" { #include "internal.h" -} // NOTE: this file must not contain any atomic operations @@ -68,7 +66,9 @@ struct dispatch_block_private_data_s { dbpd_block(), dbpd_group(), dbpd_queue(), dbpd_thread() { // copy constructor, create copy with retained references - if (dbpd_voucher) voucher_retain(dbpd_voucher); + if (dbpd_voucher && dbpd_voucher != DISPATCH_NO_VOUCHER) { + voucher_retain(dbpd_voucher); + } if (o.dbpd_block) dbpd_block = _dispatch_Block_copy(o.dbpd_block); _dispatch_block_private_data_debug("copy from %p, block: %p from %p", &o, dbpd_block, o.dbpd_block); @@ -79,17 +79,24 @@ struct dispatch_block_private_data_s { { 
_dispatch_block_private_data_debug("destroy%s, block: %p", dbpd_magic ? "" : " (stack)", dbpd_block); + +#if DISPATCH_INTROSPECTION + void *db = (char *) this - sizeof(struct Block_layout); + _dispatch_ktrace1(DISPATCH_QOS_TRACE_private_block_dispose, db); +#endif /* DISPATCH_INTROSPECTION */ + if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return; if (dbpd_group) { if (!dbpd_performed) dispatch_group_leave(dbpd_group); - ((void (*)(dispatch_group_t))dispatch_release)(dbpd_group); + _os_object_release(dbpd_group->_as_os_obj); } if (dbpd_queue) { - ((void (*)(os_mpsc_queue_t, uint16_t)) - _os_object_release_internal_n)(dbpd_queue, 2); + _os_object_release_internal_n(dbpd_queue->_as_os_obj, 2); } if (dbpd_block) Block_release(dbpd_block); - if (dbpd_voucher) voucher_release(dbpd_voucher); + if (dbpd_voucher && dbpd_voucher != DISPATCH_NO_VOUCHER) { + voucher_release(dbpd_voucher); + } } }; @@ -114,7 +121,7 @@ extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) #else asm("____dispatch_block_create_block_invoke"); #endif -void (*_dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; +void (*const _dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; } #endif // __BLOCKS__ diff --git a/src/data.c b/src/data.c index 3efab2f89..700a694ea 100644 --- a/src/data.c +++ b/src/data.c @@ -118,8 +118,7 @@ _dispatch_data_alloc(size_t n, size_t extra) data = _dispatch_object_alloc(DISPATCH_DATA_CLASS, size); data->num_records = n; #if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - data->do_targetq = dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + data->do_targetq = _dispatch_get_default_queue(false); data->do_next = DISPATCH_OBJECT_LISTLESS; #endif return data; @@ -143,8 +142,7 @@ _dispatch_data_destroy_buffer(const void* buffer, size_t size, #endif } else { if (!queue) { - queue = dispatch_get_global_queue( - DISPATCH_QUEUE_PRIORITY_DEFAULT, 0); + queue = _dispatch_get_default_queue(false); } dispatch_async_f(queue, destructor, 
_dispatch_call_block_and_release); } @@ -200,7 +198,7 @@ dispatch_data_create(const void* buffer, size_t size, dispatch_queue_t queue, // The default destructor was provided, indicating the data should be // copied. data_buf = malloc(size); - if (slowpath(!data_buf)) { + if (unlikely(!data_buf)) { return DISPATCH_OUT_OF_MEMORY; } buffer = memcpy(data_buf, buffer, size); @@ -242,7 +240,7 @@ dispatch_data_create_alloc(size_t size, void** buffer_ptr) dispatch_data_t data = dispatch_data_empty; void *buffer = NULL; - if (slowpath(!size)) { + if (unlikely(!size)) { goto out; } data = _dispatch_data_alloc(0, size); @@ -271,17 +269,16 @@ _dispatch_data_dispose(dispatch_data_t dd, DISPATCH_UNUSED bool *allow_free) } } +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA void _dispatch_data_set_target_queue(dispatch_data_t dd, dispatch_queue_t tq) { -#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - _dispatch_retain(tq); - tq = os_atomic_xchg2o(dd, do_targetq, tq, release); - if (tq) _dispatch_release(tq); -#else + if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { + tq = _dispatch_get_default_queue(false); + } _dispatch_object_set_target_queue_inline(dd, tq); -#endif } +#endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA size_t _dispatch_data_debug(dispatch_data_t dd, char* buf, size_t bufsiz) @@ -405,7 +402,7 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, } // Crashing here indicates memory corruption of passed in data object - if (slowpath(i >= dd_num_records)) { + if (unlikely(i >= dd_num_records)) { DISPATCH_INTERNAL_CRASH(i, "dispatch_data_create_subrange out of bounds"); } @@ -435,7 +432,7 @@ dispatch_data_create_subrange(dispatch_data_t dd, size_t offset, last_length -= record_length; // Crashing here indicates memory corruption of passed in data object - if (slowpath(i + count >= dd_num_records)) { + if (unlikely(i + count >= dd_num_records)) { DISPATCH_INTERNAL_CRASH(i + count, "dispatch_data_create_subrange out of bounds"); } @@ -502,7 +499,7 @@ 
dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, } buffer = _dispatch_data_flatten(dd); - if (fastpath(buffer)) { + if (likely(buffer)) { data = dispatch_data_create(buffer, size, NULL, DISPATCH_DATA_DESTRUCTOR_FREE); } else { @@ -525,7 +522,7 @@ _dispatch_data_get_flattened_bytes(dispatch_data_t dd) const void *buffer; size_t offset = 0; - if (slowpath(!dd->size)) { + if (unlikely(!dd->size)) { return NULL; } @@ -535,9 +532,9 @@ _dispatch_data_get_flattened_bytes(dispatch_data_t dd) } void *flatbuf = _dispatch_data_flatten(dd); - if (fastpath(flatbuf)) { + if (likely(flatbuf)) { // we need a release so that readers see the content of the buffer - if (slowpath(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, + if (unlikely(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, &buffer, release))) { free(flatbuf); } else { diff --git a/src/data.m b/src/data.m index 1d024ffe7..789c3eb0c 100644 --- a/src/data.m +++ b/src/data.m @@ -122,7 +122,7 @@ - (NSString *)debugDescription { _dispatch_data_debug(self, buf, sizeof(buf)); NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; if (!format) return nil; - return [nsstring stringWithFormat:format, class_getName([self class]), buf]; + return [nsstring stringWithFormat:format, object_getClassName(self), buf]; } - (NSUInteger)length { diff --git a/src/data_internal.h b/src/data_internal.h index 19fc3d9ad..c5bc09f75 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -51,7 +51,7 @@ _OS_OBJECT_DECL_PROTOCOL(dispatch_data, dispatch_object); #define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) #define DISPATCH_DATA_EMPTY_CLASS DISPATCH_VTABLE(data_empty) #else -DISPATCH_CLASS_DECL(data); +DISPATCH_CLASS_DECL(data, OBJECT); #define DISPATCH_DATA_CLASS DISPATCH_VTABLE(data) #endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA @@ -103,8 +103,10 @@ struct dispatch_data_format_type_s { void _dispatch_data_init_with_bytes(dispatch_data_t data, const void *buffer, size_t size, dispatch_block_t destructor); void 
_dispatch_data_dispose(dispatch_data_t data, bool *allow_free); +#if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA void _dispatch_data_set_target_queue(struct dispatch_data_s *dd, dispatch_queue_t tq); +#endif size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); @@ -127,13 +129,13 @@ _dispatch_data_map_direct(struct dispatch_data_s *dd, size_t offset, const void *buffer = NULL; dispatch_assert(dd->size); - if (slowpath(!_dispatch_data_leaf(dd)) && + if (unlikely(!_dispatch_data_leaf(dd)) && _dispatch_data_num_records(dd) == 1) { offset += dd->records[0].from; dd = (struct dispatch_data_s *)dd->records[0].data_object; } - if (fastpath(_dispatch_data_leaf(dd))) { + if (likely(_dispatch_data_leaf(dd))) { buffer = dd->buf + offset; } else { buffer = os_atomic_load((void **)&dd->buf, relaxed); diff --git a/src/event/event.c b/src/event/event.c index 34abbf041..b1bc05343 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -20,6 +20,13 @@ #include "internal.h" +#pragma mark unote generic functions + +static void _dispatch_timer_unote_register(dispatch_timer_source_refs_t dt, + dispatch_wlh_t wlh, dispatch_priority_t pri); +static void _dispatch_timer_unote_resume(dispatch_timer_source_refs_t dt); +static void _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt); + DISPATCH_NOINLINE static dispatch_unote_t _dispatch_unote_create(dispatch_source_type_t dst, @@ -32,14 +39,11 @@ _dispatch_unote_create(dispatch_source_type_t dst, return DISPATCH_UNOTE_NULL; } - if (dst->dst_filter != DISPATCH_EVFILT_TIMER) { - if (dst->dst_mask && !mask) { - return DISPATCH_UNOTE_NULL; - } + if (dst->dst_mask && !mask) { + return DISPATCH_UNOTE_NULL; } - if ((dst->dst_flags & EV_UDATA_SPECIFIC) || - (dst->dst_filter == DISPATCH_EVFILT_TIMER)) { + if (dst->dst_flags & EV_UDATA_SPECIFIC) { du = _dispatch_calloc(1u, dst->dst_size); } else { dul = _dispatch_calloc(1u, sizeof(*dul) + 
dst->dst_size); @@ -53,7 +57,6 @@ _dispatch_unote_create(dispatch_source_type_t dst, if (dst->dst_flags & EV_UDATA_SPECIFIC) { du->du_is_direct = true; } - du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR; return (dispatch_unote_t){ ._du = du }; } @@ -78,13 +81,7 @@ _dispatch_unote_create_with_fd(dispatch_source_type_t dst, return DISPATCH_UNOTE_NULL; } #endif - dispatch_unote_t du = _dispatch_unote_create(dst, handle, mask); - if (du._du) { - int16_t filter = dst->dst_filter; - du._du->du_data_action = (filter == EVFILT_READ||filter == EVFILT_WRITE) - ? DISPATCH_UNOTE_ACTION_DATA_SET : DISPATCH_UNOTE_ACTION_DATA_OR; - } - return du; + return _dispatch_unote_create(dst, handle, mask); } DISPATCH_NOINLINE @@ -123,6 +120,88 @@ _dispatch_unote_dispose(dispatch_unote_t du) free(ptr); } +bool +_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_priority_t pri) +{ + dispatch_assert(du._du->du_is_timer || !_dispatch_unote_registered(du)); + dispatch_priority_t masked_pri; + + masked_pri = pri & (DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_FLAG_FALLBACK | + DISPATCH_PRIORITY_FLAG_FLOOR | + DISPATCH_PRIORITY_FALLBACK_QOS_MASK | + DISPATCH_PRIORITY_REQUESTED_MASK); + + dispatch_assert(wlh == DISPATCH_WLH_ANON || masked_pri); + if (masked_pri == _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT)) { + _dispatch_ktrace1(DISPATCH_PERF_source_registration_without_qos, + _dispatch_wref2ptr(du._du->du_owner_wref)); + } + + du._du->du_priority = pri; + + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); + return true; + } + if (du._du->du_is_timer) { + _dispatch_timer_unote_register(du._dt, wlh, pri); + return true; + } +#if DISPATCH_HAVE_DIRECT_KNOTES + if (du._du->du_is_direct) { + return _dispatch_unote_register_direct(du, wlh); + } +#endif + return _dispatch_unote_register_muxed(du); +} 
+ +void +_dispatch_unote_resume(dispatch_unote_t du) +{ + dispatch_assert(du._du->du_is_timer || _dispatch_unote_needs_rearm(du)); + if (du._du->du_is_timer) { + _dispatch_timer_unote_resume(du._dt); +#if DISPATCH_HAVE_DIRECT_KNOTES + } else if (du._du->du_is_direct) { + _dispatch_unote_resume_direct(du); +#endif + } else { + _dispatch_unote_resume_muxed(du); + } +} + +bool +_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) +{ + if (!_dispatch_unote_registered(du)) { + return true; + } + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + return true; + } + if (du._du->du_is_timer) { + _dispatch_timer_unote_unregister(du._dt); + return true; + } +#if DISPATCH_HAVE_DIRECT_KNOTES + if (du._du->du_is_direct) { + return _dispatch_unote_unregister_direct(du, flags); + } +#endif + + dispatch_assert(flags & DUU_DELETE_ACK); + return _dispatch_unote_unregister_muxed(du); +} + #pragma mark data or / add static dispatch_unote_t @@ -146,7 +225,9 @@ const dispatch_source_type_s _dispatch_source_type_data_add = { .dst_kind = "data-add", .dst_filter = DISPATCH_EVFILT_CUSTOM_ADD, .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -156,7 +237,9 @@ const dispatch_source_type_s _dispatch_source_type_data_or = { .dst_kind = "data-or", .dst_filter = DISPATCH_EVFILT_CUSTOM_OR, .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -166,7 +249,9 @@ const dispatch_source_type_s _dispatch_source_type_data_replace = { .dst_kind = "data-replace", .dst_filter = 
DISPATCH_EVFILT_CUSTOM_REPLACE, .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -184,7 +269,9 @@ const dispatch_source_type_s _dispatch_source_type_read = { #endif .dst_data = 1, #endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -200,7 +287,9 @@ const dispatch_source_type_s _dispatch_source_type_write = { #endif .dst_data = 1, #endif // DISPATCH_EVENT_BACKEND_KEVENT + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -215,113 +304,943 @@ _dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle, if (handle >= NSIG) { return DISPATCH_UNOTE_NULL; } - dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask); - if (du._du) { - du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD; - } - return du; + return _dispatch_unote_create_with_handle(dst, handle, mask); } const dispatch_source_type_s _dispatch_source_type_signal = { .dst_kind = "signal", .dst_filter = EVFILT_SIGNAL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_signal_create, .dst_merge_evt = _dispatch_source_merge_evt, }; -#pragma mark timers +#pragma mark - +#pragma mark timer globals + +DISPATCH_GLOBAL(struct dispatch_timer_heap_s +_dispatch_timers_heap[DISPATCH_TIMER_COUNT]); -bool _dispatch_timers_reconfigure, _dispatch_timers_expired; -uint32_t 
_dispatch_timers_processing_mask; #if DISPATCH_USE_DTRACE -uint32_t _dispatch_timers_will_wake; +DISPATCH_STATIC_GLOBAL(dispatch_timer_source_refs_t +_dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]); +#define _dispatch_trace_next_timer_set(x, q) \ + _dispatch_trace_next_timer[(q)] = (x) +#define _dispatch_trace_next_timer_program(d, q) \ + _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) +#else +#define _dispatch_trace_next_timer_set(x, q) +#define _dispatch_trace_next_timer_program(d, q) #endif -#define DISPATCH_TIMER_HEAP_INITIALIZER(tidx) \ - [tidx] = { \ - .dth_target = UINT64_MAX, \ - .dth_deadline = UINT64_MAX, \ - } -#define DISPATCH_TIMER_HEAP_INIT(kind, qos) \ - DISPATCH_TIMER_HEAP_INITIALIZER(DISPATCH_TIMER_INDEX( \ - DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)) - -struct dispatch_timer_heap_s _dispatch_timers_heap[] = { - DISPATCH_TIMER_HEAP_INIT(WALL, NORMAL), - DISPATCH_TIMER_HEAP_INIT(MACH, NORMAL), + +#pragma mark timer heap +/* + * The dispatch_timer_heap_t structure is a double min-heap of timers, + * interleaving the by-target min-heap in the even slots, and the by-deadline + * in the odd ones. + * + * The min element of these is held inline in the dispatch_timer_heap_t + * structure, and further entries are held in segments. + * + * dth_segments is the number of allocated segments. + * + * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers + * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1)) + * + * Segment n (dth_segments - 1) is the last segment and points its final n + * entries to previous segments. Its address is held in the `dth_heap` field. 
+ * + * segment n [ regular timer pointers | n-1 | k | 0 ] + * | | | + * segment n-1 <---------------------------' | | + * segment k <--------------------------------' | + * segment 0 <------------------------------------' + */ +#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u + +/* + * There are two min-heaps stored interleaved in a single array, + * even indices are for the by-target min-heap, and odd indices for + * the by-deadline one. + */ +#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1) +#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK) +#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \ + (((idx) & ~DTH_HEAP_ID_MASK) | (heap_id)) + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_capacity(uint32_t segments) +{ + if (segments == 0) return 2; + uint32_t seg_no = segments - 1; + // for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY, + // 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no + return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no; +} + +static void +_dispatch_timer_heap_grow(dispatch_timer_heap_t dth) +{ + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = dth->dth_segments++; + void **heap, **heap_prev = dth->dth_heap; + + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + } + heap = _dispatch_calloc(seg_capacity, sizeof(void *)); + if (seg_no > 1) { + uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap[seg_capacity - prev_seg_no], + &heap_prev[prev_seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); + } + if (seg_no > 0) { + heap[seg_capacity - seg_no] = heap_prev; + } + dth->dth_heap = heap; +} + +static void +_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth) +{ + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + uint32_t seg_no = --dth->dth_segments; + void **heap = dth->dth_heap, **heap_prev = NULL; + + if (seg_no > 0) { + seg_capacity <<= (seg_no - 1); + heap_prev = heap[seg_capacity - seg_no]; + } + if (seg_no > 1) { + 
uint32_t prev_seg_no = seg_no - 1; + uint32_t prev_seg_capacity = seg_capacity >> 1; + memcpy(&heap_prev[prev_seg_capacity - prev_seg_no], + &heap[seg_capacity - prev_seg_no], + prev_seg_no * sizeof(void *)); + } + dth->dth_heap = heap_prev; + free(heap); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_timer_source_refs_t * +_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx) +{ + uint32_t seg_no, segments = dth->dth_segments; + void **segment; + + if (idx < DTH_ID_COUNT) { + return &dth->dth_min[idx]; + } + idx -= DTH_ID_COUNT; + + // Derive the segment number from the index. Naming + // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are: + // 0: 0 .. (C - 1) + // 1: C .. 2 * C - 1 + // k: 2^(k-1) * C .. 2^k * C - 1 + // so `k` can be derived from the first bit set in `idx` + seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) - + __builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1))); + if (seg_no + 1 == segments) { + segment = dth->dth_heap; + } else { + uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; + seg_capacity <<= (segments - 2); + segment = dth->dth_heap[seg_capacity - seg_no - 1]; + } + if (seg_no) { + idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1); + } + return (dispatch_timer_source_refs_t *)(segment + idx); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_timer_heap_set(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t *slot, + dispatch_timer_source_refs_t dt, uint32_t idx) +{ + if (idx < DTH_ID_COUNT) { + dth->dth_needs_program = true; + } + *slot = dt; + dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_parent(uint32_t idx) +{ + uint32_t heap_id = DTH_HEAP_ID(idx); + idx = (idx - DTH_ID_COUNT) / 2; // go to the parent + return DTH_IDX_FOR_HEAP_ID(idx, heap_id); +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_left_child(uint32_t idx) +{ + 
uint32_t heap_id = DTH_HEAP_ID(idx); + // 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id + return 2 * idx + DTH_ID_COUNT - heap_id; +} + +#if DISPATCH_HAVE_TIMER_COALESCING +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count) +{ + uint32_t heap_id = DTH_HEAP_ID(idx); + + idx -= heap_id; + if (unlikely(idx + DTH_ID_COUNT == count)) { + // reaching `count` doesn't mean we're done, but there is a weird + // corner case if the last item of the heap is a left child: + // + // /\ + // / \ + // / __\ + // /__/ + // ^ + // + // The formula below would return the sibling of `idx` which is + // out of bounds. Fortunately, the correct answer is the same + // as for idx's parent + idx = _dispatch_timer_heap_parent(idx); + } + + // + // When considering the index in a non interleaved, 1-based array + // representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1) + // for a given idx in our dual-heaps, that index is in one of two forms: + // + // (a) 1xxxx011111 or (b) 111111111 + // d i 0 d 0 + // + // The first bit set is the row of the binary tree node (0-based). + // The following digits from most to least significant represent the path + // to that node, where `0` is a left turn and `1` a right turn. + // + // For example 0b0101 (5) is a node on row 2 accessed going left then right: + // + // row 0 1 + // / . + // row 1 2 3 + // . \ . . + // row 2 4 5 6 7 + // : : : : : : : : + // + // Skipping a sub-tree in walk order means going to the sibling of the last + // node reached after we turned left. If the node was of the form (a), + // this node is 1xxxx1, which for the above example is 0b0011 (3). + // If the node was of the form (b) then we never took a left, meaning + // we reached the last element in traversal order. 
+ // + + // + // we want to find + // - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1) + // - which is offset by log_2(DTH_ID_COUNT) from the position of the least + // significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2. + // - which in turn is the same as the position of the least significant 1 in + // ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) + // + dispatch_static_assert(powerof2(DTH_ID_COUNT)); + idx += DTH_ID_COUNT + DTH_ID_COUNT - 1; + idx >>= __builtin_ctz(~idx); + + // + // `idx` is now either: + // - 0 if it was the (b) case above, in which case the walk is done + // - 1xxxx0 as the position in a 0 based array representation of a non + // interleaved heap, so we just have to compute the interleaved index. + // + return likely(idx) ? DTH_ID_COUNT * idx + heap_id : UINT32_MAX; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count) +{ + // + // Goes to the next element in heap walk order, which is the prefix ordered + // walk of the tree. + // + // From a given node, the next item to return is the left child if it + // exists, else the first right sibling we find by walking our parent chain, + // which is exactly what _dispatch_timer_heap_walk_skip() returns. 
+ // + uint32_t lchild = _dispatch_timer_heap_left_child(idx); + if (lchild < count) { + return lchild; + } + return _dispatch_timer_heap_walk_skip(idx, count); +} + +static uint64_t +_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit) +{ + dispatch_timer_source_refs_t dri; + uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID); + uint32_t count = dth->dth_count; + uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target; + + while (idx < count) { + dri = *_dispatch_timer_heap_get_slot(dth, idx); + tmp = dri->dt_timer.target; + if (tmp > limit) { + // skip subtree since none of the targets below can be before limit + idx = _dispatch_timer_heap_walk_skip(idx, count); + } else { + target = tmp; + idx = _dispatch_timer_heap_walk_next(idx, count); + } + } + return target; +} +#endif // DISPATCH_HAVE_TIMER_COALESCING + +static void +_dispatch_timer_heap_resift(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt, uint32_t idx) +{ + dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID])); + dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) == + offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID])); +#define dth_cmp(hid, dt1, op, dt2) \ + (((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid]) + + dispatch_timer_source_refs_t *pslot, pdt; + dispatch_timer_source_refs_t *cslot, cdt; + dispatch_timer_source_refs_t *rslot, rdt; + uint32_t cidx, dth_count = dth->dth_count; + dispatch_timer_source_refs_t *slot; + int heap_id = DTH_HEAP_ID(idx); + bool sifted_up = false; + + // try to sift up + + slot = _dispatch_timer_heap_get_slot(dth, idx); + while (idx >= DTH_ID_COUNT) { + uint32_t pidx = _dispatch_timer_heap_parent(idx); + pslot = _dispatch_timer_heap_get_slot(dth, pidx); + pdt = *pslot; + if (dth_cmp(heap_id, pdt, <=, dt)) { + break; + } + _dispatch_timer_heap_set(dth, slot, 
pdt, idx); + slot = pslot; + idx = pidx; + sifted_up = true; + } + if (sifted_up) { + goto done; + } + + // try to sift down + + while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) { + uint32_t ridx = cidx + DTH_ID_COUNT; + cslot = _dispatch_timer_heap_get_slot(dth, cidx); + cdt = *cslot; + if (ridx < dth_count) { + rslot = _dispatch_timer_heap_get_slot(dth, ridx); + rdt = *rslot; + if (dth_cmp(heap_id, cdt, >, rdt)) { + cidx = ridx; + cdt = rdt; + cslot = rslot; + } + } + if (dth_cmp(heap_id, dt, <=, cdt)) { + break; + } + _dispatch_timer_heap_set(dth, slot, cdt, idx); + slot = cslot; + idx = cidx; + } + +done: + _dispatch_timer_heap_set(dth, slot, dt, idx); +#undef dth_cmp +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_timer_heap_insert(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) +{ + uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT; + + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==, + DTH_INVALID_ID, "deadline idx"); + + dispatch_qos_t qos = MAX(_dispatch_priority_qos(dt->du_priority), + _dispatch_priority_fallback_qos(dt->du_priority)); + if (dth->dth_max_qos < qos) { + dth->dth_max_qos = (uint8_t)qos; + dth->dth_needs_program = true; + } + + if (idx == 0) { + dth->dth_needs_program = true; + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID; + dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt; + return; + } + + if (unlikely(idx + DTH_ID_COUNT > + _dispatch_timer_heap_capacity(dth->dth_segments))) { + _dispatch_timer_heap_grow(dth); + } + _dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID); + _dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID); +} + +static void +_dispatch_timer_heap_remove(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) +{ + uint32_t idx = (dth->dth_count -= DTH_ID_COUNT); + + 
DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); + + if (idx == 0) { + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt, + "target slot"); + DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt, + "deadline slot"); + dth->dth_needs_program = true; + dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL; + goto clear_heap_entry; + } + + for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) { + dispatch_timer_source_refs_t *slot, last_dt; + slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id); + last_dt = *slot; *slot = NULL; + if (last_dt != dt) { + uint32_t removed_idx = dt->dt_heap_entry[heap_id]; + _dispatch_timer_heap_resift(dth, last_dt, removed_idx); + } + } + if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) { + _dispatch_timer_heap_shrink(dth); + } + +clear_heap_entry: + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_timer_heap_update(dispatch_timer_heap_t dth, + dispatch_timer_source_refs_t dt) +{ + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, + DTH_INVALID_ID, "target idx"); + DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, + DTH_INVALID_ID, "deadline idx"); + + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]); + _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]); +} + +#pragma mark timer unote + +#define _dispatch_timer_du_debug(what, du) \ + _dispatch_debug("kevent-source[%p]: %s kevent[%p] { ident = 0x%x }", \ + _dispatch_wref2ptr((du)->du_owner_wref), what, \ + (du), (du)->du_ident) + +DISPATCH_ALWAYS_INLINE +static inline unsigned int +_dispatch_timer_unote_idx(dispatch_timer_source_refs_t dt) +{ + dispatch_clock_t clock = 
_dispatch_timer_flags_to_clock(dt->du_timer_flags); + uint32_t qos = 0; + #if DISPATCH_HAVE_TIMER_QOS - DISPATCH_TIMER_HEAP_INIT(WALL, CRITICAL), - DISPATCH_TIMER_HEAP_INIT(MACH, CRITICAL), - DISPATCH_TIMER_HEAP_INIT(WALL, BACKGROUND), - DISPATCH_TIMER_HEAP_INIT(MACH, BACKGROUND), + dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL); + dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND); + qos = dt->du_timer_flags & (DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND); + // flags are normalized so this should never happen + dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); #endif -}; + + return DISPATCH_TIMER_INDEX(clock, qos); +} + +static void +_dispatch_timer_unote_disarm(dispatch_timer_source_refs_t dt, + dispatch_timer_heap_t dth) +{ + uint32_t tidx = dt->du_ident; + + dispatch_assert(_dispatch_unote_armed(dt)); + _dispatch_timer_heap_remove(&dth[tidx], dt); + _dispatch_timers_heap_dirty(dth, tidx); + _dispatch_unote_state_clear_bit(dt, DU_STATE_ARMED); + _dispatch_timer_du_debug("disarmed", dt); +} + +static void +_dispatch_timer_unote_arm(dispatch_timer_source_refs_t dt, + dispatch_timer_heap_t dth, uint32_t tidx) +{ + if (_dispatch_unote_armed(dt)) { + DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx"); + _dispatch_timer_heap_update(&dth[tidx], dt); + _dispatch_timer_du_debug("updated", dt); + } else { + dt->du_ident = tidx; + _dispatch_timer_heap_insert(&dth[tidx], dt); + _dispatch_unote_state_set_bit(dt, DU_STATE_ARMED); + _dispatch_timer_du_debug("armed", dt); + } + _dispatch_timers_heap_dirty(dth, tidx); +} + +#define DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION 0x1 + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_timer_unote_needs_rearm(dispatch_timer_source_refs_t dr, int flags) +{ + dispatch_source_t ds = _dispatch_source_from_refs(dr); + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { + if (flags & DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION) { + _dispatch_ktrace1(DISPATCH_PERF_suspended_timer_fire, ds); + } + 
return false; + } + return dr->du_ident != DISPATCH_TIMER_IDENT_CANCELED && + dr->dt_timer.target < INT64_MAX; +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_unote_register(dispatch_timer_source_refs_t dt, + dispatch_wlh_t wlh, dispatch_priority_t pri) +{ + // aggressively coalesce background/maintenance QoS timers + // + if (_dispatch_qos_is_background(_dispatch_priority_qos(pri))) { + if (dt->du_timer_flags & DISPATCH_TIMER_STRICT) { + _dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, + _dispatch_source_from_refs(dt)); + } else { + dt->du_timer_flags |= DISPATCH_TIMER_BACKGROUND; + dt->du_ident = _dispatch_timer_unote_idx(dt); + } + } + // _dispatch_source_activate() can pre-set a wlh for timers directly + // attached to their workloops. + if (_dispatch_unote_wlh(dt) != wlh) { + dispatch_assert(_dispatch_unote_wlh(dt) == NULL); + _dispatch_unote_state_set(dt, DISPATCH_WLH_ANON, 0); + } + if (os_atomic_load2o(dt, dt_pending_config, relaxed)) { + _dispatch_timer_unote_configure(dt); + } +} + +void +_dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt) +{ + dispatch_timer_config_t dtc; + + dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency); + if (dtc->dtc_clock != _dispatch_timer_flags_to_clock(dt->du_timer_flags)) { + dt->du_timer_flags &= ~_DISPATCH_TIMER_CLOCK_MASK; + dt->du_timer_flags |= _dispatch_timer_flags_from_clock(dtc->dtc_clock); + } + dt->dt_timer = dtc->dtc_timer; + free(dtc); + // Clear any pending data that might have accumulated on + // older timer params + os_atomic_store2o(dt, ds_pending_data, 0, relaxed); + + if (_dispatch_unote_armed(dt)) { + return _dispatch_timer_unote_resume(dt); + } +} + +static inline dispatch_timer_heap_t +_dispatch_timer_unote_heap(dispatch_timer_source_refs_t dt) +{ + dispatch_wlh_t wlh = _dispatch_unote_wlh(dt); + if (wlh == DISPATCH_WLH_ANON) { + return _dispatch_timers_heap; + } + return ((dispatch_workloop_t)wlh)->dwl_timer_heap; +} + +DISPATCH_NOINLINE +static void 
+_dispatch_timer_unote_resume(dispatch_timer_source_refs_t dt) +{ + // ... and now reflect any impact the reconfiguration has to the heap. + // The heap also owns a +2 on dispatch sources it references, so maintain + // this invariant as we tweak the registration. + + bool will_arm = _dispatch_timer_unote_needs_rearm(dt, 0); + bool was_armed = _dispatch_unote_armed(dt); + uint32_t tidx = _dispatch_timer_unote_idx(dt); + dispatch_timer_heap_t dth = _dispatch_timer_unote_heap(dt); + + if (unlikely(was_armed && (!will_arm || dt->du_ident != tidx))) { + _dispatch_timer_unote_disarm(dt, dth); + } + if (will_arm) { + if (!was_armed) _dispatch_retain_unote_owner(dt); + _dispatch_timer_unote_arm(dt, dth, tidx); + } else if (was_armed) { + _dispatch_release_unote_owner_tailcall(dt); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt) +{ + dispatch_timer_heap_t dth = _dispatch_timer_unote_heap(dt); + if (_dispatch_unote_armed(dt)) { + _dispatch_timer_unote_disarm(dt, dth); + _dispatch_release_2_no_dispose(_dispatch_source_from_refs(dt)); + } + _dispatch_wlh_release(_dispatch_unote_wlh(dt)); + _dispatch_unote_state_set(dt, DU_STATE_UNREGISTERED); + dt->du_ident = DISPATCH_TIMER_IDENT_CANCELED; +} static dispatch_unote_t _dispatch_source_timer_create(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask) { - uint32_t fflags = dst->dst_fflags; - dispatch_unote_t du; + dispatch_timer_source_refs_t dt; // normalize flags if (mask & DISPATCH_TIMER_STRICT) { mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND; } + if (mask & ~dst->dst_mask) { + return DISPATCH_UNOTE_NULL; + } - if (fflags & DISPATCH_TIMER_INTERVAL) { + if (dst->dst_timer_flags & DISPATCH_TIMER_INTERVAL) { if (!handle) return DISPATCH_UNOTE_NULL; - du = _dispatch_unote_create_without_handle(dst, 0, mask); - } else { - du = _dispatch_unote_create_without_handle(dst, handle, mask); + } else if (dst->dst_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK) { + if 
(handle) return DISPATCH_UNOTE_NULL; + } else switch (handle) { + case 0: + break; + case DISPATCH_CLOCKID_UPTIME: + dst = &_dispatch_source_type_timer_with_clock; + mask |= DISPATCH_TIMER_CLOCK_UPTIME; + break; + case DISPATCH_CLOCKID_MONOTONIC: + dst = &_dispatch_source_type_timer_with_clock; + mask |= DISPATCH_TIMER_CLOCK_MONOTONIC; + break; + case DISPATCH_CLOCKID_WALLTIME: + dst = &_dispatch_source_type_timer_with_clock; + mask |= DISPATCH_TIMER_CLOCK_WALL; + break; + default: + return DISPATCH_UNOTE_NULL; } - if (du._dt) { - du._dt->du_is_timer = true; - du._dt->du_data_action = DISPATCH_UNOTE_ACTION_DATA_ADD; - du._dt->du_fflags |= fflags; - du._dt->du_ident = _dispatch_source_timer_idx(du); - du._dt->dt_timer.target = UINT64_MAX; - du._dt->dt_timer.deadline = UINT64_MAX; - du._dt->dt_timer.interval = UINT64_MAX; - du._dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; - du._dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; - } - return du; + dt = _dispatch_calloc(1u, dst->dst_size); + dt->du_type = dst; + dt->du_filter = dst->dst_filter; + dt->du_is_timer = true; + dt->du_timer_flags |= (uint8_t)(mask | dst->dst_timer_flags); + dt->du_ident = _dispatch_timer_unote_idx(dt); + dt->dt_timer.target = UINT64_MAX; + dt->dt_timer.deadline = UINT64_MAX; + dt->dt_timer.interval = UINT64_MAX; + dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; + dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; + return (dispatch_unote_t){ ._dt = dt }; } const dispatch_source_type_s _dispatch_source_type_timer = { - .dst_kind = "timer", - .dst_filter = DISPATCH_EVFILT_TIMER, - .dst_flags = EV_DISPATCH, - .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, - .dst_fflags = 0, - .dst_size = sizeof(struct dispatch_timer_source_refs_s), - - .dst_create = _dispatch_source_timer_create, + .dst_kind = "timer", + .dst_filter = DISPATCH_EVFILT_TIMER, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .dst_timer_flags = 0, + .dst_action 
= DISPATCH_UNOTE_ACTION_SOURCE_TIMER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + .dst_strict = false, + + .dst_create = _dispatch_source_timer_create, + .dst_merge_evt = _dispatch_source_merge_evt, +}; + +const dispatch_source_type_s _dispatch_source_type_timer_with_clock = { + .dst_kind = "timer (fixed-clock)", + .dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK, + .dst_flags = EV_DISPATCH, + .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND, + .dst_timer_flags = 0, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, + .dst_merge_evt = _dispatch_source_merge_evt, }; const dispatch_source_type_s _dispatch_source_type_after = { - .dst_kind = "timer (after)", - .dst_filter = DISPATCH_EVFILT_TIMER, - .dst_flags = EV_DISPATCH, - .dst_mask = 0, - .dst_fflags = DISPATCH_TIMER_AFTER, - .dst_size = sizeof(struct dispatch_timer_source_refs_s), - - .dst_create = _dispatch_source_timer_create, + .dst_kind = "timer (after)", + .dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK, + .dst_flags = EV_DISPATCH, + .dst_mask = 0, + .dst_timer_flags = DISPATCH_TIMER_AFTER, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, + .dst_merge_evt = _dispatch_source_merge_evt, }; const dispatch_source_type_s _dispatch_source_type_interval = { - .dst_kind = "timer (interval)", - .dst_filter = DISPATCH_EVFILT_TIMER, - .dst_flags = EV_DISPATCH, - .dst_mask = DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND - |DISPATCH_INTERVAL_UI_ANIMATION, - .dst_fflags = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_MACH, - .dst_size = sizeof(struct dispatch_timer_source_refs_s), - - .dst_create = _dispatch_source_timer_create, + .dst_kind = "timer (interval)", + .dst_filter = DISPATCH_EVFILT_TIMER_WITH_CLOCK, + .dst_flags = EV_DISPATCH, + .dst_mask = 
DISPATCH_TIMER_STRICT|DISPATCH_TIMER_BACKGROUND| + DISPATCH_INTERVAL_UI_ANIMATION, + .dst_timer_flags = DISPATCH_TIMER_INTERVAL|DISPATCH_TIMER_CLOCK_UPTIME, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, + .dst_size = sizeof(struct dispatch_timer_source_refs_s), + + .dst_create = _dispatch_source_timer_create, + .dst_merge_evt = _dispatch_source_merge_evt, }; + +#pragma mark timer draining + +static void +_dispatch_timers_run(dispatch_timer_heap_t dth, uint32_t tidx, + dispatch_clock_now_cache_t nows) +{ + dispatch_timer_source_refs_t dr; + uint64_t pending, now; + + while ((dr = dth[tidx].dth_min[DTH_TARGET_ID])) { + DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx"); + DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target"); + + now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + if (dr->dt_timer.target > now) { + // Done running timers for now. + break; + } + + if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) { + _dispatch_timer_unote_disarm(dr, dth); // +2 is consumed by _merge_evt() + _dispatch_wlh_release(_dispatch_unote_wlh(dr)); + _dispatch_unote_state_set(dr, DU_STATE_UNREGISTERED); + os_atomic_store2o(dr, ds_pending_data, 2, relaxed); + _dispatch_trace_timer_fire(dr, 1, 1); + dux_merge_evt(dr, EV_ONESHOT, 0, 0); + continue; + } + + if (os_atomic_load2o(dr, dt_pending_config, relaxed)) { + _dispatch_timer_unote_configure(dr); + continue; + } + + // We want to try to keep repeating timers in the heap if their handler + // is keeping up to avoid useless hops through the manager thread. + // + // However, if we can observe a non consumed ds_pending_data, we have to + // remove the timer from the heap until the handler keeps up (disarm). + // Such an operation is a one-way street, as _dispatch_source_invoke2() + // can decide to dispose of a timer without going back to the manager if + // it can observe that it is disarmed. 
+ // + // To solve this race, we use the MISSED marker in ds_pending_data + // with a release barrier to make the changes accumulated on `ds_timer` + // visible to _dispatch_source_timer_data(). Doing this also transfers + // the responsibility to call _dispatch_timer_unote_compute_missed() + // to _dispatch_source_invoke2() without the manager involvement. + // + // Suspension also causes the timer to be removed from the heap. We need + // to make sure _dispatch_source_timer_data() will recompute the proper + // number of fired events when the source is resumed, and also use the + // MISSED marker for this similar purpose. + if (unlikely(os_atomic_load2o(dr, ds_pending_data, relaxed))) { + _dispatch_timer_unote_disarm(dr, dth); + pending = os_atomic_or_orig2o(dr, ds_pending_data, + DISPATCH_TIMER_DISARMED_MARKER, relaxed); + } else { + pending = _dispatch_timer_unote_compute_missed(dr, now, 0) << 1; + if (_dispatch_timer_unote_needs_rearm(dr, + DISPATCH_TIMER_UNOTE_TRACE_SUSPENSION)) { + // _dispatch_source_merge_evt() consumes a +2 which we transfer + // from the heap ownership when we disarm the timer. 
If it stays + // armed, we need to take new retain counts + _dispatch_retain_unote_owner(dr); + _dispatch_timer_unote_arm(dr, dth, tidx); + os_atomic_store2o(dr, ds_pending_data, pending, relaxed); + } else { + _dispatch_timer_unote_disarm(dr, dth); + pending |= DISPATCH_TIMER_DISARMED_MARKER; + os_atomic_store2o(dr, ds_pending_data, pending, release); + } + } + _dispatch_trace_timer_fire(dr, pending >> 1, pending >> 1); + dux_merge_evt(dr, EV_ONESHOT, 0, 0); + } +} + +#if DISPATCH_HAVE_TIMER_COALESCING +#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ + [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC + +static const uint64_t _dispatch_kevent_coalescing_window[] = { + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), +#if DISPATCH_HAVE_TIMER_QOS + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), + DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), +#endif +}; +#endif // DISPATCH_HAVE_TIMER_COALESCING + +DISPATCH_ALWAYS_INLINE +static inline dispatch_timer_delay_s +_dispatch_timers_get_delay(dispatch_timer_heap_t dth, uint32_t tidx, + uint32_t qos, dispatch_clock_now_cache_t nows) +{ + uint64_t target, deadline; + dispatch_timer_delay_s rc; + + if (!dth[tidx].dth_min[DTH_TARGET_ID]) { + rc.delay = rc.leeway = INT64_MAX; + return rc; + } + + target = dth[tidx].dth_min[DTH_TARGET_ID]->dt_timer.target; + deadline = dth[tidx].dth_min[DTH_DEADLINE_ID]->dt_timer.deadline; + dispatch_assert(target <= deadline && target < INT64_MAX); + + uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + if (target <= now) { + rc.delay = rc.leeway = 0; + return rc; + } + + if (qos < DISPATCH_TIMER_QOS_COUNT && dth[tidx].dth_count > 2) { +#if DISPATCH_HAVE_TIMER_COALESCING + // Timer pre-coalescing + // When we have several timers with this target/deadline bracket: + // + // Target window Deadline + // V <-------V + // t1: [...........|.................] + // t2: [......|.......] + // t3: [..|..........] + // t4: | [.............] 
+ // ^ + // Optimal Target + // + // Coalescing works better if the Target is delayed to "Optimal", by + // picking the latest target that isn't too close to the deadline. + uint64_t window = _dispatch_kevent_coalescing_window[qos]; + if (target + window < deadline) { + uint64_t latest = deadline - window; + target = _dispatch_timer_heap_max_target_before(&dth[tidx], latest); + } +#endif + } + + rc.delay = MIN(target - now, INT64_MAX); + rc.leeway = MIN(deadline - target, INT64_MAX); + return rc; +} + +static void +_dispatch_timers_program(dispatch_timer_heap_t dth, uint32_t tidx, + dispatch_clock_now_cache_t nows) +{ + uint32_t qos = DISPATCH_TIMER_QOS(tidx); + dispatch_timer_delay_s range; + + range = _dispatch_timers_get_delay(dth, tidx, qos, nows); + if (range.delay == 0) { + _dispatch_timers_heap_dirty(dth, tidx); + } + if (range.delay == 0 || range.delay >= INT64_MAX) { + _dispatch_trace_next_timer_set(NULL, qos); + if (dth[tidx].dth_armed) { + _dispatch_event_loop_timer_delete(dth, tidx); + } + dth[tidx].dth_armed = false; + dth[tidx].dth_needs_program = false; + } else { + _dispatch_trace_next_timer_set(dth[tidx].dth_min[DTH_TARGET_ID], qos); + _dispatch_trace_next_timer_program(range.delay, qos); + _dispatch_event_loop_timer_arm(dth, tidx, range, nows); + dth[tidx].dth_armed = true; + dth[tidx].dth_needs_program = false; + } +} + +void +_dispatch_event_loop_drain_timers(dispatch_timer_heap_t dth, uint32_t count) +{ + dispatch_clock_now_cache_s nows = { }; + uint32_t tidx; + + do { + for (tidx = 0; tidx < count; tidx++) { + _dispatch_timers_run(dth, tidx, &nows); + } + +#if DISPATCH_USE_DTRACE + uint32_t mask = dth[0].dth_dirty_bits & DTH_DIRTY_QOS_MASK; + while (mask && DISPATCH_TIMER_WAKE_ENABLED()) { + int qos = __builtin_ctz(mask); + mask -= 1 << qos; + _dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]); + } +#endif // DISPATCH_USE_DTRACE + + dth[0].dth_dirty_bits = 0; + + for (tidx = 0; tidx < count; tidx++) { + if 
(dth[tidx].dth_needs_program) { + _dispatch_timers_program(dth, tidx, &nows); + } + } + + /* + * Note: dth_dirty_bits being set again can happen if we notice + * a new configuration during _dispatch_timers_run() that causes + * the timer to change clocks for a bucket we already drained. + * + * This is however extremely unlikely, and because we drain relatively + * to a constant cached "now", this will converge quickly. + */ + } while (unlikely(dth[0].dth_dirty_bits)); +} diff --git a/src/event/event_config.h b/src/event/event_config.h index 60f776f95..ca0e368f7 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -74,6 +74,12 @@ #endif #if DISPATCH_EVENT_BACKEND_KEVENT +# if defined(EV_UDATA_SPECIFIC) && EV_UDATA_SPECIFIC +# define DISPATCH_HAVE_DIRECT_KNOTES 1 +# else +# define DISPATCH_HAVE_DIRECT_KNOTES 0 +# endif + # if defined(EV_SET_QOS) # define DISPATCH_USE_KEVENT_QOS 1 # ifndef KEVENT_FLAG_IMMEDIATE @@ -147,6 +153,7 @@ # define DISPATCH_HAVE_TIMER_QOS 0 # define DISPATCH_HAVE_TIMER_COALESCING 0 # define KEVENT_FLAG_IMMEDIATE 0x001 +# define DISPATCH_HAVE_DIRECT_KNOTES 0 #endif // !DISPATCH_EVENT_BACKEND_KEVENT #ifdef EV_UDATA_SPECIFIC @@ -161,11 +168,11 @@ #define DISPATCH_EV_MSG_NEEDS_FREE 0x10000 // mach message needs to be freed() #define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) -#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 2) -#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 3) -#define DISPATCH_EVFILT_CUSTOM_REPLACE (-EVFILT_SYSCOUNT - 4) -#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 5) -#define DISPATCH_EVFILT_SYSCOUNT ( EVFILT_SYSCOUNT + 5) +#define DISPATCH_EVFILT_TIMER_WITH_CLOCK (-EVFILT_SYSCOUNT - 2) +#define DISPATCH_EVFILT_CUSTOM_ADD (-EVFILT_SYSCOUNT - 3) +#define DISPATCH_EVFILT_CUSTOM_OR (-EVFILT_SYSCOUNT - 4) +#define DISPATCH_EVFILT_CUSTOM_REPLACE (-EVFILT_SYSCOUNT - 5) +#define DISPATCH_EVFILT_MACH_NOTIFICATION (-EVFILT_SYSCOUNT - 6) #if HAVE_MACH # if !EV_UDATA_SPECIFIC diff --git 
a/src/event/event_epoll.c b/src/event/event_epoll.c index add4dde65..a99eb5dc8 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -38,21 +38,23 @@ #define DISPATCH_EPOLL_MAX_EVENT_COUNT 16 enum { - DISPATCH_EPOLL_EVENTFD = 0x0001, - DISPATCH_EPOLL_CLOCK_WALL = 0x0002, - DISPATCH_EPOLL_CLOCK_MACH = 0x0003, + DISPATCH_EPOLL_EVENTFD = 0x0001, + DISPATCH_EPOLL_CLOCK_WALL = 0x0002, + DISPATCH_EPOLL_CLOCK_UPTIME = 0x0003, + DISPATCH_EPOLL_CLOCK_MONOTONIC = 0x0004, }; typedef struct dispatch_muxnote_s { - TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; - TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; - TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; - int dmn_fd; - uint32_t dmn_ident; - uint32_t dmn_events; - int16_t dmn_filter; - bool dmn_skip_outq_ioctl; - bool dmn_skip_inq_ioctl; + LIST_ENTRY(dispatch_muxnote_s) dmn_list; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; + int dmn_fd; + uint32_t dmn_ident; + uint32_t dmn_events; + uint16_t dmn_disarmed_events; + int8_t dmn_filter; + bool dmn_skip_outq_ioctl : 1; + bool dmn_skip_inq_ioctl : 1; } *dispatch_muxnote_t; typedef struct dispatch_epoll_timeout_s { @@ -67,8 +69,7 @@ static int _dispatch_epfd, _dispatch_eventfd; static dispatch_once_t epoll_init_pred; static void _dispatch_epoll_init(void *); -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) +static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) _dispatch_sources[DSL_HASH_SIZE]; #define DISPATCH_EPOLL_TIMEOUT_INITIALIZER(clock) \ @@ -78,11 +79,19 @@ _dispatch_sources[DSL_HASH_SIZE]; } static struct dispatch_epoll_timeout_s _dispatch_epoll_timeout[] = { DISPATCH_EPOLL_TIMEOUT_INITIALIZER(WALL), - DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MACH), + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(UPTIME), + DISPATCH_EPOLL_TIMEOUT_INITIALIZER(MONOTONIC), }; #pragma mark dispatch_muxnote_t +DISPATCH_ALWAYS_INLINE +static inline uint32_t 
+_dispatch_muxnote_armed_events(dispatch_muxnote_t dmn) +{ + return dmn->dmn_events & ~dmn->dmn_disarmed_events; +} + DISPATCH_ALWAYS_INLINE static inline struct dispatch_muxnote_bucket_s * _dispatch_muxnote_bucket(uint32_t ident) @@ -95,11 +104,11 @@ _dispatch_muxnote_bucket(uint32_t ident) DISPATCH_ALWAYS_INLINE static inline dispatch_muxnote_t _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, - uint32_t ident, int16_t filter) + uint32_t ident, int8_t filter) { dispatch_muxnote_t dmn; if (filter == EVFILT_WRITE) filter = EVFILT_READ; - TAILQ_FOREACH(dmn, dmb, dmn_list) { + LIST_FOREACH(dmn, dmb, dmn_list) { if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) { break; } @@ -143,7 +152,7 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) dispatch_muxnote_t dmn; struct stat sb; int fd = (int)du._du->du_ident; - int16_t filter = du._du->du_filter; + int8_t filter = du._du->du_filter; bool skip_outq_ioctl = false, skip_inq_ioctl = false; sigset_t sigmask; @@ -193,8 +202,8 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) } dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s)); - TAILQ_INIT(&dmn->dmn_readers_head); - TAILQ_INIT(&dmn->dmn_writers_head); + LIST_INIT(&dmn->dmn_readers_head); + LIST_INIT(&dmn->dmn_writers_head); dmn->dmn_fd = fd; dmn->dmn_ident = du._du->du_ident; dmn->dmn_filter = filter; @@ -207,33 +216,27 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) #pragma mark dispatch_unote_t static int -_dispatch_epoll_update(dispatch_muxnote_t dmn, int op) +_dispatch_epoll_update(dispatch_muxnote_t dmn, uint32_t events, int op) { dispatch_once_f(&epoll_init_pred, NULL, _dispatch_epoll_init); struct epoll_event ev = { - .events = dmn->dmn_events, + .events = events, .data = { .ptr = dmn }, }; return epoll_ctl(_dispatch_epfd, op, dmn->dmn_fd, &ev); } -bool -_dispatch_unote_register(dispatch_unote_t du, - DISPATCH_UNUSED dispatch_wlh_t wlh, dispatch_priority_t pri) +DISPATCH_ALWAYS_INLINE 
+static inline uint32_t +_dispatch_unote_required_events(dispatch_unote_t du) { - struct dispatch_muxnote_bucket_s *dmb; - dispatch_muxnote_t dmn; uint32_t events = EPOLLFREE; - dispatch_assert(!_dispatch_unote_registered(du)); - du._du->du_priority = pri; - switch (du._du->du_filter) { case DISPATCH_EVFILT_CUSTOM_ADD: case DISPATCH_EVFILT_CUSTOM_OR: case DISPATCH_EVFILT_CUSTOM_REPLACE: - du._du->du_wlh = DISPATCH_WLH_ANON; - return true; + return 0; case EVFILT_WRITE: events |= EPOLLOUT; break; @@ -242,97 +245,112 @@ _dispatch_unote_register(dispatch_unote_t du, break; } - if (du._du->du_type->dst_flags & EV_DISPATCH) { + if (dux_type(du._du)->dst_flags & EV_DISPATCH) { events |= EPOLLONESHOT; } + return events; +} + +bool +_dispatch_unote_register_muxed(dispatch_unote_t du) +{ + struct dispatch_muxnote_bucket_s *dmb; + dispatch_muxnote_t dmn; + uint32_t events; + + events = _dispatch_unote_required_events(du); + du._du->du_priority = pri; + dmb = _dispatch_unote_muxnote_bucket(du); dmn = _dispatch_unote_muxnote_find(dmb, du); if (dmn) { - events &= ~dmn->dmn_events; - if (events) { - dmn->dmn_events |= events; - if (_dispatch_epoll_update(dmn, EPOLL_CTL_MOD) < 0) { - dmn->dmn_events &= ~events; + if (events & ~_dispatch_muxnote_armed_events(dmn)) { + events |= _dispatch_muxnote_armed_events(dmn); + if (_dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD) < 0) { dmn = NULL; + } else { + dmn->dmn_events |= events; + dmn->dmn_disarmed_events &= ~events; } } } else { dmn = _dispatch_muxnote_create(du, events); - if (_dispatch_epoll_update(dmn, EPOLL_CTL_ADD) < 0) { - _dispatch_muxnote_dispose(dmn); - dmn = NULL; - } else { - TAILQ_INSERT_TAIL(dmb, dmn, dmn_list); + if (dmn) { + if (_dispatch_epoll_update(dmn, events, EPOLL_CTL_ADD) < 0) { + _dispatch_muxnote_dispose(dmn); + dmn = NULL; + } else { + LIST_INSERT_HEAD(dmb, dmn, dmn_list); + } } } if (dmn) { dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); if (events & EPOLLOUT) { - 
TAILQ_INSERT_TAIL(&dmn->dmn_writers_head, dul, du_link); + LIST_INSERT_HEAD(&dmn->dmn_writers_head, dul, du_link); } else { - TAILQ_INSERT_TAIL(&dmn->dmn_readers_head, dul, du_link); + LIST_INSERT_HEAD(&dmn->dmn_readers_head, dul, du_link); } dul->du_muxnote = dmn; - dispatch_assert(du._du->du_wlh == NULL); - du._du->du_wlh = DISPATCH_WLH_ANON; + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); } return dmn != NULL; } void -_dispatch_unote_resume(dispatch_unote_t du) +_dispatch_unote_resume_muxed(dispatch_unote_t du) { dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(du)->du_muxnote; dispatch_assert(_dispatch_unote_registered(du)); + uint32_t events = _dispatch_unote_required_events(du); - _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); + if (events & dmn->dmn_disarmed_events) { + dmn->dmn_disarmed_events &= ~events; + events = _dispatch_muxnote_armed_events(dmn); + _dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD); + } } bool -_dispatch_unote_unregister(dispatch_unote_t du, DISPATCH_UNUSED uint32_t flags) +_dispatch_unote_unregister_muxed(dispatch_unote_t du) { - switch (du._du->du_filter) { - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_CUSTOM_REPLACE: - du._du->du_wlh = NULL; - return true; - } - if (_dispatch_unote_registered(du)) { - dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); - dispatch_muxnote_t dmn = dul->du_muxnote; - uint32_t events = dmn->dmn_events; - - if (du._du->du_filter == EVFILT_WRITE) { - TAILQ_REMOVE(&dmn->dmn_writers_head, dul, du_link); - } else { - TAILQ_REMOVE(&dmn->dmn_readers_head, dul, du_link); + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + uint32_t events = dmn->dmn_events; + + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); + dul->du_muxnote = NULL; + + if (LIST_EMPTY(&dmn->dmn_readers_head)) { + events &= (uint32_t)~EPOLLIN; + if (dmn->dmn_disarmed_events & EPOLLIN) { + 
dmn->dmn_disarmed_events &= (uint16_t)~EPOLLIN; + dmn->dmn_events &= (uint32_t)~EPOLLIN; } - _TAILQ_TRASH_ENTRY(dul, du_link); - dul->du_muxnote = NULL; - - if (TAILQ_EMPTY(&dmn->dmn_readers_head)) { - events &= (uint32_t)(~EPOLLIN); - } - if (TAILQ_EMPTY(&dmn->dmn_writers_head)) { - events &= (uint32_t)(~EPOLLOUT); + } + if (LIST_EMPTY(&dmn->dmn_writers_head)) { + events &= (uint32_t)~EPOLLOUT; + if (dmn->dmn_disarmed_events & EPOLLOUT) { + dmn->dmn_disarmed_events &= (uint16_t)~EPOLLOUT; + dmn->dmn_events &= (uint32_t)~EPOLLOUT; } + } - if (events == dmn->dmn_events) { - // nothing to do - } else if (events & (EPOLLIN | EPOLLOUT)) { + if (events & (EPOLLIN | EPOLLOUT)) { + if (events != _dispatch_muxnote_armed_events(dmn)) { dmn->dmn_events = events; - _dispatch_epoll_update(dmn, EPOLL_CTL_MOD); - } else { - epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); - TAILQ_REMOVE(_dispatch_unote_muxnote_bucket(du), dmn, dmn_list); - _dispatch_muxnote_dispose(dmn); + events = _dispatch_muxnote_armed_events(dmn); + _dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD); } - dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON); - du._du->du_wlh = NULL; + } else { + epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); + LIST_REMOVE(dmn, dmn_list); + _dispatch_muxnote_dispose(dmn); } + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); return true; } @@ -341,13 +359,14 @@ _dispatch_unote_unregister(dispatch_unote_t du, DISPATCH_UNUSED uint32_t flags) static void _dispatch_event_merge_timer(dispatch_clock_t clock) { - _dispatch_timers_expired = true; - _dispatch_timers_processing_mask |= 1 << DISPATCH_TIMER_INDEX(clock, 0); -#if DISPATCH_USE_DTRACE - _dispatch_timers_will_wake |= 1 << 0; -#endif + dispatch_timer_heap_t dth = _dispatch_timers_heap; + uint32_t tidx = DISPATCH_TIMER_INDEX(clock, 0); + _dispatch_epoll_timeout[clock].det_armed = false; - _dispatch_timers_heap[clock].dth_flags &= ~DTH_ARMED; + + _dispatch_timers_heap_dirty(dth, tidx); + 
dth[tidx].dth_needs_program = true; + dth[tidx].dth_armed = false; } static void @@ -370,9 +389,12 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, clockid_t clockid; int fd; switch (DISPATCH_TIMER_CLOCK(tidx)) { - case DISPATCH_CLOCK_MACH: + case DISPATCH_CLOCK_UPTIME: clockid = CLOCK_MONOTONIC; break; + case DISPATCH_CLOCK_MONOTONIC: + clockid = CLOCK_BOOTTIME; + break; case DISPATCH_CLOCK_WALL: clockid = CLOCK_REALTIME; break; @@ -406,19 +428,17 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, } void -_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range, - dispatch_clock_now_cache_t nows) +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, + dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows) { - uint64_t target = range.delay; - target += _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); - _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED; + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + uint64_t target = range.delay + _dispatch_time_now_cached(clock, nows); _dispatch_timeout_program(tidx, target, range.leeway); } void -_dispatch_event_loop_timer_delete(uint32_t tidx) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth, uint32_t tidx) { - _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX); } @@ -434,11 +454,6 @@ _dispatch_epoll_init(void *context DISPATCH_UNUSED) { _dispatch_fork_becomes_unsafe(); - unsigned int i; - for (i = 0; i < DSL_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_sources[i]); - } - _dispatch_epfd = epoll_create1(EPOLL_CLOEXEC); if (_dispatch_epfd < 0) { DISPATCH_INTERNAL_CRASH(errno, "epoll_create1() failed"); @@ -459,6 +474,7 @@ _dispatch_epoll_init(void *context DISPATCH_UNUSED) } #if DISPATCH_USE_MGR_THREAD + _dispatch_trace_item_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); #endif } @@ -488,9 +504,13 @@ 
_dispatch_event_merge_signal(dispatch_muxnote_t dmn) // will kick in, the thread with the wrong mask will be fixed up, and the // signal delivered to us again properly. if ((rc = read(dmn->dmn_fd, &si, sizeof(si))) == sizeof(si)) { - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0, 0); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(!dux_needs_rearm(du._du)); + os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed) + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0); } } else { dispatch_assume(rc == -1 && errno == EAGAIN); @@ -533,21 +553,36 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) dispatch_unote_linkage_t dul, dul_next; uintptr_t data; + dmn->dmn_disarmed_events |= (events & (EPOLLIN | EPOLLOUT)); + if (events & EPOLLIN) { data = _dispatch_get_buffer_size(dmn, false); - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); } } if (events & EPOLLOUT) { data = _dispatch_get_buffer_size(dmn, true); - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, ~data, 0, 0); + // consumed by 
dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); } } + + events = _dispatch_muxnote_armed_events(dmn); + if (events) _dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD); } DISPATCH_NOINLINE @@ -593,7 +628,7 @@ _dispatch_event_loop_drain(uint32_t flags) break; case DISPATCH_EPOLL_CLOCK_MACH: - _dispatch_event_merge_timer(DISPATCH_CLOCK_MACH); + _dispatch_event_merge_timer(DISPATCH_CLOCK_UPTIME); break; default: @@ -611,6 +646,12 @@ _dispatch_event_loop_drain(uint32_t flags) } } +void +_dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) +{ + (void)dsc; +} + void _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) @@ -642,9 +683,9 @@ _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) #endif void -_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +_dispatch_event_loop_leave_immediate(uint64_t dq_state) { - (void)wlh; (void)dq_state; + (void)dq_state; } #endif // DISPATCH_EVENT_BACKEND_EPOLL diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 842c4ee5b..ff561ced5 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -29,29 +29,120 @@ #include "event_config.h" +/* + * The unote state has 3 pieces of information and reflects the state + * of the unote registration and mirrors the state of the knote if any. + * + * This state is peculiar in the sense that it can be read concurrently, but + * is never written to concurrently. This is achieved by serializing through + * kevent calls from appropriate synchronization context (referred as `dkq` + * for dispatch kevent queue in the dispatch source code). 
+ * + * DU_STATE_ARMED + * + * This bit represents the fact that the registration is active and may + * receive events at any given time. This bit can only be set if the WLH bits + * are set and the DU_STATE_NEEDS_DELETE bit is not. + * + * DU_STATE_NEEDS_DELETE + * + * The kernel has indicated that it wants the next event for this unote to be + * an unregistration. This bit can only be set if the DU_STATE_ARMED bit is + * not set. + * + * DU_STATE_NEEDS_DELETE may be the only bit set in the unote state + * + * DU_STATE_WLH_MASK + * + * The most significant bits of du_state represent which event loop this unote + * is registered with, and has a storage reference on it taken with + * _dispatch_wlh_retain(). + * + * Registration + * + * Unote registration attempt is made with _dispatch_unote_register(). + * On succes, it will set the WLH bits and the DU_STATE_ARMED bit, on failure + * the state is 0. + * + * _dispatch_unote_register() must be called from the appropriate + * synchronization context depending on the unote type. + * + * Event delivery + * + * When an event is delivered for a unote type that requires explicit + * re-arming (EV_DISPATCH or EV_ONESHOT), the DU_STATE_ARMED bit is cleared. + * If the event is marked as EV_ONESHOT, then the DU_STATE_NEEDS_DELETE bit + * is also set, initiating the "deferred delete" state machine. + * + * For other unote types, the state isn't touched, unless the event is + * EV_ONESHOT, in which case it causes an automatic unregistration. + * + * Unregistration + * + * The unote owner can attempt unregistering the unote with + * _dispatch_unote_unregister() from the proper synchronization context + * at any given time. When successful, the state will be set to 0 and the + * unote is no longer active. Unregistration is always successful for events + * that don't require explcit re-arming. + * + * When this unregistration fails, then the unote owner must wait for the + * next event delivery for this unote. 
+ */ +typedef uintptr_t dispatch_unote_state_t; +#define DU_STATE_ARMED ((dispatch_unote_state_t)0x1ul) +#define DU_STATE_NEEDS_DELETE ((dispatch_unote_state_t)0x2ul) +#define DU_STATE_WLH_MASK ((dispatch_unote_state_t)~0x3ul) +#define DU_STATE_UNREGISTERED ((dispatch_unote_state_t)0) + struct dispatch_sync_context_s; typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle -#define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0ul)) -#define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~2ul)) +#define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0x3ul)) +#define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~0x7ul)) + +DISPATCH_ENUM(dispatch_unote_timer_flags, uint8_t, + /* DISPATCH_TIMER_STRICT 0x1 */ + /* DISPATCH_TIMER_BACKGROUND = 0x2, */ + DISPATCH_TIMER_CLOCK_UPTIME = DISPATCH_CLOCK_UPTIME << 2, + DISPATCH_TIMER_CLOCK_MONOTONIC = DISPATCH_CLOCK_MONOTONIC << 2, + DISPATCH_TIMER_CLOCK_WALL = DISPATCH_CLOCK_WALL << 2, +#define _DISPATCH_TIMER_CLOCK_MASK (0x3 << 2) + DISPATCH_TIMER_INTERVAL = 0x10, + /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ // See source_private.h + DISPATCH_TIMER_AFTER = 0x40, +); -#define DISPATCH_UNOTE_DATA_ACTION_SIZE 2 +static inline dispatch_clock_t +_dispatch_timer_flags_to_clock(dispatch_unote_timer_flags_t flags) +{ + return (dispatch_clock_t)((flags & _DISPATCH_TIMER_CLOCK_MASK) >> 2); +} + +static inline dispatch_unote_timer_flags_t +_dispatch_timer_flags_from_clock(dispatch_clock_t clock) +{ + return (dispatch_unote_timer_flags_t)(clock << 2); +} #define DISPATCH_UNOTE_CLASS_HEADER() \ dispatch_source_type_t du_type; \ uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ - dispatch_wlh_t du_wlh; \ + os_atomic(dispatch_unote_state_t) du_state; \ uint32_t du_ident; \ int8_t du_filter; \ - os_atomic(bool) dmsr_notification_armed; \ - uint16_t du_data_action : DISPATCH_UNOTE_DATA_ACTION_SIZE; \ - uint16_t du_is_direct : 1; \ - uint16_t du_is_timer : 1; \ - uint16_t du_memorypressure_override : 1; \ - 
uint16_t du_vmpressure_override : 1; \ - uint16_t du_can_be_wlh : 1; \ - uint16_t dmr_async_reply : 1; \ - uint16_t dmrr_handler_is_block : 1; \ - uint16_t du_unused : 7; \ + uint8_t du_is_direct : 1; \ + uint8_t du_is_timer : 1; \ + uint8_t du_has_extended_status : 1; \ + uint8_t du_memorypressure_override : 1; \ + uint8_t du_vmpressure_override : 1; \ + uint8_t du_can_be_wlh : 1; \ + uint8_t dmrr_handler_is_block : 1; \ + uint8_t du_unused_flag : 1; \ + union { \ + uint8_t du_timer_flags; \ + os_atomic(bool) dmsr_notification_armed; \ + bool dmr_reply_port_owned; \ + }; \ + uint8_t du_unused; \ uint32_t du_fflags; \ dispatch_priority_t du_priority @@ -60,22 +151,10 @@ typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle #define _dispatch_source_from_refs(dr) \ ((dispatch_source_t)_dispatch_wref2ptr((dr)->du_owner_wref)) -DISPATCH_ENUM(dispatch_unote_action, uint8_t, - DISPATCH_UNOTE_ACTION_DATA_OR = 0, - DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET, - DISPATCH_UNOTE_ACTION_DATA_SET, - DISPATCH_UNOTE_ACTION_DATA_ADD, - DISPATCH_UNOTE_ACTION_LAST = DISPATCH_UNOTE_ACTION_DATA_ADD -); -_Static_assert(DISPATCH_UNOTE_ACTION_LAST < - (1 << DISPATCH_UNOTE_DATA_ACTION_SIZE), - "DISPATCH_UNOTE_ACTION_LAST too large for du_data_action field"); - typedef struct dispatch_unote_class_s { DISPATCH_UNOTE_CLASS_HEADER(); } *dispatch_unote_class_t; - enum { DS_EVENT_HANDLER = 0, DS_CANCEL_HANDLER, @@ -84,7 +163,23 @@ enum { #define DISPATCH_SOURCE_REFS_HEADER() \ DISPATCH_UNOTE_CLASS_HEADER(); \ - struct dispatch_continuation_s *volatile ds_handler[3] + struct dispatch_continuation_s *volatile ds_handler[3]; \ + uint64_t ds_data DISPATCH_ATOMIC64_ALIGN; \ + uint64_t ds_pending_data DISPATCH_ATOMIC64_ALIGN + + +// Extracts source data from the ds_data field +#define DISPATCH_SOURCE_GET_DATA(d) ((d) & 0xFFFFFFFF) + +// Extracts status from the ds_data field +#define DISPATCH_SOURCE_GET_STATUS(d) ((d) >> 32) + +// Combine data and status for the ds_data field +#define 
DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status) \ + ((((uint64_t)(status)) << 32) | (data)) + +#define DISPATCH_TIMER_DISARMED_MARKER 1ul + // Source state which may contain references to the source object // Separately allocated so that 'leaks' can see sources @@ -125,11 +220,14 @@ typedef struct dispatch_timer_source_refs_s { } *dispatch_timer_source_refs_t; typedef struct dispatch_timer_heap_s { - uint64_t dth_target, dth_deadline; uint32_t dth_count; - uint16_t dth_segments; -#define DTH_ARMED 1u - uint16_t dth_flags; + uint8_t dth_segments; + uint8_t dth_max_qos; +#define DTH_DIRTY_GLOBAL 0x80 +#define DTH_DIRTY_QOS_MASK ((1u << DISPATCH_TIMER_QOS_COUNT) - 1) + uint8_t dth_dirty_bits; // Only used in the first heap + uint8_t dth_armed : 1; + uint8_t dth_needs_program : 1; dispatch_timer_source_refs_t dth_min[DTH_ID_COUNT]; void **dth_heap; } *dispatch_timer_heap_t; @@ -154,15 +252,21 @@ typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; struct dispatch_mach_reply_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); - dispatch_priority_t dmr_priority; + pthread_priority_t dmr_priority : 32; void *dmr_ctxt; voucher_t dmr_voucher; - TAILQ_ENTRY(dispatch_mach_reply_refs_s) dmr_list; - mach_port_t dmr_waiter_tid; + LIST_ENTRY(dispatch_mach_reply_refs_s) dmr_list; }; typedef struct dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; -#define _DISPATCH_MACH_STATE_UNUSED_MASK 0xffffffa000000000ull +struct dispatch_mach_reply_wait_refs_s { + struct dispatch_mach_reply_refs_s dwr_refs; + mach_port_t dwr_waiter_tid; +}; +typedef struct dispatch_mach_reply_wait_refs_s *dispatch_mach_reply_wait_refs_t; + +#define _DISPATCH_MACH_STATE_UNUSED_MASK 0xffffff8000000000ull +#define DISPATCH_MACH_STATE_ENQUEUED 0x0000008000000000ull #define DISPATCH_MACH_STATE_DIRTY 0x0000002000000000ull #define DISPATCH_MACH_STATE_PENDING_BARRIER 0x0000001000000000ull #define DISPATCH_MACH_STATE_RECEIVED_OVERRIDE 0x0000000800000000ull @@ -172,23 +276,22 @@ typedef struct 
dispatch_mach_reply_refs_s *dispatch_mach_reply_refs_t; struct dispatch_mach_send_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); - dispatch_mach_msg_t dmsr_checkin; - TAILQ_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies; dispatch_unfair_lock_s dmsr_replies_lock; -#define DISPATCH_MACH_DISCONNECT_MAGIC_BASE (0x80000000) -#define DISPATCH_MACH_NEVER_INSTALLED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 0) -#define DISPATCH_MACH_NEVER_CONNECTED (DISPATCH_MACH_DISCONNECT_MAGIC_BASE + 1) - uint32_t volatile dmsr_disconnect_cnt; + dispatch_mach_msg_t dmsr_checkin; + LIST_HEAD(, dispatch_mach_reply_refs_s) dmsr_replies; +#define DISPATCH_MACH_NEVER_CONNECTED 0x80000000 DISPATCH_UNION_LE(uint64_t volatile dmsr_state, dispatch_unfair_lock_s dmsr_state_lock, uint32_t dmsr_state_bits ) DISPATCH_ATOMIC64_ALIGN; struct dispatch_object_s *volatile dmsr_tail; struct dispatch_object_s *volatile dmsr_head; + uint32_t volatile dmsr_disconnect_cnt; mach_port_t dmsr_send, dmsr_checkin_port; }; typedef struct dispatch_mach_send_refs_s *dispatch_mach_send_refs_t; +bool _dispatch_mach_notification_armed(dispatch_mach_send_refs_t dmsr); void _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr); struct dispatch_xpc_term_refs_s { @@ -211,7 +314,7 @@ typedef union dispatch_unote_u { #define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL }) -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE #define DSL_HASH_SIZE 64u // must be a power of two #else #define DSL_HASH_SIZE 256u // must be a power of two @@ -219,26 +322,33 @@ typedef union dispatch_unote_u { #define DSL_HASH(x) ((x) & (DSL_HASH_SIZE - 1)) typedef struct dispatch_unote_linkage_s { - TAILQ_ENTRY(dispatch_unote_linkage_s) du_link; + LIST_ENTRY(dispatch_unote_linkage_s) du_link; struct dispatch_muxnote_s *du_muxnote; } DISPATCH_ATOMIC64_ALIGN *dispatch_unote_linkage_t; -#define DU_UNREGISTER_IMMEDIATE_DELETE 0x01 -#define DU_UNREGISTER_ALREADY_DELETED 0x02 -#define DU_UNREGISTER_DISCONNECTED 0x04 -#define 
DU_UNREGISTER_REPLY_REMOVE 0x08 +DISPATCH_ENUM(dispatch_unote_action, uint8_t, + DISPATCH_UNOTE_ACTION_PASS_DATA, // pass ke->data + DISPATCH_UNOTE_ACTION_PASS_FFLAGS, // pass ke->fflags + DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, // ds_pending_data |= ke->fflags + DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, // ds_pending_data = ~ke->data + DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA, // ds_pending_data += ke->data + DISPATCH_UNOTE_ACTION_SOURCE_TIMER, // timer +); typedef struct dispatch_source_type_s { const char *dst_kind; int8_t dst_filter; + dispatch_unote_action_t dst_action; uint8_t dst_per_trigger_qos : 1; + uint8_t dst_strict : 1; + uint8_t dst_timer_flags; uint16_t dst_flags; +#if DISPATCH_EVENT_BACKEND_KEVENT + uint16_t dst_data; +#endif uint32_t dst_fflags; uint32_t dst_mask; uint32_t dst_size; -#if DISPATCH_EVENT_BACKEND_KEVENT - uint32_t dst_data; -#endif dispatch_unote_t (*dst_create)(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); @@ -246,25 +356,30 @@ typedef struct dispatch_source_type_s { bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn); #endif void (*dst_merge_evt)(dispatch_unote_t du, uint32_t flags, uintptr_t data, - uintptr_t status, pthread_priority_t pp); + pthread_priority_t pp); #if HAVE_MACH void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t sz); + mach_msg_header_t *msg, mach_msg_size_t sz, + pthread_priority_t msg_pp, pthread_priority_t override_pp); #endif } dispatch_source_type_s; #define dux_create(dst, handle, mask) (dst)->dst_create(dst, handle, mask) -#define dux_merge_evt(du, ...) (du)->du_type->dst_merge_evt(du, __VA_ARGS__) -#define dux_merge_msg(du, ...) (du)->du_type->dst_merge_msg(du, __VA_ARGS__) +#define dux_type(du) (du)->du_type +#define dux_needs_rearm(du) (dux_type(du)->dst_flags & (EV_ONESHOT | EV_DISPATCH)) +#define dux_merge_evt(du, ...) dux_type(du)->dst_merge_evt(du, __VA_ARGS__) +#define dux_merge_msg(du, ...) 
dux_type(du)->dst_merge_msg(du, __VA_ARGS__) extern const dispatch_source_type_s _dispatch_source_type_after; #if HAVE_MACH -extern const dispatch_source_type_s _dispatch_source_type_mach_recv_direct; +extern const dispatch_source_type_s _dispatch_mach_type_notification; extern const dispatch_source_type_s _dispatch_mach_type_send; extern const dispatch_source_type_s _dispatch_mach_type_recv; extern const dispatch_source_type_s _dispatch_mach_type_reply; extern const dispatch_source_type_s _dispatch_xpc_type_sigterm; +extern const dispatch_source_type_s _dispatch_source_type_timer_with_clock; +#define DISPATCH_MACH_TYPE_WAITER ((const dispatch_source_type_s *)-2) #endif #pragma mark - @@ -282,9 +397,10 @@ typedef dispatch_kevent_s *dispatch_kevent_t; #define DISPATCH_DEFERRED_ITEMS_EVENT_COUNT 16 typedef struct dispatch_deferred_items_s { - dispatch_queue_t ddi_stashed_rq; + dispatch_queue_global_t ddi_stashed_rq; dispatch_object_t ddi_stashed_dou; dispatch_qos_t ddi_stashed_qos; + dispatch_wlh_t ddi_wlh; #if DISPATCH_EVENT_BACKEND_KEVENT dispatch_kevent_t ddi_eventlist; uint16_t ddi_nevents; @@ -337,18 +453,93 @@ _dispatch_clear_return_to_kernel(void) _dispatch_thread_setspecific(dispatch_r2k_key, (void *)0); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_wlh_t +_du_state_wlh(dispatch_unote_state_t du_state) +{ + return (dispatch_wlh_t)(du_state & DU_STATE_WLH_MASK); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_du_state_registered(dispatch_unote_state_t du_state) +{ + return du_state != DU_STATE_UNREGISTERED; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_du_state_armed(dispatch_unote_state_t du_state) +{ + return du_state & DU_STATE_ARMED; +} + DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_unote_registered(dispatch_unote_t du) +_du_state_needs_delete(dispatch_unote_state_t du_state) { - return du._du->du_wlh != NULL; + return du_state & DU_STATE_NEEDS_DELETE; +} + +DISPATCH_ALWAYS_INLINE +static inline bool 
+_du_state_needs_rearm(dispatch_unote_state_t du_state) +{ + return _du_state_registered(du_state) && !_du_state_armed(du_state) && + !_du_state_needs_delete(du_state); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_unote_state_t +_dispatch_unote_state(dispatch_unote_t du) +{ + return os_atomic_load(&du._du->du_state, relaxed); +} +#define _dispatch_unote_wlh(du) \ + _du_state_wlh(_dispatch_unote_state(du)) +#define _dispatch_unote_registered(du) \ + _du_state_registered(_dispatch_unote_state(du)) +#define _dispatch_unote_armed(du) \ + _du_state_armed(_dispatch_unote_state(du)) +#define _dispatch_unote_needs_delete(du) \ + _du_state_needs_delete(_dispatch_unote_state(du)) +#define _dispatch_unote_needs_rearm(du) \ + _du_state_needs_rearm(_dispatch_unote_state(du)) + +DISPATCH_ALWAYS_INLINE DISPATCH_OVERLOADABLE +static inline void +_dispatch_unote_state_set(dispatch_unote_t du, dispatch_unote_state_t value) +{ + os_atomic_store(&du._du->du_state, value, relaxed); +} + +DISPATCH_ALWAYS_INLINE DISPATCH_OVERLOADABLE +static inline void +_dispatch_unote_state_set(dispatch_unote_t du, dispatch_wlh_t wlh, + dispatch_unote_state_t bits) +{ + _dispatch_unote_state_set(du, (dispatch_unote_state_t)wlh | bits); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unote_state_set_bit(dispatch_unote_t du, dispatch_unote_state_t bit) +{ + _dispatch_unote_state_set(du, _dispatch_unote_state(du) | bit); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_unote_state_clear_bit(dispatch_unote_t du, dispatch_unote_state_t bit) +{ + _dispatch_unote_state_set(du, _dispatch_unote_state(du) & ~bit); } DISPATCH_ALWAYS_INLINE static inline bool _dispatch_unote_wlh_changed(dispatch_unote_t du, dispatch_wlh_t expected_wlh) { - dispatch_wlh_t wlh = du._du->du_wlh; + dispatch_wlh_t wlh = _dispatch_unote_wlh(du); return wlh && wlh != DISPATCH_WLH_ANON && wlh != expected_wlh; } @@ -361,13 +552,6 @@ _dispatch_unote_get_linkage(dispatch_unote_t du) - sizeof(struct 
dispatch_unote_linkage_s)); } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_unote_needs_rearm(dispatch_unote_t du) -{ - return du._du->du_type->dst_flags & (EV_ONESHOT | EV_DISPATCH); -} - DISPATCH_ALWAYS_INLINE static inline dispatch_unote_t _dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul) @@ -377,6 +561,27 @@ _dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul) #endif // DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE +static inline unsigned long +_dispatch_timer_unote_compute_missed(dispatch_timer_source_refs_t dt, + uint64_t now, unsigned long prev) +{ + uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval; + if (++missed + prev > LONG_MAX) { + missed = LONG_MAX - prev; + } + if (dt->dt_timer.interval < INT64_MAX) { + uint64_t push_by = missed * dt->dt_timer.interval; + dt->dt_timer.target += push_by; + dt->dt_timer.deadline += push_by; + } else { + dt->dt_timer.target = UINT64_MAX; + dt->dt_timer.deadline = UINT64_MAX; + } + prev += missed; + return prev; +} + #pragma mark - #pragma mark prototypes @@ -390,20 +595,19 @@ _dispatch_unote_linkage_get_unote(dispatch_unote_linkage_t dul) #define DISPATCH_TIMER_QOS_COUNT 1u #endif -#define DISPATCH_TIMER_QOS(tidx) (((uintptr_t)(tidx) >> 1) & 3u) -#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) & 1u) +#define DISPATCH_TIMER_QOS(tidx) ((uint32_t)(tidx) % DISPATCH_TIMER_QOS_COUNT) +#define DISPATCH_TIMER_CLOCK(tidx) (dispatch_clock_t)((tidx) / DISPATCH_TIMER_QOS_COUNT) -#define DISPATCH_TIMER_INDEX(clock, qos) ((qos) << 1 | (clock)) +#define DISPATCH_TIMER_INDEX(clock, qos) (((clock) * DISPATCH_TIMER_QOS_COUNT) + (qos)) #define DISPATCH_TIMER_COUNT \ - DISPATCH_TIMER_INDEX(0, DISPATCH_TIMER_QOS_COUNT) + DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_COUNT, 0) +// Workloops do not support optimizing WALL timers +#define DISPATCH_TIMER_WLH_COUNT \ + DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, 0) + #define DISPATCH_TIMER_IDENT_CANCELED (~0u) extern struct dispatch_timer_heap_s 
_dispatch_timers_heap[DISPATCH_TIMER_COUNT]; -extern bool _dispatch_timers_reconfigure, _dispatch_timers_expired; -extern uint32_t _dispatch_timers_processing_mask; -#if DISPATCH_USE_DTRACE -extern uint32_t _dispatch_timers_will_wake; -#endif dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); @@ -411,18 +615,51 @@ dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); dispatch_unote_t _dispatch_unote_create_without_handle( dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); +void _dispatch_unote_dispose(dispatch_unote_t du); +/* + * @const DUU_DELETE_ACK + * Unregistration can acknowledge the "needs-delete" state of a unote. + * There must be some sort of synchronization between callers passing this flag + * for a given unote. + * + * @const DUU_PROBE + * This flag is passed for the first unregistration attempt of a unote. + * When passed, it allows the unregistration to speculatively try to do the + * unregistration syscalls and maybe get lucky. If the flag isn't passed, + * unregistration will preflight the attempt, and will not perform any syscall + * if it cannot guarantee their success. + * + * @const DUU_MUST_SUCCEED + * The caller expects the unregistration to always succeeed. + * _dispatch_unote_unregister will either crash or return true. 
+ */ +#define DUU_DELETE_ACK 0x1 +#define DUU_PROBE 0x2 +#define DUU_MUST_SUCCEED 0x4 +bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags); bool _dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, dispatch_priority_t pri); void _dispatch_unote_resume(dispatch_unote_t du); -bool _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags); -void _dispatch_unote_dispose(dispatch_unote_t du); + +bool _dispatch_unote_unregister_muxed(dispatch_unote_t du); +bool _dispatch_unote_register_muxed(dispatch_unote_t du); +void _dispatch_unote_resume_muxed(dispatch_unote_t du); + +#if DISPATCH_HAVE_DIRECT_KNOTES +bool _dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags); +bool _dispatch_unote_register_direct(dispatch_unote_t du, dispatch_wlh_t wlh); +void _dispatch_unote_resume_direct(dispatch_unote_t du); +#endif + +void _dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt); void _dispatch_event_loop_atfork_child(void); #define DISPATCH_EVENT_LOOP_CONSUME_2 DISPATCH_WAKEUP_CONSUME_2 #define DISPATCH_EVENT_LOOP_OVERRIDE 0x80000000 void _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags); +void _dispatch_event_loop_cancel_waiter(struct dispatch_sync_context_s *dsc); void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state); void _dispatch_event_loop_wait_for_ownership( @@ -435,15 +672,36 @@ void _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh); #undef _dispatch_event_loop_assert_not_owned #define _dispatch_event_loop_assert_not_owned(wlh) ((void)wlh) #endif -void _dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state); +void _dispatch_event_loop_leave_immediate(uint64_t dq_state); #if DISPATCH_EVENT_BACKEND_KEVENT -void _dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, +void _dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, uint64_t dq_state); void 
_dispatch_event_loop_merge(dispatch_kevent_t events, int nevents); #endif void _dispatch_event_loop_drain(uint32_t flags); -void _dispatch_event_loop_timer_arm(unsigned int tidx, + +void _dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows); -void _dispatch_event_loop_timer_delete(unsigned int tidx); +void _dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth, uint32_t tidx); + +void _dispatch_event_loop_drain_timers(dispatch_timer_heap_t dth, uint32_t count); + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_timers_heap_dirty(dispatch_timer_heap_t dth, uint32_t tidx) +{ + // Note: the dirty bits are only maintained in the first heap for any tidx + dth[0].dth_dirty_bits |= (1 << DISPATCH_TIMER_QOS(tidx)) | DTH_DIRTY_GLOBAL; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_event_loop_drain_anon_timers(void) +{ + if (_dispatch_timers_heap[0].dth_dirty_bits) { + _dispatch_event_loop_drain_timers(_dispatch_timers_heap, + DISPATCH_TIMER_COUNT); + } +} #endif /* __DISPATCH_EVENT_EVENT_INTERNAL__ */ diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 8fe76d55c..1e7cdb5bd 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -33,52 +33,40 @@ #define DISPATCH_MACH_AUDIT_TOKEN_PID (5) typedef struct dispatch_muxnote_s { - TAILQ_ENTRY(dispatch_muxnote_s) dmn_list; - TAILQ_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head; - dispatch_wlh_t dmn_wlh; - dispatch_kevent_s dmn_kev; + LIST_ENTRY(dispatch_muxnote_s) dmn_list; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head; + dispatch_kevent_s dmn_kev DISPATCH_ATOMIC64_ALIGN; } *dispatch_muxnote_t; -static bool _dispatch_timers_force_max_leeway; -static int _dispatch_kq = -1; -static struct { - dispatch_once_t pred; - dispatch_unfair_lock_s lock; -} _dispatch_muxnotes; -#if !DISPATCH_USE_KEVENT_WORKQUEUE -#define _dispatch_muxnotes_lock() \ - 
_dispatch_unfair_lock_lock(&_dispatch_muxnotes.lock) -#define _dispatch_muxnotes_unlock() \ - _dispatch_unfair_lock_unlock(&_dispatch_muxnotes.lock) -#else -#define _dispatch_muxnotes_lock() -#define _dispatch_muxnotes_unlock() -#endif // !DISPATCH_USE_KEVENT_WORKQUEUE +LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s); -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) -_dispatch_sources[DSL_HASH_SIZE]; +DISPATCH_STATIC_GLOBAL(bool _dispatch_timers_force_max_leeway); +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_kq_poll_pred); +DISPATCH_STATIC_GLOBAL(struct dispatch_muxnote_bucket_s _dispatch_sources[DSL_HASH_SIZE]); -#define DISPATCH_NOTE_CLOCK_WALL NOTE_MACH_CONTINUOUS_TIME -#define DISPATCH_NOTE_CLOCK_MACH 0 +#define DISPATCH_NOTE_CLOCK_WALL NOTE_NSECONDS | NOTE_MACH_CONTINUOUS_TIME +#define DISPATCH_NOTE_CLOCK_MONOTONIC NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME +#define DISPATCH_NOTE_CLOCK_UPTIME NOTE_MACHTIME static const uint32_t _dispatch_timer_index_to_fflags[] = { #define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \ [DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_##kind, DISPATCH_TIMER_QOS_##qos)] = \ - DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | \ - NOTE_NSECONDS | NOTE_LEEWAY | (note) + DISPATCH_NOTE_CLOCK_##kind | NOTE_ABSOLUTE | NOTE_LEEWAY | (note) DISPATCH_TIMER_FFLAGS_INIT(WALL, NORMAL, 0), - DISPATCH_TIMER_FFLAGS_INIT(MACH, NORMAL, 0), + DISPATCH_TIMER_FFLAGS_INIT(UPTIME, NORMAL, 0), + DISPATCH_TIMER_FFLAGS_INIT(MONOTONIC, NORMAL, 0), #if DISPATCH_HAVE_TIMER_QOS DISPATCH_TIMER_FFLAGS_INIT(WALL, CRITICAL, NOTE_CRITICAL), - DISPATCH_TIMER_FFLAGS_INIT(MACH, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(UPTIME, CRITICAL, NOTE_CRITICAL), + DISPATCH_TIMER_FFLAGS_INIT(MONOTONIC, CRITICAL, NOTE_CRITICAL), DISPATCH_TIMER_FFLAGS_INIT(WALL, BACKGROUND, NOTE_BACKGROUND), - DISPATCH_TIMER_FFLAGS_INIT(MACH, BACKGROUND, NOTE_BACKGROUND), + DISPATCH_TIMER_FFLAGS_INIT(UPTIME, BACKGROUND, NOTE_BACKGROUND), + 
DISPATCH_TIMER_FFLAGS_INIT(MONOTONIC, BACKGROUND, NOTE_BACKGROUND), #endif #undef DISPATCH_TIMER_FFLAGS_INIT }; -static void _dispatch_kevent_timer_drain(dispatch_kevent_t ke); +static inline void _dispatch_kevent_timer_drain(dispatch_kevent_t ke); #pragma mark - #pragma mark kevent debug @@ -113,6 +101,7 @@ _evfiltstr(short filt) #endif // DISPATCH_EVENT_BACKEND_KEVENT _evfilt2(DISPATCH_EVFILT_TIMER); + _evfilt2(DISPATCH_EVFILT_TIMER_WITH_CLOCK); _evfilt2(DISPATCH_EVFILT_CUSTOM_ADD); _evfilt2(DISPATCH_EVFILT_CUSTOM_OR); _evfilt2(DISPATCH_EVFILT_CUSTOM_REPLACE); @@ -228,6 +217,12 @@ dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev, #define _dispatch_kevent_wlh_debug(verb, kev) ((void)verb, (void)kev) #endif // DISPATCH_WLH_DEBUG +#define _dispatch_du_debug(what, du) \ + _dispatch_debug("kevent-source[%p]: %s kevent[%p] " \ + "{ filter = %s, ident = 0x%x }", \ + _dispatch_wref2ptr((du)->du_owner_wref), what, \ + (du), _evfiltstr((du)->du_filter), (du)->du_ident) + #if DISPATCH_MACHPORT_DEBUG #ifndef MACH_PORT_TYPE_SPREQUEST #define MACH_PORT_TYPE_SPREQUEST 0x40000000 @@ -294,8 +289,8 @@ dispatch_debug_machport(mach_port_t name, const char* str) #if HAVE_MACH -static dispatch_once_t _dispatch_mach_host_port_pred; -static mach_port_t _dispatch_mach_host_port; +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_host_port_pred); +DISPATCH_STATIC_GLOBAL(mach_port_t _dispatch_mach_host_port); static inline void* _dispatch_kevent_mach_msg_buf(dispatch_kevent_t ke) @@ -322,7 +317,7 @@ static inline void _dispatch_mach_host_calendar_change_register(void); // - data is used to monitor the actual state of the // mach_port_request_notification() // - ext[0] is a boolean that trackes whether the notification is armed or not -#define DISPATCH_MACH_NOTIFICATION_ARMED(dk) ((dk)->ext[0]) +#define DISPATCH_MACH_NOTIFICATION_ARMED(dmn) ((dmn)->dmn_kev.ext[0]) #endif DISPATCH_ALWAYS_INLINE @@ -345,6 +340,7 @@ DISPATCH_NOINLINE static void 
_dispatch_kevent_print_error(dispatch_kevent_t ke) { + dispatch_unote_class_t du = NULL; _dispatch_debug("kevent[0x%llx]: handling error", (unsigned long long)ke->udata); if (ke->flags & EV_DELETE) { @@ -359,61 +355,137 @@ _dispatch_kevent_print_error(dispatch_kevent_t ke) } else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) { ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags; } else if (ke->udata) { - if (!_dispatch_unote_registered(_dispatch_kevent_get_unote(ke))) { + du = (dispatch_unote_class_t)(uintptr_t)ke->udata; + if (!_dispatch_unote_registered(du)) { ke->flags |= EV_ADD; } } -#if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && ke->data == ENOTSUP && - (ke->flags & EV_ADD) && (ke->fflags & MACH_RCV_MSG)) { - DISPATCH_INTERNAL_CRASH(ke->ident, - "Missing EVFILT_MACHPORT support for ports"); - } -#endif - - if (ke->data) { + switch (ke->data) { + case 0: + return; + case ERANGE: /* A broken QoS was passed to kevent_id() */ + DISPATCH_INTERNAL_CRASH(ke->qos, "Invalid kevent priority"); + default: // log the unexpected error _dispatch_bug_kevent_client("kevent", _evfiltstr(ke->filter), !ke->udata ? NULL : ke->flags & EV_DELETE ? "delete" : ke->flags & EV_ADD ? "add" : ke->flags & EV_ENABLE ? "enable" : "monitor", - (int)ke->data); + (int)ke->data, ke->ident, ke->udata, du); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kevent_merge_ev_flags(dispatch_unote_t du, uint32_t flags) +{ + if (unlikely(!(flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT))) { + _dispatch_unote_unregister(du, DUU_DELETE_ACK | DUU_MUST_SUCCEED); + return; } + + if (flags & EV_DELETE) { + // When a speculative deletion is requested by libdispatch, + // and the kernel is about to deliver an event, it can acknowledge + // our wish by delivering the event as a (EV_DELETE | EV_ONESHOT) + // event and dropping the knote at once. 
+ _dispatch_unote_state_set(du, DU_STATE_NEEDS_DELETE); + } else if (flags & (EV_ONESHOT | EV_VANISHED)) { + // EV_VANISHED events if re-enabled will produce another EV_VANISHED + // event. To avoid an infinite loop of such events, mark the unote + // as needing deletion so that _dispatch_unote_needs_rearm() + // eventually returns false. + // + // mach channels crash on EV_VANISHED, and dispatch sources stay + // in a limbo until canceled (explicitly or not). + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state |= DU_STATE_NEEDS_DELETE; + du_state &= ~DU_STATE_ARMED; + _dispatch_unote_state_set(du, du_state); + } else if (likely(flags & EV_DISPATCH)) { + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + } else { + return; + } + + _dispatch_du_debug((flags & EV_VANISHED) ? "vanished" : + (flags & EV_DELETE) ? "deleted oneshot" : + (flags & EV_ONESHOT) ? "oneshot" : "disarmed", du._du); } DISPATCH_NOINLINE static void _dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke) { - uintptr_t data; - uintptr_t status = 0; + dispatch_unote_action_t action = dux_type(du._du)->dst_action; pthread_priority_t pp = 0; -#if DISPATCH_USE_KEVENT_QOS - pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; + uintptr_t data; + + // once we modify the queue atomic flags below, it will allow concurrent + // threads running _dispatch_source_invoke2 to dispose of the source, + // so we can't safely borrow the reference we get from the muxnote udata + // anymore, and need our own + _dispatch_retain_unote_owner(du); + + switch (action) { + case DISPATCH_UNOTE_ACTION_PASS_DATA: + data = (uintptr_t)ke->data; + break; + + case DISPATCH_UNOTE_ACTION_PASS_FFLAGS: + data = (uintptr_t)ke->fflags; +#if HAVE_MACH + if (du._du->du_filter == EVFILT_MACHPORT) { + data = DISPATCH_MACH_RECV_MESSAGE; + } #endif - dispatch_unote_action_t action = du._du->du_data_action; - if (action == DISPATCH_UNOTE_ACTION_DATA_SET) { + break; + + case 
DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA: // ke->data is signed and "negative available data" makes no sense // zero bytes happens when EV_EOF is set dispatch_assert(ke->data >= 0l); - data = ~(unsigned long)ke->data; -#if HAVE_MACH - } else if (du._du->du_filter == EVFILT_MACHPORT) { - data = DISPATCH_MACH_RECV_MESSAGE; -#endif - } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) { data = (unsigned long)ke->data; - } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR) { - data = ke->fflags & du._du->du_fflags; - } else if (action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + break; + + case DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA: + data = (unsigned long)ke->data; + if (data) os_atomic_add2o(du._dr, ds_pending_data, data, relaxed); + break; + + case DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS: data = ke->fflags & du._du->du_fflags; - status = (unsigned long)ke->data; - } else { + if (du._dr->du_has_extended_status) { + uint64_t odata, ndata, value; + uint32_t status = (uint32_t)ke->data; + + // We combine the data and status into a single 64-bit value. 
+ value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status); + os_atomic_rmw_loop2o(du._dr, ds_pending_data, odata, ndata, relaxed, { + ndata = DISPATCH_SOURCE_GET_DATA(odata) | value; + }); +#if HAVE_MACH + } else if (du._du->du_filter == EVFILT_MACHPORT) { + data = DISPATCH_MACH_RECV_MESSAGE; + os_atomic_store2o(du._dr, ds_pending_data, data, relaxed); +#endif + } else { + if (data) os_atomic_or2o(du._dr, ds_pending_data, data, relaxed); + } + break; + + default: DISPATCH_INTERNAL_CRASH(action, "Corrupt unote action"); } - return dux_merge_evt(du._du, ke->flags, data, status, pp); + + _dispatch_kevent_merge_ev_flags(du, ke->flags); +#if DISPATCH_USE_KEVENT_QOS + pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; +#endif + return dux_merge_evt(du._du, ke->flags, data, pp); } DISPATCH_NOINLINE @@ -423,7 +495,11 @@ _dispatch_kevent_merge_muxed(dispatch_kevent_t ke) dispatch_muxnote_t dmn = _dispatch_kevent_get_muxnote(ke); dispatch_unote_linkage_t dul, dul_next; - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + if (ke->flags & (EV_ONESHOT | EV_DELETE)) { + // tell _dispatch_unote_unregister_muxed() the kernel half is gone + dmn->dmn_kev.flags |= EV_DELETE; + } + LIST_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { _dispatch_kevent_merge(_dispatch_unote_linkage_get_unote(dul), ke); } } @@ -439,13 +515,12 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) _dispatch_kevent_debug("received", ke); if (unlikely(ke->flags & EV_ERROR)) { if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { - // EVFILT_PROC may fail with ESRCH when the process exists but is a zombie - // . As a workaround, we simulate an exit event for - // any EVFILT_PROC with an invalid pid . - ke->flags &= ~(EV_ERROR | EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC); - ke->flags |= EV_ONESHOT; + // EVFILT_PROC may fail with ESRCH + // when the process exists but is a zombie. 
As a workaround, we + // simulate an exit event for any EVFILT_PROC with an invalid pid. + ke->flags = EV_UDATA_SPECIFIC | EV_ONESHOT | EV_DELETE; ke->fflags = NOTE_EXIT; - ke->data = 0; + ke->data = 0; _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); } else { return _dispatch_kevent_print_error(ke); @@ -456,10 +531,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) } #if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT) { - if (_dispatch_kevent_mach_msg_size(ke)) { - return _dispatch_kevent_mach_msg_drain(ke); - } + if (ke->filter == EVFILT_MACHPORT && _dispatch_kevent_mach_msg_size(ke)) { + return _dispatch_kevent_mach_msg_drain(ke); } #endif @@ -473,8 +546,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) #if DISPATCH_USE_MGR_THREAD DISPATCH_NOINLINE -static int -_dispatch_kq_create(const void *guard_ptr) +static void +_dispatch_kq_create(intptr_t *fd_ptr) { static const dispatch_kevent_s kev = { .ident = 1, @@ -486,7 +559,7 @@ _dispatch_kq_create(const void *guard_ptr) _dispatch_fork_becomes_unsafe(); #if DISPATCH_USE_GUARDED_FD - guardid_t guard = (uintptr_t)guard_ptr; + guardid_t guard = (uintptr_t)fd_ptr; kqfd = guarded_kqueue_np(&guard, GUARD_CLOSE | GUARD_DUP); #else (void)guard_ptr; @@ -517,10 +590,16 @@ _dispatch_kq_create(const void *guard_ptr) #else dispatch_assume_zero(kevent(kqfd, &kev, 1, NULL, 0, NULL)); #endif - return kqfd; + *fd_ptr = kqfd; } #endif +static inline int +_dispatch_kq_fd(void) +{ + return (int)(intptr_t)_dispatch_mgr_q.do_ctxt; +} + static void _dispatch_kq_init(void *context) { @@ -536,7 +615,7 @@ _dispatch_kq_init(void *context) _dispatch_kevent_workqueue_init(); if (_dispatch_kevent_workqueue_enabled) { int r; - int kqfd = _dispatch_kq; + int kqfd = _dispatch_kq_fd(); const dispatch_kevent_s ke = { .ident = 1, .filter = EVFILT_USER, @@ -562,7 +641,8 @@ _dispatch_kq_init(void *context) } #endif // DISPATCH_USE_KEVENT_WORKQUEUE #if DISPATCH_USE_MGR_THREAD - _dispatch_kq = _dispatch_kq_create(&_dispatch_mgr_q); + 
_dispatch_kq_create((intptr_t *)&_dispatch_mgr_q.do_ctxt); + _dispatch_trace_item_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); #endif // DISPATCH_USE_MGR_THREAD } @@ -579,11 +659,10 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, dispatch_kevent_t ke_out, int n_out, void *buf, size_t *avail, uint32_t flags) { - static dispatch_once_t pred; bool kq_initialized = false; int r = 0; - dispatch_once_f(&pred, &kq_initialized, _dispatch_kq_init); + dispatch_once_f(&_dispatch_kq_poll_pred, &kq_initialized, _dispatch_kq_init); if (unlikely(kq_initialized)) { // The calling thread was the one doing the initialization // @@ -594,7 +673,6 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, _voucher_activity_debug_channel_init(); } - #if !DISPATCH_USE_KEVENT_QOS if (flags & KEVENT_FLAG_ERROR_EVENTS) { // emulate KEVENT_FLAG_ERROR_EVENTS @@ -606,8 +684,10 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, #endif retry: - if (wlh == DISPATCH_WLH_ANON) { - int kqfd = _dispatch_kq; + if (unlikely(wlh == NULL)) { + DISPATCH_INTERNAL_CRASH(wlh, "Invalid wlh"); + } else if (wlh == DISPATCH_WLH_ANON) { + int kqfd = _dispatch_kq_fd(); #if DISPATCH_USE_KEVENT_QOS if (_dispatch_kevent_workqueue_enabled) { flags |= KEVENT_FLAG_WORKQ; @@ -706,13 +786,13 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, uint16_t action) { dispatch_unote_class_t du = _du._du; - dispatch_source_type_t dst = du->du_type; + dispatch_source_type_t dst = dux_type(du); uint16_t flags = dst->dst_flags | action; if ((flags & EV_VANISHED) && !(flags & EV_ADD)) { flags &= ~EV_VANISHED; } - pthread_priority_t pp = _dispatch_priority_to_pp(du->du_priority); + *dk = (dispatch_kevent_s){ .ident = du->du_ident, .filter = dst->dst_filter, @@ -721,7 +801,8 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, .fflags = du->du_fflags | dst->dst_fflags, 
.data = (typeof(dk->data))dst->dst_data, #if DISPATCH_USE_KEVENT_QOS - .qos = (typeof(dk->qos))pp, + .qos = (typeof(dk->qos))_dispatch_priority_to_pp_prefer_fallback( + du->du_priority), #endif }; } @@ -779,7 +860,7 @@ _dispatch_kq_deferred_update(dispatch_wlh_t wlh, dispatch_kevent_t ke) { dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (ddi && ddi->ddi_maxevents && wlh == _dispatch_get_wlh()) { + if (ddi && ddi->ddi_wlh == wlh && ddi->ddi_maxevents) { int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident, ke->udata); dispatch_kevent_t dk = _dispatch_kq_deferred_reuse_slot(wlh, ddi, slot); @@ -797,7 +878,7 @@ static int _dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke) { dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (ddi && wlh == _dispatch_get_wlh()) { + if (ddi && ddi->ddi_wlh == wlh) { int slot = _dispatch_kq_deferred_find_slot(ddi, ke->filter, ke->ident, ke->udata); _dispatch_kq_deferred_discard_slot(ddi, slot); @@ -817,13 +898,12 @@ _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, if (action_flags & EV_ADD) { // as soon as we register we may get an event delivery and it has to - // see du_wlh already set, else it will not unregister the kevent - dispatch_assert(du->du_wlh == NULL); + // see du_state already set, else it will not unregister the kevent _dispatch_wlh_retain(wlh); - du->du_wlh = wlh; + _dispatch_unote_state_set(du, wlh, DU_STATE_ARMED); } - if (ddi && wlh == _dispatch_get_wlh()) { + if (ddi && ddi->ddi_wlh == wlh) { int slot = _dispatch_kq_deferred_find_slot(ddi, du->du_filter, du->du_ident, (uintptr_t)du); if (slot < ddi->ddi_nevents) { @@ -853,18 +933,24 @@ _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, done: if (action_flags & EV_ADD) { if (unlikely(r)) { - _dispatch_wlh_release(du->du_wlh); - du->du_wlh = NULL; + _dispatch_wlh_release(wlh); + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + } else { + 
_dispatch_du_debug("installed", du); } return r == 0; } if (action_flags & EV_DELETE) { if (r == EINPROGRESS) { + _dispatch_du_debug("deferred delete", du); return false; } - _dispatch_wlh_release(du->du_wlh); - du->du_wlh = NULL; + _dispatch_wlh_release(wlh); + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + _dispatch_du_debug("deleted", du); + } else if (action_flags & EV_ENABLE) { + _dispatch_du_debug("rearmed", du); } dispatch_assume_zero(r); @@ -873,15 +959,6 @@ _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, #pragma mark dispatch_muxnote_t -static void -_dispatch_muxnotes_init(void *ctxt DISPATCH_UNUSED) -{ - uint32_t i; - for (i = 0; i < DSL_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_sources[i]); - } -} - DISPATCH_ALWAYS_INLINE static inline struct dispatch_muxnote_bucket_s * _dispatch_muxnote_bucket(uint64_t ident, int16_t filter) @@ -899,7 +976,6 @@ _dispatch_muxnote_bucket(uint64_t ident, int16_t filter) break; } - dispatch_once_f(&_dispatch_muxnotes.pred, NULL, _dispatch_muxnotes_init); return &_dispatch_sources[DSL_HASH((uintptr_t)ident)]; } #define _dispatch_unote_muxnote_bucket(du) \ @@ -908,21 +984,16 @@ _dispatch_muxnote_bucket(uint64_t ident, int16_t filter) DISPATCH_ALWAYS_INLINE static inline dispatch_muxnote_t _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, - dispatch_wlh_t wlh, uint64_t ident, int16_t filter) + uint64_t ident, int16_t filter) { dispatch_muxnote_t dmn; - _dispatch_muxnotes_lock(); - TAILQ_FOREACH(dmn, dmb, dmn_list) { - if (dmn->dmn_wlh == wlh && dmn->dmn_kev.ident == ident && - dmn->dmn_kev.filter == filter) { + LIST_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_kev.ident == ident && dmn->dmn_kev.filter == filter) { break; } } - _dispatch_muxnotes_unlock(); return dmn; } -#define _dispatch_unote_muxnote_find(dmb, du, wlh) \ - _dispatch_muxnote_find(dmb, wlh, du._du->du_ident, du._du->du_filter) DISPATCH_ALWAYS_INLINE static inline dispatch_muxnote_t @@ -930,50 +1001,45 @@ 
_dispatch_mach_muxnote_find(mach_port_t name, int16_t filter) { struct dispatch_muxnote_bucket_s *dmb; dmb = _dispatch_muxnote_bucket(name, filter); - return _dispatch_muxnote_find(dmb, DISPATCH_WLH_ANON, name, filter); + return _dispatch_muxnote_find(dmb, name, filter); } -DISPATCH_NOINLINE -static bool -_dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh) +bool +_dispatch_unote_register_muxed(dispatch_unote_t du) { struct dispatch_muxnote_bucket_s *dmb = _dispatch_unote_muxnote_bucket(du); dispatch_muxnote_t dmn; bool installed = true; - dmn = _dispatch_unote_muxnote_find(dmb, du, wlh); + dmn = _dispatch_muxnote_find(dmb, du._du->du_ident, du._du->du_filter); if (dmn) { uint32_t flags = du._du->du_fflags & ~dmn->dmn_kev.fflags; if (flags) { dmn->dmn_kev.fflags |= flags; - if (unlikely(du._du->du_type->dst_update_mux)) { - installed = du._du->du_type->dst_update_mux(dmn); + if (unlikely(dux_type(du._du)->dst_update_mux)) { + installed = dux_type(du._du)->dst_update_mux(dmn); } else { - installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh, + installed = !_dispatch_kq_immediate_update(DISPATCH_WLH_ANON, &dmn->dmn_kev); } if (!installed) dmn->dmn_kev.fflags &= ~flags; } } else { dmn = _dispatch_calloc(1, sizeof(struct dispatch_muxnote_s)); - TAILQ_INIT(&dmn->dmn_unotes_head); _dispatch_kq_unote_set_kevent(du, &dmn->dmn_kev, EV_ADD | EV_ENABLE); #if DISPATCH_USE_KEVENT_QOS dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; #endif dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER; - dmn->dmn_wlh = wlh; - if (unlikely(du._du->du_type->dst_update_mux)) { - installed = du._du->du_type->dst_update_mux(dmn); + if (unlikely(dux_type(du._du)->dst_update_mux)) { + installed = dux_type(du._du)->dst_update_mux(dmn); } else { - installed = !_dispatch_kq_immediate_update(dmn->dmn_wlh, + installed = !_dispatch_kq_immediate_update(DISPATCH_WLH_ANON, &dmn->dmn_kev); } if (installed) { dmn->dmn_kev.flags &= ~(EV_ADD | EV_VANISHED); - 
_dispatch_muxnotes_lock(); - TAILQ_INSERT_TAIL(dmb, dmn, dmn_list); - _dispatch_muxnotes_unlock(); + LIST_INSERT_HEAD(dmb, dmn, dmn_list); } else { free(dmn); } @@ -981,58 +1047,34 @@ _dispatch_unote_register_muxed(dispatch_unote_t du, dispatch_wlh_t wlh) if (installed) { dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); - TAILQ_INSERT_TAIL(&dmn->dmn_unotes_head, dul, du_link); - dul->du_muxnote = dmn; - + LIST_INSERT_HEAD(&dmn->dmn_unotes_head, dul, du_link); if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { - bool armed = DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev); - os_atomic_store2o(du._dmsr, dmsr_notification_armed, armed,relaxed); + os_atomic_store2o(du._dmsr, dmsr_notification_armed, + DISPATCH_MACH_NOTIFICATION_ARMED(dmn), relaxed); } - du._du->du_wlh = DISPATCH_WLH_ANON; + dul->du_muxnote = dmn; + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); + _dispatch_du_debug("installed", du._du); } return installed; } -bool -_dispatch_unote_register(dispatch_unote_t du, dispatch_wlh_t wlh, - dispatch_priority_t pri) -{ - dispatch_assert(!_dispatch_unote_registered(du)); - du._du->du_priority = pri; - switch (du._du->du_filter) { - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_CUSTOM_REPLACE: - du._du->du_wlh = DISPATCH_WLH_ANON; - return true; - } - if (!du._du->du_is_direct) { - return _dispatch_unote_register_muxed(du, DISPATCH_WLH_ANON); - } - return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE); -} - void -_dispatch_unote_resume(dispatch_unote_t du) +_dispatch_unote_resume_muxed(dispatch_unote_t du) { - dispatch_assert(_dispatch_unote_registered(du)); - - if (du._du->du_is_direct) { - dispatch_wlh_t wlh = du._du->du_wlh; - _dispatch_kq_unote_update(wlh, du, EV_ENABLE); - } else if (unlikely(du._du->du_type->dst_update_mux)) { + _dispatch_unote_state_set_bit(du, DU_STATE_ARMED); + if (unlikely(dux_type(du._du)->dst_update_mux)) { dispatch_unote_linkage_t dul = 
_dispatch_unote_get_linkage(du); - du._du->du_type->dst_update_mux(dul->du_muxnote); + dux_type(du._du)->dst_update_mux(dul->du_muxnote); } else { dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); dispatch_muxnote_t dmn = dul->du_muxnote; - _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev); + _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &dmn->dmn_kev); } } -DISPATCH_NOINLINE -static bool -_dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags) +bool +_dispatch_unote_unregister_muxed(dispatch_unote_t du) { dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); dispatch_muxnote_t dmn = dul->du_muxnote; @@ -1041,18 +1083,18 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags) if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); } - dispatch_assert(du._du->du_wlh == DISPATCH_WLH_ANON); - du._du->du_wlh = NULL; - TAILQ_REMOVE(&dmn->dmn_unotes_head, dul, du_link); - _TAILQ_TRASH_ENTRY(dul, du_link); + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); dul->du_muxnote = NULL; - if (TAILQ_EMPTY(&dmn->dmn_unotes_head)) { + if (LIST_EMPTY(&dmn->dmn_unotes_head)) { + dispose = true; + update = !(dmn->dmn_kev.flags & EV_DELETE); dmn->dmn_kev.flags |= EV_DELETE; - update = dispose = true; } else { - uint32_t fflags = du._du->du_type->dst_fflags; - TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + uint32_t fflags = dux_type(du._du)->dst_fflags; + LIST_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { du = _dispatch_unote_linkage_get_unote(dul); fflags |= du._du->du_fflags; } @@ -1061,55 +1103,108 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du, uint32_t flags) update = true; } } - if (update && !(flags & DU_UNREGISTER_ALREADY_DELETED)) { - if (unlikely(du._du->du_type->dst_update_mux)) { - dispatch_assume(du._du->du_type->dst_update_mux(dmn)); + if (update) 
{ + if (unlikely(dux_type(du._du)->dst_update_mux)) { + dispatch_assume(dux_type(du._du)->dst_update_mux(dmn)); } else { - _dispatch_kq_deferred_update(dmn->dmn_wlh, &dmn->dmn_kev); + _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &dmn->dmn_kev); } } if (dispose) { - struct dispatch_muxnote_bucket_s *dmb; - dmb = _dispatch_muxnote_bucket(dmn->dmn_kev.ident, dmn->dmn_kev.filter); - _dispatch_muxnotes_lock(); - TAILQ_REMOVE(dmb, dmn, dmn_list); - _dispatch_muxnotes_unlock(); + LIST_REMOVE(dmn, dmn_list); free(dmn); } + _dispatch_du_debug("deleted", du._du); return true; } +#if DISPATCH_HAVE_DIRECT_KNOTES bool -_dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) -{ - switch (du._du->du_filter) { - case DISPATCH_EVFILT_CUSTOM_ADD: - case DISPATCH_EVFILT_CUSTOM_OR: - case DISPATCH_EVFILT_CUSTOM_REPLACE: - du._du->du_wlh = NULL; - return true; - } - dispatch_wlh_t wlh = du._du->du_wlh; - if (wlh) { - if (!du._du->du_is_direct) { - return _dispatch_unote_unregister_muxed(du, flags); +_dispatch_unote_register_direct(dispatch_unote_t du, dispatch_wlh_t wlh) +{ + return _dispatch_kq_unote_update(wlh, du, EV_ADD | EV_ENABLE); +} + +void +_dispatch_unote_resume_direct(dispatch_unote_t du) +{ + _dispatch_unote_state_set_bit(du, DU_STATE_ARMED); + _dispatch_kq_unote_update(_dispatch_unote_wlh(du), du, EV_ENABLE); +} + +bool +_dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags) +{ + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + dispatch_wlh_t du_wlh = _du_state_wlh(du_state); + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + uint16_t action = EV_DELETE; + if (likely(du_wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == du_wlh)) { + action |= EV_ENABLE; + flags |= DUU_DELETE_ACK | DUU_MUST_SUCCEED; + } + + if (!_du_state_needs_delete(du_state) || (flags & DUU_DELETE_ACK)) { + if (du_state == DU_STATE_NEEDS_DELETE) { + // There is no knote to unregister anymore, just do it. 
+ _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + _dispatch_du_debug("acknowledged deleted oneshot", du._du); + return true; } - uint16_t action_flags; - if (flags & DU_UNREGISTER_ALREADY_DELETED) { - action_flags = 0; - } else if (flags & DU_UNREGISTER_IMMEDIATE_DELETE) { - action_flags = EV_DELETE | EV_ENABLE; - } else { - action_flags = EV_DELETE; + if (!_du_state_armed(du_state)) { + action |= EV_ENABLE; + flags |= DUU_MUST_SUCCEED; + } + if ((action & EV_ENABLE) || (flags & DUU_PROBE)) { + if (_dispatch_kq_unote_update(du_wlh, du, action)) { + return true; + } } - return _dispatch_kq_unote_update(wlh, du, action_flags); } - return true; + if (flags & DUU_MUST_SUCCEED) { + DISPATCH_INTERNAL_CRASH(0, "Unregistration failed"); + } + return false; } +#endif // DISPATCH_HAVE_DIRECT_KNOTES #pragma mark - #pragma mark dispatch_event_loop +enum { + DISPATCH_WORKLOOP_ASYNC, + DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, + DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC, + DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE, + DISPATCH_WORKLOOP_ASYNC_LEAVE, + DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, + DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, + DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, + DISPATCH_WORKLOOP_RETARGET, + + DISPATCH_WORKLOOP_SYNC_WAIT, + DISPATCH_WORKLOOP_SYNC_WAKE, + DISPATCH_WORKLOOP_SYNC_FAKE, + DISPATCH_WORKLOOP_SYNC_END, +}; + +static char const * const _dispatch_workloop_actions[] = { + [DISPATCH_WORKLOOP_ASYNC] = "async", + [DISPATCH_WORKLOOP_ASYNC_FROM_SYNC] = "async (from sync)", + [DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC] = "discover sync", + [DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE] = "qos update", + [DISPATCH_WORKLOOP_ASYNC_LEAVE] = "leave", + [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC] = "leave (from sync)", + [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER] = "leave (from transfer)", + [DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP] = "leave (forced)", + [DISPATCH_WORKLOOP_RETARGET] = "retarget", + + [DISPATCH_WORKLOOP_SYNC_WAIT] = "sync-wait", + [DISPATCH_WORKLOOP_SYNC_FAKE] = 
"sync-fake", + [DISPATCH_WORKLOOP_SYNC_WAKE] = "sync-wake", + [DISPATCH_WORKLOOP_SYNC_END] = "sync-end", +}; + void _dispatch_event_loop_atfork_child(void) { @@ -1142,8 +1237,8 @@ DISPATCH_NOINLINE void _dispatch_event_loop_drain(uint32_t flags) { - dispatch_wlh_t wlh = _dispatch_get_wlh(); dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_wlh_t wlh = ddi->ddi_wlh; int n; again: @@ -1151,6 +1246,7 @@ _dispatch_event_loop_drain(uint32_t flags) ddi->ddi_nevents = 0; _dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags); + if ((flags & KEVENT_FLAG_IMMEDIATE) && !(flags & KEVENT_FLAG_ERROR_EVENTS) && _dispatch_needs_to_return_to_kernel()) { @@ -1162,40 +1258,45 @@ void _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents) { dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_wlh_t wlh = ddi->ddi_wlh; dispatch_kevent_s kev[nevents]; // now we can re-use the whole event list, but we need to save one slot // for the event loop poke memcpy(kev, events, sizeof(kev)); - ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 1; + ddi->ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT - 2; for (int i = 0; i < nevents; i++) { _dispatch_kevent_drain(&kev[i]); } - dispatch_wlh_t wlh = _dispatch_get_wlh(); - if (wlh == DISPATCH_WLH_ANON && ddi->ddi_stashed_dou._do) { - if (ddi->ddi_nevents) { + if (wlh == DISPATCH_WLH_ANON) { + if (ddi->ddi_stashed_dou._do && ddi->ddi_nevents) { // We will drain the stashed item and not return to the kernel // right away. As a consequence, do not delay these updates. 
_dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS); } - _dispatch_trace_continuation_push(ddi->ddi_stashed_rq, - ddi->ddi_stashed_dou); } } void -_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +_dispatch_event_loop_leave_immediate(uint64_t dq_state) { - (void)wlh; (void)dq_state; + (void)dq_state; } void -_dispatch_event_loop_leave_deferred(dispatch_wlh_t wlh, uint64_t dq_state) +_dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, + uint64_t dq_state) { - (void)wlh; (void)dq_state; + (void)ddi; (void)dq_state; +} + +void +_dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) +{ + (void)dsc; } void @@ -1208,6 +1309,10 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, void _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) { + if (dsc->dsc_waiter_needs_cancel) { + _dispatch_event_loop_cancel_waiter(dsc); + dsc->dsc_waiter_needs_cancel = false; + } if (dsc->dsc_release_storage) { _dispatch_queue_release_storage(dsc->dc_data); } @@ -1235,72 +1340,74 @@ _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) DISPATCH_NOINLINE static void -_dispatch_kevent_timer_drain(dispatch_kevent_t ke) -{ - dispatch_assert(ke->data > 0); - dispatch_assert((ke->ident & DISPATCH_KEVENT_TIMEOUT_IDENT_MASK) == - DISPATCH_KEVENT_TIMEOUT_IDENT_MASK); - uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; - - dispatch_assert(tidx < DISPATCH_TIMER_COUNT); - _dispatch_timers_expired = true; - _dispatch_timers_processing_mask |= 1 << tidx; - _dispatch_timers_heap[tidx].dth_flags &= ~DTH_ARMED; -#if DISPATCH_USE_DTRACE - _dispatch_timers_will_wake |= 1 << DISPATCH_TIMER_QOS(tidx); -#endif -} - -DISPATCH_NOINLINE -static void -_dispatch_event_loop_timer_program(uint32_t tidx, +_dispatch_event_loop_timer_program(dispatch_timer_heap_t dth, uint32_t tidx, uint64_t target, uint64_t leeway, uint16_t action) { + dispatch_wlh_t wlh = _dispatch_get_wlh(); +#if 
DISPATCH_USE_KEVENT_QOS + pthread_priority_t pp = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; + if (wlh != DISPATCH_WLH_ANON) { + pp = _dispatch_qos_to_pp(dth[tidx].dth_max_qos); + } +#endif dispatch_kevent_s ke = { .ident = DISPATCH_KEVENT_TIMEOUT_IDENT_MASK | tidx, .filter = EVFILT_TIMER, .flags = action | EV_ONESHOT, .fflags = _dispatch_timer_index_to_fflags[tidx], .data = (int64_t)target, - .udata = (uintptr_t)&_dispatch_timers_heap[tidx], + .udata = (uintptr_t)dth, #if DISPATCH_HAVE_TIMER_COALESCING .ext[1] = leeway, #endif #if DISPATCH_USE_KEVENT_QOS - .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, + .qos = (typeof(ke.qos))pp, #endif }; - _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke); + _dispatch_kq_deferred_update(wlh, &ke); } void -_dispatch_event_loop_timer_arm(uint32_t tidx, dispatch_timer_delay_s range, - dispatch_clock_now_cache_t nows) +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, + dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows) { + dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); + uint64_t target = range.delay + _dispatch_time_now_cached(clock, nows); if (unlikely(_dispatch_timers_force_max_leeway)) { - range.delay += range.leeway; + target += range.leeway; range.leeway = 0; } + + _dispatch_event_loop_timer_program(dth, tidx, target, range.leeway, + EV_ADD | EV_ENABLE); #if HAVE_MACH - if (DISPATCH_TIMER_CLOCK(tidx) == DISPATCH_CLOCK_WALL) { + if (clock == DISPATCH_CLOCK_WALL) { _dispatch_mach_host_calendar_change_register(); } #endif - - // EVFILT_TIMER NOTE_ABSOLUTE always expects - // a WALL deadline - uint64_t now = _dispatch_time_now_cached(DISPATCH_CLOCK_WALL, nows); - _dispatch_timers_heap[tidx].dth_flags |= DTH_ARMED; - _dispatch_event_loop_timer_program(tidx, now + range.delay, range.leeway, - EV_ADD | EV_ENABLE); } void -_dispatch_event_loop_timer_delete(uint32_t tidx) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth, uint32_t tidx) { - _dispatch_timers_heap[tidx].dth_flags &= 
~DTH_ARMED; - _dispatch_event_loop_timer_program(tidx, 0, 0, EV_DELETE); + _dispatch_event_loop_timer_program(dth, tidx, 0, 0, EV_DELETE); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kevent_timer_drain(dispatch_kevent_t ke) +{ + dispatch_timer_heap_t dth = (dispatch_timer_heap_t)ke->udata; + uint32_t tidx = ke->ident & ~DISPATCH_KEVENT_TIMEOUT_IDENT_MASK; + + dispatch_assert(ke->data > 0); + dispatch_assert(ke->ident == (tidx | DISPATCH_KEVENT_TIMEOUT_IDENT_MASK)); + dispatch_assert(tidx < DISPATCH_TIMER_COUNT); + + _dispatch_timers_heap_dirty(dth, tidx); + dth[tidx].dth_needs_program = true; + dth[tidx].dth_armed = false; } #pragma mark - @@ -1312,7 +1419,7 @@ _dispatch_source_proc_create(dispatch_source_type_t dst DISPATCH_UNUSED, { dispatch_unote_t du = _dispatch_unote_create_with_handle(dst, handle, mask); if (du._du && (mask & DISPATCH_PROC_EXIT_STATUS)) { - du._du->du_data_action = DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET; + du._du->du_has_extended_status = true; } return du; } @@ -1330,7 +1437,9 @@ const dispatch_source_type_s _dispatch_source_type_proc = { |NOTE_REAP #endif , + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_proc_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1349,7 +1458,9 @@ const dispatch_source_type_s _dispatch_source_type_vnode = { |NOTE_NONE #endif , + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1377,7 +1488,9 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { |VQ_DESIRED_DISK #endif , + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_without_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ 
-1401,7 +1514,9 @@ const dispatch_source_type_s _dispatch_source_type_sock = { |NOTE_NOTIFY_ACK #endif , + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1414,7 +1529,10 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { .dst_filter = EVFILT_NW_CHANNEL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, .dst_mask = NOTE_FLOW_ADV_UPDATE, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, + .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, }; @@ -1477,7 +1595,7 @@ _dispatch_memorypressure_init(void) { dispatch_source_t ds = dispatch_source_create( DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, - DISPATCH_MEMORYPRESSURE_SOURCE_MASK, &_dispatch_mgr_q); + DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_mgr_q._as_dq); dispatch_set_context(ds, ds); dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); dispatch_activate(ds); @@ -1529,7 +1647,9 @@ const dispatch_source_type_s _dispatch_source_type_memorypressure = { |NOTE_MEMORYSTATUS_LOW_SWAP|NOTE_MEMORYSTATUS_PROC_LIMIT_WARN |NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL |NOTE_MEMORYSTATUS_MSL_STATUS, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, #if TARGET_OS_SIMULATOR .dst_create = _dispatch_source_memorypressure_create, @@ -1558,7 +1678,9 @@ const dispatch_source_type_s _dispatch_source_type_vm = { .dst_filter = EVFILT_MEMORYSTATUS, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH, .dst_mask = NOTE_VM_PRESSURE, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_vm_create, // redirected to 
_dispatch_source_type_memorypressure @@ -1575,19 +1697,21 @@ const dispatch_source_type_s _dispatch_source_type_vm = { static void _dispatch_mach_host_notify_update(void *context); -static mach_port_t _dispatch_mach_notify_port; -static dispatch_source_t _dispatch_mach_notify_source; +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_notify_port_pred); +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_calendar_pred); +DISPATCH_STATIC_GLOBAL(mach_port_t _dispatch_mach_notify_port); static void _dispatch_timers_calendar_change(void) { - uint32_t qos; + dispatch_timer_heap_t dth = _dispatch_timers_heap; + uint32_t qos, tidx; // calendar change may have gone past the wallclock deadline - _dispatch_timers_expired = true; for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - _dispatch_timers_processing_mask |= - 1 << DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos); + tidx = DISPATCH_TIMER_INDEX(DISPATCH_CLOCK_WALL, qos); + _dispatch_timers_heap_dirty(dth, tidx); + dth[tidx].dth_needs_program = true; } } @@ -1609,7 +1733,10 @@ _dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr) DISPATCH_NOINLINE static void -_dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) +_dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, + mach_msg_header_t *hdr, mach_msg_size_t msgsz DISPATCH_UNUSED, + pthread_priority_t msg_pp DISPATCH_UNUSED, + pthread_priority_t ovr_pp DISPATCH_UNUSED) { mig_reply_error_t reply; mach_msg_audit_trailer_t *tlr = NULL; @@ -1621,16 +1748,17 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) if (!tlr) { DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); } - if (hdr->msgh_id <= MACH_NOTIFY_LAST - && dispatch_assume_zero(tlr->msgh_audit.val[ + if (hdr->msgh_id <= MACH_NOTIFY_LAST && + dispatch_assume_zero(tlr->msgh_audit.val[ DISPATCH_MACH_AUDIT_TOKEN_PID])) { mach_msg_destroy(hdr); - return; + goto out; } + boolean_t success = libdispatch_internal_protocol_server(hdr, &reply.Head); 
if (!success && reply.RetCode == MIG_BAD_ID && (hdr->msgh_id == HOST_CALENDAR_SET_REPLYID || - hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { + hdr->msgh_id == HOST_CALENDAR_CHANGED_REPLYID)) { _dispatch_debug("calendar-change notification"); _dispatch_timers_calendar_change(); _dispatch_mach_host_notify_update(NULL); @@ -1643,39 +1771,38 @@ _dispatch_mach_notify_source_invoke(mach_msg_header_t *hdr) if (!success || (reply.RetCode && reply.RetCode != MIG_NO_REPLY)) { mach_msg_destroy(hdr); } + +out: + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } + return _dispatch_unote_resume(du); } DISPATCH_NOINLINE static void _dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) { - kern_return_t kr; -#if HAVE_MACH_PORT_CONSTRUCT mach_port_options_t opts = { .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT }; -#if DISPATCH_SIZEOF_PTR == 8 - const mach_port_context_t guard = 0xfeed09071f1ca7edull; -#else - const mach_port_context_t guard = 0xff1ca7edull; -#endif + mach_port_context_t guard = (uintptr_t)&_dispatch_mach_notify_port; + kern_return_t kr; + kr = mach_port_construct(mach_task_self(), &opts, guard, &_dispatch_mach_notify_port); -#else - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, - &_dispatch_mach_notify_port); -#endif - DISPATCH_VERIFY_MIG(kr); if (unlikely(kr)) { DISPATCH_CLIENT_CRASH(kr, "mach_port_construct() failed: cannot create receive right"); } - static const struct dispatch_continuation_s dc = { - .dc_func = (void*)_dispatch_mach_notify_source_invoke, - }; - _dispatch_mach_notify_source = _dispatch_source_create_mach_msg_direct_recv( - _dispatch_mach_notify_port, &dc); - dispatch_assert(_dispatch_mach_notify_source); - dispatch_activate(_dispatch_mach_notify_source); + dispatch_unote_t du = dux_create(&_dispatch_mach_type_notification, + _dispatch_mach_notify_port, 0); + + // make sure _dispatch_kevent_mach_msg_recv can call + // _dispatch_retain_unote_owner + du._du->du_owner_wref = 
_dispatch_ptr2wref(&_dispatch_mgr_q); + + dispatch_assume(_dispatch_unote_register(du, DISPATCH_WLH_ANON, + DISPATCH_PRIORITY_FLAG_MANAGER)); } static void @@ -1711,26 +1838,19 @@ DISPATCH_ALWAYS_INLINE static inline mach_port_t _dispatch_get_mach_notify_port(void) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_notify_port_init); + dispatch_once_f(&_dispatch_mach_notify_port_pred, NULL, + _dispatch_mach_notify_port_init); return _dispatch_mach_notify_port; } static void _dispatch_mach_host_notify_update(void *context DISPATCH_UNUSED) { - static int notify_type = HOST_NOTIFY_CALENDAR_SET; kern_return_t kr; _dispatch_debug("registering for calendar-change notification"); -retry: + kr = host_request_notification(_dispatch_get_mach_host_port(), - notify_type, _dispatch_get_mach_notify_port()); - // Fallback when missing support for newer _SET variant, fires strictly more - if (kr == KERN_INVALID_ARGUMENT && - notify_type != HOST_NOTIFY_CALENDAR_CHANGE) { - notify_type = HOST_NOTIFY_CALENDAR_CHANGE; - goto retry; - } + HOST_NOTIFY_CALENDAR_SET, _dispatch_get_mach_notify_port()); DISPATCH_VERIFY_MIG(kr); (void)dispatch_assume_zero(kr); } @@ -1739,8 +1859,8 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_mach_host_calendar_change_register(void) { - static dispatch_once_t pred; - dispatch_once_f(&pred, NULL, _dispatch_mach_host_notify_update); + dispatch_once_f(&_dispatch_mach_calendar_pred, NULL, + _dispatch_mach_host_notify_update); } static kern_return_t @@ -1848,6 +1968,7 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final) { dispatch_unote_linkage_t dul, dul_next; dispatch_muxnote_t dmn; + uint32_t flags = EV_ENABLE; _dispatch_debug_machport(name); dmn = _dispatch_mach_muxnote_find(name, DISPATCH_EVFILT_MACH_NOTIFICATION); @@ -1856,22 +1977,30 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final) } dmn->dmn_kev.data &= ~_DISPATCH_MACH_SP_FLAGS; - if (!final) { - // Re-register for 
notification before delivery - final = !_dispatch_kevent_mach_notify_resume(dmn, data, 0); + if (final || !_dispatch_kevent_mach_notify_resume(dmn, data, 0)) { + flags = EV_ONESHOT; + dmn->dmn_kev.flags |= EV_DELETE; } + os_atomic_store(&DISPATCH_MACH_NOTIFICATION_ARMED(dmn), 0, relaxed); - uint32_t flags = final ? EV_ONESHOT : EV_ENABLE; - DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = 0; - TAILQ_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { - dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); - os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); - dux_merge_evt(du._du, flags, (data & du._du->du_fflags), 0, 0); - if (!dul_next || DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev)) { - // current merge is last in list (dmn might have been freed) - // or it re-armed the notification + LIST_FOREACH_SAFE(dul, &dmn->dmn_unotes_head, du_link, dul_next) { + if (os_atomic_load(&DISPATCH_MACH_NOTIFICATION_ARMED(dmn), relaxed)) { + dispatch_assert(!final); break; } + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + uint32_t fflags = (data & du._du->du_fflags); + os_atomic_store2o(du._du, dmsr_notification_armed, 0, relaxed); + if (final || fflags) { + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + if (final) _dispatch_unote_unregister_muxed(du); + if (fflags && dux_type(du._du)->dst_action == + DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS) { + os_atomic_or2o(du._dr, ds_pending_data, fflags, relaxed); + } + dux_merge_evt(du._du, flags, fflags, 0); + } } } @@ -1921,22 +2050,20 @@ _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) { dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote; dispatch_unote_linkage_t dul; - dispatch_unote_t du; - - if (!_dispatch_unote_registered(dmsr)) { - return; - } - - DISPATCH_MACH_NOTIFICATION_ARMED(&dmn->dmn_kev) = true; - TAILQ_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { - du = _dispatch_unote_linkage_get_unote(dul); - 
os_atomic_store2o(du._dmsr, dmsr_notification_armed, true, relaxed); + if (dmn) { + os_atomic_store(&DISPATCH_MACH_NOTIFICATION_ARMED(dmn), 1, relaxed); + LIST_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + os_atomic_store2o(du._du, dmsr_notification_armed, 1, relaxed); + } + _dispatch_debug("machport[0x%08x]: send-possible notification armed", + (mach_port_name_t)dmn->dmn_kev.ident); } } static dispatch_unote_t _dispatch_source_mach_send_create(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, unsigned long mask) { if (!mask) { // Preserve legacy behavior that (mask == 0) => DISPATCH_MACH_SEND_DEAD @@ -1963,7 +2090,9 @@ const dispatch_source_type_s _dispatch_source_type_mach_send = { .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, .dst_flags = EV_CLEAR, .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_source_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -1979,7 +2108,7 @@ _dispatch_mach_send_create(dispatch_source_type_t dst, _dispatch_unote_create_without_handle(dst, handle, mask); if (du._dmsr) { du._dmsr->dmsr_disconnect_cnt = DISPATCH_MACH_NEVER_CONNECTED; - TAILQ_INIT(&du._dmsr->dmsr_replies); + LIST_INIT(&du._dmsr->dmsr_replies); } return du; } @@ -1989,11 +2118,13 @@ const dispatch_source_type_s _dispatch_mach_type_send = { .dst_filter = DISPATCH_EVFILT_MACH_NOTIFICATION, .dst_flags = EV_CLEAR, .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_send_refs_s), + .dst_strict = false, .dst_create = _dispatch_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, - .dst_merge_evt = _dispatch_mach_merge_notification, + .dst_merge_evt = 
_dispatch_mach_notification_merge_evt, }; #endif // HAVE_MACH @@ -2002,31 +2133,25 @@ const dispatch_source_type_s _dispatch_mach_type_send = { static void _dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr) + mach_msg_header_t *hdr, pthread_priority_t msg_pp, + pthread_priority_t ovr_pp) { - mach_msg_size_t siz = hdr->msgh_size + DISPATCH_MACH_TRAILER_SIZE; mach_port_t name = hdr->msgh_local_port; + mach_msg_size_t siz; - if (!dispatch_assume(hdr->msgh_size <= UINT_MAX - - DISPATCH_MACH_TRAILER_SIZE)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received overlarge message"); - } else if (!dispatch_assume(name)) { - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with MACH_PORT_NULL port"); - } else { - _dispatch_debug_machport(name); - if (likely(du._du)) { - return dux_merge_msg(du._du, flags, hdr, siz); - } - _dispatch_bug_client("_dispatch_kevent_mach_msg_recv: " - "received message with no listeners"); + if (os_add_overflow(hdr->msgh_size, DISPATCH_MACH_TRAILER_SIZE, &siz)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message received"); } - - mach_msg_destroy(hdr); - if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { - free(hdr); + if (os_unlikely(name == MACH_PORT_NULL)) { + DISPATCH_CLIENT_CRASH(hdr->msgh_id, "Received message with " + "MACH_PORT_NULL msgh_local_port"); } + + _dispatch_debug_machport(name); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + _dispatch_kevent_merge_ev_flags(du, flags); + return dux_merge_msg(du._du, flags, hdr, siz, msg_pp, ovr_pp); } DISPATCH_NOINLINE @@ -2034,53 +2159,50 @@ static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) { mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + dispatch_unote_t du = _dispatch_kevent_get_unote(ke); + pthread_priority_t msg_pp = (pthread_priority_t)(ke->ext[2] >> 32); + pthread_priority_t ovr_pp = (pthread_priority_t)ke->qos; + uint32_t flags = ke->flags; 
mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - uint32_t flags = ke->flags; - dispatch_unote_t du = _dispatch_kevent_get_unote(ke); if (unlikely(!hdr)) { DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); } if (likely(!kr)) { - _dispatch_kevent_mach_msg_recv(du, flags, hdr); - goto out; - } else if (kr != MACH_RCV_TOO_LARGE) { + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + } + if (kr != MACH_RCV_TOO_LARGE) { goto out; - } else if (!ke->data) { + } + + if (!ke->data) { DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); } if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) { DISPATCH_INTERNAL_CRASH(ke->ext[1], "EVFILT_MACHPORT with overlarge message"); } - siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; - hdr = malloc(siz); - if (dispatch_assume(hdr)) { - flags |= DISPATCH_EV_MSG_NEEDS_FREE; - } else { - // Kernel will discard message too large to fit - hdr = NULL; - siz = 0; - } - mach_port_t name = (mach_port_name_t)ke->data; const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - kr = mach_msg(hdr, options, 0, siz, name, MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + hdr = malloc(siz); // mach_msg will return TOO_LARGE if hdr/siz is NULL/0 + kr = mach_msg(hdr, options, 0, dispatch_assume(hdr) ? 
siz : 0, + (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); if (likely(!kr)) { - _dispatch_kevent_mach_msg_recv(du, flags, hdr); - goto out; - } else if (kr == MACH_RCV_TOO_LARGE) { + flags |= DISPATCH_EV_MSG_NEEDS_FREE; + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + } + + if (kr == MACH_RCV_TOO_LARGE) { _dispatch_log("BUG in libdispatch client: " "_dispatch_kevent_mach_msg_drain: dropped message too " "large to fit in memory: id = 0x%x, size = %u", hdr->msgh_id, _dispatch_kevent_mach_msg_size(ke)); kr = MACH_MSG_SUCCESS; } - if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { - free(hdr); - } + free(hdr); + out: if (unlikely(kr)) { _dispatch_bug_mach_client("_dispatch_kevent_mach_msg_drain: " @@ -2093,7 +2215,9 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, .dst_fflags = 0, + .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2103,66 +2227,51 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { }; static void -_dispatch_source_mach_recv_direct_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t msgsz DISPATCH_UNUSED) +_dispatch_mach_notification_event(dispatch_unote_t du, uint32_t flags DISPATCH_UNUSED, + uintptr_t data DISPATCH_UNUSED, pthread_priority_t pp DISPATCH_UNUSED) { - dispatch_continuation_t dc = du._dr->ds_handler[DS_EVENT_HANDLER]; - dispatch_source_t ds = _dispatch_source_from_refs(du._dr); - dispatch_queue_t cq = _dispatch_queue_get_current(); - - // see firehose_client_push_notify_async - _dispatch_queue_set_current(ds->_as_dq); - dc->dc_func(msg); - _dispatch_queue_set_current(cq); - if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { - free(msg); - } - if ((ds->dq_atomic_flags & 
DSF_CANCELED) || - (flags & (EV_ONESHOT | EV_DELETE))) { - return _dispatch_source_merge_evt(du, flags, 0, 0, 0); - } - if (_dispatch_unote_needs_rearm(du)) { - return _dispatch_unote_resume(du); - } + DISPATCH_CLIENT_CRASH(du._du->du_ident, "Unexpected non message event"); } -static void -_dispatch_mach_recv_direct_merge(dispatch_unote_t du, - uint32_t flags, uintptr_t data, - uintptr_t status DISPATCH_UNUSED, - pthread_priority_t pp) -{ - if (flags & EV_VANISHED) { - DISPATCH_CLIENT_CRASH(du._du->du_ident, - "Unexpected EV_VANISHED (do not destroy random mach ports)"); - } - return _dispatch_source_merge_evt(du, flags, data, 0, pp); -} - -const dispatch_source_type_s _dispatch_source_type_mach_recv_direct = { - .dst_kind = "direct mach_recv", +const dispatch_source_type_s _dispatch_mach_type_notification = { + .dst_kind = "mach_notification", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, - .dst_size = sizeof(struct dispatch_source_refs_s), + .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, + .dst_size = sizeof(struct dispatch_unote_class_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, - .dst_merge_evt = _dispatch_mach_recv_direct_merge, - .dst_merge_msg = _dispatch_source_mach_recv_direct_merge_msg, + .dst_merge_evt = _dispatch_mach_notification_event, + .dst_merge_msg = _dispatch_mach_notification_merge_msg, .dst_per_trigger_qos = true, }; +static void +_dispatch_mach_recv_direct_merge_evt(dispatch_unote_t du, uint32_t flags, + uintptr_t data, pthread_priority_t pp) +{ + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + return _dispatch_source_merge_evt(du, flags, data, pp); +} + const dispatch_source_type_s _dispatch_mach_type_recv = { .dst_kind = "mach_recv (channel)", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, 
.dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_recv_refs_s), + .dst_strict = false, - // without handle because the mach code will set the ident after connect + // without handle because the mach code will set the ident after connect .dst_create = _dispatch_unote_create_without_handle, - .dst_merge_evt = _dispatch_mach_recv_direct_merge, + .dst_merge_evt = _dispatch_mach_recv_direct_merge_evt, .dst_merge_msg = _dispatch_mach_merge_msg, .dst_per_trigger_qos = true, @@ -2172,7 +2281,6 @@ DISPATCH_NORETURN static void _dispatch_mach_reply_merge_evt(dispatch_unote_t du, uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, - uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp DISPATCH_UNUSED) { DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); @@ -2183,7 +2291,9 @@ const dispatch_source_type_s _dispatch_mach_type_reply = { .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_reply_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_reply_merge_evt, @@ -2197,10 +2307,12 @@ const dispatch_source_type_s _dispatch_xpc_type_sigterm = { .dst_filter = EVFILT_SIGNAL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_ONESHOT, .dst_fflags = 0, + .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_xpc_term_refs_s), + .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, - .dst_merge_evt = _dispatch_xpc_sigterm_merge, + .dst_merge_evt = _dispatch_xpc_sigterm_merge_evt, }; #endif // HAVE_MACH diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 73362a58a..eabeb461e 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -48,7 +48,7 @@ */ typedef struct 
dispatch_workq_monitor_s { /* The dispatch_queue we are monitoring */ - dispatch_queue_t dq; + dispatch_queue_global_t dq; /* The observed number of runnable worker threads */ int32_t num_runnable; @@ -66,7 +66,7 @@ typedef struct dispatch_workq_monitor_s { int num_registered_tids; } dispatch_workq_monitor_s, *dispatch_workq_monitor_t; -static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_MAX]; +static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_NBUCKETS]; #pragma mark Implementation of the monitoring subsystem. @@ -77,13 +77,15 @@ static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED); static dispatch_once_t _dispatch_workq_init_once_pred; void -_dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls) +_dispatch_workq_worker_register(dispatch_queue_global_t root_q) { dispatch_once_f(&_dispatch_workq_init_once_pred, NULL, &_dispatch_workq_init_once); #if HAVE_DISPATCH_WORKQ_MONITORING - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; + dispatch_qos_t qos = _dispatch_priority_qos(root_q->dq_priority); + if (qos == 0) qos = DISPATCH_QOS_DEFAULT; + int bucket = DISPATCH_QOS_BUCKET(qos); + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[bucket]; dispatch_assert(mon->dq == root_q); dispatch_tid tid = _dispatch_tid_self(); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); @@ -95,11 +97,13 @@ _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls) } void -_dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls) +_dispatch_workq_worker_unregister(dispatch_queue_global_t root_q) { #if HAVE_DISPATCH_WORKQ_MONITORING - dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[qos-1]; + dispatch_qos_t qos = _dispatch_priority_qos(root_q->dq_priority); + if (qos == 0) qos = DISPATCH_QOS_DEFAULT; + int bucket = 
DISPATCH_QOS_BUCKET(qos); + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[bucket]; dispatch_assert(mon->dq == root_q); dispatch_tid tid = _dispatch_tid_self(); _dispatch_unfair_lock_lock(&mon->registered_tid_lock); @@ -174,14 +178,18 @@ _dispatch_workq_count_runnable_workers(dispatch_workq_monitor_t mon) #error must define _dispatch_workq_count_runnable_workers #endif +#define foreach_qos_bucket_reverse(name) \ + for (name = DISPATCH_QOS_BUCKET(DISPATCH_QOS_MAX); \ + name >= DISPATCH_QOS_BUCKET(DISPATCH_QOS_MAINTENANCE); name--) + static void _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) { int global_soft_max = WORKQ_OVERSUBSCRIBE_FACTOR * (int)dispatch_hw_config(active_cpus); - int global_runnable = 0; - for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; - dispatch_queue_t dq = mon->dq; + int global_runnable = 0, i; + foreach_qos_bucket_reverse(i) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; + dispatch_queue_global_t dq = mon->dq; if (!_dispatch_queue_class_probe(dq)) { _dispatch_debug("workq: %s is empty.", dq->dq_label); @@ -226,9 +234,9 @@ static void _dispatch_workq_init_once(void *context DISPATCH_UNUSED) { #if HAVE_DISPATCH_WORKQ_MONITORING - int target_runnable = (int)dispatch_hw_config(active_cpus); - for (dispatch_qos_t i = DISPATCH_QOS_MAX; i > DISPATCH_QOS_UNSPECIFIED; i--) { - dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i-1]; + int i, target_runnable = (int)dispatch_hw_config(active_cpus); + foreach_qos_bucket_reverse(i) { + dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; mon->dq = _dispatch_get_root_queue(i, false); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; diff --git a/src/event/workqueue_internal.h b/src/event/workqueue_internal.h index 94dfe4e36..b6ca6df2f 100644 --- a/src/event/workqueue_internal.h +++ 
b/src/event/workqueue_internal.h @@ -27,12 +27,8 @@ #ifndef __DISPATCH_WORKQUEUE_INTERNAL__ #define __DISPATCH_WORKQUEUE_INTERNAL__ -#define WORKQ_ADDTHREADS_OPTION_OVERCOMMIT 0x1 - -#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 - -void _dispatch_workq_worker_register(dispatch_queue_t root_q, qos_class_t cls); -void _dispatch_workq_worker_unregister(dispatch_queue_t root_q, qos_class_t cls); +void _dispatch_workq_worker_register(dispatch_queue_global_t root_q); +void _dispatch_workq_worker_unregister(dispatch_queue_global_t root_q); #if defined(__linux__) #define HAVE_DISPATCH_WORKQ_MONITORING 1 diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index e4fdf3324..c968588e1 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -27,32 +27,35 @@ subsystem firehose 11600; serverprefix firehose_server_; userprefix firehose_send_; -simpleroutine -register( +UseSpecialReplyPort 1; + +simpleroutine register( server_port : mach_port_t; mem_port : mach_port_move_send_t; mem_size : mach_vm_size_t; - comm_recvp : mach_port_move_receive_t; + comm_mem_recvp : mach_port_move_receive_t; + comm_io_recvp : mach_port_move_receive_t; comm_sendp : mach_port_make_send_t; extra_info_port : mach_port_move_send_t; extra_info_size : mach_vm_size_t; ServerAuditToken atoken : audit_token_t ); -routine -push_and_wait( +routine push_and_wait( RequestPort comm_port : mach_port_t; -SReplyPort reply_port : mach_port_make_send_once_t; - qos_class : qos_class_t; - for_io : boolean_t; +SReplyPort reply_port : mach_port_make_send_once_t; out push_reply : firehose_push_reply_t; out quarantinedOut : boolean_t ); -simpleroutine -push_async( - comm_port : mach_port_t; - qos_class : qos_class_t; - for_io : boolean_t; - expects_notify : boolean_t +simpleroutine push_async( +RequestPort comm_port : mach_port_t; +in qos_class : qos_class_t; +WaitTime timeout : natural_t +); + +routine get_logging_prefs( +RequestPort server_port : mach_port_t; +out mem_port : mach_port_t; 
+out mem_size : mach_vm_size_t ); diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 3bb790c7c..1ba036fb0 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -26,12 +26,6 @@ #define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 #define DISPATCH_PURE_C 1 -#define _safe_cast_to_long(x) \ - ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ - "__builtin_expect doesn't support types wider than long"); \ - (long)(x); }) -#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) -#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) #define os_likely(x) __builtin_expect(!!(x), 1) #define os_unlikely(x) __builtin_expect(!!(x), 0) #define likely(x) __builtin_expect(!!(x), 1) @@ -67,7 +61,7 @@ typedef struct dispatch_gate_s { dispatch_lock dgl_lock; } dispatch_gate_s, *dispatch_gate_t; #define DLOCK_LOCK_DATA_CONTENTION 0 -static void _dispatch_gate_wait(dispatch_gate_t l, uint32_t flags); +static void _dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t flags); #define fcp_quarntined fcp_quarantined @@ -124,6 +118,11 @@ _Static_assert(sizeof(struct firehose_tracepoint_s) == 24, #ifdef KERNEL static firehose_buffer_t kernel_firehose_buffer = NULL; + +_Static_assert(FIREHOSE_BUFFER_KERNEL_MAX_CHUNK_COUNT == FIREHOSE_BUFFER_CHUNK_COUNT, + "FIREHOSE_BUFFER_KERNEL_MAX_CHUNK_COUNT must match FIREHOSE_BUFFER_CHUNK_COUNT"); +_Static_assert(FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES <= FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT * 3 / 4, + "FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES cannot exceed 3/4 of FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT"); #endif #pragma mark - @@ -131,31 +130,37 @@ static firehose_buffer_t kernel_firehose_buffer = NULL; #ifndef KERNEL static mach_port_t -firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) +firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp, + firehose_buffer_pushport_t pushport) { - mach_port_t sendp = 
MACH_PORT_NULL; + mach_port_t cursendp = MACH_PORT_NULL; mach_port_t mem_port = MACH_PORT_NULL, extra_info_port = MACH_PORT_NULL; mach_vm_size_t extra_info_size = 0; kern_return_t kr; + bool reconnecting = (oldsendp != MACH_PORT_NULL); dispatch_assert(fb->fb_header.fbh_logd_port); dispatch_assert(fb->fb_header.fbh_recvp); dispatch_assert(fb->fb_header.fbh_uniquepid != 0); _dispatch_unfair_lock_lock(&fb->fb_header.fbh_logd_lock); - sendp = fb->fb_header.fbh_sendp; - if (sendp != oldsendp || sendp == MACH_PORT_DEAD) { + cursendp = fb->fb_header.fbh_sendp[pushport]; + if (cursendp != oldsendp || cursendp == MACH_PORT_DEAD) { // someone beat us to reconnecting or logd was unloaded, just go away goto unlock; } - if (oldsendp) { - // same trick as _xpc_pipe_dispose: keeping a send right - // maintains the name, so that we can destroy the receive right - // in case we still have it. - (void)firehose_mach_port_recv_dispose(oldsendp, fb); - firehose_mach_port_send_release(oldsendp); - fb->fb_header.fbh_sendp = MACH_PORT_NULL; + if (reconnecting) { + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + mach_port_t spi = fb->fb_header.fbh_sendp[i]; + dispatch_assert(spi); + // same trick as _xpc_pipe_dispose: keeping a send right maintains + // the name, so that we can destroy the receive right in case we + // still have it. 
+ (void)firehose_mach_port_recv_dispose(spi, fb); + firehose_mach_port_send_release(spi); + fb->fb_header.fbh_sendp[i] = MACH_PORT_NULL; + } } /* Create a memory port for the buffer VM region */ @@ -174,11 +179,7 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) DISPATCH_CLIENT_CRASH(kr, "Unable to make memory port"); } - /* Create a communication port to the logging daemon */ - uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; - sendp = firehose_mach_port_allocate(opts, fb); - - if (oldsendp && _voucher_libtrace_hooks->vah_get_reconnect_info) { + if (reconnecting && _voucher_libtrace_hooks->vah_get_reconnect_info) { kr = _voucher_libtrace_hooks->vah_get_reconnect_info(&addr, &size); if (likely(kr == KERN_SUCCESS) && addr && size) { extra_info_size = size; @@ -194,25 +195,39 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) } } + /* Create memory and IO communication ports to the logging daemon */ + uint32_t opts = MPO_CONTEXT_AS_GUARD | MPO_TEMPOWNER | MPO_INSERT_SEND_RIGHT; + mach_port_t sendp[FIREHOSE_BUFFER_NPUSHPORTS]; + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + sendp[i] = firehose_mach_port_allocate(opts, 1, fb); + } + cursendp = sendp[pushport]; + /* Call the firehose_register() MIG routine */ kr = firehose_send_register(fb->fb_header.fbh_logd_port, mem_port, - sizeof(union firehose_buffer_u), sendp, fb->fb_header.fbh_recvp, + sizeof(union firehose_buffer_u), + sendp[FIREHOSE_BUFFER_PUSHPORT_MEM], + sendp[FIREHOSE_BUFFER_PUSHPORT_IO], fb->fb_header.fbh_recvp, extra_info_port, extra_info_size); if (likely(kr == KERN_SUCCESS)) { - fb->fb_header.fbh_sendp = sendp; + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + fb->fb_header.fbh_sendp[i] = sendp[i]; + } } else if (unlikely(kr == MACH_SEND_INVALID_DEST)) { // MACH_SEND_INVALID_DEST here means that logd's boostrap port // turned into a dead name, which in turn means that logd has been // unloaded. 
The only option here, is to give up permanently. - // - // same trick as _xpc_pipe_dispose: keeping a send right - // maintains the name, so that we can destroy the receive right - // in case we still have it. - (void)firehose_mach_port_recv_dispose(sendp, fb); - firehose_mach_port_send_release(sendp); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + // same trick as _xpc_pipe_dispose: keeping a send right maintains + // the name, so that we can destroy the receive right in case we + // still have it. + (void)firehose_mach_port_recv_dispose(sendp[i], fb); + firehose_mach_port_send_release(sendp[i]); + fb->fb_header.fbh_sendp[i] = MACH_PORT_DEAD; + } + cursendp = MACH_PORT_DEAD; firehose_mach_port_send_release(mem_port); if (extra_info_port) firehose_mach_port_send_release(extra_info_port); - sendp = fb->fb_header.fbh_sendp = MACH_PORT_DEAD; } else { // the client probably has some form of memory corruption // and/or a port leak @@ -221,7 +236,7 @@ firehose_client_reconnect(firehose_buffer_t fb, mach_port_t oldsendp) unlock: _dispatch_unfair_lock_unlock(&fb->fb_header.fbh_logd_lock); - return sendp; + return cursendp; } static void @@ -266,14 +281,14 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) } total = MAX(total, FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT); if (!(fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY)) { - total = MAX(total, TARGET_OS_EMBEDDED ? 8 : 12); + total = MAX(total, TARGET_OS_IPHONE ? 
8 : 12); } - new.fbs_max_ref = total; - new.fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - 1); - new.fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - - MAX(3 * total / 8, 2 * io_streams); - new.fbs_unused = 0; + new = (firehose_bank_state_u) { + .fbs_max_ref = (firehose_chunk_ref_t)(total + 1), + .fbs_mem_bank = total - 1, + .fbs_io_bank = MAX(3 * total / 8, 2 * io_streams), + }; old = fbb->fbb_limits; fbb->fbb_limits = new; @@ -299,7 +314,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, vm_addr = vm_page_size; const size_t madvise_bytes = FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE; - if (slowpath(madvise_bytes % PAGE_SIZE)) { + if (unlikely(madvise_bytes % PAGE_SIZE)) { DISPATCH_INTERNAL_CRASH(madvise_bytes, "Invalid values for MADVISE_CHUNK_COUNT / CHUNK_SIZE"); } @@ -308,7 +323,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE | VM_MAKE_TAG(VM_MEMORY_GENEALOGY), MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_NONE); - if (slowpath(kr)) { + if (unlikely(kr)) { if (kr != KERN_NO_SPACE) dispatch_assume_zero(kr); firehose_mach_port_send_release(logd_port); return NULL; @@ -331,7 +346,8 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, fbh->fbh_logd_port = logd_port; fbh->fbh_pid = getpid(); fbh->fbh_uniquepid = unique_pid; - fbh->fbh_recvp = firehose_mach_port_allocate(opts, fb); + fbh->fbh_recvp = firehose_mach_port_allocate(opts, MACH_PORT_QLIMIT_BASIC, + fb); #endif // !KERNEL fbh->fbh_spi_version = OS_FIREHOSE_SPI_VERSION; fbh->fbh_bank.fbb_flags = bank_flags; @@ -345,13 +361,13 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, } firehose_buffer_update_limits_unlocked(fb); #else - uint16_t total = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT + 1; - const uint16_t num_kernel_io_pages = 8; + uint16_t total = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; + const uint16_t num_kernel_io_pages = 
__firehose_num_kernel_io_pages; uint16_t io_pages = num_kernel_io_pages; fbh->fbh_bank.fbb_state = (firehose_bank_state_u){ - .fbs_max_ref = total, - .fbs_io_bank = FIREHOSE_BANK_UNAVAIL_BIT - io_pages, - .fbs_mem_bank = FIREHOSE_BANK_UNAVAIL_BIT - (total - io_pages - 1), + .fbs_max_ref = (firehose_chunk_ref_t)(total + 1), + .fbs_io_bank = io_pages, + .fbs_mem_bank = total - io_pages, }; fbh->fbh_bank.fbb_limits = fbh->fbh_bank.fbb_state; #endif // KERNEL @@ -376,7 +392,7 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, // install the early boot page as the current one for persist fbh->fbh_stream[firehose_stream_persist].fbs_state.fss_current = FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT; - fbh->fbh_bank.fbb_state.fbs_io_bank += 1; + fbh->fbh_bank.fbb_state.fbs_io_bank -= 1; #endif fbh->fbh_ring_tail = (firehose_ring_tail_u){ @@ -386,52 +402,54 @@ firehose_buffer_create(mach_port_t logd_port, uint64_t unique_pid, } #ifndef KERNEL -static void -firehose_notify_source_invoke(mach_msg_header_t *hdr) -{ - const size_t reply_size = - sizeof(union __ReplyUnion__firehose_client_firehoseReply_subsystem); - - firehose_mig_server(firehoseReply_server, reply_size, hdr); -} +static char const * const _firehose_key = "firehose"; -static void -firehose_client_register_for_notifications(firehose_buffer_t fb) +static bool +firehose_drain_notifications_once(firehose_buffer_t fb) { - static const struct dispatch_continuation_s dc = { - .dc_func = (void *)firehose_notify_source_invoke, - }; - firehose_buffer_header_t fbh = &fb->fb_header; + mach_msg_options_t opts = MACH_RCV_MSG | MACH_RCV_TIMEOUT | + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | MACH_RCV_LARGE | + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0); - dispatch_once(&fbh->fbh_notifs_pred, ^{ - dispatch_source_t ds = _dispatch_source_create_mach_msg_direct_recv( - fbh->fbh_recvp, &dc); - dispatch_set_context(ds, fb); - dispatch_activate(ds); - fbh->fbh_notifs_source = ds; - }); + const size_t 
maxsize = + sizeof(union __RequestUnion__firehose_client_firehoseReply_subsystem); + const size_t maxreplysize = + sizeof(union __ReplyUnion__firehose_client_firehoseReply_subsystem); + mach_msg_size_t rcv_size = maxsize + MAX_TRAILER_SIZE; + mig_reply_error_t *msg = alloca(rcv_size); + kern_return_t kr; + + kr = mach_msg(&msg->Head, opts, 0, rcv_size, fb->fb_header.fbh_recvp, 0, 0); + + if (kr == KERN_SUCCESS) { + dispatch_thread_context_s firehose_ctxt = { + .dtc_key = _firehose_key, + .dtc_fb = fb, + }; + _dispatch_thread_context_push(&firehose_ctxt); + firehose_mig_server(firehoseReply_server, maxreplysize, &msg->Head); + _dispatch_thread_context_pop(&firehose_ctxt); + } else if (kr != MACH_RCV_TIMED_OUT) { + DISPATCH_CLIENT_CRASH(kr, "firehose_drain_notifications_once() failed"); + } + return kr == KERN_SUCCESS; } static void firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos, bool for_io) { - bool ask_for_notifs = fb->fb_header.fbh_notifs_source != NULL; - mach_port_t sendp = fb->fb_header.fbh_sendp; + firehose_buffer_pushport_t pushport = for_io; + mach_port_t sendp = fb->fb_header.fbh_sendp[pushport]; kern_return_t kr = KERN_FAILURE; - if (!ask_for_notifs && _dispatch_is_multithreaded_inline()) { - firehose_client_register_for_notifications(fb); - ask_for_notifs = true; - } - - if (slowpath(sendp == MACH_PORT_DEAD)) { + if (unlikely(sendp == MACH_PORT_DEAD)) { return; } - if (fastpath(sendp)) { - kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs); - if (likely(kr == KERN_SUCCESS)) { + if (likely(sendp)) { + kr = firehose_send_push_async(sendp, qos, 0); + if (likely(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT)) { return; } if (kr != MACH_SEND_INVALID_DEST) { @@ -440,10 +458,10 @@ firehose_client_send_push_async(firehose_buffer_t fb, qos_class_t qos, } } - sendp = firehose_client_reconnect(fb, sendp); - if (fastpath(MACH_PORT_VALID(sendp))) { - kr = firehose_send_push_async(sendp, qos, for_io, ask_for_notifs); - if 
(likely(kr == KERN_SUCCESS)) { + sendp = firehose_client_reconnect(fb, sendp, pushport); + if (likely(MACH_PORT_VALID(sendp))) { + kr = firehose_send_push_async(sendp, qos, 0); + if (likely(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT)) { return; } if (kr != MACH_SEND_INVALID_DEST) { @@ -485,13 +503,7 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, #ifndef KERNEL // this isn't a dispatch_once so that the upcall to libtrace // can actually log itself without blocking on the gate. - if (async_notif) { - if (os_atomic_xchg(&fbh->fbh_quarantined_state, - FBH_QUARANTINE_STARTED, relaxed) != - FBH_QUARANTINE_STARTED) { - firehose_client_start_quarantine(fb); - } - } else if (os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == + if (os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == FBH_QUARANTINE_NONE) { os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_NONE, FBH_QUARANTINE_PENDING, relaxed); @@ -532,7 +544,7 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_sub2o(fbh, + state.fbs_atomic_state = os_atomic_add2o(fbh, fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); __firehose_critical_region_leave(); @@ -549,23 +561,63 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, } #ifndef KERNEL +void * +firehose_buffer_get_logging_prefs(firehose_buffer_t fb, size_t *length) +{ + mach_port_t sendp = fb->fb_header.fbh_logd_port; + mach_port_t mem_port = MACH_PORT_NULL; + mach_vm_size_t size = 0; + mach_vm_address_t addr = 0; + kern_return_t kr; + + if (unlikely(!MACH_PORT_VALID(sendp))) { + *length = 0; + return NULL; + } + + kr = firehose_send_get_logging_prefs(sendp, &mem_port, &size); + if (unlikely(kr != KERN_SUCCESS)) { + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + *length = 
0; + return NULL; + } + + /* Map the memory handle into the server address space */ + kr = mach_vm_map(mach_task_self(), &addr, size, 0, + VM_FLAGS_ANYWHERE, mem_port, 0, FALSE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + DISPATCH_VERIFY_MIG(kr); + if (dispatch_assume_zero(kr)) { + addr = 0; + size = 0; + } + kr = mach_port_deallocate(mach_task_self(), mem_port); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + + *length = (size_t)size; + return (void *)addr; +} + OS_NOT_TAIL_CALLED OS_NOINLINE static void firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, firehose_bank_state_u *state_out) { - mach_port_t sendp = fb->fb_header.fbh_sendp; + firehose_buffer_pushport_t pushport = for_io; + mach_port_t sendp = fb->fb_header.fbh_sendp[pushport]; firehose_push_reply_t push_reply = { }; - qos_class_t qos = qos_class_self(); boolean_t quarantined = false; kern_return_t kr; - if (slowpath(sendp == MACH_PORT_DEAD)) { + if (unlikely(sendp == MACH_PORT_DEAD)) { return; } - if (fastpath(sendp)) { - kr = firehose_send_push_and_wait(sendp, qos, for_io, - &push_reply, &quarantined); + if (likely(sendp)) { + kr = firehose_send_push_and_wait(sendp, &push_reply, &quarantined); if (likely(kr == KERN_SUCCESS)) { goto success; } @@ -575,10 +627,9 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } } - sendp = firehose_client_reconnect(fb, sendp); - if (fastpath(MACH_PORT_VALID(sendp))) { - kr = firehose_send_push_and_wait(sendp, qos, for_io, - &push_reply, &quarantined); + sendp = firehose_client_reconnect(fb, sendp, pushport); + if (likely(MACH_PORT_VALID(sendp))) { + kr = firehose_send_push_and_wait(sendp, &push_reply, &quarantined); if (likely(kr == KERN_SUCCESS)) { goto success; } @@ -639,13 +690,21 @@ kern_return_t firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED, firehose_push_reply_t push_reply, boolean_t quarantined) { - // see _dispatch_source_merge_mach_msg_direct - dispatch_queue_t dq = 
_dispatch_queue_get_current(); - firehose_buffer_t fb = dispatch_get_context(dq); + dispatch_thread_context_t ctxt = + _dispatch_thread_context_find(_firehose_key); + firehose_buffer_t fb = ctxt->dtc_fb; firehose_client_merge_updates(fb, true, push_reply, quarantined, NULL); return KERN_SUCCESS; } +kern_return_t +firehose_client_get_logging_prefs_reply(mach_port_t req_port OS_UNUSED, + mach_port_t mem_port OS_UNUSED, mach_vm_size_t size OS_UNUSED) +{ + DISPATCH_INTERNAL_CRASH(0, "firehose_client_get_logging_prefs_reply " + "should never be sent to the buffer receive port"); +} + #endif // !KERNEL #pragma mark - #pragma mark Buffer handling @@ -661,47 +720,94 @@ firehose_buffer_update_limits(firehose_buffer_t fb) } #endif // !KERNEL +OS_ALWAYS_INLINE +static inline uint64_t +firehose_buffer_chunk_apply_stamp_slop(uint64_t stamp) +{ + // boot starts mach absolute time at + // 0, and wrapping around to values above UINT64_MAX - + // FIREHOSE_STAMP_SLOP breaks firehose_buffer_stream_flush() + // assumptions + return stamp > FIREHOSE_STAMP_SLOP ? 
stamp - FIREHOSE_STAMP_SLOP : 0; +} + +OS_ALWAYS_INLINE +static inline bool +firehose_buffer_chunk_stamp_delta_fits(firehose_chunk_t fc, uint64_t stamp) +{ + return !((stamp - fc->fc_timestamp) >> 48); +} + OS_ALWAYS_INLINE static inline firehose_tracepoint_t firehose_buffer_chunk_init(firehose_chunk_t fc, - firehose_tracepoint_query_t ask, uint8_t **privptr) + firehose_tracepoint_query_t ask, uint8_t **privptr, uint64_t thread, + firehose_tracepoint_t *lft, uint64_t loss_start) { + firehose_tracepoint_t ft; + uint64_t stamp_and_len; + const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); uint16_t pub_offs = offsetof(struct firehose_chunk_s, fc_data); uint16_t priv_offs = FIREHOSE_CHUNK_SIZE; - pub_offs += roundup(ft_size + ask->pubsize, 8); - priv_offs -= ask->privsize; + if (unlikely(lft)) { + const uint16_t flp_size = sizeof(struct firehose_loss_payload_s); + uint64_t stamp, minstamp; + uint16_t flp_pub_offs; + + // first, try to make both timestamps fit + minstamp = MIN(ask->stamp, loss_start); + fc->fc_timestamp = + firehose_buffer_chunk_apply_stamp_slop(minstamp); + + // if they can't both fit, use the timestamp of the actual tracepoint: + // a) this should _really_ never happen + // b) if it does, a determined reader can tell that it did by comparing + // the loss event start_stamp payload field with the main stamp + if (!firehose_buffer_chunk_stamp_delta_fits(fc, ask->stamp)) { + // if ask->stamp didn't fit on the first try it must be greater than + // loss_start by > 2^48, so it must also be greater than + // FIREHOSE_STAMP_SLOP - so no need to worry about underflow here + fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + } - if (fc->fc_pos.fcp_atomic_pos) { - // Needed for process death handling (recycle-reuse): - // No atomic fences required, we merely want to make sure the observers - // will see memory effects in program (asm) order. - // 1. the payload part of the chunk is cleared completely - // 2. 
the chunk is marked as reused - // This ensures that if we don't see a reference to a chunk in the ring - // and it is dirty, when crawling the chunk, we don't see remnants of - // other tracepoints - // - // We only do that when the fc_pos is non zero, because zero means - // we just faulted the chunk, and the kernel already bzero-ed it. - bzero(fc->fc_data, sizeof(fc->fc_data)); - } - dispatch_compiler_barrier(); - // boot starts mach absolute time at 0, and - // wrapping around to values above UINT64_MAX - FIREHOSE_STAMP_SLOP - // breaks firehose_buffer_stream_flush() assumptions - if (ask->stamp > FIREHOSE_STAMP_SLOP) { - fc->fc_timestamp = ask->stamp - FIREHOSE_STAMP_SLOP; + *lft = (firehose_tracepoint_t)fc->fc_data; + + stamp = firehose_buffer_chunk_stamp_delta_fits(fc, loss_start) ? + loss_start : ask->stamp; + + stamp_and_len = stamp - fc->fc_timestamp; + stamp_and_len |= (uint64_t)flp_size << 48; + os_atomic_store2o(*lft, ft_stamp_and_length, stamp_and_len, relaxed); + + (*lft)->ft_thread = thread; // not really meaningful + + flp_pub_offs = roundup(ft_size + flp_size, 8); + pub_offs += flp_pub_offs; + ft = (firehose_tracepoint_t)(fc->fc_data + flp_pub_offs); } else { - fc->fc_timestamp = 0; + fc->fc_timestamp = + firehose_buffer_chunk_apply_stamp_slop(ask->stamp); + ft = (firehose_tracepoint_t)fc->fc_data; } + + pub_offs += roundup(ft_size + ask->pubsize, 8); + priv_offs -= ask->privsize; + + // Needed for process death handling (tracepoint-begin): + // write the length before making the chunk visible + stamp_and_len = ask->stamp - fc->fc_timestamp; + stamp_and_len |= (uint64_t)ask->pubsize << 48; + os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + + ft->ft_thread = thread; + fc->fc_pos = (firehose_chunk_pos_u){ .fcp_next_entry_offs = pub_offs, .fcp_private_offs = priv_offs, .fcp_refcnt = 1, - .fcp_qos = firehose_buffer_qos_bits_propagate(), .fcp_stream = ask->stream, .fcp_flag_io = ask->for_io, .fcp_quarantined = ask->quarantined, @@ 
-710,70 +816,158 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, if (privptr) { *privptr = fc->fc_start + priv_offs; } - return (firehose_tracepoint_t)fc->fc_data; + return ft; } OS_NOINLINE static firehose_tracepoint_t firehose_buffer_stream_chunk_install(firehose_buffer_t fb, - firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) + firehose_tracepoint_query_t ask, uint8_t **privptr, + firehose_chunk_ref_t ref) { firehose_stream_state_u state, new_state; - firehose_tracepoint_t ft; + firehose_tracepoint_t ft = NULL, lft; firehose_buffer_header_t fbh = &fb->fb_header; firehose_buffer_stream_t fbs = &fbh->fbh_stream[ask->stream]; - uint64_t stamp_and_len; - if (fastpath(ref)) { + if (likely(ref)) { + uint64_t thread; + bool installed = false; firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); - ft = firehose_buffer_chunk_init(fc, ask, privptr); - // Needed for process death handling (tracepoint-begin): - // write the length before making the chunk visible - stamp_and_len = ask->stamp - fc->fc_timestamp; - stamp_and_len |= (uint64_t)ask->pubsize << 48; - os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); -#ifdef KERNEL - ft->ft_thread = thread_tid(current_thread()); + + if (fc->fc_pos.fcp_atomic_pos) { + // Needed for process death handling (recycle-reuse): + // No atomic fences required, we merely want to make sure the + // observers will see memory effects in program (asm) order. + // 1. the payload part of the chunk is cleared completely + // 2. the chunk is marked as reused + // This ensures that if we don't see a reference to a chunk in the + // ring and it is dirty, when crawling the chunk, we don't see + // remnants of other tracepoints. + // + // We only do that when the fc_pos is non zero, because zero means + // we just faulted the chunk, and the kernel already bzero-ed it. 
+ bzero(fc->fc_data, sizeof(fc->fc_data)); + } + dispatch_compiler_barrier(); + + if (ask->stream == firehose_stream_metadata) { + os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, 1ULL << ref, + relaxed); + } + +#if KERNEL + thread = thread_tid(current_thread()); #else - ft->ft_thread = _pthread_threadid_self_np_direct(); + thread = _pthread_threadid_self_np_direct(); #endif - if (ask->stream == firehose_stream_metadata) { - os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, - 1ULL << ref, relaxed); + + // If no tracepoints were lost at the tail end of this generation, the + // chunk timestamp is the stamp of the first tracepoint and the first + // tracepoint belongs at the beginning of the chunk. If, however, we + // need to record a loss event, the timestamp has to be the minimum of + // the loss stamp and the stamp of the first tracepoint, and the loss + // event needs to be placed at the beginning of the chunk in addition to + // the first actual tracepoint. + state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + + if (likely(!state.fss_loss)) { + ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, NULL, 0); + + // release to publish the chunk init + installed = os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, release, { + if (state.fss_loss) { + os_atomic_rmw_loop_give_up(break); + } + // clear the gate, waiter bits and loss count + new_state = (firehose_stream_state_u){ + .fss_current = ref, + .fss_generation = state.fss_generation + 1, + }; + }); } - // release barrier to make the chunk init visible - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, - state.fss_atomic_state, new_state.fss_atomic_state, release, { - // We use a generation counter to prevent a theoretical ABA problem: - // a thread could try to acquire a tracepoint in a chunk, fail to - // do so mark it as to be pushed, enqueue it, and then be preempted - // - // It sleeps for a long time, 
and then tries to acquire the - // allocator bit and uninstalling the chunk. Succeeds in doing so, - // but because the chunk actually happened to have cycled all the - // way back to being installed. That thread would effectively hide - // that unflushed chunk and leak it. - // - // Having a generation counter prevents the uninstallation of the - // chunk to spuriously succeed when it was a re-incarnation of it. - new_state = (firehose_stream_state_u){ - .fss_current = ref, - .fss_generation = state.fss_generation + 1, + + if (unlikely(!installed)) { + uint64_t loss_start, loss_end; + + // ensure we can see the start stamp + (void)os_atomic_load2o(fbs, fbs_state.fss_atomic_state, acquire); + loss_start = fbs->fbs_loss_start; + fbs->fbs_loss_start = 0; // reset under fss_gate + loss_end = mach_continuous_time(); + + ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, &lft, + loss_start); + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, release, { + // no giving up this time! 
+ new_state = (firehose_stream_state_u){ + .fss_current = ref, + .fss_generation = state.fss_generation + 1, + }; + }); + + struct firehose_loss_payload_s flp = { + .start_stamp = loss_start, + .end_stamp = loss_end, + .count = state.fss_loss, }; - }); + memcpy(lft->ft_data, &flp, sizeof(flp)); + + firehose_tracepoint_id_u ftid = { .ftid = { + ._namespace = firehose_tracepoint_namespace_loss, + // no meaningful value for _type + // nor for _flags + ._code = ask->stream, + } }; + + // publish the contents of the loss tracepoint + os_atomic_store2o(lft, ft_id.ftid_atomic_value, ftid.ftid_value, + release); + } } else { - // the allocator gave up just clear the allocator + waiter bits - firehose_stream_state_u mask = { .fss_allocator = ~0u, }; - state.fss_atomic_state = os_atomic_and_orig2o(fbs, - fbs_state.fss_atomic_state, ~mask.fss_atomic_state, relaxed); - ft = NULL; + // the allocator gave up - just clear the allocator and waiter bits and + // increment the loss count + state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + if (!state.fss_timestamped) { + fbs->fbs_loss_start = mach_continuous_time(); + + // release to publish the timestamp + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, + release, { + new_state = (firehose_stream_state_u){ + .fss_loss = + MIN(state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX), + .fss_timestamped = true, + .fss_generation = state.fss_generation, + }; + }); + } else { + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, + relaxed, { + new_state = (firehose_stream_state_u){ + .fss_loss = + MIN(state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX), + .fss_timestamped = true, + .fss_generation = state.fss_generation, + }; + }); + } } // pairs with the one in firehose_buffer_tracepoint_reserve() __firehose_critical_region_leave(); #ifndef KERNEL - if 
(unlikely(_dispatch_lock_is_locked_by_self(state.fss_gate.dgl_lock))) { + _dispatch_trace_firehose_chunk_install(((uint64_t *)ask)[0], + ((uint64_t *)ask)[1], state.fss_atomic_state, + new_state.fss_atomic_state); + if (unlikely(state.fss_allocator & FIREHOSE_GATE_WAITERS_MASK)) { _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate, state.fss_gate.dgl_lock); } @@ -789,17 +983,17 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, firehose_client_start_quarantine(fb); } } -#endif // KERNEL +#endif // !KERNEL return ft; } #ifndef KERNEL OS_ALWAYS_INLINE -static inline uint16_t +static inline firehose_chunk_ref_t firehose_buffer_ring_try_grow(firehose_buffer_bank_t fbb, uint16_t limit) { - uint16_t ref = 0; + firehose_chunk_ref_t ref = 0; uint64_t bitmap; _dispatch_unfair_lock_lock(&fbb->fbb_lock); @@ -813,8 +1007,8 @@ firehose_buffer_ring_try_grow(firehose_buffer_bank_t fbb, uint16_t limit) } OS_ALWAYS_INLINE -static inline uint16_t -firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) +static inline firehose_chunk_ref_t +firehose_buffer_ring_shrink(firehose_buffer_t fb, firehose_chunk_ref_t ref) { const size_t madv_size = FIREHOSE_CHUNK_SIZE * FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT; @@ -830,7 +1024,7 @@ firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) } bitmap = (fb->fb_header.fbh_bank.fbb_bitmap &= ~(1UL << ref)); - ref &= ~madv_mask; + ref &= ~(FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT - 1); if ((bitmap & (madv_mask << ref)) == 0) { // if MADVISE_WIDTH consecutive chunks are free, madvise them free madvise(firehose_buffer_ref_to_chunk(fb, ref), madv_size, MADV_FREE); @@ -844,7 +1038,7 @@ firehose_buffer_ring_shrink(firehose_buffer_t fb, uint16_t ref) OS_NOINLINE void -firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) +firehose_buffer_ring_enqueue(firehose_buffer_t fb, firehose_chunk_ref_t ref) { firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t volatile *fbh_ring; @@ -916,9 +1110,9 @@ 
firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) // a thread being preempted here for GEN_MASK worth of ring rotations, // it could lead to the cmpxchg succeed, and have a bogus enqueue // (confused enqueuer) - if (fastpath(os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, + if (likely(os_atomic_cmpxchgv(&fbh_ring[idx], gen, gen | ref, &dummy, relaxed))) { - if (fastpath(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, + if (likely(os_atomic_cmpxchgv(fbh_ring_head, head, head + 1, &head, release))) { __firehose_critical_region_leave(); break; @@ -949,27 +1143,31 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref) void firehose_buffer_force_connect(firehose_buffer_t fb) { - mach_port_t sendp = fb->fb_header.fbh_sendp; - if (sendp == MACH_PORT_NULL) firehose_client_reconnect(fb, MACH_PORT_NULL); + mach_port_t sendp = fb->fb_header.fbh_sendp[FIREHOSE_BUFFER_PUSHPORT_MEM]; + if (sendp == MACH_PORT_NULL) { + firehose_client_reconnect(fb, MACH_PORT_NULL, + FIREHOSE_BUFFER_PUSHPORT_MEM); + } } #endif OS_ALWAYS_INLINE -static inline uint16_t +static inline firehose_chunk_ref_t firehose_buffer_ring_try_recycle(firehose_buffer_t fb) { firehose_ring_tail_u pos, old; uint16_t volatile *fbh_ring; - uint16_t gen, ref, entry, tail; + uint16_t gen, entry, tail; + firehose_chunk_ref_t ref; firehose_chunk_t fc; bool for_io; os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, old.frp_atomic_tail, pos.frp_atomic_tail, relaxed, { pos = old; - if (fastpath(old.frp_mem_tail != old.frp_mem_flushed)) { + if (likely(old.frp_mem_tail != old.frp_mem_flushed)) { pos.frp_mem_tail++; - } else if (fastpath(old.frp_io_tail != old.frp_io_flushed)) { + } else if (likely(old.frp_io_tail != old.frp_io_flushed)) { pos.frp_io_tail++; } else { os_atomic_rmw_loop_give_up(return 0); @@ -1016,46 +1214,42 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) OS_NOINLINE static firehose_tracepoint_t 
firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t fb, - firehose_tracepoint_query_t ask, uint8_t **privptr, uint16_t ref) + firehose_tracepoint_query_t ask, uint8_t **privptr) { - const uint64_t bank_unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(ask->for_io); - const uint64_t bank_inc = FIREHOSE_BANK_INC(ask->for_io); + bool for_io = ask->for_io; + firehose_buffer_pushport_t pushport = for_io; firehose_buffer_bank_t const fbb = &fb->fb_header.fbh_bank; firehose_bank_state_u state; - uint16_t fbs_max_ref; + firehose_chunk_ref_t ref, fbs_max_ref; + + for (int i = MACH_PORT_QLIMIT_BASIC; + i-- && firehose_drain_notifications_once(fb); ); // first wait for our bank to have space, if needed - if (!fastpath(ask->is_bank_ok)) { + if (unlikely(!ask->is_bank_ok)) { state.fbs_atomic_state = os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); - while ((state.fbs_atomic_state - bank_inc) & bank_unavail_mask) { + while (!firehose_buffer_bank_try_reserve_slot(fb, for_io, &state)) { if (ask->quarantined) { - __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, - ask->for_io, &state); + __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, for_io, + &state); } else { - firehose_client_send_push_and_wait(fb, ask->for_io, &state); + firehose_client_send_push_and_wait(fb, for_io, &state); } - if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + if (unlikely(fb->fb_header.fbh_sendp[pushport] == MACH_PORT_DEAD)) { // logd was unloaded, give up return NULL; } } - ask->is_bank_ok = true; fbs_max_ref = state.fbs_max_ref; } else { fbs_max_ref = fbb->fbb_state.fbs_max_ref; } - // second, if we were passed a chunk, we may need to shrink - if (slowpath(ref)) { - goto try_shrink; - } - // third, wait for a chunk to come up, and if not, wait on the daemon for (;;) { - if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { - try_shrink: - if (slowpath(ref >= fbs_max_ref)) { + if (likely(ref = firehose_buffer_ring_try_recycle(fb))) { + if 
(unlikely(ref >= fbs_max_ref)) { ref = firehose_buffer_ring_shrink(fb, ref); if (!ref) { continue; @@ -1063,16 +1257,16 @@ firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t f } break; } - if (fastpath(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) { + if (likely(ref = firehose_buffer_ring_try_grow(fbb, fbs_max_ref))) { break; } if (ask->quarantined) { - __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, - ask->for_io, &state); + __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, for_io, + NULL); } else { - firehose_client_send_push_and_wait(fb, ask->for_io, NULL); + firehose_client_send_push_and_wait(fb, for_io, NULL); } - if (slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD)) { + if (unlikely(fb->fb_header.fbh_sendp[pushport] == MACH_PORT_DEAD)) { // logd was unloaded, give up break; } @@ -1088,7 +1282,7 @@ _dispatch_gate_lock_load_seq_cst(dispatch_gate_t l) } OS_NOINLINE static void -_dispatch_gate_wait(dispatch_gate_t l, uint32_t flags) +_dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t flags) { (void)flags; _dispatch_wait_until(_dispatch_gate_lock_load_seq_cst(l) == 0); @@ -1102,42 +1296,60 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, const unsigned for_io = ask->for_io; const firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; firehose_bank_state_u state; - uint16_t ref = 0; + bool reserved; + firehose_chunk_ref_t ref = 0; + +#ifndef KERNEL + // before we try to allocate anything look at whether there are things logd + // already sent back to us + firehose_drain_notifications_once(fb); +#endif // KERNEL + + state.fbs_atomic_state = + os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + reserved = firehose_buffer_bank_try_reserve_slot(fb, for_io, &state); - uint64_t unavail_mask = FIREHOSE_BANK_UNAVAIL_MASK(for_io); #ifndef KERNEL - state.fbs_atomic_state = os_atomic_add_orig2o(fbb, - fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), acquire); - if 
(fastpath(!(state.fbs_atomic_state & unavail_mask))) { - ask->is_bank_ok = true; - if (fastpath(ref = firehose_buffer_ring_try_recycle(fb))) { - if (fastpath(ref < state.fbs_max_ref)) { - return firehose_buffer_stream_chunk_install(fb, ask, - privptr, ref); + if (likely(reserved)) { + while (!ref) { + ref = firehose_buffer_ring_try_recycle(fb); + if (unlikely(!ref)) { + break; } + + if (unlikely(ref >= state.fbs_max_ref)) { + ref = firehose_buffer_ring_shrink(fb, ref); + } + } + + if (unlikely(!ref)) { + ref = firehose_buffer_ring_try_grow(fbb, state.fbs_max_ref); } } - return firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(fb, ask, - privptr, ref); -#else - firehose_bank_state_u value; - ask->is_bank_ok = os_atomic_rmw_loop2o(fbb, fbb_state.fbs_atomic_state, - state.fbs_atomic_state, value.fbs_atomic_state, acquire, { - value = state; - if (slowpath((value.fbs_atomic_state & unavail_mask) != 0)) { - os_atomic_rmw_loop_give_up(break); + + if (likely(ref || !ask->reliable)) { + if (!ref && reserved) { + firehose_buffer_bank_relinquish_slot(fb, for_io); } - value.fbs_atomic_state += FIREHOSE_BANK_INC(for_io); - }); - if (ask->is_bank_ok) { + + // installing `0` unlocks the allocator + return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); + } else { + firehose_buffer_stream_signal_waiting_for_logd(fb, ask->stream); + + ask->is_bank_ok = reserved; + return firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(fb, + ask, privptr); + } +#else + if (likely(reserved)) { ref = firehose_buffer_ring_try_recycle(fb); - if (slowpath(ref == 0)) { - // the kernel has no overlap between I/O and memory chunks, - // having an available bank slot means we should be able to recycle + if (unlikely(ref == 0)) { + // the kernel has no overlap between I/O and memory chunks, so + // having an available bank slot means we must be able to recycle DISPATCH_INTERNAL_CRASH(0, "Unable to recycle a chunk"); } } - // rdar://25137005 installing `0` unlocks the 
allocator return firehose_buffer_stream_chunk_install(fb, ask, privptr, ref); #endif // KERNEL } @@ -1148,11 +1360,11 @@ __firehose_buffer_tracepoint_reserve(uint64_t stamp, firehose_stream_t stream, uint16_t pubsize, uint16_t privsize, uint8_t **privptr) { firehose_buffer_t fb = kernel_firehose_buffer; - if (!fastpath(fb)) { + if (unlikely(!fb)) { return NULL; } return firehose_buffer_tracepoint_reserve(fb, stamp, stream, pubsize, - privsize, privptr); + privsize, privptr, false); } firehose_buffer_t @@ -1179,10 +1391,19 @@ void __firehose_merge_updates(firehose_push_reply_t update) { firehose_buffer_t fb = kernel_firehose_buffer; - if (fastpath(fb)) { + if (likely(fb)) { firehose_client_merge_updates(fb, true, update, false, NULL); } } + +int +__firehose_kernel_configuration_valid(uint8_t chunk_count, uint8_t io_pages) +{ + return (((chunk_count % 4) == 0) && + (chunk_count >= FIREHOSE_BUFFER_KERNEL_MIN_CHUNK_COUNT) && + (chunk_count <= FIREHOSE_BUFFER_KERNEL_MAX_CHUNK_COUNT) && + (io_pages <= (chunk_count * 3 / 4))); +} #endif // KERNEL #endif // OS_FIREHOSE_SPI diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h index e41d9cb29..1f1b730a9 100644 --- a/src/firehose/firehose_buffer_internal.h +++ b/src/firehose/firehose_buffer_internal.h @@ -31,28 +31,42 @@ // firehose buffer is CHUNK_COUNT * CHUNK_SIZE big == 256k #define FIREHOSE_BUFFER_CHUNK_COUNT 64ul -#ifdef KERNEL -#define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 15 -#else +#ifndef KERNEL #define FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT 4 #define FIREHOSE_BUFFER_MADVISE_CHUNK_COUNT 4 #endif +#define FIREHOSE_RING_POS_GEN_INC ((uint16_t)(FIREHOSE_BUFFER_CHUNK_COUNT)) +#define FIREHOSE_RING_POS_IDX_MASK ((uint16_t)(FIREHOSE_RING_POS_GEN_INC - 1)) +#define FIREHOSE_RING_POS_GEN_MASK ((uint16_t)~FIREHOSE_RING_POS_IDX_MASK) + +#if __has_feature(c_static_assert) +_Static_assert(FIREHOSE_RING_POS_IDX_MASK < 0xff, + "firehose chunk ref fits in its type with space for 
PRISTINE"); +#endif + +typedef uint8_t firehose_chunk_ref_t; + static const unsigned long firehose_stream_uses_io_bank = (1UL << firehose_stream_persist) | - (1UL << firehose_stream_special); + (1UL << firehose_stream_special) | + (1UL << firehose_stream_signpost); typedef union { #define FIREHOSE_BANK_SHIFT(bank) (16 * (bank)) #define FIREHOSE_BANK_INC(bank) (1ULL << FIREHOSE_BANK_SHIFT(bank)) -#define FIREHOSE_BANK_UNAVAIL_BIT ((uint16_t)0x8000) -#define FIREHOSE_BANK_UNAVAIL_MASK(bank) (FIREHOSE_BANK_INC(bank) << 15) uint64_t fbs_atomic_state; struct { - uint16_t fbs_mem_bank; - uint16_t fbs_io_bank; - uint16_t fbs_max_ref; - uint16_t fbs_unused; + union { + struct { + uint16_t fbs_mem_bank; + uint16_t fbs_io_bank; + }; + uint16_t fbs_banks[2]; + }; + firehose_chunk_ref_t fbs_max_ref; + uint8_t fbs_unused1; + uint16_t fbs_unused2; }; } firehose_bank_state_u; @@ -89,15 +103,34 @@ typedef union { uint64_t fss_atomic_state; dispatch_gate_s fss_gate; struct { +#define FIREHOSE_GATE_RELIABLE_WAITERS_BIT 0x00000001UL +#define FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT 0x00000002UL +#define FIREHOSE_GATE_WAITERS_MASK 0x00000003UL uint32_t fss_allocator; -#define FIREHOSE_STREAM_STATE_PRISTINE 0xffff - uint16_t fss_current; +#define FIREHOSE_STREAM_STATE_PRISTINE 0xff + firehose_chunk_ref_t fss_current; + uint8_t fss_loss : FIREHOSE_LOSS_COUNT_WIDTH; + uint8_t fss_timestamped : 1; + uint8_t fss_waiting_for_logd : 1; + + /* + * We use a generation counter to prevent a theoretical ABA problem: a + * thread could try to acquire a tracepoint in a chunk, fail to do so, + * mark it as to be pushed, enqueue it, and then be preempted. It + * sleeps for a long time, and then tries to acquire the allocator bit + * and uninstall the chunk. Succeeds in doing so, but because the chunk + * actually happened to have cycled all the way back to being installed. + * That thread would effectively hide that unflushed chunk and leak it. 
+ * Having a generation counter prevents the uninstallation of the chunk + * to spuriously succeed when it was a re-incarnation of it. + */ uint16_t fss_generation; }; } firehose_stream_state_u; typedef struct firehose_buffer_stream_s { firehose_stream_state_u fbs_state; + uint64_t fbs_loss_start; // protected by fss_gate } OS_ALIGNED(128) *firehose_buffer_stream_t; typedef union { @@ -110,9 +143,11 @@ typedef union { }; } firehose_ring_tail_u; -#define FIREHOSE_RING_POS_GEN_INC ((uint16_t)(FIREHOSE_BUFFER_CHUNK_COUNT)) -#define FIREHOSE_RING_POS_IDX_MASK ((uint16_t)(FIREHOSE_RING_POS_GEN_INC - 1)) -#define FIREHOSE_RING_POS_GEN_MASK ((uint16_t)~FIREHOSE_RING_POS_IDX_MASK) +OS_ENUM(firehose_buffer_pushport, uint8_t, + FIREHOSE_BUFFER_PUSHPORT_MEM, + FIREHOSE_BUFFER_PUSHPORT_IO, + FIREHOSE_BUFFER_NPUSHPORTS, +); /* * Rings are circular buffers with CHUNK_COUNT entries, with 3 important markers @@ -163,13 +198,11 @@ typedef struct firehose_buffer_header_s { uint64_t fbh_uniquepid; pid_t fbh_pid; mach_port_t fbh_logd_port; - mach_port_t volatile fbh_sendp; + mach_port_t volatile fbh_sendp[FIREHOSE_BUFFER_NPUSHPORTS]; mach_port_t fbh_recvp; // past that point fields may be aligned differently between 32 and 64bits #ifndef KERNEL - dispatch_once_t fbh_notifs_pred OS_ALIGNED(64); - dispatch_source_t fbh_notifs_source; dispatch_unfair_lock_s fbh_logd_lock; #define FBH_QUARANTINE_NONE 0 #define FBH_QUARANTINE_PENDING 1 @@ -187,13 +220,14 @@ union firehose_buffer_u { // used to let the compiler pack these values in 1 or 2 registers typedef struct firehose_tracepoint_query_s { + uint64_t stamp; uint16_t pubsize; uint16_t privsize; firehose_stream_t stream; bool is_bank_ok; - bool for_io; - bool quarantined; - uint64_t stamp; + bool for_io : 1; + bool quarantined : 1; + bool reliable : 1; } *firehose_tracepoint_query_t; #ifndef FIREHOSE_SERVER @@ -206,11 +240,14 @@ firehose_tracepoint_t firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, firehose_tracepoint_query_t 
ask, uint8_t **privptr); +void * +firehose_buffer_get_logging_prefs(firehose_buffer_t fb, size_t *size); + void firehose_buffer_update_limits(firehose_buffer_t fb); void -firehose_buffer_ring_enqueue(firehose_buffer_t fb, uint16_t ref); +firehose_buffer_ring_enqueue(firehose_buffer_t fb, firehose_chunk_ref_t ref); void firehose_buffer_force_connect(firehose_buffer_t fb); diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index 3939ee25b..64d865439 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -21,25 +21,30 @@ #ifndef __FIREHOSE_INLINE_INTERNAL__ #define __FIREHOSE_INLINE_INTERNAL__ +#ifndef _os_atomic_basetypeof +#define _os_atomic_basetypeof(p) \ + typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) +#endif + #define firehose_atomic_maxv2o(p, f, v, o, m) \ os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ }) #define firehose_atomic_max2o(p, f, v, m) ({ \ - typeof((p)->f) _old; \ + _os_atomic_basetypeof(&(p)->f) _old; \ firehose_atomic_maxv2o(p, f, v, &_old, m); \ }) #ifndef KERNEL // caller must test for non zero first OS_ALWAYS_INLINE -static inline uint16_t +static inline firehose_chunk_ref_t firehose_bitmap_first_set(uint64_t bitmap) { dispatch_assert(bitmap != 0); // this builtin returns 0 if bitmap is 0, or (first bit set + 1) - return (uint16_t)__builtin_ffsll((long long)bitmap) - 1; + return (firehose_chunk_ref_t)__builtin_ffsll((long long)bitmap) - 1; } #endif @@ -49,11 +54,13 @@ firehose_bitmap_first_set(uint64_t bitmap) OS_ALWAYS_INLINE static inline mach_port_t -firehose_mach_port_allocate(uint32_t flags, void *ctx) +firehose_mach_port_allocate(uint32_t flags, mach_port_msgcount_t qlimit, + void *ctx) { mach_port_t port = MACH_PORT_NULL; mach_port_options_t opts = { - .flags = flags, + .flags = flags | MPO_QLIMIT, + .mpl = { .mpl_qlimit = qlimit }, }; kern_return_t kr = 
mach_port_construct(mach_task_self(), &opts, (mach_port_context_t)ctx, &port); @@ -107,7 +114,8 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, expects_reply = true; } - if (!fastpath(demux(hdr, &msg_reply->Head))) { + msg_reply->Head = (mach_msg_header_t){ }; + if (unlikely(!demux(hdr, &msg_reply->Head))) { rc = MIG_BAD_ID; } else if (msg_reply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { rc = KERN_SUCCESS; @@ -117,14 +125,14 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, rc = msg_reply->RetCode; } - if (slowpath(rc == KERN_SUCCESS && expects_reply)) { + if (unlikely(rc == KERN_SUCCESS && expects_reply)) { // if crashing here, some handler returned KERN_SUCCESS // hoping for firehose_mig_server to perform the mach_msg() // call to reply, and it doesn't know how to do that DISPATCH_INTERNAL_CRASH(msg_reply->Head.msgh_id, "firehose_mig_server doesn't handle replies"); } - if (slowpath(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { + if (unlikely(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { // destroy the request - but not the reply port hdr->msgh_remote_port = 0; mach_msg_destroy(hdr); @@ -144,15 +152,15 @@ firehose_buffer_chunk_for_address(void *addr) } OS_ALWAYS_INLINE -static inline uint16_t +static inline firehose_chunk_ref_t firehose_buffer_chunk_to_ref(firehose_buffer_t fb, firehose_chunk_t fbc) { - return (uint16_t)(fbc - fb->fb_chunks); + return (firehose_chunk_ref_t)(fbc - fb->fb_chunks); } OS_ALWAYS_INLINE static inline firehose_chunk_t -firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) +firehose_buffer_ref_to_chunk(firehose_buffer_t fb, firehose_chunk_ref_t ref) { return fb->fb_chunks + ref; } @@ -160,20 +168,6 @@ firehose_buffer_ref_to_chunk(firehose_buffer_t fb, uint16_t ref) #ifndef FIREHOSE_SERVER #if DISPATCH_PURE_C -OS_ALWAYS_INLINE -static inline uint8_t -firehose_buffer_qos_bits_propagate(void) -{ -#ifndef KERNEL - pthread_priority_t pp = _dispatch_priority_propagate(); - - pp &= 
_PTHREAD_PRIORITY_QOS_CLASS_MASK; - return (uint8_t)(pp >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT); -#else - return 0; -#endif -} - OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) @@ -182,7 +176,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) firehose_stream_state_u old_state, new_state; firehose_chunk_t fc; uint64_t stamp = UINT64_MAX; // will cause the reservation to fail - uint16_t ref; + firehose_chunk_ref_t ref; long result; old_state.fss_atomic_state = @@ -198,7 +192,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) fc = firehose_buffer_ref_to_chunk(fb, old_state.fss_current); result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, - firehose_buffer_qos_bits_propagate(), 1, 0, NULL); + 0, 1, 0, NULL); if (likely(result < 0)) { firehose_buffer_ring_enqueue(fb, old_state.fss_current); } @@ -247,6 +241,10 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) * @param privptr * The pointer to the private buffer, can be NULL * + * @param reliable + * Whether we should wait for logd or drop the tracepoint in the event that no + * chunk is available. + * * @result * The pointer to the tracepoint. 
*/ @@ -254,17 +252,15 @@ OS_ALWAYS_INLINE static inline firehose_tracepoint_t firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, firehose_stream_t stream, uint16_t pubsize, - uint16_t privsize, uint8_t **privptr) + uint16_t privsize, uint8_t **privptr, bool reliable) { firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; firehose_stream_state_u old_state, new_state; firehose_chunk_t fc; -#if KERNEL - bool failable = false; -#endif + bool waited = false; bool success; long result; - uint16_t ref; + firehose_chunk_ref_t ref; // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store old_state.fss_atomic_state = @@ -276,11 +272,10 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, if (likely(ref && ref != FIREHOSE_STREAM_STATE_PRISTINE)) { fc = firehose_buffer_ref_to_chunk(fb, ref); result = firehose_chunk_tracepoint_try_reserve(fc, stamp, stream, - firehose_buffer_qos_bits_propagate(), - pubsize, privsize, privptr); + 0, pubsize, privsize, privptr); if (likely(result > 0)) { uint64_t thread; -#ifdef KERNEL +#if KERNEL thread = thread_tid(current_thread()); #else thread = _pthread_threadid_self_np_direct(); @@ -293,28 +288,70 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, } new_state.fss_current = 0; } -#if KERNEL - if (failable) { - return NULL; - } + + if (!reliable && ((waited && old_state.fss_timestamped) +#ifndef KERNEL + || old_state.fss_waiting_for_logd +#endif + )) { + new_state.fss_loss = + MIN(old_state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX); + + success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + old_state.fss_atomic_state, new_state.fss_atomic_state, + &old_state.fss_atomic_state, relaxed); + if (success) { +#ifndef KERNEL + _dispatch_trace_firehose_reserver_gave_up(stream, ref, waited, + old_state.fss_atomic_state, new_state.fss_atomic_state); #endif + return NULL; + } else { + continue; + } + } if (unlikely(old_state.fss_allocator)) { - 
_dispatch_gate_wait(&fbs->fbs_state.fss_gate, +#if KERNEL + _dispatch_firehose_gate_wait(&fbs->fbs_state.fss_gate, DLOCK_LOCK_DATA_CONTENTION); + waited = true; + old_state.fss_atomic_state = os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); -#if KERNEL - failable = true; +#else + if (likely(reliable)) { + new_state.fss_allocator |= FIREHOSE_GATE_RELIABLE_WAITERS_BIT; + } else { + new_state.fss_allocator |= FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT; + } + + bool already_equal = (new_state.fss_atomic_state == + old_state.fss_atomic_state); + success = already_equal || os_atomic_cmpxchgv2o(fbs, + fbs_state.fss_atomic_state, old_state.fss_atomic_state, + new_state.fss_atomic_state, &old_state.fss_atomic_state, + relaxed); + if (success) { + _dispatch_trace_firehose_reserver_wait(stream, ref, waited, + old_state.fss_atomic_state, new_state.fss_atomic_state, + reliable); + _dispatch_firehose_gate_wait(&fbs->fbs_state.fss_gate, + new_state.fss_allocator, + DLOCK_LOCK_DATA_CONTENTION); + waited = true; + + old_state.fss_atomic_state = os_atomic_load2o(fbs, + fbs_state.fss_atomic_state, relaxed); + } #endif continue; } - // if the thread doing the allocation is a low priority one - // we may starve high priority ones. 
- // so disable preemption before we become an allocator - // the reenabling of the preemption is in - // firehose_buffer_stream_chunk_install + // if the thread doing the allocation is of low priority we may starve + // threads of higher priority, so disable pre-emption before becoming + // the allocator (it is re-enabled in + // firehose_buffer_stream_chunk_install()) __firehose_critical_region_enter(); #if KERNEL new_state.fss_allocator = (uint32_t)cpu_number(); @@ -331,6 +368,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, } struct firehose_tracepoint_query_s ask = { + .stamp = stamp, .pubsize = pubsize, .privsize = privsize, .stream = stream, @@ -338,8 +376,15 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, #ifndef KERNEL .quarantined = fb->fb_header.fbh_quarantined, #endif - .stamp = stamp, + .reliable = reliable, }; + +#ifndef KERNEL + _dispatch_trace_firehose_allocator(((uint64_t *)&ask)[0], + ((uint64_t *)&ask)[1], old_state.fss_atomic_state, + new_state.fss_atomic_state); +#endif + return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); } @@ -379,7 +424,81 @@ firehose_buffer_tracepoint_flush(firehose_buffer_t fb, } } +OS_ALWAYS_INLINE +static inline bool +firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, + firehose_bank_state_u *state_in_out) +{ + bool success; + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + + firehose_bank_state_u old_state = *state_in_out, new_state; + do { + if (unlikely(!old_state.fbs_banks[for_io])) { + return false; + } + new_state = old_state; + new_state.fbs_banks[for_io]--; + + success = os_atomic_cmpxchgvw(&fbb->fbb_state.fbs_atomic_state, + old_state.fbs_atomic_state, new_state.fbs_atomic_state, + &old_state.fbs_atomic_state, acquire); + } while (unlikely(!success)); + + *state_in_out = new_state; + return true; +} + #ifndef KERNEL +OS_ALWAYS_INLINE +static inline void 
+firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, + firehose_stream_t stream) +{ + firehose_stream_state_u state, new_state; + firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; + + state.fss_atomic_state = + os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + if (!state.fss_timestamped) { + fbs->fbs_loss_start = mach_continuous_time(); + + // release to publish the timestamp + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, + release, { + new_state = (firehose_stream_state_u){ + .fss_allocator = (state.fss_allocator & + ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_loss = state.fss_loss, + .fss_timestamped = true, + .fss_waiting_for_logd = true, + .fss_generation = state.fss_generation, + }; + }); + } else { + os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + state.fss_atomic_state, new_state.fss_atomic_state, + relaxed, { + new_state = (firehose_stream_state_u){ + .fss_allocator = (state.fss_allocator & + ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_loss = state.fss_loss, + .fss_timestamped = true, + .fss_waiting_for_logd = true, + .fss_generation = state.fss_generation, + }; + }); + } + + _dispatch_trace_firehose_wait_for_logd(stream, fbs->fbs_loss_start, + state.fss_atomic_state, new_state.fss_atomic_state); + if (unlikely(state.fss_allocator & FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT)) { + _dispatch_gate_broadcast_slow(&fbs->fbs_state.fss_gate, + state.fss_gate.dgl_lock); + } +} + OS_ALWAYS_INLINE static inline void firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) @@ -405,6 +524,15 @@ firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_update_limits(fb); } } + +OS_ALWAYS_INLINE +static inline void +firehose_buffer_bank_relinquish_slot(firehose_buffer_t fb, bool for_io) +{ + firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; + os_atomic_add2o(fbb, fbb_state.fbs_atomic_state, 
FIREHOSE_BANK_INC(for_io), + relaxed); +} #endif // !KERNEL #endif // !defined(FIREHOSE_SERVER) diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs index c08054516..1320ea2fe 100644 --- a/src/firehose/firehose_reply.defs +++ b/src/firehose/firehose_reply.defs @@ -32,7 +32,7 @@ skip; // firehose_register simpleroutine push_reply( RequestPort req_port : mach_port_move_send_once_t; -in rtc : kern_return_t; +in ReturnCode : kern_return_t; in push_reply : firehose_push_reply_t; in quarantined : boolean_t ); @@ -43,3 +43,10 @@ in push_reply : firehose_push_reply_t; in quarantined : boolean_t; WaitTime timeout : natural_t ); + +simpleroutine get_logging_prefs_reply( +RequestPort req_port : mach_port_move_send_once_t; +// no ReturnCode for complex messages +in mem_port : mach_port_t; +in mem_size : mach_vm_size_t +); diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index ba335dbe3..e870757e0 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -39,15 +39,18 @@ typedef struct fs_client_queue_s { static struct firehose_server_s { mach_port_t fs_bootstrap_port; dispatch_mach_t fs_mach_channel; - dispatch_queue_t fs_ipc_queue; dispatch_queue_t fs_snapshot_gate_queue; dispatch_queue_t fs_io_drain_queue; dispatch_queue_t fs_mem_drain_queue; firehose_handler_t fs_handler; firehose_snapshot_t fs_snapshot; - int fs_kernel_fd; firehose_client_t fs_kernel_client; + int fs_kernel_fd; + + mach_port_t fs_prefs_cache_entry; + size_t fs_prefs_cache_size; + void *fs_prefs_cache; TAILQ_HEAD(, firehose_client_s) fs_clients; os_unfair_lock fs_clients_lock; @@ -74,13 +77,15 @@ fs_clients_unlock(void) os_unfair_lock_unlock(&server_config.fs_clients_lock); } -static void firehose_server_demux(firehose_client_t fc, - mach_msg_header_t *msg_hdr); static void firehose_client_cancel(firehose_client_t fc); static void firehose_client_snapshot_finish(firehose_client_t fc, firehose_snapshot_t snapshot, bool for_io); 
static void firehose_client_handle_death(void *ctxt); +static const struct mig_subsystem *const firehose_subsystems[] = { + (mig_subsystem_t)&firehose_server_firehose_subsystem, +}; + #pragma mark - #pragma mark firehose client enqueueing @@ -121,22 +126,18 @@ fs_source(bool quarantined, bool for_io) OS_ALWAYS_INLINE static inline void -firehose_client_push(firehose_client_t fc, pthread_priority_t pp, - bool quarantined, bool for_io) +firehose_client_push(firehose_client_t fc, bool quarantined, bool for_io) { fs_client_queue_t queue = fs_queue(quarantined, for_io); - if (fc && os_mpsc_push_update_tail(queue, fs_client, fc, fc_next[for_io])) { - os_mpsc_push_update_head(queue, fs_client, fc); - _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1); - } else if (pp) { - _dispatch_source_merge_data(fs_source(quarantined, for_io), pp, 1); + if (fc && os_mpsc_push_item(os_mpsc(queue, fs_client), + fc, fc_next[for_io])) { + dispatch_source_merge_data(fs_source(quarantined, for_io), 1); } } OS_ALWAYS_INLINE static inline bool -firehose_client_wakeup(firehose_client_t fc, pthread_priority_t pp, - bool for_io) +firehose_client_wakeup(firehose_client_t fc, bool for_io) { uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); @@ -151,7 +152,7 @@ firehose_client_wakeup(firehose_client_t fc, pthread_priority_t pp, } new_state = old_state | enqueued_bit; }); - firehose_client_push(old_state & enqueued_bit ? NULL : fc, pp, + firehose_client_push(old_state & enqueued_bit ? NULL : fc, fc->fc_quarantined, for_io); return true; } @@ -171,7 +172,7 @@ firehose_client_start_cancel(firehose_client_t fc, bool for_io) } new_state = old_state | enqueued_bit | canceling_bit; }); - firehose_client_push(old_state & enqueued_bit ? NULL : fc, 0, + firehose_client_push(old_state & enqueued_bit ? 
NULL : fc, fc->fc_quarantined, for_io); } @@ -254,7 +255,6 @@ firehose_client_mark_corrupted(firehose_client_t fc, mach_port_t reply_port) { // this client is really confused, do *not* answer to asyncs anymore fc->fc_memory_corrupted = true; - fc->fc_use_notifs = false; // XXX: do not cancel the data sources or a corrupted client could // prevent snapshots from being taken if unlucky with ordering @@ -292,7 +292,8 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags firehose_chunk_t fbc; firehose_event_t evt; uint16_t volatile *fbh_ring; - uint16_t flushed, ref, count = 0; + uint16_t flushed, count = 0; + firehose_chunk_ref_t ref; uint16_t client_head, client_flushed, sent_flushed; firehose_snapshot_t snapshot = NULL; bool for_io = (flags & FIREHOSE_DRAIN_FOR_IO); @@ -315,7 +316,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (fc->fc_needs_mem_snapshot) snapshot = server_config.fs_snapshot; } - if (slowpath(fc->fc_memory_corrupted)) { + if (unlikely(fc->fc_memory_corrupted)) { goto corrupt; } @@ -335,7 +336,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags // see firehose_buffer_ring_enqueue do { ref = (flushed + count) & FIREHOSE_RING_POS_IDX_MASK; - ref = os_atomic_load(&fbh_ring[ref], relaxed); + ref = (firehose_chunk_ref_t)os_atomic_load(&fbh_ring[ref], relaxed); ref &= FIREHOSE_RING_POS_IDX_MASK; } while (!fc->fc_pid && !ref); count++; @@ -350,7 +351,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags os_unfair_lock_lock(&fc->fc_lock); } server_config.fs_handler(fc, evt, fbc); - if (slowpath(snapshot)) { + if (unlikely(snapshot)) { snapshot->handler(fc, evt, fbc); } if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { @@ -358,7 +359,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags } // clients not using notifications (single threaded) always drain fully // because they use all 
their limit, always - } while (!fc->fc_use_notifs || count < DRAIN_BATCH_SIZE || snapshot); + } while (count < DRAIN_BATCH_SIZE || snapshot); if (count) { // we don't load the full fbh_ring_tail because that is a 64bit quantity @@ -376,12 +377,12 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (!fc->fc_pid) { // will fire firehose_client_notify() because port is MACH_PORT_DEAD port = fc->fc_sendp; - } else if (!port && client_flushed == sent_flushed && fc->fc_use_notifs) { + } else if (!port && client_flushed == sent_flushed) { port = fc->fc_sendp; } } - if (slowpath(snapshot)) { + if (unlikely(snapshot)) { firehose_client_snapshot_finish(fc, snapshot, for_io); firehose_client_snapshot_mark_done(fc, snapshot, for_io); } @@ -394,12 +395,12 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags dispatch_resume(fc->fc_kernel_source); } } else { - if (fc->fc_use_notifs && count >= DRAIN_BATCH_SIZE) { + if (count >= DRAIN_BATCH_SIZE) { // if we hit the drain batch size, the client probably logs a lot // and there's more to drain, so optimistically schedule draining // again this is cheap since the queue is hot, and is fair for other // clients - firehose_client_wakeup(fc, 0, for_io); + firehose_client_wakeup(fc, for_io); } if (count && server_config.fs_kernel_client) { // the kernel is special because it can drop messages, so if we're @@ -433,9 +434,10 @@ firehose_client_drain(void *ctxt) size_t clients = 0; while (queue->fs_client_tail) { - fc = os_mpsc_get_head(queue, fs_client); + fc = os_mpsc_get_head(os_mpsc(queue, fs_client)); do { - fc_next = os_mpsc_pop_head(queue, fs_client, fc, fc_next[for_io]); + fc_next = os_mpsc_pop_head(os_mpsc(queue, fs_client), + fc, fc_next[for_io]); if (firehose_client_dequeue(fc, for_io)) { firehose_client_drain_one(fc, MACH_PORT_NULL, for_io ? 
FIREHOSE_DRAIN_FOR_IO : 0); @@ -481,8 +483,10 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); fs_clients_unlock(); - dispatch_release(fc->fc_mach_channel); - fc->fc_mach_channel = NULL; + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_release(fc->fc_mach_channel[i]); + fc->fc_mach_channel[i] = NULL; + } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; _os_object_release(&fc->fc_as_os_object); @@ -528,7 +532,8 @@ firehose_client_handle_death(void *ctxt) // remove the pages that we flushed already from the bitmap for (; tail != flushed; tail++) { uint16_t ring_pos = tail & FIREHOSE_RING_POS_IDX_MASK; - uint16_t ref = fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK; + firehose_chunk_ref_t ref = + fbh_ring[ring_pos] & FIREHOSE_RING_POS_IDX_MASK; bitmap &= ~(1ULL << ref); } @@ -538,7 +543,7 @@ firehose_client_handle_death(void *ctxt) // Then look at all the allocated pages not seen in the ring while (bitmap) { - uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_chunk_ref_t ref = firehose_bitmap_first_set(bitmap); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; @@ -571,7 +576,7 @@ firehose_client_handle_death(void *ctxt) uint64_t mem_bitmap_copy = mem_bitmap; while (mem_bitmap_copy) { - uint16_t ref = firehose_bitmap_first_set(mem_bitmap_copy); + firehose_chunk_ref_t ref = firehose_bitmap_first_set(mem_bitmap_copy); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); mem_bitmap_copy &= ~(1ULL << ref); @@ -596,44 +601,54 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, switch (reason) { case DISPATCH_MACH_MESSAGE_RECEIVED: + if (dispatch_mach_mig_demux(fc, firehose_subsystems, + countof(firehose_subsystems), dmsg)) { + break; + } + msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); if (msg_hdr->msgh_id == 
MACH_NOTIFY_NO_SENDERS) { _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", firehose_client_get_unique_pid(fc, NULL)); - dispatch_mach_cancel(fc->fc_mach_channel); - } else { - firehose_server_demux(fc, msg_hdr); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_cancel(fc->fc_mach_channel[i]); + } } + mach_msg_destroy(msg_hdr); break; case DISPATCH_MACH_DISCONNECTED: msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - port = msg_hdr->msgh_remote_port; - if (MACH_PORT_VALID(port)) { - if (port != fc->fc_sendp) { - DISPATCH_INTERNAL_CRASH(port, "Unknown send-right"); - } - firehose_mach_port_send_release(fc->fc_sendp); - fc->fc_sendp = MACH_PORT_NULL; - } port = msg_hdr->msgh_local_port; if (MACH_PORT_VALID(port)) { - if (port != fc->fc_recvp) { + int i; + for (i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + if (fc->fc_recvp[i] == port) { + break; + } + } + if (i == FIREHOSE_BUFFER_NPUSHPORTS) { DISPATCH_INTERNAL_CRASH(port, "Unknown recv-right"); } - firehose_mach_port_recv_dispose(fc->fc_recvp, fc); - fc->fc_recvp = MACH_PORT_NULL; + firehose_mach_port_recv_dispose(fc->fc_recvp[i], &fc->fc_recvp[i]); + fc->fc_recvp[i] = MACH_PORT_NULL; } break; case DISPATCH_MACH_CANCELED: - if (MACH_PORT_VALID(fc->fc_sendp)) { - DISPATCH_INTERNAL_CRASH(fc->fc_sendp, "send-right leak"); - } - if (MACH_PORT_VALID(fc->fc_recvp)) { - DISPATCH_INTERNAL_CRASH(fc->fc_recvp, "recv-right leak"); + if (!_os_atomic_refcnt_sub2o(fc, fc_mach_channel_refcnt, 1)) { + _os_atomic_refcnt_dispose_barrier2o(fc, fc_mach_channel_refcnt); + + firehose_mach_port_send_release(fc->fc_sendp); + fc->fc_sendp = MACH_PORT_NULL; + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + if (MACH_PORT_VALID(fc->fc_recvp[i])) { + DISPATCH_INTERNAL_CRASH(fc->fc_recvp[i], "recv-right leak"); + } + } + + firehose_client_cancel(fc); } - firehose_client_cancel(fc); break; } } @@ -647,8 +662,8 @@ firehose_client_kernel_source_handle_event(void *ctxt) // resumed in firehose_client_drain 
for both memory and I/O dispatch_suspend(fc->fc_kernel_source); dispatch_suspend(fc->fc_kernel_source); - firehose_client_wakeup(fc, 0, false); - firehose_client_wakeup(fc, 0, true); + firehose_client_wakeup(fc, false); + firehose_client_wakeup(fc, true); } #endif @@ -656,7 +671,7 @@ static inline void firehose_client_resume(firehose_client_t fc, const struct firehose_client_connected_info_s *fcci) { - dispatch_assert_queue(server_config.fs_io_drain_queue); + dispatch_assert_queue(server_config.fs_mem_drain_queue); fs_clients_lock(); TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); @@ -666,8 +681,10 @@ firehose_client_resume(firehose_client_t fc, if (!fc->fc_pid) { dispatch_activate(fc->fc_kernel_source); } else { - dispatch_mach_connect(fc->fc_mach_channel, - fc->fc_recvp, fc->fc_sendp, NULL); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_connect(fc->fc_mach_channel[i], + fc->fc_recvp[i], MACH_PORT_NULL, NULL); + } } } @@ -677,15 +694,10 @@ firehose_client_cancel(firehose_client_t fc) _dispatch_debug("client died (unique_pid: 0x%llx", firehose_client_get_unique_pid(fc, NULL)); - if (MACH_PORT_VALID(fc->fc_sendp)) { - firehose_mach_port_send_release(fc->fc_sendp); - fc->fc_sendp = MACH_PORT_NULL; - } - if (MACH_PORT_VALID(fc->fc_recvp)) { - firehose_mach_port_recv_dispose(fc->fc_recvp, fc); - fc->fc_recvp = MACH_PORT_NULL; + dispatch_assert(fc->fc_sendp == MACH_PORT_NULL); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_assert(fc->fc_recvp[i] == MACH_PORT_NULL); } - fc->fc_use_notifs = false; firehose_client_start_cancel(fc, false); firehose_client_start_cancel(fc, true); } @@ -720,7 +732,8 @@ typedef struct firehose_token_s { static firehose_client_t firehose_client_create(firehose_buffer_t fb, firehose_token_t token, - mach_port_t comm_recvp, mach_port_t comm_sendp) + mach_port_t comm_mem_recvp, mach_port_t comm_io_recvp, + mach_port_t comm_sendp) { uint64_t unique_pid = fb->fb_header.fbh_uniquepid; 
firehose_client_t fc = _firehose_client_create(fb); @@ -731,13 +744,21 @@ firehose_client_create(firehose_buffer_t fb, firehose_token_t token, fc->fc_pidversion = token->execcnt; _dispatch_debug("FIREHOSE_REGISTER (unique_pid: 0x%llx)", unique_pid); - fc->fc_recvp = comm_recvp; + mach_port_t recvp[] = { comm_mem_recvp, comm_io_recvp }; + dispatch_queue_t fsqs[] = { + server_config.fs_mem_drain_queue, + server_config.fs_io_drain_queue + }; + fc->fc_mach_channel_refcnt = FIREHOSE_BUFFER_NPUSHPORTS; + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + fc->fc_recvp[i] = recvp[i]; + firehose_mach_port_guard(fc->fc_recvp[i], true, &fc->fc_recvp[i]); + dm = dispatch_mach_create_f("com.apple.firehose.peer", fsqs[i], fc, + firehose_client_handle_mach_event); + fc->fc_mach_channel[i] = dm; + } + fc->fc_sendp = comm_sendp; - firehose_mach_port_guard(comm_recvp, true, fc); - dm = dispatch_mach_create_f("com.apple.firehose.peer", - server_config.fs_ipc_queue, - fc, firehose_client_handle_mach_event); - fc->fc_mach_channel = dm; return fc; } @@ -767,19 +788,19 @@ firehose_kernel_client_create(void) } DISPATCH_INTERNAL_CRASH(errno, "Unable to map kernel buffer"); } - if (fb_map.fbmi_size != - FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) { + if ((fb_map.fbmi_size < FIREHOSE_BUFFER_KERNEL_MIN_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE) || + (fb_map.fbmi_size > FIREHOSE_BUFFER_KERNEL_MAX_CHUNK_COUNT * FIREHOSE_CHUNK_SIZE)) { DISPATCH_INTERNAL_CRASH(fb_map.fbmi_size, "Unexpected kernel buffer size"); } fc = _firehose_client_create((firehose_buffer_t)(uintptr_t)fb_map.fbmi_addr); ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)fd, 0, - fs->fs_ipc_queue); + fs->fs_mem_drain_queue); + dispatch_set_qos_class_floor(ds, QOS_CLASS_USER_INITIATED, 0); dispatch_set_context(ds, fc); dispatch_source_set_event_handler_f(ds, firehose_client_kernel_source_handle_event); fc->fc_kernel_source = ds; - fc->fc_use_notifs = true; fc->fc_sendp = MACH_PORT_DEAD; // causes 
drain() to call notify fs->fs_kernel_fd = fd; @@ -854,24 +875,16 @@ firehose_client_initiate_quarantine(firehose_client_t fc) #pragma mark - #pragma mark firehose server -/* - * The current_message context stores the client info for the current message - * being handled. The only reason this works is because currently the message - * processing is serial. If that changes, this would not work. - */ -static firehose_client_t cur_client_info; - static void -firehose_server_handle_mach_event(void *ctx OS_UNUSED, +firehose_server_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, dispatch_mach_msg_t dmsg, mach_error_t error OS_UNUSED) { - mach_msg_header_t *msg_hdr = NULL; - if (reason == DISPATCH_MACH_MESSAGE_RECEIVED) { - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - /* TODO: Assert this should be a register message */ - firehose_server_demux(NULL, msg_hdr); + if (!dispatch_mach_mig_demux(ctx, firehose_subsystems, + countof(firehose_subsystems), dmsg)) { + mach_msg_destroy(dispatch_mach_msg_get_msg(dmsg, NULL)); + } } } @@ -880,26 +893,32 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) { struct firehose_server_s *fs = &server_config; dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL; - dispatch_queue_attr_t attr_ui; + dispatch_queue_attr_t attr_inactive, attr_utility_inactive; dispatch_mach_t dm; dispatch_source_t ds; // just reference the string so that it's captured (void)os_atomic_load(&__libfirehose_serverVersionString[0], relaxed); - attr_ui = dispatch_queue_attr_make_with_qos_class(attr, - QOS_CLASS_USER_INITIATED, 0); - fs->fs_ipc_queue = dispatch_queue_create_with_target( - "com.apple.firehose.ipc", attr_ui, NULL); fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( "com.apple.firehose.snapshot-gate", attr, NULL); + + attr_inactive = dispatch_queue_attr_make_initially_inactive(attr); + attr_utility_inactive = dispatch_queue_attr_make_with_qos_class( + attr_inactive, QOS_CLASS_UTILITY, 
0); + fs->fs_io_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-io", attr, NULL); + "com.apple.firehose.drain-io", attr_utility_inactive, NULL); + dispatch_set_qos_class_fallback(fs->fs_io_drain_queue, QOS_CLASS_UTILITY); + dispatch_activate(fs->fs_io_drain_queue); + fs->fs_mem_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-mem", attr, NULL); + "com.apple.firehose.drain-mem", attr_inactive, NULL); + dispatch_set_qos_class_fallback(fs->fs_mem_drain_queue, QOS_CLASS_UTILITY); + dispatch_activate(fs->fs_mem_drain_queue); dm = dispatch_mach_create_f("com.apple.firehose.listener", - fs->fs_ipc_queue, NULL, firehose_server_handle_mach_event); + fs->fs_mem_drain_queue, NULL, firehose_server_handle_mach_event); fs->fs_bootstrap_port = comm_port; fs->fs_mach_channel = dm; fs->fs_handler = _Block_copy(handler); @@ -947,7 +966,7 @@ firehose_server_resume(void) struct firehose_server_s *fs = &server_config; if (fs->fs_kernel_client) { - dispatch_async(fs->fs_io_drain_queue, ^{ + dispatch_async(fs->fs_mem_drain_queue, ^{ struct firehose_client_connected_info_s fcci = { .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, }; @@ -970,11 +989,46 @@ firehose_server_cancel(void) fs_clients_lock(); TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) { - dispatch_mach_cancel(fc->fc_mach_channel); + if (fc->fc_pid) { + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_cancel(fc->fc_mach_channel[i]); + } + } } fs_clients_unlock(); } +void +firehose_server_set_logging_prefs(void *pointer, size_t length, os_block_t block) +{ + dispatch_async(server_config.fs_mem_drain_queue, ^{ + kern_return_t kr; + memory_object_size_t size = (memory_object_size_t)length; + if (server_config.fs_prefs_cache_entry) { + kr = mach_port_deallocate(mach_task_self(), + server_config.fs_prefs_cache_entry); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + if (server_config.fs_prefs_cache) { + 
munmap(server_config.fs_prefs_cache, + server_config.fs_prefs_cache_size); + } + + server_config.fs_prefs_cache = pointer; + server_config.fs_prefs_cache_size = length; + server_config.fs_prefs_cache_entry = MACH_PORT_NULL; + if (pointer) { + kr = mach_make_memory_entry_64(mach_task_self(), &size, + (mach_vm_address_t)pointer, VM_PROT_READ | MAP_MEM_VM_SHARE, + &server_config.fs_prefs_cache_entry, MACH_PORT_NULL); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + if (block) block(); + }); +} + dispatch_queue_t firehose_server_copy_queue(firehose_server_queue_t which) { @@ -1040,7 +1094,7 @@ firehose_client_metadata_stream_peek(firehose_client_t fc, uint64_t bitmap = fbh->fbh_bank.fbb_metadata_bitmap; while (bitmap) { - uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_chunk_ref_t ref = firehose_bitmap_first_set(bitmap); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; @@ -1100,7 +1154,7 @@ firehose_client_snapshot_finish(firehose_client_t fc, // remove the pages that we flushed already from the bitmap for (; tail != flushed; tail++) { uint16_t idx = tail & FIREHOSE_RING_POS_IDX_MASK; - uint16_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK; + firehose_chunk_ref_t ref = fbh_ring[idx] & FIREHOSE_RING_POS_IDX_MASK; bitmap &= ~(1ULL << ref); } @@ -1113,7 +1167,7 @@ firehose_client_snapshot_finish(firehose_client_t fc, // Then look at all the allocated pages not seen in the ring while (bitmap) { - uint16_t ref = firehose_bitmap_first_set(bitmap); + firehose_chunk_ref_t ref = firehose_bitmap_first_set(bitmap); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; @@ -1139,18 +1193,18 @@ static void firehose_snapshot_tickle_clients(firehose_snapshot_t fs, bool for_io) { firehose_client_t fc; - long n = 0; + uint32_t n = 0; fs_clients_lock(); TAILQ_FOREACH(fc, &server_config.fs_clients, fc_entry) { - if 
(slowpath(fc->fc_memory_corrupted)) { + if (unlikely(fc->fc_memory_corrupted)) { continue; } if (!fc->fc_pid) { #if TARGET_OS_SIMULATOR continue; #endif - } else if (!firehose_client_wakeup(fc, 0, for_io)) { + } else if (!firehose_client_wakeup(fc, for_io)) { continue; } n++; @@ -1164,7 +1218,10 @@ firehose_snapshot_tickle_clients(firehose_snapshot_t fs, bool for_io) // cheating: equivalent to dispatch_group_enter() n times // without the acquire barriers that we don't need - if (n) os_atomic_add2o(fs->fs_group, dg_value, n, relaxed); + if (n) { + os_atomic_sub2o(fs->fs_group, dg_bits, + n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); + } } static void @@ -1237,7 +1294,8 @@ firehose_snapshot(firehose_snapshot_handler_t handler) kern_return_t firehose_server_register(mach_port_t server_port OS_UNUSED, mach_port_t mem_port, mach_vm_size_t mem_size, - mach_port_t comm_recvp, mach_port_t comm_sendp, + mach_port_t comm_mem_recvp, mach_port_t comm_io_recvp, + mach_port_t comm_sendp, mach_port_t extra_info_port, mach_vm_size_t extra_info_size, audit_token_t atoken) { @@ -1253,12 +1311,12 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, } /* - * Request a MACH_NOTIFY_NO_SENDERS notification for recvp. That should - * indicate the client going away. + * Request a MACH_NOTIFY_NO_SENDERS notification for the mem_recvp. That + * should indicate the client going away. 
*/ mach_port_t previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), comm_recvp, - MACH_NOTIFY_NO_SENDERS, 0, comm_recvp, + kr = mach_port_request_notification(mach_task_self(), comm_mem_recvp, + MACH_NOTIFY_NO_SENDERS, 0, comm_mem_recvp, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); DISPATCH_VERIFY_MIG(kr); if (dispatch_assume_zero(kr)) { @@ -1275,93 +1333,87 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, return KERN_NO_SPACE; } - if (extra_info_port && extra_info_size) { - mach_vm_address_t addr = 0; - kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0, - VM_FLAGS_ANYWHERE, extra_info_port, 0, FALSE, - VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); - if (dispatch_assume_zero(kr)) { - mach_vm_deallocate(mach_task_self(), base_addr, mem_size); - return KERN_NO_SPACE; + if (extra_info_port) { + if (extra_info_size) { + mach_vm_address_t addr = 0; + kr = mach_vm_map(mach_task_self(), &addr, extra_info_size, 0, + VM_FLAGS_ANYWHERE, extra_info_port, 0, TRUE, + VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE); + if (dispatch_assume_zero(kr)) { + mach_vm_deallocate(mach_task_self(), base_addr, mem_size); + return KERN_NO_SPACE; + } + fcci.fcci_data = (void *)(uintptr_t)addr; + fcci.fcci_size = (size_t)extra_info_size; } - fcci.fcci_data = (void *)(uintptr_t)addr; - fcci.fcci_size = (size_t)extra_info_size; + firehose_mach_port_send_release(extra_info_port); } + firehose_mach_port_send_release(mem_port); + fc = firehose_client_create((firehose_buffer_t)base_addr, - (firehose_token_t)&atoken, comm_recvp, comm_sendp); - dispatch_async(server_config.fs_io_drain_queue, ^{ - firehose_client_resume(fc, &fcci); - if (fcci.fcci_size) { - vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data, - fcci.fcci_size); - } - }); + (firehose_token_t)&atoken, comm_mem_recvp, comm_io_recvp, + comm_sendp); + firehose_client_resume(fc, &fcci); + + if (fcci.fcci_size) { + vm_deallocate(mach_task_self(), (vm_address_t)fcci.fcci_data, + 
fcci.fcci_size); + } - if (extra_info_port) firehose_mach_port_send_release(extra_info_port); - firehose_mach_port_send_release(mem_port); return KERN_SUCCESS; } kern_return_t -firehose_server_push_async(mach_port_t server_port OS_UNUSED, - qos_class_t qos, boolean_t for_io, boolean_t expects_notifs) +firehose_server_push_async(mach_port_t server_port, + qos_class_t qos DISPATCH_UNUSED) { - firehose_client_t fc = cur_client_info; - pthread_priority_t pp = _pthread_qos_class_encode(qos, 0, - _PTHREAD_PRIORITY_ENFORCE_FLAG); + firehose_client_t fc = dispatch_mach_mig_demux_get_context(); + bool for_io = (server_port == fc->fc_recvp[FIREHOSE_BUFFER_PUSHPORT_IO]); _dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)", firehose_client_get_unique_pid(fc, NULL)); - if (!slowpath(fc->fc_memory_corrupted)) { - if (expects_notifs && !fc->fc_use_notifs) { - fc->fc_use_notifs = true; - } - firehose_client_wakeup(fc, pp, for_io); + if (likely(!fc->fc_memory_corrupted)) { + firehose_client_wakeup(fc, for_io); } return KERN_SUCCESS; } kern_return_t -firehose_server_push_and_wait(mach_port_t server_port OS_UNUSED, - mach_port_t reply_port, qos_class_t qos, boolean_t for_io, - firehose_push_reply_t *push_reply OS_UNUSED, +firehose_server_push_and_wait(mach_port_t server_port, + mach_port_t reply_port, firehose_push_reply_t *push_reply OS_UNUSED, boolean_t *quarantinedOut OS_UNUSED) { - firehose_client_t fc = cur_client_info; - dispatch_block_flags_t flags = DISPATCH_BLOCK_ENFORCE_QOS_CLASS; - dispatch_block_t block; - dispatch_queue_t q; + firehose_client_t fc = dispatch_mach_mig_demux_get_context(); + bool for_io = (server_port == fc->fc_recvp[FIREHOSE_BUFFER_PUSHPORT_IO]); _dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)", firehose_client_get_unique_pid(fc, NULL)); - if (slowpath(fc->fc_memory_corrupted)) { + if (unlikely(fc->fc_memory_corrupted)) { firehose_client_mark_corrupted(fc, reply_port); return MIG_NO_REPLY; } + dispatch_queue_t q; if (for_io) { q = 
server_config.fs_io_drain_queue; } else { q = server_config.fs_mem_drain_queue; } + dispatch_assert_queue(q); + + firehose_client_drain_one(fc, reply_port, + for_io ? FIREHOSE_DRAIN_FOR_IO : 0); - block = dispatch_block_create_with_qos_class(flags, qos, 0, ^{ - firehose_client_drain_one(fc, reply_port, - for_io ? FIREHOSE_DRAIN_FOR_IO : 0); - }); - dispatch_async(q, block); - _Block_release(block); return MIG_NO_REPLY; } -static void -firehose_server_demux(firehose_client_t fc, mach_msg_header_t *msg_hdr) +kern_return_t +firehose_server_get_logging_prefs(mach_port_t server_port OS_UNUSED, + mach_port_t *mem_port, mach_vm_size_t *prefs_size) { - const size_t reply_size = - sizeof(union __ReplyUnion__firehose_server_firehose_subsystem); - - cur_client_info = fc; - firehose_mig_server(firehose_server, reply_size, msg_hdr); + *mem_port = server_config.fs_prefs_cache_entry; + *prefs_size = (mach_vm_size_t)server_config.fs_prefs_cache_size; + return KERN_SUCCESS; } diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 13f52b880..106b7a135 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -21,7 +21,7 @@ #ifndef __FIREHOSE_SERVER_INTERNAL__ #define __FIREHOSE_SERVER_INTERNAL__ -OS_OBJECT_CLASS_DECL(firehose_client, object); +OS_OBJECT_CLASS_DECL(firehose_client); #define FIREHOSE_CLIENT_CLASS OS_OBJECT_VTABLE(firehose_client) typedef struct firehose_snapshot_s *firehose_snapshot_t; @@ -57,25 +57,26 @@ struct firehose_client_s { #define FC_STATE_IO_CANCELED 0x0200 #define FC_STATE_CANCELED_MASK 0x0300 - uintptr_t volatile fc_state; - void *volatile fc_ctxt; union { - dispatch_mach_t fc_mach_channel; + dispatch_mach_t fc_mach_channel[FIREHOSE_BUFFER_NPUSHPORTS]; dispatch_source_t fc_kernel_source; }; - mach_port_t fc_recvp; + mach_port_t fc_recvp[FIREHOSE_BUFFER_NPUSHPORTS]; mach_port_t fc_sendp; os_unfair_lock fc_lock; pid_t fc_pid; int fc_pidversion; uid_t fc_euid; - bool 
fc_use_notifs; - bool fc_memory_corrupted; - bool fc_needs_io_snapshot; - bool fc_needs_mem_snapshot; - bool fc_quarantined; + os_atomic(uint16_t) fc_state; + os_atomic(uint8_t) fc_mach_channel_refcnt; + // These bits are mutated from different locking domains, and so cannot be + // safely consolidated into a bit-field. + bool volatile fc_memory_corrupted; + bool volatile fc_needs_io_snapshot; + bool volatile fc_needs_mem_snapshot; + bool volatile fc_quarantined; }; void diff --git a/src/init.c b/src/init.c index 6672fac45..c7d869961 100644 --- a/src/init.c +++ b/src/init.c @@ -32,6 +32,7 @@ #pragma mark - #pragma mark dispatch_init + #if USE_LIBDISPATCH_INIT_CONSTRUCTOR DISPATCH_NOTHROW __attribute__((constructor)) void @@ -143,14 +144,15 @@ voucher_activity_hooks_t _voucher_libtrace_hooks; dispatch_mach_t _voucher_activity_debug_channel; #endif #if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG -int _dispatch_set_qos_class_enabled; +bool _dispatch_set_qos_class_enabled; #endif #if DISPATCH_USE_KEVENT_WORKQUEUE && DISPATCH_USE_MGR_THREAD -int _dispatch_kevent_workqueue_enabled; +bool _dispatch_kevent_workqueue_enabled = 1; #endif DISPATCH_HW_CONFIG(); uint8_t _dispatch_unsafe_fork; +uint8_t _dispatch_mode; bool _dispatch_child_of_unsafe_fork; #if DISPATCH_USE_MEMORYPRESSURE_SOURCE bool _dispatch_memory_warn; @@ -191,133 +193,213 @@ const struct dispatch_queue_offsets_s dispatch_queue_offsets = { .dqo_priority_size = 0, }; +#if TARGET_OS_MAC +const struct dispatch_allocator_layout_s dispatch_allocator_layout = { + .dal_version = 1, +#if DISPATCH_ALLOCATOR + .dal_allocator_zone = &_dispatch_main_heap, + .dal_deferred_free_isa = &_dispatch_main_heap, + .dal_allocation_size = DISPATCH_CONTINUATION_SIZE, + .dal_magazine_size = BYTES_PER_MAGAZINE, +#if PACK_FIRST_PAGE_WITH_CONTINUATIONS + .dal_first_allocation_offset = + offsetof(struct dispatch_magazine_s, fp_conts), +#else + .dal_first_allocation_offset = + offsetof(struct dispatch_magazine_s, conts), +#endif + 
.dal_allocation_isa_offset = + offsetof(struct dispatch_continuation_s, dc_flags), + .dal_enumerator = &_dispatch_allocator_enumerate, +#endif // DISPATCH_ALLOCATOR +}; +#endif + #if DISPATCH_USE_DIRECT_TSD const struct dispatch_tsd_indexes_s dispatch_tsd_indexes = { - .dti_version = 2, + .dti_version = 3, .dti_queue_index = dispatch_queue_key, .dti_voucher_index = dispatch_voucher_key, .dti_qos_class_index = dispatch_priority_key, + .dti_continuation_cache_index = dispatch_cache_key, }; #endif // DISPATCH_USE_DIRECT_TSD // 6618342 Contact the team that owns the Instrument DTrace probe before // renaming this symbol -DISPATCH_CACHELINE_ALIGN -struct dispatch_queue_s _dispatch_main_q = { +struct dispatch_queue_static_s _dispatch_main_q = { DISPATCH_GLOBAL_OBJECT_HEADER(queue_main), #if !DISPATCH_USE_RESOLVERS - .do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], + .do_targetq = _dispatch_get_default_queue(true), #endif .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | DISPATCH_QUEUE_ROLE_BASE_ANON, .dq_label = "com.apple.main-thread", - .dq_atomic_flags = DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC | DQF_WIDTH(1), + .dq_atomic_flags = DQF_THREAD_BOUND | DQF_WIDTH(1), .dq_serialnum = 1, }; -#pragma mark - -#pragma mark dispatch_queue_attr_t +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES +static struct dispatch_pthread_root_queue_context_s +_dispatch_mgr_root_queue_pthread_context; + +struct dispatch_queue_global_s _dispatch_mgr_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = &_dispatch_mgr_root_queue_pthread_context, + .dq_label = "com.apple.root.libdispatch-manager", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = 3, + .dgq_thread_pool_size = 1, +}; +#else +#define _dispatch_mgr_root_queue _dispatch_root_queues[\ + 
DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] +#endif -#define DISPATCH_QUEUE_ATTR_INIT(qos, prio, overcommit, freq, concurrent, \ - inactive) \ - { \ - DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), \ - .dqa_qos_and_relpri = (_dispatch_priority_make(qos, prio) & \ - DISPATCH_PRIORITY_REQUESTED_MASK), \ - .dqa_overcommit = _dispatch_queue_attr_overcommit_##overcommit, \ - .dqa_autorelease_frequency = DISPATCH_AUTORELEASE_FREQUENCY_##freq, \ - .dqa_concurrent = (concurrent), \ - .dqa_inactive = (inactive), \ - } +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +struct dispatch_queue_static_s _dispatch_mgr_q = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr), + .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | + DISPATCH_QUEUE_ROLE_BASE_ANON, + .do_ctxt = (void *)-1, + .do_targetq = _dispatch_mgr_root_queue._as_dq, + .dq_label = "com.apple.libdispatch-manager", + .dq_atomic_flags = DQF_WIDTH(1), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = 2, +}; -#define DISPATCH_QUEUE_ATTR_ACTIVE_INIT(qos, prio, overcommit, freq, \ - concurrent) \ - { \ - [DQA_INDEX_ACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ - qos, prio, overcommit, freq, concurrent, false), \ - [DQA_INDEX_INACTIVE] = DISPATCH_QUEUE_ATTR_INIT( \ - qos, prio, overcommit, freq, concurrent, true), \ - } +#if DISPATCH_USE_INTERNAL_WORKQUEUE +static struct dispatch_pthread_root_queue_context_s + _dispatch_pthread_root_queue_contexts[DISPATCH_ROOT_QUEUE_COUNT]; +#define _dispatch_root_queue_ctxt(n) &_dispatch_pthread_root_queue_contexts[n] +#else +#define _dispatch_root_queue_ctxt(n) NULL +#endif // DISPATCH_USE_INTERNAL_WORKQUEUE -#define DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, prio, overcommit) \ - { \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ - qos, prio, overcommit, INHERIT, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT][DQA_INDEX_SERIAL] = 
\ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ - qos, prio, overcommit, INHERIT, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ - qos, prio, overcommit, WORK_ITEM, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ - qos, prio, overcommit, WORK_ITEM, 0), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_CONCURRENT] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT( \ - qos, prio, overcommit, NEVER, 1), \ - [DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER][DQA_INDEX_SERIAL] = \ - DISPATCH_QUEUE_ATTR_ACTIVE_INIT(\ - qos, prio, overcommit, NEVER, 0), \ +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +struct dispatch_queue_global_s _dispatch_root_queues[] = { +#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ + ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) +#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
\ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ } + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, + .dq_label = "com.apple.root.maintenance-qos", + .dq_serialnum = 4, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.maintenance-qos.overcommit", + .dq_serialnum = 5, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, + .dq_label = "com.apple.root.background-qos", + .dq_serialnum = 6, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.background-qos.overcommit", + .dq_serialnum = 7, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, + .dq_label = "com.apple.root.utility-qos", + .dq_serialnum = 8, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.utility-qos.overcommit", + .dq_serialnum = 9, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos", + .dq_serialnum = 10, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.default-qos.overcommit", + .dq_serialnum = 11, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, + .dq_label = "com.apple.root.user-initiated-qos", + .dq_serialnum = 12, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.user-initiated-qos.overcommit", + .dq_serialnum = 13, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, + .dq_label = "com.apple.root.user-interactive-qos", + .dq_serialnum = 14, + ), + _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.user-interactive-qos.overcommit", + .dq_serialnum = 15, + ), +}; + +unsigned long volatile _dispatch_queue_serial_numbers = + DISPATCH_QUEUE_SERIAL_NUMBER_INIT; -#define 
DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, prio) \ - [prio] = { \ - [DQA_INDEX_UNSPECIFIED_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), unspecified),\ - [DQA_INDEX_NON_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), disabled), \ - [DQA_INDEX_OVERCOMMIT] = \ - DISPATCH_QUEUE_ATTR_OVERCOMMIT_INIT(qos, -(prio), enabled), \ - } -#define DISPATCH_QUEUE_ATTR_PRIO_INIT(qos) \ - { \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 0), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 1), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 2), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 3), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 4), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 5), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 6), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 7), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 8), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 9), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 10), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 11), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 12), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 13), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 14), \ - DISPATCH_QUEUE_ATTR_PRIO_INITIALIZER(qos, 15), \ +dispatch_queue_global_t +dispatch_get_global_queue(long priority, unsigned long flags) +{ + dispatch_assert(countof(_dispatch_root_queues) == + DISPATCH_ROOT_QUEUE_COUNT); + + if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { + return DISPATCH_BAD_INPUT; + } + dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == QOS_CLASS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } else if (qos == QOS_CLASS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; } +#endif + if (qos == DISPATCH_QOS_UNSPECIFIED) { + return DISPATCH_BAD_INPUT; + } + return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); +} -#define DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(qos) \ - [DQA_INDEX_QOS_CLASS_##qos] = \ 
- DISPATCH_QUEUE_ATTR_PRIO_INIT(DISPATCH_QOS_##qos) +dispatch_queue_t +dispatch_get_current_queue(void) +{ + return _dispatch_queue_get_current_or_default(); +} + +#pragma mark - +#pragma mark dispatch_queue_attr_t // DISPATCH_QUEUE_CONCURRENT resp. _dispatch_queue_attr_concurrent is aliased -// to array member [0][0][0][0][0][0] and their properties must match! -const struct dispatch_queue_attr_s _dispatch_queue_attrs[] - [DISPATCH_QUEUE_ATTR_PRIO_COUNT] - [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] - [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT] = { - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UNSPECIFIED), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(MAINTENANCE), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(BACKGROUND), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(UTILITY), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(DEFAULT), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INITIATED), - DISPATCH_QUEUE_ATTR_QOS_INITIALIZER(USER_INTERACTIVE), +// to array member [0] and their properties must match! +const struct dispatch_queue_attr_s _dispatch_queue_attrs[] = { + [0 ... 
DISPATCH_QUEUE_ATTR_COUNT - 1] = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), + }, }; #if DISPATCH_VARIANT_STATIC // -struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = - DISPATCH_QUEUE_ATTR_INIT(QOS_CLASS_UNSPECIFIED, 0, - unspecified, INHERIT, 1, false); +struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_attr), +}; #endif // DISPATCH_VARIANT_STATIC // _dispatch_queue_attr_concurrent is aliased using libdispatch.aliases @@ -328,189 +410,359 @@ extern struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent __attribute__((__alias__("_dispatch_queue_attrs"))); #endif +dispatch_queue_attr_info_t +_dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa) +{ + dispatch_queue_attr_info_t dqai = { }; + + if (!dqa) return dqai; + +#if DISPATCH_VARIANT_STATIC + if (dqa == &_dispatch_queue_attr_concurrent) { + dqai.dqai_concurrent = true; + return dqai; + } +#endif + + if (dqa < _dispatch_queue_attrs || + dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) { + DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + } + + size_t idx = (size_t)(dqa - _dispatch_queue_attrs); + + dqai.dqai_inactive = (idx % DISPATCH_QUEUE_ATTR_INACTIVE_COUNT); + idx /= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT; + + dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT); + idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT; + + dqai.dqai_relpri = -(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT); + idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT; + + dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT; + idx /= DISPATCH_QUEUE_ATTR_QOS_COUNT; + + dqai.dqai_autorelease_frequency = + idx % DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT; + idx /= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT; + + dqai.dqai_overcommit = idx % DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT; + idx /= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT; + + return dqai; +} + +static dispatch_queue_attr_t +_dispatch_queue_attr_from_info(dispatch_queue_attr_info_t dqai) +{ + 
size_t idx = 0; + + idx *= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT; + idx += dqai.dqai_overcommit; + + idx *= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT; + idx += dqai.dqai_autorelease_frequency; + + idx *= DISPATCH_QUEUE_ATTR_QOS_COUNT; + idx += dqai.dqai_qos; + + idx *= DISPATCH_QUEUE_ATTR_PRIO_COUNT; + idx += (size_t)(-dqai.dqai_relpri); + + idx *= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT; + idx += !dqai.dqai_concurrent; + + idx *= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT; + idx += dqai.dqai_inactive; + + return (dispatch_queue_attr_t)&_dispatch_queue_attrs[idx]; +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, + dispatch_qos_class_t qos_class, int relpri) +{ + if (!_dispatch_qos_class_valid(qos_class, relpri)) { + return (dispatch_queue_attr_t)dqa; + } + dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); + dqai.dqai_qos = _dispatch_qos_from_qos_class(qos_class); + dqai.dqai_relpri = relpri; + return _dispatch_queue_attr_from_info(dqai); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) +{ + dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); + dqai.dqai_inactive = true; + return _dispatch_queue_attr_from_info(dqai); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, + bool overcommit) +{ + dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); + if (overcommit) { + dqai.dqai_overcommit = _dispatch_queue_attr_overcommit_enabled; + } else { + dqai.dqai_overcommit = _dispatch_queue_attr_overcommit_disabled; + } + return _dispatch_queue_attr_from_info(dqai); +} + +dispatch_queue_attr_t +dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, + dispatch_autorelease_frequency_t frequency) +{ + switch (frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_INHERIT: + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + break; + 
default: + return (dispatch_queue_attr_t)dqa; + } + dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); + dqai.dqai_autorelease_frequency = (uint16_t)frequency; + return _dispatch_queue_attr_from_info(dqai); +} + #pragma mark - #pragma mark dispatch_vtables +DISPATCH_NOINLINE +static void +_dispatch_object_no_dispose(dispatch_object_t dou, + DISPATCH_UNUSED bool *allow_free) +{ + DISPATCH_INTERNAL_CRASH(dx_type(dou._do), "do_dispose called"); +} + +DISPATCH_NOINLINE +static size_t +_dispatch_object_missing_debug(DISPATCH_UNUSED dispatch_object_t dou, + char *buf, size_t bufsiz) +{ + return strlcpy(buf, "missing do_debug vtable slot: ", bufsiz); +} + +DISPATCH_NOINLINE +static void +_dispatch_object_no_invoke(dispatch_object_t dou, + DISPATCH_UNUSED dispatch_invoke_context_t dic, + DISPATCH_UNUSED dispatch_invoke_flags_t flags) +{ + DISPATCH_INTERNAL_CRASH(dx_type(dou._do), "do_invoke called"); +} + +/* + * Dispatch object cluster + */ + DISPATCH_VTABLE_INSTANCE(semaphore, - .do_type = DISPATCH_SEMAPHORE_TYPE, - .do_kind = "semaphore", - .do_dispose = _dispatch_semaphore_dispose, - .do_debug = _dispatch_semaphore_debug, + .do_type = DISPATCH_SEMAPHORE_TYPE, + .do_dispose = _dispatch_semaphore_dispose, + .do_debug = _dispatch_semaphore_debug, + .do_invoke = _dispatch_object_no_invoke, ); DISPATCH_VTABLE_INSTANCE(group, - .do_type = DISPATCH_GROUP_TYPE, - .do_kind = "group", - .do_dispose = _dispatch_group_dispose, - .do_debug = _dispatch_group_debug, + .do_type = DISPATCH_GROUP_TYPE, + .do_dispose = _dispatch_group_dispose, + .do_debug = _dispatch_group_debug, + .do_invoke = _dispatch_object_no_invoke, ); -DISPATCH_VTABLE_INSTANCE(queue, - .do_type = DISPATCH_QUEUE_LEGACY_TYPE, - .do_kind = "queue", - .do_dispose = _dispatch_queue_dispose, - .do_suspend = _dispatch_queue_suspend, - .do_resume = _dispatch_queue_resume, - .do_push = _dispatch_queue_push, - .do_invoke = _dispatch_queue_invoke, - .do_wakeup = _dispatch_queue_wakeup, - .do_debug = 
dispatch_queue_debug, - .do_set_targetq = _dispatch_queue_set_target_queue, +#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA +DISPATCH_VTABLE_INSTANCE(data, + .do_type = DISPATCH_DATA_TYPE, + .do_dispose = _dispatch_data_dispose, + .do_debug = _dispatch_data_debug, + .do_invoke = _dispatch_object_no_invoke, ); +#endif -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, queue, - .do_type = DISPATCH_QUEUE_SERIAL_TYPE, - .do_kind = "serial-queue", - .do_dispose = _dispatch_queue_dispose, - .do_suspend = _dispatch_queue_suspend, - .do_resume = _dispatch_queue_resume, - .do_finalize_activation = _dispatch_queue_finalize_activation, - .do_push = _dispatch_queue_push, - .do_invoke = _dispatch_queue_invoke, - .do_wakeup = _dispatch_queue_wakeup, - .do_debug = dispatch_queue_debug, - .do_set_targetq = _dispatch_queue_set_target_queue, +DISPATCH_VTABLE_INSTANCE(queue_attr, + .do_type = DISPATCH_QUEUE_ATTR_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_object_missing_debug, + .do_invoke = _dispatch_object_no_invoke, ); -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, queue, - .do_type = DISPATCH_QUEUE_CONCURRENT_TYPE, - .do_kind = "concurrent-queue", - .do_dispose = _dispatch_queue_dispose, - .do_suspend = _dispatch_queue_suspend, - .do_resume = _dispatch_queue_resume, - .do_finalize_activation = _dispatch_queue_finalize_activation, - .do_push = _dispatch_queue_push, - .do_invoke = _dispatch_queue_invoke, - .do_wakeup = _dispatch_queue_wakeup, - .do_debug = dispatch_queue_debug, - .do_set_targetq = _dispatch_queue_set_target_queue, +#if HAVE_MACH +DISPATCH_VTABLE_INSTANCE(mach_msg, + .do_type = DISPATCH_MACH_MSG_TYPE, + .do_dispose = _dispatch_mach_msg_dispose, + .do_debug = _dispatch_mach_msg_debug, + .do_invoke = _dispatch_mach_msg_invoke, ); +#endif // HAVE_MACH - -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_root, queue, - .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, - .do_kind = "global-queue", - .do_dispose = _dispatch_pthread_root_queue_dispose, - 
.do_push = _dispatch_root_queue_push, - .do_invoke = NULL, - .do_wakeup = _dispatch_root_queue_wakeup, - .do_debug = dispatch_queue_debug, +DISPATCH_VTABLE_INSTANCE(io, + .do_type = DISPATCH_IO_TYPE, + .do_dispose = _dispatch_io_dispose, + .do_debug = _dispatch_io_debug, + .do_invoke = _dispatch_object_no_invoke, ); - -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, queue, - .do_type = DISPATCH_QUEUE_SERIAL_TYPE, - .do_kind = "main-queue", - .do_dispose = _dispatch_queue_dispose, - .do_push = _dispatch_queue_push, - .do_invoke = _dispatch_queue_invoke, - .do_wakeup = _dispatch_main_queue_wakeup, - .do_debug = dispatch_queue_debug, +DISPATCH_VTABLE_INSTANCE(operation, + .do_type = DISPATCH_OPERATION_TYPE, + .do_dispose = _dispatch_operation_dispose, + .do_debug = _dispatch_operation_debug, + .do_invoke = _dispatch_object_no_invoke, ); -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, queue, - .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE, - .do_kind = "runloop-queue", - .do_dispose = _dispatch_runloop_queue_dispose, - .do_push = _dispatch_queue_push, - .do_invoke = _dispatch_queue_invoke, - .do_wakeup = _dispatch_runloop_queue_wakeup, - .do_debug = dispatch_queue_debug, +DISPATCH_VTABLE_INSTANCE(disk, + .do_type = DISPATCH_DISK_TYPE, + .do_dispose = _dispatch_disk_dispose, + .do_debug = _dispatch_object_missing_debug, + .do_invoke = _dispatch_object_no_invoke, ); -DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, queue, - .do_type = DISPATCH_QUEUE_MGR_TYPE, - .do_kind = "mgr-queue", - .do_push = _dispatch_mgr_queue_push, - .do_invoke = _dispatch_mgr_thread, - .do_wakeup = _dispatch_mgr_queue_wakeup, - .do_debug = dispatch_queue_debug, -); +/* + * Dispatch queue cluster + */ -DISPATCH_VTABLE_INSTANCE(queue_specific_queue, - .do_type = DISPATCH_QUEUE_SPECIFIC_TYPE, - .do_kind = "queue-context", - .do_dispose = _dispatch_queue_specific_queue_dispose, - .do_push = (void *)_dispatch_queue_push, - .do_invoke = (void *)_dispatch_queue_invoke, - .do_wakeup = (void 
*)_dispatch_queue_wakeup, - .do_debug = (void *)dispatch_queue_debug, +DISPATCH_NOINLINE +static void +_dispatch_queue_no_activate(dispatch_queue_class_t dqu, + DISPATCH_UNUSED bool *allow_resume) +{ + DISPATCH_INTERNAL_CRASH(dx_type(dqu._dq), "dq_activate called"); +} + +DISPATCH_VTABLE_INSTANCE(queue, + // This is the base class for queues, no objects of this type are made + .do_type = _DISPATCH_QUEUE_CLUSTER, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, ); -DISPATCH_VTABLE_INSTANCE(queue_attr, - .do_type = DISPATCH_QUEUE_ATTR_TYPE, - .do_kind = "queue-attr", +DISPATCH_VTABLE_INSTANCE(workloop, + .do_type = DISPATCH_WORKLOOP_TYPE, + .do_dispose = _dispatch_workloop_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_workloop_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_workloop_wakeup, + .dq_push = _dispatch_workloop_push, ); -DISPATCH_VTABLE_INSTANCE(source, - .do_type = DISPATCH_SOURCE_KEVENT_TYPE, - .do_kind = "kevent-source", - .do_dispose = _dispatch_source_dispose, - .do_suspend = (void *)_dispatch_queue_suspend, - .do_resume = (void *)_dispatch_queue_resume, - .do_finalize_activation = _dispatch_source_finalize_activation, - .do_push = (void *)_dispatch_queue_push, - .do_invoke = _dispatch_source_invoke, - .do_wakeup = _dispatch_source_wakeup, - .do_debug = _dispatch_source_debug, - .do_set_targetq = (void *)_dispatch_queue_set_target_queue, +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_dispose = _dispatch_lane_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_lane_invoke, + + .dq_activate = _dispatch_lane_activate, + .dq_wakeup = _dispatch_lane_wakeup, + .dq_push = _dispatch_lane_push, ); -#if HAVE_MACH -DISPATCH_VTABLE_INSTANCE(mach, - .do_type = DISPATCH_MACH_CHANNEL_TYPE, - .do_kind = 
"mach-channel", - .do_dispose = _dispatch_mach_dispose, - .do_suspend = (void *)_dispatch_queue_suspend, - .do_resume = (void *)_dispatch_queue_resume, - .do_finalize_activation = _dispatch_mach_finalize_activation, - .do_push = (void *)_dispatch_queue_push, - .do_invoke = _dispatch_mach_invoke, - .do_wakeup = _dispatch_mach_wakeup, - .do_debug = _dispatch_mach_debug, - .do_set_targetq = (void *)_dispatch_queue_set_target_queue, +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane, + .do_type = DISPATCH_QUEUE_CONCURRENT_TYPE, + .do_dispose = _dispatch_lane_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_lane_invoke, + + .dq_activate = _dispatch_lane_activate, + .dq_wakeup = _dispatch_lane_wakeup, + .dq_push = _dispatch_lane_concurrent_push, ); -DISPATCH_VTABLE_INSTANCE(mach_msg, - .do_type = DISPATCH_MACH_MSG_TYPE, - .do_kind = "mach-msg", - .do_dispose = _dispatch_mach_msg_dispose, - .do_invoke = _dispatch_mach_msg_invoke, - .do_debug = _dispatch_mach_msg_debug, +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane, + .do_type = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_root_queue_wakeup, + .dq_push = _dispatch_root_queue_push, ); -#endif // HAVE_MACH -#if !DISPATCH_DATA_IS_BRIDGED_TO_NSDATA -DISPATCH_VTABLE_INSTANCE(data, - .do_type = DISPATCH_DATA_TYPE, - .do_kind = "data", - .do_dispose = _dispatch_data_dispose, - .do_debug = _dispatch_data_debug, - .do_set_targetq = (void*)_dispatch_data_set_target_queue, +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane, + .do_type = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE, + .do_dispose = _dispatch_pthread_root_queue_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = 
_dispatch_root_queue_wakeup, + .dq_push = _dispatch_root_queue_push, ); +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES + +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_mgr, lane, + .do_type = DISPATCH_QUEUE_MGR_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, +#if DISPATCH_USE_MGR_THREAD + .do_invoke = _dispatch_mgr_thread, +#else + .do_invoke = _dispatch_object_no_invoke, #endif -DISPATCH_VTABLE_INSTANCE(io, - .do_type = DISPATCH_IO_TYPE, - .do_kind = "channel", - .do_dispose = _dispatch_io_dispose, - .do_debug = _dispatch_io_debug, - .do_set_targetq = _dispatch_io_set_target_queue, + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_mgr_queue_wakeup, + .dq_push = _dispatch_mgr_queue_push, ); -DISPATCH_VTABLE_INSTANCE(operation, - .do_type = DISPATCH_OPERATION_TYPE, - .do_kind = "operation", - .do_dispose = _dispatch_operation_dispose, - .do_debug = _dispatch_operation_debug, +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, lane, + .do_type = DISPATCH_QUEUE_MAIN_TYPE, + .do_dispose = _dispatch_lane_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_lane_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_main_queue_wakeup, + .dq_push = _dispatch_main_queue_push, ); -DISPATCH_VTABLE_INSTANCE(disk, - .do_type = DISPATCH_DISK_TYPE, - .do_kind = "disk", - .do_dispose = _dispatch_disk_dispose, +#if DISPATCH_COCOA_COMPAT +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_runloop, lane, + .do_type = DISPATCH_QUEUE_RUNLOOP_TYPE, + .do_dispose = _dispatch_runloop_queue_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_lane_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_runloop_queue_wakeup, + .dq_push = _dispatch_lane_push, +); +#endif + +DISPATCH_VTABLE_INSTANCE(source, + .do_type = DISPATCH_SOURCE_KEVENT_TYPE, + .do_dispose = _dispatch_source_dispose, + .do_debug = _dispatch_source_debug, + .do_invoke = _dispatch_source_invoke, + + 
.dq_activate = _dispatch_source_activate, + .dq_wakeup = _dispatch_source_wakeup, + .dq_push = _dispatch_lane_push, ); +#if HAVE_MACH +DISPATCH_VTABLE_INSTANCE(mach, + .do_type = DISPATCH_MACH_CHANNEL_TYPE, + .do_dispose = _dispatch_mach_dispose, + .do_debug = _dispatch_mach_debug, + .do_invoke = _dispatch_mach_invoke, + + .dq_activate = _dispatch_mach_activate, + .dq_wakeup = _dispatch_mach_wakeup, + .dq_push = _dispatch_lane_push, +); +#endif // HAVE_MACH void _dispatch_vtable_init(void) @@ -572,13 +824,13 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) size_t bufsz = sizeof(_dispatch_build); sysctl(mib, 2, _dispatch_build, &bufsz, NULL, 0); -#if TARGET_IPHONE_SIMULATOR +#if TARGET_OS_SIMULATOR char *sim_version = getenv("SIMULATOR_RUNTIME_BUILD_VERSION"); if (sim_version) { (void)strlcat(_dispatch_build, " ", sizeof(_dispatch_build)); (void)strlcat(_dispatch_build, sim_version, sizeof(_dispatch_build)); } -#endif // TARGET_IPHONE_SIMULATOR +#endif // TARGET_OS_SIMULATOR #else /* @@ -590,6 +842,22 @@ _dispatch_build_init(void *context DISPATCH_UNUSED) static dispatch_once_t _dispatch_build_pred; +bool +_dispatch_parse_bool(const char *v) +{ + return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 || + strcasecmp(v, "TRUE") == 0 || atoi(v); +} + +DISPATCH_NOINLINE +bool +_dispatch_getenv_bool(const char *env, bool default_v) +{ + const char *v = getenv(env); + + return v ? _dispatch_parse_bool(v) : default_v; +} + char* _dispatch_get_build(void) { @@ -597,58 +865,159 @@ _dispatch_get_build(void) return _dispatch_build; } -#define _dispatch_bug_log(msg, ...) 
do { \ - static void *last_seen; \ - void *ra = __builtin_return_address(0); \ - if (last_seen != ra) { \ - last_seen = ra; \ - _dispatch_log(msg, ##__VA_ARGS__); \ - } \ -} while(0) +#define _dispatch_bug_log_is_repeated() ({ \ + static void *last_seen; \ + void *previous = last_seen; \ + last_seen =__builtin_return_address(0); \ + last_seen == previous; \ + }) -void -_dispatch_bug(size_t line, long val) +#if HAVE_OS_FAULT_WITH_PAYLOAD +__attribute__((__format__(__printf__,2,3))) +static void +_dispatch_fault(const char *reason, const char *fmt, ...) { - dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); - _dispatch_bug_log("BUG in libdispatch: %s - %lu - 0x%lx", - _dispatch_build, (unsigned long)line, val); + char buf[1024]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + if (_dispatch_mode & DISPATCH_MODE_STRICT) { +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + } else if (!(_dispatch_mode & DISPATCH_MODE_NO_FAULTS)) { + os_fault_with_payload(OS_REASON_LIBSYSTEM, + OS_REASON_LIBSYSTEM_CODE_FAULT, + buf, (uint32_t)strlen(buf) + 1, reason, 0); +#else + (void)reason; +#endif + } } +#else +#define _dispatch_fault(reason, fmt, ...) +#endif // HAVE_OS_FAULT_WITH_PAYLOAD + +#define _dispatch_log_fault(reason, fmt, ...) 
({ \ + if (!_dispatch_bug_log_is_repeated()) { \ + _dispatch_log(fmt, ##__VA_ARGS__); \ + _dispatch_fault(reason, fmt, ##__VA_ARGS__); \ + if (_dispatch_mode & DISPATCH_MODE_STRICT) { \ + DISPATCH_CLIENT_CRASH(0, reason); \ + } \ + } \ + }) void -_dispatch_bug_client(const char* msg) +_dispatch_bug(size_t line, long val) { - _dispatch_bug_log("BUG in libdispatch client: %s", msg); + dispatch_once_f(&_dispatch_build_pred, NULL, _dispatch_build_init); + + if (_dispatch_bug_log_is_repeated()) return; + + _dispatch_log("BUG in libdispatch: %s - %lu - 0x%lx", + _dispatch_build, (unsigned long)line, val); } #if HAVE_MACH void -_dispatch_bug_mach_client(const char* msg, mach_msg_return_t kr) +_dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr) { - _dispatch_bug_log("BUG in libdispatch client: %s %s - 0x%x", msg, + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_mach_client", + "BUG in libdispatch client: %s %s - 0x%x", msg, mach_error_string(kr), kr); } #endif +void * +_dispatch_continuation_get_function_symbol(dispatch_continuation_t dc) +{ + if (dc->dc_flags & DC_FLAG_BLOCK_WITH_PRIVATE_DATA) { + dispatch_block_private_data_t dpbd = _dispatch_block_get_data(dc->dc_ctxt); + return _dispatch_Block_invoke(dpbd->dbpd_block); + } + if (dc->dc_flags & DC_FLAG_BLOCK) { + return _dispatch_Block_invoke(dc->dc_ctxt); + } + return dc->dc_func; +} + void -_dispatch_bug_kevent_client(const char* msg, const char* filter, - const char *operation, int err) +_dispatch_bug_kevent_client(const char *msg, const char *filter, + const char *operation, int err, uint64_t ident, uint64_t udata, + dispatch_unote_t du) { + dispatch_continuation_t dc; + dispatch_object_t dou; + void *func = NULL; + + if (du._du) { + dou._do = _dispatch_wref2ptr(du._du->du_owner_wref); + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: + dc = du._dr->ds_handler[DS_EVENT_HANDLER]; + if (dc) func = _dispatch_continuation_get_function_symbol(dc); + break; + case 
DISPATCH_MACH_CHANNEL_TYPE: + func = du._dmrr->dmrr_handler_func; + break; + } + filter = dux_type(du._du)->dst_kind; + } + if (operation && err) { - _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s: \"%s\" - 0x%x", - msg, filter, operation, strerror(err), err); + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", + "BUG in libdispatch client: %s %s: \"%s\" - 0x%x " + "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + msg, operation, strerror(err), err, + udata, filter, ident, ident, func); } else if (operation) { - _dispatch_bug_log("BUG in libdispatch client: %s[%s] %s", - msg, filter, operation); + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", + "BUG in libdispatch client: %s %s" + "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + msg, operation, udata, filter, ident, ident, func); } else { - _dispatch_bug_log("BUG in libdispatch: %s[%s]: \"%s\" - 0x%x", - msg, filter, strerror(err), err); + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", + "BUG in libdispatch: %s: \"%s\" - 0x%x" + "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + msg, strerror(err), err, udata, filter, ident, ident, func); } } +void +_dispatch_bug_kevent_vanished(dispatch_unote_t du) +{ + dispatch_continuation_t dc; + dispatch_object_t dou; + void *func = NULL; + + dou._do = _dispatch_wref2ptr(du._du->du_owner_wref); + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: + dc = du._dr->ds_handler[DS_EVENT_HANDLER]; + if (dc) func = _dispatch_continuation_get_function_symbol(dc); + break; + case DISPATCH_MACH_CHANNEL_TYPE: + func = du._dmrr->dmrr_handler_func; + break; + } + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", + "BUG in libdispatch client: %s, monitored resource vanished before " + "the source cancel handler was invoked " + "{ %p[%s], ident: %d / 0x%x, handler: %p }", + dux_type(du._du)->dst_kind, dou._dq, + dou._dq->dq_label ? 
dou._dq->dq_label : "", + du._du->du_ident, du._du->du_ident, func); +} + +DISPATCH_NOINLINE DISPATCH_WEAK void _dispatch_bug_deprecated(const char *msg) { - _dispatch_bug_log("DEPRECATED USE in libdispatch client: %s", msg); + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_deprecated", + "DEPRECATED USE in libdispatch client: %s; " + "set a breakpoint on _dispatch_bug_deprecated to debug", msg); } void @@ -705,7 +1074,7 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) struct timeval tv; gettimeofday(&tv, NULL); #if DISPATCH_DEBUG - dispatch_log_basetime = _dispatch_absolute_time(); + dispatch_log_basetime = _dispatch_uptime(); #endif dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), @@ -722,7 +1091,7 @@ _dispatch_log_file(char *buf, size_t len) buf[len++] = '\n'; retry: r = write(dispatch_logfile, buf, len); - if (slowpath(r == -1) && errno == EINTR) { + if (unlikely(r == -1) && errno == EINTR) { goto retry; } } @@ -737,7 +1106,7 @@ _dispatch_logv_file(const char *msg, va_list ap) #if DISPATCH_DEBUG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t", - _dispatch_absolute_time() - dispatch_log_basetime); + _dispatch_uptime() - dispatch_log_basetime); #endif r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); if (r < 0) return; @@ -759,7 +1128,7 @@ static inline void _dispatch_vsyslog(const char *msg, va_list ap) { char *str; - vasprintf(&str, msg, ap); + vasprintf(&str, msg, ap); if (str) { _dispatch_syslog(str); free(str); @@ -784,10 +1153,10 @@ static inline void _dispatch_logv(const char *msg, size_t len, va_list *ap_ptr) { dispatch_once_f(&_dispatch_logv_pred, NULL, _dispatch_logv_init); - if (slowpath(dispatch_log_disabled)) { + if (unlikely(dispatch_log_disabled)) { return; } - if (slowpath(dispatch_logfile != -1)) { + if (unlikely(dispatch_logfile != -1)) { if (!ap_ptr) { return _dispatch_log_file((char*)msg, len); } @@ -819,10 +1188,7 @@ static size_t 
_dispatch_object_debug2(dispatch_object_t dou, char* buf, size_t bufsiz) { DISPATCH_OBJECT_TFB(_dispatch_objc_debug, dou, buf, bufsiz); - if (dx_vtable(dou._do)->do_debug) { - return dx_debug(dou._do, buf, bufsiz); - } - return strlcpy(buf, "NULL vtable slot: ", bufsiz); + return dx_debug(dou._do, buf, bufsiz); } DISPATCH_NOINLINE @@ -834,7 +1200,7 @@ _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) int r; #if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t", - _dispatch_absolute_time() - dispatch_log_basetime, + _dispatch_uptime() - dispatch_log_basetime, (void *)_dispatch_thread_self()); #endif if (dou._do) { @@ -904,7 +1270,7 @@ void * _dispatch_calloc(size_t num_items, size_t size) { void *buf; - while (!fastpath(buf = calloc(num_items, size))) { + while (unlikely(!(buf = calloc(num_items, size)))) { _dispatch_temporary_resource_shortage(); } return buf; @@ -919,7 +1285,7 @@ _dispatch_strdup_if_mutable(const char *str) { #if HAVE_DYLD_IS_MEMORY_IMMUTABLE size_t size = strlen(str) + 1; - if (slowpath(!_dyld_is_memory_immutable(str, size))) { + if (unlikely(!_dyld_is_memory_immutable(str, size))) { char *clone = (char *) malloc(size); if (dispatch_assume(clone)) { memcpy(clone, str, size); @@ -942,8 +1308,8 @@ void * { dispatch_block_t rval; - if (fastpath(db)) { - while (!fastpath(rval = Block_copy(db))) { + if (likely(db)) { + while (unlikely(!(rval = Block_copy(db)))) { _dispatch_temporary_resource_shortage(); } return rval; @@ -982,7 +1348,7 @@ _dispatch_client_callout(void *ctxt, dispatch_function_t f) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); - if (fastpath(!u)) return f(ctxt); + if (likely(!u)) return f(ctxt); _dispatch_set_unwind_tsd(NULL); f(ctxt); _dispatch_free_unwind_tsd(); @@ -996,7 +1362,7 @@ _dispatch_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); - if 
(fastpath(!u)) return f(ctxt, i); + if (likely(!u)) return f(ctxt, i); _dispatch_set_unwind_tsd(NULL); f(ctxt, i); _dispatch_free_unwind_tsd(); @@ -1013,7 +1379,7 @@ _dispatch_client_callout3(void *ctxt, dispatch_mach_reason_t reason, { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); - if (fastpath(!u)) return f(ctxt, reason, dmsg); + if (likely(!u)) return f(ctxt, reason, dmsg); _dispatch_set_unwind_tsd(NULL); f(ctxt, reason, dmsg); _dispatch_free_unwind_tsd(); @@ -1028,7 +1394,7 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, { _dispatch_get_tsd_base(); void *u = _dispatch_get_unwind_tsd(); - if (fastpath(!u)) return f(ctxt, reason, dmsg, error); + if (likely(!u)) return f(ctxt, reason, dmsg, error); _dispatch_set_unwind_tsd(NULL); f(ctxt, reason, dmsg, error); _dispatch_free_unwind_tsd(); @@ -1056,7 +1422,7 @@ _os_object_alloc_realized(const void *cls, size_t size) { _os_object_t obj; dispatch_assert(size >= sizeof(struct _os_object_s)); - while (!fastpath(obj = calloc(1u, size))) { + while (unlikely(!(obj = calloc(1u, size)))) { _dispatch_temporary_resource_shortage(); } obj->os_obj_isa = cls; @@ -1081,7 +1447,7 @@ void _os_object_xref_dispose(_os_object_t obj) { _os_object_xrefcnt_dispose_barrier(obj); - if (fastpath(obj->os_obj_isa->_os_obj_xref_dispose)) { + if (likely(obj->os_obj_isa->_os_obj_xref_dispose)) { return obj->os_obj_isa->_os_obj_xref_dispose(obj); } return _os_object_release_internal(obj); @@ -1091,7 +1457,7 @@ void _os_object_dispose(_os_object_t obj) { _os_object_refcnt_dispose_barrier(obj); - if (fastpath(obj->os_obj_isa->_os_obj_dispose)) { + if (likely(obj->os_obj_isa->_os_obj_dispose)) { return obj->os_obj_isa->_os_obj_dispose(obj); } return _os_object_dealloc(obj); @@ -1100,7 +1466,7 @@ _os_object_dispose(_os_object_t obj) void* os_retain(void *obj) { - if (fastpath(obj)) { + if (likely(obj)) { return _os_object_retain(obj); } return obj; @@ -1110,7 +1476,7 @@ os_retain(void *obj) void 
os_release(void *obj) { - if (fastpath(obj)) { + if (likely(obj)) { return _os_object_release(obj); } } @@ -1234,3 +1600,60 @@ _dispatch_mach_notify_send_once(mach_port_t notify DISPATCH_UNUSED) } #endif // HAVE_MACH +#pragma mark - +#pragma mark dispatch to XPC callbacks +#if HAVE_MACH + +// Default dmxh_direct_message_handler callback that does not handle +// messages inline. +static bool +_dispatch_mach_xpc_no_handle_message( + void *_Nullable context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED, + mach_error_t error DISPATCH_UNUSED) +{ + return false; +} + +// Default dmxh_msg_context_reply_queue callback that returns a NULL queue. +static dispatch_queue_t +_dispatch_mach_msg_context_no_async_reply_queue( + void *_Nonnull msg_context DISPATCH_UNUSED) +{ + return NULL; +} + +// Default dmxh_async_reply_handler callback that crashes when called. +DISPATCH_NORETURN +static void +_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED, + dispatch_mach_reason_t reason DISPATCH_UNUSED, + dispatch_mach_msg_t message DISPATCH_UNUSED) +{ + DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, + "_dispatch_mach_default_async_reply_handler called"); +} + +// Default dmxh_enable_sigterm_notification callback that enables delivery of +// SIGTERM notifications (for backwards compatibility). +static bool +_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED) +{ + return true; +} + +// Callbacks from dispatch to XPC. The default is to not support any callbacks. 
+const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default = { + .version = DISPATCH_MACH_XPC_HOOKS_VERSION, + .dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message, + .dmxh_msg_context_reply_queue = + &_dispatch_mach_msg_context_no_async_reply_queue, + .dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler, + .dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm, +}; + +dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks = + &_dispatch_mach_xpc_hooks_default; + +#endif // HAVE_MACH diff --git a/src/inline_internal.h b/src/inline_internal.h index 1279874d4..69805aff1 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -89,21 +89,45 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, #pragma mark _os_object_t & dispatch_object_t #if DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE +static inline const char * +_dispatch_object_class_name(dispatch_object_t dou) +{ +#if USE_OBJC + return object_getClassName((id)dou._do) + strlen("OS_dispatch_"); +#else + return dx_vtable(dou._do)->do_kind; +#endif +} + DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_object_has_vtable(dispatch_object_t dou) +_dispatch_object_is_global(dispatch_object_t dou) { - uintptr_t dc_flags = dou._dc->dc_flags; + return dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT; +} +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_root_or_base_queue(dispatch_object_t dou) +{ + return dx_hastypeflag(dou._do, QUEUE_ROOT) || + dx_hastypeflag(dou._do, QUEUE_BASE); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_has_vtable(dispatch_object_t dou) +{ // vtables are pointers far away from the low page in memory - return dc_flags > 0xffful; + return dou._dc->dc_flags > 0xffful; } DISPATCH_ALWAYS_INLINE static inline bool _dispatch_object_is_queue(dispatch_object_t dou) { - return _dispatch_object_has_vtable(dou) && dx_vtable(dou._do)->do_push; + return _dispatch_object_has_vtable(dou) && 
dx_vtable(dou._dq)->dq_push; } DISPATCH_ALWAYS_INLINE @@ -138,16 +162,23 @@ _dispatch_object_is_barrier(dispatch_object_t dou) dispatch_queue_flags_t dq_flags; if (!_dispatch_object_has_vtable(dou)) { - return (dou._dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT); - } - switch (dx_metatype(dou._do)) { - case _DISPATCH_QUEUE_TYPE: - case _DISPATCH_SOURCE_TYPE: - dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed); - return dq_flags & DQF_BARRIER_BIT; - default: + return (dou._dc->dc_flags & DC_FLAG_BARRIER); + } + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { + return false; + } + dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed); + return dq_flags & DQF_BARRIER_BIT; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_waiter(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { return false; } + return (dou._dc->dc_flags & (DC_FLAG_SYNC_WAITER | DC_FLAG_ASYNC_AND_WAIT)); } DISPATCH_ALWAYS_INLINE @@ -157,7 +188,7 @@ _dispatch_object_is_sync_waiter(dispatch_object_t dou) if (_dispatch_object_has_vtable(dou)) { return false; } - return (dou._dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT); + return (dou._dc->dc_flags & DC_FLAG_SYNC_WAITER); } DISPATCH_ALWAYS_INLINE @@ -167,17 +198,16 @@ _dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) if (_dispatch_object_has_vtable(dou)) { return false; } - return ((dou._dc->dc_flags & - (DISPATCH_OBJ_BARRIER_BIT | DISPATCH_OBJ_SYNC_WAITER_BIT)) == - (DISPATCH_OBJ_SYNC_WAITER_BIT)); + return ((dou._dc->dc_flags & (DC_FLAG_BARRIER | DC_FLAG_SYNC_WAITER)) == + (DC_FLAG_SYNC_WAITER)); } DISPATCH_ALWAYS_INLINE static inline _os_object_t _os_object_retain_internal_n_inline(_os_object_t obj, int n) { - int ref_cnt = _os_object_refcnt_add(obj, n); - if (unlikely(ref_cnt <= 0)) { + int ref_cnt = _os_object_refcnt_add_orig(obj, n); + if (unlikely(ref_cnt < 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; @@ -237,6 +267,29 @@ 
_dispatch_retain_n(dispatch_object_t dou, int n) (void)_os_object_retain_internal_n_inline(dou._os_obj, n); } +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_retain_n_unsafe(dispatch_object_t dou, int n) +{ + // _dispatch_retain_*_unsafe assumes: + // - the object is not global + // - there's no refcount management bug + // + // This is meant to be used only when called between the update_tail and + // update_prev os_mpsc methods, so that the assembly of that critical window + // is as terse as possible (this window is a possible dequeuer starvation). + // + // Other code should use the safe variants at all times. + os_atomic_add2o(dou._os_obj, os_obj_ref_cnt, n, relaxed); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline void +_dispatch_retain_2_unsafe(dispatch_object_t dou) +{ + _dispatch_retain_n_unsafe(dou, 2); +} + DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_release(dispatch_object_t dou) @@ -288,9 +341,23 @@ _dispatch_release_2_tailcall(dispatch_object_t dou) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_retain_storage(dispatch_queue_t dq) +_dispatch_retain_unote_owner(dispatch_unote_t du) { - int ref_cnt = os_atomic_inc2o(dq, dq_sref_cnt, relaxed); + _dispatch_retain_2(_dispatch_wref2ptr(du._du->du_owner_wref)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_release_unote_owner_tailcall(dispatch_unote_t du) +{ + _dispatch_release_2_tailcall(_dispatch_wref2ptr(du._du->du_owner_wref)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_retain_storage(dispatch_queue_class_t dqu) +{ + int ref_cnt = os_atomic_inc2o(dqu._dq, dq_sref_cnt, relaxed); if (unlikely(ref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } @@ -298,21 +365,21 @@ _dispatch_queue_retain_storage(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_release_storage(dispatch_queue_t dq) +_dispatch_queue_release_storage(dispatch_queue_class_t dqu) { // this refcount only delays the 
_dispatch_object_dealloc() and there's no // need for visibility wrt to the allocation, the internal refcount already // gives us that, and the object becomes immutable after the last internal // refcount release. - int ref_cnt = os_atomic_dec2o(dq, dq_sref_cnt, relaxed); + int ref_cnt = os_atomic_dec2o(dqu._dq, dq_sref_cnt, relaxed); if (unlikely(ref_cnt >= 0)) { return; } if (unlikely(ref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } - dq->dq_state = 0xdead000000000000; - _dispatch_object_dealloc(dq); + dqu._dq->dq_state = 0xdead000000000000; + _dispatch_object_dealloc(dqu._dq); } DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL @@ -323,7 +390,6 @@ _dispatch_object_set_target_queue_inline(dispatch_object_t dou, _dispatch_retain(tq); tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); if (tq) _dispatch_release(tq); - _dispatch_object_debug(dou._do, "%s", __func__); } #endif // DISPATCH_PURE_C @@ -386,8 +452,8 @@ _dispatch_thread_frame_iterate_next(dispatch_thread_frame_iterator_t it) if (dtf) { dispatch_queue_t tq = dq->do_targetq; if (tq) { - // redirections, dispatch_sync and dispatch_trysync_f may skip - // frames, so we need to simulate seeing the missing links + // redirections or dispatch_sync may skip frames, + // so we need to simulate seeing the missing links it->dtfi_queue = tq; if (dq == dtf->dtf_queue) { it->dtfi_frame = dtf->dtf_prev; @@ -429,25 +495,26 @@ static inline void _dispatch_thread_frame_save_state(dispatch_thread_frame_t dtf) { _dispatch_thread_getspecific_packed_pair( - dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); + dispatch_queue_key, dispatch_frame_key, dtf->dtf_pair); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_thread_frame_push(dispatch_thread_frame_t dtf, dispatch_queue_t dq) +_dispatch_thread_frame_push(dispatch_thread_frame_t dtf, + dispatch_queue_class_t dqu) { _dispatch_thread_frame_save_state(dtf); - _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + 
_dispatch_thread_setspecific_pair(dispatch_queue_key, dqu._dq, dispatch_frame_key, dtf); } DISPATCH_ALWAYS_INLINE static inline void _dispatch_thread_frame_push_and_rebase(dispatch_thread_frame_t dtf, - dispatch_queue_t dq, dispatch_thread_frame_t new_base) + dispatch_queue_class_t dqu, dispatch_thread_frame_t new_base) { _dispatch_thread_frame_save_state(dtf); - _dispatch_thread_setspecific_pair(dispatch_queue_key, dq, + _dispatch_thread_setspecific_pair(dispatch_queue_key, dqu._dq, dispatch_frame_key, new_base); } @@ -456,7 +523,7 @@ static inline void _dispatch_thread_frame_pop(dispatch_thread_frame_t dtf) { _dispatch_thread_setspecific_packed_pair( - dispatch_queue_key, dispatch_frame_key, (void **)&dtf->dtf_queue); + dispatch_queue_key, dispatch_frame_key, dtf->dtf_pair); } DISPATCH_ALWAYS_INLINE @@ -464,8 +531,8 @@ static inline dispatch_queue_t _dispatch_thread_frame_stash(dispatch_thread_frame_t dtf) { _dispatch_thread_getspecific_pair( - dispatch_queue_key, (void **)&dtf->dtf_queue, - dispatch_frame_key, (void **)&dtf->dtf_prev); + dispatch_queue_key, &dtf->dtf_pair[0], + dispatch_frame_key, &dtf->dtf_pair[1]); _dispatch_thread_frame_pop(dtf->dtf_prev); return dtf->dtf_queue; } @@ -547,89 +614,84 @@ _dispatch_thread_override_end(mach_port_t thread, void *resource) DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t -_dispatch_queue_atomic_flags(dispatch_queue_t dq) +_dispatch_queue_atomic_flags(dispatch_queue_class_t dqu) { - return os_atomic_load2o(dq, dq_atomic_flags, relaxed); + return os_atomic_load2o(dqu._dq, dq_atomic_flags, relaxed); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t -_dispatch_queue_atomic_flags_set(dispatch_queue_t dq, +_dispatch_queue_atomic_flags_set(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_or2o(dq, dq_atomic_flags, bits, relaxed); + return os_atomic_or2o(dqu._dq, dq_atomic_flags, bits, relaxed); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t 
-_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_t dq, +_dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_class_t dqu, dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) { dispatch_queue_flags_t oflags, nflags; - os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + os_atomic_rmw_loop2o(dqu._dq, dq_atomic_flags, oflags, nflags, relaxed, { nflags = (oflags | add_bits) & ~clr_bits; + if (nflags == oflags) os_atomic_rmw_loop_give_up(return oflags); }); return oflags; } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t -_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_t dq, +_dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_class_t dqu, dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) { dispatch_queue_flags_t oflags, nflags; - os_atomic_rmw_loop2o(dq, dq_atomic_flags, oflags, nflags, relaxed, { + os_atomic_rmw_loop2o(dqu._dq, dq_atomic_flags, oflags, nflags, relaxed, { nflags = (oflags | add_bits) & ~clr_bits; + if (nflags == oflags) os_atomic_rmw_loop_give_up(return oflags); }); return nflags; } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t -_dispatch_queue_atomic_flags_set_orig(dispatch_queue_t dq, +_dispatch_queue_atomic_flags_set_orig(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_or_orig2o(dq, dq_atomic_flags, bits, relaxed); + return os_atomic_or_orig2o(dqu._dq, dq_atomic_flags, bits, relaxed); } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t -_dispatch_queue_atomic_flags_clear(dispatch_queue_t dq, +_dispatch_queue_atomic_flags_clear(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_and2o(dq, dq_atomic_flags, ~bits, relaxed); + return os_atomic_and2o(dqu._dq, dq_atomic_flags, ~bits, relaxed); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_is_thread_bound(dispatch_queue_t dq) +_dispatch_queue_is_thread_bound(dispatch_queue_class_t dqu) { - return 
_dispatch_queue_atomic_flags(dq) & DQF_THREAD_BOUND; + return _dispatch_queue_atomic_flags(dqu) & DQF_THREAD_BOUND; } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_cannot_trysync(dispatch_queue_t dq) +_dispatch_queue_label_needs_free(dispatch_queue_class_t dqu) { - return _dispatch_queue_atomic_flags(dq) & DQF_CANNOT_TRYSYNC; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_label_needs_free(dispatch_queue_t dq) -{ - return _dispatch_queue_atomic_flags(dq) & DQF_LABEL_NEEDS_FREE; + return _dispatch_queue_atomic_flags(dqu) & DQF_LABEL_NEEDS_FREE; } DISPATCH_ALWAYS_INLINE static inline dispatch_invoke_flags_t -_dispatch_queue_autorelease_frequency(dispatch_queue_t dq) +_dispatch_queue_autorelease_frequency(dispatch_queue_class_t dqu) { const unsigned long factor = DISPATCH_INVOKE_AUTORELEASE_ALWAYS / DQF_AUTORELEASE_ALWAYS; - dispatch_static_assert(factor > 0); + dispatch_assert(factor > 0); - dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dq); + dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dqu); qaf &= _DQF_AUTORELEASE_MASK; return (dispatch_invoke_flags_t)qaf * factor; @@ -637,10 +699,10 @@ _dispatch_queue_autorelease_frequency(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE static inline dispatch_invoke_flags_t -_dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, +_dispatch_queue_merge_autorelease_frequency(dispatch_queue_class_t dqu, dispatch_invoke_flags_t flags) { - dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dq); + dispatch_invoke_flags_t qaf = _dispatch_queue_autorelease_frequency(dqu); if (qaf) { flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; @@ -651,9 +713,9 @@ _dispatch_queue_merge_autorelease_frequency(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_is_legacy(dispatch_queue_t dq) +_dispatch_queue_is_mutable(dispatch_queue_class_t dqu) { - return _dispatch_queue_atomic_flags(dq) & DQF_LEGACY; + return _dispatch_queue_atomic_flags(dqu) & 
DQF_MUTABLE; } DISPATCH_ALWAYS_INLINE @@ -683,15 +745,37 @@ _dispatch_get_wlh(void) return _dispatch_thread_getspecific(dispatch_wlh_key); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_workloop_t +_dispatch_wlh_to_workloop(dispatch_wlh_t wlh) +{ + if (wlh == DISPATCH_WLH_ANON) { + return NULL; + } + if (dx_metatype((dispatch_workloop_t)wlh) == _DISPATCH_WORKLOOP_TYPE) { + return (dispatch_workloop_t)wlh; + } + return NULL; +} + DISPATCH_ALWAYS_INLINE DISPATCH_PURE static inline dispatch_wlh_t -_dispatch_get_wlh_reference(void) +_dispatch_get_event_wlh(void) { - dispatch_wlh_t wlh = _dispatch_thread_getspecific(dispatch_wlh_key); - if (wlh != DISPATCH_WLH_ANON) { - wlh = (dispatch_wlh_t)((uintptr_t)wlh & ~DISPATCH_WLH_STORAGE_REF); + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi) { + DISPATCH_COMPILER_CAN_ASSUME(ddi->ddi_wlh != DISPATCH_WLH_ANON); + return ddi->ddi_wlh; } - return wlh; + return DISPATCH_WLH_ANON; +} + +DISPATCH_ALWAYS_INLINE DISPATCH_PURE +static inline dispatch_wlh_t +_dispatch_get_wlh_reference(void) +{ + dispatch_wlh_t wlh = _dispatch_get_wlh(); + return (dispatch_wlh_t)((uintptr_t)wlh & ~DISPATCH_WLH_STORAGE_REF); } DISPATCH_ALWAYS_INLINE @@ -756,11 +840,10 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_wlh_should_poll_unote(dispatch_unote_t du) { - if (likely(_dispatch_needs_to_return_to_kernel())) { - dispatch_wlh_t wlh = _dispatch_get_wlh(); - return wlh != DISPATCH_WLH_ANON && du._du->du_wlh == wlh; - } - return false; + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + return _dispatch_needs_to_return_to_kernel() && ddi && + ddi->ddi_wlh != DISPATCH_WLH_ANON && + _dispatch_unote_wlh(du) == ddi->ddi_wlh; } #endif // DISPATCH_PURE_C @@ -1032,49 +1115,55 @@ static inline dispatch_qos_t _dispatch_get_basepri_override_qos_floor(void); static inline void _dispatch_set_basepri_override_qos(dispatch_qos_t qos); static inline void _dispatch_reset_basepri(dispatch_priority_t dbp); static inline 
dispatch_priority_t _dispatch_set_basepri(dispatch_priority_t dbp); -static inline bool _dispatch_queue_need_override_retain( - dispatch_queue_class_t dqu, dispatch_qos_t qos); #if DISPATCH_PURE_C // Note to later developers: ensure that any initialization changes are // made for statically allocated queues (i.e. _dispatch_main_q). -static inline void -_dispatch_queue_init(dispatch_queue_t dq, dispatch_queue_flags_t dqf, +static inline dispatch_queue_class_t +_dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, uint16_t width, uint64_t initial_state_bits) { uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width); + dispatch_queue_t dq = dqu._dq; dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_INACTIVE)) == 0); if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) { dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; - dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_queue_resume + dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume + if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { + dq->do_ref_cnt++; // released when DSF_DELETED is set + } } dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK); - dq->do_next = (struct dispatch_queue_s *)DISPATCH_OBJECT_LISTLESS; + dq->do_next = DISPATCH_OBJECT_LISTLESS; dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); dq->dq_state = dq_state; dq->dq_serialnum = os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); + return dqu; } +#define _dispatch_queue_alloc(name, dqf, w, initial_state_bits) \ + _dispatch_queue_init(_dispatch_object_alloc(DISPATCH_VTABLE(name),\ + sizeof(struct dispatch_##name##_s)), dqf, w, initial_state_bits) /* Used by: - * - _dispatch_queue_set_target_queue + * - _dispatch_lane_set_target_queue * - changing dispatch source handlers * * Tries to prevent concurrent wakeup of an inactive queue by suspending it. 
*/ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) +_dispatch_lane_try_inactive_suspend(dispatch_lane_class_t dqu) { uint64_t old_state, new_state; - (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + (void)os_atomic_rmw_loop2o(dqu._dl, dq_state, old_state, new_state, relaxed, { if (unlikely(!_dq_state_is_inactive(old_state))) { os_atomic_rmw_loop_give_up(return false); } @@ -1088,7 +1177,7 @@ _dispatch_queue_try_inactive_suspend(dispatch_queue_t dq) // // We don't want to handle the side suspend count in a codepath that // needs to be fast. - DISPATCH_CLIENT_CRASH(dq, "Too many calls to dispatch_suspend() " + DISPATCH_CLIENT_CRASH(0, "Too many calls to dispatch_suspend() " "prior to calling dispatch_set_target_queue() " "or dispatch_set_*_handler()"); } @@ -1216,38 +1305,6 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) !_dq_state_drain_locked(old_state); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_mgr_lock(dispatch_queue_t dq) -{ - uint64_t old_state, new_state, set_owner_and_set_full_width = - _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { - new_state = old_state; - if (unlikely(!_dq_state_is_runnable(old_state) || - _dq_state_drain_locked(old_state))) { - DISPATCH_INTERNAL_CRASH((uintptr_t)old_state, - "Locking the manager should not fail"); - } - new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - new_state |= set_owner_and_set_full_width; - }); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_mgr_unlock(dispatch_queue_t dq) -{ - uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - }); - return 
_dq_state_is_dirty(old_state); -} - /* Used by _dispatch_barrier_{try,}sync * * Note, this fails if any of e:1 or dl!=0, but that allows this code to be a @@ -1261,7 +1318,7 @@ _dispatch_queue_mgr_unlock(dispatch_queue_t dq) */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq, +_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, uint32_t tid, uint64_t suspend_count) { uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); @@ -1281,9 +1338,9 @@ _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_queue_t dq, DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid) +_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid) { - return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, 0); + return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0); } /* Used by _dispatch_sync for root queues and some drain codepaths @@ -1296,10 +1353,9 @@ _dispatch_queue_try_acquire_barrier_sync(dispatch_queue_t dq, uint32_t tid) */ DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_reserve_sync_width(dispatch_queue_t dq) +_dispatch_queue_reserve_sync_width(dispatch_lane_t dq) { - (void)os_atomic_add2o(dq, dq_state, - DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); + os_atomic_add2o(dq, dq_state, DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); } /* Used by _dispatch_sync on non-serial queues @@ -1309,7 +1365,7 @@ _dispatch_queue_reserve_sync_width(dispatch_queue_t dq) */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) +_dispatch_queue_try_reserve_sync_width(dispatch_lane_t dq) { uint64_t old_state, new_state; @@ -1331,43 +1387,6 @@ _dispatch_queue_try_reserve_sync_width(dispatch_queue_t dq) }); } -/* Used by _dispatch_apply_redirect - * - * Try to acquire at 
most da_width and returns what could be acquired, - * possibly 0 - */ -DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT -static inline int32_t -_dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width) -{ - uint64_t old_state, new_state; - int32_t width; - - (void)os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - width = (int32_t)_dq_state_available_width(old_state); - if (unlikely(!width)) { - os_atomic_rmw_loop_give_up(return 0); - } - if (width > da_width) { - width = da_width; - } - new_state = old_state + (uint64_t)width * DISPATCH_QUEUE_WIDTH_INTERVAL; - }); - return width; -} - -/* Used by _dispatch_apply_redirect - * - * Release width acquired by _dispatch_queue_try_acquire_width - */ -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_relinquish_width(dispatch_queue_t dq, int32_t da_width) -{ - (void)os_atomic_sub2o(dq, dq_state, - (uint64_t)da_width * DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); -} - /* Used by target-queue recursing code * * Initial state must be { sc:0, ib:0, qf:0, pb:0, d:0 } @@ -1375,7 +1394,7 @@ _dispatch_queue_relinquish_width(dispatch_queue_t dq, int32_t da_width) */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_acquire_async(dispatch_queue_t dq) +_dispatch_queue_try_acquire_async(dispatch_lane_t dq) { uint64_t old_state, new_state; @@ -1401,7 +1420,7 @@ _dispatch_queue_try_acquire_async(dispatch_queue_t dq) */ DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline bool -_dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned) +_dispatch_queue_try_upgrade_full_width(dispatch_lane_t dq, uint64_t owned) { uint64_t old_state, new_state; uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER + @@ -1429,15 +1448,16 @@ _dispatch_queue_try_upgrade_full_width(dispatch_queue_t dq, uint64_t owned) */ DISPATCH_ALWAYS_INLINE static inline uint64_t -_dispatch_queue_adjust_owned(dispatch_queue_t dq, uint64_t owned, 
+_dispatch_queue_adjust_owned(dispatch_queue_class_t dq, uint64_t owned, struct dispatch_object_s *next_dc) { + uint16_t dq_width = dq._dq->dq_width; uint64_t reservation; - if (unlikely(dq->dq_width > 1)) { + if (unlikely(dq_width > 1)) { if (next_dc && _dispatch_object_is_barrier(next_dc)) { reservation = DISPATCH_QUEUE_PENDING_BARRIER; - reservation += (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; + reservation += (dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; owned -= reservation; } } @@ -1489,101 +1509,109 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #pragma mark - #pragma mark os_mpsc_queue -// type_t * {volatile,const,_Atomic,...} -> type_t * -// type_t[] -> type_t * -#define os_unqualified_pointer_type(expr) \ - typeof(typeof(*(expr)) *) +#define _os_mpsc_head(q, _ns, ...) &(q)->_ns##_head ##__VA_ARGS__ +#define _os_mpsc_tail(q, _ns, ...) &(q)->_ns##_tail ##__VA_ARGS__ -#define os_mpsc_node_type(q, _ns) \ - os_unqualified_pointer_type((q)->_ns##_head) +#define os_mpsc(q, _ns, ...) 
(q, _ns, __VA_ARGS__) + +#define os_mpsc_node_type(Q) _os_atomic_basetypeof(_os_mpsc_head Q) // // Multi Producer calls, can be used safely concurrently // // Returns true when the queue was empty and the head must be set -#define os_mpsc_push_update_tail_list(q, _ns, head, tail, _o_next) ({ \ - os_mpsc_node_type(q, _ns) _head = (head), _tail = (tail), _prev; \ - _tail->_o_next = NULL; \ - _prev = os_atomic_xchg2o((q), _ns##_tail, _tail, release); \ +#define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ + os_mpsc_node_type(Q) _tl = (tail); \ + os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ + os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \ + }) + +#define os_mpsc_push_was_empty(prev) ((prev) == NULL) + +#define os_mpsc_push_update_prev(Q, prev, head, _o_next) ({ \ + os_mpsc_node_type(Q) _prev = (prev); \ if (likely(_prev)) { \ - os_atomic_store2o(_prev, _o_next, _head, relaxed); \ + (void)os_atomic_store2o(_prev, _o_next, (head), relaxed); \ + } else { \ + (void)os_atomic_store(_os_mpsc_head Q, (head), relaxed); \ } \ - (_prev == NULL); \ }) -// Returns true when the queue was empty and the head must be set -#define os_mpsc_push_update_tail(q, _ns, o, _o_next) ({ \ - os_mpsc_node_type(q, _ns) _o = (o); \ - os_mpsc_push_update_tail_list(q, _ns, _o, _o, _o_next); \ +#define os_mpsc_push_list(Q, head, tail, _o_next) ({ \ + os_mpsc_node_type(Q) _token; \ + _token = os_mpsc_push_update_tail(Q, tail, _o_next); \ + os_mpsc_push_update_prev(Q, _token, head, _o_next); \ + os_mpsc_push_was_empty(_token); \ }) -#define os_mpsc_push_update_head(q, _ns, o) ({ \ - os_atomic_store2o((q), _ns##_head, o, relaxed); \ +// Returns true when the queue was empty and the head must be set +#define os_mpsc_push_item(Q, tail, _o_next) ({ \ + os_mpsc_node_type(Q) _tail = (tail); \ + os_mpsc_push_list(Q, _tail, _tail, _o_next); \ }) // // Single Consumer calls, can NOT be used safely concurrently // -#define os_mpsc_get_head(q, _ns) \ - _dispatch_wait_until(os_atomic_load2o(q, 
_ns##_head, dependency)) +#define os_mpsc_looks_empty(Q) \ + (os_atomic_load(_os_mpsc_tail Q, relaxed) == NULL) + +#define os_mpsc_get_head(Q) \ + _dispatch_wait_until(os_atomic_load(_os_mpsc_head Q, dependency)) #define os_mpsc_get_next(_n, _o_next) \ _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency)) -#define os_mpsc_pop_head(q, _ns, head, _o_next) ({ \ - typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = (head), _n; \ +#define os_mpsc_pop_head(Q, head, _o_next) ({ \ + os_mpsc_node_type(Q) _head = (head), _n; \ _n = os_atomic_load2o(_head, _o_next, dependency); \ - os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ if (unlikely(!_n && \ - !os_atomic_cmpxchg2o(_q, _ns##_tail, _head, NULL, release))) { \ + !os_atomic_cmpxchg(_os_mpsc_tail Q, _head, NULL, release))) { \ _n = os_mpsc_get_next(_head, _o_next); \ - os_atomic_store2o(_q, _ns##_head, _n, relaxed); \ + os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ } \ _n; \ }) -#define os_mpsc_undo_pop_head(q, _ns, head, next, _o_next) ({ \ - typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = (head), _n = (next); \ +#define os_mpsc_undo_pop_list(Q, head, tail, next, _o_next) ({ \ + os_mpsc_node_type(Q) _hd = (head), _tl = (tail), _n = (next); \ + os_atomic_store2o(_tl, _o_next, _n, relaxed); \ if (unlikely(!_n && \ - !os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _head, relaxed))) { \ - _n = os_mpsc_get_head(q, _ns); \ - os_atomic_store2o(_head, _o_next, _n, relaxed); \ + !os_atomic_cmpxchg(_os_mpsc_tail Q, NULL, _tl, release))) { \ + _n = os_mpsc_get_head(Q); \ + os_atomic_store2o(_tl, _o_next, _n, relaxed); \ } \ - os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + os_atomic_store(_os_mpsc_head Q, _hd, relaxed); \ + }) + +#define os_mpsc_undo_pop_head(Q, head, next, _o_next) ({ \ + os_mpsc_node_type(Q) 
_head = (head); \ + os_mpsc_undo_pop_list(Q, _head, _head, next, _o_next); \ }) -#define os_mpsc_capture_snapshot(q, _ns, tail) ({ \ - typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = os_mpsc_get_head(q, _ns); \ - os_atomic_store2o(_q, _ns##_head, NULL, relaxed); \ +#define os_mpsc_capture_snapshot(Q, tail) ({ \ + os_mpsc_node_type(Q) _head = os_mpsc_get_head(Q); \ + os_atomic_store(_os_mpsc_head Q, NULL, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ - *(tail) = os_atomic_xchg2o(_q, _ns##_tail, NULL, release); \ + *(tail) = os_atomic_xchg(_os_mpsc_tail Q, NULL, release); \ _head; \ }) #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ - os_unqualified_pointer_type(head) _head = (head), _n = NULL; \ - if (_head != (tail)) { \ - _n = os_mpsc_get_next(_head, _o_next); \ - }; \ - _n; }) - -#define os_mpsc_prepend(q, _ns, head, tail, _o_next) ({ \ - typeof(q) _q = (q); \ - os_mpsc_node_type(_q, _ns) _head = (head), _tail = (tail), _n; \ - os_atomic_store2o(_tail, _o_next, NULL, relaxed); \ - if (unlikely(!os_atomic_cmpxchg2o(_q, _ns##_tail, NULL, _tail, release))) { \ - _n = os_mpsc_get_head(q, _ns); \ - os_atomic_store2o(_tail, _o_next, _n, relaxed); \ - } \ - os_atomic_store2o(_q, _ns##_head, _head, relaxed); \ + typeof(head) _head = (head), _tail = (tail), _n = NULL; \ + if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next); \ + _n; \ + }) + +#define os_mpsc_prepend(Q, head, tail, _o_next) ({ \ + os_mpsc_node_type(Q) _n = os_atomic_load(_os_mpsc_head Q, relaxed); \ + os_mpsc_undo_pop_list(Q, head, tail, _n, _o_next); \ }) #pragma mark - @@ -1591,7 +1619,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos) +_dispatch_queue_sidelock_trylock(dispatch_lane_t dq, dispatch_qos_t qos) { dispatch_tid 
owner; if (_dispatch_unfair_lock_trylock(&dq->dq_sidelock, &owner)) { @@ -1604,14 +1632,14 @@ _dispatch_queue_sidelock_trylock(dispatch_queue_t dq, dispatch_qos_t qos) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_sidelock_lock(dispatch_queue_t dq) +_dispatch_queue_sidelock_lock(dispatch_lane_t dq) { return _dispatch_unfair_lock_lock(&dq->dq_sidelock); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) +_dispatch_queue_sidelock_tryunlock(dispatch_lane_t dq) { if (_dispatch_unfair_lock_tryunlock(&dq->dq_sidelock)) { return true; @@ -1625,7 +1653,7 @@ _dispatch_queue_sidelock_tryunlock(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_sidelock_unlock(dispatch_queue_t dq) +_dispatch_queue_sidelock_unlock(dispatch_lane_t dq) { if (_dispatch_unfair_lock_unlock_had_failed_trylock(&dq->dq_sidelock)) { // Ensure that the root queue sees that this thread was overridden. @@ -1646,106 +1674,66 @@ _dispatch_queue_get_current(void) } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_set_current(dispatch_queue_t dq) +static inline dispatch_queue_t +_dispatch_queue_get_current_or_default(void) { - _dispatch_thread_setspecific(dispatch_queue_key, dq); + int idx = DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT; + return _dispatch_queue_get_current() ?: _dispatch_root_queues[idx]._as_dq; } DISPATCH_ALWAYS_INLINE -static inline struct dispatch_object_s* -_dispatch_queue_head(dispatch_queue_t dq) +static inline void +_dispatch_queue_set_current(dispatch_queue_class_t dqu) { - return os_mpsc_get_head(dq, dq_items); + _dispatch_thread_setspecific(dispatch_queue_key, dqu._dq); } DISPATCH_ALWAYS_INLINE static inline struct dispatch_object_s* -_dispatch_queue_next(dispatch_queue_t dq, struct dispatch_object_s *dc) +_dispatch_queue_get_head(dispatch_lane_class_t dq) { - return os_mpsc_pop_head(dq, dq_items, dc, do_next); + return os_mpsc_get_head(os_mpsc(dq._dl, dq_items)); } 
DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_push_update_tail(dispatch_queue_t dq, - struct dispatch_object_s *tail) +static inline struct dispatch_object_s* +_dispatch_queue_pop_head(dispatch_lane_class_t dq, struct dispatch_object_s *dc) { - // if we crash here with a value less than 0x1000, then we are - // at a known bug in client code. for example, see - // _dispatch_queue_dispose or _dispatch_atfork_child - return os_mpsc_push_update_tail(dq, dq_items, tail, do_next); + return os_mpsc_pop_head(os_mpsc(dq._dl, dq_items), dc, do_next); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_push_update_tail_list(dispatch_queue_t dq, - struct dispatch_object_s *head, struct dispatch_object_s *tail) +_dispatch_queue_push_item(dispatch_lane_class_t dqu, dispatch_object_t dou) { - // if we crash here with a value less than 0x1000, then we are - // at a known bug in client code. for example, see - // _dispatch_queue_dispose or _dispatch_atfork_child - return os_mpsc_push_update_tail_list(dq, dq_items, head, tail, do_next); + return os_mpsc_push_item(os_mpsc(dqu._dl, dq_items), dou._do, do_next); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_update_head(dispatch_queue_t dq, - struct dispatch_object_s *head) +_dispatch_root_queue_push_inline(dispatch_queue_global_t dq, + dispatch_object_t _head, dispatch_object_t _tail, int n) { - os_mpsc_push_update_head(dq, dq_items, head); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_root_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _head, - dispatch_object_t _tail, int n) -{ - struct dispatch_object_s *head = _head._do, *tail = _tail._do; - if (unlikely(_dispatch_queue_push_update_tail_list(dq, head, tail))) { - _dispatch_queue_push_update_head(dq, head); - return _dispatch_global_queue_poke(dq, n, 0); + struct dispatch_object_s *hd = _head._do, *tl = _tail._do; + if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) { + return 
_dispatch_root_queue_poke(dq, n, 0); } } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - dispatch_qos_t qos) -{ - struct dispatch_object_s *tail = _tail._do; - dispatch_wakeup_flags_t flags = 0; - // If we are going to call dx_wakeup(), the queue must be retained before - // the item we're pushing can be dequeued, which means: - // - before we exchange the tail if we may have to override - // - before we set the head if we made the queue non empty. - // Otherwise, if preempted between one of these and the call to dx_wakeup() - // the blocks submitted to the queue may release the last reference to the - // queue when invoked by _dispatch_queue_drain. - bool overriding = _dispatch_queue_need_override_retain(dq, qos); - if (unlikely(_dispatch_queue_push_update_tail(dq, tail))) { - if (!overriding) _dispatch_retain_2(dq->_as_os_obj); - _dispatch_queue_push_update_head(dq, tail); - flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; - } else if (overriding) { - flags = DISPATCH_WAKEUP_CONSUME_2; - } else { - return; - } - return dx_wakeup(dq, qos, flags); -} +#include "trace.h" DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_t dq, +_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_class_t dq, uint64_t dq_state) { + _dispatch_trace_item_push(tq, dq); return dx_push(tq, dq, _dq_state_max_qos(dq_state)); } DISPATCH_ALWAYS_INLINE static inline dispatch_priority_t -_dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq) +_dispatch_root_queue_identity_assume(dispatch_queue_global_t assumed_rq) { dispatch_priority_t old_dbp = _dispatch_get_basepri(); dispatch_assert(dx_hastypeflag(assumed_rq, QUEUE_ROOT)); @@ -1755,18 +1743,18 @@ _dispatch_root_queue_identity_assume(dispatch_queue_t assumed_rq) } typedef dispatch_queue_wakeup_target_t -_dispatch_queue_class_invoke_handler_t(dispatch_object_t, 
+_dispatch_queue_class_invoke_handler_t(dispatch_queue_class_t, dispatch_invoke_context_t dic, dispatch_invoke_flags_t, uint64_t *owned); DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_class_invoke(dispatch_object_t dou, +_dispatch_queue_class_invoke(dispatch_queue_class_t dqu, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, dispatch_invoke_flags_t const_restrict_flags, _dispatch_queue_class_invoke_handler_t invoke) { - dispatch_queue_t dq = dou._dq; + dispatch_queue_t dq = dqu._dq; dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; bool owning = !(flags & DISPATCH_INVOKE_STEALING); uint64_t owned = 0; @@ -1781,6 +1769,7 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, if (!(flags & (DISPATCH_INVOKE_STEALING | DISPATCH_INVOKE_WLH))) { dq->do_next = DISPATCH_OBJECT_LISTLESS; + _dispatch_trace_item_pop(_dispatch_queue_get_current(), dq); } flags |= const_restrict_flags; if (likely(flags & DISPATCH_INVOKE_WLH)) { @@ -1795,6 +1784,11 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, } else { old_dbp = 0; } + if (flags & DISPATCH_INVOKE_WORKLOOP_DRAIN) { + if (unlikely(_dispatch_queue_atomic_flags(dqu) & DQF_MUTABLE)) { + _dispatch_queue_atomic_flags_clear(dqu, DQF_MUTABLE); + } + } flags = _dispatch_queue_merge_autorelease_frequency(dq, flags); attempt_running_slow_head: @@ -1841,83 +1835,57 @@ _dispatch_queue_class_invoke(dispatch_object_t dou, } } if (likely(owning)) { - _dispatch_introspection_queue_item_complete(dq); + _dispatch_trace_item_complete(dq); } if (tq) { - if (const_restrict_flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS) { - dispatch_assert(dic->dic_deferred == NULL); - } else if (dic->dic_deferred) { - return _dispatch_queue_drain_sync_waiter(dq, dic, - flags, owned); - } - - uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED; - if (tq == DISPATCH_QUEUE_WAKEUP_MGR) { - enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR; - } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - 
new_state = old_state - owned; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state |= DISPATCH_QUEUE_DIRTY; - if (_dq_state_is_runnable(new_state) && - !_dq_state_is_enqueued(new_state)) { - // drain was not interupted for suspension - // we will reenqueue right away, just put ENQUEUED back - new_state |= enqueued; - } - }); - old_state -= owned; - if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. - _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); - } - if ((old_state ^ new_state) & enqueued) { - dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); - } + return _dispatch_queue_invoke_finish(dq, dic, tq, owned); } - _dispatch_release_2_tailcall(dq); + return _dispatch_release_2_tailcall(dq); } DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_queue_class_probe(dispatch_queue_class_t dqu) +_dispatch_queue_class_probe(dispatch_lane_class_t dqu) { struct dispatch_object_s *tail; // seq_cst wrt atomic store to dq_state // seq_cst wrt atomic store to dq_flags - tail = os_atomic_load2o(dqu._oq, oq_items_tail, ordered); + tail = os_atomic_load2o(dqu._dl, dq_items_tail, ordered); return unlikely(tail != NULL); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool -_dispatch_is_in_root_queues_array(dispatch_queue_t dq) +_dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) { - return (dq >= _dispatch_root_queues) && - (dq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); + return (dqu._dgq >= _dispatch_root_queues) && + (dqu._dgq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline dispatch_queue_t +static inline dispatch_queue_global_t _dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) { - if (unlikely(qos == DISPATCH_QOS_UNSPECIFIED || qos > DISPATCH_QOS_MAX)) { + if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { 
DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; } +#define _dispatch_get_default_queue(overcommit) \ + _dispatch_root_queues[DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS + \ + !!(overcommit)]._as_dq + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_set_bound_thread(dispatch_queue_t dq) +_dispatch_queue_set_bound_thread(dispatch_queue_class_t dqu) { // Tag thread-bound queues with the owning thread - dispatch_assert(_dispatch_queue_is_thread_bound(dq)); + dispatch_assert(_dispatch_queue_is_thread_bound(dqu)); uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop2o(dqu._dq, dq_state, old_state, new_state, relaxed, { new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; new_state |= _dispatch_lock_value_for_self(); @@ -1926,11 +1894,11 @@ _dispatch_queue_set_bound_thread(dispatch_queue_t dq) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_clear_bound_thread(dispatch_queue_t dq) +_dispatch_queue_clear_bound_thread(dispatch_queue_class_t dqu) { - dispatch_assert(_dispatch_queue_is_thread_bound(dq)); - _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC); - os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed); + dispatch_assert(_dispatch_queue_is_thread_bound(dqu)); + os_atomic_and2o(dqu._dq, dq_state, + ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed); } DISPATCH_ALWAYS_INLINE @@ -1988,8 +1956,7 @@ _dispatch_get_basepri_override_qos_floor(void) dispatch_priority_t dbp = _dispatch_get_basepri(); dispatch_qos_t qos = _dispatch_priority_qos(dbp); dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); - dispatch_qos_t qos_floor = MAX(qos, oqos); - return qos_floor ? 
qos_floor : DISPATCH_QOS_SATURATED; + return MAX(qos, oqos); } DISPATCH_ALWAYS_INLINE @@ -2025,27 +1992,38 @@ _dispatch_reset_basepri_override(void) DISPATCH_ALWAYS_INLINE static inline dispatch_priority_t -_dispatch_set_basepri(dispatch_priority_t dbp) +_dispatch_set_basepri(dispatch_priority_t dq_dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - const dispatch_priority_t preserved_mask = - DISPATCH_PRIORITY_OVERRIDE_MASK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT; dispatch_priority_t old_dbp = _dispatch_get_basepri(); - if (old_dbp) { - dispatch_priority_t flags, defaultqueue, basepri; - flags = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); - defaultqueue = (old_dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); - basepri = old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK; - dbp &= DISPATCH_PRIORITY_REQUESTED_MASK; - if (!dbp) { - flags = DISPATCH_PRIORITY_FLAG_INHERIT | defaultqueue; - dbp = basepri; - } else if (dbp < basepri && !defaultqueue) { // rdar://16349734 - dbp = basepri; + dispatch_priority_t dbp = old_dbp; + + if (unlikely(!old_dbp)) { + dbp = dq_dbp & ~DISPATCH_PRIORITY_OVERRIDE_MASK; + } else if (dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK) { + dbp &= (DISPATCH_PRIORITY_OVERRIDE_MASK | + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + dbp |= MAX(old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK, + dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK); + if (_dispatch_priority_fallback_qos(dq_dbp) > + _dispatch_priority_qos(dbp)) { + dq_dbp &= (DISPATCH_PRIORITY_FALLBACK_QOS_MASK | + DISPATCH_PRIORITY_FLAG_FALLBACK | + DISPATCH_PRIORITY_FLAG_FLOOR); + } else { + dq_dbp &= DISPATCH_PRIORITY_FLAG_FLOOR; } - dbp |= flags | (old_dbp & preserved_mask); + dbp |= dq_dbp; } else { - dbp &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; + if (dbp & DISPATCH_PRIORITY_REQUESTED_MASK) { + dbp |= DISPATCH_PRIORITY_FLAG_FLOOR; + } + if (_dispatch_priority_fallback_qos(dq_dbp) > + _dispatch_priority_qos(dbp)) { + dbp &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + dbp |= (dq_dbp & (DISPATCH_PRIORITY_FALLBACK_QOS_MASK | + 
DISPATCH_PRIORITY_FLAG_FALLBACK)); + } } _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); return old_dbp; @@ -2056,106 +2034,65 @@ _dispatch_set_basepri(dispatch_priority_t dbp) } DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_set_basepri_wlh(dispatch_priority_t dbp) +static inline void +_dispatch_init_basepri(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS dispatch_assert(!_dispatch_get_basepri()); - // _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED) - dbp |= DISPATCH_QOS_SATURATED << DISPATCH_PRIORITY_OVERRIDE_SHIFT; _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); #else (void)dbp; #endif - return 0; } DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) +static inline void +_dispatch_init_basepri_wlh(dispatch_priority_t dbp) { #if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_priority_t inherited, defaultqueue, dbp = _dispatch_get_basepri(); - pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp); - bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || - (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); - inherited = (dbp & DISPATCH_PRIORITY_FLAG_INHERIT); - defaultqueue = (dbp & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - - if (!pp) { - return basepp; - } else if (defaultqueue) { // rdar://16349734 - return pp; - } else if (pp < basepp) { - return basepp; - } else if (enforce || inherited) { - return pp; - } else { - return basepp; - } + dispatch_assert(!_dispatch_get_basepri()); + dbp |= _dispatch_priority_make_override(DISPATCH_QOS_SATURATED); + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); #else - (void)pp; (void)flags; - return 0; + (void)dbp; #endif } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_priority_inherit_from_target(dispatch_queue_t dq, - dispatch_queue_t tq) +_dispatch_clear_basepri(void) { #if 
HAVE_PTHREAD_WORKQUEUE_QOS - const dispatch_priority_t rootqueue_flag = DISPATCH_PRIORITY_FLAG_ROOTQUEUE; - const dispatch_priority_t inherited_flag = DISPATCH_PRIORITY_FLAG_INHERIT; - const dispatch_priority_t defaultqueue_flag = - DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; - dispatch_priority_t pri = dq->dq_priority, tpri = tq->dq_priority; - - if ((!_dispatch_priority_qos(pri) || (pri & inherited_flag)) && - (tpri & rootqueue_flag)) { - if (_dispatch_priority_override_qos(pri) == DISPATCH_QOS_SATURATED) { - pri &= DISPATCH_PRIORITY_OVERRIDE_MASK; - } else { - pri = 0; - } - if (tpri & defaultqueue_flag) { - // base queues need to know they target - // the default root queue so that _dispatch_queue_override_qos() - // in _dispatch_queue_class_wakeup() can fallback to QOS_DEFAULT - // if no other priority was provided. - pri |= defaultqueue_flag; - } else { - pri |= (tpri & ~rootqueue_flag) | inherited_flag; - } - dq->dq_priority = pri; - } else if (pri & defaultqueue_flag) { - // the DEFAULTQUEUE flag is only set on queues due to the code above, - // and must never be kept if we don't target a global root queue. 
- dq->dq_priority = (pri & ~defaultqueue_flag); - } -#else - (void)dq; (void)tq; + _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)0); #endif } DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_priority_inherit_from_root_queue(dispatch_priority_t pri, - dispatch_queue_t rq) +static inline pthread_priority_t +_dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) { #if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_priority_t p = pri & DISPATCH_PRIORITY_REQUESTED_MASK; - dispatch_priority_t rqp = rq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; - dispatch_priority_t defaultqueue = - rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; + dispatch_priority_t dbp = _dispatch_get_basepri(); + pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp); + pthread_priority_t minbasepp = basepp & + ~(pthread_priority_t)_PTHREAD_PRIORITY_PRIORITY_MASK; + bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || + (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); + pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!p || (!defaultqueue && p < rqp)) { - p = rqp | defaultqueue; + if (unlikely(!pp)) { + dispatch_qos_t fallback = _dispatch_priority_fallback_qos(dbp); + return fallback ? 
_dispatch_qos_to_pp(fallback) : basepp; + } else if (pp < minbasepp) { + return basepp; + } else if (enforce || (dbp & (DISPATCH_PRIORITY_FLAG_FLOOR | + DISPATCH_PRIORITY_FLAG_FALLBACK))) { + return pp; + } else { + return basepp; } - return p | (rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); #else - (void)rq; (void)pri; + (void)pp; (void)flags; return 0; #endif } @@ -2263,41 +2200,43 @@ _dispatch_reset_voucher(voucher_t v, dispatch_thread_set_self_t flags) } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_need_override(dispatch_queue_class_t dqu, dispatch_qos_t qos) +static inline dispatch_qos_t +_dispatch_queue_push_qos(dispatch_queue_class_t dq, dispatch_qos_t qos) { - uint64_t dq_state = os_atomic_load2o(dqu._dq, dq_state, relaxed); - // dq_priority "override qos" contains the priority at which the queue - // is already running for thread-bound queues. - // For non thread-bound queues, the qos of the queue may not be observed - // when the first work item is dispatched synchronously. 
- return _dq_state_max_qos(dq_state) < qos && - _dispatch_priority_override_qos(dqu._dq->dq_priority) < qos; + if (qos > _dispatch_priority_qos(dq._dl->dq_priority)) { + return qos; + } + return DISPATCH_QOS_UNSPECIFIED; } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_need_override_retain(dispatch_queue_class_t dqu, - dispatch_qos_t qos) +static inline dispatch_qos_t +_dispatch_queue_wakeup_qos(dispatch_queue_class_t dq, dispatch_qos_t qos) { - if (_dispatch_queue_need_override(dqu, qos)) { - _os_object_retain_internal_n_inline(dqu._oq->_as_os_obj, 2); - return true; - } - return false; + if (!qos) qos = _dispatch_priority_fallback_qos(dq._dl->dq_priority); + // for asynchronous workitems, queue priority is the floor for overrides + return MAX(qos, _dispatch_priority_qos(dq._dl->dq_priority)); } DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t -_dispatch_queue_override_qos(dispatch_queue_class_t dqu, dispatch_qos_t qos) +_dispatch_queue_max_qos(dispatch_queue_class_t dq) { - if (dqu._oq->oq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) { - // queues targeting the default root queue use any asynchronous - // workitem priority available and fallback to QOS_DEFAULT otherwise. - return qos ? qos : DISPATCH_QOS_DEFAULT; - } - // for asynchronous workitems, queue priority is the floor for overrides - return MAX(qos, _dispatch_priority_qos(dqu._oq->oq_priority)); + // Note: the non atomic load allows to avoid CAS on 32bit architectures + // which doesn't give us much as the bits we want are in a single byte + // and can't quite be read non atomically. Given that this function is + // called in various critical codepaths (such as _dispatch_lane_push() + // between the tail exchange and updating the `prev` pointer), we care + // deeply about avoiding this. 
+ return _dq_state_max_qos((uint64_t)dq._dl->dq_state_bits << 32); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_need_override(dispatch_queue_class_t dq, dispatch_qos_t qos) +{ + dispatch_qos_t max_qos = _dispatch_queue_max_qos(dq); + return max_qos == DISPATCH_QOS_UNSPECIFIED || max_qos < qos; } #define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1 @@ -2355,23 +2294,21 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_block_has_private_data(const dispatch_block_t block) { - extern void (*_dispatch_block_special_invoke)(void*); return (_dispatch_Block_invoke(block) == _dispatch_block_special_invoke); } DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT static inline pthread_priority_t _dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags, - pthread_priority_t new_pri) + pthread_priority_t new_pri) { pthread_priority_t old_pri, p = 0; // 0 means do not change priority. if ((flags & DISPATCH_BLOCK_HAS_PRIORITY) && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) { - old_pri = _dispatch_get_priority(); new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - p = old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (!p || p >= new_pri) p = 0; + old_pri = _dispatch_get_priority() & ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (old_pri && old_pri < new_pri) p = old_pri; } return p; } @@ -2454,6 +2391,11 @@ _dispatch_continuation_free_cacheonly(dispatch_continuation_t dc) } dc->do_next = prev_dc; dc->dc_cache_cnt = cnt; +#if DISPATCH_ALLOCATOR + // This magical value helps memory tools to recognize continuations on + // the various free lists that are really free. 
+ dc->dc_flags = (uintptr_t)(void *)&_dispatch_main_heap; +#endif _dispatch_thread_setspecific(dispatch_cache_key, dc); return NULL; } @@ -2468,8 +2410,6 @@ _dispatch_continuation_free(dispatch_continuation_t dc) } } -#include "trace.h" - DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc) @@ -2478,7 +2418,7 @@ _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc) unsigned long type = dx_type(dou); if (type == DISPATCH_GROUP_TYPE) { _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_introspection_queue_item_complete(dou); + _dispatch_trace_item_complete(dc); dispatch_group_leave((dispatch_group_t)dou); } else { DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type"); @@ -2487,8 +2427,8 @@ _dispatch_continuation_with_group_invoke(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, - dispatch_invoke_flags_t flags) +_dispatch_continuation_invoke_inline(dispatch_object_t dou, + dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu) { dispatch_continuation_t dc = dou._dc, dc1; dispatch_invoke_with_autoreleasepool(flags, { @@ -2499,17 +2439,20 @@ _dispatch_continuation_invoke_inline(dispatch_object_t dou, voucher_t ov, // The ccache version is per-thread. // Therefore, the object has not been reused yet. // This generates better assembly. 
- _dispatch_continuation_voucher_adopt(dc, ov, dc_flags); - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { + _dispatch_continuation_voucher_adopt(dc, dc_flags); + if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) { + _dispatch_trace_item_pop(dqu, dou); + } + if (dc_flags & DC_FLAG_CONSUME) { dc1 = _dispatch_continuation_free_cacheonly(dc); } else { dc1 = NULL; } - if (unlikely(dc_flags & DISPATCH_OBJ_GROUP_BIT)) { + if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) { _dispatch_continuation_with_group_invoke(dc); } else { _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); - _dispatch_introspection_queue_item_complete(dou); + _dispatch_trace_item_complete(dc); } if (unlikely(dc1)) { _dispatch_continuation_free_to_cache_limit(dc1); @@ -2522,147 +2465,131 @@ DISPATCH_ALWAYS_INLINE_NDEBUG static inline void _dispatch_continuation_pop_inline(dispatch_object_t dou, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - dispatch_queue_t dq) + dispatch_queue_class_t dqu) { dispatch_pthread_root_queue_observer_hooks_t observer_hooks = _dispatch_get_pthread_root_queue_observer_hooks(); - if (observer_hooks) observer_hooks->queue_will_execute(dq); - _dispatch_trace_continuation_pop(dq, dou); + if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._do, dic, flags); + dx_invoke(dou._dq, dic, flags); } else { - _dispatch_continuation_invoke_inline(dou, DISPATCH_NO_VOUCHER, flags); + _dispatch_continuation_invoke_inline(dou, flags, dqu); } - if (observer_hooks) observer_hooks->queue_did_execute(dq); + if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq); } // used to forward the do_invoke of a continuation with a vtable to its real // implementation. -#define _dispatch_continuation_pop_forwarded(dc, ov, dc_flags, ...) \ +#define _dispatch_continuation_pop_forwarded(dc, dc_flags, dq, ...) 
\ ({ \ dispatch_continuation_t _dc = (dc), _dc1; \ uintptr_t _dc_flags = (dc_flags); \ - _dispatch_continuation_voucher_adopt(_dc, ov, _dc_flags); \ - if (_dc_flags & DISPATCH_OBJ_CONSUME_BIT) { \ + _dispatch_continuation_voucher_adopt(_dc, _dc_flags); \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_pop(dq, dc); \ + } \ + if (_dc_flags & DC_FLAG_CONSUME) { \ _dc1 = _dispatch_continuation_free_cacheonly(_dc); \ } else { \ _dc1 = NULL; \ } \ __VA_ARGS__; \ - _dispatch_introspection_queue_item_complete(_dc); \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_complete(_dc); \ + } \ if (unlikely(_dc1)) { \ _dispatch_continuation_free_to_cache_limit(_dc1); \ } \ }) DISPATCH_ALWAYS_INLINE -static inline void +static inline dispatch_qos_t _dispatch_continuation_priority_set(dispatch_continuation_t dc, + dispatch_queue_class_t dqu, pthread_priority_t pp, dispatch_block_flags_t flags) { + dispatch_qos_t qos = DISPATCH_QOS_UNSPECIFIED; #if HAVE_PTHREAD_WORKQUEUE_QOS - if (likely(!(flags & DISPATCH_BLOCK_HAS_PRIORITY))) { - pp = _dispatch_priority_propagate(); - } - if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { - pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + dispatch_queue_t dq = dqu._dq; + + if (likely(pp)) { + bool enforce = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS); + bool is_floor = (dq->dq_priority & DISPATCH_PRIORITY_FLAG_FLOOR); + bool dq_has_qos = (dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK); + if (enforce) { + pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG; + qos = _dispatch_qos_from_pp_unsafe(pp); + } else if (!is_floor && dq_has_qos) { + pp = 0; + } else { + qos = _dispatch_qos_from_pp_unsafe(pp); + } } dc->dc_priority = pp; #else - (void)dc; (void)pp; (void)flags; + (void)dc; (void)dqu; (void)pp; (void)flags; #endif + return qos; } DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t -_dispatch_continuation_override_qos(dispatch_queue_t dq, - dispatch_continuation_t dc) -{ -#if HAVE_PTHREAD_WORKQUEUE_QOS - 
dispatch_qos_t dc_qos = _dispatch_qos_from_pp(dc->dc_priority); - bool enforce = dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG; - dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); - bool defaultqueue = dq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; - - dispatch_assert(dc->dc_priority != DISPATCH_NO_PRIORITY); - if (dc_qos && (enforce || !dq_qos || defaultqueue)) { - return dc_qos; - } - return dq_qos; -#else - (void)dq; (void)dc; - return 0; -#endif -} - -DISPATCH_ALWAYS_INLINE -static inline void _dispatch_continuation_init_f(dispatch_continuation_t dc, - dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t func, - pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags) + dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f, + dispatch_block_flags_t flags, uintptr_t dc_flags) { - dc->dc_flags = dc_flags; - dc->dc_func = func; + pthread_priority_t pp = 0; + dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED; + dc->dc_func = f; dc->dc_ctxt = ctxt; - _dispatch_continuation_voucher_set(dc, dqu, flags); - _dispatch_continuation_priority_set(dc, pp, flags); + // in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority + // should not be propagated, only taken from the handler if it has one + if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { + pp = _dispatch_priority_propagate(); + } + _dispatch_continuation_voucher_set(dc, flags); + return _dispatch_continuation_priority_set(dc, dqu, pp, flags); } DISPATCH_ALWAYS_INLINE -static inline void +static inline dispatch_qos_t _dispatch_continuation_init(dispatch_continuation_t dc, dispatch_queue_class_t dqu, dispatch_block_t work, - pthread_priority_t pp, dispatch_block_flags_t flags, uintptr_t dc_flags) + dispatch_block_flags_t flags, uintptr_t dc_flags) { - dc->dc_flags = dc_flags | DISPATCH_OBJ_BLOCK_BIT; - dc->dc_ctxt = _dispatch_Block_copy(work); - _dispatch_continuation_priority_set(dc, pp, flags); + void *ctxt = _dispatch_Block_copy(work); + dc_flags |= 
DC_FLAG_BLOCK | DC_FLAG_ALLOCATED; if (unlikely(_dispatch_block_has_private_data(work))) { - // always sets dc_func & dc_voucher - // may update dc_priority & do_vtable + dc->dc_flags = dc_flags; + dc->dc_ctxt = ctxt; + // will initialize all fields but requires dc_flags & dc_ctxt to be set return _dispatch_continuation_init_slow(dc, dqu, flags); } - if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { - dc->dc_func = _dispatch_call_block_and_release; - } else { - dc->dc_func = _dispatch_Block_invoke(work); + dispatch_function_t func = _dispatch_Block_invoke(work); + if (dc_flags & DC_FLAG_CONSUME) { + func = _dispatch_call_block_and_release; } - _dispatch_continuation_voucher_set(dc, dqu, flags); + return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags); } -#if HAVE_MACH -#pragma mark dispatch_mach_reply_refs_t - -// assumes low bit of mach port names is always set -#define DISPATCH_MACH_REPLY_PORT_UNOWNED 0x1u - DISPATCH_ALWAYS_INLINE static inline void -_dispatch_mach_reply_mark_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - dmr->du_ident &= ~DISPATCH_MACH_REPLY_PORT_UNOWNED; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_reply_is_reply_port_owned(dispatch_mach_reply_refs_t dmr) -{ - mach_port_t reply_port = (mach_port_t)dmr->du_ident; - return reply_port ? !(reply_port & DISPATCH_MACH_REPLY_PORT_UNOWNED) :false; -} - -DISPATCH_ALWAYS_INLINE -static inline mach_port_t -_dispatch_mach_reply_get_reply_port(mach_port_t reply_port) +_dispatch_continuation_async(dispatch_queue_class_t dqu, + dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags) { - return reply_port ? 
(reply_port | DISPATCH_MACH_REPLY_PORT_UNOWNED) : 0; +#if DISPATCH_INTROSPECTION + if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) { + _dispatch_trace_item_push(dqu, dc); + } +#else + (void)dc_flags; +#endif + return dx_push(dqu._dq, dc, qos); } -#endif // HAVE_MACH - #endif // DISPATCH_PURE_C #endif /* __DISPATCH_INLINE_INTERNAL__ */ diff --git a/src/internal.h b/src/internal.h index 286e53458..df742a20b 100644 --- a/src/internal.h +++ b/src/internal.h @@ -40,34 +40,7 @@ #include #include #include - -#ifndef TARGET_OS_MAC_DESKTOP -#define TARGET_OS_MAC_DESKTOP (TARGET_OS_MAC && \ - !TARGET_OS_SIMULATOR && !TARGET_OS_IPHONE && !TARGET_OS_EMBEDDED) -#endif - -#if TARGET_OS_MAC_DESKTOP -# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ - (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "OS X hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -#elif TARGET_OS_SIMULATOR -# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ - (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "Simulator hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -#else -# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 -# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 -# error "iOS hosts older than iOS 9.0 aren't supported anymore" -# endif -#endif - -#else // !__APPLE__ -#define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 0 -#endif // !__APPLE__ +#endif // __APPLE__ #if !defined(DISPATCH_MACH_SPI) && TARGET_OS_MAC @@ -138,6 +111,91 @@ #endif #endif +#ifndef DISPATCH_STATIC_GLOBAL +#define DISPATCH_STATIC_GLOBAL(declaration) static declaration +#endif +#ifndef DISPATCH_GLOBAL +#define DISPATCH_GLOBAL(declaration) declaration +#endif +#ifndef DISPATCH_GLOBAL_INIT +#define DISPATCH_GLOBAL_INIT(declaration, ...) 
declaration = __VA_ARGS__ +#endif + +#if defined(__OBJC__) || defined(__cplusplus) +#define DISPATCH_PURE_C 0 +#else +#define DISPATCH_PURE_C 1 +#endif + +#ifdef __OBJC__ +@protocol OS_dispatch_queue; +#endif + +// Lane cluster class: type for all the queues that have a single head/tail pair +typedef union { + struct dispatch_lane_s *_dl; + struct dispatch_queue_static_s *_dsq; + struct dispatch_queue_global_s *_dgq; + struct dispatch_queue_pthread_root_s *_dpq; + struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; +#ifdef __OBJC__ + id _objc_dq; // unsafe cast for the sake of object.m +#endif +} dispatch_lane_class_t DISPATCH_TRANSPARENT_UNION; + +// Dispatch queue cluster class: type for any dispatch_queue_t +typedef union { + struct dispatch_queue_s *_dq; + struct dispatch_workloop_s *_dwl; + struct dispatch_lane_s *_dl; + struct dispatch_queue_static_s *_dsq; + struct dispatch_queue_global_s *_dgq; + struct dispatch_queue_pthread_root_s *_dpq; + struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + dispatch_lane_class_t _dlu; +#ifdef __OBJC__ + id _objc_dq; +#endif +} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION; + +#ifndef __OBJC__ +typedef union { + struct _os_object_s *_os_obj; + struct dispatch_object_s *_do; + struct dispatch_queue_s *_dq; + struct dispatch_queue_attr_s *_dqa; + struct dispatch_group_s *_dg; + struct dispatch_source_s *_ds; + struct dispatch_mach_s *_dm; + struct dispatch_mach_msg_s *_dmsg; + struct dispatch_semaphore_s *_dsema; + struct dispatch_data_s *_ddata; + struct dispatch_io_s *_dchannel; + + struct dispatch_continuation_s *_dc; + struct dispatch_sync_context_s *_dsc; + struct dispatch_operation_s *_doperation; + struct dispatch_disk_s *_ddisk; + struct dispatch_workloop_s *_dwl; + struct dispatch_lane_s *_dl; + struct dispatch_queue_static_s *_dsq; + struct dispatch_queue_global_s *_dgq; + struct dispatch_queue_pthread_root_s *_dpq; + dispatch_queue_class_t _dqu; + dispatch_lane_class_t _dlu; + uintptr_t 
_do_value; +} dispatch_object_t DISPATCH_TRANSPARENT_UNION; + +DISPATCH_ALWAYS_INLINE +static inline dispatch_object_t +upcast(dispatch_object_t dou) +{ + return dou; +} +#endif // __OBJC__ + #include #include #include @@ -152,16 +210,11 @@ #include #endif -#if defined(__OBJC__) || defined(__cplusplus) -#define DISPATCH_PURE_C 0 -#else -#define DISPATCH_PURE_C 1 -#endif - /* private.h must be included last to avoid picking up installed headers. */ #include #include "os/object_private.h" #include "queue_private.h" +#include "workloop_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" @@ -174,46 +227,6 @@ #include "benchmark.h" #include "private.h" -/* SPI for Libsystem-internal use */ -DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); -#if !TARGET_OS_WIN32 -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); -DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); -#endif - -/* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ - -// Abort on uncaught exceptions thrown from client callouts rdar://8577499 -#if !defined(DISPATCH_USE_CLIENT_CALLOUT) -#define DISPATCH_USE_CLIENT_CALLOUT 1 -#endif - -#define DISPATCH_ALLOW_NON_LEAF_RETARGET 1 - -/* The "_debug" library build */ -#ifndef DISPATCH_DEBUG -#define DISPATCH_DEBUG 0 -#endif - -#ifndef DISPATCH_PROFILE -#define DISPATCH_PROFILE 0 -#endif - -#if (!TARGET_OS_EMBEDDED || DISPATCH_DEBUG || DISPATCH_PROFILE) && \ - !defined(DISPATCH_USE_DTRACE) -#define DISPATCH_USE_DTRACE 1 -#endif - -#if DISPATCH_USE_DTRACE && (DISPATCH_INTROSPECTION || DISPATCH_DEBUG || \ - DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION) -#define DISPATCH_USE_DTRACE_INTROSPECTION 1 -#endif - -#ifndef DISPATCH_DEBUG_QOS -#define DISPATCH_DEBUG_QOS DISPATCH_DEBUG -#endif - #if HAVE_LIBKERN_OSCROSSENDIAN_H #include #endif @@ -238,7 +251,17 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #include #include +#if __has_include() +#include +#endif #endif /* HAVE_MACH */ +#if __has_include() +#define HAVE_OS_FAULT_WITH_PAYLOAD 1 +#include +#include +#else +#define HAVE_OS_FAULT_WITH_PAYLOAD 0 +#endif #if HAVE_MALLOC_MALLOC_H #include #endif @@ -289,6 +312,48 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #include #endif +/* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ + +__BEGIN_DECLS + +/* SPI for Libsystem-internal use */ +DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); +#if !TARGET_OS_WIN32 +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); +DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); +#endif + +// Abort on uncaught exceptions thrown from client callouts rdar://8577499 +#if !defined(DISPATCH_USE_CLIENT_CALLOUT) +#define DISPATCH_USE_CLIENT_CALLOUT 1 +#endif + +#define DISPATCH_ALLOW_NON_LEAF_RETARGET 1 + +/* The "_debug" library build */ +#ifndef DISPATCH_DEBUG +#define DISPATCH_DEBUG 0 +#endif + +#ifndef DISPATCH_PROFILE +#define DISPATCH_PROFILE 0 +#endif + +#if (TARGET_OS_OSX || DISPATCH_DEBUG || DISPATCH_PROFILE) && \ + !defined(DISPATCH_USE_DTRACE) +#define DISPATCH_USE_DTRACE 1 +#endif + +#if DISPATCH_USE_DTRACE && (DISPATCH_INTROSPECTION || DISPATCH_DEBUG || \ + DISPATCH_PROFILE) && !defined(DISPATCH_USE_DTRACE_INTROSPECTION) +#define DISPATCH_USE_DTRACE_INTROSPECTION 1 +#endif + +#ifndef DISPATCH_DEBUG_QOS +#define DISPATCH_DEBUG_QOS DISPATCH_DEBUG +#endif + #if __GNUC__ #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) @@ -351,27 +416,22 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define USEC_PER_SEC 1000000ull #define NSEC_PER_USEC 1000ull -/* I wish we had __builtin_expect_range() */ #if __GNUC__ -#define _safe_cast_to_long(x) \ - ({ _Static_assert(sizeof(typeof(x)) <= sizeof(long), \ - "__builtin_expect doesn't support types wider than long"); \ - (long)(x); }) -#define fastpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), ~0l)) -#define slowpath(x) ((typeof(x))__builtin_expect(_safe_cast_to_long(x), 0l)) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else -#define fastpath(x) (x) -#define slowpath(x) (x) #define likely(x) (!!(x)) #define unlikely(x) 
(!!(x)) #endif // __GNUC__ +#define _LIST_IS_ENQUEUED(elm, field) \ + ((elm)->field.le_prev != NULL) +#define _LIST_MARK_NOT_ENQUEUED(elm, field) \ + ((void)((elm)->field.le_prev = NULL)) #define _TAILQ_IS_ENQUEUED(elm, field) \ ((elm)->field.tqe_prev != NULL) #define _TAILQ_MARK_NOT_ENQUEUED(elm, field) \ - do { (elm)->field.tqe_prev = NULL; } while (0) + ((void)((elm)->field.tqe_prev = NULL)) #if DISPATCH_DEBUG // sys/queue.h debugging @@ -382,6 +442,10 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define TRASHIT(x) do {(x) = (void *)-1;} while (0) #endif #endif // DISPATCH_DEBUG +#define _LIST_TRASH_ENTRY(elm, field) do { \ + TRASHIT((elm)->field.le_next); \ + TRASHIT((elm)->field.le_prev); \ + } while (0) #define _TAILQ_TRASH_ENTRY(elm, field) do { \ TRASHIT((elm)->field.tqe_next); \ TRASHIT((elm)->field.tqe_prev); \ @@ -391,18 +455,27 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); TRASHIT((head)->tqh_last); \ } while (0) +#define DISPATCH_MODE_STRICT (1U << 0) +#define DISPATCH_MODE_NO_FAULTS (1U << 1) +extern uint8_t _dispatch_mode; + DISPATCH_EXPORT DISPATCH_NOINLINE void _dispatch_bug(size_t line, long val); -DISPATCH_NOINLINE -void _dispatch_bug_client(const char* msg); #if HAVE_MACH DISPATCH_NOINLINE void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); #endif // HAVE_MACH + +struct dispatch_unote_class_s; + DISPATCH_NOINLINE -void _dispatch_bug_kevent_client(const char* msg, const char* filter, - const char *operation, int err); +void _dispatch_bug_kevent_client(const char *msg, const char *filter, + const char *operation, int err, uint64_t ident, uint64_t udata, + struct dispatch_unote_class_s *du); + +DISPATCH_NOINLINE +void _dispatch_bug_kevent_vanished(struct dispatch_unote_class_s *du); DISPATCH_NOINLINE void _dispatch_bug_deprecated(const char *msg); @@ -428,6 +501,16 @@ void _dispatch_abort(size_t line, long val); #include #endif +#define DISPATCH_BAD_INPUT ((void *_Nonnull)0) 
+#define DISPATCH_OUT_OF_MEMORY ((void *_Nonnull)0) + +#if __has_attribute(diagnose_if) +#define DISPATCH_STATIC_ASSERT_IF(e) \ + __attribute__((diagnose_if(e, "Assertion failed", "error"))) +#else +#define DISPATCH_STATIC_ASSERT_IF(e) +#endif // __has_attribute(diagnose_if) + #if DISPATCH_USE_OS_DEBUG_LOG #define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__) #else @@ -439,64 +522,41 @@ void _dispatch_log(const char *msg, ...); ({ size_t _siz = siz; int _r = snprintf(buf, _siz, __VA_ARGS__); \ _r < 0 ? 0u : ((size_t)_r > _siz ? _siz : (size_t)_r); }) -#if __GNUC__ -#define dispatch_static_assert(e) ({ \ - char __compile_time_assert__[(bool)(e) ? 1 : -1] DISPATCH_UNUSED; \ - }) +#if __has_feature(c_static_assert) || __STDC_VERSION__ >= 201112L +#define _dispatch_static_assert(e, s, ...) _Static_assert(e, s) #else -#define dispatch_static_assert(e) +#define _dispatch_static_assert(e, s, ...) #endif +#define dispatch_static_assert(e, ...) \ + _dispatch_static_assert(e, ##__VA_ARGS__, #e) -#define DISPATCH_BAD_INPUT ((void *_Nonnull)0) -#define DISPATCH_OUT_OF_MEMORY ((void *_Nonnull)0) +#define dispatch_assert_aliases(t1, t2, f) \ + dispatch_static_assert(offsetof(struct t1,f) == offsetof(struct t2,f), \ + #t1 "::" #f " and " #t2 "::" #f " should alias") /* * For reporting bugs within libdispatch when using the "_debug" version of the * library. 
*/ -#if __APPLE__ -#define dispatch_assert(e) do { \ - if (__builtin_constant_p(e)) { \ - dispatch_static_assert(e); \ - } else { \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(DISPATCH_DEBUG && !_e)) { \ - _dispatch_abort(__LINE__, (long)_e); \ - } \ - } \ - } while (0) -#else +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_assert(long e, size_t line) +_dispatch_assert(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(!e) { - if (DISPATCH_DEBUG && !e) _dispatch_abort(line, e); + if (unlikely(DISPATCH_DEBUG && !e)) _dispatch_abort(line, e); } #define dispatch_assert(e) _dispatch_assert((long)(e), __LINE__) -#endif /* __GNUC__ */ -#if __APPLE__ /* * A lot of API return zero upon success and not-zero on fail. Let's capture * and log the non-zero value */ -#define dispatch_assert_zero(e) do { \ - if (__builtin_constant_p(e)) { \ - dispatch_static_assert(e); \ - } else { \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(DISPATCH_DEBUG && _e)) { \ - _dispatch_abort(__LINE__, (long)_e); \ - } \ - } \ - } while (0) -#else +DISPATCH_ALWAYS_INLINE static inline void -_dispatch_assert_zero(long e, size_t line) +_dispatch_assert_zero(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(e) { - if (DISPATCH_DEBUG && e) _dispatch_abort(line, e); + if (unlikely(DISPATCH_DEBUG && e)) _dispatch_abort(line, e); } #define dispatch_assert_zero(e) _dispatch_assert_zero((long)(e), __LINE__) -#endif /* __GNUC__ */ /* * For reporting bugs or impedance mismatches between libdispatch and external @@ -504,76 +564,27 @@ _dispatch_assert_zero(long e, size_t line) * * In particular, we wrap all system-calls with assume() macros. 
*/ -#if __GNUC__ -#define dispatch_assume(e) ({ \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(!_e)) { \ - if (__builtin_constant_p(e)) { \ - dispatch_static_assert(e); \ - } \ - _dispatch_bug(__LINE__, (long)_e); \ - } \ - _e; \ - }) -#else -static inline long -_dispatch_assume(long e, long line) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_assume(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(!e) { - if (!e) _dispatch_bug(line, e); - return e; + if (unlikely(!e)) _dispatch_bug(line, e); } -#define dispatch_assume(e) _dispatch_assume((long)(e), __LINE__) -#endif /* __GNUC__ */ +#define dispatch_assume(e) \ + ({ typeof(e) _e = (e); _dispatch_assume((long)_e, __LINE__); _e; }) /* * A lot of API return zero upon success and not-zero on fail. Let's capture * and log the non-zero value */ -#if __GNUC__ -#define dispatch_assume_zero(e) ({ \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(_e)) { \ - if (__builtin_constant_p(e)) { \ - dispatch_static_assert(e); \ - } \ - _dispatch_bug(__LINE__, (long)_e); \ - } \ - _e; \ - }) -#else -static inline long -_dispatch_assume_zero(long e, long line) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_assume_zero(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(e) { - if (e) _dispatch_bug(line, e); - return e; + if (unlikely(e)) _dispatch_bug(line, e); } -#define dispatch_assume_zero(e) _dispatch_assume_zero((long)(e), __LINE__) -#endif /* __GNUC__ */ - -/* - * For reporting bugs in clients when using the "_debug" version of the library. - */ -#if __GNUC__ -#define dispatch_debug_assert(e, msg, args...) do { \ - if (__builtin_constant_p(e)) { \ - dispatch_static_assert(e); \ - } else { \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(DISPATCH_DEBUG && !_e)) { \ - _dispatch_log("%s() 0x%lx: " msg, __func__, (long)_e, ##args); \ - abort(); \ - } \ - } \ - } while (0) -#else -#define dispatch_debug_assert(e, msg, args...) 
do { \ - typeof(e) _e = (e); /* always eval 'e' */ \ - if (unlikely(DISPATCH_DEBUG && !_e)) { \ - _dispatch_log("%s() 0x%lx: " msg, __FUNCTION__, _e, ##args); \ - abort(); \ - } \ -} while (0) -#endif /* __GNUC__ */ +#define dispatch_assume_zero(e) \ + ({ typeof(e) _e = (e); _dispatch_assume_zero((long)_e, __LINE__); _e; }) /* Make sure the debug statments don't get too stale */ #define _dispatch_debug(x, args...) do { \ @@ -596,6 +607,7 @@ _dispatch_object_debug(dispatch_object_t object, const char *message, ...); #ifdef __BLOCKS__ #define _dispatch_Block_invoke(bb) \ ((dispatch_function_t)((struct Block_layout *)bb)->invoke) + void *_dispatch_Block_copy(void *block); #if __GNUC__ #define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) @@ -603,6 +615,8 @@ void *_dispatch_Block_copy(void *block); void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ +bool _dispatch_parse_bool(const char *v); +bool _dispatch_getenv_bool(const char *env, bool default_v); void _dispatch_temporary_resource_shortage(void); void *_dispatch_calloc(size_t num_items, size_t size); const char *_dispatch_strdup_if_mutable(const char *str); @@ -643,23 +657,71 @@ _dispatch_fork_becomes_unsafe(void) // Older Mac OS X and iOS Simulator fallbacks -#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 \ - && !defined(HAVE_PTHREAD_WORKQUEUE_QOS) +#ifndef HAVE_PTHREAD_WORKQUEUE_QOS +#if !DISPATCH_USE_INTERNAL_WORKQUEUE && HAVE__PTHREAD_WORKQUEUE_INIT && \ + PTHREAD_WORKQUEUE_SPI_VERSION >= 20140213 #define HAVE_PTHREAD_WORKQUEUE_QOS 1 +#else +#define HAVE_PTHREAD_WORKQUEUE_QOS 0 #endif -#if HAVE__PTHREAD_WORKQUEUE_INIT && PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 \ - && !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) +#endif // !defined(HAVE_PTHREAD_WORKQUEUE_QOS) + +#ifndef HAVE_PTHREAD_WORKQUEUE_KEVENT +#if !DISPATCH_USE_INTERNAL_WORKQUEUE && HAVE__PTHREAD_WORKQUEUE_INIT && \ + defined(KEVENT_FLAG_WORKQ) && PTHREAD_WORKQUEUE_SPI_VERSION >= 20150304 
#define HAVE_PTHREAD_WORKQUEUE_KEVENT 1 +#else +#define HAVE_PTHREAD_WORKQUEUE_KEVENT 0 #endif +#endif // !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) + +#ifndef DISPATCH_USE_WORKQUEUE_NARROWING +#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_WORKQUEUE_NARROWING 1 +#else +#define DISPATCH_USE_WORKQUEUE_NARROWING 0 +#endif +#endif // !defined(DISPATCH_USE_WORKQUEUE_NARROWING) + +#ifndef DISPATCH_USE_PTHREAD_ROOT_QUEUES +#if defined(__BLOCKS__) && defined(__APPLE__) +#define DISPATCH_USE_PTHREAD_ROOT_QUEUES 1 // +#else +#define DISPATCH_USE_PTHREAD_ROOT_QUEUES 0 +#endif +#endif // !defined(DISPATCH_USE_PTHREAD_ROOT_QUEUES) + +#ifndef DISPATCH_USE_PTHREAD_POOL +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE +#define DISPATCH_USE_PTHREAD_POOL 1 +#else +#define DISPATCH_USE_PTHREAD_POOL 0 +#endif +#endif // !defined(DISPATCH_USE_PTHREAD_POOL) -#ifndef HAVE_PTHREAD_WORKQUEUE_NARROWING -#if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) -#define HAVE_PTHREAD_WORKQUEUE_NARROWING 0 +#ifndef DISPATCH_USE_KEVENT_WORKQUEUE +#if HAVE_PTHREAD_WORKQUEUE_KEVENT +#define DISPATCH_USE_KEVENT_WORKQUEUE 1 #else -#define HAVE_PTHREAD_WORKQUEUE_NARROWING 1 +#define DISPATCH_USE_KEVENT_WORKQUEUE 0 #endif +#endif // !defined(DISPATCH_USE_KEVENT_WORKQUEUE) + +#if DISPATCH_USE_KEVENT_WORKQUEUE +#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC +#error Invalid build configuration #endif +#endif // DISPATCH_USE_KEVENT_WORKQUEUE + +#ifndef DISPATCH_USE_MGR_THREAD +#if !DISPATCH_USE_KEVENT_WORKQUEUE || DISPATCH_DEBUG || DISPATCH_PROFILE +#define DISPATCH_USE_MGR_THREAD 1 +#else +#define DISPATCH_USE_MGR_THREAD 0 +#endif +#endif // !defined(DISPATCH_USE_MGR_THREAD) + #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS @@ -674,26 +736,17 @@ _dispatch_fork_becomes_unsafe(void) #if !defined(DISPATCH_USE_MEMORYPRESSURE_SOURCE) && DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYPRESSURE_SOURCE 1 #endif -#if 
DISPATCH_USE_MEMORYPRESSURE_SOURCE + #if __has_include() #include -#else +#else // __has_include() @@ -771,6 +830,8 @@ typedef struct dispatch_trace_timer_params_s { #define DISPATCH_TRACE_SUBCLASS_PERF 2 #define DISPATCH_TRACE_SUBCLASS_MACH_MSG 3 #define DISPATCH_TRACE_SUBCLASS_PERF_MON 4 +#define DISPATCH_TRACE_SUBCLASS_QOS_TRACE 5 +#define DISPATCH_TRACE_SUBCLASS_FIREHOSE_TRACE 6 #define DISPATCH_PERF_non_leaf_retarget DISPATCH_CODE(PERF, 1) #define DISPATCH_PERF_post_activate_retarget DISPATCH_CODE(PERF, 2) @@ -778,6 +839,9 @@ typedef struct dispatch_trace_timer_params_s { #define DISPATCH_PERF_delayed_registration DISPATCH_CODE(PERF, 4) #define DISPATCH_PERF_mutable_target DISPATCH_CODE(PERF, 5) #define DISPATCH_PERF_strict_bg_timer DISPATCH_CODE(PERF, 6) +#define DISPATCH_PERF_suspended_timer_fire DISPATCH_CODE(PERF, 7) +#define DISPATCH_PERF_handlerless_source_fire DISPATCH_CODE(PERF, 8) +#define DISPATCH_PERF_source_registration_without_qos DISPATCH_CODE(PERF, 9) #define DISPATCH_MACH_MSG_hdr_move DISPATCH_CODE(MACH_MSG, 1) @@ -785,6 +849,32 @@ typedef struct dispatch_trace_timer_params_s { #define DISPATCH_PERF_MON_worker_thread_end DISPATCH_CODE_END(PERF_MON, 1) #define DISPATCH_PERF_MON_worker_useless DISPATCH_CODE(PERF_MON, 2) +#define DISPATCH_QOS_TRACE_queue_creation_start DISPATCH_CODE_START(QOS_TRACE, 1) +#define DISPATCH_QOS_TRACE_queue_creation_end DISPATCH_CODE_END(QOS_TRACE, 1) +#define DISPATCH_QOS_TRACE_queue_dispose DISPATCH_CODE(QOS_TRACE, 2) + +#define DISPATCH_QOS_TRACE_private_block_creation DISPATCH_CODE(QOS_TRACE, 3) +#define DISPATCH_QOS_TRACE_private_block_dispose DISPATCH_CODE(QOS_TRACE, 4) + +#define DISPATCH_QOS_TRACE_continuation_push_eb DISPATCH_CODE(QOS_TRACE, 5) +#define DISPATCH_QOS_TRACE_continuation_push_ab DISPATCH_CODE(QOS_TRACE, 6) +#define DISPATCH_QOS_TRACE_continuation_push_f DISPATCH_CODE(QOS_TRACE, 7) +#define DISPATCH_QOS_TRACE_source_push DISPATCH_CODE(QOS_TRACE, 8) + +#define DISPATCH_QOS_TRACE_continuation_pop 
DISPATCH_CODE(QOS_TRACE, 9) +#define DISPATCH_QOS_TRACE_source_pop DISPATCH_CODE(QOS_TRACE, 10) + +#define DISPATCH_QOS_TRACE_queue_item_complete DISPATCH_CODE(QOS_TRACE, 11) + +#define DISPATCH_QOS_TRACE_src_callout DISPATCH_CODE(QOS_TRACE, 12) +#define DISPATCH_QOS_TRACE_src_dispose DISPATCH_CODE(QOS_TRACE, 13) + +#define DISPATCH_FIREHOSE_TRACE_reserver_gave_up DISPATCH_CODE(FIREHOSE_TRACE, 1) +#define DISPATCH_FIREHOSE_TRACE_reserver_wait DISPATCH_CODE(FIREHOSE_TRACE, 2) +#define DISPATCH_FIREHOSE_TRACE_allocator DISPATCH_CODE(FIREHOSE_TRACE, 3) +#define DISPATCH_FIREHOSE_TRACE_wait_for_logd DISPATCH_CODE(FIREHOSE_TRACE, 4) +#define DISPATCH_FIREHOSE_TRACE_chunk_install DISPATCH_CODE(FIREHOSE_TRACE, 5) + DISPATCH_ALWAYS_INLINE static inline void _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, @@ -815,6 +905,8 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define _dispatch_ktrace1(code, a) _dispatch_ktrace(code, a, 0, 0, 0) #define _dispatch_ktrace0(code) _dispatch_ktrace(code, 0, 0, 0, 0) +#define BITPACK_UINT32_PAIR(a, b) (((uint64_t) (a) << 32) | (uint64_t) (b)) + #ifndef MACH_MSGH_BITS_VOUCHER_MASK #define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 #define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ @@ -937,17 +1029,14 @@ mach_port_t _dispatch_get_mach_host_port(void); #if HAVE_PTHREAD_WORKQUEUE_QOS #if DISPATCH_DEBUG -extern int _dispatch_set_qos_class_enabled; +extern bool _dispatch_set_qos_class_enabled; #else #define _dispatch_set_qos_class_enabled (1) #endif #endif // HAVE_PTHREAD_WORKQUEUE_QOS #if DISPATCH_USE_KEVENT_WORKQUEUE -#if !HAVE_PTHREAD_WORKQUEUE_QOS || !EV_UDATA_SPECIFIC -#error Invalid build configuration -#endif #if DISPATCH_USE_MGR_THREAD -extern int _dispatch_kevent_workqueue_enabled; +extern bool _dispatch_kevent_workqueue_enabled; #else #define _dispatch_kevent_workqueue_enabled (1) #endif @@ -971,4 +1060,6 @@ extern int _dispatch_kevent_workqueue_enabled; #include "inline_internal.h" #include 
"firehose/firehose_internal.h" +__END_DECLS + #endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/introspection.c b/src/introspection.c index 8692a8bc5..95f6eb143 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -29,54 +29,37 @@ #include "introspection_private.h" typedef struct dispatch_introspection_thread_s { +#if !OS_OBJECT_HAVE_OBJC1 void *dit_isa; - TAILQ_ENTRY(dispatch_introspection_thread_s) dit_list; +#endif + LIST_ENTRY(dispatch_introspection_thread_s) dit_list; pthread_t thread; +#if OS_OBJECT_HAVE_OBJC1 + void *dit_isa; +#endif dispatch_queue_t *queue; } dispatch_introspection_thread_s; +dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) == + offsetof(struct dispatch_introspection_thread_s, dit_isa), + "These fields must alias so that leaks instruments work"); typedef struct dispatch_introspection_thread_s *dispatch_introspection_thread_t; struct dispatch_introspection_state_s _dispatch_introspection = { - .threads = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.threads), - .queues = TAILQ_HEAD_INITIALIZER(_dispatch_introspection.queues), + .threads = LIST_HEAD_INITIALIZER(_dispatch_introspection.threads), + .queues = LIST_HEAD_INITIALIZER(_dispatch_introspection.queues), }; static void _dispatch_introspection_thread_remove(void *ctxt); -static void _dispatch_introspection_queue_order_dispose(dispatch_queue_t dq); +static void _dispatch_introspection_queue_order_dispose( + dispatch_queue_introspection_context_t dqic); #pragma mark - #pragma mark dispatch_introspection_init -DISPATCH_NOINLINE -static bool -_dispatch_getenv_bool(const char *env, bool default_v) -{ - const char *v = getenv(env); - - if (v) { - return strcasecmp(v, "YES") == 0 || strcasecmp(v, "Y") == 0 || - strcasecmp(v, "TRUE") == 0 || atoi(v); - } - return default_v; -} - void _dispatch_introspection_init(void) { - TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, - &_dispatch_main_q, diq_list); - TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, - 
&_dispatch_mgr_q, diq_list); -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, - _dispatch_mgr_q.do_targetq, diq_list); -#endif - for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, - &_dispatch_root_queues[i], diq_list); - } - _dispatch_introspection.debug_queue_inversions = _dispatch_getenv_bool("LIBDISPATCH_DEBUG_QUEUE_INVERSIONS", false); @@ -93,6 +76,15 @@ _dispatch_introspection_init(void) _dispatch_thread_key_create(&dispatch_introspection_key, _dispatch_introspection_thread_remove); _dispatch_introspection_thread_add(); // add main thread + + for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_trace_queue_create(&_dispatch_root_queues[i]); + } +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + _dispatch_trace_queue_create(_dispatch_mgr_q.do_targetq); +#endif + _dispatch_trace_queue_create(&_dispatch_main_q); + _dispatch_trace_queue_create(&_dispatch_mgr_q); } const struct dispatch_introspection_versions_s @@ -133,7 +125,7 @@ _dispatch_introspection_thread_add(void) (void*)thread + _dispatch_introspection.thread_queue_offset; _dispatch_thread_setspecific(dispatch_introspection_key, dit); _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection.threads, dit, dit_list); + LIST_INSERT_HEAD(&_dispatch_introspection.threads, dit, dit_list); _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); } @@ -142,7 +134,7 @@ _dispatch_introspection_thread_remove(void *ctxt) { dispatch_introspection_thread_t dit = ctxt; _dispatch_unfair_lock_lock(&_dispatch_introspection.threads_lock); - TAILQ_REMOVE(&_dispatch_introspection.threads, dit, dit_list); + LIST_REMOVE(dit, dit_list); _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); _dispatch_continuation_free((void*)dit); _dispatch_thread_setspecific(dispatch_introspection_key, NULL); @@ -151,16 +143,16 @@ 
_dispatch_introspection_thread_remove(void *ctxt) #pragma mark - #pragma mark dispatch_introspection_info -DISPATCH_USED inline -dispatch_introspection_queue_s -dispatch_introspection_queue_get_info(dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline dispatch_introspection_queue_s +_dispatch_introspection_lane_get_info(dispatch_lane_class_t dqu) { - bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) || - (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT); + dispatch_lane_t dq = dqu._dl; + bool global = _dispatch_object_is_global(dq); uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); dispatch_introspection_queue_s diq = { - .queue = dq, + .queue = dq->_as_dq, .target_queue = dq->do_targetq, .label = dq->dq_label, .serialnum = dq->dq_serialnum, @@ -171,11 +163,43 @@ dispatch_introspection_queue_get_info(dispatch_queue_t dq) .draining = (dq->dq_items_head == (void*)~0ul) || (!dq->dq_items_head && dq->dq_items_tail), .global = global, - .main = (dq == &_dispatch_main_q), + .main = dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE, + }; + return diq; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_introspection_queue_s +_dispatch_introspection_workloop_get_info(dispatch_workloop_t dwl) +{ + uint64_t dq_state = os_atomic_load2o(dwl, dq_state, relaxed); + + dispatch_introspection_queue_s diq = { + .queue = dwl->_as_dq, + .target_queue = dwl->do_targetq, + .label = dwl->dq_label, + .serialnum = dwl->dq_serialnum, + .width = 1, + .suspend_count = 0, + .enqueued = _dq_state_is_enqueued(dq_state), + .barrier = _dq_state_is_in_barrier(dq_state), + .draining = 0, + .global = 0, + .main = 0, }; return diq; } +DISPATCH_USED inline +dispatch_introspection_queue_s +dispatch_introspection_queue_get_info(dispatch_queue_t dq) +{ + if (dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) { + return _dispatch_introspection_workloop_get_info(upcast(dq)._dwl); + } + return _dispatch_introspection_lane_get_info(upcast(dq)._dl); +} + static inline void 
_dispatch_introspection_continuation_get_info(dispatch_queue_t dq, dispatch_continuation_t dc, dispatch_introspection_queue_item_t diqi) @@ -190,8 +214,9 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, flags = 0; switch (dc_type(dc)) { #if HAVE_PTHREAD_WORKQUEUE_QOS - case DC_OVERRIDE_STEALING_TYPE: - case DC_OVERRIDE_OWNING_TYPE: + case DISPATCH_CONTINUATION_TYPE(WORKLOOP_STEALING): + case DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING): + case DISPATCH_CONTINUATION_TYPE(OVERRIDE_OWNING): dc = dc->dc_data; if (!_dispatch_object_is_continuation(dc)) { // these really wrap queues so we should hide the continuation type @@ -202,40 +227,42 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, } return _dispatch_introspection_continuation_get_info(dq, dc, diqi); #endif - case DC_ASYNC_REDIRECT_TYPE: + case DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT): DISPATCH_INTERNAL_CRASH(0, "Handled by the caller"); - case DC_MACH_ASYNC_REPLY_TYPE: + case DISPATCH_CONTINUATION_TYPE(MACH_ASYNC_REPLY): break; - case DC_MACH_SEND_BARRRIER_DRAIN_TYPE: + case DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRRIER_DRAIN): break; - case DC_MACH_SEND_BARRIER_TYPE: - case DC_MACH_RECV_BARRIER_TYPE: + case DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER): + case DISPATCH_CONTINUATION_TYPE(MACH_RECV_BARRIER): flags = (uintptr_t)dc->dc_data; dq = dq->do_targetq; break; default: DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type"); } - } else { - if (flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; - waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); - ctxt = dsc->dsc_ctxt; - func = dsc->dsc_func; - } - if (func == _dispatch_apply_invoke || - func == _dispatch_apply_redirect_invoke) { - dispatch_apply_t da = ctxt; - if (da->da_todo) { - dc = da->da_dc; - dq = dc->dc_data; - ctxt = dc->dc_ctxt; - func = dc->dc_func; - apply = true; - } + } else if (flags & (DC_FLAG_SYNC_WAITER | DC_FLAG_ASYNC_AND_WAIT)) { + 
dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); + ctxt = dsc->dsc_ctxt; + func = dsc->dsc_func; + } else if (func == _dispatch_apply_invoke || + func == _dispatch_apply_redirect_invoke) { + dispatch_apply_t da = ctxt; + if (da->da_todo) { + dc = da->da_dc; + dq = dc->dc_data; + ctxt = dc->dc_ctxt; + func = dc->dc_func; + apply = true; } } - if (flags & DISPATCH_OBJ_BLOCK_BIT) { + + if (flags & DC_FLAG_BLOCK_WITH_PRIVATE_DATA) { + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(ctxt); + diqi->type = dispatch_introspection_queue_item_type_block; + func = _dispatch_Block_invoke(dbpd->dbpd_block); + } else if (flags & DC_FLAG_BLOCK) { diqi->type = dispatch_introspection_queue_item_type_block; func = _dispatch_Block_invoke(ctxt); } else { @@ -247,11 +274,11 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, .context = ctxt, .function = func, .waiter = waiter, - .barrier = (flags & DISPATCH_OBJ_BARRIER_BIT) || dq->dq_width == 1, - .sync = flags & DISPATCH_OBJ_SYNC_WAITER_BIT, + .barrier = (flags & DC_FLAG_BARRIER) || dq->dq_width == 1, + .sync = (bool)(flags & (DC_FLAG_SYNC_WAITER | DC_FLAG_ASYNC_AND_WAIT)), .apply = apply, }; - if (flags & DISPATCH_OBJ_GROUP_BIT) { + if (flags & DC_FLAG_GROUP_ASYNC) { dispatch_group_t group = dc->dc_data; if (dx_type(group) == DISPATCH_GROUP_TYPE) { diqi->function.group = group; @@ -267,7 +294,7 @@ _dispatch_introspection_object_get_info(dispatch_object_t dou) .object = dou._dc, .target_queue = dou._do->do_targetq, .type = (void*)dou._do->do_vtable, - .kind = dx_kind(dou._do), + .kind = _dispatch_object_class_name(dou._do), }; return dio; } @@ -284,7 +311,7 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) if (dc) { ctxt = dc->dc_ctxt; handler = dc->dc_func; - hdlr_is_block = (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT); + hdlr_is_block = (dc->dc_flags & DC_FLAG_BLOCK); } uint64_t dq_state = os_atomic_load2o(ds, dq_state, 
relaxed); @@ -297,13 +324,34 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) .enqueued = _dq_state_is_enqueued(dq_state), .handler_is_block = hdlr_is_block, .timer = dr->du_is_timer, - .after = dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER), + .after = dr->du_is_timer && (dr->du_timer_flags & DISPATCH_TIMER_AFTER), .type = (unsigned long)dr->du_filter, .handle = (unsigned long)dr->du_ident, }; return dis; } +static inline +dispatch_introspection_source_s +_dispatch_introspection_mach_get_info(dispatch_mach_t dm) +{ + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + uint64_t dq_state = os_atomic_load2o(dm, dq_state, relaxed); + + dispatch_introspection_source_s dis = { + .source = upcast(dm)._ds, + .target_queue = dm->do_targetq, + .context = dmrr->dmrr_handler_ctxt, + .handler = (void *)dmrr->dmrr_handler_func, + .suspend_count = _dq_state_suspend_cnt(dq_state) + dm->dq_side_suspend_cnt, + .enqueued = _dq_state_is_enqueued(dq_state), + .handler_is_block = dmrr->dmrr_handler_is_block, + .type = (unsigned long)dmrr->du_filter, + .handle = (unsigned long)dmrr->du_ident, + .is_xpc = dm->dm_is_xpc, + }; + return dis; +} static inline dispatch_introspection_queue_thread_s _dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit) @@ -331,21 +379,25 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, if (_dispatch_object_has_vtable(dou._do)) { unsigned long type = dx_type(dou._do); unsigned long metatype = type & _DISPATCH_META_TYPE_MASK; - if (type == DC_ASYNC_REDIRECT_TYPE) { + if (type == DISPATCH_CONTINUATION_TYPE(ASYNC_REDIRECT)) { dq = dc->dc_data; dc = dc->dc_other; goto again; } if (metatype == _DISPATCH_CONTINUATION_TYPE) { _dispatch_introspection_continuation_get_info(dq, dc, &diqi); - } else if (metatype == _DISPATCH_QUEUE_TYPE && - type != DISPATCH_QUEUE_SPECIFIC_TYPE) { + } else if (metatype == _DISPATCH_LANE_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_queue; + diqi.queue = 
_dispatch_introspection_lane_get_info(dou._dl); + } else if (metatype == _DISPATCH_WORKLOOP_TYPE) { diqi.type = dispatch_introspection_queue_item_type_queue; - diqi.queue = dispatch_introspection_queue_get_info(dou._dq); - } else if (metatype == _DISPATCH_SOURCE_TYPE && - type != DISPATCH_MACH_CHANNEL_TYPE) { + diqi.queue = _dispatch_introspection_workloop_get_info(dou._dwl); + } else if (type == DISPATCH_SOURCE_KEVENT_TYPE) { diqi.type = dispatch_introspection_queue_item_type_source; diqi.source = _dispatch_introspection_source_get_info(dou._ds); + } else if (type == DISPATCH_MACH_CHANNEL_TYPE) { + diqi.type = dispatch_introspection_queue_item_type_source; + diqi.source = _dispatch_introspection_mach_get_info(dou._dm); } else { diqi.type = dispatch_introspection_queue_item_type_object; diqi.object = _dispatch_introspection_object_get_info(dou._do); @@ -364,17 +416,22 @@ dispatch_queue_t dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_introspection_queue_t queues) { - dispatch_queue_t next; - next = start ? start : TAILQ_FIRST(&_dispatch_introspection.queues); + dispatch_queue_introspection_context_t next; + + if (start) { + next = start->do_finalizer; + } else { + next = LIST_FIRST(&_dispatch_introspection.queues); + } while (count--) { if (!next) { queues->queue = NULL; - break; + return NULL; } - *queues++ = dispatch_introspection_queue_get_info(next); - next = TAILQ_NEXT(next, diq_list); + *queues++ = dispatch_introspection_queue_get_info(next->dqic_queue._dq); + next = LIST_NEXT(next, dqic_list); } - return next; + return next->dqic_queue._dq; } DISPATCH_USED @@ -383,24 +440,26 @@ dispatch_introspection_get_queue_threads(dispatch_continuation_t start, size_t count, dispatch_introspection_queue_thread_t threads) { dispatch_introspection_thread_t next = start ? 
(void*)start : - TAILQ_FIRST(&_dispatch_introspection.threads); + LIST_FIRST(&_dispatch_introspection.threads); while (count--) { if (!next) { threads->object = NULL; break; } *threads++ = _dispatch_introspection_thread_get_info(next); - next = TAILQ_NEXT(next, dit_list); + next = LIST_NEXT(next, dit_list); } return (void*)next; } DISPATCH_USED dispatch_continuation_t -dispatch_introspection_queue_get_items(dispatch_queue_t dq, +dispatch_introspection_queue_get_items(dispatch_queue_t _dq, dispatch_continuation_t start, size_t count, dispatch_introspection_queue_item_t items) { + if (dx_metatype(_dq) != _DISPATCH_LANE_TYPE) return NULL; + dispatch_lane_t dq = upcast(_dq)._dl; dispatch_continuation_t next = start ? start : dq->dq_items_head == (void*)~0ul ? NULL : (void*)dq->dq_items_head; while (count--) { @@ -408,12 +467,35 @@ dispatch_introspection_queue_get_items(dispatch_queue_t dq, items->type = dispatch_introspection_queue_item_type_none; break; } - *items++ = dispatch_introspection_queue_item_get_info(dq, next); + *items++ = dispatch_introspection_queue_item_get_info(_dq, next); next = next->do_next; } return next; } +#pragma mark - +#pragma mark tracing & introspection helpers + +struct dispatch_object_s * +_dispatch_introspection_queue_fake_sync_push_pop(dispatch_queue_t dq, + void *ctxt, dispatch_function_t func, uintptr_t dc_flags) +{ + // fake just what introspection really needs here: flags, func, ctxt, queue, + // dc_priority, and of course waiter + struct dispatch_sync_context_s dsc = { + .dc_priority = _dispatch_get_priority(), + .dc_flags = DC_FLAG_SYNC_WAITER | dc_flags, + .dc_other = dq, + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_waiter = _dispatch_tid_self(), + }; + + _dispatch_trace_item_push(dq, &dsc); + _dispatch_trace_item_pop(dq, &dsc); + return (struct dispatch_object_s *)(uintptr_t)&dsc; +} + #pragma mark - #pragma mark dispatch_introspection_hooks @@ -428,6 +510,7 @@ dispatch_introspection_hooks_s 
_dispatch_introspection_hook_callouts_enabled = { .queue_item_enqueue = DISPATCH_INTROSPECTION_NO_HOOK, .queue_item_dequeue = DISPATCH_INTROSPECTION_NO_HOOK, .queue_item_complete = DISPATCH_INTROSPECTION_NO_HOOK, + .runtime_event = DISPATCH_INTROSPECTION_NO_HOOK, }; #define DISPATCH_INTROSPECTION_HOOKS_COUNT (( \ @@ -436,12 +519,12 @@ dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { sizeof(dispatch_function_t)) #define DISPATCH_INTROSPECTION_HOOK_ENABLED(h) \ - (slowpath(_dispatch_introspection_hooks.h)) + unlikely(_dispatch_introspection_hooks.h) #define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \ typeof(_dispatch_introspection_hooks.h) _h; \ _h = _dispatch_introspection_hooks.h; \ - if (slowpath((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \ + if (unlikely((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \ _h(__VA_ARGS__); \ } }) @@ -515,20 +598,64 @@ _dispatch_introspection_queue_create_hook(dispatch_queue_t dq) dispatch_introspection_hook_callout_queue_create(&diq); } -dispatch_queue_t +dispatch_function_t +_dispatch_object_finalizer(dispatch_object_t dou) +{ + dispatch_queue_introspection_context_t dqic; + switch (dx_metatype(dou._do)) { + case _DISPATCH_LANE_TYPE: + case _DISPATCH_WORKLOOP_TYPE: + dqic = dou._dq->do_finalizer; + return dqic->dqic_finalizer; + default: + return dou._do->do_finalizer; + } +} + +void +_dispatch_object_set_finalizer(dispatch_object_t dou, + dispatch_function_t finalizer) +{ + dispatch_queue_introspection_context_t dqic; + switch (dx_metatype(dou._do)) { + case _DISPATCH_LANE_TYPE: + case _DISPATCH_WORKLOOP_TYPE: + dqic = dou._dq->do_finalizer; + dqic->dqic_finalizer = finalizer; + break; + default: + dou._do->do_finalizer = finalizer; + break; + } +} + +dispatch_queue_class_t _dispatch_introspection_queue_create(dispatch_queue_t dq) { - TAILQ_INIT(&dq->diq_order_top_head); - TAILQ_INIT(&dq->diq_order_bottom_head); + dispatch_queue_introspection_context_t dqic; + size_t sz = sizeof(struct 
dispatch_queue_introspection_context_s); + + if (!_dispatch_introspection.debug_queue_inversions) { + sz = offsetof(struct dispatch_queue_introspection_context_s, + __dqic_no_queue_inversion); + } + dqic = _dispatch_calloc(1, sz); + dqic->dqic_queue._dq = dq; + if (_dispatch_introspection.debug_queue_inversions) { + LIST_INIT(&dqic->dqic_order_top_head); + LIST_INIT(&dqic->dqic_order_bottom_head); + } + dq->do_finalizer = dqic; + _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - TAILQ_INSERT_TAIL(&_dispatch_introspection.queues, dq, diq_list); + LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list); _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_create, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_create)) { _dispatch_introspection_queue_create_hook(dq); } - return dq; + return upcast(dq)._dqu; } DISPATCH_NOINLINE @@ -551,15 +678,22 @@ _dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) void _dispatch_introspection_queue_dispose(dispatch_queue_t dq) { + dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { _dispatch_introspection_queue_dispose_hook(dq); } _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - TAILQ_REMOVE(&_dispatch_introspection.queues, dq, diq_list); - _dispatch_introspection_queue_order_dispose(dq); + LIST_REMOVE(dqic, dqic_list); + if (_dispatch_introspection.debug_queue_inversions) { + _dispatch_introspection_queue_order_dispose(dqic); + } _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); + + dq->do_finalizer = dqic->dqic_finalizer; // restore the real finalizer + free(dqic); } DISPATCH_NOINLINE @@ -591,6 +725,85 @@ _dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, } } +void +_dispatch_trace_item_push_internal(dispatch_queue_t dq, + 
dispatch_object_t dou) +{ + if (dx_metatype(dq) != _DISPATCH_LANE_TYPE) { + return; + } + + dispatch_continuation_t dc = dou._dc; + + /* Only track user continuations */ + if (_dispatch_object_is_continuation(dou) && + _dispatch_object_has_vtable(dou) && dc_type(dc) > 0){ + return; + } + + struct dispatch_introspection_queue_item_s idc; + idc = dispatch_introspection_queue_item_get_info(dq, dc); + + switch (idc.type) { + case dispatch_introspection_queue_item_type_none: + break; + case dispatch_introspection_queue_item_type_block: + { + uintptr_t dc_flags = 0; + dc_flags |= (idc.block.barrier ? DC_BARRIER : 0); + dc_flags |= (idc.block.sync ? DC_SYNC : 0); + dc_flags |= (idc.block.apply ? DC_APPLY : 0); + + if (dc->dc_flags & DC_FLAG_BLOCK_WITH_PRIVATE_DATA) { + _dispatch_ktrace4(DISPATCH_QOS_TRACE_continuation_push_eb, + dou._do_value, + (uintptr_t)idc.block.block, /* Heap allocated block ptr */ + BITPACK_UINT32_PAIR(dq->dq_serialnum, dc_flags), + BITPACK_UINT32_PAIR(_dispatch_get_priority(), + dc->dc_priority)); + } else { + _dispatch_ktrace4(DISPATCH_QOS_TRACE_continuation_push_ab, + dou._do_value, + (uintptr_t)idc.block.block_invoke, /* Function pointer */ + BITPACK_UINT32_PAIR(dq->dq_serialnum, dc_flags), + BITPACK_UINT32_PAIR(_dispatch_get_priority(), + dc->dc_priority)); + } + + break; + } + case dispatch_introspection_queue_item_type_function: + { + uintptr_t dc_flags = 0; + dc_flags |= (idc.function.barrier ? DC_BARRIER : 0); + dc_flags |= (idc.function.sync ? DC_SYNC : 0); + dc_flags |= (idc.function.apply ? 
DC_APPLY : 0); + + _dispatch_ktrace4(DISPATCH_QOS_TRACE_continuation_push_f, + dou._do_value, + (uintptr_t)idc.function.function, /* Function pointer */ + BITPACK_UINT32_PAIR(dq->dq_serialnum, dc_flags), + BITPACK_UINT32_PAIR(_dispatch_get_priority(), dc->dc_priority)); + break; + } + case dispatch_introspection_queue_item_type_object: + /* Generic dispatch object - we don't know how to handle this yet */ + break; + case dispatch_introspection_queue_item_type_queue: + /* Dispatch queue - we don't know how to handle this yet */ + break; + case dispatch_introspection_queue_item_type_source: + /* Dispatch sources */ + _dispatch_ktrace4(DISPATCH_QOS_TRACE_source_push, + dou._do_value, + idc.source.type, + (uintptr_t)idc.source.handler, + dq->dq_serialnum); + break; + } +} + + DISPATCH_NOINLINE void dispatch_introspection_hook_callout_queue_item_dequeue(dispatch_queue_t queue, @@ -620,6 +833,47 @@ _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, } } +void +_dispatch_trace_item_pop_internal(dispatch_queue_t dq, + dispatch_object_t dou) +{ + if (dx_metatype(dq) != _DISPATCH_LANE_TYPE) { + return; + } + + dispatch_continuation_t dc = dou._dc; + + /* Only track user continuations */ + if (_dispatch_object_is_continuation(dou) && + _dispatch_object_has_vtable(dou) && dc_type(dc) > 0){ + return; + } + + struct dispatch_introspection_queue_item_s idc; + idc = dispatch_introspection_queue_item_get_info(dq, dc); + + switch (idc.type) { + case dispatch_introspection_queue_item_type_none: + break; + case dispatch_introspection_queue_item_type_block: + case dispatch_introspection_queue_item_type_function: + _dispatch_ktrace3(DISPATCH_QOS_TRACE_continuation_pop, + dou._do_value, _dispatch_get_priority(), dq->dq_serialnum); + break; + case dispatch_introspection_queue_item_type_object: + /* Generic dispatch object - we don't know how to handle this yet */ + break; + case dispatch_introspection_queue_item_type_queue: + /* Dispatch queue - we don't know how to handle 
this yet */ + break; + case dispatch_introspection_queue_item_type_source: + /* Dispatch sources */ + _dispatch_ktrace2(DISPATCH_QOS_TRACE_source_pop, + dou._do_value, dq->dq_serialnum); + break; + } +} + DISPATCH_NOINLINE void dispatch_introspection_hook_callout_queue_item_complete( @@ -652,6 +906,20 @@ _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f) queue_callout_begin, dq, ctxt, f); } +void +_dispatch_trace_source_callout_entry_internal(dispatch_source_t ds, long kind, + dispatch_queue_t dq, dispatch_continuation_t dc) +{ + if (dx_metatype(dq) != _DISPATCH_LANE_TYPE) { + return; + } + + _dispatch_ktrace3(DISPATCH_QOS_TRACE_src_callout, + (uintptr_t)ds, (uintptr_t)dc, kind); + + _dispatch_trace_item_push_internal(dq, (dispatch_object_t) dc); +} + void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) { @@ -660,13 +928,23 @@ _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f) queue_callout_end, dq, ctxt, f); } +void +_dispatch_introspection_runtime_event( + enum dispatch_introspection_runtime_event event, + void *ptr, uint64_t value) +{ + if (DISPATCH_INTROSPECTION_HOOK_ENABLED(runtime_event)) { + DISPATCH_INTROSPECTION_HOOK_CALLOUT(runtime_event, event, ptr, value); + } +} + #pragma mark - #pragma mark dispatch introspection deadlock detection typedef struct dispatch_queue_order_entry_s *dispatch_queue_order_entry_t; struct dispatch_queue_order_entry_s { - TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list; - TAILQ_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list; + LIST_ENTRY(dispatch_queue_order_entry_s) dqoe_order_top_list; + LIST_ENTRY(dispatch_queue_order_entry_s) dqoe_order_bottom_list; const char *dqoe_top_label; const char *dqoe_bottom_label; dispatch_queue_t dqoe_top_tq; @@ -676,39 +954,43 @@ struct dispatch_queue_order_entry_s { }; static void -_dispatch_introspection_queue_order_dispose(dispatch_queue_t dq) +_dispatch_introspection_queue_order_dispose( + 
dispatch_queue_introspection_context_t dqic) { + dispatch_queue_introspection_context_t o_dqic; dispatch_queue_order_entry_t e, te; dispatch_queue_t otherq; - TAILQ_HEAD(, dispatch_queue_order_entry_s) head; + LIST_HEAD(, dispatch_queue_order_entry_s) head; // this whole thing happens with _dispatch_introspection.queues_lock locked - _dispatch_unfair_lock_lock(&dq->diq_order_top_head_lock); - head.tqh_first = dq->diq_order_top_head.tqh_first; - head.tqh_last = dq->diq_order_top_head.tqh_last; - TAILQ_INIT(&dq->diq_order_top_head); - _dispatch_unfair_lock_unlock(&dq->diq_order_top_head_lock); + _dispatch_unfair_lock_lock(&dqic->dqic_order_top_head_lock); + LIST_INIT(&head); + LIST_SWAP(&head, &dqic->dqic_order_top_head, + dispatch_queue_order_entry_s, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&dqic->dqic_order_top_head_lock); - TAILQ_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { + LIST_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { otherq = e->dqoe_bottom_tq; - _dispatch_unfair_lock_lock(&otherq->diq_order_bottom_head_lock); - TAILQ_REMOVE(&otherq->diq_order_bottom_head, e, dqoe_order_bottom_list); - _dispatch_unfair_lock_unlock(&otherq->diq_order_bottom_head_lock); + o_dqic = otherq->do_finalizer; + _dispatch_unfair_lock_lock(&o_dqic->dqic_order_bottom_head_lock); + LIST_REMOVE(e, dqoe_order_bottom_list); + _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_bottom_head_lock); free(e); } - _dispatch_unfair_lock_lock(&dq->diq_order_bottom_head_lock); - head.tqh_first = dq->diq_order_bottom_head.tqh_first; - head.tqh_last = dq->diq_order_bottom_head.tqh_last; - TAILQ_INIT(&dq->diq_order_bottom_head); - _dispatch_unfair_lock_unlock(&dq->diq_order_bottom_head_lock); + _dispatch_unfair_lock_lock(&dqic->dqic_order_bottom_head_lock); + LIST_INIT(&head); + LIST_SWAP(&head, &dqic->dqic_order_bottom_head, + dispatch_queue_order_entry_s, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&dqic->dqic_order_bottom_head_lock); - TAILQ_FOREACH_SAFE(e, &head, 
dqoe_order_bottom_list, te) { + LIST_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { otherq = e->dqoe_top_tq; - _dispatch_unfair_lock_lock(&otherq->diq_order_top_head_lock); - TAILQ_REMOVE(&otherq->diq_order_top_head, e, dqoe_order_top_list); - _dispatch_unfair_lock_unlock(&otherq->diq_order_top_head_lock); + o_dqic = otherq->do_finalizer; + _dispatch_unfair_lock_lock(&o_dqic->dqic_order_top_head_lock); + LIST_REMOVE(e, dqoe_order_top_list); + _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_top_head_lock); free(e); } } @@ -777,23 +1059,24 @@ _dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) { struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; + dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; // has anyone above bottom_tq ever sync()ed onto top_tq ? - _dispatch_unfair_lock_lock(&bottom_tq->diq_order_top_head_lock); - TAILQ_FOREACH(dof.dof_e, &bottom_tq->diq_order_top_head, dqoe_order_top_list) { - if (slowpath(dof.dof_e->dqoe_bottom_tq == top_tq)) { + _dispatch_unfair_lock_lock(&btqic->dqic_order_top_head_lock); + LIST_FOREACH(dof.dof_e, &btqic->dqic_order_top_head, dqoe_order_top_list) { + if (unlikely(dof.dof_e->dqoe_bottom_tq == top_tq)) { _dispatch_introspection_lock_inversion_fail(&dof, top_q, bottom_q); } _dispatch_introspection_order_check(&dof, top_q, top_tq, bottom_q, dof.dof_e->dqoe_bottom_tq); } - _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_top_head_lock); + _dispatch_unfair_lock_unlock(&btqic->dqic_order_top_head_lock); } void -_dispatch_introspection_order_record(dispatch_queue_t top_q, - dispatch_queue_t bottom_q) +_dispatch_introspection_order_record(dispatch_queue_t top_q) { + dispatch_queue_t bottom_q = _dispatch_queue_get_current(); dispatch_queue_order_entry_t e, it; const int pcs_skip = 1, pcs_n_max = 128; void *pcs[pcs_n_max]; @@ -805,17 +1088,19 @@ _dispatch_introspection_order_record(dispatch_queue_t top_q, dispatch_queue_t 
top_tq = _dispatch_queue_bottom_target_queue(top_q); dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); + dispatch_queue_introspection_context_t ttqic = top_tq->do_finalizer; + dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; - _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); - TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { + _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock); + LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) { if (it->dqoe_bottom_tq == bottom_tq) { // that dispatch_sync() is known and validated // move on - _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock); return; } } - _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock); _dispatch_introspection_order_check(NULL, top_q, top_tq, bottom_q, bottom_tq); pcs_n = MAX(backtrace(pcs, pcs_n_max) - pcs_skip, 0); @@ -852,22 +1137,22 @@ _dispatch_introspection_order_record(dispatch_queue_t top_q, e->dqoe_bottom_label = bottom_q->dq_label ?: ""; } - _dispatch_unfair_lock_lock(&top_tq->diq_order_top_head_lock); - TAILQ_FOREACH(it, &top_tq->diq_order_top_head, dqoe_order_top_list) { - if (slowpath(it->dqoe_bottom_tq == bottom_tq)) { + _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock); + LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) { + if (unlikely(it->dqoe_bottom_tq == bottom_tq)) { // someone else validated it at the same time // go away quickly - _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + _dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock); free(e); return; } } - TAILQ_INSERT_HEAD(&top_tq->diq_order_top_head, e, dqoe_order_top_list); - _dispatch_unfair_lock_unlock(&top_tq->diq_order_top_head_lock); + LIST_INSERT_HEAD(&ttqic->dqic_order_top_head, e, dqoe_order_top_list); + 
_dispatch_unfair_lock_unlock(&ttqic->dqic_order_top_head_lock); - _dispatch_unfair_lock_lock(&bottom_tq->diq_order_bottom_head_lock); - TAILQ_INSERT_HEAD(&bottom_tq->diq_order_bottom_head, e, dqoe_order_bottom_list); - _dispatch_unfair_lock_unlock(&bottom_tq->diq_order_bottom_head_lock); + _dispatch_unfair_lock_lock(&btqic->dqic_order_bottom_head_lock); + LIST_INSERT_HEAD(&btqic->dqic_order_bottom_head, e, dqoe_order_bottom_list); + _dispatch_unfair_lock_unlock(&btqic->dqic_order_bottom_head_lock); } void @@ -891,8 +1176,9 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) [2] = "a recipient", [3] = "both an initiator and a recipient" }; - bool as_top = !TAILQ_EMPTY(&dq->diq_order_top_head); - bool as_bottom = !TAILQ_EMPTY(&dq->diq_order_top_head); + dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + bool as_top = !LIST_EMPTY(&dqic->dqic_order_top_head); + bool as_bottom = !LIST_EMPTY(&dqic->dqic_order_top_head); if (as_top || as_bottom) { _dispatch_log( @@ -903,7 +1189,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) "a dispatch_sync", dq, dq->dq_label ?: "", reasons[(int)as_top + 2 * (int)as_bottom]); _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - _dispatch_introspection_queue_order_dispose(dq); + _dispatch_introspection_queue_order_dispose(dq->do_finalizer); _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } } diff --git a/src/introspection_internal.h b/src/introspection_internal.h index e2fa6d18b..d4459da63 100644 --- a/src/introspection_internal.h +++ b/src/introspection_internal.h @@ -27,20 +27,42 @@ #ifndef __DISPATCH_INTROSPECTION_INTERNAL__ #define __DISPATCH_INTROSPECTION_INTERNAL__ +/* keep in sync with introspection_private.h */ +enum dispatch_introspection_runtime_event { + dispatch_introspection_runtime_event_worker_event_delivery = 1, + dispatch_introspection_runtime_event_worker_unpark = 2, + dispatch_introspection_runtime_event_worker_request = 3, + 
dispatch_introspection_runtime_event_worker_park = 4, + + dispatch_introspection_runtime_event_sync_wait = 10, + dispatch_introspection_runtime_event_async_sync_handoff = 11, + dispatch_introspection_runtime_event_sync_sync_handoff = 12, + dispatch_introspection_runtime_event_sync_async_handoff = 13, +}; + #if DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_HEADER \ - TAILQ_ENTRY(dispatch_queue_s) diq_list; \ - dispatch_unfair_lock_s diq_order_top_head_lock; \ - dispatch_unfair_lock_s diq_order_bottom_head_lock; \ - TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_top_head; \ - TAILQ_HEAD(, dispatch_queue_order_entry_s) diq_order_bottom_head -#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE \ - sizeof(struct { DISPATCH_INTROSPECTION_QUEUE_HEADER; }) +#define DC_BARRIER 0x1 +#define DC_SYNC 0x2 +#define DC_APPLY 0x4 + +typedef struct dispatch_queue_introspection_context_s { + dispatch_queue_class_t dqic_queue; + dispatch_function_t dqic_finalizer; + LIST_ENTRY(dispatch_queue_introspection_context_s) dqic_list; + + char __dqic_no_queue_inversion[0]; + + // used for queue inversion debugging only + dispatch_unfair_lock_s dqic_order_top_head_lock; + dispatch_unfair_lock_s dqic_order_bottom_head_lock; + LIST_HEAD(, dispatch_queue_order_entry_s) dqic_order_top_head; + LIST_HEAD(, dispatch_queue_order_entry_s) dqic_order_bottom_head; +} *dispatch_queue_introspection_context_t; struct dispatch_introspection_state_s { - TAILQ_HEAD(, dispatch_introspection_thread_s) threads; - TAILQ_HEAD(, dispatch_queue_s) queues; + LIST_HEAD(, dispatch_introspection_thread_s) threads; + LIST_HEAD(, dispatch_queue_introspection_context_s) queues; dispatch_unfair_lock_s threads_lock; dispatch_unfair_lock_s queues_lock; @@ -54,89 +76,121 @@ extern struct dispatch_introspection_state_s _dispatch_introspection; void _dispatch_introspection_init(void); void _dispatch_introspection_thread_add(void); -dispatch_queue_t _dispatch_introspection_queue_create(dispatch_queue_t dq); 
-void _dispatch_introspection_queue_dispose(dispatch_queue_t dq); -void _dispatch_introspection_queue_item_enqueue(dispatch_queue_t dq, +dispatch_function_t _dispatch_object_finalizer(dispatch_object_t dou); +void _dispatch_object_set_finalizer(dispatch_object_t dou, + dispatch_function_t finalizer); +dispatch_queue_class_t _dispatch_introspection_queue_create( + dispatch_queue_class_t dqu); +void _dispatch_introspection_queue_dispose(dispatch_queue_class_t dqu); +void _dispatch_introspection_queue_item_enqueue(dispatch_queue_class_t dqu, dispatch_object_t dou); -void _dispatch_introspection_queue_item_dequeue(dispatch_queue_t dq, +void _dispatch_introspection_queue_item_dequeue(dispatch_queue_class_t dqu, dispatch_object_t dou); void _dispatch_introspection_queue_item_complete(dispatch_object_t dou); void _dispatch_introspection_callout_entry(void *ctxt, dispatch_function_t f); void _dispatch_introspection_callout_return(void *ctxt, dispatch_function_t f); +struct dispatch_object_s *_dispatch_introspection_queue_fake_sync_push_pop( + dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + uintptr_t dc_flags); +void _dispatch_introspection_runtime_event( + enum dispatch_introspection_runtime_event event, + void *ptr, uint64_t value); #if DISPATCH_PURE_C -static dispatch_queue_t _dispatch_queue_get_current(void); - DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_push_list(dispatch_queue_t dq, +_dispatch_introspection_queue_push_list(dispatch_queue_class_t dqu, dispatch_object_t head, dispatch_object_t tail) { struct dispatch_object_s *dou = head._do; do { - _dispatch_introspection_queue_item_enqueue(dq, dou); + _dispatch_introspection_queue_item_enqueue(dqu, dou); } while (dou != tail._do && (dou = dou->do_next)); }; DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_push(dispatch_queue_t dq, dispatch_object_t dou) { - _dispatch_introspection_queue_item_enqueue(dq, dou); -}; 
+_dispatch_introspection_queue_push(dispatch_queue_class_t dqu, + dispatch_object_t dou) +{ + _dispatch_introspection_queue_item_enqueue(dqu, dou); +} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_pop(dispatch_queue_t dq, dispatch_object_t dou) { - _dispatch_introspection_queue_item_dequeue(dq, dou); -}; +_dispatch_introspection_queue_pop(dispatch_queue_class_t dqu, + dispatch_object_t dou) +{ + _dispatch_introspection_queue_item_dequeue(dqu, dou); +} void -_dispatch_introspection_order_record(dispatch_queue_t top_q, - dispatch_queue_t bottom_q); +_dispatch_introspection_order_record(dispatch_queue_t top_q); void _dispatch_introspection_target_queue_changed(dispatch_queue_t dq); DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_sync_begin(dispatch_queue_t dq) +_dispatch_introspection_sync_begin(dispatch_queue_class_t dq) { if (!_dispatch_introspection.debug_queue_inversions) return; - _dispatch_introspection_order_record(dq, _dispatch_queue_get_current()); + _dispatch_introspection_order_record(dq._dq); } #endif // DISPATCH_PURE_C #else // DISPATCH_INTROSPECTION -#define DISPATCH_INTROSPECTION_QUEUE_HEADER -#define DISPATCH_INTROSPECTION_QUEUE_HEADER_SIZE 0 - #define _dispatch_introspection_init() #define _dispatch_introspection_thread_add() DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_introspection_queue_create(dispatch_queue_t dq) { return dq; } +static inline dispatch_queue_class_t +_dispatch_introspection_queue_create(dispatch_queue_class_t dqu) +{ + return dqu; +} + +#if DISPATCH_PURE_C + +DISPATCH_ALWAYS_INLINE +static inline dispatch_function_t +_dispatch_object_finalizer(dispatch_object_t dou) +{ + return dou._do->do_finalizer; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_object_set_finalizer(dispatch_object_t dou, + dispatch_function_t finalizer) +{ + dou._do->do_finalizer = finalizer; +} + +#endif // DISPATCH_PURE_C DISPATCH_ALWAYS_INLINE static inline void 
-_dispatch_introspection_queue_dispose(dispatch_queue_t dq) { (void)dq; } +_dispatch_introspection_queue_dispose( + dispatch_queue_class_t dqu DISPATCH_UNUSED) {} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_push_list(dispatch_queue_t dq DISPATCH_UNUSED, +_dispatch_introspection_queue_push_list( + dispatch_queue_class_t dqu DISPATCH_UNUSED, dispatch_object_t head DISPATCH_UNUSED, dispatch_object_t tail DISPATCH_UNUSED) {} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_push(dispatch_queue_t dq DISPATCH_UNUSED, +_dispatch_introspection_queue_push(dispatch_queue_class_t dqu DISPATCH_UNUSED, dispatch_object_t dou DISPATCH_UNUSED) {} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_queue_pop(dispatch_queue_t dq DISPATCH_UNUSED, +_dispatch_introspection_queue_pop(dispatch_queue_class_t dqu DISPATCH_UNUSED, dispatch_object_t dou DISPATCH_UNUSED) {} DISPATCH_ALWAYS_INLINE @@ -161,7 +215,21 @@ _dispatch_introspection_target_queue_changed( DISPATCH_ALWAYS_INLINE static inline void -_dispatch_introspection_sync_begin(dispatch_queue_t dq DISPATCH_UNUSED) {} +_dispatch_introspection_sync_begin( + dispatch_queue_class_t dq DISPATCH_UNUSED) {} + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s * +_dispatch_introspection_queue_fake_sync_push_pop( + dispatch_queue_t dq DISPATCH_UNUSED, + void *ctxt DISPATCH_UNUSED, dispatch_function_t func DISPATCH_UNUSED, + uintptr_t dc_flags DISPATCH_UNUSED) { return NULL; } + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_introspection_runtime_event( + enum dispatch_introspection_runtime_event event DISPATCH_UNUSED, + void *ptr DISPATCH_UNUSED, uint64_t value DISPATCH_UNUSED) {} #endif // DISPATCH_INTROSPECTION diff --git a/src/io.c b/src/io.c index 155b6cf02..dea62a685 100644 --- a/src/io.c +++ b/src/io.c @@ -122,7 +122,7 @@ enum { #if DISPATCH_IO_DEBUG #if !DISPATCH_DEBUG #define _dispatch_io_log(x, ...) 
do { \ - _dispatch_log("%llu\t%p\t" x, _dispatch_absolute_time(), \ + _dispatch_log("%llu\t%p\t" x, _dispatch_uptime(), \ (void *)_dispatch_thread_self(), ##__VA_ARGS__); \ } while (0) #ifdef _dispatch_object_debug @@ -151,39 +151,28 @@ enum { #pragma mark - #pragma mark dispatch_io_hashtables +LIST_HEAD(dispatch_disk_head_s, dispatch_disk_s); +LIST_HEAD(dispatch_fd_entry_head_s, dispatch_fd_entry_s); + // Global hashtable of dev_t -> disk_s mappings -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, dispatch_disk_s) _dispatch_io_devs[DIO_HASH_SIZE]; +DISPATCH_STATIC_GLOBAL(struct dispatch_disk_head_s _dispatch_io_devs[DIO_HASH_SIZE]); +DISPATCH_STATIC_GLOBAL(dispatch_queue_t _dispatch_io_devs_lockq); + // Global hashtable of fd -> fd_entry_s mappings -DISPATCH_CACHELINE_ALIGN -static TAILQ_HEAD(, dispatch_fd_entry_s) _dispatch_io_fds[DIO_HASH_SIZE]; +DISPATCH_STATIC_GLOBAL(struct dispatch_fd_entry_head_s _dispatch_io_fds[DIO_HASH_SIZE]); +DISPATCH_STATIC_GLOBAL(dispatch_queue_t _dispatch_io_fds_lockq); -static dispatch_once_t _dispatch_io_devs_lockq_pred; -static dispatch_queue_t _dispatch_io_devs_lockq; -static dispatch_queue_t _dispatch_io_fds_lockq; +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_io_init_pred); static char const * const _dispatch_io_key = "io"; static void -_dispatch_io_fds_lockq_init(void *context DISPATCH_UNUSED) +_dispatch_io_queues_init(void *context DISPATCH_UNUSED) { _dispatch_io_fds_lockq = dispatch_queue_create( "com.apple.libdispatch-io.fd_lockq", NULL); - unsigned int i; - for (i = 0; i < DIO_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_io_fds[i]); - } -} - -static void -_dispatch_io_devs_lockq_init(void *context DISPATCH_UNUSED) -{ _dispatch_io_devs_lockq = dispatch_queue_create( "com.apple.libdispatch-io.dev_lockq", NULL); - unsigned int i; - for (i = 0; i < DIO_HASH_SIZE; i++) { - TAILQ_INIT(&_dispatch_io_devs[i]); - } } #pragma mark - @@ -196,14 +185,16 @@ enum { DISPATCH_IOCNTL_MAX_PENDING_IO_REQS, }; -static struct 
dispatch_io_defaults_s { +extern struct dispatch_io_defaults_s { size_t chunk_size, low_water_chunks, max_pending_io_reqs; bool initial_delivery; -} dispatch_io_defaults = { +} dispatch_io_defaults; + +DISPATCH_GLOBAL_INIT(struct dispatch_io_defaults_s dispatch_io_defaults, { .chunk_size = DIO_MAX_CHUNK_SIZE, .low_water_chunks = DIO_DEFAULT_LOW_WATER_CHUNKS, .max_pending_io_reqs = DIO_MAX_PENDING_IO_REQS, -}; +}); #define _dispatch_iocntl_set_default(p, v) do { \ dispatch_io_defaults.p = (typeof(dispatch_io_defaults.p))(v); \ @@ -221,6 +212,7 @@ _dispatch_iocntl(uint32_t param, uint64_t value) break; case DISPATCH_IOCNTL_INITIAL_DELIVERY: _dispatch_iocntl_set_default(initial_delivery, value); + break; case DISPATCH_IOCNTL_MAX_PENDING_IO_REQS: _dispatch_iocntl_set_default(max_pending_io_reqs, value); break; @@ -236,7 +228,7 @@ _dispatch_io_create(dispatch_io_type_t type) dispatch_io_t channel = _dispatch_object_alloc(DISPATCH_VTABLE(io), sizeof(struct dispatch_io_s)); channel->do_next = DISPATCH_OBJECT_LISTLESS; - channel->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + channel->do_targetq = _dispatch_get_default_queue(true); channel->params.type = type; channel->params.high = SIZE_MAX; channel->params.low = dispatch_io_defaults.low_water_chunks * @@ -457,8 +449,8 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, return; } dispatch_suspend(channel->queue); - dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, - _dispatch_io_devs_lockq_init); + dispatch_once_f(&_dispatch_io_init_pred, NULL, + _dispatch_io_queues_init); dispatch_async(_dispatch_io_devs_lockq, ^{ dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create_with_path( path_data, st.st_dev, st.st_mode); @@ -697,7 +689,7 @@ _dispatch_io_stop(dispatch_io_t channel) channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); - TAILQ_FOREACH(fdi, &_dispatch_io_fds[hash], fd_list) { + LIST_FOREACH(fdi, &_dispatch_io_fds[hash], fd_list) { if (fdi->fd == 
channel->fd) { _dispatch_fd_entry_cleanup_operations(fdi, channel); break; @@ -892,7 +884,7 @@ dispatch_read(dispatch_fd_t fd, size_t length, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_READ, channel, 0, length, dispatch_data_empty, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), + _dispatch_get_default_queue(false), ^(bool done, dispatch_data_t data, int error) { if (data) { data = dispatch_data_create_concat(deliver_data, data); @@ -963,7 +955,7 @@ dispatch_write(dispatch_fd_t fd, dispatch_data_t data, dispatch_queue_t queue, dispatch_operation_t op = _dispatch_operation_create(DOP_DIR_WRITE, channel, 0, dispatch_data_get_size(data), data, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false), + _dispatch_get_default_queue(false), ^(bool done, dispatch_data_t d, int error) { if (done) { if (d) { @@ -1006,6 +998,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, if (err || !length) { _dispatch_io_data_retain(data); _dispatch_retain(queue); + _dispatch_retain(channel); dispatch_async(channel->barrier_queue, ^{ dispatch_async(queue, ^{ dispatch_data_t d = data; @@ -1017,6 +1010,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, _dispatch_channel_debug("IO handler invoke: err %d", channel, err); handler(true, d, err); + _dispatch_release(channel); _dispatch_io_data_release(data); }); _dispatch_release(queue); @@ -1041,7 +1035,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, // Take a snapshot of the priority of the channel queue. 
The actual I/O // for this operation will be performed at this priority dispatch_queue_t targetq = op->channel->do_targetq; - while (fastpath(targetq->do_targetq)) { + while (targetq->do_targetq) { targetq = targetq->do_targetq; } op->do_targetq = targetq; @@ -1275,14 +1269,13 @@ static void _dispatch_fd_entry_init_async(dispatch_fd_t fd, dispatch_fd_entry_init_callback_t completion_callback) { - static dispatch_once_t _dispatch_io_fds_lockq_pred; - dispatch_once_f(&_dispatch_io_fds_lockq_pred, NULL, - _dispatch_io_fds_lockq_init); + dispatch_once_f(&_dispatch_io_init_pred, NULL, + _dispatch_io_queues_init); dispatch_async(_dispatch_io_fds_lockq, ^{ dispatch_fd_entry_t fd_entry = NULL; // Check to see if there is an existing entry for the given fd uintptr_t hash = DIO_HASH(fd); - TAILQ_FOREACH(fd_entry, &_dispatch_io_fds[hash], fd_list) { + LIST_FOREACH(fd_entry, &_dispatch_io_fds[hash], fd_list) { if (fd_entry->fd == fd) { // Retain the fd_entry to ensure it cannot go away until the // stat() has completed @@ -1326,7 +1319,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) _dispatch_io_fds_lockq); _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); fd_entry->fd = fd; - TAILQ_INSERT_TAIL(&_dispatch_io_fds[hash], fd_entry, fd_list); + LIST_INSERT_HEAD(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); @@ -1376,8 +1369,8 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // We have to get the disk on the global dev queue. 
The // barrier queue cannot continue until that is complete dispatch_suspend(fd_entry->barrier_queue); - dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, - _dispatch_io_devs_lockq_init); + dispatch_once_f(&_dispatch_io_init_pred, NULL, + _dispatch_io_queues_init); dispatch_async(_dispatch_io_devs_lockq, ^{ _dispatch_disk_init(fd_entry, dev); dispatch_resume(fd_entry->barrier_queue); @@ -1394,7 +1387,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) } _dispatch_stream_init(fd_entry, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); + _dispatch_get_default_queue(false)); } fd_entry->orig_flags = orig_flags; fd_entry->orig_nosigpipe = orig_nosigpipe; @@ -1416,7 +1409,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) }); } // Remove this entry from the global fd list - TAILQ_REMOVE(&_dispatch_io_fds[hash], fd_entry, fd_list); + LIST_REMOVE(fd_entry, fd_list); }); // If there was a source associated with this stream, disposing of the // source cancels it and suspends the close queue. 
Freeing the fd_entry @@ -1462,7 +1455,7 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, _dispatch_disk_init(fd_entry, major(dev)); } else { _dispatch_stream_init(fd_entry, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); + _dispatch_get_default_queue(false)); } fd_entry->fd = -1; fd_entry->orig_flags = -1; @@ -1625,7 +1618,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) dispatch_disk_t disk; // Check to see if there is an existing entry for the given device uintptr_t hash = DIO_HASH(dev); - TAILQ_FOREACH(disk, &_dispatch_io_devs[hash], disk_list) { + LIST_FOREACH(disk, &_dispatch_io_devs[hash], disk_list) { if (disk->dev == dev) { _dispatch_retain(disk); goto out; @@ -1639,7 +1632,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) disk->do_next = DISPATCH_OBJECT_LISTLESS; disk->do_xref_cnt = -1; disk->advise_list_depth = pending_reqs_depth; - disk->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); + disk->do_targetq = _dispatch_get_default_queue(false); disk->dev = dev; TAILQ_INIT(&disk->operations); disk->cur_rq = TAILQ_FIRST(&disk->operations); @@ -1647,7 +1640,7 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) snprintf(label, sizeof(label), "com.apple.libdispatch-io.deviceq.%d", (int)dev); disk->pick_queue = dispatch_queue_create(label, NULL); - TAILQ_INSERT_TAIL(&_dispatch_io_devs[hash], disk, disk_list); + LIST_INSERT_HEAD(&_dispatch_io_devs[hash], disk, disk_list); out: fd_entry->disk = disk; TAILQ_INIT(&fd_entry->stream_ops); @@ -1656,11 +1649,10 @@ _dispatch_disk_init(dispatch_fd_entry_t fd_entry, dev_t dev) void _dispatch_disk_dispose(dispatch_disk_t disk, DISPATCH_UNUSED bool *allow_free) { - uintptr_t hash = DIO_HASH(disk->dev); - TAILQ_REMOVE(&_dispatch_io_devs[hash], disk, disk_list); + LIST_REMOVE(disk, disk_list); dispatch_assert(TAILQ_EMPTY(&disk->operations)); size_t i; - for (i=0; iadvise_list_depth; ++i) { + for (i = 0; i < 
disk->advise_list_depth; ++i) { dispatch_assert(!disk->advise_list[i]); } dispatch_release(disk->pick_queue); @@ -2092,7 +2084,7 @@ _dispatch_disk_perform(void *ctxt) op = disk->advise_list[disk->req_idx]; int result = _dispatch_operation_perform(op); disk->advise_list[disk->req_idx] = NULL; - disk->req_idx = (++disk->req_idx)%disk->advise_list_depth; + disk->req_idx = (disk->req_idx + 1) % disk->advise_list_depth; _dispatch_op_debug("async perform completion: disk %p", op, disk); dispatch_async(disk->pick_queue, ^{ _dispatch_op_debug("perform completion", op); @@ -2446,7 +2438,7 @@ _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz) { size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(channel), channel); + _dispatch_object_class_name(channel), channel); offset += _dispatch_object_debug_attr(channel, &buf[offset], bufsiz - offset); offset += _dispatch_io_debug_attr(channel, &buf[offset], bufsiz - offset); @@ -2481,7 +2473,7 @@ _dispatch_operation_debug(dispatch_operation_t op, char* buf, size_t bufsiz) { size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(op), op); + _dispatch_object_class_name(op), op); offset += _dispatch_object_debug_attr(op, &buf[offset], bufsiz - offset); offset += _dispatch_operation_debug_attr(op, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); diff --git a/src/io_internal.h b/src/io_internal.h index 672727fae..15a96eb84 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -34,7 +34,7 @@ #define _DISPATCH_IO_LABEL_SIZE 16 -#if TARGET_OS_EMBEDDED // rdar://problem/9032036 +#if TARGET_OS_IPHONE // rdar://problem/9032036 #define DIO_MAX_CHUNK_SIZE (512u * 1024) #define DIO_HASH_SIZE 64u // must be a power of two #else @@ -66,8 +66,8 @@ typedef unsigned int dispatch_op_flags_t; #define DIO_CLOSED 1u // channel has been closed #define DIO_STOPPED 2u // channel has been stopped (implies closed) 
-DISPATCH_INTERNAL_CLASS_DECL(operation); -DISPATCH_INTERNAL_CLASS_DECL(disk); +DISPATCH_INTERNAL_CLASS_DECL(operation, OBJECT); +DISPATCH_INTERNAL_CLASS_DECL(disk, OBJECT); struct dispatch_stream_s { dispatch_queue_t dq; @@ -105,7 +105,7 @@ struct dispatch_disk_s { size_t advise_idx; dev_t dev; bool io_active; - TAILQ_ENTRY(dispatch_disk_s) disk_list; + LIST_ENTRY(dispatch_disk_s) disk_list; size_t advise_list_depth; dispatch_operation_t advise_list[]; }; @@ -127,7 +127,7 @@ struct dispatch_fd_entry_s { dispatch_group_t barrier_group; dispatch_io_t convenience_channel; TAILQ_HEAD(, dispatch_operation_s) stream_ops; - TAILQ_ENTRY(dispatch_fd_entry_s) fd_list; + LIST_ENTRY(dispatch_fd_entry_s) fd_list; }; typedef struct dispatch_fd_entry_s *dispatch_fd_entry_t; @@ -163,7 +163,7 @@ struct dispatch_operation_s { TAILQ_ENTRY(dispatch_operation_s) stream_list; }; -DISPATCH_CLASS_DECL(io); +DISPATCH_CLASS_DECL(io, OBJECT); struct dispatch_io_s { DISPATCH_OBJECT_HEADER(io); dispatch_queue_t queue, barrier_queue; diff --git a/src/libdispatch.codes b/src/libdispatch.codes index 0ecc3331f..855c2ef66 100644 --- a/src/libdispatch.codes +++ b/src/libdispatch.codes @@ -7,13 +7,36 @@ 0x2e010018 DISPATCH_VOUCHER_activity_adopt 0x2e020004 DISPATCH_PERF_non_leaf_retarget -0x2e020008 DISPATCH_PERF_post_activate_mutation +0x2e020008 DISPATCH_PERF_post_activate_retarget 0x2e02000c DISPATCH_PERF_post_activate_mutation 0x2e020010 DISPATCH_PERF_delayed_registration 0x2e020014 DISPATCH_PERF_mutable_target 0x2e020018 DISPATCH_PERF_strict_bg_timer +0x2e02001c DISPATCH_PERF_suspended_timer_fire +0x2e020020 DISPATCH_PERF_handlerless_source_fire +0x2e020024 DISPATCH_PERF_source_registration_without_qos 0x2e030004 DISPATCH_MACH_MSG_hdr_move 0x2e040004 DISPATCH_PERF_MON_worker_thread 0x2e040008 DISPATCH_PERF_MON_worker_useless + +0x2e050004 DISPATCH_QOS_TRACE_queue_creation +0x2e050008 DISPATCH_QOS_TRACE_queue_dispose +0x2e05000c DISPATCH_QOS_TRACE_block_creation +0x2e050010 
DISPATCH_QOS_TRACE_block_dispose +0x2e050014 DISPATCH_QOS_TRACE_cont_push_eb +0x2e050018 DISPATCH_QOS_TRACE_cont_push_ab +0x2e05001c DISPATCH_QOS_TRACE_cont_push_f +0x2e050020 DISPATCH_QOS_TRACE_source_push +0x2e050024 DISPATCH_QOS_TRACE_cont_pop +0x2e050028 DISPATCH_QOS_TRACE_source_pop +0x2e05002c DISPATCH_QOS_TRACE_queue_item_done +0x2e050030 DISPATCH_QOS_TRACE_source_callout +0x2e050034 DISPATCH_QOS_TRACE_source_dispose + +0x2e060004 DISPATCH_FIREHOSE_TRACE_reserver_gave_up +0x2e060008 DISPATCH_FIREHOSE_TRACE_reserver_wait +0x2e06000c DISPATCH_FIREHOSE_TRACE_allocator +0x2e060010 DISPATCH_FIREHOSE_TRACE_wait_for_logd +0x2e060014 DISPATCH_FIREHOSE_TRACE_chunk_install diff --git a/src/libdispatch.plist b/src/libdispatch.plist new file mode 100644 index 000000000..e05149258 --- /dev/null +++ b/src/libdispatch.plist @@ -0,0 +1,99 @@ + + + + + + Name + libdispatch + Children + + + Name + Significant Problems (should fix) + Children + + + Name + Non Leaf Retarget + Type + Impulse + KTraceCode + 0x2e020004 + + + Name + Retarget after Activation + Type + Impulse + KTraceCode + 0x2e020008 + + + Name + Mutation after Activation + Type + Impulse + KTraceCode + 0x2e02000c + + + Name + Source Firing Without a Handler + Type + Impulse + KTraceCode + 0x2e020020 + + + + + Name + Performance Problems + Children + + + Name + Delayed Source Registration + Type + Impulse + KTraceCode + 0x2e020010 + + + Name + Mutable target queue during traversal + Type + Impulse + KTraceCode + 0x2e020014 + + + Name + Timer with Strict + Background + Type + Impulse + KTraceCode + 0x2e020018 + + + Name + Suspended timer firing + Type + Impulse + KTraceCode + 0x2e02001c + + + Name + Source registration falling back to QOS_CLASS_DEFAULT + Type + Impulse + KTraceCode + 0x2e020024 + + + + + + + diff --git a/src/mach.c b/src/mach.c index 699492da0..54da00be4 100644 --- a/src/mach.c +++ b/src/mach.c @@ -24,20 +24,19 @@ #define DISPATCH_MACH_RETURN_IMMEDIATE_SEND_RESULT 0x1 #define 
DISPATCH_MACH_REGISTER_FOR_REPLY 0x2 #define DISPATCH_MACH_WAIT_FOR_REPLY 0x4 -#define DISPATCH_MACH_OWNED_REPLY_PORT 0x8 -#define DISPATCH_MACH_ASYNC_REPLY 0x10 #define DISPATCH_MACH_OPTIONS_MASK 0xffff #define DM_SEND_STATUS_SUCCESS 0x1 #define DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT 0x2 +#define DM_CHECKIN_CANCELED ((dispatch_mach_msg_t)~0ul) + DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, DM_SEND_INVOKE_NONE = 0x0, DM_SEND_INVOKE_MAKE_DIRTY = 0x1, DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, - DM_SEND_INVOKE_CANCEL = 0x4, - DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x8, - DM_SEND_INVOKE_IMMEDIATE_SEND = 0x10, + DM_SEND_INVOKE_CAN_RUN_BARRIER = 0x4, + DM_SEND_INVOKE_IMMEDIATE_SEND = 0x8, ); #define DM_SEND_INVOKE_IMMEDIATE_SEND_MASK \ ((dispatch_mach_send_invoke_flags_t)DM_SEND_INVOKE_IMMEDIATE_SEND) @@ -48,7 +47,7 @@ static mach_port_t _dispatch_mach_msg_get_reply_port(dispatch_object_t dou); static void _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, mach_port_t remote_port); static inline void _dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port); + dispatch_mach_reply_wait_refs_t dwr, mach_port_t local_port); static dispatch_mach_msg_t _dispatch_mach_msg_create_reply_disconnected( dispatch_object_t dou, dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason); @@ -58,11 +57,10 @@ static inline mach_msg_header_t* _dispatch_mach_msg_get_msg( dispatch_mach_msg_t dmsg); static void _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, dispatch_qos_t qos); -static void _dispatch_mach_cancel(dispatch_mach_t dm); static void _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos); static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg); + dispatch_mach_msg_t dmsg, pthread_priority_t pp); static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, 
dispatch_queue_t drq); static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( @@ -76,79 +74,16 @@ static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, // For tests only. DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); -dispatch_source_t -_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc) -{ - dispatch_source_t ds; - ds = dispatch_source_create(&_dispatch_source_type_mach_recv_direct, - recvp, 0, &_dispatch_mgr_q); - os_atomic_store(&ds->ds_refs->ds_handler[DS_EVENT_HANDLER], - (dispatch_continuation_t)dc, relaxed); - return ds; -} - #pragma mark - #pragma mark dispatch to XPC callbacks -static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks; - -// Default dmxh_direct_message_handler callback that does not handle -// messages inline. -static bool -_dispatch_mach_xpc_no_handle_message( - void *_Nullable context DISPATCH_UNUSED, - dispatch_mach_reason_t reason DISPATCH_UNUSED, - dispatch_mach_msg_t message DISPATCH_UNUSED, - mach_error_t error DISPATCH_UNUSED) -{ - return false; -} - -// Default dmxh_msg_context_reply_queue callback that returns a NULL queue. -static dispatch_queue_t -_dispatch_mach_msg_context_no_async_reply_queue( - void *_Nonnull msg_context DISPATCH_UNUSED) -{ - return NULL; -} - -// Default dmxh_async_reply_handler callback that crashes when called. -DISPATCH_NORETURN -static void -_dispatch_mach_default_async_reply_handler(void *context DISPATCH_UNUSED, - dispatch_mach_reason_t reason DISPATCH_UNUSED, - dispatch_mach_msg_t message DISPATCH_UNUSED) -{ - DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, - "_dispatch_mach_default_async_reply_handler called"); -} - -// Default dmxh_enable_sigterm_notification callback that enables delivery of -// SIGTERM notifications (for backwards compatibility). 
-static bool -_dispatch_mach_enable_sigterm(void *_Nullable context DISPATCH_UNUSED) -{ - return true; -} - -// Callbacks from dispatch to XPC. The default is to not support any callbacks. -static const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default - = { - .version = DISPATCH_MACH_XPC_HOOKS_VERSION, - .dmxh_direct_message_handler = &_dispatch_mach_xpc_no_handle_message, - .dmxh_msg_context_reply_queue = - &_dispatch_mach_msg_context_no_async_reply_queue, - .dmxh_async_reply_handler = &_dispatch_mach_default_async_reply_handler, - .dmxh_enable_sigterm_notification = &_dispatch_mach_enable_sigterm, -}; - -static dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks - = &_dispatch_mach_xpc_hooks_default; - void dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks) { + if (hooks->version < DISPATCH_MACH_XPC_MIN_HOOKS_VERSION) { + DISPATCH_CLIENT_CRASH(hooks, + "trying to install hooks with unsupported version"); + } if (!os_atomic_cmpxchg(&_dispatch_mach_xpc_hooks, &_dispatch_mach_xpc_hooks_default, hooks, relaxed)) { DISPATCH_CLIENT_CRASH(_dispatch_mach_xpc_hooks, @@ -174,13 +109,10 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dispatch_mach_recv_refs_t dmrr; dispatch_mach_send_refs_t dmsr; dispatch_mach_t dm; - dm = _dispatch_object_alloc(DISPATCH_VTABLE(mach), - sizeof(struct dispatch_mach_s)); - _dispatch_queue_init(dm->_as_dq, DQF_LEGACY, 1, - DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER); + dm = _dispatch_queue_alloc(mach, DQF_MUTABLE, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dm; dm->dq_label = label; - dm->do_ref_cnt++; // the reference _dispatch_mach_cancel_invoke holds dm->dm_is_xpc = is_xpc; dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; @@ -196,8 +128,8 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dmsr->du_owner_wref = _dispatch_ptr2wref(dm); dm->dm_send_refs = dmsr; - if (slowpath(!q)) { - q = 
_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + if (unlikely(!q)) { + q = _dispatch_get_default_queue(true); } else { _dispatch_retain(q); } @@ -242,7 +174,7 @@ _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free) _dispatch_unote_dispose(dm->dm_xpc_term_refs); dm->dm_xpc_term_refs = NULL; } - _dispatch_queue_destroy(dm->_as_dq, allow_free); + _dispatch_lane_class_dispose(dm, allow_free); } void @@ -250,11 +182,9 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, mach_port_t send, dispatch_mach_msg_t checkin) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - uint32_t disconnect_cnt; if (MACH_PORT_VALID(receive)) { dm->dm_recv_refs->du_ident = receive; - _dispatch_retain(dm); // the reference the manager queue holds } dmsr->dmsr_send = send; if (MACH_PORT_VALID(send)) { @@ -266,151 +196,153 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, } dmsr->dmsr_checkin = checkin; } - dispatch_assert(DISPATCH_MACH_NEVER_CONNECTED - 1 == - DISPATCH_MACH_NEVER_INSTALLED); - disconnect_cnt = os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, release); - if (unlikely(disconnect_cnt != DISPATCH_MACH_NEVER_INSTALLED)) { + + uint32_t disconnect_cnt = os_atomic_and_orig2o(dmsr, dmsr_disconnect_cnt, + ~DISPATCH_MACH_NEVER_CONNECTED, relaxed); + if (unlikely(!(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED))) { DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); } _dispatch_object_debug(dm, "%s", __func__); return dispatch_activate(dm); } +static inline void +_dispatch_mach_reply_list_insert(dispatch_mach_send_refs_t dmsr, + dispatch_mach_reply_refs_t dmr) +{ + _dispatch_unfair_lock_lock(&dmsr->dmsr_replies_lock); + dispatch_assert(!_LIST_IS_ENQUEUED(dmr, dmr_list)); + LIST_INSERT_HEAD(&dmsr->dmsr_replies, dmr, dmr_list); + _dispatch_unfair_lock_unlock(&dmsr->dmsr_replies_lock); +} + +static inline void +_dispatch_mach_reply_list_remove_locked(dispatch_mach_reply_refs_t dmr) +{ + dispatch_assert(_LIST_IS_ENQUEUED(dmr, 
dmr_list)); + LIST_REMOVE(dmr, dmr_list); + _LIST_MARK_NOT_ENQUEUED(dmr, dmr_list); +} + static inline bool -_dispatch_mach_reply_tryremove(dispatch_mach_t dm, +_dispatch_mach_reply_list_tryremove(dispatch_mach_send_refs_t dmsr, dispatch_mach_reply_refs_t dmr) { bool removed; - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - if ((removed = _TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_unfair_lock_lock(&dmsr->dmsr_replies_lock); + if ((removed = _LIST_IS_ENQUEUED(dmr, dmr_list))) { + _dispatch_mach_reply_list_remove_locked(dmr); } - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + _dispatch_unfair_lock_unlock(&dmsr->dmsr_replies_lock); return removed; } +#define DMRU_DELETE_ACK DUU_DELETE_ACK +#define DMRU_PROBE DUU_PROBE +#define DMRU_MUST_SUCCEED DUU_MUST_SUCCEED +#define DMRU_DUU_MASK 0x0f +#define DMRU_DISCONNECTED 0x10 +#define DMRU_REMOVE 0x20 +#define DMRU_ASYNC_MERGE 0x40 +#define DMRU_CANCEL 0x80 + DISPATCH_NOINLINE static void -_dispatch_mach_reply_waiter_unregister(dispatch_mach_t dm, +_dispatch_mach_reply_unregister(dispatch_mach_t dm, dispatch_mach_reply_refs_t dmr, uint32_t options) { - dispatch_mach_msg_t dmsgr = NULL; - bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); - if (options & DU_UNREGISTER_REPLY_REMOVE) { - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - if (unlikely(!_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(0, "Could not find reply registration"); - } - TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); - } - if (disconnected) { - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, - DISPATCH_MACH_DISCONNECTED); - } else if (dmr->dmr_voucher) { - _voucher_release(dmr->dmr_voucher); - dmr->dmr_voucher = 
NULL; - } - _dispatch_debug("machport[0x%08x]: unregistering for sync reply%s, ctxt %p", - _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident), + // - async waiters have a dmr of type &_dispatch_mach_type_reply + // heap-allocated in _dispatch_mach_reply_kevent_register(). + // + // - sync waiters have a dmr of type DISPATCH_MACH_TYPE_WAITER, + // stack-allocated in _dispatch_mach_send_and_wait_for_reply(). + bool sync_waiter = (dux_type(dmr) == DISPATCH_MACH_TYPE_WAITER); + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + bool disconnected = (options & DMRU_DISCONNECTED); + bool wakeup = false; + + _dispatch_debug("machport[0x%08x]: unregistering for%s reply%s, ctxt %p", + (mach_port_t)dmr->du_ident, sync_waiter ? " sync" : "", + (options & DMRU_CANCEL) ? " (canceled)" : disconnected ? " (disconnected)" : "", dmr->dmr_ctxt); - if (dmsgr) { - return _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); - } -} -DISPATCH_NOINLINE -static bool -_dispatch_mach_reply_list_remove(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr) { - // dmsr_replies_lock must be held by the caller. 
- bool removed = false; - if (likely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - removed = true; + if (options & DMRU_REMOVE) { + _dispatch_unfair_lock_lock(&dmsr->dmsr_replies_lock); + _dispatch_mach_reply_list_remove_locked(dmr); + if (LIST_EMPTY(&dmsr->dmsr_replies) && dmsr->dmsr_disconnect_cnt) { + wakeup = true; + } + _dispatch_unfair_lock_unlock(&dmsr->dmsr_replies_lock); } - return removed; -} - -DISPATCH_NOINLINE -static bool -_dispatch_mach_reply_kevent_unregister(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, uint32_t options) -{ - dispatch_assert(!_TAILQ_IS_ENQUEUED(dmr, dmr_list)); - bool disconnected = (options & DU_UNREGISTER_DISCONNECTED); - _dispatch_debug("machport[0x%08x]: unregistering for reply%s, ctxt %p", - (mach_port_t)dmr->du_ident, disconnected ? " (disconnected)" : "", - dmr->dmr_ctxt); - if (!_dispatch_unote_unregister(dmr, options)) { - _dispatch_debug("machport[0x%08x]: deferred delete kevent[%p]", - (mach_port_t)dmr->du_ident, dmr); - dispatch_assert(options == DU_UNREGISTER_DISCONNECTED); - return false; + if (_dispatch_unote_registered(dmr) && + !_dispatch_unote_unregister(dmr, options & DMRU_DUU_MASK)) { + dispatch_assert(!sync_waiter); // sync waiters never use kevent + if (options & DMRU_CANCEL) { + // when canceling, failed unregistrations are put back in the list + // the caller has the lock held + LIST_INSERT_HEAD(&dmsr->dmsr_replies, dmr, dmr_list); + } + return; } dispatch_mach_msg_t dmsgr = NULL; dispatch_queue_t drq = NULL; if (disconnected) { - // The next call is guaranteed to always transfer or consume the voucher - // in the dmr, if there is one. - dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, - dmr->dmr_async_reply ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED - : DISPATCH_MACH_DISCONNECTED); - if (dmr->dmr_ctxt) { + if (dm->dm_is_xpc && dmr->dmr_ctxt) { drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt); } + dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, + drq ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); + // _dispatch_mach_msg_create_reply_disconnected() consumes the voucher dispatch_assert(dmr->dmr_voucher == NULL); } else if (dmr->dmr_voucher) { _voucher_release(dmr->dmr_voucher); dmr->dmr_voucher = NULL; } - _dispatch_unote_dispose(dmr); + if (!sync_waiter) { + _dispatch_unote_dispose(dmr); + } if (dmsgr) { if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); } else { - _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr, 0); } } - return true; + if (options & DMRU_ASYNC_MERGE) { + if (wakeup) { + return dx_wakeup(dm, 0, + DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); + } + return _dispatch_release_2_tailcall(dm); + } } DISPATCH_NOINLINE static void _dispatch_mach_reply_waiter_register(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, - dispatch_mach_msg_t dmsg, mach_msg_option_t msg_opts) + dispatch_mach_reply_wait_refs_t dwr, mach_port_t reply_port, + dispatch_mach_msg_t dmsg) { + dispatch_mach_reply_refs_t dmr = &dwr->dwr_refs; dmr->du_owner_wref = _dispatch_ptr2wref(dm); - dmr->du_wlh = NULL; dmr->du_filter = EVFILT_MACHPORT; dmr->du_ident = reply_port; - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { - _dispatch_mach_reply_mark_reply_port_owned(dmr); - } else { + if (!dmr->dmr_reply_port_owned) { if (dmsg->dmsg_voucher) { dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); } - dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + dmr->dmr_priority = dmsg->dmsg_priority; // make reply context visible to leaks rdar://11777199 dmr->dmr_ctxt = dmsg->do_ctxt; } 
_dispatch_debug("machport[0x%08x]: registering for sync reply, ctxt %p", reply_port, dmsg->do_ctxt); - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, - "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + _dispatch_mach_reply_list_insert(dm->dm_send_refs, dmr); } DISPATCH_NOINLINE @@ -420,6 +352,7 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, { dispatch_mach_reply_refs_t dmr; dispatch_priority_t mpri, pri, overcommit; + dispatch_qos_t fallback; dispatch_wlh_t wlh; dmr = dux_create(&_dispatch_mach_type_reply, reply_port, 0)._dmr; @@ -428,58 +361,51 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, if (dmsg->dmsg_voucher) { dmr->dmr_voucher = _voucher_retain(dmsg->dmsg_voucher); } - dmr->dmr_priority = _dispatch_priority_from_pp(dmsg->dmsg_priority); + dmr->dmr_priority = dmsg->dmsg_priority; // make reply context visible to leaks rdar://11777199 dmr->dmr_ctxt = dmsg->do_ctxt; dispatch_queue_t drq = NULL; - if (dmsg->dmsg_options & DISPATCH_MACH_ASYNC_REPLY) { - dmr->dmr_async_reply = true; + if (dm->dm_is_xpc && dmsg->do_ctxt) { drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); } - - if (!drq) { + if (unlikely(!drq && _dispatch_unote_wlh(dm->dm_recv_refs))) { + wlh = _dispatch_unote_wlh(dm->dm_recv_refs); pri = dm->dq_priority; - wlh = dm->dm_recv_refs->du_wlh; - } else if (dx_type(drq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { - pri = DISPATCH_PRIORITY_FLAG_MANAGER; - wlh = (dispatch_wlh_t)drq; } else if (dx_hastypeflag(drq, QUEUE_ROOT)) { - pri = drq->dq_priority; wlh = DISPATCH_WLH_ANON; - } else if (drq == dm->do_targetq) { - pri = dm->dq_priority; - wlh = dm->dm_recv_refs->du_wlh; + if (_dispatch_is_in_root_queues_array(drq)) { + pri = 
drq->dq_priority; + } else { + pri = DISPATCH_PRIORITY_FLAG_MANAGER; + } } else if (!(pri = _dispatch_queue_compute_priority_and_wlh(drq, &wlh))) { - pri = drq->dq_priority; wlh = DISPATCH_WLH_ANON; + pri = drq->dq_priority; } + mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority); + overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + fallback = _dispatch_priority_fallback_qos(pri); if (pri & DISPATCH_PRIORITY_REQUESTED_MASK) { - overcommit = pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; pri &= DISPATCH_PRIORITY_REQUESTED_MASK; - mpri = _dispatch_priority_from_pp_strip_flags(dmsg->dmsg_priority); if (pri < mpri) pri = mpri; pri |= overcommit; + } else if (fallback && mpri) { + pri = mpri | overcommit; + } else if (fallback && !mpri) { + pri = _dispatch_priority_make(fallback, 0) | overcommit; } else { pri = DISPATCH_PRIORITY_FLAG_MANAGER; + wlh = DISPATCH_WLH_ANON; } _dispatch_debug("machport[0x%08x]: registering for reply, ctxt %p", reply_port, dmsg->do_ctxt); - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - if (unlikely(_TAILQ_IS_ENQUEUED(dmr, dmr_list))) { - DISPATCH_INTERNAL_CRASH(dmr->dmr_list.tqe_prev, - "Reply already registered"); - } - TAILQ_INSERT_TAIL(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); + _dispatch_mach_reply_list_insert(dm->dm_send_refs, dmr); if (!_dispatch_unote_register(dmr, wlh, pri)) { - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - _dispatch_mach_reply_list_remove(dm, dmr); - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); - _dispatch_mach_reply_kevent_unregister(dm, dmr, - DU_UNREGISTER_DISCONNECTED); + uint32_t options = DMRU_MUST_SUCCEED | DMRU_REMOVE | DMRU_DISCONNECTED; + _dispatch_mach_reply_unregister(dm, dmr, options); } } @@ -498,6 +424,22 @@ _dispatch_use_mach_special_reply_port(void) #endif } +static void +_dispatch_destruct_reply_port(mach_port_t reply_port, + enum 
thread_destruct_special_reply_port_rights rights) +{ + kern_return_t kr = KERN_SUCCESS; + + if (_dispatch_use_mach_special_reply_port()) { + kr = thread_destruct_special_reply_port(reply_port, rights); + } else if (rights == THREAD_SPECIAL_REPLY_PORT_ALL || + rights == THREAD_SPECIAL_REPLY_PORT_RECEIVE_ONLY) { + kr = mach_port_destruct(mach_task_self(), reply_port, 0, 0); + } + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); +} + static mach_port_t _dispatch_get_thread_reply_port(void) { @@ -567,10 +509,8 @@ _dispatch_set_thread_reply_port(mach_port_t reply_port) mrp = _dispatch_get_thread_mig_reply_port(); } if (mrp) { - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); + _dispatch_destruct_reply_port(reply_port, + THREAD_SPECIAL_REPLY_PORT_ALL); _dispatch_debug("machport[0x%08x]: deallocated sync reply port " "(found 0x%08x)", reply_port, mrp); } else { @@ -626,22 +566,20 @@ _dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) static inline dispatch_mach_msg_t _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, - dispatch_mach_reply_refs_t dmr, uint32_t flags) + dispatch_mach_reply_refs_t dmr, uint32_t flags, pthread_priority_t pp) { dispatch_mach_msg_destructor_t destructor; dispatch_mach_msg_t dmsg; voucher_t voucher; - pthread_priority_t pp; if (dmr) { _voucher_mach_msg_clear(hdr, false); // deallocate reply message voucher - pp = _dispatch_priority_to_pp(dmr->dmr_priority); + pp = dmr->dmr_priority; voucher = dmr->dmr_voucher; dmr->dmr_voucher = NULL; // transfer reference } else { voucher = voucher_create_with_mach_msg(hdr); - pp = _dispatch_priority_compute_propagated( - _voucher_get_priority(voucher), 0); + pp = _dispatch_priority_compute_propagated(pp, 0); } destructor = (flags & DISPATCH_EV_MSG_NEEDS_FREE) ? 
@@ -663,81 +601,56 @@ _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr, mach_msg_size_t siz) -{ - // this function is very similar with what _dispatch_source_merge_evt does - // but can't reuse it as handling the message must be protected by the - // internal refcount between the first half and the trailer of what - // _dispatch_source_merge_evt does. - - dispatch_mach_recv_refs_t dmrr = du._dmrr; - dispatch_mach_t dm = _dispatch_wref2ptr(dmrr->du_owner_wref); - dispatch_queue_flags_t dqf; - dispatch_mach_msg_t dmsg; - - dispatch_assert(_dispatch_unote_needs_rearm(du)); + mach_msg_header_t *hdr, mach_msg_size_t siz, + pthread_priority_t msg_pp, pthread_priority_t ovr_pp) +{ if (flags & EV_VANISHED) { DISPATCH_CLIENT_CRASH(du._du->du_ident, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } - // once we modify the queue atomic flags below, it will allow concurrent - // threads running _dispatch_mach_invoke2 to dispose of the source, - // so we can't safely borrow the reference we get from the muxnote udata - // anymore, and need our own - dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_CONSUME_2; - _dispatch_retain_2(dm); // rdar://20382435 - - if (unlikely((flags & EV_ONESHOT) && !(flags & EV_DELETE))) { - dqf = _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, - DSF_DEFERRED_DELETE, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: deferred delete oneshot kevent[%p]", - dm, dmrr); - } else if (unlikely(flags & (EV_ONESHOT | EV_DELETE))) { - _dispatch_source_refs_unregister(dm->_as_ds, - DU_UNREGISTER_ALREADY_DELETED); - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", dm, dmrr); - } else { - dqf = _dispatch_queue_atomic_flags_clear(dm->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", dm, dmrr); - } - 
_dispatch_debug_machport(hdr->msgh_remote_port); _dispatch_debug("machport[0x%08x]: received msg id 0x%x, reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); - if (dqf & DSF_CANCELED) { + dispatch_mach_t dm = _dispatch_wref2ptr(du._dmrr->du_owner_wref); + if (unlikely(_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) { _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); mach_msg_destroy(hdr); if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { free(hdr); } - return dx_wakeup(dm, 0, wflags | DISPATCH_WAKEUP_MAKE_DIRTY); + } else { + // Once the mach channel disarming is visible, cancellation will switch + // to immediately destroy messages. If we're preempted here, then the + // whole cancellation sequence may be complete by the time we really + // enqueue the message. + // + // _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it + // out to keep the promise that DISPATCH_MACH_DISCONNECTED is the last + // event sent. + dispatch_mach_msg_t dmsg; + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags, msg_pp); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp); + } + + if (unlikely(_dispatch_unote_needs_delete(du))) { + return dx_wakeup(dm, 0, DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } - - // Once the mach channel disarming is visible, cancellation will switch to - // immediate deletion. If we're preempted here, then the whole cancellation - // sequence may be complete by the time we really enqueue the message. - // - // _dispatch_mach_msg_invoke_with_mach() is responsible for filtering it out - // to keep the promise that DISPATCH_MACH_DISCONNECTED is the last - // event sent. 
- - dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags); - _dispatch_mach_handle_or_push_received_msg(dm, dmsg); return _dispatch_release_2_tailcall(dm); } void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr, mach_msg_size_t siz) + mach_msg_header_t *hdr, mach_msg_size_t siz, + pthread_priority_t msg_pp, pthread_priority_t ovr_pp) { dispatch_mach_reply_refs_t dmr = du._dmr; dispatch_mach_t dm = _dispatch_wref2ptr(dmr->du_owner_wref); - bool canceled = (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED); + bool canceled = (_dispatch_queue_atomic_flags(dm) & DSF_CANCELED); dispatch_mach_msg_t dmsg = NULL; _dispatch_debug_machport(hdr->msgh_remote_port); @@ -745,18 +658,18 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); if (!canceled) { - dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags); + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags, msg_pp); } if (dmsg) { dispatch_queue_t drq = NULL; - if (dmsg->do_ctxt) { + if (dm->dm_is_xpc && dmsg->do_ctxt) { drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); } if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); } else { - _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp); } } else { _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", @@ -767,41 +680,41 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, } } - dispatch_wakeup_flags_t wflags = 0; - uint32_t options = DU_UNREGISTER_IMMEDIATE_DELETE; - if (canceled) { - options |= DU_UNREGISTER_DISCONNECTED; - } + uint32_t options = DMRU_ASYNC_MERGE | DMRU_REMOVE; + options |= DMRU_MUST_SUCCEED | DMRU_DELETE_ACK; + if (canceled) options |= DMRU_DISCONNECTED; + dispatch_assert(_dispatch_unote_needs_delete(dmr)); + _dispatch_mach_reply_unregister(dm, dmr, options); // consumes the +2 +} - 
_dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - bool removed = _dispatch_mach_reply_list_remove(dm, dmr); - dispatch_assert(removed); - if (TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies) && - (dm->dm_send_refs->dmsr_disconnect_cnt || - (dm->dq_atomic_flags & DSF_CANCELED))) { - // When the list is empty, _dispatch_mach_disconnect() may release the - // last reference count on the Mach channel. To avoid this, take our - // own reference before releasing the lock. - wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2; - _dispatch_retain_2(dm); +DISPATCH_ALWAYS_INLINE +static void +_dispatch_mach_stack_probe(void *addr, size_t size) +{ +#if TARGET_OS_MAC && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) && \ + (defined(__x86_64__) || defined(__arm64__)) + // there should be a __has_feature() macro test + // for this, for now we approximate it, for when the compiler + // is generating calls to ____chkstk_darwin on our behalf + (void)addr; (void)size; +#else + for (mach_vm_address_t p = mach_vm_trunc_page(addr + vm_page_size); + p < (mach_vm_address_t)addr + size; p += vm_page_size) { + *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard } - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); - - bool result = _dispatch_mach_reply_kevent_unregister(dm, dmr, options); - dispatch_assert(result); - if (wflags) dx_wakeup(dm, 0, wflags); +#endif } DISPATCH_ALWAYS_INLINE static inline dispatch_mach_msg_t _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t reply_port, + dispatch_mach_reply_wait_refs_t dwr, mach_port_t reply_port, mach_port_t send) { - if (slowpath(!MACH_PORT_VALID(reply_port))) { + if (unlikely(!MACH_PORT_VALID(reply_port))) { DISPATCH_CLIENT_CRASH(reply_port, "Invalid reply port"); } - void *ctxt = dmr->dmr_ctxt; + void *ctxt = dwr->dwr_refs.dmr_ctxt; mach_msg_header_t *hdr, *hdr2 = NULL; void *hdr_copyout_addr; mach_msg_size_t siz, msgsiz = 0; @@ -811,10 
+724,7 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, siz = mach_vm_round_page(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + DISPATCH_MACH_TRAILER_SIZE); hdr = alloca(siz); - for (mach_vm_address_t p = mach_vm_trunc_page(hdr + vm_page_size); - p < (mach_vm_address_t)hdr + siz; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } + _dispatch_mach_stack_probe(hdr, siz); options = DISPATCH_MACH_RCV_OPTIONS & (~MACH_RCV_VOUCHER); if (MACH_PORT_VALID(send)) { notify = send; @@ -834,8 +744,7 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, mach_error_string(kr), kr); switch (kr) { case MACH_RCV_TOO_LARGE: - if (!fastpath(hdr->msgh_size <= UINT_MAX - - DISPATCH_MACH_TRAILER_SIZE)) { + if (unlikely(hdr->msgh_size > UINT_MAX - DISPATCH_MACH_TRAILER_SIZE)) { DISPATCH_CLIENT_CRASH(hdr->msgh_size, "Overlarge message"); } if (options & MACH_RCV_LARGE) { @@ -860,6 +769,10 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, // channel was disconnected/canceled and reply port destroyed _dispatch_debug("machport[0x%08x]: sync reply port destroyed, ctxt %p: " "%s - 0x%x", reply_port, ctxt, mach_error_string(kr), kr); + if (dwr->dwr_refs.dmr_reply_port_owned) { + _dispatch_destruct_reply_port(reply_port, + THREAD_SPECIAL_REPLY_PORT_SEND_ONLY); + } goto out; case MACH_MSG_SUCCESS: if (hdr->msgh_remote_port) { @@ -879,9 +792,9 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, DISPATCH_INTERNAL_CRASH(kr, "Unexpected error from mach_msg_receive"); break; } - _dispatch_mach_msg_reply_received(dm, dmr, hdr->msgh_local_port); + _dispatch_mach_msg_reply_received(dm, dwr, hdr->msgh_local_port); hdr->msgh_local_port = MACH_PORT_NULL; - if (slowpath((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { + if (unlikely((dm->dq_atomic_flags & DSF_CANCELED) || kr)) { if (!kr) mach_msg_destroy(hdr); goto out; } @@ -904,26 +817,38 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, static inline void 
_dispatch_mach_msg_reply_received(dispatch_mach_t dm, - dispatch_mach_reply_refs_t dmr, mach_port_t local_port) + dispatch_mach_reply_wait_refs_t dwr, mach_port_t local_port) { - bool removed = _dispatch_mach_reply_tryremove(dm, dmr); - if (!MACH_PORT_VALID(local_port) || !removed) { - // port moved/destroyed during receive, or reply waiter was never - // registered or already removed (disconnected) - return; + dispatch_mach_reply_refs_t dmr = &dwr->dwr_refs; + bool removed = _dispatch_mach_reply_list_tryremove(dm->dm_send_refs, dmr); + mach_port_t reply_port = (mach_port_t)dmr->du_ident; + + if (removed) { + _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", + reply_port, dmr->dmr_ctxt); } - mach_port_t reply_port = _dispatch_mach_reply_get_reply_port( - (mach_port_t)dmr->du_ident); - _dispatch_debug("machport[0x%08x]: unregistered for sync reply, ctxt %p", - reply_port, dmr->dmr_ctxt); - if (_dispatch_mach_reply_is_reply_port_owned(dmr)) { - _dispatch_set_thread_reply_port(reply_port); - if (local_port != reply_port) { + + if (dmr->dmr_reply_port_owned) { + if (local_port != reply_port && + (removed || MACH_PORT_VALID(local_port))) { DISPATCH_CLIENT_CRASH(local_port, "Reply received on unexpected port"); } + if (removed) { + _dispatch_set_thread_reply_port(reply_port); + } else { + _dispatch_destruct_reply_port(reply_port, + THREAD_SPECIAL_REPLY_PORT_SEND_ONLY); + } return; } + + if (!MACH_PORT_VALID(local_port) || !removed) { + // port moved/destroyed during receive, or reply waiter was never + // registered or already removed (disconnected) + return; + } + mach_msg_header_t *hdr; dispatch_mach_msg_t dmsg; dmsg = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), @@ -931,10 +856,10 @@ _dispatch_mach_msg_reply_received(dispatch_mach_t dm, hdr->msgh_local_port = local_port; dmsg->dmsg_voucher = dmr->dmr_voucher; dmr->dmr_voucher = NULL; // transfer reference - dmsg->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + 
dmsg->dmsg_priority = dmr->dmr_priority; dmsg->do_ctxt = dmr->dmr_ctxt; _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_REPLY_RECEIVED); - return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg, 0); } static inline void @@ -950,7 +875,7 @@ _dispatch_mach_msg_disconnected(dispatch_mach_t dm, mach_port_t local_port, _dispatch_mach_msg_set_reason(dmsg, 0, DISPATCH_MACH_DISCONNECTED); _dispatch_debug("machport[0x%08x]: %s right disconnected", local_port ? local_port : remote_port, local_port ? "receive" : "send"); - return _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + return _dispatch_mach_handle_or_push_received_msg(dm, dmsg, 0); } static inline dispatch_mach_msg_t @@ -958,52 +883,44 @@ _dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, dispatch_mach_reply_refs_t dmr, dispatch_mach_reason_t reason) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; - mach_port_t reply_port = dmsg ? dmsg->dmsg_reply : - _dispatch_mach_reply_get_reply_port((mach_port_t)dmr->du_ident); - voucher_t v; + mach_port_t reply_port = dmsg ? 
dmsg->dmsg_reply :(mach_port_t)dmr->du_ident; if (!reply_port) { - if (!dmsg) { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - if (v) _voucher_release(v); + if (!dmsg && dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; } return NULL; } - if (dmsg) { - v = dmsg->dmsg_voucher; - if (v) _voucher_retain(v); - } else { - v = dmr->dmr_voucher; - dmr->dmr_voucher = NULL; // transfer reference - } - - if ((dmsg && (dmsg->dmsg_options & DISPATCH_MACH_WAIT_FOR_REPLY) && - (dmsg->dmsg_options & DISPATCH_MACH_OWNED_REPLY_PORT)) || - (dmr && !_dispatch_unote_registered(dmr) && - _dispatch_mach_reply_is_reply_port_owned(dmr))) { - if (v) _voucher_release(v); + if (dmr && !_dispatch_unote_registered(dmr) && dmr->dmr_reply_port_owned) { + if (dmr->dmr_voucher) { + _voucher_release(dmr->dmr_voucher); + dmr->dmr_voucher = NULL; + } // deallocate owned reply port to break _dispatch_mach_msg_reply_recv - // out of waiting in mach_msg(MACH_RCV_MSG) - kern_return_t kr = mach_port_mod_refs(mach_task_self(), reply_port, - MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - dispatch_assume_zero(kr); + // out of waiting in mach_msg(MACH_RCV_MSG). 
+ // + // after this call, dmr can become invalid + _dispatch_destruct_reply_port(reply_port, + THREAD_SPECIAL_REPLY_PORT_RECEIVE_ONLY); return NULL; } mach_msg_header_t *hdr; dmsgr = dispatch_mach_msg_create(NULL, sizeof(mach_msg_header_t), DISPATCH_MACH_MSG_DESTRUCTOR_DEFAULT, &hdr); - dmsgr->dmsg_voucher = v; hdr->msgh_local_port = reply_port; if (dmsg) { dmsgr->dmsg_priority = dmsg->dmsg_priority; dmsgr->do_ctxt = dmsg->do_ctxt; + dmsgr->dmsg_voucher = dmsg->dmsg_voucher; + if (dmsgr->dmsg_voucher) _voucher_retain(dmsgr->dmsg_voucher); } else { - dmsgr->dmsg_priority = _dispatch_priority_to_pp(dmr->dmr_priority); + dmsgr->dmsg_priority = dmr->dmr_priority; dmsgr->do_ctxt = dmr->dmr_ctxt; + dmsgr->dmsg_voucher = dmr->dmr_voucher; + dmr->dmr_voucher = NULL; // transfer reference } _dispatch_mach_msg_set_reason(dmsgr, 0, reason); _dispatch_debug("machport[0x%08x]: reply disconnected, ctxt %p", @@ -1013,7 +930,8 @@ _dispatch_mach_msg_create_reply_disconnected(dispatch_object_t dou, DISPATCH_NOINLINE static void -_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) +_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, + dispatch_mach_reply_wait_refs_t dwr) { dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr; dispatch_queue_t drq = NULL; @@ -1025,20 +943,20 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) msg_opts, msg->msgh_voucher_port, dmsg->dmsg_reply); unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, - msg_opts & DISPATCH_MACH_ASYNC_REPLY - ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED - : DISPATCH_MACH_DISCONNECTED); - if (dmsg->do_ctxt) { + if (dm->dm_is_xpc && dmsg->do_ctxt) { drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); } + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, + dwr ? &dwr->dwr_refs : NULL, + drq ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); _dispatch_mach_msg_set_reason(dmsg, 0, reason); - _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg, 0); if (dmsgr) { if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); } else { - _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr, 0); } } } @@ -1046,7 +964,7 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou) DISPATCH_NOINLINE static uint32_t _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, - dispatch_mach_reply_refs_t dmr, dispatch_qos_t qos, + dispatch_mach_reply_wait_refs_t dwr, dispatch_qos_t qos, dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dsrr = dm->dm_send_refs; @@ -1065,9 +983,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) { // send initial checkin message if (unlikely(_dispatch_unote_registered(dsrr) && - _dispatch_queue_get_current() != &_dispatch_mgr_q)) { + _dispatch_queue_get_current() != _dispatch_mgr_q._as_dq)) { // send kevent must be uninstalled on the manager queue - dm->dm_needs_mgr = 1; + dm->dm_needs_mgr = true; goto out; } if (unlikely(!_dispatch_mach_msg_send(dm, @@ -1086,18 +1004,16 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (dmsg != dsrr->dmsr_checkin) { msg->msgh_remote_port = dsrr->dmsr_send; } - if (_dispatch_queue_get_current() == &_dispatch_mgr_q) { + if (_dispatch_queue_get_current() == _dispatch_mgr_q._as_dq) { if (unlikely(!_dispatch_unote_registered(dsrr))) { _dispatch_mach_notification_kevent_register(dm, msg->msgh_remote_port); + dispatch_assert(_dispatch_unote_registered(dsrr)); } - if (likely(_dispatch_unote_registered(dsrr))) { - if (os_atomic_load2o(dsrr, dmsr_notification_armed, - relaxed)) { - goto out; - } - opts |= MACH_SEND_NOTIFY; + 
if (dsrr->dmsr_notification_armed) { + goto out; } + opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { @@ -1122,14 +1038,13 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, _dispatch_debug_machport(msg->msgh_remote_port); if (reply_port) _dispatch_debug_machport(reply_port); if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { - if (msg_opts & DISPATCH_MACH_OWNED_REPLY_PORT) { + if (dwr->dwr_refs.dmr_reply_port_owned) { if (_dispatch_use_mach_special_reply_port()) { opts |= MACH_SEND_SYNC_OVERRIDE; } _dispatch_clear_thread_reply_port(reply_port); } - _dispatch_mach_reply_waiter_register(dm, dmr, reply_port, dmsg, - msg_opts); + _dispatch_mach_reply_waiter_register(dm, dwr, reply_port, dmsg); } kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, msg_priority); @@ -1139,8 +1054,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, opts, msg_opts, msg->msgh_voucher_port, reply_port, mach_error_string(kr), kr); if (unlikely(kr && (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY))) { - _dispatch_mach_reply_waiter_unregister(dm, dmr, - DU_UNREGISTER_REPLY_REMOVE); + uint32_t options = DMRU_MUST_SUCCEED | DMRU_REMOVE; + dispatch_assert(dwr); + _dispatch_mach_reply_unregister(dm, &dwr->dwr_refs, options); } if (clear_voucher) { if (kr == MACH_SEND_INVALID_VOUCHER && msg->msgh_voucher_port) { @@ -1153,12 +1069,10 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, } if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { if (opts & MACH_SEND_NOTIFY) { - _dispatch_debug("machport[0x%08x]: send-possible notification " - "armed", (mach_port_t)dsrr->du_ident); _dispatch_mach_notification_set_armed(dsrr); } else { // send kevent must be installed on the manager queue - dm->dm_needs_mgr = 1; + dm->dm_needs_mgr = true; } if (ipc_kvoucher) { _dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); @@ -1184,15 +1098,15 @@ 
_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, _dispatch_unote_registered(dsrr))) { _dispatch_mach_notification_kevent_unregister(dm); } - if (slowpath(kr)) { + if (unlikely(kr)) { // Send failed, so reply was never registered - dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, NULL, - msg_opts & DISPATCH_MACH_ASYNC_REPLY - ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED - : DISPATCH_MACH_DISCONNECTED); - if (dmsg->do_ctxt) { + if (dm->dm_is_xpc && dmsg->do_ctxt) { drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); } + dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, + dwr ? &dwr->dwr_refs : NULL, + drq ? DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED + : DISPATCH_MACH_DISCONNECTED); } _dispatch_mach_msg_set_reason(dmsg, kr, 0); if ((send_flags & DM_SEND_INVOKE_IMMEDIATE_SEND) && @@ -1200,13 +1114,13 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, // Return sent message synchronously send_status |= DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT; } else { - _dispatch_mach_handle_or_push_received_msg(dm, dmsg); + _dispatch_mach_handle_or_push_received_msg(dm, dmsg, 0); } if (dmsgr) { if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsgr, drq); } else { - _dispatch_mach_handle_or_push_received_msg(dm, dmsgr); + _dispatch_mach_handle_or_push_received_msg(dm, dmsgr, 0); } } send_status |= DM_SEND_STATUS_SUCCESS; @@ -1249,30 +1163,18 @@ _dmsr_state_merge_override(uint64_t dmsr_state, dispatch_qos_t qos) } #define _dispatch_mach_send_push_update_tail(dmsr, tail) \ - os_mpsc_push_update_tail(dmsr, dmsr, tail, do_next) -#define _dispatch_mach_send_push_update_head(dmsr, head) \ - os_mpsc_push_update_head(dmsr, dmsr, head) + os_mpsc_push_update_tail(os_mpsc(dmsr, dmsr), tail, do_next) +#define _dispatch_mach_send_push_update_prev(dmsr, prev, head) \ + os_mpsc_push_update_prev(os_mpsc(dmsr, dmsr), prev, head, do_next) #define _dispatch_mach_send_get_head(dmsr) \ - os_mpsc_get_head(dmsr, dmsr) -#define 
_dispatch_mach_send_unpop_head(dmsr, dc, dc_next) \ - os_mpsc_undo_pop_head(dmsr, dmsr, dc, dc_next, do_next) + os_mpsc_get_head(os_mpsc(dmsr, dmsr)) +#define _dispatch_mach_send_undo_pop_head(dmsr, dc, dc_next) \ + os_mpsc_undo_pop_head(os_mpsc(dmsr, dmsr), dc, dc_next, do_next) #define _dispatch_mach_send_pop_head(dmsr, head) \ - os_mpsc_pop_head(dmsr, dmsr, head, do_next) + os_mpsc_pop_head(os_mpsc(dmsr, dmsr), head, do_next) #define dm_push(dm, dc, qos) \ - _dispatch_queue_push((dm)->_as_dq, dc, qos) - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_send_push_inline(dispatch_mach_send_refs_t dmsr, - dispatch_object_t dou) -{ - if (_dispatch_mach_send_push_update_tail(dmsr, dou._do)) { - _dispatch_mach_send_push_update_head(dmsr, dou._do); - return true; - } - return false; -} + _dispatch_lane_push(dm, dc, qos) DISPATCH_NOINLINE static bool @@ -1280,16 +1182,16 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, dispatch_mach_send_invoke_flags_t send_flags) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - dispatch_mach_reply_refs_t dmr; + dispatch_mach_reply_wait_refs_t dwr; dispatch_mach_msg_t dmsg; struct dispatch_object_s *dc = NULL, *next_dc = NULL; dispatch_qos_t qos = _dmsr_state_max_qos(dmsr->dmsr_state); uint64_t old_state, new_state; uint32_t send_status; - bool needs_mgr, disconnecting, returning_send_result = false; + bool returning_send_result = false; + dispatch_wakeup_flags_t wflags = 0; again: - needs_mgr = false; disconnecting = false; while (dmsr->dmsr_tail) { dc = _dispatch_mach_send_get_head(dmsr); do { @@ -1302,24 +1204,24 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, if (!(send_flags & DM_SEND_INVOKE_CAN_RUN_BARRIER)) { goto partial_drain; } - _dispatch_continuation_pop(dc, NULL, flags, dm->_as_dq); + _dispatch_continuation_pop(dc, NULL, flags, dm); continue; } if (_dispatch_object_is_sync_waiter(dc)) { dmsg = ((dispatch_continuation_t)dc)->dc_data; - dmr = 
((dispatch_continuation_t)dc)->dc_other; + dwr = ((dispatch_continuation_t)dc)->dc_other; } else if (_dispatch_object_has_vtable(dc)) { dmsg = (dispatch_mach_msg_t)dc; - dmr = NULL; + dwr = NULL; } else { if (_dispatch_unote_registered(dmsr) && - (_dispatch_queue_get_current() != &_dispatch_mgr_q)) { + (_dispatch_queue_get_current() != _dispatch_mgr_q._as_dq)) { // send kevent must be uninstalled on the manager queue - needs_mgr = true; + dm->dm_needs_mgr = true; + wflags |= DISPATCH_WAKEUP_MAKE_DIRTY; goto partial_drain; } if (unlikely(!_dispatch_mach_reconnect_invoke(dm, dc))) { - disconnecting = true; goto partial_drain; } _dispatch_perfmon_workitem_inc(); @@ -1328,12 +1230,13 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, _dispatch_voucher_ktrace_dmsg_pop(dmsg); if (unlikely(dmsr->dmsr_disconnect_cnt || (dm->dq_atomic_flags & DSF_CANCELED))) { - _dispatch_mach_msg_not_sent(dm, dmsg); + _dispatch_mach_msg_not_sent(dm, dmsg, dwr); _dispatch_perfmon_workitem_inc(); continue; } - send_status = _dispatch_mach_msg_send(dm, dmsg, dmr, qos, sf); + send_status = _dispatch_mach_msg_send(dm, dmsg, dwr, qos, sf); if (unlikely(!send_status)) { + if (dm->dm_needs_mgr) wflags |= DISPATCH_WAKEUP_MAKE_DIRTY; goto partial_drain; } if (send_status & DM_SEND_STATUS_RETURNING_IMMEDIATE_SEND_RESULT) { @@ -1358,7 +1261,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, partial_drain: // if this is not a complete drain, we must undo some things - _dispatch_mach_send_unpop_head(dmsr, dc, next_dc); + _dispatch_mach_send_undo_pop_head(dmsr, dc, next_dc); if (_dispatch_object_has_type(dc, DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { @@ -1390,23 +1293,38 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, _dispatch_set_basepri_override_qos(_dmsr_state_max_qos(old_state)); } + qos = _dmsr_state_max_qos(new_state); if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { - qos = 
_dmsr_state_max_qos(new_state); os_atomic_thread_fence(dependency); dmsr = os_atomic_force_dependency_on(dmsr, new_state); goto again; } if (new_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - qos = _dmsr_state_max_qos(new_state); + // we don't need to wakeup the mach channel with DISPATCH_WAKEUP_EVENT + // because a push on the receive queue always causes a wakeup even + // wen DSF_NEEDS_EVENT is set. _dispatch_mach_push_send_barrier_drain(dm, qos); - } else { - if (needs_mgr || dm->dm_needs_mgr) { - qos = _dmsr_state_max_qos(new_state); + return returning_send_result; + } + + if (new_state == 0 && dm->dm_disconnected && !dm->dm_cancel_handler_called){ + // cancelation waits for the send queue to be empty + // so when we know cancelation is pending, and we empty the queue, + // force an EVENT wakeup. + wflags |= DISPATCH_WAKEUP_EVENT | DISPATCH_WAKEUP_MAKE_DIRTY; + } + if ((old_state ^ new_state) & DISPATCH_MACH_STATE_ENQUEUED) { + if (wflags) { + wflags |= DISPATCH_WAKEUP_CONSUME_2; } else { - qos = 0; + // Note that after this release + // the mach channel may be gone. + _dispatch_release_2(dm); } - if (!disconnecting) dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } + if (wflags) { + dx_wakeup(dm, dm->dm_needs_mgr ? 
qos : 0, wflags); } return returning_send_result; } @@ -1456,9 +1374,6 @@ _dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags, if (unlikely((old_state & canlock_mask) != canlock_state)) { return; } - if (send_flags & DM_SEND_INVOKE_CANCEL) { - _dispatch_mach_cancel(dm); - } _dispatch_mach_send_drain(dm, flags, send_flags); } @@ -1468,15 +1383,15 @@ _dispatch_mach_send_barrier_drain_invoke(dispatch_continuation_t dc, DISPATCH_UNUSED dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_queue_get_current(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dispatch_mach_t dm = upcast(_dispatch_queue_get_current())._dm; + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION; dispatch_thread_frame_s dtf; DISPATCH_COMPILER_CAN_ASSUME(dc->dc_priority == DISPATCH_NO_PRIORITY); DISPATCH_COMPILER_CAN_ASSUME(dc->dc_voucher == DISPATCH_NO_VOUCHER); // hide the mach channel (see _dispatch_mach_barrier_invoke comment) _dispatch_thread_frame_stash(&dtf); - _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags,{ + _dispatch_continuation_pop_forwarded(dc, dc_flags, dm, { _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_NEEDS_BARRIER | DM_SEND_INVOKE_CAN_RUN_BARRIER); }); @@ -1499,33 +1414,42 @@ _dispatch_mach_push_send_barrier_drain(dispatch_mach_t dm, dispatch_qos_t qos) DISPATCH_NOINLINE static void -_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, +_dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, dispatch_qos_t qos) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; uint64_t old_state, new_state, state_flags = 0; + struct dispatch_object_s *prev; + dispatch_wakeup_flags_t wflags = 0; + bool is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)); dispatch_tid owner; - bool wakeup; - - // when pushing a send barrier that destroys - // the last reference to this channel, and the send queue is already - // 
draining on another thread, the send barrier may run as soon as - // _dispatch_mach_send_push_inline() returns. - _dispatch_retain_2(dm); - - wakeup = _dispatch_mach_send_push_inline(dmsr, dc); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; - if (dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)) { + + // the send queue needs to retain + // the mach channel if not empty, for the whole duration of this call + // + // When we may add the ENQUEUED bit, we need to reserve 2 more that we will + // transfer to _dispatch_mach_send_drain(). + prev = _dispatch_mach_send_push_update_tail(dmsr, dou._do); + _dispatch_retain_n_unsafe(dm, os_mpsc_push_was_empty(prev) ? 4 : 2); + _dispatch_mach_send_push_update_prev(dmsr, prev, dou._do); + + if (unlikely(os_mpsc_push_was_empty(prev))) { + state_flags = DISPATCH_MACH_STATE_DIRTY | DISPATCH_MACH_STATE_ENQUEUED; + wflags |= DISPATCH_WAKEUP_MAKE_DIRTY; + if (is_send_barrier) { state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; } - } - if (state_flags) { os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { new_state = _dmsr_state_merge_override(old_state, qos); new_state |= state_flags; }); + if ((old_state ^ new_state) & DISPATCH_MACH_STATE_ENQUEUED) { + // +2 transfered to the ENQUEUED state, _dispatch_mach_send_drain + // will consume it when clearing the bit. 
+ } else { + _dispatch_release_2_no_dispose(dm); + } } else { os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, { new_state = _dmsr_state_merge_override(old_state, qos); @@ -1535,6 +1459,7 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, }); } + qos = _dmsr_state_max_qos(new_state); owner = _dispatch_lock_owner((dispatch_lock)old_state); if (owner) { @@ -1542,21 +1467,15 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_continuation_t dc, _dispatch_wqthread_override_start_check_owner(owner, qos, &dmsr->dmsr_state_lock.dul_lock); } - return _dispatch_release_2_tailcall(dm); - } - - dispatch_wakeup_flags_t wflags = 0; - if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { + } else if (state_flags & DISPATCH_MACH_STATE_PENDING_BARRIER) { _dispatch_mach_push_send_barrier_drain(dm, qos); - } else if (wakeup || dmsr->dmsr_disconnect_cnt || + } else if (wflags || dmsr->dmsr_disconnect_cnt || (dm->dq_atomic_flags & DSF_CANCELED)) { - wflags = DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2; + return dx_wakeup(dm, qos, wflags | DISPATCH_WAKEUP_CONSUME_2); } else if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - wflags = DISPATCH_WAKEUP_CONSUME_2; - } - if (wflags) { - return dx_wakeup(dm, qos, wflags); + return dx_wakeup(dm, qos, DISPATCH_WAKEUP_CONSUME_2); } + return _dispatch_release_2_tailcall(dm); } @@ -1569,12 +1488,19 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; dispatch_lock owner_self = _dispatch_lock_value_for_self(); uint64_t old_state, new_state, canlock_mask, state_flags = 0; + dispatch_wakeup_flags_t wflags = 0; dispatch_tid owner; + struct dispatch_object_s *prev; - bool wakeup = _dispatch_mach_send_push_inline(dmsr, dou); - if (wakeup) { - state_flags = DISPATCH_MACH_STATE_DIRTY; + prev = _dispatch_mach_send_push_update_tail(dmsr, dou._do); + if (os_mpsc_push_was_empty(prev)) { + // the send queue needs to retain + // the 
mach channel if not empty. + _dispatch_retain_2(dm); + state_flags = DISPATCH_MACH_STATE_DIRTY | DISPATCH_MACH_STATE_ENQUEUED; + wflags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; } + _dispatch_mach_send_push_update_prev(dmsr, prev, dou._do); if (unlikely(dmsr->dmsr_disconnect_cnt || (dm->dq_atomic_flags & DSF_CANCELED))) { @@ -1582,7 +1508,10 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, new_state = _dmsr_state_merge_override(old_state, qos); new_state |= state_flags; }); - dx_wakeup(dm, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + if ((old_state ^ new_state) & DISPATCH_MACH_STATE_ENQUEUED) { + wflags &= ~(dispatch_wakeup_flags_t)DISPATCH_WAKEUP_CONSUME_2; + } + dx_wakeup(dm, qos, wflags); return false; } @@ -1599,6 +1528,9 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, new_state &= ~DISPATCH_MACH_STATE_PENDING_BARRIER; } }); + if ((old_state ^ new_state) & DISPATCH_MACH_STATE_ENQUEUED) { + wflags &= ~(dispatch_wakeup_flags_t)DISPATCH_WAKEUP_CONSUME_2; + } } else { os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { new_state = _dmsr_state_merge_override(old_state, qos); @@ -1620,11 +1552,12 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, _dispatch_wqthread_override_start_check_owner(owner, qos, &dmsr->dmsr_state_lock.dul_lock); } + if (wflags & DISPATCH_WAKEUP_CONSUME_2) _dispatch_release_2(dm); return false; } if (old_state & DISPATCH_MACH_STATE_PENDING_BARRIER) { - dx_wakeup(dm, qos, 0); + dx_wakeup(dm, qos, wflags); return false; } @@ -1632,10 +1565,11 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, // been dequeued by another thread that raced us to the send queue lock. // A plain load of the head and comparison against our object pointer is // sufficient. 
- if (unlikely(!(wakeup && dou._do == dmsr->dmsr_head))) { + if (unlikely(!(wflags && dou._do == dmsr->dmsr_head))) { // Don't request immediate send result for messages we don't own send_flags &= ~DM_SEND_INVOKE_IMMEDIATE_SEND_MASK; } + if (wflags & DISPATCH_WAKEUP_CONSUME_2) _dispatch_release_2_no_dispose(dm); return _dispatch_mach_send_drain(dm, DISPATCH_INVOKE_NONE, send_flags); } @@ -1646,10 +1580,9 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm) { + uint32_t duu_options = DUU_DELETE_ACK | DUU_MUST_SUCCEED; DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - if (_dispatch_unote_registered(dm->dm_send_refs)) { - dispatch_assume(_dispatch_unote_unregister(dm->dm_send_refs, 0)); - } + _dispatch_unote_unregister(dm->dm_send_refs, duu_options); dm->dm_send_refs->du_ident = 0; } @@ -1660,13 +1593,12 @@ _dispatch_mach_notification_kevent_register(dispatch_mach_t dm,mach_port_t send) DISPATCH_ASSERT_ON_MANAGER_QUEUE(); dm->dm_send_refs->du_ident = send; dispatch_assume(_dispatch_unote_register(dm->dm_send_refs, - DISPATCH_WLH_ANON, 0)); + DISPATCH_WLH_ANON, DISPATCH_PRIORITY_FLAG_MANAGER)); } void -_dispatch_mach_merge_notification(dispatch_unote_t du, +_dispatch_mach_notification_merge_evt(dispatch_unote_t du, uint32_t flags DISPATCH_UNUSED, uintptr_t data, - uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp DISPATCH_UNUSED) { dispatch_mach_send_refs_t dmsr = du._dmsr; @@ -1676,21 +1608,27 @@ _dispatch_mach_merge_notification(dispatch_unote_t du, _dispatch_mach_send_invoke(dm, DISPATCH_INVOKE_MANAGER_DRAIN, DM_SEND_INVOKE_MAKE_DIRTY); } + _dispatch_release_2_tailcall(dm); } DISPATCH_NOINLINE static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg) + dispatch_mach_msg_t dmsg, pthread_priority_t pp) { mach_error_t error; dispatch_mach_reason_t reason = _dispatch_mach_msg_get_reason(dmsg, &error); + dispatch_qos_t qos; + if (reason == DISPATCH_MACH_MESSAGE_RECEIVED || 
!dm->dm_is_xpc || !_dispatch_mach_xpc_hooks->dmxh_direct_message_handler( dm->dm_recv_refs->dmrr_handler_ctxt, reason, dmsg, error)) { // Not XPC client or not a message that XPC can handle inline - push // it onto the channel queue. - dm_push(dm, dmsg, _dispatch_qos_from_pp(dmsg->dmsg_priority)); + _dispatch_trace_item_push(dm, dmsg); + qos = _dispatch_qos_from_pp(pp); + if (!qos) qos = _dispatch_priority_qos(dm->dq_priority); + dm_push(dm, dmsg, qos); } else { // XPC handled the message inline. Do the cleanup that would otherwise // have happened in _dispatch_mach_msg_invoke(), leaving out steps that @@ -1703,12 +1641,13 @@ _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, DISPATCH_ALWAYS_INLINE static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, - dispatch_mach_msg_t dmsg, dispatch_queue_t drq) { + dispatch_mach_msg_t dmsg, dispatch_queue_t drq) +{ // Push the message onto the given queue. This function is only used for // replies to messages sent by // dispatch_mach_send_with_result_and_async_reply_4libxpc(). 
dispatch_continuation_t dc = _dispatch_mach_msg_async_reply_wrap(dmsg, dm); - _dispatch_trace_continuation_push(drq, dc); + _dispatch_trace_item_push(drq, dc); dx_push(drq, dc, _dispatch_qos_from_pp(dmsg->dmsg_priority)); } @@ -1726,6 +1665,7 @@ _dispatch_mach_checkin_options(void) } + static inline mach_msg_option_t _dispatch_mach_send_options(void) { @@ -1734,26 +1674,36 @@ _dispatch_mach_send_options(void) } DISPATCH_ALWAYS_INLINE -static inline dispatch_qos_t -_dispatch_mach_priority_propagate(mach_msg_option_t options, - pthread_priority_t *msg_pp) +static inline mach_msg_option_t +_dispatch_mach_send_msg_prepare(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options) { -#if DISPATCH_USE_NOIMPORTANCE_QOS - if (options & MACH_SEND_NOIMPORTANCE) { - *msg_pp = 0; - return 0; +#if DISPATCH_DEBUG + if (dm->dm_is_xpc && (options & DISPATCH_MACH_WAIT_FOR_REPLY) == 0 && + _dispatch_mach_msg_get_reply_port(dmsg)) { + dispatch_assert( + _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt)); } +#else + (void)dm; #endif - unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT; - if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) && - (options & DISPATCH_MACH_OWNED_REPLY_PORT) && - _dispatch_use_mach_special_reply_port()) { - flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC; + if (DISPATCH_USE_NOIMPORTANCE_QOS && (options & MACH_SEND_NOIMPORTANCE)) { + dmsg->dmsg_priority = 0; + } else { + unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT; + if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) && + _dispatch_use_mach_special_reply_port()) { + // TODO: remove QoS contribution of sync IPC messages to send queue + // rdar://31848737 + flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC; + } + dmsg->dmsg_priority = _dispatch_priority_compute_propagated(0, flags); } - *msg_pp = _dispatch_priority_compute_propagated(0, flags); - // TODO: remove QoS contribution of sync IPC messages to send queue - // rdar://31848737 - return _dispatch_qos_from_pp(*msg_pp); + 
dmsg->dmsg_voucher = _voucher_copy(); + _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); + options |= _dispatch_mach_send_options(); + dmsg->dmsg_options = options; + return options; } DISPATCH_NOINLINE @@ -1762,21 +1712,16 @@ _dispatch_mach_send_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, dispatch_continuation_t dc_wait, mach_msg_option_t options) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - if (slowpath(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { + if (unlikely(dmsg->do_next != DISPATCH_OBJECT_LISTLESS)) { DISPATCH_CLIENT_CRASH(dmsg->do_next, "Message already enqueued"); } + options = _dispatch_mach_send_msg_prepare(dm, dmsg, options); dispatch_retain(dmsg); - pthread_priority_t msg_pp; - dispatch_qos_t qos = _dispatch_mach_priority_propagate(options, &msg_pp); - options |= _dispatch_mach_send_options(); - dmsg->dmsg_options = options; + dispatch_qos_t qos = _dispatch_qos_from_pp(dmsg->dmsg_priority); mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); dmsg->dmsg_reply = _dispatch_mach_msg_get_reply_port(dmsg); bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); - dmsg->dmsg_priority = msg_pp; - dmsg->dmsg_voucher = _voucher_copy(); - _dispatch_voucher_debug("mach-msg[%p] set", dmsg->dmsg_voucher, dmsg); uint32_t send_status; bool returning_send_result = false; @@ -1816,7 +1761,7 @@ dispatch_mach_send(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, { dispatch_assert_zero(options & DISPATCH_MACH_OPTIONS_MASK); options &= ~DISPATCH_MACH_OPTIONS_MASK; - bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); + bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL, options); dispatch_assert(!returned_send_result); } @@ -1848,37 +1793,41 @@ _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, mach_msg_option_t options, bool *returned_send_result) { + struct dispatch_mach_reply_wait_refs_s dwr_buf = { + .dwr_refs = { + 
.du_type = DISPATCH_MACH_TYPE_WAITER, + .dmr_ctxt = dmsg->do_ctxt, + }, + .dwr_waiter_tid = _dispatch_tid_self(), + }; + dispatch_mach_reply_wait_refs_t dwr = &dwr_buf; mach_port_t send = MACH_PORT_NULL; mach_port_t reply_port = _dispatch_mach_msg_get_reply_port(dmsg); - if (!reply_port) { + + if (likely(!reply_port)) { // use per-thread mach reply port reply_port = _dispatch_get_thread_reply_port(); mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); dispatch_assert(MACH_MSGH_BITS_LOCAL(hdr->msgh_bits) == MACH_MSG_TYPE_MAKE_SEND_ONCE); hdr->msgh_local_port = reply_port; - options |= DISPATCH_MACH_OWNED_REPLY_PORT; + dwr->dwr_refs.dmr_reply_port_owned = true; } options |= DISPATCH_MACH_WAIT_FOR_REPLY; - dispatch_mach_reply_refs_t dmr; #if DISPATCH_DEBUG - dmr = _dispatch_calloc(1, sizeof(*dmr)); -#else - struct dispatch_mach_reply_refs_s dmr_buf = { }; - dmr = &dmr_buf; + dwr = _dispatch_calloc(1, sizeof(*dwr)); + *dwr = dwr_buf; #endif struct dispatch_continuation_s dc_wait = { - .dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT, + .dc_flags = DC_FLAG_SYNC_WAITER, .dc_data = dmsg, - .dc_other = dmr, + .dc_other = &dwr->dwr_refs, .dc_priority = DISPATCH_NO_PRIORITY, .dc_voucher = DISPATCH_NO_VOUCHER, }; - dmr->dmr_ctxt = dmsg->do_ctxt; - dmr->dmr_waiter_tid = _dispatch_tid_self(); *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); - if (options & DISPATCH_MACH_OWNED_REPLY_PORT) { + if (dwr->dwr_refs.dmr_reply_port_owned) { _dispatch_clear_thread_reply_port(reply_port); if (_dispatch_use_mach_special_reply_port()) { // link special reply port to send right for remote receive right @@ -1886,9 +1835,9 @@ _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, send = dm->dm_send_refs->dmsr_send; } } - dmsg = _dispatch_mach_msg_reply_recv(dm, dmr, reply_port, send); + dmsg = _dispatch_mach_msg_reply_recv(dm, dwr, reply_port, send); #if DISPATCH_DEBUG - free(dmr); + free(dwr); #endif return dmsg; } @@ -1957,7 +1906,6 @@ 
dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm, if (!reply_port) { DISPATCH_CLIENT_CRASH(0, "Reply port needed for async send with reply"); } - options |= DISPATCH_MACH_ASYNC_REPLY; bool returned_send_result = _dispatch_mach_send_msg(dm, dmsg, NULL,options); unsigned long reason = DISPATCH_MACH_NEEDS_DEFERRED_SEND; mach_error_t err = 0; @@ -1970,53 +1918,23 @@ dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t dm, DISPATCH_NOINLINE static bool -_dispatch_mach_disconnect(dispatch_mach_t dm) +_dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - bool disconnected; - if (_dispatch_unote_registered(dmsr)) { - _dispatch_mach_notification_kevent_unregister(dm); - } - if (MACH_PORT_VALID(dmsr->dmsr_send)) { - _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send); - dmsr->dmsr_send = MACH_PORT_NULL; - } - if (dmsr->dmsr_checkin) { - _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin); - dmsr->dmsr_checkin = NULL; - } - _dispatch_unfair_lock_lock(&dm->dm_send_refs->dmsr_replies_lock); - dispatch_mach_reply_refs_t dmr, tmp; - TAILQ_FOREACH_SAFE(dmr, &dm->dm_send_refs->dmsr_replies, dmr_list, tmp) { - TAILQ_REMOVE(&dm->dm_send_refs->dmsr_replies, dmr, dmr_list); - _TAILQ_MARK_NOT_ENQUEUED(dmr, dmr_list); - if (_dispatch_unote_registered(dmr)) { - if (!_dispatch_mach_reply_kevent_unregister(dm, dmr, - DU_UNREGISTER_DISCONNECTED)) { - TAILQ_INSERT_HEAD(&dm->dm_send_refs->dmsr_replies, dmr, - dmr_list); - } - } else { - _dispatch_mach_reply_waiter_unregister(dm, dmr, - DU_UNREGISTER_DISCONNECTED); - } + bool uninstalled = dm->dm_disconnected; + if (dm->dm_send_refs->dmsr_disconnect_cnt) { + uninstalled = false; // } - disconnected = TAILQ_EMPTY(&dm->dm_send_refs->dmsr_replies); - _dispatch_unfair_lock_unlock(&dm->dm_send_refs->dmsr_replies_lock); - return disconnected; -} -static void -_dispatch_mach_cancel(dispatch_mach_t dm) -{ _dispatch_object_debug(dm, "%s", __func__); 
- if (!_dispatch_mach_disconnect(dm)) return; - bool uninstalled = true; - dispatch_assert(!dm->dm_uninstalled); + uint32_t duu_options = DMRU_DELETE_ACK; + if (!(_dispatch_queue_atomic_flags(dm) & DSF_NEEDS_EVENT)) { + duu_options |= DMRU_PROBE; + } - if (dm->dm_xpc_term_refs) { - uninstalled = _dispatch_unote_unregister(dm->dm_xpc_term_refs, 0); + dispatch_xpc_term_refs_t dxtr = dm->dm_xpc_term_refs; + if (dxtr && !_dispatch_unote_unregister(dxtr, duu_options)) { + uninstalled = false; } dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; @@ -2024,45 +1942,99 @@ _dispatch_mach_cancel(dispatch_mach_t dm) if (local_port) { // handle the deferred delete case properly, similar to what // _dispatch_source_invoke2() does - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) { - _dispatch_source_refs_unregister(dm->_as_ds, - DU_UNREGISTER_IMMEDIATE_DELETE); - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - } else if (!(dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_DELETED)) { - _dispatch_source_refs_unregister(dm->_as_ds, 0); - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); - } - if ((dqf & DSF_STATE_MASK) == DSF_DELETED) { + if (_dispatch_unote_unregister(dmrr, duu_options)) { _dispatch_mach_msg_disconnected(dm, local_port, MACH_PORT_NULL); dmrr->du_ident = 0; } else { uninstalled = false; } - } else { - _dispatch_queue_atomic_flags_set_and_clear(dm->_as_dq, DSF_DELETED, - DSF_ARMED | DSF_DEFERRED_DELETE); } - if (dm->dm_send_refs->dmsr_disconnect_cnt) { - uninstalled = false; // + if (uninstalled) { + dispatch_queue_flags_t dqf; + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(dm, + DSF_DELETED, DSF_NEEDS_EVENT); + if (unlikely(dqf & (DSF_DELETED | DSF_CANCEL_WAITER))) { + DISPATCH_CLIENT_CRASH(dqf, "Corrupt channel state"); + } + _dispatch_release_no_dispose(dm); // see _dispatch_queue_alloc() + } else { + _dispatch_queue_atomic_flags_set(dm, DSF_NEEDS_EVENT); } - if (uninstalled) 
dm->dm_uninstalled = uninstalled; + return uninstalled; } DISPATCH_NOINLINE static bool _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) { - if (!_dispatch_mach_disconnect(dm)) return false; - dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - dmsr->dmsr_checkin = dou._dc->dc_data; - dmsr->dmsr_send = (mach_port_t)dou._dc->dc_other; - _dispatch_continuation_free(dou._dc); - (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed); _dispatch_object_debug(dm, "%s", __func__); - _dispatch_release(dm); // - return true; + + // 1. handle the send-possible notification and checkin message + + dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; + if (_dispatch_unote_registered(dmsr)) { + _dispatch_mach_notification_kevent_unregister(dm); + } + if (MACH_PORT_VALID(dmsr->dmsr_send)) { + _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr->dmsr_send); + dmsr->dmsr_send = MACH_PORT_NULL; + } + if (dmsr->dmsr_checkin) { + _dispatch_mach_msg_not_sent(dm, dmsr->dmsr_checkin, NULL); + dmsr->dmsr_checkin = NULL; + } + dm->dm_needs_mgr = 0; + + // 2. 
cancel all pending replies and break out synchronous waiters + + dispatch_mach_reply_refs_t dmr, tmp; + LIST_HEAD(, dispatch_mach_reply_refs_s) replies = + LIST_HEAD_INITIALIZER(replies); + bool disconnected; + + // _dispatch_mach_reply_merge_msg is the one passing DMRU_DELETE_ACK + uint32_t dmru_options = DMRU_CANCEL | DMRU_DISCONNECTED; + if (!(_dispatch_queue_atomic_flags(dm) & DSF_NEEDS_EVENT)) { + dmru_options |= DMRU_PROBE; + } + + _dispatch_unfair_lock_lock(&dmsr->dmsr_replies_lock); + LIST_SWAP(&replies, &dmsr->dmsr_replies, + dispatch_mach_reply_refs_s, dmr_list); + LIST_FOREACH_SAFE(dmr, &replies, dmr_list, tmp) { + _LIST_MARK_NOT_ENQUEUED(dmr, dmr_list); + _dispatch_mach_reply_unregister(dm, dmr, dmru_options); + } + // any unote unregistration that fails is put back on the reply list + disconnected = LIST_EMPTY(&dmsr->dmsr_replies); + _dispatch_unfair_lock_unlock(&dmsr->dmsr_replies_lock); + + // 3. if no reply is left pending deferred deletion, finish reconnecting + + if (disconnected) { + mach_port_t dmsr_send = (mach_port_t)dou._dc->dc_other; + dispatch_mach_msg_t dmsr_checkin = dou._dc->dc_data; + + _dispatch_continuation_free(dou._dc); + if (dmsr_checkin == DM_CHECKIN_CANCELED) { + dm->dm_disconnected = true; + dmsr_checkin = NULL; + } + if (dm->dm_disconnected) { + if (MACH_PORT_VALID(dmsr_send)) { + _dispatch_mach_msg_disconnected(dm, MACH_PORT_NULL, dmsr_send); + } + if (dmsr_checkin) { + _dispatch_mach_msg_not_sent(dm, dmsr_checkin, NULL); + } + } else { + dmsr->dmsr_send = dmsr_send; + dmsr->dmsr_checkin = dmsr_checkin; + } + (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed); + } + return disconnected; } DISPATCH_NOINLINE @@ -2078,11 +2050,11 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, dmsg->dmsg_options = _dispatch_mach_checkin_options(); dmsr->dmsr_checkin_port = _dispatch_mach_msg_get_remote_port(dmsg); } else { - checkin = NULL; + if (checkin != DM_CHECKIN_CANCELED) checkin = NULL; dmsr->dmsr_checkin_port = 
MACH_PORT_NULL; } dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT; + dc->dc_flags = DC_FLAG_CONSUME | DC_FLAG_ALLOCATED; // actually called manually in _dispatch_mach_send_drain dc->dc_func = (void*)_dispatch_mach_reconnect_invoke; dc->dc_ctxt = dc; @@ -2090,7 +2062,6 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, dc->dc_other = (void*)(uintptr_t)send; dc->dc_voucher = DISPATCH_NO_VOUCHER; dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain(dm); // return _dispatch_mach_send_push(dm, dc, 0); } @@ -2099,7 +2070,7 @@ mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t dm) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - if (slowpath(dm->dq_atomic_flags & DSF_CANCELED)) { + if (unlikely(dm->dq_atomic_flags & DSF_CANCELED)) { return MACH_PORT_DEAD; } return dmsr->dmsr_checkin_port; @@ -2127,6 +2098,8 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + _dispatch_trace_item_pop(dm, dmsg); + dmrr = dm->dm_recv_refs; dmsg->do_next = DISPATCH_OBJECT_LISTLESS; _dispatch_voucher_ktrace_dmsg_pop(dmsg); @@ -2139,14 +2112,14 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, _dispatch_client_callout3(dmrr->dmrr_handler_ctxt, reason, dmsg, _dispatch_mach_xpc_hooks->dmxh_async_reply_handler); } else { - if (slowpath(!dm->dm_connect_handler_called)) { + if (unlikely(!dm->dm_connect_handler_called)) { _dispatch_mach_connect_invoke(dm); } if (reason == DISPATCH_MACH_MESSAGE_RECEIVED && - (_dispatch_queue_atomic_flags(dm->_as_dq) & DSF_CANCELED)) { + (_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) { // Do not deliver message received // after cancellation: _dispatch_mach_merge_msg can be preempted - // for a long time between clearing DSF_ARMED but before + // for a long time right after disarming the unote but before // enqueuing the message, 
allowing for cancellation to complete, // and then the message event to be delivered. // @@ -2160,7 +2133,7 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, } _dispatch_perfmon_workitem_inc(); }); - _dispatch_introspection_queue_item_complete(dmsg); + _dispatch_trace_item_complete(dmsg); dispatch_release(dmsg); } @@ -2173,7 +2146,7 @@ _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_thread_frame_s dtf; // hide mach channel - dispatch_mach_t dm = (dispatch_mach_t)_dispatch_thread_frame_stash(&dtf); + dispatch_mach_t dm = upcast(_dispatch_thread_frame_stash(&dtf))._dm; _dispatch_mach_msg_invoke_with_mach(dmsg, flags, dm); _dispatch_thread_frame_unstash(&dtf); } @@ -2197,10 +2170,10 @@ _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, _dispatch_thread_frame_stash(&dtf); } dmrr = dm->dm_recv_refs; - DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DISPATCH_OBJ_CONSUME_BIT); - _dispatch_continuation_pop_forwarded(dc, DISPATCH_NO_VOUCHER, dc_flags, { + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DC_FLAG_CONSUME); + _dispatch_continuation_pop_forwarded(dc, dc_flags, dm, { dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { + if (unlikely(!dm->dm_connect_handler_called)) { _dispatch_mach_connect_invoke(dm); } _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); @@ -2230,13 +2203,13 @@ dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; dispatch_qos_t qos; - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + _dispatch_continuation_init_f(dc, dm, context, func, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - qos = _dispatch_continuation_override_qos(dm->_as_dq, 
dc); + _dispatch_trace_item_push(dm, dc); + qos = _dispatch_qos_from_pp(dc->dc_priority); return _dispatch_mach_send_push(dm, dc, qos); } @@ -2245,13 +2218,13 @@ void dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; dispatch_qos_t qos; - _dispatch_continuation_init(dc, dm, barrier, 0, 0, dc_flags); + _dispatch_continuation_init(dc, dm, barrier, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_SEND_BARRIER)); - _dispatch_trace_continuation_push(dm->_as_dq, dc); - qos = _dispatch_continuation_override_qos(dm->_as_dq, dc); + _dispatch_trace_item_push(dm, dc); + qos = _dispatch_qos_from_pp(dc->dc_priority); return _dispatch_mach_send_push(dm, dc, qos); } @@ -2261,11 +2234,12 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + dispatch_qos_t qos; - _dispatch_continuation_init_f(dc, dm, context, func, 0, 0, dc_flags); + qos = _dispatch_continuation_init_f(dc, dm, context, func, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); - return _dispatch_continuation_async(dm->_as_dq, dc); + return _dispatch_continuation_async(dm, dc, qos, dc_flags); } DISPATCH_NOINLINE @@ -2273,11 +2247,12 @@ void dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + dispatch_qos_t qos; - _dispatch_continuation_init(dc, dm, barrier, 0, 0, 
dc_flags); + qos = _dispatch_continuation_init(dc, dm, barrier, 0, dc_flags); _dispatch_mach_barrier_set_vtable(dc, dm, DC_VTABLE(MACH_RECV_BARRIER)); - return _dispatch_continuation_async(dm->_as_dq, dc); + return _dispatch_continuation_async(dm, dc, qos, dc_flags); } DISPATCH_NOINLINE @@ -2287,7 +2262,7 @@ _dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; dispatch_invoke_with_autoreleasepool(flags, { - if (slowpath(!dm->dm_connect_handler_called)) { + if (unlikely(!dm->dm_connect_handler_called)) { _dispatch_mach_connect_invoke(dm); } _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, @@ -2295,121 +2270,122 @@ _dispatch_mach_cancel_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags) _dispatch_perfmon_workitem_inc(); }); dm->dm_cancel_handler_called = 1; - _dispatch_release(dm); // the retain is done at creation time } DISPATCH_NOINLINE void dispatch_mach_cancel(dispatch_mach_t dm) { - dispatch_source_cancel(dm->_as_ds); + dispatch_queue_flags_t dqf; + + _dispatch_object_debug(dm, "%s", __func__); + // similar race to dispatch_source_cancel + // Once we set the DSF_CANCELED bit, anyone can notice and finish the + // unregistration causing use after free in dispatch_mach_reconnect() below. 
+ _dispatch_retain(dm); + dqf = _dispatch_queue_atomic_flags_set_orig(dm, DSF_CANCELED); + if (!(dqf & DSF_CANCELED)) { + dispatch_mach_reconnect(dm, MACH_PORT_NULL, DM_CHECKIN_CANCELED); + } + _dispatch_release_tailcall(dm); } static void _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dispatch_priority_t pri) { + bool cancelled = (_dispatch_queue_atomic_flags(dm) & DSF_CANCELED); dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; - uint32_t disconnect_cnt; - if (dmrr->du_ident) { - _dispatch_source_refs_register(dm->_as_ds, wlh, pri); + dispatch_assert(!dm->ds_is_installed); + dm->ds_is_installed = true; + + if (!cancelled && dmrr->du_ident) { + (void)_dispatch_unote_register(dmrr, wlh, pri); dispatch_assert(dmrr->du_is_direct); } - if (dm->dm_is_xpc) { - bool monitor_sigterm; - if (_dispatch_mach_xpc_hooks->version < 3) { - monitor_sigterm = true; - } else if (!_dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification){ - monitor_sigterm = true; - } else { - monitor_sigterm = - _dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification( - dm->dm_recv_refs->dmrr_handler_ctxt); - } - if (monitor_sigterm) { - dispatch_xpc_term_refs_t _dxtr = - dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr; - _dxtr->du_owner_wref = _dispatch_ptr2wref(dm); - dm->dm_xpc_term_refs = _dxtr; - _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); - } + if (!cancelled && dm->dm_is_xpc && + _dispatch_mach_xpc_hooks->dmxh_enable_sigterm_notification( + dmrr->dmrr_handler_ctxt)) { + dispatch_xpc_term_refs_t _dxtr = + dux_create(&_dispatch_xpc_type_sigterm, SIGTERM, 0)._dxtr; + _dxtr->du_owner_wref = _dispatch_ptr2wref(dm); + dm->dm_xpc_term_refs = _dxtr; + _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); } if (!dm->dq_priority) { // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular sources or queues, the DEFAULTQUEUE flag + // which is unlike regular sources or queues, the FALLBACK flag // is used so that the 
priority of the channel doesn't act as // a QoS floor for incoming messages (26761457) dm->dq_priority = pri; } - dm->ds_is_installed = true; - if (unlikely(!os_atomic_cmpxchgv2o(dm->dm_send_refs, dmsr_disconnect_cnt, - DISPATCH_MACH_NEVER_INSTALLED, 0, &disconnect_cnt, release))) { - DISPATCH_INTERNAL_CRASH(disconnect_cnt, "Channel already installed"); + + uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, + dmsr_disconnect_cnt, relaxed); + if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); } } void -_dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume) +_dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume) { dispatch_priority_t pri; dispatch_wlh_t wlh; // call "super" - _dispatch_queue_finalize_activation(dm->_as_dq, allow_resume); + _dispatch_lane_activate(dm, allow_resume); if (!dm->ds_is_installed) { - pri = _dispatch_queue_compute_priority_and_wlh(dm->_as_dq, &wlh); + pri = _dispatch_queue_compute_priority_and_wlh(dm, &wlh); if (pri) _dispatch_mach_install(dm, wlh, pri); } } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_tryarm(dispatch_mach_t dm, dispatch_queue_flags_t *out_dqf) -{ - dispatch_queue_flags_t oqf, nqf; - bool rc = os_atomic_rmw_loop2o(dm, dq_atomic_flags, oqf, nqf, relaxed, { - nqf = oqf; - if (nqf & (DSF_ARMED | DSF_CANCELED | DSF_DEFERRED_DELETE | - DSF_DELETED)) { - // the test is inside the loop because it's convenient but the - // result should not change for the duration of the rmw_loop - os_atomic_rmw_loop_give_up(break); - } - nqf |= DSF_ARMED; - }); - if (out_dqf) *out_dqf = nqf; - return rc; +DISPATCH_NOINLINE +static void +_dispatch_mach_handle_wlh_change(dispatch_mach_t dm) +{ + dispatch_queue_flags_t dqf; + + dqf = _dispatch_queue_atomic_flags_set_orig(dm, DSF_WLH_CHANGED); + if (!(dqf & DQF_MUTABLE)) { + if (dm->dm_is_xpc) { + DISPATCH_CLIENT_CRASH(0, "Changing target queue " + "hierarchy after xpc 
connection was activated"); + } else { + DISPATCH_CLIENT_CRASH(0, "Changing target queue " + "hierarchy after mach channel was connected"); + } + } + if (!(dqf & DSF_WLH_CHANGED)) { + if (dm->dm_is_xpc) { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after xpc connection was activated"); + } else { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after mach channel was connected"); + } + } } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_wakeup_target_t -_dispatch_mach_invoke2(dispatch_object_t dou, +_dispatch_mach_invoke2(dispatch_mach_t dm, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, uint64_t *owned) { - dispatch_mach_t dm = dou._dm; dispatch_queue_wakeup_target_t retq = NULL; dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; - dispatch_queue_flags_t dqf = 0; - - if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr && - _dispatch_unote_wlh_changed(dmrr, _dispatch_get_wlh())) { - dqf = _dispatch_queue_atomic_flags_set_orig(dm->_as_dq, - DSF_WLH_CHANGED); - if (!(dqf & DSF_WLH_CHANGED)) { - if (dm->dm_is_xpc) { - _dispatch_bug_deprecated("Changing target queue " - "hierarchy after xpc connection was activated"); - } else { - _dispatch_bug_deprecated("Changing target queue " - "hierarchy after mach channel was activated"); - } - } + dispatch_queue_flags_t dqf; + + if (unlikely(!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && dmrr && + _dispatch_unote_wlh_changed(dmrr, _dispatch_get_event_wlh()))) { + _dispatch_mach_handle_wlh_change(dm); } // This function performs all mach channel actions. 
Each action is @@ -2425,76 +2401,81 @@ _dispatch_mach_invoke2(dispatch_object_t dou, if (unlikely(flags & DISPATCH_INVOKE_MANAGER_DRAIN)) { return dm->do_targetq; } - _dispatch_mach_install(dm, _dispatch_get_wlh(),_dispatch_get_basepri()); + dispatch_priority_t pri = DISPATCH_PRIORITY_FLAG_MANAGER; + if (likely(flags & DISPATCH_INVOKE_WORKER_DRAIN)) { + pri = _dispatch_get_basepri(); + } + _dispatch_mach_install(dm, _dispatch_get_event_wlh(), pri); _dispatch_perfmon_workitem_inc(); } if (_dispatch_queue_class_probe(dm)) { if (dq == dm->do_targetq) { drain: - retq = _dispatch_queue_serial_drain(dm->_as_dq, dic, flags, owned); + retq = _dispatch_lane_serial_drain(dm, dic, flags, owned); } else { retq = dm->do_targetq; } } - if (!retq && _dispatch_unote_registered(dmrr)) { - if (_dispatch_mach_tryarm(dm, &dqf)) { - _dispatch_unote_resume(dmrr); - if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail && - (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) && - _dispatch_wlh_should_poll_unote(dmrr)) { - // try to redrive the drain from under the lock for channels - // targeting an overcommit root queue to avoid parking - // when the next message has already fired - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); - if (dm->dq_items_tail) goto drain; - } + dqf = _dispatch_queue_atomic_flags(dm); + if (!retq && !(dqf & DSF_CANCELED) && _dispatch_unote_needs_rearm(dmrr)) { + _dispatch_unote_resume(dmrr); + if (dq == dm->do_targetq && !dq->do_targetq && !dmsr->dmsr_tail && + (dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) && + _dispatch_wlh_should_poll_unote(dmrr)) { + // try to redrive the drain from under the lock for channels + // targeting an overcommit root queue to avoid parking + // when the next message has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + if (dm->dq_items_tail) goto drain; } - } else { - dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + dqf = _dispatch_queue_atomic_flags(dm); } if (dmsr->dmsr_tail) { - bool 
requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && - _dispatch_unote_registered(dmsr)); - if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || - (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + bool requires_mgr = dmsr->dmsr_disconnect_cnt ? + _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; // The channel has pending messages to send. - if (unlikely(requires_mgr && dq != &_dispatch_mgr_q)) { - return retq ? retq : &_dispatch_mgr_q; + if (unlikely(requires_mgr && dq != _dispatch_mgr_q._as_dq)) { + return retq ? retq : _dispatch_mgr_q._as_dq; } dispatch_mach_send_invoke_flags_t send_flags = DM_SEND_INVOKE_NONE; - if (dq != &_dispatch_mgr_q) { + if (dq != _dispatch_mgr_q._as_dq) { send_flags |= DM_SEND_INVOKE_CAN_RUN_BARRIER; } _dispatch_mach_send_invoke(dm, flags, send_flags); + if (!retq && dm->dq_items_tail) { + retq = dm->do_targetq; + } } - if (!retq) retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; - } else if (!retq && (dqf & DSF_CANCELED)) { + if (!retq && dmsr->dmsr_tail) { + retq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + + if (dqf & DSF_CANCELED) { // The channel has been cancelled and needs to be uninstalled from the - // manager queue. After uninstallation, the cancellation handler needs - // to be delivered to the target queue. - if (!dm->dm_uninstalled) { - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; - } - if (dq != &_dispatch_mgr_q) { - return retq ? retq : &_dispatch_mgr_q; - } - _dispatch_mach_send_invoke(dm, flags, DM_SEND_INVOKE_CANCEL); - if (unlikely(!dm->dm_uninstalled)) { - // waiting for the delivery of a deferred delete event - // or deletion didn't happen because send_invoke couldn't - // acquire the send lock - return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; - } + // manager queue. 
+ if (!(dqf & DSF_DELETED) && !_dispatch_mach_cancel(dm)) { + // waiting for the delivery of a deferred delete event + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; } + + // After uninstallation, the cancellation handler needs to be delivered + // to the target queue, but not before we drained all messages from the + // receive queue. if (!dm->dm_cancel_handler_called) { if (dq != dm->do_targetq) { return retq ? retq : dm->do_targetq; } + if (DISPATCH_QUEUE_IS_SUSPENDED(dm)) { + return dm->do_targetq; + } + if (_dispatch_queue_class_probe(dm)) { + goto drain; + } _dispatch_mach_cancel_invoke(dm, flags); } } @@ -2520,7 +2501,7 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm->_as_dq); + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dm); if (!dm->ds_is_installed) { // The channel needs to be installed on the kevent queue. @@ -2533,52 +2514,47 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, goto done; } - if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) { - // Sending and uninstallation below require the send lock, the channel - // will be woken up when the lock is dropped - goto done; - } - if (dmsr->dmsr_tail) { - bool requires_mgr = dm->dm_needs_mgr || (dmsr->dmsr_disconnect_cnt && - _dispatch_unote_registered(dmsr)); - if (!os_atomic_load2o(dmsr, dmsr_notification_armed, relaxed) || - (dqf & DSF_CANCELED) || dmsr->dmsr_disconnect_cnt) { + if (_dispatch_lock_is_locked(dmsr->dmsr_state_lock.dul_lock)) { + // Sending require the send lock, the channel will be woken up + // when the lock is dropped + goto done; + } + + if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + bool requires_mgr = dmsr->dmsr_disconnect_cnt ? 
+ _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; if (unlikely(requires_mgr)) { tq = DISPATCH_QUEUE_WAKEUP_MGR; } else { tq = DISPATCH_QUEUE_WAKEUP_TARGET; } } - } else if (dqf & DSF_CANCELED) { - if (!dm->dm_uninstalled) { - if ((dqf & DSF_STATE_MASK) == (DSF_ARMED | DSF_DEFERRED_DELETE)) { - // waiting for the delivery of a deferred delete event - } else { - // The channel needs to be uninstalled from the manager queue - tq = DISPATCH_QUEUE_WAKEUP_MGR; - } - } else if (!dm->dm_cancel_handler_called) { - // the cancellation handler needs to be delivered to the target - // queue. - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } + } else if ((dqf & DSF_CANCELED) && (dqf & DSF_NEEDS_EVENT) && + !(flags & DISPATCH_WAKEUP_EVENT)) { + // waiting for the delivery of a deferred delete event + } else if ((dqf & DSF_CANCELED) && !dm->dm_cancel_handler_called) { + // The channel needs to be cancelled and the cancellation handler + // needs to be delivered to the target queue. + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } done: if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) && - dm->do_targetq == &_dispatch_mgr_q) { + dm->do_targetq == _dispatch_mgr_q._as_dq) { tq = DISPATCH_QUEUE_WAKEUP_MGR; } - return _dispatch_queue_class_wakeup(dm->_as_dq, qos, flags, tq); + return _dispatch_queue_wakeup(dm, qos, flags, tq); } static void _dispatch_mach_sigterm_invoke(void *ctx) { dispatch_mach_t dm = ctx; - if (!(dm->dq_atomic_flags & DSF_CANCELED)) { + uint32_t duu_options = DUU_DELETE_ACK | DUU_MUST_SUCCEED; + _dispatch_unote_unregister(dm->dm_xpc_term_refs, duu_options); + if (!(_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) { dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, DISPATCH_MACH_SIGTERM_RECEIVED, NULL, 0, @@ -2587,27 +2563,15 @@ _dispatch_mach_sigterm_invoke(void *ctx) } void -_dispatch_xpc_sigterm_merge(dispatch_unote_t du, +_dispatch_xpc_sigterm_merge_evt(dispatch_unote_t du, uint32_t flags DISPATCH_UNUSED, uintptr_t data 
DISPATCH_UNUSED, - uintptr_t status DISPATCH_UNUSED, pthread_priority_t pp) + pthread_priority_t pp) { dispatch_mach_t dm = _dispatch_wref2ptr(du._du->du_owner_wref); - uint32_t options = 0; - if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && - !(flags & EV_DELETE)) { - options = DU_UNREGISTER_IMMEDIATE_DELETE; - } else { - dispatch_assert((flags & EV_ONESHOT) && (flags & EV_DELETE)); - options = DU_UNREGISTER_ALREADY_DELETED; - } - _dispatch_unote_unregister(du, options); - if (!(dm->dq_atomic_flags & DSF_CANCELED)) { - _dispatch_barrier_async_detached_f(dm->_as_dq, dm, - _dispatch_mach_sigterm_invoke); - } else { - dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY); - } + _dispatch_barrier_async_detached_f(dm, dm, _dispatch_mach_sigterm_invoke); + dx_wakeup(dm, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } #pragma mark - @@ -2617,8 +2581,7 @@ dispatch_mach_msg_t dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, dispatch_mach_msg_destructor_t destructor, mach_msg_header_t **msg_ptr) { - if (slowpath(size < sizeof(mach_msg_header_t)) || - slowpath(destructor && !msg)) { + if (unlikely(size < sizeof(mach_msg_header_t) || (destructor && !msg))) { DISPATCH_CLIENT_CRASH(size, "Empty message"); } @@ -2636,7 +2599,7 @@ dispatch_mach_msg_create(mach_msg_header_t *msg, size_t size, memcpy(dmsg->dmsg_buf, msg, size); } dmsg->do_next = DISPATCH_OBJECT_LISTLESS; - dmsg->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); + dmsg->do_targetq = _dispatch_get_default_queue(false); dmsg->dmsg_destructor = destructor; dmsg->dmsg_size = size; if (msg_ptr) { @@ -2689,7 +2652,7 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) { size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(dmsg), dmsg); + _dispatch_object_class_name(dmsg), dmsg); offset += _dispatch_object_debug_attr(dmsg, buf + offset, 
bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, "opts/err = 0x%x, " "msgh[%p] = { ", dmsg->dmsg_options, dmsg->dmsg_buf); @@ -2732,11 +2695,7 @@ DISPATCH_ALWAYS_INLINE static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue(void *msg_context) { - if (DISPATCH_MACH_XPC_SUPPORTS_ASYNC_REPLIES(_dispatch_mach_xpc_hooks)) { - return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue( - msg_context); - } - return NULL; + return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); } static dispatch_continuation_t @@ -2794,17 +2753,11 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, bufRequest = alloca(rcv_size); bufRequest->RetCode = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufRequest + vm_page_size); - p < (mach_vm_address_t)bufRequest + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } + _dispatch_mach_stack_probe(bufRequest, rcv_size); bufReply = alloca(rcv_size); bufReply->Head.msgh_size = 0; - for (mach_vm_address_t p = mach_vm_trunc_page(bufReply + vm_page_size); - p < (mach_vm_address_t)bufReply + rcv_size; p += vm_page_size) { - *(char*)p = 0; // ensure alloca buffer doesn't overlap with stack guard - } + _dispatch_mach_stack_probe(bufReply, rcv_size); #if DISPATCH_DEBUG options |= MACH_RCV_LARGE; // rdar://problem/8422992 @@ -2825,7 +2778,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; - if (slowpath(kr)) { + if (unlikely(kr)) { switch (kr) { case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: @@ -2862,6 +2815,9 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "requested size %zd: id = 0x%x, size = %d", maxmsgsz, bufReply->Head.msgh_id, bufReply->Head.msgh_size); + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_destroy(&bufReply->Head); + } } if (large_buf) { free(large_buf); @@ -2898,12 +2854,13 @@ dispatch_mig_server(dispatch_source_t ds, size_t 
maxmsgsz, #pragma clang diagnostic ignored "-Wdeprecated-declarations" int r = proc_importance_assertion_begin_with_msg(&bufRequest->Head, NULL, &assertion_token); - if (r && slowpath(r != EIO)) { + if (r && r != EIO) { (void)dispatch_assume_zero(r); } #pragma clang diagnostic pop #endif _voucher_replace(voucher_create_with_mach_msg(&bufRequest->Head)); + bufReply->Head = (mach_msg_header_t){ }; demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { @@ -2913,7 +2870,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode // is present - if (slowpath(bufReply->RetCode)) { + if (unlikely(bufReply->RetCode)) { if (bufReply->RetCode == MIG_NO_REPLY) { continue; } @@ -2944,6 +2901,111 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, return kr; } +#pragma mark - +#pragma mark dispatch_mach_mig_demux + +static char const * const +_dispatch_mach_mig_demux_context_key = "mach_mig_demux"; + +static const mig_routine_descriptor * +_dispatch_mach_mig_resolve(mach_msg_id_t msgid, + const struct mig_subsystem *const subsystems[], size_t count) +{ + const mig_routine_descriptor *desc; + + for (size_t i = 0; i < count; i++) { + if (subsystems[i]->start <= msgid && msgid < subsystems[i]->end) { + desc = &subsystems[i]->routine[msgid - subsystems[i]->start]; + return desc->stub_routine ? 
desc : NULL; + } + } + return NULL; +} + +bool +dispatch_mach_mig_demux(void *context, + const struct mig_subsystem *const subsystems[], size_t count, + dispatch_mach_msg_t dmsg) +{ + dispatch_thread_context_s dmmd_ctx = { + .dtc_key = _dispatch_mach_mig_demux_context_key, + .dtc_mig_demux_ctx = context, + }; + mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(dmsg, NULL); + mach_msg_id_t msgid = hdr->msgh_id; + const mig_routine_descriptor *desc; + mig_reply_error_t *bufReply; + mach_msg_size_t reply_size; + kern_return_t kr; + + desc = _dispatch_mach_mig_resolve(msgid, subsystems, count); + if (!desc) return false; + + _dispatch_thread_context_push(&dmmd_ctx); + + reply_size = desc->max_reply_msg + MAX_TRAILER_SIZE; + bufReply = alloca(reply_size); + _dispatch_mach_stack_probe(bufReply, reply_size); + bufReply->Head = (mach_msg_header_t){ + .msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(hdr->msgh_bits), 0), + .msgh_remote_port = hdr->msgh_remote_port, + .msgh_size = sizeof(mig_reply_error_t), + .msgh_id = msgid + 100, + }; + + desc->stub_routine(hdr, &bufReply->Head); + + // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present + if (unlikely(!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) && + bufReply->RetCode)) { + // destroy the request - but not the reply port + hdr->msgh_remote_port = 0; + if (bufReply->RetCode != MIG_NO_REPLY && + (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + mach_msg_destroy(hdr); + } + } + + if (bufReply->Head.msgh_remote_port) { + mach_msg_option_t options = MACH_SEND_MSG; + if (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) != + MACH_MSG_TYPE_MOVE_SEND_ONCE) { + options |= MACH_SEND_TIMEOUT; + } + kr = mach_msg(&bufReply->Head, options, bufReply->Head.msgh_size, + 0, MACH_PORT_NULL, 0, MACH_PORT_NULL); + switch (kr) { + case KERN_SUCCESS: + break; + case MACH_SEND_INVALID_DEST: + case MACH_SEND_TIMED_OUT: + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_destroy(&bufReply->Head); + } + 
break; + default: + DISPATCH_VERIFY_MIG(kr); + DISPATCH_CLIENT_CRASH(kr, + "dispatch_mach_mig_demux: mach_msg(MACH_SEND_MSG) failed"); + } + } + + _dispatch_thread_context_pop(&dmmd_ctx); + return true; +} + +void * +dispatch_mach_mig_demux_get_context(void) +{ + dispatch_thread_context_t dtc; + dtc = _dispatch_thread_context_find(_dispatch_mach_mig_demux_context_key); + if (unlikely(dtc == NULL)) { + DISPATCH_CLIENT_CRASH(0, "dispatch_mach_mig_demux_get_context " + "not called from dispatch_mach_mig_demux context"); + } + return dtc->dtc_mig_demux_ctx; +} + #pragma mark - #pragma mark dispatch_mach_debug @@ -2972,7 +3034,7 @@ _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz) size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", dm->dq_label && !dm->dm_cancel_handler_called ? dm->dq_label : - dx_kind(dm), dm); + _dispatch_object_class_name(dm), dm); offset += _dispatch_object_debug_attr(dm, &buf[offset], bufsiz - offset); offset += _dispatch_mach_debug_attr(dm, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); diff --git a/src/mach_internal.h b/src/mach_internal.h index 8c8edd8d3..b1e959c89 100644 --- a/src/mach_internal.h +++ b/src/mach_internal.h @@ -55,16 +55,15 @@ enum { DISPATCH_MACH_RECV_MESSAGE = 0x2, }; +DISPATCH_CLASS_DECL(mach, QUEUE); +DISPATCH_CLASS_DECL(mach_msg, OBJECT); -DISPATCH_CLASS_DECL(mach); -DISPATCH_CLASS_DECL(mach_msg); - -#ifndef __cplusplus struct dispatch_mach_s { - DISPATCH_SOURCE_HEADER(mach); + DISPATCH_SOURCE_CLASS_HEADER(mach); dispatch_mach_send_refs_t dm_send_refs; dispatch_xpc_term_refs_t dm_xpc_term_refs; } DISPATCH_ATOMIC64_ALIGN; +dispatch_assert_valid_lane_type(dispatch_mach_s); struct dispatch_mach_msg_s { DISPATCH_OBJECT_HEADER(mach_msg); @@ -91,30 +90,29 @@ _dispatch_mach_xref_dispose(struct dispatch_mach_s *dm) dm->dm_recv_refs->dmrr_handler_ctxt = (void *)0xbadfeed; } } -#endif // __cplusplus -dispatch_source_t 
-_dispatch_source_create_mach_msg_direct_recv(mach_port_t recvp, - const struct dispatch_continuation_s *dc); +extern dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks; +extern const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default; void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free); -void _dispatch_mach_finalize_activation(dispatch_mach_t dm, bool *allow_resume); +void _dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume); void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); -void _dispatch_mach_merge_notification(dispatch_unote_t du, - uint32_t flags, uintptr_t data, uintptr_t status, - pthread_priority_t pp); +void _dispatch_mach_notification_merge_evt(dispatch_unote_t du, + uint32_t flags, uintptr_t data, pthread_priority_t pp); void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t msgsz); + mach_msg_header_t *msg, mach_msg_size_t msgsz, + pthread_priority_t msg_pp, pthread_priority_t ovr_pp); void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t msgsz); -void _dispatch_xpc_sigterm_merge(dispatch_unote_t du, uint32_t flags, - uintptr_t data, uintptr_t status, pthread_priority_t pp); + mach_msg_header_t *msg, mach_msg_size_t msgsz, + pthread_priority_t msg_pp, pthread_priority_t ovr_pp); +void _dispatch_xpc_sigterm_merge_evt(dispatch_unote_t du, uint32_t flags, + uintptr_t data, pthread_priority_t pp); void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg, bool *allow_free); void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, diff --git 
a/src/object.c b/src/object.c index 86d100507..261e1996d 100644 --- a/src/object.c +++ b/src/object.c @@ -27,7 +27,7 @@ unsigned long _os_object_retain_count(_os_object_t obj) { int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + if (unlikely(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { return ULONG_MAX; // global object } return (unsigned long)(xref_cnt + 1); @@ -65,8 +65,8 @@ DISPATCH_NOINLINE _os_object_t _os_object_retain(_os_object_t obj) { - int xref_cnt = _os_object_xrefcnt_inc(obj); - if (slowpath(xref_cnt <= 0)) { + int xref_cnt = _os_object_xrefcnt_inc_orig(obj); + if (unlikely(xref_cnt < 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } return obj; @@ -76,11 +76,11 @@ DISPATCH_NOINLINE _os_object_t _os_object_retain_with_resurrect(_os_object_t obj) { - int xref_cnt = _os_object_xrefcnt_inc(obj); - if (slowpath(xref_cnt < 0)) { + int xref_cnt = _os_object_xrefcnt_inc_orig(obj) + 1; + if (unlikely(xref_cnt < 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an over-released object"); } - if (slowpath(xref_cnt == 0)) { + if (unlikely(xref_cnt == 0)) { _os_object_retain_internal(obj); } return obj; @@ -91,10 +91,10 @@ void _os_object_release(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_dec(obj); - if (fastpath(xref_cnt >= 0)) { + if (likely(xref_cnt >= 0)) { return; } - if (slowpath(xref_cnt < -1)) { + if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } return _os_object_xref_dispose(obj); @@ -105,13 +105,13 @@ _os_object_retain_weak(_os_object_t obj) { int xref_cnt, nxref_cnt; os_atomic_rmw_loop2o(obj, os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, { - if (slowpath(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { + if (unlikely(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { os_atomic_rmw_loop_give_up(return true); // global object } - if (slowpath(xref_cnt == -1)) { + if (unlikely(xref_cnt == -1)) { os_atomic_rmw_loop_give_up(return false); } - if (slowpath(xref_cnt < -1)) { + if 
(unlikely(xref_cnt < -1)) { os_atomic_rmw_loop_give_up(goto overrelease); } nxref_cnt = xref_cnt + 1; @@ -125,10 +125,10 @@ bool _os_object_allows_weak_reference(_os_object_t obj) { int xref_cnt = obj->os_obj_xref_cnt; - if (slowpath(xref_cnt == -1)) { + if (unlikely(xref_cnt == -1)) { return false; } - if (slowpath(xref_cnt < -1)) { + if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } return true; @@ -190,18 +190,21 @@ dispatch_release(dispatch_object_t dou) void _dispatch_xref_dispose(dispatch_object_t dou) { - unsigned long metatype = dx_metatype(dou._do); - if (metatype == _DISPATCH_QUEUE_TYPE || metatype == _DISPATCH_SOURCE_TYPE) { + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { _dispatch_queue_xref_dispose(dou._dq); } - if (dx_type(dou._do) == DISPATCH_SOURCE_KEVENT_TYPE) { + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: _dispatch_source_xref_dispose(dou._ds); + break; #if HAVE_MACH - } else if (dx_type(dou._do) == DISPATCH_MACH_CHANNEL_TYPE) { + case DISPATCH_MACH_CHANNEL_TYPE: _dispatch_mach_xref_dispose(dou._dm); + break; #endif - } else if (dx_type(dou._do) == DISPATCH_QUEUE_RUNLOOP_TYPE) { - _dispatch_runloop_queue_xref_dispose(dou._dq); + case DISPATCH_QUEUE_RUNLOOP_TYPE: + _dispatch_runloop_queue_xref_dispose(dou._dl); + break; } return _dispatch_release_tailcall(dou._os_obj); } @@ -211,14 +214,20 @@ void _dispatch_dispose(dispatch_object_t dou) { dispatch_queue_t tq = dou._do->do_targetq; - dispatch_function_t func = dou._do->do_finalizer; + dispatch_function_t func = _dispatch_object_finalizer(dou); void *ctxt = dou._do->do_ctxt; bool allow_free = true; - if (slowpath(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { + if (unlikely(dou._do->do_next != DISPATCH_OBJECT_LISTLESS)) { DISPATCH_INTERNAL_CRASH(dou._do->do_next, "Release while enqueued"); } + if (unlikely(tq && tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { + // the workloop fallback global queue is never serviced, so 
redirect + // the finalizer onto a global queue + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)->_as_dq; + } + dx_dispose(dou._do, &allow_free); // Past this point, the only thing left of the object is its memory @@ -236,9 +245,7 @@ void * dispatch_get_context(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_get_context, dou); - if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || - dx_hastypeflag(dou._do, QUEUE_ROOT) || - dx_hastypeflag(dou._do, QUEUE_BASE))) { + if (unlikely(dx_hastypeflag(dou._do, NO_CONTEXT))) { return NULL; } return dou._do->do_ctxt; @@ -248,9 +255,7 @@ void dispatch_set_context(dispatch_object_t dou, void *context) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_context, dou, context); - if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || - dx_hastypeflag(dou._do, QUEUE_ROOT) || - dx_hastypeflag(dou._do, QUEUE_BASE))) { + if (unlikely(dx_hastypeflag(dou._do, NO_CONTEXT))) { return; } dou._do->do_ctxt = context; @@ -260,36 +265,45 @@ void dispatch_set_finalizer_f(dispatch_object_t dou, dispatch_function_t finalizer) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_finalizer_f, dou, finalizer); - if (unlikely(dou._do->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || - dx_hastypeflag(dou._do, QUEUE_ROOT) || - dx_hastypeflag(dou._do, QUEUE_BASE))) { + if (unlikely(dx_hastypeflag(dou._do, NO_CONTEXT))) { return; } - dou._do->do_finalizer = finalizer; + _dispatch_object_set_finalizer(dou, finalizer); } void dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) { DISPATCH_OBJECT_TFB(_dispatch_objc_set_target_queue, dou, tq); - if (dx_vtable(dou._do)->do_set_targetq) { - dx_vtable(dou._do)->do_set_targetq(dou._do, tq); - } else if (likely(dou._do->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && - !dx_hastypeflag(dou._do, QUEUE_ROOT) && - !dx_hastypeflag(dou._do, QUEUE_BASE))) { - if (slowpath(!tq)) { - tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); - } - 
_dispatch_object_set_target_queue_inline(dou._do, tq); + if (unlikely(_dispatch_object_is_global(dou) || + _dispatch_object_is_root_or_base_queue(dou))) { + return; + } + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { + return _dispatch_lane_set_target_queue(dou._dl, tq); + } + if (dx_type(dou._do) == DISPATCH_IO_TYPE) { + // FIXME: dispatch IO should be a "source" + return _dispatch_io_set_target_queue(dou._dchannel, tq); + } + if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { + tq = _dispatch_get_default_queue(false); } + _dispatch_object_set_target_queue_inline(dou._do, tq); } void dispatch_activate(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_activate, dou); - if (dx_vtable(dou._do)->do_resume) { - dx_vtable(dou._do)->do_resume(dou._do, true); + if (unlikely(_dispatch_object_is_global(dou))) { + return; + } + if (dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { + return _dispatch_workloop_activate(dou._dwl); + } + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { + return _dispatch_lane_resume(dou._dl, true); } } @@ -297,8 +311,12 @@ void dispatch_suspend(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_suspend, dou); - if (dx_vtable(dou._do)->do_suspend) { - dx_vtable(dou._do)->do_suspend(dou._do); + if (unlikely(_dispatch_object_is_global(dou) || + _dispatch_object_is_root_or_base_queue(dou))) { + return; + } + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { + return _dispatch_lane_suspend(dou._dl); } } @@ -306,10 +324,12 @@ void dispatch_resume(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_resume, dou); - // the do_suspend below is not a typo. 
Having a do_resume but no do_suspend - // allows for objects to support activate, but have no-ops suspend/resume - if (dx_vtable(dou._do)->do_suspend) { - dx_vtable(dou._do)->do_resume(dou._do, false); + if (unlikely(_dispatch_object_is_global(dou) || + _dispatch_object_is_root_or_base_queue(dou))) { + return; + } + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { + _dispatch_lane_resume(dou._dl, false); } } diff --git a/src/object.m b/src/object.m index efee82947..925fccc43 100644 --- a/src/object.m +++ b/src/object.m @@ -52,7 +52,7 @@ { id obj; size -= sizeof(((struct _os_object_s *)NULL)->os_obj_isa); - while (!fastpath(obj = class_createInstance(cls, size))) { + while (unlikely(!(obj = class_createInstance(cls, size)))) { _dispatch_temporary_resource_shortage(); } return obj; @@ -82,7 +82,11 @@ _Block_use_RR2(&callbacks); #if DISPATCH_COCOA_COMPAT const char *v = getenv("OBJC_DEBUG_MISSING_POOLS"); - _os_object_debug_missing_pools = v && !strcmp(v, "YES"); + if (v) _os_object_debug_missing_pools = _dispatch_parse_bool(v); + v = getenv("DISPATCH_DEBUG_MISSING_POOLS"); + if (v) _os_object_debug_missing_pools = _dispatch_parse_bool(v); + v = getenv("LIBDISPATCH_DEBUG_MISSING_POOLS"); + if (v) _os_object_debug_missing_pools = _dispatch_parse_bool(v); #endif } @@ -173,6 +177,9 @@ -(NSUInteger)retainCount { return _os_object_retain_count(self); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated-implementations" + -(BOOL)retainWeakReference { return _os_object_retain_weak(self); } @@ -181,6 +188,8 @@ -(BOOL)allowsWeakReference { return _os_object_allows_weak_reference(self); } +#pragma clang diagnostic pop + - (void)_xref_dispose { return _os_object_release_internal(self); } @@ -290,11 +299,11 @@ - (NSString *)debugDescription { if (dx_vtable(obj)->do_debug) { dx_debug(obj, buf, sizeof(buf)); } else { - strlcpy(buf, dx_kind(obj), sizeof(buf)); + strlcpy(buf, object_getClassName(self), sizeof(buf)); } NSString *format = [nsstring 
stringWithUTF8String:"<%s: %s>"]; if (!format) return nil; - return [nsstring stringWithFormat:format, class_getName([self class]), buf]; + return [nsstring stringWithFormat:format, object_getClassName(self), buf]; } - (void)dealloc DISPATCH_NORETURN { @@ -313,7 +322,7 @@ - (NSString *)description { if (!nsstring) return nil; NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; if (!format) return nil; - return [nsstring stringWithFormat:format, class_getName([self class]), + return [nsstring stringWithFormat:format, object_getClassName(self), dispatch_queue_get_label(self), self]; } @@ -354,7 +363,7 @@ @implementation DISPATCH_CLASS(queue_runloop) - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); - _dispatch_runloop_queue_xref_dispose(self); + _dispatch_runloop_queue_xref_dispose((dispatch_lane_t)self); [super _xref_dispose]; } @@ -371,12 +380,15 @@ - (void)_xref_dispose { #endif DISPATCH_CLASS_IMPL(semaphore) DISPATCH_CLASS_IMPL(group) +DISPATCH_CLASS_IMPL(workloop) DISPATCH_CLASS_IMPL(queue_serial) DISPATCH_CLASS_IMPL(queue_concurrent) DISPATCH_CLASS_IMPL(queue_main) -DISPATCH_CLASS_IMPL(queue_root) +DISPATCH_CLASS_IMPL(queue_global) +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +DISPATCH_CLASS_IMPL(queue_pthread_root) +#endif DISPATCH_CLASS_IMPL(queue_mgr) -DISPATCH_CLASS_IMPL(queue_specific_queue) DISPATCH_CLASS_IMPL(queue_attr) DISPATCH_CLASS_IMPL(mach_msg) DISPATCH_CLASS_IMPL(io) @@ -410,7 +422,7 @@ - (NSString *)debugDescription { _voucher_debug(self, buf, sizeof(buf)); NSString *format = [nsstring stringWithUTF8String:"<%s: %s>"]; if (!format) return nil; - return [nsstring stringWithFormat:format, class_getName([self class]), buf]; + return [nsstring stringWithFormat:format, object_getClassName(self), buf]; } @end @@ -440,7 +452,7 @@ - (NSString *)debugDescription { void _dispatch_last_resort_autorelease_pool_push(dispatch_invoke_context_t dic) { - if (!slowpath(_os_object_debug_missing_pools)) { + if 
(likely(!_os_object_debug_missing_pools)) { dic->dic_autorelease_pool = _dispatch_autorelease_pool_push(); } } @@ -448,7 +460,7 @@ - (NSString *)debugDescription { void _dispatch_last_resort_autorelease_pool_pop(dispatch_invoke_context_t dic) { - if (!slowpath(_os_object_debug_missing_pools)) { + if (likely(!_os_object_debug_missing_pools)) { _dispatch_autorelease_pool_pop(dic->dic_autorelease_pool); dic->dic_autorelease_pool = NULL; } diff --git a/src/object_internal.h b/src/object_internal.h index 4504f6587..b1f75602a 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -54,7 +54,7 @@ #endif // define a new proper class -#define OS_OBJECT_CLASS_DECL(name, super, ...) \ +#define OS_OBJECT_CLASS_DECL(name, ...) \ struct name##_s; \ struct name##_extra_vtable_s { \ __VA_ARGS__; \ @@ -71,61 +71,60 @@ #define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)); \ - OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) + OS_OBJECT_CLASS_DECL(name, ## __VA_ARGS__) #elif OS_OBJECT_USE_OBJC #define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ OS_OBJECT_DECL(name); \ - OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) + OS_OBJECT_CLASS_DECL(name, ## __VA_ARGS__) #else #define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) 
\ typedef struct name##_s *name##_t; \ - OS_OBJECT_CLASS_DECL(name, super, ## __VA_ARGS__) + OS_OBJECT_CLASS_DECL(name, ## __VA_ARGS__) #endif -#define DISPATCH_CLASS_DECL_BARE(name) \ - OS_OBJECT_CLASS_DECL(dispatch_##name, dispatch_object, \ - DISPATCH_OBJECT_VTABLE_HEADER(dispatch_##name)) +#define DISPATCH_CLASS_DECL_BARE(name, cluster) \ + OS_OBJECT_CLASS_DECL(dispatch_##name, \ + DISPATCH_##cluster##_VTABLE_HEADER(dispatch_##name)) -#define DISPATCH_CLASS_DECL(name) \ +#define DISPATCH_CLASS_DECL(name, cluster) \ _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_object) \ _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_##name, dispatch_##name) \ - DISPATCH_CLASS_DECL_BARE(name) + DISPATCH_CLASS_DECL_BARE(name, cluster) -#define DISPATCH_INTERNAL_CLASS_DECL(name) \ +#define DISPATCH_SUBCLASS_DECL(name, super, ctype) \ + _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_##super); \ + _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_##name, dispatch_##name) \ + OS_OBJECT_SUBCLASS_DECL(dispatch_##name, dispatch_##ctype) + +#define DISPATCH_INTERNAL_CLASS_DECL(name, cluster) \ DISPATCH_DECL(dispatch_##name); \ - DISPATCH_CLASS_DECL(name) + DISPATCH_CLASS_DECL(name, cluster) // define a new subclass used in a cluster -#define OS_OBJECT_SUBCLASS_DECL(name, super) \ - _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ +#define OS_OBJECT_SUBCLASS_DECL(name, ctype) \ struct name##_s; \ - OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ - extern const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ + OS_OBJECT_EXTRA_VTABLE_DECL(name, ctype) \ + extern const struct ctype##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) -#define DISPATCH_SUBCLASS_DECL(name, super) \ - OS_OBJECT_SUBCLASS_DECL(dispatch_##name, super) - #if OS_OBJECT_SWIFT3 // define a new internal subclass used in a class cluster -#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super, ctype) \ 
_OS_OBJECT_DECL_PROTOCOL(name, super); \ - OS_OBJECT_SUBCLASS_DECL(name, super) - -#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ - _OS_OBJECT_DECL_PROTOCOL(dispatch_##name, dispatch_##super) \ - DISPATCH_SUBCLASS_DECL(name, dispatch_##super) + _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + OS_OBJECT_SUBCLASS_DECL(name, ctype) #else // define a new internal subclass used in a class cluster -#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super) \ - OS_OBJECT_DECL_SUBCLASS(name, super); \ - OS_OBJECT_SUBCLASS_DECL(name, super) - -#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super) \ - OS_OBJECT_DECL_SUBCLASS(dispatch_##name, dispatch_##super); \ - DISPATCH_SUBCLASS_DECL(name, dispatch_##super) +#define OS_OBJECT_INTERNAL_SUBCLASS_DECL(name, super, ctype) \ + OS_OBJECT_DECL_SUBCLASS(name, ctype); \ + _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ + OS_OBJECT_SUBCLASS_DECL(name, ctype) #endif +#define DISPATCH_INTERNAL_SUBCLASS_DECL(name, super, ctype) \ + OS_OBJECT_INTERNAL_SUBCLASS_DECL(dispatch_##name, dispatch_##super, \ + dispatch_##ctype) + // vtable symbols #define OS_OBJECT_VTABLE(name) (&OS_OBJECT_CLASS_SYMBOL(name)) #define DISPATCH_OBJC_CLASS(name) (&DISPATCH_CLASS_SYMBOL(name)) @@ -135,39 +134,35 @@ // ObjC classes and dispatch vtables are co-located via linker order and alias // files rdar://10640168 #if OS_OBJECT_HAVE_OBJC2 -#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, ctype, xdispose, dispose, ...) \ __attribute__((section("__DATA,__objc_data"), used)) \ - const struct super##_extra_vtable_s \ + const struct ctype##_extra_vtable_s \ OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { __VA_ARGS__ } -#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, ctype) #define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) #else -#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) 
\ - const struct super##_vtable_s \ +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, ctype, xdispose, dispose, ...) \ + const struct ctype##_vtable_s \ OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) = { \ ._os_obj_objc_isa = &OS_OBJECT_CLASS_SYMBOL(name), \ ._os_obj_vtable = { __VA_ARGS__ }, \ } -#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) \ - extern const struct super##_vtable_s \ +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, ctype) \ + extern const struct ctype##_vtable_s \ OS_OBJECT_EXTRA_VTABLE_SYMBOL(name); #define DISPATCH_VTABLE(name) &OS_OBJECT_EXTRA_VTABLE_SYMBOL(dispatch_##name) -#endif +#endif // OS_OBJECT_HAVE_OBJC2 #else -#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, super, xdispose, dispose, ...) \ - const struct super##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) = { \ +#define OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, ctype, xdispose, dispose, ...) \ + const struct ctype##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) = { \ ._os_obj_xref_dispose = xdispose, \ ._os_obj_dispose = dispose, \ ._os_obj_vtable = { __VA_ARGS__ }, \ } -#define OS_OBJECT_EXTRA_VTABLE_DECL(name, super) +#define OS_OBJECT_EXTRA_VTABLE_DECL(name, ctype) #define DISPATCH_VTABLE(name) DISPATCH_OBJC_CLASS(name) #endif // USE_OBJC -#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, super, ...) \ - OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##super, \ - _dispatch_xref_dispose, _dispatch_dispose, __VA_ARGS__) - // vtables for proper classes #define OS_OBJECT_VTABLE_INSTANCE(name, xdispose, dispose, ...) \ OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(name, name, \ @@ -176,39 +171,50 @@ #define DISPATCH_VTABLE_INSTANCE(name, ...) \ DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, name, __VA_ARGS__) -#define DISPATCH_INVOKABLE_VTABLE_HEADER(x) \ +#if USE_OBJC +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, ctype, ...) 
\ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##ctype, \ + _dispatch_xref_dispose, _dispatch_dispose, __VA_ARGS__) + +#define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ - const char *const do_kind; \ + void (*const do_dispose)(struct x##_s *, bool *allow_free); \ + size_t (*const do_debug)(struct x##_s *, char *, size_t); \ void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t); \ - void (*const do_push)(struct x##_s *, dispatch_object_t, \ - dispatch_qos_t) - -#define DISPATCH_QUEUEABLE_VTABLE_HEADER(x) \ - DISPATCH_INVOKABLE_VTABLE_HEADER(x); \ - void (*const do_wakeup)(struct x##_s *, \ - dispatch_qos_t, dispatch_wakeup_flags_t); \ - void (*const do_dispose)(struct x##_s *, bool *allow_free) + dispatch_invoke_flags_t) +#else +#define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, ctype, ...) \ + OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##ctype, \ + _dispatch_xref_dispose, _dispatch_dispose, \ + .do_kind = #name, __VA_ARGS__) #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ - DISPATCH_QUEUEABLE_VTABLE_HEADER(x); \ - void (*const do_set_targetq)(struct x##_s *, dispatch_queue_t); \ - void (*const do_suspend)(struct x##_s *); \ - void (*const do_resume)(struct x##_s *, bool activate); \ - void (*const do_finalize_activation)(struct x##_s *, bool *allow_resume); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t) + unsigned long const do_type; \ + const char *const do_kind; \ + void (*const do_dispose)(struct x##_s *, bool *allow_free); \ + size_t (*const do_debug)(struct x##_s *, char *, size_t); \ + void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ + dispatch_invoke_flags_t) +#endif + +#define DISPATCH_QUEUE_VTABLE_HEADER(x); \ + DISPATCH_OBJECT_VTABLE_HEADER(x); \ + void (*const dq_activate)(dispatch_queue_class_t, bool *allow_resume); \ + void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ + dispatch_wakeup_flags_t); \ + void 
(*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ + dispatch_qos_t) #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) #define dx_type(x) dx_vtable(x)->do_type -#define dx_subtype(x) (dx_vtable(x)->do_type & _DISPATCH_SUB_TYPE_MASK) #define dx_metatype(x) (dx_vtable(x)->do_type & _DISPATCH_META_TYPE_MASK) +#define dx_cluster(x) (dx_vtable(x)->do_type & _DISPATCH_TYPE_CLUSTER_MASK) #define dx_hastypeflag(x, f) (dx_vtable(x)->do_type & _DISPATCH_##f##_TYPEFLAG) -#define dx_kind(x) dx_vtable(x)->do_kind #define dx_debug(x, y, z) dx_vtable(x)->do_debug((x), (y), (z)) #define dx_dispose(x, y) dx_vtable(x)->do_dispose(x, y) #define dx_invoke(x, y, z) dx_vtable(x)->do_invoke(x, y, z) -#define dx_push(x, y, z) dx_vtable(x)->do_push(x, y, z) -#define dx_wakeup(x, y, z) dx_vtable(x)->do_wakeup(x, y, z) +#define dx_push(x, y, z) dx_vtable(x)->dq_push(x, y, z) +#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z) #define DISPATCH_OBJECT_GLOBAL_REFCNT _OS_OBJECT_GLOBAL_REFCNT @@ -253,19 +259,23 @@ DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, // This wakeup is caused by a dispatch_block_wait() DISPATCH_WAKEUP_BLOCK_WAIT = 0x00000008, + + // This wakeup may cause the source to leave its DSF_NEEDS_EVENT state + DISPATCH_WAKEUP_EVENT = 0x00000010, ); typedef struct dispatch_invoke_context_s { - struct dispatch_object_s *dic_deferred; -#if HAVE_PTHREAD_WORKQUEUE_NARROWING +#if DISPATCH_USE_WORKQUEUE_NARROWING uint64_t dic_next_narrow_check; #endif + struct dispatch_object_s *dic_barrier_waiter; + dispatch_qos_t dic_barrier_waiter_bucket; #if DISPATCH_COCOA_COMPAT void *dic_autorelease_pool; #endif } dispatch_invoke_context_s, *dispatch_invoke_context_t; -#if HAVE_PTHREAD_WORKQUEUE_NARROWING +#if DISPATCH_USE_WORKQUEUE_NARROWING #define DISPATCH_THREAD_IS_NARROWING 1 #define dispatch_with_disabled_narrowing(dic, ...) 
({ \ @@ -322,10 +332,19 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, // @const DISPATCH_INVOKE_MANAGER_DRAIN // We're draining from a manager context // + // @const DISPATCH_INVOKE_THREAD_BOUND + // We're draining from the context of a thread-bound queue (main thread) + // + // @const DISPATCH_INVOKE_WORKER_DRAIN + // The queue at the bottom of this drain is a workloop that supports + // reordering. + // DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, -#define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x000f0000u + DISPATCH_INVOKE_THREAD_BOUND = 0x00080000, + DISPATCH_INVOKE_WORKLOOP_DRAIN = 0x00100000, +#define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x00ff0000u // Autoreleasing modes // @@ -335,58 +354,72 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, // @const DISPATCH_INVOKE_AUTORELEASE_NEVER // Never use autoreleasepools around callouts // - DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x00100000, - DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x00200000, -#define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x00300000u + DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x01000000, + DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x02000000, +#define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u ); enum { - _DISPATCH_META_TYPE_MASK = 0xffff0000, // mask for object meta-types - _DISPATCH_TYPEFLAGS_MASK = 0x0000ff00, // mask for object typeflags - _DISPATCH_SUB_TYPE_MASK = 0x000000ff, // mask for object sub-types - - _DISPATCH_CONTINUATION_TYPE = 0x00000, // meta-type for continuations - _DISPATCH_QUEUE_TYPE = 0x10000, // meta-type for queues - _DISPATCH_SOURCE_TYPE = 0x20000, // meta-type for sources - _DISPATCH_SEMAPHORE_TYPE = 0x30000, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x40000, // meta-type for data node - _DISPATCH_IO_TYPE = 0x50000, // meta-type for io channels - _DISPATCH_OPERATION_TYPE = 0x60000, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x70000, // meta-type for io disks - - 
_DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x0100, // bit set for any root queues - _DISPATCH_QUEUE_BASE_TYPEFLAG = 0x0200, // base of a hierarchy - // targets a root queue - -#define DISPATCH_CONTINUATION_TYPE(name) \ - (_DISPATCH_CONTINUATION_TYPE | DC_##name##_TYPE) - DISPATCH_DATA_TYPE = 1 | _DISPATCH_NODE_TYPE, - DISPATCH_MACH_MSG_TYPE = 2 | _DISPATCH_NODE_TYPE, - DISPATCH_QUEUE_ATTR_TYPE = 3 | _DISPATCH_NODE_TYPE, - - DISPATCH_IO_TYPE = 0 | _DISPATCH_IO_TYPE, - DISPATCH_OPERATION_TYPE = 0 | _DISPATCH_OPERATION_TYPE, - DISPATCH_DISK_TYPE = 0 | _DISPATCH_DISK_TYPE, - - DISPATCH_QUEUE_LEGACY_TYPE = 1 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_SERIAL_TYPE = 2 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_CONCURRENT_TYPE = 3 | _DISPATCH_QUEUE_TYPE, - DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = 4 | _DISPATCH_QUEUE_TYPE | - _DISPATCH_QUEUE_ROOT_TYPEFLAG, - DISPATCH_QUEUE_NETWORK_EVENT_TYPE = 5 | _DISPATCH_QUEUE_TYPE | - _DISPATCH_QUEUE_BASE_TYPEFLAG, - DISPATCH_QUEUE_RUNLOOP_TYPE = 6 | _DISPATCH_QUEUE_TYPE | - _DISPATCH_QUEUE_BASE_TYPEFLAG, - DISPATCH_QUEUE_MGR_TYPE = 7 | _DISPATCH_QUEUE_TYPE | + _DISPATCH_META_TYPE_MASK = 0x000000ff, // mask for object meta-types + _DISPATCH_TYPE_CLUSTER_MASK = 0x000000f0, // mask for the cluster type + _DISPATCH_SUB_TYPE_MASK = 0x0000ff00, // mask for object sub-types + _DISPATCH_TYPEFLAGS_MASK = 0x00ff0000, // mask for object typeflags + + _DISPATCH_OBJECT_CLUSTER = 0x00000000, // dispatch object cluster + _DISPATCH_CONTINUATION_TYPE = 0x00000000, // meta-type for continuations + _DISPATCH_SEMAPHORE_TYPE = 0x00000001, // meta-type for semaphores + _DISPATCH_NODE_TYPE = 0x00000002, // meta-type for data node + _DISPATCH_IO_TYPE = 0x00000003, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x00000004, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x00000005, // meta-type for io disks + + _DISPATCH_QUEUE_CLUSTER = 0x00000010, // dispatch queue cluster + _DISPATCH_LANE_TYPE = 0x00000011, // meta-type for lanes + _DISPATCH_WORKLOOP_TYPE = 
0x00000012, // meta-type for workloops + _DISPATCH_SOURCE_TYPE = 0x00000013, // meta-type for sources + + // QUEUE_ROOT is set on root queues (queues with a NULL do_targetq) + // QUEUE_BASE is set on hierarchy bases, these always target a root queue + // NO_CONTEXT is set on types not supporting dispatch_{get,set}_context + _DISPATCH_QUEUE_ROOT_TYPEFLAG = 0x00010000, + _DISPATCH_QUEUE_BASE_TYPEFLAG = 0x00020000, + _DISPATCH_NO_CONTEXT_TYPEFLAG = 0x00040000, + +#define DISPATCH_OBJECT_SUBTYPE(ty, base) (_DISPATCH_##base##_TYPE | (ty) << 8) +#define DISPATCH_CONTINUATION_TYPE(name) \ + DISPATCH_OBJECT_SUBTYPE(DC_##name##_TYPE, CONTINUATION) + + DISPATCH_SEMAPHORE_TYPE = DISPATCH_OBJECT_SUBTYPE(1, SEMAPHORE), + DISPATCH_GROUP_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SEMAPHORE), + + DISPATCH_DATA_TYPE = DISPATCH_OBJECT_SUBTYPE(1, NODE), + DISPATCH_MACH_MSG_TYPE = DISPATCH_OBJECT_SUBTYPE(2, NODE), + DISPATCH_QUEUE_ATTR_TYPE = DISPATCH_OBJECT_SUBTYPE(3, NODE), + + DISPATCH_IO_TYPE = DISPATCH_OBJECT_SUBTYPE(0, IO), + DISPATCH_OPERATION_TYPE = DISPATCH_OBJECT_SUBTYPE(0, OPERATION), + DISPATCH_DISK_TYPE = DISPATCH_OBJECT_SUBTYPE(0, DISK), + + DISPATCH_QUEUE_SERIAL_TYPE = DISPATCH_OBJECT_SUBTYPE(1, LANE), + DISPATCH_QUEUE_CONCURRENT_TYPE = DISPATCH_OBJECT_SUBTYPE(2, LANE), + DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = DISPATCH_OBJECT_SUBTYPE(3, LANE) | + _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, + DISPATCH_QUEUE_PTHREAD_ROOT_TYPE = DISPATCH_OBJECT_SUBTYPE(4, LANE) | + _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, + DISPATCH_QUEUE_MGR_TYPE = DISPATCH_OBJECT_SUBTYPE(5, LANE) | + _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, + DISPATCH_QUEUE_MAIN_TYPE = DISPATCH_OBJECT_SUBTYPE(6, LANE) | + _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, + DISPATCH_QUEUE_RUNLOOP_TYPE = DISPATCH_OBJECT_SUBTYPE(7, LANE) | + _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, + DISPATCH_QUEUE_NETWORK_EVENT_TYPE = 
DISPATCH_OBJECT_SUBTYPE(8, LANE) | _DISPATCH_QUEUE_BASE_TYPEFLAG, - DISPATCH_QUEUE_SPECIFIC_TYPE = 8 | _DISPATCH_QUEUE_TYPE, - DISPATCH_SEMAPHORE_TYPE = 1 | _DISPATCH_SEMAPHORE_TYPE, - DISPATCH_GROUP_TYPE = 2 | _DISPATCH_SEMAPHORE_TYPE, - - DISPATCH_SOURCE_KEVENT_TYPE = 1 | _DISPATCH_SOURCE_TYPE, - DISPATCH_MACH_CHANNEL_TYPE = 2 | _DISPATCH_SOURCE_TYPE, + DISPATCH_WORKLOOP_TYPE = DISPATCH_OBJECT_SUBTYPE(0, WORKLOOP) | + _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_SOURCE_KEVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(1, SOURCE), + DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), }; typedef struct _os_object_vtable_s { @@ -435,54 +468,12 @@ typedef struct _os_object_s { } _OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); - -OS_OBJECT_CLASS_DECL(dispatch_object, object, - DISPATCH_OBJECT_VTABLE_HEADER(dispatch_object)); +DISPATCH_CLASS_DECL_BARE(object, OBJECT); struct dispatch_object_s { _DISPATCH_OBJECT_HEADER(object); }; -#if OS_OBJECT_HAVE_OBJC1 -#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ - DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ - dispatch_lock __state_field__##_lock, \ - uint32_t __state_field__##_bits \ - ) DISPATCH_ATOMIC64_ALIGN; \ - struct dispatch_object_s *volatile ns##_items_head; \ - unsigned long ns##_serialnum; \ - const char *ns##_label; \ - struct dispatch_object_s *volatile ns##_items_tail; \ - dispatch_priority_t ns##_priority; \ - int volatile ns##_sref_cnt -#else -#define _OS_MPSC_QUEUE_FIELDS(ns, __state_field__) \ - struct dispatch_object_s *volatile ns##_items_head; \ - DISPATCH_UNION_LE(uint64_t volatile __state_field__, \ - dispatch_lock __state_field__##_lock, \ - uint32_t __state_field__##_bits \ - ) DISPATCH_ATOMIC64_ALIGN; \ - /* LP64 global queue cacheline boundary */ \ - unsigned long ns##_serialnum; \ - const char *ns##_label; \ - struct dispatch_object_s *volatile ns##_items_tail; \ - dispatch_priority_t ns##_priority; \ - int volatile ns##_sref_cnt -#endif - 
-OS_OBJECT_INTERNAL_CLASS_DECL(os_mpsc_queue, object, - DISPATCH_QUEUEABLE_VTABLE_HEADER(os_mpsc_queue)); - -struct os_mpsc_queue_s { - struct _os_object_s _as_os_obj[0]; - OS_OBJECT_STRUCT_HEADER(os_mpsc_queue); - struct dispatch_object_s *volatile oq_next; - void *oq_opaque1; // do_targetq - void *oq_opaque2; // do_ctxt - void *oq_opaque3; // do_finalizer - _OS_MPSC_QUEUE_FIELDS(oq, __oq_state_do_not_use); -}; - size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); void *_dispatch_object_alloc(const void *vtable, size_t size); @@ -536,10 +527,10 @@ OS_OBJECT_OBJC_CLASS_DECL(object); // This is required by the dispatch_data_t/NSData bridging, which is not // supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) \ - if (slowpath((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ - slowpath((Class)((o)._os_obj->os_obj_isa) < \ - (Class)OS_OBJECT_VTABLE(dispatch_object)) || \ - slowpath((Class)((o)._os_obj->os_obj_isa) >= \ + if (unlikely(((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + (Class)((o)._os_obj->os_obj_isa) < \ + (Class)OS_OBJECT_VTABLE(dispatch_object) || \ + (Class)((o)._os_obj->os_obj_isa) >= \ (Class)OS_OBJECT_VTABLE(object))) { \ return f((o), ##__VA_ARGS__); \ } @@ -592,14 +583,14 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ typeof(o) _o = (o); \ int _ref_cnt = _o->f; \ - if (fastpath(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ + if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ } \ _ref_cnt; \ }) -#define _os_atomic_refcnt_add2o(o, m, n) \ - _os_atomic_refcnt_perform2o(o, m, add, n, relaxed) +#define _os_atomic_refcnt_add_orig2o(o, m, n) \ + _os_atomic_refcnt_perform2o(o, m, add_orig, n, relaxed) #define _os_atomic_refcnt_sub2o(o, m, n) \ _os_atomic_refcnt_perform2o(o, m, sub, n, release) @@ -611,9 +602,9 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* 
buf, size_t bufsiz); /* * Higher level _os_object_{x,}refcnt_* actions * - * _os_atomic_{x,}refcnt_inc(o): + * _os_atomic_{x,}refcnt_inc_orig(o): * increment the external (resp. internal) refcount and - * returns the new refcount value + * returns the old refcount value * * _os_atomic_{x,}refcnt_dec(o): * decrement the external (resp. internal) refcount and @@ -624,8 +615,8 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); * (resp. internal) refcount * */ -#define _os_object_xrefcnt_inc(o) \ - _os_atomic_refcnt_add2o(o, os_obj_xref_cnt, 1) +#define _os_object_xrefcnt_inc_orig(o) \ + _os_atomic_refcnt_add_orig2o(o, os_obj_xref_cnt, 1) #define _os_object_xrefcnt_dec(o) \ _os_atomic_refcnt_sub2o(o, os_obj_xref_cnt, 1) @@ -633,8 +624,8 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #define _os_object_xrefcnt_dispose_barrier(o) \ _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt) -#define _os_object_refcnt_add(o, n) \ - _os_atomic_refcnt_add2o(o, os_obj_ref_cnt, n) +#define _os_object_refcnt_add_orig(o, n) \ + _os_atomic_refcnt_add_orig2o(o, os_obj_ref_cnt, n) #define _os_object_refcnt_sub(o, n) \ _os_atomic_refcnt_sub2o(o, os_obj_ref_cnt, n) diff --git a/src/once.c b/src/once.c index c01538c9d..86a74ff3b 100644 --- a/src/once.c +++ b/src/once.c @@ -24,14 +24,6 @@ #undef dispatch_once_f -typedef struct _dispatch_once_waiter_s { - volatile struct _dispatch_once_waiter_s *volatile dow_next; - dispatch_thread_event_s dow_event; - mach_port_t dow_thread; -} *_dispatch_once_waiter_t; - -#define DISPATCH_ONCE_DONE ((_dispatch_once_waiter_t)~0l) - #ifdef __BLOCKS__ void dispatch_once(dispatch_once_t *val, dispatch_block_t block) @@ -46,70 +38,34 @@ dispatch_once(dispatch_once_t *val, dispatch_block_t block) #define DISPATCH_ONCE_SLOW_INLINE DISPATCH_NOINLINE #endif // DISPATCH_ONCE_INLINE_FASTPATH -DISPATCH_ONCE_SLOW_INLINE +DISPATCH_NOINLINE static void -dispatch_once_f_slow(dispatch_once_t *val, void 
*ctxt, dispatch_function_t func) +_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt, + dispatch_function_t func) { -#if DISPATCH_GATE_USE_FOR_DISPATCH_ONCE - dispatch_once_gate_t l = (dispatch_once_gate_t)val; - - if (_dispatch_once_gate_tryenter(l)) { - _dispatch_client_callout(ctxt, func); - _dispatch_once_gate_broadcast(l); - } else { - _dispatch_once_gate_wait(l); - } -#else - _dispatch_once_waiter_t volatile *vval = (_dispatch_once_waiter_t*)val; - struct _dispatch_once_waiter_s dow = { }; - _dispatch_once_waiter_t tail = &dow, next, tmp; - dispatch_thread_event_t event; - - if (os_atomic_cmpxchg(vval, NULL, tail, acquire)) { - dow.dow_thread = _dispatch_tid_self(); - _dispatch_client_callout(ctxt, func); - - next = (_dispatch_once_waiter_t)_dispatch_once_xchg_done(val); - while (next != tail) { - tmp = (_dispatch_once_waiter_t)_dispatch_wait_until(next->dow_next); - event = &next->dow_event; - next = tmp; - _dispatch_thread_event_signal(event); - } - } else { - _dispatch_thread_event_init(&dow.dow_event); - next = *vval; - for (;;) { - if (next == DISPATCH_ONCE_DONE) { - break; - } - if (os_atomic_cmpxchgv(vval, next, tail, &next, release)) { - dow.dow_thread = next->dow_thread; - dow.dow_next = next; - if (dow.dow_thread) { - pthread_priority_t pp = _dispatch_get_priority(); - _dispatch_thread_override_start(dow.dow_thread, pp, val); - } - _dispatch_thread_event_wait(&dow.dow_event); - if (dow.dow_thread) { - _dispatch_thread_override_end(dow.dow_thread, val); - } - break; - } - } - _dispatch_thread_event_destroy(&dow.dow_event); - } -#endif + _dispatch_client_callout(ctxt, func); + _dispatch_once_gate_broadcast(l); } DISPATCH_NOINLINE void dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) { -#if !DISPATCH_ONCE_INLINE_FASTPATH - if (likely(os_atomic_load(val, acquire) == DLOCK_ONCE_DONE)) { + dispatch_once_gate_t l = (dispatch_once_gate_t)val; + +#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER + 
uintptr_t v = os_atomic_load(&l->dgo_once, acquire); + if (likely(v == DLOCK_ONCE_DONE)) { return; } -#endif // !DISPATCH_ONCE_INLINE_FASTPATH - return dispatch_once_f_slow(val, ctxt, func); +#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER + if (likely(DISPATCH_ONCE_IS_GEN(v))) { + return _dispatch_once_mark_done_if_quiesced(l, v); + } +#endif +#endif + if (_dispatch_once_gate_tryenter(l)) { + return _dispatch_once_callout(l, ctxt, func); + } + return _dispatch_once_wait(l); } diff --git a/src/queue.c b/src/queue.c index 23eb63a7e..59048c33f 100644 --- a/src/queue.c +++ b/src/queue.c @@ -23,520 +23,22 @@ #include "protocol.h" // _dispatch_send_wakeup_runloop_thread #endif -#if HAVE_PTHREAD_WORKQUEUES || DISPATCH_USE_INTERNAL_WORKQUEUE -#define DISPATCH_USE_WORKQUEUES 1 -#endif -#if (!HAVE_PTHREAD_WORKQUEUES || DISPATCH_DEBUG) && \ - !defined(DISPATCH_ENABLE_THREAD_POOL) -#define DISPATCH_ENABLE_THREAD_POOL 1 -#endif -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_ENABLE_THREAD_POOL -#define DISPATCH_USE_PTHREAD_POOL 1 -#endif -#if HAVE_PTHREAD_WORKQUEUES && (!HAVE_PTHREAD_WORKQUEUE_QOS || \ - DISPATCH_DEBUG) && !HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && \ - !defined(DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK) -#define DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK 1 -#endif -#if HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP && (DISPATCH_DEBUG || \ - (!DISPATCH_USE_KEVENT_WORKQUEUE && !HAVE_PTHREAD_WORKQUEUE_QOS)) && \ - !defined(DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) -#define DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP 1 -#endif -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP || \ - DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || \ - DISPATCH_USE_INTERNAL_WORKQUEUE -#if !DISPATCH_USE_INTERNAL_WORKQUEUE -#define DISPATCH_USE_WORKQ_PRIORITY 1 -#endif -#define DISPATCH_USE_WORKQ_OPTIONS 1 -#endif - -#if DISPATCH_USE_WORKQUEUES && DISPATCH_USE_PTHREAD_POOL && \ - !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#define pthread_workqueue_t void* -#endif - -static void _dispatch_sig_thread(void 
*ctxt); -static void _dispatch_cache_cleanup(void *value); -static void _dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc); -static void _dispatch_queue_cleanup(void *ctxt); -static void _dispatch_wlh_cleanup(void *ctxt); -static void _dispatch_deferred_items_cleanup(void *ctxt); -static void _dispatch_frame_cleanup(void *ctxt); -static void _dispatch_context_cleanup(void *ctxt); -static void _dispatch_queue_barrier_complete(dispatch_queue_t dq, +static inline void _dispatch_root_queues_init(void); +static void _dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -static void _dispatch_queue_non_barrier_complete(dispatch_queue_t dq); -static void _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, - dispatch_sync_context_t dsc, dispatch_qos_t qos); -#if HAVE_PTHREAD_WORKQUEUE_QOS -static void _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, - dispatch_queue_t dq, dispatch_qos_t qos); -static inline void _dispatch_queue_class_wakeup_with_override(dispatch_queue_t, - uint64_t dq_state, dispatch_wakeup_flags_t flags); -#endif -#if HAVE_PTHREAD_WORKQUEUES -static void _dispatch_worker_thread4(void *context); +static void _dispatch_lane_non_barrier_complete(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags); #if HAVE_PTHREAD_WORKQUEUE_QOS -static void _dispatch_worker_thread3(pthread_priority_t priority); -#endif -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -static void _dispatch_worker_thread2(int priority, int options, void *context); -#endif -#endif -#if DISPATCH_USE_PTHREAD_POOL -static void *_dispatch_worker_thread(void *context); -#endif - -#if DISPATCH_COCOA_COMPAT -static dispatch_once_t _dispatch_main_q_handle_pred; -static void _dispatch_runloop_queue_poke(dispatch_queue_t dq, - dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -static void _dispatch_runloop_queue_handle_init(void *ctxt); -static void 
_dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq); +static inline void _dispatch_queue_wakeup_with_override( + dispatch_queue_class_t dq, uint64_t dq_state, + dispatch_wakeup_flags_t flags); #endif +static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, + struct dispatch_object_s *dc, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, uint64_t owned); #pragma mark - -#pragma mark dispatch_root_queue - -struct dispatch_pthread_root_queue_context_s { - pthread_attr_t dpq_thread_attr; - dispatch_block_t dpq_thread_configure; - struct dispatch_semaphore_s dpq_thread_mediator; - dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks; -}; -typedef struct dispatch_pthread_root_queue_context_s * - dispatch_pthread_root_queue_context_t; - -#if DISPATCH_ENABLE_THREAD_POOL -static struct dispatch_pthread_root_queue_context_s - _dispatch_pthread_root_queue_contexts[] = { - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = { - 
.dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = { - .dpq_thread_mediator = { - DISPATCH_GLOBAL_OBJECT_HEADER(semaphore), - }}, -}; -#endif - -#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT -#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 -#endif - -struct dispatch_root_queue_context_s { - union { - struct { - int volatile dgq_pending; -#if DISPATCH_USE_WORKQUEUES - qos_class_t dgq_qos; -#if DISPATCH_USE_WORKQ_PRIORITY - int dgq_wq_priority; -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - int dgq_wq_options; -#endif -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL - pthread_workqueue_t dgq_kworkqueue; -#endif -#endif // DISPATCH_USE_WORKQUEUES -#if DISPATCH_USE_PTHREAD_POOL - void *dgq_ctxt; - int32_t volatile dgq_thread_pool_size; -#endif - }; - char _dgq_pad[DISPATCH_CACHELINE_SIZE]; - }; -}; -typedef struct dispatch_root_queue_context_s *dispatch_root_queue_context_t; - -#define WORKQ_PRIO_INVALID (-1) -#ifndef WORKQ_BG_PRIOQUEUE_CONDITIONAL -#define WORKQ_BG_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID -#endif -#ifndef WORKQ_HIGH_PRIOQUEUE_CONDITIONAL -#define WORKQ_HIGH_PRIOQUEUE_CONDITIONAL WORKQ_PRIO_INVALID -#endif - -DISPATCH_CACHELINE_ALIGN -static struct dispatch_root_queue_context_s _dispatch_root_queue_contexts[] = { - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_MAINTENANCE, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - 
DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_MAINTENANCE, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_BACKGROUND, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_BACKGROUND, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_BG_PRIOQUEUE_CONDITIONAL, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_UTILITY, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_LOW_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_UTILITY, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority 
= WORKQ_LOW_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_DEFAULT, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_DEFAULT, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_DEFAULT_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_USER_INITIATED, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_USER_INITIATED, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - 
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_USER_INTERACTIVE, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = 0, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS], -#endif - }}}, - [DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_qos = QOS_CLASS_USER_INTERACTIVE, -#if DISPATCH_USE_WORKQ_PRIORITY - .dgq_wq_priority = WORKQ_HIGH_PRIOQUEUE_CONDITIONAL, -#endif -#if DISPATCH_USE_WORKQ_OPTIONS - .dgq_wq_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, -#endif -#endif -#if DISPATCH_ENABLE_THREAD_POOL - .dgq_ctxt = &_dispatch_pthread_root_queue_contexts[ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT], -#endif - }}}, -}; - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -DISPATCH_CACHELINE_ALIGN -struct dispatch_queue_s _dispatch_root_queues[] = { -#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ - ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ - DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ - DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ - [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ - DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), \ - .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ - .do_ctxt = &_dispatch_root_queue_contexts[ \ - _DISPATCH_ROOT_QUEUE_IDX(n, flags)], \ - .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ - .dq_priority = _dispatch_priority_make(DISPATCH_QOS_##n, 0) | flags | \ - DISPATCH_PRIORITY_FLAG_ROOTQUEUE | \ - ((flags & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE) ? 
0 : \ - DISPATCH_QOS_##n << DISPATCH_PRIORITY_OVERRIDE_SHIFT), \ - __VA_ARGS__ \ - } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, - .dq_label = "com.apple.root.maintenance-qos", - .dq_serialnum = 4, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.maintenance-qos.overcommit", - .dq_serialnum = 5, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, - .dq_label = "com.apple.root.background-qos", - .dq_serialnum = 6, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.background-qos.overcommit", - .dq_serialnum = 7, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, - .dq_label = "com.apple.root.utility-qos", - .dq_serialnum = 8, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.utility-qos.overcommit", - .dq_serialnum = 9, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE, - .dq_label = "com.apple.root.default-qos", - .dq_serialnum = 10, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, - DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.default-qos.overcommit", - .dq_serialnum = 11, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, - .dq_label = "com.apple.root.user-initiated-qos", - .dq_serialnum = 12, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_serialnum = 13, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, - .dq_label = "com.apple.root.user-interactive-qos", - .dq_serialnum = 14, - ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_serialnum = 15, - ), -}; - -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -static const dispatch_queue_t _dispatch_wq2root_queues[][2] = { - [WORKQ_BG_PRIOQUEUE][0] = 
&_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS], - [WORKQ_BG_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = - &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT], - [WORKQ_LOW_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS], - [WORKQ_LOW_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = - &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT], - [WORKQ_DEFAULT_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS], - [WORKQ_DEFAULT_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = - &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT], - [WORKQ_HIGH_PRIOQUEUE][0] = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS], - [WORKQ_HIGH_PRIOQUEUE][WORKQ_ADDTHREADS_OPTION_OVERCOMMIT] = - &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT], -}; -#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -static struct dispatch_queue_s _dispatch_mgr_root_queue; -#else -#define _dispatch_mgr_root_queue _dispatch_root_queues[\ - DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT] -#endif - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -DISPATCH_CACHELINE_ALIGN -struct dispatch_queue_s _dispatch_mgr_q = { - DISPATCH_GLOBAL_OBJECT_HEADER(queue_mgr), - .dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1) | - DISPATCH_QUEUE_ROLE_BASE_ANON, - .do_targetq = &_dispatch_mgr_root_queue, - .dq_label = "com.apple.libdispatch-manager", - .dq_atomic_flags = DQF_WIDTH(1), - .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | - DISPATCH_PRIORITY_SATURATED_OVERRIDE, - .dq_serialnum = 2, -}; - -dispatch_queue_t -dispatch_get_global_queue(long priority, unsigned long flags) -{ - if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return DISPATCH_BAD_INPUT; - } - dispatch_qos_t qos = 
_dispatch_qos_from_queue_priority(priority); -#if !HAVE_PTHREAD_WORKQUEUE_QOS - if (qos == QOS_CLASS_MAINTENANCE) { - qos = DISPATCH_QOS_BACKGROUND; - } else if (qos == QOS_CLASS_USER_INTERACTIVE) { - qos = DISPATCH_QOS_USER_INITIATED; - } -#endif - if (qos == DISPATCH_QOS_UNSPECIFIED) { - return DISPATCH_BAD_INPUT; - } - return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_t -_dispatch_get_current_queue(void) -{ - return _dispatch_queue_get_current() ?: - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); -} - -dispatch_queue_t -dispatch_get_current_queue(void) -{ - return _dispatch_get_current_queue(); -} +#pragma mark dispatch_assert_queue DISPATCH_NOINLINE DISPATCH_NORETURN static void @@ -560,7 +62,8 @@ void dispatch_assert_queue(dispatch_queue_t dq) { unsigned long metatype = dx_metatype(dq); - if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + if (unlikely(metatype != _DISPATCH_LANE_TYPE && + metatype != _DISPATCH_WORKLOOP_TYPE)) { DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue()"); } @@ -568,16 +71,8 @@ dispatch_assert_queue(dispatch_queue_t dq) if (likely(_dq_state_drain_locked_by_self(dq_state))) { return; } - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. 
- // - // However if we can have thread bound queues, these mess with lock - // ownership and we always have to take the slowpath - if (likely(DISPATCH_COCOA_COMPAT || dq->dq_width > 1)) { - if (likely(_dispatch_thread_frame_find_queue(dq))) { - return; - } + if (likely(_dispatch_thread_frame_find_queue(dq))) { + return; } _dispatch_assert_queue_fail(dq, true); } @@ -586,26 +81,18 @@ void dispatch_assert_queue_not(dispatch_queue_t dq) { unsigned long metatype = dx_metatype(dq); - if (unlikely(metatype != _DISPATCH_QUEUE_TYPE)) { + if (unlikely(metatype != _DISPATCH_LANE_TYPE && + metatype != _DISPATCH_WORKLOOP_TYPE)) { DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue_not()"); } uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (likely(!_dq_state_drain_locked_by_self(dq_state))) { - // we can look at the width: if it is changing while we read it, - // it means that a barrier is running on `dq` concurrently, which - // proves that we're not on `dq`. Hence reading a stale '1' is ok. - // - // However if we can have thread bound queues, these mess with lock - // ownership and we always have to take the slowpath - if (likely(!DISPATCH_COCOA_COMPAT && dq->dq_width == 1)) { - return; - } - if (likely(!_dispatch_thread_frame_find_queue(dq))) { - return; - } + if (unlikely(_dq_state_drain_locked_by_self(dq_state))) { + _dispatch_assert_queue_fail(dq, false); + } + if (unlikely(_dispatch_thread_frame_find_queue(dq))) { + _dispatch_assert_queue_fail(dq, false); } - _dispatch_assert_queue_fail(dq, false); } void @@ -627,4598 +114,4411 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq) _dispatch_assert_queue_barrier_fail(dq); } -#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG -#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) -#define _dispatch_debug_root_queue(...) dispatch_debug_queue(__VA_ARGS__) -#else -#define _dispatch_root_queue_debug(...) -#define _dispatch_debug_root_queue(...) 
-#endif - #pragma mark - -#pragma mark dispatch_init - -static inline bool -_dispatch_root_queues_init_workq(int *wq_supported) -{ - int r; (void)r; - bool result = false; - *wq_supported = 0; -#if DISPATCH_USE_WORKQUEUES - bool disable_wq = false; (void)disable_wq; -#if DISPATCH_ENABLE_THREAD_POOL && DISPATCH_DEBUG - disable_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KWQ")); -#endif -#if DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS - bool disable_qos = false; -#if DISPATCH_DEBUG - disable_qos = slowpath(getenv("LIBDISPATCH_DISABLE_QOS")); -#endif -#if DISPATCH_USE_KEVENT_WORKQUEUE - bool disable_kevent_wq = false; -#if DISPATCH_DEBUG || DISPATCH_PROFILE - disable_kevent_wq = slowpath(getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")); -#endif -#endif +#pragma mark _dispatch_set_priority_and_mach_voucher +#if HAVE_PTHREAD_WORKQUEUE_QOS - if (!disable_wq && !disable_qos) { - *wq_supported = _pthread_workqueue_supported(); -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (!disable_kevent_wq && (*wq_supported & WORKQ_FEATURE_KEVENT)) { - r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread3, - (pthread_workqueue_function_kevent_t) - _dispatch_kevent_worker_thread, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#if DISPATCH_USE_MGR_THREAD - _dispatch_kevent_workqueue_enabled = !r; -#endif - result = !r; - } else -#endif // DISPATCH_USE_KEVENT_WORKQUEUE - if (*wq_supported & WORKQ_FEATURE_FINEPRIO) { -#if DISPATCH_USE_MGR_THREAD - r = _pthread_workqueue_init(_dispatch_worker_thread3, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); - result = !r; -#endif - } - if (!(*wq_supported & WORKQ_FEATURE_MAINTENANCE)) { - DISPATCH_INTERNAL_CRASH(*wq_supported, - "QoS Maintenance support required"); - } - } -#endif // DISPATCH_USE_KEVENT_WORKQUEUE || HAVE_PTHREAD_WORKQUEUE_QOS -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - if (!result && !disable_wq) { - pthread_workqueue_setdispatchoffset_np( - offsetof(struct dispatch_queue_s, dq_serialnum)); - r = 
pthread_workqueue_setdispatch_np(_dispatch_worker_thread2); -#if !DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - (void)dispatch_assume_zero(r); -#endif - result = !r; - } -#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_USE_PTHREAD_POOL - if (!result) { -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - pthread_workqueue_attr_t pwq_attr; - if (!disable_wq) { - r = pthread_workqueue_attr_init_np(&pwq_attr); - (void)dispatch_assume_zero(r); - } -#endif - size_t i; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - pthread_workqueue_t pwq = NULL; - dispatch_root_queue_context_t qc; - qc = &_dispatch_root_queue_contexts[i]; -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (!disable_wq && qc->dgq_wq_priority != WORKQ_PRIO_INVALID) { - r = pthread_workqueue_attr_setqueuepriority_np(&pwq_attr, - qc->dgq_wq_priority); - (void)dispatch_assume_zero(r); - r = pthread_workqueue_attr_setovercommit_np(&pwq_attr, - qc->dgq_wq_options & - WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); - (void)dispatch_assume_zero(r); - r = pthread_workqueue_create_np(&pwq, &pwq_attr); - (void)dispatch_assume_zero(r); - result = result || dispatch_assume(pwq); - } -#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (pwq) { - qc->dgq_kworkqueue = pwq; +DISPATCH_NOINLINE +void +_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, + mach_voucher_t kv) +{ + _pthread_set_flags_t pflags = 0; + if (pp && _dispatch_set_qos_class_enabled) { + pthread_priority_t old_pri = _dispatch_get_priority(); + if (pp != old_pri) { + if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) { + pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; + // when we unbind, overcomitness can flip, so we need to learn + // it from the defaultpri, see _dispatch_priority_compute_update + pp |= (_dispatch_get_basepri() & + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } else { - qc->dgq_kworkqueue = (void*)(~0ul); - // because the fastpath of _dispatch_global_queue_poke didn't - // know yet 
that we're using the internal pool implementation - // we have to undo its setting of dgq_pending - qc->dgq_pending = 0; + // else we need to keep the one that is set in the current pri + pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + } + if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + pflags |= _PTHREAD_SET_SELF_QOS_FLAG; + } + uint64_t mgr_dq_state = + os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); + if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { + DISPATCH_INTERNAL_CRASH(pp, + "Changing the QoS while on the manager queue"); + } + if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager"); + } + if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { + DISPATCH_INTERNAL_CRASH(old_pri, + "Cannot turn a manager thread into a normal one"); } } -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (!disable_wq) { - r = pthread_workqueue_attr_destroy_np(&pwq_attr); - (void)dispatch_assume_zero(r); - } -#endif } -#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK || DISPATCH_ENABLE_THREAD_POOL -#endif // DISPATCH_USE_WORKQUEUES - return result; -} - -#if DISPATCH_USE_PTHREAD_POOL -static inline void -_dispatch_root_queue_init_pthread_pool(dispatch_root_queue_context_t qc, - int32_t pool_size, bool overcommit) -{ - dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - int32_t thread_pool_size = overcommit ? 
DISPATCH_WORKQ_MAX_PTHREAD_COUNT : - (int32_t)dispatch_hw_config(active_cpus); - if (slowpath(pool_size) && pool_size < thread_pool_size) { - thread_pool_size = pool_size; - } - qc->dgq_thread_pool_size = thread_pool_size; -#if DISPATCH_USE_WORKQUEUES - if (qc->dgq_qos) { - (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); - (void)dispatch_assume_zero(pthread_attr_setdetachstate( - &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - (void)dispatch_assume_zero(pthread_attr_set_qos_class_np( - &pqc->dpq_thread_attr, qc->dgq_qos, 0)); + if (kv != VOUCHER_NO_MACH_VOUCHER) { +#if VOUCHER_USE_MACH_VOUCHER + pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; #endif } -#endif // HAVE_PTHREAD_WORKQUEUES - _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; - _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); - _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); + if (!pflags) return; + int r = _pthread_set_properties_self(pflags, pp, kv); + if (r == EINVAL) { + DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed"); + } + (void)dispatch_assume_zero(r); } -#endif // DISPATCH_USE_PTHREAD_POOL -static void -_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) +DISPATCH_NOINLINE +voucher_t +_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, + voucher_t v, dispatch_thread_set_self_t flags) { - int wq_supported; - _dispatch_fork_becomes_unsafe(); - if (!_dispatch_root_queues_init_workq(&wq_supported)) { -#if DISPATCH_ENABLE_THREAD_POOL - size_t i; - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - bool overcommit = true; -#if TARGET_OS_EMBEDDED || (DISPATCH_USE_INTERNAL_WORKQUEUE && HAVE_DISPATCH_WORKQ_MONITORING) - // some software hangs if the non-overcommitting queues do not - // overcommit when threads block. 
Someday, this behavior should - // apply to all platforms - if (!(i & 1)) { - overcommit = false; - } -#endif - _dispatch_root_queue_init_pthread_pool( - &_dispatch_root_queue_contexts[i], 0, overcommit); + voucher_t ov = DISPATCH_NO_VOUCHER; + mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; + if (v != DISPATCH_NO_VOUCHER) { + bool retained = flags & DISPATCH_VOUCHER_CONSUME; + ov = _voucher_get(); + if (ov == v && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (retained && v) _voucher_release_no_dispose(v); + ov = DISPATCH_NO_VOUCHER; + } else { + if (!retained && v) _voucher_retain(v); + kv = _voucher_swap_and_get_mach_voucher(ov, v); } -#else - DISPATCH_INTERNAL_CRASH((errno << 16) | wq_supported, - "Root queue initialization failed"); -#endif // DISPATCH_ENABLE_THREAD_POOL } -} - -void -_dispatch_root_queues_init(void) -{ - static dispatch_once_t _dispatch_root_queues_pred; - dispatch_once_f(&_dispatch_root_queues_pred, NULL, - _dispatch_root_queues_init_once); -} - -DISPATCH_EXPORT DISPATCH_NOTHROW -void -libdispatch_init(void) -{ - dispatch_assert(DISPATCH_ROOT_QUEUE_COUNT == 2 * DISPATCH_QOS_MAX); - - dispatch_assert(DISPATCH_QUEUE_PRIORITY_LOW == - -DISPATCH_QUEUE_PRIORITY_HIGH); - dispatch_assert(countof(_dispatch_root_queues) == - DISPATCH_ROOT_QUEUE_COUNT); - dispatch_assert(countof(_dispatch_root_queue_contexts) == - DISPATCH_ROOT_QUEUE_COUNT); -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - dispatch_assert(sizeof(_dispatch_wq2root_queues) / - sizeof(_dispatch_wq2root_queues[0][0]) == - WORKQ_NUM_PRIOQUEUE * 2); -#endif -#if DISPATCH_ENABLE_THREAD_POOL - dispatch_assert(countof(_dispatch_pthread_root_queue_contexts) == - DISPATCH_ROOT_QUEUE_COUNT); -#endif - - dispatch_assert(offsetof(struct dispatch_continuation_s, do_next) == - offsetof(struct dispatch_object_s, do_next)); - dispatch_assert(offsetof(struct dispatch_continuation_s, do_vtable) == - offsetof(struct dispatch_object_s, do_vtable)); - dispatch_assert(sizeof(struct dispatch_apply_s) <= - 
DISPATCH_CONTINUATION_SIZE); - dispatch_assert(sizeof(struct dispatch_queue_s) % DISPATCH_CACHELINE_SIZE - == 0); - dispatch_assert(offsetof(struct dispatch_queue_s, dq_state) % _Alignof(uint64_t) == 0); - dispatch_assert(sizeof(struct dispatch_root_queue_context_s) % - DISPATCH_CACHELINE_SIZE == 0); - -#if HAVE_PTHREAD_WORKQUEUE_QOS - dispatch_qos_t qos = _dispatch_qos_from_qos_class(qos_class_main()); - dispatch_priority_t pri = _dispatch_priority_make(qos, 0); - _dispatch_main_q.dq_priority = _dispatch_priority_with_override_qos(pri, qos); -#if DISPATCH_DEBUG - if (!slowpath(getenv("LIBDISPATCH_DISABLE_SET_QOS"))) { - _dispatch_set_qos_class_enabled = 1; + if (!(flags & DISPATCH_THREAD_PARK)) { + _dispatch_set_priority_and_mach_voucher_slow(priority, kv); } -#endif -#endif - -#if DISPATCH_USE_THREAD_LOCAL_STORAGE - _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); -#else - _dispatch_thread_key_create(&dispatch_priority_key, NULL); - _dispatch_thread_key_create(&dispatch_r2k_key, NULL); - _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); - _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); - _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); - _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); - _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, - NULL); - _dispatch_thread_key_create(&dispatch_basepri_key, NULL); -#if DISPATCH_INTROSPECTION - _dispatch_thread_key_create(&dispatch_introspection_key , NULL); -#elif DISPATCH_PERF_MON - _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); -#endif - _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); - _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); - _dispatch_thread_key_create(&dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); -#endif - -#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 - 
_dispatch_main_q.do_targetq = &_dispatch_root_queues[ - DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT]; -#endif - - _dispatch_queue_set_current(&_dispatch_main_q); - _dispatch_queue_set_bound_thread(&_dispatch_main_q); - -#if DISPATCH_USE_PTHREAD_ATFORK - (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, - dispatch_atfork_parent, dispatch_atfork_child)); -#endif - _dispatch_hw_config_init(); - _dispatch_time_init(); - _dispatch_vtable_init(); - _os_object_init(); - _voucher_init(); - _dispatch_introspection_init(); -} - -#if DISPATCH_USE_THREAD_LOCAL_STORAGE -#include -#include - -#ifndef __ANDROID__ -#ifdef SYS_gettid -DISPATCH_ALWAYS_INLINE -static inline pid_t -gettid(void) -{ - return (pid_t) syscall(SYS_gettid); -} -#else -#error "SYS_gettid unavailable on this system" -#endif /* SYS_gettid */ -#endif /* ! __ANDROID__ */ - -#define _tsd_call_cleanup(k, f) do { \ - if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ - } while (0) - -#ifdef __ANDROID__ -static void (*_dispatch_thread_detach_callback)(void); - -void -_dispatch_install_thread_detach_callback(dispatch_function_t cb) -{ - if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { - DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); - } + if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { + if (ov) _voucher_release(ov); + ov = DISPATCH_NO_VOUCHER; + } + return ov; } #endif +#pragma mark - +#pragma mark dispatch_continuation_t -void -_libdispatch_tsd_cleanup(void *ctx) -{ - struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; - - _tsd_call_cleanup(dispatch_priority_key, NULL); - _tsd_call_cleanup(dispatch_r2k_key, NULL); +static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +static void 
_dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); - _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); - _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); - _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); - _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); - _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, - NULL); - _tsd_call_cleanup(dispatch_basepri_key, NULL); -#if DISPATCH_INTROSPECTION - _tsd_call_cleanup(dispatch_introspection_key, NULL); -#elif DISPATCH_PERF_MON - _tsd_call_cleanup(dispatch_bcounter_key, NULL); +const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { + DC_VTABLE_ENTRY(ASYNC_REDIRECT, + .do_invoke = _dispatch_async_redirect_invoke), +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, + .do_invoke = _dispatch_mach_send_barrier_drain_invoke), + DC_VTABLE_ENTRY(MACH_SEND_BARRIER, + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_RECV_BARRIER, + .do_invoke = _dispatch_mach_barrier_invoke), + DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, + .do_invoke = _dispatch_mach_msg_async_reply_invoke), #endif - _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); - _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); - _tsd_call_cleanup(dispatch_deferred_items_key, - _dispatch_deferred_items_cleanup); -#ifdef __ANDROID__ - if (_dispatch_thread_detach_callback) { - _dispatch_thread_detach_callback(); - } +#if HAVE_PTHREAD_WORKQUEUE_QOS + DC_VTABLE_ENTRY(WORKLOOP_STEALING, + .do_invoke = _dispatch_workloop_stealer_invoke), + DC_VTABLE_ENTRY(OVERRIDE_STEALING, + .do_invoke = _dispatch_queue_override_invoke), + DC_VTABLE_ENTRY(OVERRIDE_OWNING, + .do_invoke = _dispatch_queue_override_invoke), #endif - tsd->tid = 0; -} +}; DISPATCH_NOINLINE -void -libdispatch_tsd_init(void) -{ - pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); - 
__dispatch_tsd.tid = gettid(); -} -#endif - -DISPATCH_NOTHROW -void -_dispatch_queue_atfork_child(void) +static void +_dispatch_cache_cleanup(void *value) { - dispatch_queue_t main_q = &_dispatch_main_q; - void *crash = (void *)0x100; - size_t i; + dispatch_continuation_t dc, next_dc = value; - if (_dispatch_queue_is_thread_bound(main_q)) { - _dispatch_queue_set_bound_thread(main_q); + while ((dc = next_dc)) { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); } +} - if (!_dispatch_is_multithreaded_inline()) return; - - main_q->dq_items_head = crash; - main_q->dq_items_tail = crash; - - _dispatch_mgr_q.dq_items_head = crash; - _dispatch_mgr_q.dq_items_tail = crash; - - for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { - _dispatch_root_queues[i].dq_items_head = crash; - _dispatch_root_queues[i].dq_items_tail = crash; +static void +_dispatch_force_cache_cleanup(void) +{ + dispatch_continuation_t dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + if (dc) { + _dispatch_thread_setspecific(dispatch_cache_key, NULL); + _dispatch_cache_cleanup(dc); } } +#if DISPATCH_USE_MEMORYPRESSURE_SOURCE DISPATCH_NOINLINE void -_dispatch_fork_becomes_unsafe_slow(void) +_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, - _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { - DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + _dispatch_continuation_free_to_heap(dc); + dispatch_continuation_t next_dc; + dc = _dispatch_thread_getspecific(dispatch_cache_key); + int cnt; + if (!dc || (cnt = dc->dc_cache_cnt - + _dispatch_continuation_cache_limit) <= 0) { + return; } + do { + next_dc = dc->do_next; + _dispatch_continuation_free_to_heap(dc); + } while (--cnt && (dc = next_dc)); + _dispatch_thread_setspecific(dispatch_cache_key, next_dc); } +#endif DISPATCH_NOINLINE void -_dispatch_prohibit_transition_to_multithreaded(bool 
prohibit) +_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu) { - if (prohibit) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, - _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { - DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); - } - } else { - os_atomic_and(&_dispatch_unsafe_fork, - (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); - } + _dispatch_continuation_pop_inline(dou, dic, flags, dqu._dq); } #pragma mark - -#pragma mark dispatch_queue_attr_t +#pragma mark dispatch_block_create + +#if __BLOCKS__ DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_qos_class_valid(dispatch_qos_class_t qos_class, int relative_priority) -{ - qos_class_t qos = (qos_class_t)qos_class; - switch (qos) { - case QOS_CLASS_MAINTENANCE: - case QOS_CLASS_BACKGROUND: - case QOS_CLASS_UTILITY: - case QOS_CLASS_DEFAULT: - case QOS_CLASS_USER_INITIATED: - case QOS_CLASS_USER_INTERACTIVE: - case QOS_CLASS_UNSPECIFIED: - break; - default: - return false; - } - if (relative_priority > 0 || relative_priority < QOS_MIN_RELATIVE_PRIORITY){ - return false; - } - return true; -} - -#define DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit) \ - ((overcommit) == _dispatch_queue_attr_overcommit_disabled ? \ - DQA_INDEX_NON_OVERCOMMIT : \ - ((overcommit) == _dispatch_queue_attr_overcommit_enabled ? \ - DQA_INDEX_OVERCOMMIT : DQA_INDEX_UNSPECIFIED_OVERCOMMIT)) - -#define DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent) \ - ((concurrent) ? DQA_INDEX_CONCURRENT : DQA_INDEX_SERIAL) - -#define DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive) \ - ((inactive) ? 
DQA_INDEX_INACTIVE : DQA_INDEX_ACTIVE) - -#define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency) \ - (frequency) - -#define DISPATCH_QUEUE_ATTR_PRIO2IDX(prio) (-(prio)) - -#define DISPATCH_QUEUE_ATTR_QOS2IDX(qos) (qos) - -static inline dispatch_queue_attr_t -_dispatch_get_queue_attr(dispatch_qos_t qos, int prio, - _dispatch_queue_attr_overcommit_t overcommit, - dispatch_autorelease_frequency_t frequency, - bool concurrent, bool inactive) -{ - return (dispatch_queue_attr_t)&_dispatch_queue_attrs - [DISPATCH_QUEUE_ATTR_QOS2IDX(qos)] - [DISPATCH_QUEUE_ATTR_PRIO2IDX(prio)] - [DISPATCH_QUEUE_ATTR_OVERCOMMIT2IDX(overcommit)] - [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY2IDX(frequency)] - [DISPATCH_QUEUE_ATTR_CONCURRENT2IDX(concurrent)] - [DISPATCH_QUEUE_ATTR_INACTIVE2IDX(inactive)]; -} - -dispatch_queue_attr_t -_dispatch_get_default_queue_attr(void) +_dispatch_block_flags_valid(dispatch_block_flags_t flags) { - return _dispatch_get_queue_attr(DISPATCH_QOS_UNSPECIFIED, 0, - _dispatch_queue_attr_overcommit_unspecified, - DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, false, false); + return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); } -dispatch_queue_attr_t -dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t dqa, - dispatch_qos_class_t qos_class, int relpri) +DISPATCH_ALWAYS_INLINE +static inline dispatch_block_flags_t +_dispatch_block_normalize_flags(dispatch_block_flags_t flags) { - if (!_dispatch_qos_class_valid(qos_class, relpri)) { - return DISPATCH_BAD_INPUT; - } - if (!slowpath(dqa)) { - dqa = _dispatch_get_default_queue_attr(); - } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) { + flags |= DISPATCH_BLOCK_HAS_PRIORITY; } - return _dispatch_get_queue_attr(_dispatch_qos_from_qos_class(qos_class), - relpri, dqa->dqa_overcommit, dqa->dqa_autorelease_frequency, - dqa->dqa_concurrent, dqa->dqa_inactive); -} - 
-dispatch_queue_attr_t -dispatch_queue_attr_make_initially_inactive(dispatch_queue_attr_t dqa) -{ - if (!slowpath(dqa)) { - dqa = _dispatch_get_default_queue_attr(); - } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + if (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) { + flags &= ~(dispatch_block_flags_t)DISPATCH_BLOCK_INHERIT_QOS_CLASS; } - dispatch_priority_t pri = dqa->dqa_qos_and_relpri; - return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), - _dispatch_priority_relpri(pri), dqa->dqa_overcommit, - dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, true); + return flags; } -dispatch_queue_attr_t -dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t dqa, - bool overcommit) +static inline dispatch_block_t +_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, + voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) { - if (!slowpath(dqa)) { - dqa = _dispatch_get_default_queue_attr(); - } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); - } - dispatch_priority_t pri = dqa->dqa_qos_and_relpri; - return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), - _dispatch_priority_relpri(pri), overcommit ? 
- _dispatch_queue_attr_overcommit_enabled : - _dispatch_queue_attr_overcommit_disabled, - dqa->dqa_autorelease_frequency, dqa->dqa_concurrent, - dqa->dqa_inactive); -} + dispatch_block_flags_t unmodified_flags = flags; + pthread_priority_t unmodified_pri = pri; -dispatch_queue_attr_t -dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, - dispatch_autorelease_frequency_t frequency) -{ - switch (frequency) { - case DISPATCH_AUTORELEASE_FREQUENCY_INHERIT: - case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: - case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: - break; - default: - return DISPATCH_BAD_INPUT; + flags = _dispatch_block_normalize_flags(flags); + bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + + if (!(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { + if (flags & DISPATCH_BLOCK_DETACHED) { + voucher = VOUCHER_NULL; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } else if (flags & DISPATCH_BLOCK_NO_VOUCHER) { + voucher = DISPATCH_NO_VOUCHER; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } else if (assign) { +#if OS_VOUCHER_ACTIVITY_SPI + voucher = VOUCHER_CURRENT; +#endif + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + } + } +#if OS_VOUCHER_ACTIVITY_SPI + if (voucher == VOUCHER_CURRENT) { + voucher = _voucher_get(); } - if (!slowpath(dqa)) { - dqa = _dispatch_get_default_queue_attr(); - } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); +#endif + if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { + pri = _dispatch_priority_propagate(); + flags |= DISPATCH_BLOCK_HAS_PRIORITY; } - dispatch_priority_t pri = dqa->dqa_qos_and_relpri; - return _dispatch_get_queue_attr(_dispatch_priority_qos(pri), - _dispatch_priority_relpri(pri), dqa->dqa_overcommit, - frequency, dqa->dqa_concurrent, dqa->dqa_inactive); + dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); + +#if DISPATCH_DEBUG + dispatch_assert(_dispatch_block_get_data(db)); +#endif + + 
_dispatch_trace_block_create_with_voucher_and_priority(db, + _dispatch_Block_invoke(block), unmodified_flags, + ((unmodified_flags & DISPATCH_BLOCK_HAS_PRIORITY) ? unmodified_pri : + (unsigned long)UINT32_MAX), + _dispatch_get_priority(), pri); + return db; } -#pragma mark - -#pragma mark dispatch_queue_t +dispatch_block_t +dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, + block); +} -void -dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) +dispatch_block_t +dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, + dispatch_qos_class_t qos_class, int relative_priority, + dispatch_block_t block) { - if (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { - return; - } - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dq); - if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { - DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; } - dq->dq_label = label; + flags |= DISPATCH_BLOCK_HAS_PRIORITY; + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, NULL, + pri, block); } -static inline bool -_dispatch_base_queue_is_wlh(dispatch_queue_t dq, dispatch_queue_t tq) +dispatch_block_t +dispatch_block_create_with_voucher(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_block_t block) { - (void)dq; (void)tq; - return false; + if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; + flags &= ~DISPATCH_BLOCK_NO_VOUCHER; + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, + block); } -static void 
-_dispatch_queue_inherit_wlh_from_target(dispatch_queue_t dq, - dispatch_queue_t tq) +dispatch_block_t +dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, + voucher_t voucher, dispatch_qos_class_t qos_class, + int relative_priority, dispatch_block_t block) { - uint64_t old_state, new_state, role; - - if (!dx_hastypeflag(tq, QUEUE_ROOT)) { - role = DISPATCH_QUEUE_ROLE_INNER; - } else if (_dispatch_base_queue_is_wlh(dq, tq)) { - role = DISPATCH_QUEUE_ROLE_BASE_WLH; - } else { - role = DISPATCH_QUEUE_ROLE_BASE_ANON; + if (!_dispatch_block_flags_valid(flags) || + !_dispatch_qos_class_valid(qos_class, relative_priority)) { + return DISPATCH_BAD_INPUT; } + flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); + flags &= ~(DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_NO_QOS_CLASS); + pthread_priority_t pri = 0; +#if HAVE_PTHREAD_WORKQUEUE_QOS + pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); +#endif + return _dispatch_block_create_with_voucher_and_priority(flags, voucher, + pri, block); +} - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; - new_state |= role; - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(break); - } - }); - - dispatch_wlh_t cur_wlh = _dispatch_get_wlh(); - if (cur_wlh == (dispatch_wlh_t)dq && !_dq_state_is_base_wlh(new_state)) { - _dispatch_event_loop_leave_immediate(cur_wlh, new_state); +void +dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) +{ + if (!_dispatch_block_flags_valid(flags)) { + DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " + "dispatch_block_perform()"); } - if (!dx_hastypeflag(tq, QUEUE_ROOT)) { -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - _dispatch_queue_atomic_flags_set(tq, DQF_TARGETED); -#else - _dispatch_queue_atomic_flags_set_and_clear(tq, DQF_TARGETED, DQF_LEGACY); -#endif + flags = _dispatch_block_normalize_flags(flags); + + voucher_t voucher = 
DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_DETACHED) { + voucher = VOUCHER_NULL; + flags |= DISPATCH_BLOCK_HAS_VOUCHER; } -} -unsigned long volatile _dispatch_queue_serial_numbers = - DISPATCH_QUEUE_SERIAL_NUMBER_INIT; + struct dispatch_block_private_data_s dbpds = + DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block, voucher); + return _dispatch_block_invoke_direct(&dbpds); +} -dispatch_priority_t -_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, - dispatch_wlh_t *wlh_out) +void +_dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) { - dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; - dispatch_queue_t tq = dq->do_targetq; - dispatch_priority_t tqp = tq->dq_priority &DISPATCH_PRIORITY_REQUESTED_MASK; - dispatch_wlh_t wlh = DISPATCH_WLH_ANON; - - if (_dq_state_is_base_wlh(dq->dq_state)) { - wlh = (dispatch_wlh_t)dq; + dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } + if (atomic_flags & DBF_CANCELED) goto out; - while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { - if (unlikely(tq == &_dispatch_mgr_q)) { - if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; - return DISPATCH_PRIORITY_FLAG_MANAGER; - } - if (unlikely(_dispatch_queue_is_thread_bound(tq))) { - // thread-bound hierarchies are weird, we need to install - // from the context of the thread this hierarchy is bound to - if (wlh_out) *wlh_out = NULL; - return 0; - } - if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { - // this queue may not be activated yet, so the queue graph may not - // have stabilized yet - _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq); - if (wlh_out) *wlh_out = NULL; - return 0; + pthread_priority_t op = 0, p = 0; + 
op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; + } + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } + ov = _dispatch_set_priority_and_voucher(p, v, 0); + dbpd->dbpd_thread = _dispatch_tid_self(); + _dispatch_client_callout(dbpd->dbpd_block, + _dispatch_Block_invoke(dbpd->dbpd_block)); + _dispatch_reset_priority_and_voucher(op, ov); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); } + } +} - if (_dq_state_is_base_wlh(tq->dq_state)) { - wlh = (dispatch_wlh_t)tq; - } else if (unlikely(_dispatch_queue_is_legacy(tq))) { - // we're not allowed to dereference tq->do_targetq - _dispatch_ktrace1(DISPATCH_PERF_delayed_registration, dq); - if (wlh_out) *wlh_out = NULL; - return 0; - } +void +_dispatch_block_sync_invoke(void *block) +{ + dispatch_block_t b = block; + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); + } + if (atomic_flags & DBF_CANCELED) goto out; - if (!(tq->dq_priority & DISPATCH_PRIORITY_FLAG_INHERIT)) { - if (p < tqp) p = tqp; + voucher_t ov = DISPATCH_NO_VOUCHER; + if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { + ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); + } + dbpd->dbpd_block(); + _dispatch_reset_voucher(ov, 0); +out: + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); } - tq = tq->do_targetq; - tqp = tq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; } - if (unlikely(!tqp)) { - // pthread root queues opt out of QoS - if 
(wlh_out) *wlh_out = DISPATCH_WLH_ANON; - return DISPATCH_PRIORITY_FLAG_MANAGER; + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // balances dispatch_{,barrier_,}sync + _dispatch_release_2(boost_dq); } - if (wlh_out) *wlh_out = wlh; - return _dispatch_priority_inherit_from_root_queue(p, tq); } +#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 + DISPATCH_NOINLINE -static dispatch_queue_t -_dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, - dispatch_queue_t tq, bool legacy) +static void +_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) { - if (!slowpath(dqa)) { - dqa = _dispatch_get_default_queue_attr(); - } else if (dqa->do_vtable != DISPATCH_VTABLE(queue_attr)) { - DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); + unsigned int atomic_flags = dbpd->dbpd_atomic_flags; + if (unlikely(atomic_flags & DBF_WAITED)) { + DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " + "run more than once and waited for"); } - // - // Step 1: Normalize arguments (qos, overcommit, tq) - // - - dispatch_qos_t qos = _dispatch_priority_qos(dqa->dqa_qos_and_relpri); -#if !HAVE_PTHREAD_WORKQUEUE_QOS - if (qos == DISPATCH_QOS_USER_INTERACTIVE) { - qos = DISPATCH_QOS_USER_INITIATED; + if (likely(!(atomic_flags & DBF_CANCELED))) { + dbpd->dbpd_block(); } - if (qos == DISPATCH_QOS_MAINTENANCE) { - qos = DISPATCH_QOS_BACKGROUND; + if ((atomic_flags & DBF_PERFORM) == 0) { + if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + dispatch_group_leave(dbpd->dbpd_group); + } } -#endif // !HAVE_PTHREAD_WORKQUEUE_QOS - _dispatch_queue_attr_overcommit_t overcommit = dqa->dqa_overcommit; - if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) { - if (tq->do_targetq) { - DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and " - "a non-global target 
queue"); - } + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // balances dispatch_{,barrier_,group_}async + _dispatch_release_2(boost_dq); } - if (tq && !tq->do_targetq && - tq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { - // Handle discrepancies between attr and target queue, attributes win - if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { - overcommit = _dispatch_queue_attr_overcommit_enabled; - } else { - overcommit = _dispatch_queue_attr_overcommit_disabled; - } - } - if (qos == DISPATCH_QOS_UNSPECIFIED) { - dispatch_qos_t tq_qos = _dispatch_priority_qos(tq->dq_priority); - tq = _dispatch_get_root_queue(tq_qos, - overcommit == _dispatch_queue_attr_overcommit_enabled); - } else { - tq = NULL; - } - } else if (tq && !tq->do_targetq) { - // target is a pthread or runloop root queue, setting QoS or overcommit - // is disallowed - if (overcommit != _dispatch_queue_attr_overcommit_unspecified) { - DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " - "and use this kind of target queue"); - } - if (qos != DISPATCH_QOS_UNSPECIFIED) { - DISPATCH_CLIENT_CRASH(tq, "Cannot specify a QoS attribute " - "and use this kind of target queue"); - } - } else { - if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { - // Serial queues default to overcommit! - overcommit = dqa->dqa_concurrent ? - _dispatch_queue_attr_overcommit_disabled : - _dispatch_queue_attr_overcommit_enabled; - } + if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { + Block_release(b); } - if (!tq) { - tq = _dispatch_get_root_queue( - qos == DISPATCH_QOS_UNSPECIFIED ? 
DISPATCH_QOS_DEFAULT : qos, - overcommit == _dispatch_queue_attr_overcommit_enabled); - if (slowpath(!tq)) { - DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); - } - } - - // - // Step 2: Initialize the queue - // - - if (legacy) { - // if any of these attributes is specified, use non legacy classes - if (dqa->dqa_inactive || dqa->dqa_autorelease_frequency) { - legacy = false; - } - } - - const void *vtable; - dispatch_queue_flags_t dqf = 0; - if (legacy) { - vtable = DISPATCH_VTABLE(queue); - } else if (dqa->dqa_concurrent) { - vtable = DISPATCH_VTABLE(queue_concurrent); - } else { - vtable = DISPATCH_VTABLE(queue_serial); - } - switch (dqa->dqa_autorelease_frequency) { - case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: - dqf |= DQF_AUTORELEASE_NEVER; - break; - case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: - dqf |= DQF_AUTORELEASE_ALWAYS; - break; - } - if (legacy) { - dqf |= DQF_LEGACY; - } - if (label) { - const char *tmp = _dispatch_strdup_if_mutable(label); - if (tmp != label) { - dqf |= DQF_LABEL_NEEDS_FREE; - label = tmp; - } - } - - dispatch_queue_t dq = _dispatch_object_alloc(vtable, - sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD); - _dispatch_queue_init(dq, dqf, dqa->dqa_concurrent ? - DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | - (dqa->dqa_inactive ? 
DISPATCH_QUEUE_INACTIVE : 0)); +} - dq->dq_label = label; -#if HAVE_PTHREAD_WORKQUEUE_QOS - dq->dq_priority = dqa->dqa_qos_and_relpri; - if (overcommit == _dispatch_queue_attr_overcommit_enabled) { - dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - } -#endif - _dispatch_retain(tq); - if (qos == QOS_CLASS_UNSPECIFIED) { - // legacy way of inherithing the QoS from the target - _dispatch_queue_priority_inherit_from_target(dq, tq); - } - if (!dqa->dqa_inactive) { - _dispatch_queue_inherit_wlh_from_target(dq, tq); - } - dq->do_targetq = tq; - _dispatch_object_debug(dq, "%s", __func__); - return _dispatch_introspection_queue_create(dq); +static void +_dispatch_block_async_invoke(void *block) +{ + _dispatch_block_async_invoke2(block, 0); } -dispatch_queue_t -dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, - dispatch_queue_t tq) +static void +_dispatch_block_async_invoke_and_release(void *block) { - return _dispatch_queue_create_with_target(label, dqa, tq, false); + _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); } -dispatch_queue_t -dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) +void +dispatch_block_cancel(dispatch_block_t db) { - return _dispatch_queue_create_with_target(label, attr, - DISPATCH_TARGET_QUEUE_DEFAULT, true); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_cancel()"); + } + (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); } -dispatch_queue_t -dispatch_queue_create_with_accounting_override_voucher(const char *label, - dispatch_queue_attr_t attr, voucher_t voucher) +long +dispatch_block_testcancel(dispatch_block_t db) { - (void)label; (void)attr; (void)voucher; - DISPATCH_CLIENT_CRASH(0, "Unsupported interface"); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + 
DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_testcancel()"); + } + return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); } -void -_dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free) +long +dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (unlikely(!dbpd)) { + DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " + "dispatch_block_wait()"); + } - if (dx_hastypeflag(dq, QUEUE_ROOT)) { - initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; + unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + DBF_WAITING, relaxed); + if (unlikely(flags & (DBF_WAITED | DBF_WAITING))) { + DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " + "more than once"); } - dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - dq_state &= ~DISPATCH_QUEUE_DIRTY; - dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; - if (slowpath(dq_state != initial_state)) { - if (_dq_state_drain_locked(dq_state)) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "Release of a locked queue"); - } -#ifndef __LP64__ - dq_state >>= 32; -#endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "Release of a queue with corrupt state"); + + // If we know the queue where this block is + // enqueued, or the thread that's executing it, then we should boost + // it here. + + pthread_priority_t pp = _dispatch_get_priority(); + + dispatch_queue_t boost_dq; + boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + if (boost_dq) { + // release balances dispatch_{,barrier_,group_}async. + // Can't put the queue back in the timeout case: the block might + // finish after we fell out of group_wait and see our NULL, so + // neither of us would ever release. 
Side effect: After a _wait + // that times out, subsequent waits will not boost the qos of the + // still-running block. + dx_wakeup(boost_dq, _dispatch_qos_from_pp(pp), + DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2); } - if (slowpath(dq->dq_items_tail)) { - DISPATCH_CLIENT_CRASH(dq->dq_items_tail, - "Release of a queue while items are enqueued"); + + mach_port_t boost_th = dbpd->dbpd_thread; + if (boost_th) { + _dispatch_thread_override_start(boost_th, pp, dbpd); } - // trash the queue so that use after free will crash - dq->dq_items_head = (void *)0x200; - dq->dq_items_tail = (void *)0x200; + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (unlikely(performed > 1 || (boost_th && boost_dq))) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and waited for"); + } + + long ret = dispatch_group_wait(dbpd->dbpd_group, timeout); - dispatch_queue_t dqsq = os_atomic_xchg2o(dq, dq_specific_q, - (void *)0x200, relaxed); - if (dqsq) { - _dispatch_release(dqsq); + if (boost_th) { + _dispatch_thread_override_end(boost_th, dbpd); } - // fastpath for queues that never got their storage retained - if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) { - // poison the state with something that is suspended and is easy to spot - dq->dq_state = 0xdead000000000000; - return; + if (ret) { + // timed out: reverse our changes + os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); + } else { + os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); + // don't need to re-test here: the second call would see + // the first call's WAITING } - // Take over freeing the memory from _dispatch_object_dealloc() - // - // As soon as we call _dispatch_queue_release_storage(), we forfeit - // the possibility for the caller of dx_dispose() to finalize the object - // so that responsibility is ours. 
- _dispatch_object_finalize(dq); - *allow_free = false; - dq->dq_label = ""; - dq->do_targetq = NULL; - dq->do_finalizer = NULL; - dq->do_ctxt = NULL; - return _dispatch_queue_release_storage(dq); + return ret; } -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol void -_dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free) +dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, + dispatch_block_t notification_block) { - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_introspection_queue_dispose(dq); - if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { - free((void*)dq->dq_label); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); + if (!dbpd) { + DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " + "dispatch_block_notify()"); + } + int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + if (unlikely(performed > 1)) { + DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " + "run more than once and observed"); } - _dispatch_queue_destroy(dq, allow_free); + + return dispatch_group_notify(dbpd->dbpd_group, queue, notification_block); } -void -_dispatch_queue_xref_dispose(dispatch_queue_t dq) +DISPATCH_NOINLINE +dispatch_qos_t +_dispatch_continuation_init_slow(dispatch_continuation_t dc, + dispatch_queue_t dq, dispatch_block_flags_t flags) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(_dq_state_is_suspended(dq_state))) { - long state = (long)dq_state; - if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); - if (unlikely(_dq_state_is_inactive(dq_state))) { - // Arguments for and against this assert are within 6705399 - DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); - } - DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object"); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); + dispatch_block_flags_t block_flags = dbpd->dbpd_flags; + 
uintptr_t dc_flags = dc->dc_flags; + pthread_priority_t pp = 0; + + // balanced in d_block_async_invoke_and_release or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); } - os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); + + if (dc_flags & DC_FLAG_CONSUME) { + dc->dc_func = _dispatch_block_async_invoke_and_release; + } else { + dc->dc_func = _dispatch_block_async_invoke; + } + + flags |= block_flags; + if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { + pp = dbpd->dbpd_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + } else if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { + // _dispatch_source_handler_alloc is calling is and doesn't want us + // to propagate priorities + pp = 0; + } else { + pp = _dispatch_priority_propagate(); + } + _dispatch_continuation_priority_set(dc, dq, pp, flags); + if (block_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BARRIER; + } + if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + voucher_t v = dbpd->dbpd_voucher; + dc->dc_voucher = (v && v != DISPATCH_NO_VOUCHER) ? 
_voucher_retain(v) + : v; + _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); + _dispatch_voucher_ktrace_dc_push(dc); + } else { + _dispatch_continuation_voucher_set(dc, flags); + } + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; + dc->dc_flags = dc_flags; + return _dispatch_qos_from_pp(dc->dc_priority); } +#endif // __BLOCKS__ +#pragma mark - +#pragma mark dispatch_barrier_async + DISPATCH_NOINLINE static void -_dispatch_queue_suspend_slow(dispatch_queue_t dq) +_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, dispatch_block_flags_t flags, + uintptr_t dc_flags) { - uint64_t dq_state, value, delta; + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; - _dispatch_queue_sidelock_lock(dq); + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} - // what we want to transfer (remove from dq_state) - delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; - // but this is a suspend so add a suspend count at the same time - delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (dq->dq_side_suspend_cnt == 0) { - // we substract delta from dq_state, and we want to set this bit - delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; - } +DISPATCH_NOINLINE +void +dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; + dispatch_qos_t qos; - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - // unsigned underflow of the substraction can happen because other - // threads could have touched this value while we were trying to acquire - // the lock, or because another thread raced us to do the same operation - // and got to the lock first. 
- if (unlikely(os_sub_overflow(dq_state, delta, &value))) { - os_atomic_rmw_loop_give_up(goto retry); - } - }); - if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, - DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { - DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); + if (likely(!dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, 0, dc_flags); } - return _dispatch_queue_sidelock_unlock(dq); -retry: - _dispatch_queue_sidelock_unlock(dq); - return dx_vtable(dq)->do_suspend(dq); + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc_flags); } +DISPATCH_NOINLINE void -_dispatch_queue_suspend(dispatch_queue_t dq) +_dispatch_barrier_async_detached_f(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) { - dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + dc->dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER | DC_FLAG_ALLOCATED; + dc->dc_func = func; + dc->dc_ctxt = ctxt; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_trace_item_push(dq, dc); + dx_push(dq._dq, dc, 0); +} - uint64_t dq_state, value; +#ifdef __BLOCKS__ +void +dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER; + dispatch_qos_t qos; - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - value = DISPATCH_QUEUE_SUSPEND_INTERVAL; - if (unlikely(os_add_overflow(dq_state, value, &value))) { - os_atomic_rmw_loop_give_up({ - return _dispatch_queue_suspend_slow(dq); - }); - } + qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc_flags); +} +#endif + +#pragma mark - +#pragma mark dispatch_async + +void +_dispatch_async_redirect_invoke(dispatch_continuation_t dc, + 
dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + dispatch_thread_frame_s dtf; + struct dispatch_continuation_s *other_dc = dc->dc_other; + dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; + // if we went through _dispatch_root_queue_push_override, + // the "right" root queue was stuffed into dc_func + dispatch_queue_global_t assumed_rq = (dispatch_queue_global_t)dc->dc_func; + dispatch_lane_t dq = dc->dc_data; + dispatch_queue_t rq, old_dq; + dispatch_priority_t old_dbp; + + if (ctxt_flags) { + flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; + flags |= ctxt_flags; + } + old_dq = _dispatch_queue_get_current(); + if (assumed_rq) { + old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); + _dispatch_set_basepri(dq->dq_priority); + } else { + old_dbp = _dispatch_set_basepri(dq->dq_priority); + } + + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, { + _dispatch_continuation_pop(other_dc, dic, flags, dq); }); + _dispatch_thread_frame_pop(&dtf); + if (assumed_rq) _dispatch_queue_set_current(old_dq); + _dispatch_reset_basepri(old_dbp); - if (!_dq_state_is_suspended(dq_state)) { - // rdar://8181908 we need to extend the queue life for the duration - // of the call to wakeup at _dispatch_queue_resume() time. 
- _dispatch_retain_2(dq); + rq = dq->do_targetq; + while (unlikely(rq->do_targetq && rq != old_dq)) { + _dispatch_lane_non_barrier_complete(upcast(rq)._dl, 0); + rq = rq->do_targetq; } + + // pairs with _dispatch_async_redirect_wrap + _dispatch_lane_non_barrier_complete(dq, DISPATCH_WAKEUP_CONSUME_2); +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_async_redirect_wrap(dispatch_lane_t dq, dispatch_object_t dou) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + + dou._do->do_next = NULL; + dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); + dc->dc_func = NULL; + dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); + dc->dc_data = dq; + dc->dc_other = dou._do; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + dc->dc_priority = DISPATCH_NO_PRIORITY; + _dispatch_retain_2(dq); // released in _dispatch_async_redirect_invoke + return dc; } DISPATCH_NOINLINE static void -_dispatch_queue_resume_slow(dispatch_queue_t dq) +_dispatch_continuation_redirect_push(dispatch_lane_t dl, + dispatch_object_t dou, dispatch_qos_t qos) { - uint64_t dq_state, value, delta; + if (likely(!_dispatch_object_is_redirection(dou))) { + dou._dc = _dispatch_async_redirect_wrap(dl, dou); + } else if (!dou._dc->dc_ctxt) { + // find first queue in descending target queue order that has + // an autorelease frequency set, and use that as the frequency for + // this continuation. 
+ dou._dc->dc_ctxt = (void *) + (uintptr_t)_dispatch_queue_autorelease_frequency(dl); + } - _dispatch_queue_sidelock_lock(dq); + dispatch_queue_t dq = dl->do_targetq; + if (!qos) qos = _dispatch_priority_qos(dq->dq_priority); + dx_push(dq, dou, qos); +} - // what we want to transfer - delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; - // but this is a resume so consume a suspend count at the same time - delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; - switch (dq->dq_side_suspend_cnt) { - case 0: - goto retry; - case DISPATCH_QUEUE_SUSPEND_HALF: - // we will transition the side count to 0, so we want to clear this bit - delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; - break; +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + dispatch_block_flags_t flags) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + uintptr_t dc_flags = DC_FLAG_CONSUME; + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_async_f_slow(dq, ctxt, func, flags, dc_flags); } - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - // unsigned overflow of the addition can happen because other - // threads could have touched this value while we were trying to acquire - // the lock, or because another thread raced us to do the same operation - // and got to the lock first. 
- if (unlikely(os_add_overflow(dq_state, delta, &value))) { - os_atomic_rmw_loop_give_up(goto retry); - } - }); - dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; - return _dispatch_queue_sidelock_unlock(dq); -retry: - _dispatch_queue_sidelock_unlock(dq); - return dx_vtable(dq)->do_resume(dq, false); + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, flags, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); } DISPATCH_NOINLINE -static void -_dispatch_queue_resume_finalize_activation(dispatch_queue_t dq) +void +dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { - bool allow_resume = true; - // Step 2: run the activation finalizer - if (dx_vtable(dq)->do_finalize_activation) { - dx_vtable(dq)->do_finalize_activation(dq, &allow_resume); - } - // Step 3: consume the suspend count - if (allow_resume) { - return dx_vtable(dq)->do_resume(dq, false); - } + _dispatch_async_f(dq, ctxt, func, 0); } +DISPATCH_NOINLINE void -_dispatch_queue_resume(dispatch_queue_t dq, bool activate) +dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - // covers all suspend and inactive bits, including side suspend bit - const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; - uint64_t pending_barrier_width = - (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - uint64_t set_owner_and_set_full_width_and_in_barrier = - _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; + _dispatch_async_f(dq, ctxt, func, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); +} - // backward compatibility: only dispatch sources can abuse - // dispatch_resume() to really mean dispatch_activate() - bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); - uint64_t dq_state, value; +#ifdef __BLOCKS__ +void +dispatch_async(dispatch_queue_t dq, dispatch_block_t work) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME; + 
dispatch_qos_t qos; - dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT); + qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags); + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} +#endif - // Activation is a bit tricky as it needs to finalize before the wakeup. - // - // If after doing its updates to the suspend count and/or inactive bit, - // the last suspension related bit that would remain is the - // NEEDS_ACTIVATION one, then this function: - // - // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into - // a suspend count) - // 2. runs the activation finalizer - // 3. consumes the suspend count set in (1), and finishes the resume flow - // - // Concurrently, some property setters such as setting dispatch source - // handlers or _dispatch_queue_set_target_queue try to do in-place changes - // before activation. These protect their action by taking a suspend count. - // Step (1) above cannot happen if such a setter has locked the object. 
- if (activate) { - // relaxed atomic because this doesn't publish anything, this is only - // about picking the thread that gets to finalize the activation - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, relaxed, { - if ((dq_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - value = dq_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; - } else if (_dq_state_is_inactive(dq_state)) { - // { sc:>0 i:1 na:1 } -> { i:0 na:1 } - // simple activation because sc is not 0 - // resume will deal with na:1 later - value = dq_state - DISPATCH_QUEUE_INACTIVE; - } else { - // object already active, this is a no-op, just exit - os_atomic_rmw_loop_give_up(return); - } - }); +#pragma mark - +#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete + +DISPATCH_ALWAYS_INLINE +static uint64_t +_dispatch_lane_non_barrier_complete_try_lock(dispatch_lane_t dq, + uint64_t old_state, uint64_t new_state, uint64_t owner_self) +{ + uint64_t full_width = new_state; + if (_dq_state_has_pending_barrier(new_state)) { + full_width -= DISPATCH_QUEUE_PENDING_BARRIER; + full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; } else { - // release barrier needed to publish the effect of - // - dispatch_set_target_queue() - // - dispatch_set_*_handler() - // - do_finalize_activation() - os_atomic_rmw_loop2o(dq, dq_state, dq_state, value, release, { - if ((dq_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL - + DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } - value = dq_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; - } else if (is_source && (dq_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - value = dq_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; - 
} else if (unlikely(os_sub_overflow(dq_state, - DISPATCH_QUEUE_SUSPEND_INTERVAL, &value))) { - // underflow means over-resume or a suspend count transfer - // to the side count is needed - os_atomic_rmw_loop_give_up({ - if (!(dq_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { - goto over_resume; - } - return _dispatch_queue_resume_slow(dq); - }); - // - // below this, value = dq_state - DISPATCH_QUEUE_SUSPEND_INTERVAL - // - } else if (!_dq_state_is_runnable(value)) { - // Out of width or still suspended. - // For the former, force _dispatch_queue_non_barrier_complete - // to reconsider whether it has work to do - value |= DISPATCH_QUEUE_DIRTY; - } else if (_dq_state_drain_locked(value)) { - // still locked by someone else, make drain_try_unlock() fail - // and reconsider whether it has work to do - value |= DISPATCH_QUEUE_DIRTY; - } else if (!is_source && (_dq_state_has_pending_barrier(value) || - value + pending_barrier_width < - DISPATCH_QUEUE_WIDTH_FULL_BIT)) { - // if we can, acquire the full width drain lock - // and then perform a lock transfer - // - // However this is never useful for a source where there are no - // sync waiters, so never take the lock and do a plain wakeup - value &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; - value |= set_owner_and_set_full_width_and_in_barrier; - } else { - // clear overrides and force a wakeup - value &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - value &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - } - }); + full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + full_width += DISPATCH_QUEUE_IN_BARRIER; } - - if ((dq_state ^ value) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count - return _dispatch_queue_resume_finalize_activation(dq); + if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == + DISPATCH_QUEUE_WIDTH_FULL_BIT) { + new_state = full_width; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= owner_self; + } else if (_dq_state_is_dirty(old_state)) { + new_state |= 
DISPATCH_QUEUE_ENQUEUED; } + return new_state; +} - if (activate) { - // if we're still in an activate codepath here we should have - // { sc:>0 na:1 }, if not we've got a corrupt state - if (unlikely(!_dq_state_is_suspended(value))) { - DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); - } - return; +DISPATCH_ALWAYS_INLINE +static void +_dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags, uint64_t old_state, uint64_t new_state) +{ + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); } - if (_dq_state_is_suspended(value)) { - return; + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + if (_dq_state_is_dirty(old_state)) { + // + // dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_force_dependency_on(dq, old_state); + } + return _dispatch_lane_barrier_complete(dq, 0, flags); } - if (_dq_state_is_dirty(dq_state)) { - // - // dependency ordering for dq state changes that were flushed - // and not acted upon - os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, dq_state); - } - // Balancing the retain_2 done in suspend() for rdar://8181908 - dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; - if ((dq_state ^ value) & DISPATCH_QUEUE_IN_BARRIER) { - flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; - } else if (!_dq_state_is_runnable(value)) { - if (_dq_state_is_base_wlh(dq_state)) { - _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); } - return _dispatch_release_2(dq); + dispatch_assert(!_dq_state_is_base_wlh(new_state)); + _dispatch_trace_item_push(dq->do_targetq, dq); + return dx_push(dq->do_targetq, dq, 
_dq_state_max_qos(new_state)); } - dispatch_assert(!_dq_state_received_sync_wait(dq_state)); - dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); - return dx_wakeup(dq, _dq_state_max_qos(dq_state), flags); -over_resume: - if (unlikely(_dq_state_is_inactive(dq_state))) { - DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + _dispatch_release_2_tailcall(dq); } - DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); } -const char * -dispatch_queue_get_label(dispatch_queue_t dq) +DISPATCH_NOINLINE +static void +_dispatch_lane_non_barrier_complete(dispatch_lane_t dq, + dispatch_wakeup_flags_t flags) { - if (slowpath(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { - dq = _dispatch_get_current_queue(); - } - return dq->dq_label ? dq->dq_label : ""; -} + uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); -qos_class_t -dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) -{ - dispatch_qos_class_t qos = _dispatch_priority_qos(dq->dq_priority); - if (relpri_ptr) { - *relpri_ptr = qos ? 
_dispatch_priority_relpri(dq->dq_priority) : 0; - } - return _dispatch_qos_to_qos_class(qos); + // see _dispatch_lane_resume() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; + if (unlikely(_dq_state_drain_locked(old_state))) { + // make drain_try_unlock() fail and reconsider whether there's + // enough width now for a new item + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (likely(_dq_state_is_runnable(new_state))) { + new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, + old_state, new_state, owner_self); + } + }); + + _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_thread_frame_s dtf; + _dispatch_thread_frame_push(&dtf, dq); + _dispatch_client_callout(ctxt, func); + _dispatch_perfmon_workitem_inc(); + _dispatch_thread_frame_pop(&dtf); } +DISPATCH_NOINLINE static void -_dispatch_queue_set_width2(void *ctxt) +_dispatch_sync_function_invoke(dispatch_queue_class_t dq, void *ctxt, + dispatch_function_t func) { - int w = (int)(intptr_t)ctxt; // intentional truncation - uint32_t tmp; - dispatch_queue_t dq = _dispatch_queue_get_current(); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); +} - if (w >= 0) { - tmp = w ? 
(unsigned int)w : 1; - } else { - dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); - switch (w) { - case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: - tmp = _dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_PHYSICAL); - break; - case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: - tmp = _dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_ACTIVE); - break; - case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: - default: - tmp = _dispatch_qos_max_parallelism(qos, 0); - break; +DISPATCH_NOINLINE +static void +_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, + uintptr_t dc_flags) +{ + bool barrier = (dc_flags & DC_FLAG_BARRIER); + do { + if (dq == stop_dq) return; + if (barrier) { + dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE); + } else { + _dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0); } - } - if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { - tmp = DISPATCH_QUEUE_WIDTH_MAX; - } + dq = dq->do_targetq; + barrier = (dq->dq_width == 1); + } while (unlikely(dq->do_targetq)); +} - dispatch_queue_flags_t old_dqf, new_dqf; - os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { - new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); - }); - _dispatch_queue_inherit_wlh_from_target(dq, dq->do_targetq); - _dispatch_object_debug(dq, "%s", __func__); +DISPATCH_NOINLINE +static void +_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq, + void *ctxt, dispatch_function_t func, uintptr_t dc_flags + DISPATCH_TRACE_ARG(void *dc)) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + _dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags); } -void -dispatch_queue_set_width(dispatch_queue_t dq, long width) +DISPATCH_NOINLINE +static void +_dispatch_sync_invoke_and_complete(dispatch_lane_t dq, void *ctxt, + dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) { - if (unlikely(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT || - dx_hastypeflag(dq, 
QUEUE_ROOT) || - dx_hastypeflag(dq, QUEUE_BASE))) { - return; + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + _dispatch_lane_non_barrier_complete(dq, 0); +} + +/* + * For queues we can cheat and inline the unlock code, which is invalid + * for objects with a more complex state machine (sources or mach channels) + */ +DISPATCH_NOINLINE +static void +_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, + void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc)) +{ + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + _dispatch_trace_item_complete(dc); + if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) { + return _dispatch_lane_barrier_complete(dq, 0, 0); } - unsigned long type = dx_type(dq); - switch (type) { - case DISPATCH_QUEUE_LEGACY_TYPE: - case DISPATCH_QUEUE_CONCURRENT_TYPE: - break; - case DISPATCH_QUEUE_SERIAL_TYPE: - DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); - default: - DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + // Presence of any of these bits requires more work that only + // _dispatch_*_barrier_complete() handles properly + // + // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without + // checking the role is sloppy, but is a super fast check, and neither of + // these bits should be set if the lock was never contended/discovered. 
+ const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | + DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + uint64_t old_state, new_state; + + // similar to _dispatch_queue_drain_try_unlock + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + if (unlikely(old_state & fail_unlock_mask)) { + os_atomic_rmw_loop_give_up({ + return _dispatch_lane_barrier_complete(dq, 0, 0); + }); + } + }); + if (_dq_state_is_base_wlh(old_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); } +} - if (likely((int)width >= 0)) { - _dispatch_barrier_trysync_or_async_f(dq, (void*)(intptr_t)width, - _dispatch_queue_set_width2, DISPATCH_BARRIER_TRYSYNC_SUSPEND); - } else { - // The negative width constants need to execute on the queue to - // query the queue QoS - _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, - _dispatch_queue_set_width2); +#pragma mark - +#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake + +DISPATCH_NOINLINE +static void +_dispatch_waiter_wake_wlh_anon(dispatch_sync_context_t dsc) +{ + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_wqthread_override_start(dsc->dsc_waiter, + dsc->dsc_override_qos); } + _dispatch_thread_event_signal(&dsc->dsc_event); } +DISPATCH_NOINLINE static void -_dispatch_queue_legacy_set_target_queue(void *ctxt) +_dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, + uint64_t old_state, uint64_t new_state) { - dispatch_queue_t dq = _dispatch_queue_get_current(); - dispatch_queue_t tq = ctxt; - dispatch_queue_t otq = dq->do_targetq; + dispatch_wlh_t waiter_wlh = dsc->dc_data; - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - 
_dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); - _dispatch_bug_deprecated("Changing the target of a queue " - "already targeted by other dispatch objects"); -#else - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by other dispatch objects"); -#endif + if (_dq_state_in_sync_transfer(old_state) || + _dq_state_in_sync_transfer(new_state) || + (waiter_wlh != DISPATCH_WLH_ANON)) { + _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); } + if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { + _dispatch_waiter_wake_wlh_anon(dsc); + } +} - _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_queue_inherit_wlh_from_target(dq, tq); -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_class_wakeup() - _dispatch_queue_sidelock_lock(dq); -#endif - dq->do_targetq = tq; -#if HAVE_PTHREAD_WORKQUEUE_QOS - // see _dispatch_queue_class_wakeup() - _dispatch_queue_sidelock_unlock(dq); -#endif +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_async_waiter_update(dispatch_sync_context_t dsc, + dispatch_queue_class_t dqu) +{ + dispatch_queue_t dq = dqu._dq; + dispatch_priority_t p = dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; + if (p) { + pthread_priority_t pp = _dispatch_priority_to_pp_strip_flags(p); + if (pp > (dsc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + dsc->dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + } + } - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_introspection_target_queue_changed(dq); - _dispatch_release_tailcall(otq); + if (dsc->dsc_autorelease == 0) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dqu); + dqf &= _DQF_AUTORELEASE_MASK; + dsc->dsc_autorelease = (uint8_t)(dqf / DQF_AUTORELEASE_ALWAYS); + } } -void -_dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq) +DISPATCH_NOINLINE +static void +_dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, + dispatch_object_t dou) { - 
dispatch_assert(dq->do_ref_cnt != DISPATCH_OBJECT_GLOBAL_REFCNT && - dq->do_targetq); + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc; + uint64_t old_state; + + dispatch_assert(!(dsc->dc_flags & DC_FLAG_BARRIER)); + +again: + old_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(!tq)) { - bool is_concurrent_q = (dq->dq_width > 1); - tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, !is_concurrent_q); + if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { + dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); } - if (_dispatch_queue_try_inactive_suspend(dq)) { - _dispatch_object_set_target_queue_inline(dq, tq); - return dx_vtable(dq)->do_resume(dq, false); + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + _dispatch_async_waiter_update(dsc, dq); } -#if !DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by other dispatch objects"); + if (unlikely(_dq_state_is_inner_queue(old_state))) { + dispatch_queue_t tq = dq->do_targetq; + if (likely(tq->dq_width == 1)) { + dsc->dc_flags |= DC_FLAG_BARRIER; + } else { + dsc->dc_flags &= ~DC_FLAG_BARRIER; + if (_dispatch_queue_try_reserve_sync_width(upcast(tq)._dl)) { + dq = upcast(tq)._dl; + goto again; + } + } + return dx_push(tq, dsc, 0); } -#endif - if (unlikely(!_dispatch_queue_is_legacy(dq))) { -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " - "already targeted by other dispatch objects"); + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // _dispatch_barrier_async_and_wait_f_slow() expects dc_other to be the + // bottom queue of the graph + dsc->dc_other = dq; + } + return _dispatch_waiter_wake_wlh_anon(dsc); +} + +DISPATCH_NOINLINE +static void +_dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, + dispatch_object_t dc, 
dispatch_wakeup_flags_t flags, + uint64_t old_state, uint64_t new_state) +{ + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc._dc; + dispatch_queue_t dq = dqu._dq; + dispatch_wlh_t wlh = DISPATCH_WLH_ANON; + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { + dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); } -#endif - DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " - "after it has been activated"); } - unsigned long type = dx_type(dq); - switch (type) { - case DISPATCH_QUEUE_LEGACY_TYPE: -#if DISPATCH_ALLOW_NON_LEAF_RETARGET - if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { - _dispatch_bug_deprecated("Changing the target of a queue " - "already targeted by other dispatch objects"); + if (_dq_state_is_base_wlh(old_state)) { + wlh = (dispatch_wlh_t)dq; + } else if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } + + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued_on_target(new_state)) { + // If the thread request still exists, we need to leave it a +1 + _dispatch_release_no_dispose(dq); + } else { + _dispatch_release_2_no_dispose(dq); + } + } else if (_dq_state_is_base_wlh(old_state) && + _dq_state_is_enqueued_on_target(old_state) && + !_dq_state_is_enqueued_on_target(new_state)) { + // If we cleared the enqueued bit, we're about to destroy the workloop + // thread request, and we need to consume its +1. 
+ _dispatch_release_no_dispose(dq); + } + + // + // Past this point we are borrowing the reference of the sync waiter + // + if (unlikely(_dq_state_is_inner_queue(old_state))) { + dispatch_queue_t tq = dq->do_targetq; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + _dispatch_async_waiter_update(dsc, dq); + } + if (likely(tq->dq_width == 1)) { + dsc->dc_flags |= DC_FLAG_BARRIER; + } else { + dispatch_lane_t dl = upcast(tq)._dl; + dsc->dc_flags &= ~DC_FLAG_BARRIER; + if (_dispatch_queue_try_reserve_sync_width(dl)) { + return _dispatch_non_barrier_waiter_redirect_or_wake(dl, dc); + } } + // passing the QoS of `dq` helps pushing on low priority waiters with + // legacy workloops. +#if DISPATCH_INTROSPECTION + dsc->dsc_from_async = false; #endif - break; - case DISPATCH_SOURCE_KEVENT_TYPE: - case DISPATCH_MACH_CHANNEL_TYPE: - _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); - _dispatch_bug_deprecated("Changing the target of a source " - "after it has been activated"); - break; - default: - DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + return dx_push(tq, dsc, _dq_state_max_qos(old_state)); } - _dispatch_retain(tq); - return _dispatch_barrier_trysync_or_async_f(dq, tq, - _dispatch_queue_legacy_set_target_queue, - DISPATCH_BARRIER_TRYSYNC_SUSPEND); + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // _dispatch_async_and_wait_f_slow() expects dc_other to be the + // bottom queue of the graph + dsc->dc_other = dq; + } +#if DISPATCH_INTROSPECTION + if (dsc->dsc_from_async) { + _dispatch_trace_runtime_event(async_sync_handoff, dq, 0); + } else { + _dispatch_trace_runtime_event(sync_sync_handoff, dq, 0); + } +#endif // DISPATCH_INTROSPECTION + return _dispatch_waiter_wake(dsc, wlh, old_state, new_state); } -#pragma mark - -#pragma mark dispatch_mgr_queue +DISPATCH_NOINLINE +static void +_dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, + struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags, + uint64_t enqueued_bits) +{ + 
dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + struct dispatch_object_s *next_dc; + uint64_t next_owner = 0, old_state, new_state; -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -static struct dispatch_pthread_root_queue_context_s - _dispatch_mgr_root_queue_pthread_context; -static struct dispatch_root_queue_context_s - _dispatch_mgr_root_queue_context = {{{ -#if DISPATCH_USE_WORKQUEUES - .dgq_kworkqueue = (void*)(~0ul), -#endif - .dgq_ctxt = &_dispatch_mgr_root_queue_pthread_context, - .dgq_thread_pool_size = 1, -}}}; + next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); + next_dc = _dispatch_queue_pop_head(dq, dc); -static struct dispatch_queue_s _dispatch_mgr_root_queue = { - DISPATCH_GLOBAL_OBJECT_HEADER(queue_root), - .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - .do_ctxt = &_dispatch_mgr_root_queue_context, - .dq_label = "com.apple.root.libdispatch-manager", - .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | - DISPATCH_PRIORITY_SATURATED_OVERRIDE, - .dq_serialnum = 3, -}; -#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE -static struct { - volatile int prio; - volatile qos_class_t qos; - int default_prio; - int policy; - pthread_t tid; -} _dispatch_mgr_sched; +transfer_lock_again: + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= next_owner; -static dispatch_once_t _dispatch_mgr_sched_pred; + if (_dq_state_is_base_wlh(old_state)) { + new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; + if (next_dc) { + // we know there's a next item, keep the enqueued bit if any + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + 
next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + goto transfer_lock_again; + }); + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else { + new_state -= enqueued_bits; + } + }); -#if HAVE_PTHREAD_WORKQUEUE_QOS -// TODO: switch to "event-reflector thread" property -// Must be kept in sync with list of qos classes in sys/qos.h -static const int _dispatch_mgr_sched_qos2prio[] = { - [QOS_CLASS_MAINTENANCE] = 4, - [QOS_CLASS_BACKGROUND] = 4, - [QOS_CLASS_UTILITY] = 20, - [QOS_CLASS_DEFAULT] = 31, - [QOS_CLASS_USER_INITIATED] = 37, - [QOS_CLASS_USER_INTERACTIVE] = 47, -}; -#endif // HAVE_PTHREAD_WORKQUEUE_QOS + return _dispatch_barrier_waiter_redirect_or_wake(dq, dc, flags, + old_state, new_state); +} +DISPATCH_NOINLINE static void -_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) +_dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, + uint64_t owned) { - struct sched_param param; -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - pthread_attr_t *attr; - attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; -#else - pthread_attr_t a, *attr = &a; -#endif - (void)dispatch_assume_zero(pthread_attr_init(attr)); - (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, - &_dispatch_mgr_sched.policy)); - (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = qos_class_main(); - if (qos == QOS_CLASS_DEFAULT) { - qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 + uint64_t old_state, new_state, enqueue; + dispatch_queue_t tq; + + if (target == DISPATCH_QUEUE_WAKEUP_MGR) { + tq = _dispatch_mgr_q._as_dq; + enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } else if (target) { + tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? 
dq->do_targetq : target; + enqueue = DISPATCH_QUEUE_ENQUEUED; + } else { + tq = NULL; + enqueue = 0; } - if (qos) { - _dispatch_mgr_sched.qos = qos; - param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state - owned, qos); + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (unlikely(_dq_state_is_suspended(old_state))) { + if (likely(_dq_state_is_base_wlh(old_state))) { + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else if (enqueue) { + if (!_dq_state_is_enqueued(old_state)) { + new_state |= enqueue; + } + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + return dx_wakeup(dq, qos, flags); + }); + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } + }); + old_state -= owned; + dispatch_assert(_dq_state_drain_locked_by_self(old_state)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); + + if (_dq_state_is_enqueued(new_state)) { + _dispatch_trace_runtime_event(sync_async_handoff, dq, 0); } -#endif - _dispatch_mgr_sched.default_prio = param.sched_priority; - _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; -} -#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -DISPATCH_NOINLINE -static pthread_t * -_dispatch_mgr_root_queue_init(void) -{ - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - struct sched_param param; - pthread_attr_t *attr; - attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; - 
(void)dispatch_assume_zero(pthread_attr_setdetachstate(attr, - PTHREAD_CREATE_DETACHED)); -#if !DISPATCH_DEBUG - (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); -#endif + + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } + + if (tq) { + if (likely((old_state ^ new_state) & enqueue)) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); + return _dispatch_queue_push_queue(tq, dq, new_state); + } #if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = _dispatch_mgr_sched.qos; - if (qos) { - if (_dispatch_set_qos_class_enabled) { - (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, - qos, 0)); + // when doing sync to async handoff + // if the queue received an override we have to forecefully redrive + // the same override so that a new stealer is enqueued because + // the previous one may be gone already + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dq, new_state, flags); } - } #endif - param.sched_priority = _dispatch_mgr_sched.prio; - if (param.sched_priority > _dispatch_mgr_sched.default_prio) { - (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); } - return &_dispatch_mgr_sched.tid; + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } } -static inline void -_dispatch_mgr_priority_apply(void) +DISPATCH_NOINLINE +static void +_dispatch_lane_drain_non_barriers(dispatch_lane_t dq, + struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags) { - struct sched_param param; + size_t owned_width = dq->dq_width; + struct dispatch_object_s *next_dc; + + // see _dispatch_lane_drain, go in non barrier mode, and drain items + + os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_IN_BARRIER, release); + do { - param.sched_priority = _dispatch_mgr_sched.prio; - if 
(param.sched_priority > _dispatch_mgr_sched.default_prio) { - (void)dispatch_assume_zero(pthread_setschedparam( - _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, - ¶m)); + if (likely(owned_width)) { + owned_width--; + } else if (_dispatch_object_is_waiter(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + // no width left + break; } - } while (_dispatch_mgr_sched.prio > param.sched_priority); + next_dc = _dispatch_queue_pop_head(dq, dc); + if (_dispatch_object_is_waiter(dc)) { + _dispatch_non_barrier_waiter_redirect_or_wake(dq, dc); + } else { + _dispatch_continuation_redirect_push(dq, dc, + _dispatch_queue_max_qos(dq)); + } +drain_again: + dc = next_dc; + } while (dc && !_dispatch_object_is_barrier(dc)); + + uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); + uint64_t owned = owned_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); + } + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = old_state - owned; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + + // similar to _dispatch_lane_non_barrier_complete(): + // if by the time we get here all redirected non barrier syncs are + // done and returned their width to the queue, we may be the last + // chance for the next item to run/be re-driven. 
+ if (unlikely(dc)) { + new_state |= DISPATCH_QUEUE_DIRTY; + new_state = _dispatch_lane_non_barrier_complete_try_lock(dq, + old_state, new_state, owner_self); + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + goto drain_again; + }); + } + }); + + old_state -= owned; + _dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state); } DISPATCH_NOINLINE -void -_dispatch_mgr_priority_init(void) +static void +_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { - struct sched_param param; - pthread_attr_t *attr; - attr = &_dispatch_mgr_root_queue_pthread_context.dpq_thread_attr; - (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t qos = 0; - (void)pthread_attr_get_qos_class_np(attr, &qos, NULL); - if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) { - (void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0); - int p = _dispatch_mgr_sched_qos2prio[_dispatch_mgr_sched.qos]; - if (p > param.sched_priority) { - param.sched_priority = p; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_lane_t dq = dqu._dl; + + if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { + struct dispatch_object_s *dc = _dispatch_queue_get_head(dq); + if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) { + if (_dispatch_object_is_waiter(dc)) { + return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0); + } + } else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) { + return _dispatch_lane_drain_non_barriers(dq, dc, flags); } + + if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { + _dispatch_retain_2(dq); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + target = DISPATCH_QUEUE_WAKEUP_TARGET; } -#endif - if 
(slowpath(_dispatch_mgr_sched.prio > param.sched_priority)) { - return _dispatch_mgr_priority_apply(); - } + + uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + + dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned); } -#endif // DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -DISPATCH_NOINLINE static void -_dispatch_mgr_priority_raise(const pthread_attr_t *attr) +_dispatch_async_and_wait_invoke(void *ctxt) { - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - struct sched_param param; - (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); -#if HAVE_PTHREAD_WORKQUEUE_QOS - qos_class_t q, qos = 0; - (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL); - if (qos) { - param.sched_priority = _dispatch_mgr_sched_qos2prio[qos]; - os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, { - if (q >= qos) os_atomic_rmw_loop_give_up(break); - }); - } -#endif - int p, prio = param.sched_priority; - os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, { - if (p >= prio) os_atomic_rmw_loop_give_up(return); + dispatch_sync_context_t dsc = ctxt; + dispatch_queue_t top_dq = dsc->dc_other; + dispatch_invoke_flags_t iflags; + + // the block runs on the thread the queue is bound to and not + // on the calling thread, but we want to see the calling thread + // dispatch thread frames, so we fake the link, and then undo it + iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + dispatch_invoke_with_autoreleasepool(iflags, { + dispatch_thread_frame_s dtf; + _dispatch_introspection_sync_begin(top_dq); + _dispatch_thread_frame_push_and_rebase(&dtf, top_dq, &dsc->dsc_dtf); + _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); + _dispatch_thread_frame_pop(&dtf); }); -#if DISPATCH_USE_KEVENT_WORKQUEUE - _dispatch_root_queues_init(); - if 
(_dispatch_kevent_workqueue_enabled) { - pthread_priority_t pp = 0; - if (prio > _dispatch_mgr_sched.default_prio) { - // The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and - // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, but that is not - // problematic in this case, since it the second one is only ever - // used on dq_priority fields. - // We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a syscall, - // it is meaningful to libdispatch only. - pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; - } else if (qos) { - pp = _pthread_qos_class_encode(qos, 0, 0); - } - if (pp) { - int r = _pthread_workqueue_set_event_manager_priority(pp); - (void)dispatch_assume_zero(r); - } - return; - } -#endif -#if DISPATCH_USE_MGR_THREAD - if (_dispatch_mgr_sched.tid) { - return _dispatch_mgr_priority_apply(); + + // communicate back to _dispatch_async_and_wait_f_slow and + // _dispatch_sync_f_slow on which queue the work item was invoked + // so that the *_complete_recurse() call stops unlocking when it reaches it + dsc->dc_other = _dispatch_queue_get_current(); + dsc->dsc_func = NULL; + + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_signal(&dsc->dsc_event); // release + } else { + _dispatch_event_loop_cancel_waiter(dsc); } -#endif } -#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -#if DISPATCH_USE_KEVENT_WORKQUEUE -void -_dispatch_kevent_workqueue_init(void) -{ - // Initialize kevent workqueue support - _dispatch_root_queues_init(); - if (!_dispatch_kevent_workqueue_enabled) return; - dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); - qos_class_t qos = _dispatch_mgr_sched.qos; - int prio = _dispatch_mgr_sched.prio; - pthread_priority_t pp = 0; - if (qos) { - pp = _pthread_qos_class_encode(qos, 0, 0); - } - if (prio > _dispatch_mgr_sched.default_prio) { - pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; - } - if (pp) { - int r = _pthread_workqueue_set_event_manager_priority(pp); - 
(void)dispatch_assume_zero(r); - } -} -#endif // DISPATCH_USE_KEVENT_WORKQUEUE +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_wait_prepare(dispatch_queue_t dq) +{ + uint64_t old_state, new_state; -#pragma mark - -#pragma mark dispatch_pthread_root_queue + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if (_dq_state_is_suspended(old_state) || + !_dq_state_is_base_wlh(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + if (!_dq_state_drain_locked(old_state) || + _dq_state_in_sync_transfer(old_state)) { + os_atomic_rmw_loop_give_up(return old_state); + } + new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + }); + return new_state; +} -#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -static dispatch_queue_t -_dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +static void +_dispatch_wait_compute_wlh(dispatch_lane_t dq, dispatch_sync_context_t dsc) { - dispatch_queue_t dq; - dispatch_root_queue_context_t qc; - dispatch_pthread_root_queue_context_t pqc; - dispatch_queue_flags_t dqf = 0; - size_t dqs; - int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? 
- (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; + bool needs_locking = _dispatch_queue_is_mutable(dq); - dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; - dqs = roundup(dqs, _Alignof(struct dispatch_root_queue_context_s)); - dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_root), dqs + - sizeof(struct dispatch_root_queue_context_s) + - sizeof(struct dispatch_pthread_root_queue_context_s)); - qc = (void*)dq + dqs; - dispatch_assert((uintptr_t)qc % _Alignof(typeof(*qc)) == 0); - pqc = (void*)qc + sizeof(struct dispatch_root_queue_context_s); - dispatch_assert((uintptr_t)pqc % _Alignof(typeof(*pqc)) == 0); - if (label) { - const char *tmp = _dispatch_strdup_if_mutable(label); - if (tmp != label) { - dqf |= DQF_LABEL_NEEDS_FREE; - label = tmp; - } + if (needs_locking) { + dsc->dsc_release_storage = true; + _dispatch_queue_sidelock_lock(dq); } - _dispatch_queue_init(dq, dqf, DISPATCH_QUEUE_WIDTH_POOL, 0); - dq->dq_label = label; - dq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; - dq->do_ctxt = qc; - dq->dq_priority = DISPATCH_PRIORITY_SATURATED_OVERRIDE; - - pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); - qc->dgq_ctxt = pqc; -#if DISPATCH_USE_WORKQUEUES - qc->dgq_kworkqueue = (void*)(~0ul); -#endif - _dispatch_root_queue_init_pthread_pool(qc, pool_size, true); + dispatch_queue_t tq = dq->do_targetq; + uint64_t tq_state = _dispatch_wait_prepare(tq); - if (attr) { - memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); - _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); + if (_dq_state_is_suspended(tq_state) || + _dq_state_is_base_anon(tq_state)) { + dsc->dsc_release_storage = false; + dsc->dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(tq_state)) { + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + dsc->dsc_wlh_is_workloop = true; + dsc->dsc_release_storage = false; + } else if (dsc->dsc_release_storage) { + _dispatch_queue_retain_storage(tq); + } + dsc->dc_data = 
(dispatch_wlh_t)tq; } else { - (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); - } - (void)dispatch_assume_zero(pthread_attr_setdetachstate( - &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); - if (configure) { - pqc->dpq_thread_configure = _dispatch_Block_copy(configure); - } - if (observer_hooks) { - pqc->dpq_observer_hooks = *observer_hooks; + _dispatch_wait_compute_wlh(upcast(tq)._dl, dsc); } - _dispatch_object_debug(dq, "%s", __func__); - return _dispatch_introspection_queue_create(dq); -} - -dispatch_queue_t -dispatch_pthread_root_queue_create(const char *label, unsigned long flags, - const pthread_attr_t *attr, dispatch_block_t configure) -{ - return _dispatch_pthread_root_queue_create(label, flags, attr, configure, - NULL); -} - -#if DISPATCH_IOHID_SPI -dispatch_queue_t -_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label, - unsigned long flags, const pthread_attr_t *attr, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks, - dispatch_block_t configure) -{ - if (!observer_hooks->queue_will_execute || - !observer_hooks->queue_did_execute) { - DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks"); + if (needs_locking) { + if (dsc->dsc_wlh_is_workloop) { + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + } + _dispatch_queue_sidelock_unlock(dq); } - return _dispatch_pthread_root_queue_create(label, flags, attr, configure, - observer_hooks); } -#endif -dispatch_queue_t -dispatch_pthread_root_queue_copy_current(void) +DISPATCH_NOINLINE +static void +__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) { - dispatch_queue_t dq = _dispatch_queue_get_current(); - if (!dq) return NULL; - while (unlikely(dq->do_targetq)) { - dq = dq->do_targetq; - } - if (dx_type(dq) != DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || - dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) { - return NULL; + uint64_t dq_state = _dispatch_wait_prepare(dq); + if 
(unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch_sync called on queue " + "already owned by current thread"); } - return (dispatch_queue_t)_os_object_retain_with_resurrect(dq->_as_os_obj); -} -#endif // DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES + // Blocks submitted to the main thread MUST run on the main thread, and + // dispatch_async_and_wait also executes on the remote context rather than + // the current thread. + // + // For both these cases we need to save the frame linkage for the sake of + // _dispatch_async_and_wait_invoke + _dispatch_thread_frame_save_state(&dsc->dsc_dtf); -void -_dispatch_pthread_root_queue_dispose(dispatch_queue_t dq, bool *allow_free) -{ - if (slowpath(dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT)) { - DISPATCH_INTERNAL_CRASH(dq, "Global root queue disposed"); + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_base_anon(dq_state)) { + dsc->dc_data = DISPATCH_WLH_ANON; + } else if (_dq_state_is_base_wlh(dq_state)) { + dsc->dc_data = (dispatch_wlh_t)dq; + } else { + _dispatch_wait_compute_wlh(upcast(dq)._dl, dsc); } - _dispatch_object_debug(dq, "%s", __func__); - _dispatch_introspection_queue_dispose(dq); -#if DISPATCH_USE_PTHREAD_POOL - dispatch_root_queue_context_t qc = dq->do_ctxt; - dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - pthread_attr_destroy(&pqc->dpq_thread_attr); - _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); - if (pqc->dpq_thread_configure) { - Block_release(pqc->dpq_thread_configure); + if (dsc->dc_data == DISPATCH_WLH_ANON) { + dsc->dsc_override_qos_floor = dsc->dsc_override_qos = + (uint8_t)_dispatch_get_basepri_override_qos_floor(); + _dispatch_thread_event_init(&dsc->dsc_event); } - dq->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); -#endif - if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { - free((void*)dq->dq_label); + dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); 
+ _dispatch_trace_runtime_event(sync_wait, dq, 0); + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_wait(&dsc->dsc_event); // acquire + } else { + _dispatch_event_loop_wait_for_ownership(dsc); + } + if (dsc->dc_data == DISPATCH_WLH_ANON) { + _dispatch_thread_event_destroy(&dsc->dsc_event); + // If _dispatch_sync_waiter_wake() gave this thread an override, + // ensure that the root queue sees it. + if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { + _dispatch_set_basepri_override_qos(dsc->dsc_override_qos); + } } - _dispatch_queue_destroy(dq, allow_free); } #pragma mark - -#pragma mark dispatch_queue_specific +#pragma mark _dispatch_barrier_trysync_or_async_f -struct dispatch_queue_specific_queue_s { - DISPATCH_QUEUE_HEADER(queue_specific_queue); - TAILQ_HEAD(dispatch_queue_specific_head_s, - dispatch_queue_specific_s) dqsq_contexts; -} DISPATCH_ATOMIC64_ALIGN; - -struct dispatch_queue_specific_s { - const void *dqs_key; - void *dqs_ctxt; - dispatch_function_t dqs_destructor; - TAILQ_ENTRY(dispatch_queue_specific_s) dqs_list; -}; -DISPATCH_DECL(dispatch_queue_specific); - -void -_dispatch_queue_specific_queue_dispose(dispatch_queue_specific_queue_t dqsq, - bool *allow_free) +DISPATCH_NOINLINE +static void +_dispatch_barrier_trysync_or_async_f_complete(dispatch_lane_t dq, + void *ctxt, dispatch_function_t func, uint32_t flags) { - dispatch_queue_specific_t dqs, tmp; - dispatch_queue_t rq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); + dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE; - TAILQ_FOREACH_SAFE(dqs, &dqsq->dqsq_contexts, dqs_list, tmp) { - if (dqs->dqs_destructor) { - dispatch_async_f(rq, dqs->dqs_ctxt, dqs->dqs_destructor); + _dispatch_sync_function_invoke_inline(dq, ctxt, func); + if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { + uint64_t dq_state = os_atomic_sub2o(dq, dq_state, + DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); + if (!_dq_state_is_suspended(dq_state)) { + wflags |= 
DISPATCH_WAKEUP_CONSUME_2; } - free(dqs); } - _dispatch_queue_destroy(dqsq->_as_dq, allow_free); + dx_wakeup(dq, 0, wflags); } -static void -_dispatch_queue_init_specific(dispatch_queue_t dq) +// Use for mutation of queue-/source-internal state only +// ignores target queue hierarchy! +DISPATCH_NOINLINE +void +_dispatch_barrier_trysync_or_async_f(dispatch_lane_t dq, void *ctxt, + dispatch_function_t func, uint32_t flags) { - dispatch_queue_specific_queue_t dqsq; - - dqsq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_specific_queue), - sizeof(struct dispatch_queue_specific_queue_s)); - _dispatch_queue_init(dqsq->_as_dq, DQF_NONE, DISPATCH_QUEUE_WIDTH_MAX, - DISPATCH_QUEUE_ROLE_BASE_ANON); - dqsq->do_xref_cnt = -1; - dqsq->do_targetq = _dispatch_get_root_queue( - DISPATCH_QOS_USER_INITIATED, true); - dqsq->dq_label = "queue-specific"; - TAILQ_INIT(&dqsq->dqsq_contexts); - if (slowpath(!os_atomic_cmpxchg2o(dq, dq_specific_q, NULL, - dqsq->_as_dq, release))) { - _dispatch_release(dqsq->_as_dq); + dispatch_tid tid = _dispatch_tid_self(); + uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 
1 : 0; + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, + suspend_count))) { + return _dispatch_barrier_async_detached_f(dq, ctxt, func); } -} - -static void -_dispatch_queue_set_specific(void *ctxt) -{ - dispatch_queue_specific_t dqs, dqsn = ctxt; - dispatch_queue_specific_queue_t dqsq = - (dispatch_queue_specific_queue_t)_dispatch_queue_get_current(); - - TAILQ_FOREACH(dqs, &dqsq->dqsq_contexts, dqs_list) { - if (dqs->dqs_key == dqsn->dqs_key) { - // Destroy previous context for existing key - if (dqs->dqs_destructor) { - dispatch_async_f(_dispatch_get_root_queue( - DISPATCH_QOS_DEFAULT, false), dqs->dqs_ctxt, - dqs->dqs_destructor); - } - if (dqsn->dqs_ctxt) { - // Copy new context for existing key - dqs->dqs_ctxt = dqsn->dqs_ctxt; - dqs->dqs_destructor = dqsn->dqs_destructor; - } else { - // Remove context storage for existing key - TAILQ_REMOVE(&dqsq->dqsq_contexts, dqs, dqs_list); - free(dqs); - } - return free(dqsn); - } + if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { + _dispatch_retain_2(dq); // see _dispatch_lane_suspend } - // Insert context storage for new key - TAILQ_INSERT_TAIL(&dqsq->dqsq_contexts, dqsn, dqs_list); + _dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags); } +#pragma mark - +#pragma mark dispatch_sync / dispatch_barrier_sync + DISPATCH_NOINLINE -void -dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, - void *ctxt, dispatch_function_t destructor) +static void +_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt, + dispatch_function_t func, uintptr_t top_dc_flags, + dispatch_queue_class_t dqu, uintptr_t dc_flags) { - if (slowpath(!key)) { - return; + dispatch_queue_t top_dq = top_dqu._dq; + dispatch_queue_t dq = dqu._dq; + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); } - dispatch_queue_specific_t dqs; - dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); - dqs->dqs_key = key; - dqs->dqs_ctxt = ctxt; - 
dqs->dqs_destructor = destructor; - if (slowpath(!dq->dq_specific_q)) { - _dispatch_queue_init_specific(dq); + pthread_priority_t pp = _dispatch_get_priority(); + struct dispatch_sync_context_s dsc = { + .dc_flags = DC_FLAG_SYNC_WAITER | dc_flags, + .dc_func = _dispatch_async_and_wait_invoke, + .dc_ctxt = &dsc, + .dc_other = top_dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = _voucher_get(), + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_waiter = _dispatch_tid_self(), + }; + + _dispatch_trace_item_push(top_dq, &dsc); + __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq); + + if (dsc.dsc_func == NULL) { + dispatch_queue_t stop_dq = dsc.dc_other; + return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags); } - _dispatch_barrier_trysync_or_async_f(dq->dq_specific_q, dqs, - _dispatch_queue_set_specific, 0); + + _dispatch_introspection_sync_begin(top_dq); + _dispatch_trace_item_pop(top_dq, &dsc); + _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags + DISPATCH_TRACE_ARG(&dsc)); } +DISPATCH_NOINLINE static void -_dispatch_queue_get_specific(void *ctxt) +_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - void **ctxtp = ctxt; - void *key = *ctxtp; - dispatch_queue_specific_queue_t dqsq = - (dispatch_queue_specific_queue_t)_dispatch_queue_get_current(); - dispatch_queue_specific_t dqs; + dispatch_tid tid = _dispatch_tid_self(); + dispatch_queue_t tq = dq->do_targetq; - TAILQ_FOREACH(dqs, &dqsq->dqsq_contexts, dqs_list) { - if (dqs->dqs_key == key) { - *ctxtp = dqs->dqs_ctxt; - return; + do { + if (likely(tq->dq_width == 1)) { + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { + return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, + DC_FLAG_BARRIER); + } + } else { + dispatch_queue_concurrent_t dl = upcast(tq)._dl; + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) { + return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0); + } } - 
} - *ctxtp = NULL; + tq = tq->do_targetq; + } while (unlikely(tq->do_targetq)); + + _dispatch_introspection_sync_begin(dq); + _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags + DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop( + dq, ctxt, func, dc_flags))); } DISPATCH_ALWAYS_INLINE -static inline void * -_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) +static inline void +_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - void *ctxt = NULL; - if (fastpath(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE && dq->dq_specific_q)){ - ctxt = (void *)key; - dispatch_sync_f(dq->dq_specific_q, &ctxt, _dispatch_queue_get_specific); + dispatch_tid tid = _dispatch_tid_self(); + + if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { + DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync"); } - return ctxt; + + dispatch_lane_t dl = upcast(dq)._dl; + // The more correct thing to do would be to merge the qos of the thread + // that just acquired the barrier lock into the queue state. + // + // However this is too expensive for the fast path, so skip doing it. + // The chosen tradeoff is that if an enqueue on a lower priority thread + // contends with this fast path, this thread may receive a useless override. 
+ // + // Global concurrent queues and queues bound to non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) { + return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl, + DC_FLAG_BARRIER | dc_flags); + } + + if (unlikely(dl->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dl, ctxt, func, + DC_FLAG_BARRIER | dc_flags); + } + _dispatch_introspection_sync_begin(dl); + _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func + DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop( + dq, ctxt, func, dc_flags | DC_FLAG_BARRIER))); } DISPATCH_NOINLINE -void * -dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) +static void +_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) { - if (slowpath(!key)) { - return NULL; - } - return _dispatch_queue_get_specific_inline(dq, key); + _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags); } DISPATCH_NOINLINE -void * -dispatch_get_specific(const void *key) -{ - if (slowpath(!key)) { - return NULL; +void +dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + _dispatch_barrier_sync_f_inline(dq, ctxt, func, 0); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func, uintptr_t dc_flags) +{ + if (likely(dq->dq_width == 1)) { + return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags); } - void *ctxt = NULL; - dispatch_queue_t dq = _dispatch_queue_get_current(); - while (slowpath(dq)) { - ctxt = _dispatch_queue_get_specific_inline(dq, key); - if (ctxt) break; - dq = dq->do_targetq; + if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { + DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync"); } - return ctxt; + + dispatch_lane_t dl = upcast(dq)._dl; + // Global concurrent queues and queues bound to 
non-dispatch threads + // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE + if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) { + return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags); + } + + if (unlikely(dq->do_targetq->do_targetq)) { + return _dispatch_sync_recurse(dl, ctxt, func, dc_flags); + } + _dispatch_introspection_sync_begin(dl); + _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG( + _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags))); } -#if DISPATCH_IOHID_SPI -bool -_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( - dispatch_queue_t dq) // rdar://problem/18033810 +DISPATCH_NOINLINE +static void +_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, + uintptr_t dc_flags) { - if (dq->dq_width != 1) { - DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); - } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - return _dq_state_drain_locked_by_self(dq_state); + _dispatch_sync_f_inline(dq, ctxt, func, dc_flags); } -#endif -#pragma mark - -#pragma mark dispatch_queue_debug +DISPATCH_NOINLINE +void +dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) +{ + _dispatch_sync_f_inline(dq, ctxt, func, 0); +} -size_t -_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) +#ifdef __BLOCKS__ +DISPATCH_NOINLINE +static void +_dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, + uintptr_t dc_flags) { - size_t offset = 0; - dispatch_queue_t target = dq->do_targetq; - const char *tlabel = target && target->dq_label ? 
target->dq_label : ""; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + pthread_priority_t op = 0, p = 0; + dispatch_block_flags_t flags = dbpd->dbpd_flags; - offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " - "target = %s[%p], width = 0x%x, state = 0x%016llx", - dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width, - (unsigned long long)dq_state); - if (_dq_state_is_suspended(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", - _dq_state_suspend_cnt(dq_state)); - } - if (_dq_state_is_inactive(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); - } else if (_dq_state_needs_activation(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); - } - if (_dq_state_is_enqueued(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); - } - if (_dq_state_is_dirty(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); - } - dispatch_qos_t qos = _dq_state_max_qos(dq_state); - if (qos) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); + if (flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; + } else { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; } - mach_port_t owner = _dq_state_drain_owner(dq_state); - if (!_dispatch_queue_is_thread_bound(dq) && owner) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x", - owner); + + op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); + if (op) { + p = dbpd->dbpd_priority; } - if (_dq_state_is_in_barrier(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier"); - } else { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d", - _dq_state_used_width(dq_state, dq->dq_width)); + voucher_t ov, v = DISPATCH_NO_VOUCHER; + if (flags & 
DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; } - if (_dq_state_has_pending_barrier(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier"); + ov = _dispatch_set_priority_and_voucher(p, v, 0); + + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); } - if (_dispatch_queue_is_thread_bound(dq)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", - owner); + if (dc_flags & DC_FLAG_BARRIER) { + _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, + dc_flags); + } else { + _dispatch_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags); } - return offset; + _dispatch_reset_priority_and_voucher(op, ov); } -size_t -dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) +void +dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work) { - size_t offset = 0; - offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dq->dq_label ? 
dq->dq_label : dx_kind(dq), dq); - offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); - offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); - return offset; + uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_sync_block_with_privdata(dq, work, dc_flags); + } + _dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags); } -#if DISPATCH_DEBUG +DISPATCH_NOINLINE void -dispatch_debug_queue(dispatch_queue_t dq, const char* str) { - if (fastpath(dq)) { - _dispatch_object_debug(dq, "%s", str); - } else { - _dispatch_log("queue[NULL]: %s", str); +dispatch_sync(dispatch_queue_t dq, dispatch_block_t work) +{ + uintptr_t dc_flags = DC_FLAG_BLOCK; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_sync_block_with_privdata(dq, work, dc_flags); } + _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags); } -#endif - -#if DISPATCH_PERF_MON - -#define DISPATCH_PERF_MON_BUCKETS 8 +#endif // __BLOCKS__ -static struct { - uint64_t volatile time_total; - uint64_t volatile count_total; - uint64_t volatile thread_total; -} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; -DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; +#pragma mark - +#pragma mark dispatch_async_and_wait -void -_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) +DISPATCH_ALWAYS_INLINE +static inline dispatch_wlh_t +_dispatch_fake_wlh(dispatch_queue_t dq) { - uint64_t delta = _dispatch_absolute_time() - start; - unsigned long count; - int bucket = 0; - count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); - _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); - if (count == 0) { - bucket = 0; - if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); - } else { - bucket = 
MIN(DISPATCH_PERF_MON_BUCKETS - 1, - (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count)); - os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); - } - os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); - os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); - if (trace) { - _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); + dispatch_wlh_t new_wlh = DISPATCH_WLH_ANON; + if (likely(dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) || + _dq_state_is_base_wlh(os_atomic_load2o(dq, dq_state, relaxed))) { + new_wlh = (dispatch_wlh_t)dq; } + dispatch_wlh_t old_wlh = _dispatch_get_wlh(); + _dispatch_thread_setspecific(dispatch_wlh_key, new_wlh); + return old_wlh; } -#endif +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_restore_wlh(dispatch_wlh_t wlh) +{ + _dispatch_thread_setspecific(dispatch_wlh_key, wlh); +} -#pragma mark - -#pragma mark _dispatch_set_priority_and_mach_voucher -#if HAVE_PTHREAD_WORKQUEUE_QOS +DISPATCH_NOINLINE +static void +_dispatch_async_and_wait_invoke_and_complete_recurse(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_queue_t bottom_q, + uintptr_t top_dc_flags) +{ + dispatch_invoke_flags_t iflags; + dispatch_wlh_t old_wlh = _dispatch_fake_wlh(bottom_q); + + iflags = dsc->dsc_autorelease * DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + dispatch_invoke_with_autoreleasepool(iflags, { + dispatch_block_flags_t bflags = DISPATCH_BLOCK_HAS_PRIORITY; + dispatch_thread_frame_s dtf; + pthread_priority_t op = 0, p = dsc->dc_priority; + voucher_t ov, v = dsc->dc_voucher; + + _dispatch_introspection_sync_begin(dq); + _dispatch_thread_frame_push(&dtf, dq); + op = _dispatch_block_invoke_should_set_priority(bflags, p); + ov = _dispatch_set_priority_and_voucher(op ? 
p : 0, v, 0); + _dispatch_trace_item_pop(dq, dsc); + _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); + _dispatch_perfmon_workitem_inc(); + _dispatch_reset_priority_and_voucher(op, ov); + _dispatch_thread_frame_pop(&dtf); + }); + + _dispatch_trace_item_complete(dsc); + + _dispatch_restore_wlh(old_wlh); + _dispatch_sync_complete_recurse(dq, NULL, top_dc_flags); +} DISPATCH_NOINLINE -void -_dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, - mach_voucher_t kv) +static void +_dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags, + dispatch_sync_context_t dsc, dispatch_queue_t tq) { - _pthread_set_flags_t pflags = 0; - if (pp && _dispatch_set_qos_class_enabled) { - pthread_priority_t old_pri = _dispatch_get_priority(); - if (pp != old_pri) { - if (old_pri & _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG) { - pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; - // when we unbind, overcomitness can flip, so we need to learn - // it from the defaultpri, see _dispatch_priority_compute_update - pp |= (_dispatch_get_basepri() & - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); - } else { - // else we need to keep the one that is set in the current pri - pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); - } - if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - pflags |= _PTHREAD_SET_SELF_QOS_FLAG; - } - uint64_t mgr_dq_state = - os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); - if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { - DISPATCH_INTERNAL_CRASH(pp, - "Changing the QoS while on the manager queue"); - } - if (unlikely(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { - DISPATCH_INTERNAL_CRASH(pp, "Cannot raise oneself to manager"); - } - if (old_pri & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) { - DISPATCH_INTERNAL_CRASH(old_pri, - "Cannot turn a manager thread into a normal one"); - } - } + __DISPATCH_WAIT_FOR_QUEUE__(dsc, tq); + + if (unlikely(dsc->dsc_func == NULL)) { + // see _dispatch_async_and_wait_invoke + dispatch_queue_t 
stop_dq = dsc->dc_other; + return _dispatch_sync_complete_recurse(dq, stop_dq, top_dc_flags); } - if (kv != VOUCHER_NO_MACH_VOUCHER) { -#if VOUCHER_USE_MACH_VOUCHER - pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; -#endif + + // see _dispatch_*_redirect_or_wake + dispatch_queue_t bottom_q = dsc->dc_other; + return _dispatch_async_and_wait_invoke_and_complete_recurse(dq, dsc, + bottom_q, top_dc_flags); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, + uint64_t dq_state) +{ + // If the queue is anchored at a pthread root queue for which we can't + // mirror attributes, then we need to take the async path. + return !_dq_state_is_inner_queue(dq_state) && + !_dispatch_is_in_root_queues_array(dqu._dq->do_targetq); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, dispatch_tid tid, + uintptr_t dc_flags) +{ + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { + return false; } - if (!pflags) return; - int r = _pthread_set_properties_self(pflags, pp, kv); - if (r == EINVAL) { - DISPATCH_INTERNAL_CRASH(pp, "_pthread_set_properties_self failed"); + if (likely(dc_flags & DC_FLAG_BARRIER)) { + return _dispatch_queue_try_acquire_barrier_sync(dq, tid); } - (void)dispatch_assume_zero(r); + return _dispatch_queue_try_reserve_sync_width(upcast(dq)._dl); } DISPATCH_NOINLINE -voucher_t -_dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, - voucher_t v, dispatch_thread_set_self_t flags) +static void +_dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, + dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t top_flags) { - voucher_t ov = DISPATCH_NO_VOUCHER; - mach_voucher_t kv = VOUCHER_NO_MACH_VOUCHER; - if (v != DISPATCH_NO_VOUCHER) { - bool retained = flags & DISPATCH_VOUCHER_CONSUME; - ov = _voucher_get(); - if (ov == v && (flags & 
DISPATCH_VOUCHER_REPLACE)) { - if (retained && v) _voucher_release_no_dispose(v); - ov = DISPATCH_NO_VOUCHER; + dispatch_queue_t dq = top_dq; + uintptr_t dc_flags = top_flags; + + _dispatch_trace_item_push(top_dq, dsc); + + for (;;) { + if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, tid, dc_flags))){ + return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); + } + + _dispatch_async_waiter_update(dsc, dq); + if (likely(!dq->do_targetq->do_targetq)) break; + dq = dq->do_targetq; + if (likely(dq->dq_width == 1)) { + dc_flags |= DC_FLAG_BARRIER; } else { - if (!retained && v) _voucher_retain(v); - kv = _voucher_swap_and_get_mach_voucher(ov, v); + dc_flags &= ~DC_FLAG_BARRIER; } + dsc->dc_flags = dc_flags; } - if (!(flags & DISPATCH_THREAD_PARK)) { - _dispatch_set_priority_and_mach_voucher_slow(priority, kv); - } - if (ov != DISPATCH_NO_VOUCHER && (flags & DISPATCH_VOUCHER_REPLACE)) { - if (ov) _voucher_release(ov); - ov = DISPATCH_NO_VOUCHER; - } - return ov; -} -#endif -#pragma mark - -#pragma mark dispatch_continuation_t -const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { - DC_VTABLE_ENTRY(ASYNC_REDIRECT, - .do_kind = "dc-redirect", - .do_invoke = _dispatch_async_redirect_invoke), -#if HAVE_MACH - DC_VTABLE_ENTRY(MACH_SEND_BARRRIER_DRAIN, - .do_kind = "dc-mach-send-drain", - .do_invoke = _dispatch_mach_send_barrier_drain_invoke), - DC_VTABLE_ENTRY(MACH_SEND_BARRIER, - .do_kind = "dc-mach-send-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_RECV_BARRIER, - .do_kind = "dc-mach-recv-barrier", - .do_invoke = _dispatch_mach_barrier_invoke), - DC_VTABLE_ENTRY(MACH_ASYNC_REPLY, - .do_kind = "dc-mach-async-reply", - .do_invoke = _dispatch_mach_msg_async_reply_invoke), -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - DC_VTABLE_ENTRY(OVERRIDE_STEALING, - .do_kind = "dc-override-stealing", - .do_invoke = _dispatch_queue_override_invoke), - DC_VTABLE_ENTRY(OVERRIDE_OWNING, - .do_kind = 
"dc-override-owning", - .do_invoke = _dispatch_queue_override_invoke), -#endif -}; + _dispatch_async_and_wait_invoke_and_complete_recurse(top_dq, dsc, dq, + top_flags); +} +DISPATCH_NOINLINE static void -_dispatch_force_cache_cleanup(void) +_dispatch_async_and_wait_f(dispatch_queue_t dq, + void *ctxt, dispatch_function_t func, uintptr_t dc_flags) { - dispatch_continuation_t dc; - dc = _dispatch_thread_getspecific(dispatch_cache_key); - if (dc) { - _dispatch_thread_setspecific(dispatch_cache_key, NULL); - _dispatch_cache_cleanup(dc); - } + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_tid tid = _dispatch_tid_self(); + struct dispatch_sync_context_s dsc = { + .dc_flags = dc_flags, + .dc_func = _dispatch_async_and_wait_invoke, + .dc_ctxt = &dsc, + .dc_other = dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = _voucher_get(), + .dsc_func = func, + .dsc_ctxt = ctxt, + .dsc_waiter = tid, + }; + + return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); } DISPATCH_NOINLINE -static void -_dispatch_cache_cleanup(void *value) +void +dispatch_async_and_wait_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - dispatch_continuation_t dc, next_dc = value; - - while ((dc = next_dc)) { - next_dc = dc->do_next; - _dispatch_continuation_free_to_heap(dc); + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT; + if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; + return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); } -#if DISPATCH_USE_MEMORYPRESSURE_SOURCE DISPATCH_NOINLINE void -_dispatch_continuation_free_to_cache_limit(dispatch_continuation_t dc) +dispatch_barrier_async_and_wait_f(dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) { - _dispatch_continuation_free_to_heap(dc); - dispatch_continuation_t next_dc; - dc = _dispatch_thread_getspecific(dispatch_cache_key); - int cnt; - if (!dc || (cnt = 
dc->dc_cache_cnt - - _dispatch_continuation_cache_limit) <= 0) { - return; + if (unlikely(!dq->do_targetq)) { + return _dispatch_sync_function_invoke(dq, ctxt, func); } - do { - next_dc = dc->do_next; - _dispatch_continuation_free_to_heap(dc); - } while (--cnt && (dc = next_dc)); - _dispatch_thread_setspecific(dispatch_cache_key, next_dc); + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BARRIER; + return _dispatch_async_and_wait_f(dq, ctxt, func, dc_flags); } -#endif +#ifdef __BLOCKS__ DISPATCH_NOINLINE static void -_dispatch_continuation_push(dispatch_queue_t dq, dispatch_continuation_t dc) +_dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq, + dispatch_block_t work, uintptr_t dc_flags) { - dx_push(dq, dc, _dispatch_continuation_override_qos(dq, dc)); + dispatch_block_private_data_t dbpd = _dispatch_block_get_data(work); + dispatch_block_flags_t flags = dbpd->dbpd_flags; + pthread_priority_t pp; + voucher_t v; + + if (dbpd->dbpd_flags & DISPATCH_BLOCK_BARRIER) { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA | DC_FLAG_BARRIER; + } else { + dc_flags |= DC_FLAG_BLOCK_WITH_PRIVATE_DATA; + } + + if (_dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority)){ + pp = dbpd->dbpd_priority; + } else { + pp = _dispatch_get_priority(); + } + if (dbpd->dbpd_flags & DISPATCH_BLOCK_HAS_VOUCHER) { + v = dbpd->dbpd_voucher; + } else { + v = _voucher_get(); + } + + // balanced in d_block_sync_invoke or d_block_wait + if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_retain_2(dq); + } + + dispatch_tid tid = _dispatch_tid_self(); + struct dispatch_sync_context_s dsc = { + .dc_flags = dc_flags, + .dc_func = _dispatch_async_and_wait_invoke, + .dc_ctxt = &dsc, + .dc_other = dq, + .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, + .dc_voucher = v, + .dsc_func = _dispatch_block_sync_invoke, + .dsc_ctxt = work, + .dsc_waiter = tid, + }; + + return _dispatch_async_and_wait_recurse(dq, &dsc, tid, dc_flags); } 
-DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_async2(dispatch_queue_t dq, dispatch_continuation_t dc, - bool barrier) +void +dispatch_barrier_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) { - if (fastpath(barrier || !DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { - return _dispatch_continuation_push(dq, dc); + if (unlikely(!dq->do_targetq)) { + return dispatch_barrier_sync(dq, work); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK|DC_FLAG_BARRIER; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); } - return _dispatch_async_f2(dq, dc); + + dispatch_function_t func = _dispatch_Block_invoke(work); + return _dispatch_async_and_wait_f(dq, work, func, dc_flags); } -DISPATCH_NOINLINE void -_dispatch_continuation_async(dispatch_queue_t dq, dispatch_continuation_t dc) +dispatch_async_and_wait(dispatch_queue_t dq, dispatch_block_t work) { - _dispatch_continuation_async2(dq, dc, - dc->dc_flags & DISPATCH_OBJ_BARRIER_BIT); + if (unlikely(!dq->do_targetq)) { + return dispatch_sync(dq, work); + } + + uintptr_t dc_flags = DC_FLAG_ASYNC_AND_WAIT | DC_FLAG_BLOCK; + if (likely(dq->dq_width == 1)) dc_flags |= DC_FLAG_BARRIER; + if (unlikely(_dispatch_block_has_private_data(work))) { + return _dispatch_async_and_wait_block_with_privdata(dq, work, dc_flags); + } + + dispatch_function_t func = _dispatch_Block_invoke(work); + return _dispatch_async_and_wait_f(dq, work, func, dc_flags); } +#endif // __BLOCKS__ #pragma mark - -#pragma mark dispatch_block_create - -#if __BLOCKS__ +#pragma mark dispatch_queue_specific -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_block_flags_valid(dispatch_block_flags_t flags) +static void +_dispatch_queue_specific_head_dispose_slow(void *ctxt) { - return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); -} + dispatch_queue_specific_head_t dqsh = ctxt; + dispatch_queue_specific_t dqs, tmp; -DISPATCH_ALWAYS_INLINE 
-static inline dispatch_block_flags_t -_dispatch_block_normalize_flags(dispatch_block_flags_t flags) -{ - if (flags & (DISPATCH_BLOCK_NO_VOUCHER|DISPATCH_BLOCK_DETACHED)) { - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } - if (flags & (DISPATCH_BLOCK_NO_QOS_CLASS|DISPATCH_BLOCK_DETACHED)) { - flags |= DISPATCH_BLOCK_HAS_PRIORITY; + TAILQ_FOREACH_SAFE(dqs, &dqsh->dqsh_entries, dqs_entry, tmp) { + dispatch_assert(dqs->dqs_destructor); + _dispatch_client_callout(dqs->dqs_ctxt, dqs->dqs_destructor); + free(dqs); } - return flags; + free(dqsh); } -static inline dispatch_block_t -_dispatch_block_create_with_voucher_and_priority(dispatch_block_flags_t flags, - voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) +static void +_dispatch_queue_specific_head_dispose(dispatch_queue_specific_head_t dqsh) { - flags = _dispatch_block_normalize_flags(flags); - bool assign = (flags & DISPATCH_BLOCK_ASSIGN_CURRENT); + dispatch_queue_t rq = _dispatch_get_default_queue(false); + dispatch_queue_specific_t dqs, tmp; + TAILQ_HEAD(, dispatch_queue_specific_s) entries = + TAILQ_HEAD_INITIALIZER(entries); - if (assign && !(flags & DISPATCH_BLOCK_HAS_VOUCHER)) { -#if OS_VOUCHER_ACTIVITY_SPI - voucher = VOUCHER_CURRENT; -#endif - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - } -#if OS_VOUCHER_ACTIVITY_SPI - if (voucher == VOUCHER_CURRENT) { - voucher = _voucher_get(); + TAILQ_CONCAT(&entries, &dqsh->dqsh_entries, dqs_entry); + TAILQ_FOREACH_SAFE(dqs, &entries, dqs_entry, tmp) { + if (dqs->dqs_destructor) { + TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); + } else { + free(dqs); + } } -#endif - if (assign && !(flags & DISPATCH_BLOCK_HAS_PRIORITY)) { - pri = _dispatch_priority_propagate(); - flags |= DISPATCH_BLOCK_HAS_PRIORITY; + + if (TAILQ_EMPTY(&dqsh->dqsh_entries)) { + free(dqsh); + } else { + _dispatch_barrier_async_detached_f(rq, dqsh, + _dispatch_queue_specific_head_dispose_slow); } - dispatch_block_t db = _dispatch_block_create(flags, voucher, pri, block); -#if 
DISPATCH_DEBUG - dispatch_assert(_dispatch_block_get_data(db)); -#endif - return db; } -dispatch_block_t -dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block) +DISPATCH_NOINLINE +static void +_dispatch_queue_init_specific(dispatch_queue_t dq) { - if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; - return _dispatch_block_create_with_voucher_and_priority(flags, NULL, 0, - block); -} + dispatch_queue_specific_head_t dqsh; -dispatch_block_t -dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, - dispatch_qos_class_t qos_class, int relative_priority, - dispatch_block_t block) -{ - if (!_dispatch_block_flags_valid(flags) || - !_dispatch_qos_class_valid(qos_class, relative_priority)) { - return DISPATCH_BAD_INPUT; + dqsh = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_head_s)); + TAILQ_INIT(&dqsh->dqsh_entries); + if (unlikely(!os_atomic_cmpxchg2o(dq, dq_specific_head, + NULL, dqsh, release))) { + _dispatch_queue_specific_head_dispose(dqsh); } - flags |= DISPATCH_BLOCK_HAS_PRIORITY; - pthread_priority_t pri = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); -#endif - return _dispatch_block_create_with_voucher_and_priority(flags, NULL, - pri, block); } -dispatch_block_t -dispatch_block_create_with_voucher(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_block_t block) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_specific_t +_dispatch_queue_specific_find(dispatch_queue_specific_head_t dqsh, + const void *key) { - if (!_dispatch_block_flags_valid(flags)) return DISPATCH_BAD_INPUT; - flags |= DISPATCH_BLOCK_HAS_VOUCHER; - return _dispatch_block_create_with_voucher_and_priority(flags, voucher, 0, - block); -} + dispatch_queue_specific_t dqs; -dispatch_block_t -dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, - voucher_t voucher, dispatch_qos_class_t qos_class, - int relative_priority, dispatch_block_t block) 
-{ - if (!_dispatch_block_flags_valid(flags) || - !_dispatch_qos_class_valid(qos_class, relative_priority)) { - return DISPATCH_BAD_INPUT; + TAILQ_FOREACH(dqs, &dqsh->dqsh_entries, dqs_entry) { + if (dqs->dqs_key == key) { + return dqs; + } } - flags |= (DISPATCH_BLOCK_HAS_VOUCHER|DISPATCH_BLOCK_HAS_PRIORITY); - pthread_priority_t pri = 0; -#if HAVE_PTHREAD_WORKQUEUE_QOS - pri = _pthread_qos_class_encode(qos_class, relative_priority, 0); -#endif - return _dispatch_block_create_with_voucher_and_priority(flags, voucher, - pri, block); + return NULL; } -void -dispatch_block_perform(dispatch_block_flags_t flags, dispatch_block_t block) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_admits_specific(dispatch_queue_t dq) { - if (!_dispatch_block_flags_valid(flags)) { - DISPATCH_CLIENT_CRASH(flags, "Invalid flags passed to " - "dispatch_block_perform()"); + if (dx_metatype(dq) == _DISPATCH_LANE_TYPE) { + return (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE || + !dx_hastypeflag(dq, QUEUE_BASE)); } - flags = _dispatch_block_normalize_flags(flags); - struct dispatch_block_private_data_s dbpds = - DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block); - return _dispatch_block_invoke_direct(&dbpds); + return dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE; } -#define _dbpd_group(dbpd) ((dbpd)->dbpd_group) - +DISPATCH_NOINLINE void -_dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) +dispatch_queue_set_specific(dispatch_queue_t dq, const void *key, + void *ctxt, dispatch_function_t destructor) { - dispatch_block_private_data_t dbpd = (dispatch_block_private_data_t)dbcpd; - dispatch_block_flags_t flags = dbpd->dbpd_flags; - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); + if (unlikely(!key)) { + return; } - if (atomic_flags & DBF_CANCELED) goto out; + dispatch_queue_t rq = 
_dispatch_get_default_queue(false); + dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; + dispatch_queue_specific_t dqs; - pthread_priority_t op = 0, p = 0; - op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); - if (op) { - p = dbpd->dbpd_priority; + if (unlikely(!_dispatch_queue_admits_specific(dq))) { + DISPATCH_CLIENT_CRASH(0, + "Queue doesn't support dispatch_queue_set_specific"); } - voucher_t ov, v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; + + if (ctxt && !dqsh) { + _dispatch_queue_init_specific(dq); + dqsh = dq->dq_specific_head; + } else if (!dqsh) { + return; } - ov = _dispatch_set_priority_and_voucher(p, v, 0); - dbpd->dbpd_thread = _dispatch_tid_self(); - _dispatch_client_callout(dbpd->dbpd_block, - _dispatch_Block_invoke(dbpd->dbpd_block)); - _dispatch_reset_priority_and_voucher(op, ov); -out: - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(_dbpd_group(dbpd)); + + _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); + dqs = _dispatch_queue_specific_find(dqsh, key); + if (dqs) { + if (dqs->dqs_destructor) { + _dispatch_barrier_async_detached_f(rq, dqs->dqs_ctxt, + dqs->dqs_destructor); + } + if (ctxt) { + dqs->dqs_ctxt = ctxt; + dqs->dqs_destructor = destructor; + } else { + TAILQ_REMOVE(&dqsh->dqsh_entries, dqs, dqs_entry); + free(dqs); } + } else if (ctxt) { + dqs = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_s)); + dqs->dqs_key = key; + dqs->dqs_ctxt = ctxt; + dqs->dqs_destructor = destructor; + TAILQ_INSERT_TAIL(&dqsh->dqsh_entries, dqs, dqs_entry); } + + _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); } -void -_dispatch_block_sync_invoke(void *block) +DISPATCH_ALWAYS_INLINE +static inline void * +_dispatch_queue_get_specific_inline(dispatch_queue_t dq, const void *key) { - dispatch_block_t b = block; - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - 
dispatch_block_flags_t flags = dbpd->dbpd_flags; - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (unlikely(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); + dispatch_queue_specific_head_t dqsh = dq->dq_specific_head; + dispatch_queue_specific_t dqs; + void *ctxt = NULL; + + if (likely(_dispatch_queue_admits_specific(dq) && dqsh)) { + _dispatch_unfair_lock_lock(&dqsh->dqsh_lock); + dqs = _dispatch_queue_specific_find(dqsh, key); + if (dqs) ctxt = dqs->dqs_ctxt; + _dispatch_unfair_lock_unlock(&dqsh->dqsh_lock); } - if (atomic_flags & DBF_CANCELED) goto out; + return ctxt; +} - voucher_t ov = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - ov = _dispatch_adopt_priority_and_set_voucher(0, dbpd->dbpd_voucher, 0); +DISPATCH_NOINLINE +void * +dispatch_queue_get_specific(dispatch_queue_t dq, const void *key) +{ + if (unlikely(!key)) { + return NULL; } - dbpd->dbpd_block(); - _dispatch_reset_voucher(ov, 0); -out: - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(_dbpd_group(dbpd)); - } + return _dispatch_queue_get_specific_inline(dq, key); +} + +DISPATCH_NOINLINE +void * +dispatch_get_specific(const void *key) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + void *ctxt = NULL; + + if (likely(key && dq)) { + do { + ctxt = _dispatch_queue_get_specific_inline(dq, key); + dq = dq->do_targetq; + } while (unlikely(ctxt == NULL && dq)); } + return ctxt; +} - os_mpsc_queue_t oq; - oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (oq) { - // balances dispatch_{,barrier_,}sync - _os_object_release_internal_n(oq->_as_os_obj, 2); +#pragma mark - +#pragma mark dispatch_queue_t / dispatch_lane_t + +void +dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) +{ + if (unlikely(_dispatch_object_is_global(dq))) { + return; } + dispatch_queue_flags_t dqf 
= _dispatch_queue_atomic_flags(dq); + if (unlikely(dqf & DQF_LABEL_NEEDS_FREE)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot change label for this queue"); + } + dq->dq_label = label; +} + +static inline bool +_dispatch_base_lane_is_wlh(dispatch_lane_t dq, dispatch_queue_t tq) +{ + (void)dq; (void)tq; + return false; } static void -_dispatch_block_async_invoke_reset_max_qos(dispatch_queue_t dq, - dispatch_qos_t qos) +_dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) { - uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); + uint64_t old_state, new_state, role; - // Only dispatch queues can reach this point (as opposed to sources or more - // complex objects) which allows us to handle the DIRTY bit protocol by only - // looking at the tail - dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE); + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + role = DISPATCH_QUEUE_ROLE_INNER; + } else if (_dispatch_base_lane_is_wlh(dq, tq)) { + role = DISPATCH_QUEUE_ROLE_BASE_WLH; + } else { + role = DISPATCH_QUEUE_ROLE_BASE_ANON; + } -again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - dispatch_assert(_dq_state_is_base_wlh(old_state)); - if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { - // Nothing to do if the QoS isn't going down - os_atomic_rmw_loop_give_up(return); - } - if (_dq_state_is_dirty(old_state)) { - os_atomic_rmw_loop_give_up({ - // just renew the drain lock with an acquire barrier, to see - // what the enqueuer that set DIRTY has done. 
- // the xor generates better assembly as DISPATCH_QUEUE_DIRTY - // is already in a register - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - if (!dq->dq_items_tail) { - goto again; - } - return; - }); + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= role; + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(break); } - - new_state = old_state; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - new_state |= qos_bits; }); - _dispatch_deferred_items_get()->ddi_wlh_needs_update = true; - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + if (_dq_state_is_base_wlh(old_state) && !_dq_state_is_base_wlh(new_state)) { + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi && ddi->ddi_wlh == (dispatch_wlh_t)dq) { + _dispatch_event_loop_leave_immediate(new_state); + } + } + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { + dispatch_queue_flags_t clear = 0, set = DQF_TARGETED; + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + clear |= DQF_MUTABLE; +#if !DISPATCH_ALLOW_NON_LEAF_RETARGET + } else { + clear |= DQF_MUTABLE; +#endif + } + if (clear) { + _dispatch_queue_atomic_flags_set_and_clear(tq, set, clear); + } else { + _dispatch_queue_atomic_flags_set(tq, set); + } + } } -#define DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE 0x1 -#define DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET 0x2 - -DISPATCH_NOINLINE -static void -_dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) +dispatch_priority_t +_dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, + dispatch_wlh_t *wlh_out) { - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(b); - unsigned int atomic_flags = dbpd->dbpd_atomic_flags; - if (slowpath(atomic_flags & DBF_WAITED)) { - DISPATCH_CLIENT_CRASH(atomic_flags, "A block object may not be both " - "run more than once and waited for"); + dispatch_priority_t dpri = dq->dq_priority; + dispatch_priority_t p = dpri & DISPATCH_PRIORITY_REQUESTED_MASK; + dispatch_qos_t fallback = 
_dispatch_priority_fallback_qos(dpri); + dispatch_queue_t tq = dq->do_targetq; + dispatch_wlh_t wlh = DISPATCH_WLH_ANON; + + if (_dq_state_is_base_wlh(dq->dq_state)) { + wlh = (dispatch_wlh_t)dq; } - if (unlikely((dbpd->dbpd_flags & - DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE) && - !(invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET))) { - dispatch_queue_t dq = _dispatch_get_current_queue(); - dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); - if ((dispatch_wlh_t)dq == _dispatch_get_wlh() && !dq->dq_items_tail) { - _dispatch_block_async_invoke_reset_max_qos(dq, qos); + while (unlikely(!dx_hastypeflag(tq, QUEUE_ROOT))) { + if (unlikely(tq == _dispatch_mgr_q._as_dq)) { + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; + } + if (unlikely(_dispatch_queue_is_thread_bound(tq))) { + // thread-bound hierarchies are weird, we need to install + // from the context of the thread this hierarchy is bound to + if (wlh_out) *wlh_out = NULL; + return 0; + } + if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { + // this queue may not be activated yet, so the queue graph may not + // have stabilized yet + _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, + dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? dq : NULL); + if (wlh_out) *wlh_out = NULL; + return 0; } - } - if (!slowpath(atomic_flags & DBF_CANCELED)) { - dbpd->dbpd_block(); - } - if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { - dispatch_group_leave(_dbpd_group(dbpd)); + if (_dq_state_is_base_wlh(tq->dq_state)) { + wlh = (dispatch_wlh_t)tq; + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + } + } else if (unlikely(_dispatch_queue_is_mutable(tq))) { + // we're not allowed to dereference tq->do_targetq + _dispatch_ktrace2(DISPATCH_PERF_delayed_registration, dq, + dx_metatype(dq) == _DISPATCH_SOURCE_TYPE ? 
dq : NULL); + if (wlh_out) *wlh_out = NULL; + return 0; } - } - os_mpsc_queue_t oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (oq) { - // balances dispatch_{,barrier_,group_}async - _os_object_release_internal_n_inline(oq->_as_os_obj, 2); - } + dispatch_priority_t tqp = tq->dq_priority; - if (invoke_flags & DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE) { - Block_release(b); + tq = tq->do_targetq; + if (tqp & DISPATCH_PRIORITY_FLAG_INHERITED) { + // if the priority is inherited, it means we got it from our target + // which has fallback and various magical flags that the code below + // will handle, so do not bother here. + break; + } + + if (!fallback) fallback = _dispatch_priority_fallback_qos(tqp); + tqp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (p < tqp) p = tqp; } -} -static void -_dispatch_block_async_invoke(void *block) -{ - _dispatch_block_async_invoke2(block, 0); -} + if (likely(_dispatch_is_in_root_queues_array(tq) || + tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { + dispatch_priority_t rqp = tq->dq_priority; -static void -_dispatch_block_async_invoke_and_release(void *block) -{ - _dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE); + if (!fallback) fallback = _dispatch_priority_fallback_qos(rqp); + rqp &= DISPATCH_PRIORITY_REQUESTED_MASK; + if (p < rqp) p = rqp; + + p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) || + !(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) { + p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR); + if (fallback > _dispatch_priority_qos(p)) { + p |= _dispatch_priority_make_fallback(fallback); + } + } + if (wlh_out) *wlh_out = wlh; + return p; + } + + // pthread root queues opt out of QoS + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return DISPATCH_PRIORITY_FLAG_MANAGER; } +DISPATCH_ALWAYS_INLINE static void -_dispatch_block_async_invoke_and_release_mach_barrier(void *block) +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) { - 
_dispatch_block_async_invoke2(block, DISPATCH_BLOCK_ASYNC_INVOKE_RELEASE | - DISPATCH_BLOCK_ASYNC_INVOKE_NO_OVERRIDE_RESET); + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); } DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_block_supports_wait_and_cancel(dispatch_block_private_data_t dbpd) +static void +_dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) { - return dbpd && !(dbpd->dbpd_flags & - DISPATCH_BLOCK_IF_LAST_RESET_QUEUE_QOS_OVERRIDE); + if (unlikely(!dwl->dwl_attr)) { + dwl->dwl_attr = _dispatch_calloc(1, sizeof(dispatch_workloop_attr_s)); + } } void -dispatch_block_cancel(dispatch_block_t db) +dispatch_set_qos_class_floor(dispatch_object_t dou, + dispatch_qos_class_t cls, int relpri) { - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { - DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " - "dispatch_block_cancel()"); + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class_floor called on invalid object type"); } - (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); -} - -long -dispatch_block_testcancel(dispatch_block_t db) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { - DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " - "dispatch_block_testcancel()"); + if (dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { + return dispatch_workloop_set_qos_class_floor(dou._dwl, cls, relpri, 0); } - return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = 
_dispatch_priority_make(qos, relpri); + dispatch_priority_t old_pri = dou._dq->dq_priority; + + if (pri) pri |= DISPATCH_PRIORITY_FLAG_FLOOR; + old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; + dou._dq->dq_priority = pri | old_pri; + + _dispatch_queue_setter_assert_inactive(dou._dq); } -long -dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) +void +dispatch_set_qos_class(dispatch_object_t dou, dispatch_qos_class_t cls, + int relpri) { - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (unlikely(!_dispatch_block_supports_wait_and_cancel(dbpd))) { - DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " - "dispatch_block_wait()"); + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER || + dx_metatype(dou._do) == _DISPATCH_WORKLOOP_TYPE) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class called on invalid object type"); } - unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, - DBF_WAITING, relaxed); - if (slowpath(flags & (DBF_WAITED | DBF_WAITING))) { - DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " - "more than once"); - } + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = _dispatch_priority_make(qos, relpri); + dispatch_priority_t old_pri = dou._dq->dq_priority; - // If we know the queue where this block is - // enqueued, or the thread that's executing it, then we should boost - // it here. + old_pri &= ~DISPATCH_PRIORITY_REQUESTED_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FLOOR; + dou._dq->dq_priority = pri | old_pri; - pthread_priority_t pp = _dispatch_get_priority(); + _dispatch_queue_setter_assert_inactive(dou._dq); +} - os_mpsc_queue_t boost_oq; - boost_oq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); - if (boost_oq) { - // release balances dispatch_{,barrier_,group_}async. 
- // Can't put the queue back in the timeout case: the block might - // finish after we fell out of group_wait and see our NULL, so - // neither of us would ever release. Side effect: After a _wait - // that times out, subsequent waits will not boost the qos of the - // still-running block. - dx_wakeup(boost_oq, _dispatch_qos_from_pp(pp), - DISPATCH_WAKEUP_BLOCK_WAIT | DISPATCH_WAKEUP_CONSUME_2); +void +dispatch_set_qos_class_fallback(dispatch_object_t dou, dispatch_qos_class_t cls) +{ + if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { + DISPATCH_CLIENT_CRASH(0, + "dispatch_set_qos_class_fallback called on invalid object type"); } - mach_port_t boost_th = dbpd->dbpd_thread; - if (boost_th) { - _dispatch_thread_override_start(boost_th, pp, dbpd); - } + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); + dispatch_priority_t pri = _dispatch_priority_make_fallback(qos); + dispatch_priority_t old_pri = dou._dq->dq_priority; - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (slowpath(performed > 1 || (boost_th && boost_oq))) { - DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " - "run more than once and waited for"); - } + old_pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + old_pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; + dou._dq->dq_priority = pri | old_pri; - long ret = dispatch_group_wait(_dbpd_group(dbpd), timeout); + _dispatch_queue_setter_assert_inactive(dou._dq); +} - if (boost_th) { - _dispatch_thread_override_end(boost_th, dbpd); +static dispatch_queue_t +_dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq, + dispatch_queue_t tq) +{ + const dispatch_priority_t inherited = DISPATCH_PRIORITY_FLAG_INHERITED; + dispatch_priority_t pri = dq._dl->dq_priority; + + // This priority has been selected by the client, leave it alone + // However, when the client picked a QoS, we should adjust the target queue + // if it is a root queue to best match the ask + if 
(_dispatch_queue_priority_manually_selected(pri)) { + if (_dispatch_is_in_root_queues_array(tq)) { + dispatch_qos_t qos = _dispatch_priority_qos(pri); + if (!qos) qos = DISPATCH_QOS_DEFAULT; + tq = _dispatch_get_root_queue(qos, + pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq; + } + return tq; } - if (ret) { - // timed out: reverse our changes - (void)os_atomic_and2o(dbpd, dbpd_atomic_flags, - ~DBF_WAITING, relaxed); - } else { - (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, - DBF_WAITED, relaxed); - // don't need to re-test here: the second call would see - // the first call's WAITING + if (_dispatch_is_in_root_queues_array(tq)) { + // base queues need to know they target + // the default root queue so that _dispatch_queue_wakeup_qos() + // in _dispatch_queue_wakeup() can fallback to QOS_DEFAULT + // if no other priority was provided. + pri = tq->dq_priority | inherited; + } else if (pri & inherited) { + // if the FALLBACK flag is set on queues due to the code above + // we need to clear it if the queue is retargeted within a hierachy + // and is no longer a base queue. 
+ pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; } - return ret; + dq._dl->dq_priority = pri; + return tq; } -void -dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, - dispatch_block_t notification_block) + +DISPATCH_NOINLINE +static dispatch_queue_t +_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq, bool legacy) { - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); - if (!dbpd) { - DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " - "dispatch_block_notify()"); - } - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); - if (slowpath(performed > 1)) { - DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " - "run more than once and observed"); - } + dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); - return dispatch_group_notify(_dbpd_group(dbpd), queue, notification_block); -} + // + // Step 1: Normalize arguments (qos, overcommit, tq) + // -DISPATCH_NOINLINE -void -_dispatch_continuation_init_slow(dispatch_continuation_t dc, - dispatch_queue_class_t dqu, dispatch_block_flags_t flags) -{ - dispatch_block_private_data_t dbpd = _dispatch_block_get_data(dc->dc_ctxt); - dispatch_block_flags_t block_flags = dbpd->dbpd_flags; - uintptr_t dc_flags = dc->dc_flags; - os_mpsc_queue_t oq = dqu._oq; + dispatch_qos_t qos = dqai.dqai_qos; +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == DISPATCH_QOS_USER_INTERACTIVE) { + dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED; + } + if (qos == DISPATCH_QOS_MAINTENANCE) { + dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND; + } +#endif // !HAVE_PTHREAD_WORKQUEUE_QOS - // balanced in d_block_async_invoke_and_release or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, oq, relaxed)) { - _os_object_retain_internal_n_inline(oq->_as_os_obj, 2); + _dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit; + if (overcommit != 
_dispatch_queue_attr_overcommit_unspecified && tq) { + if (tq->do_targetq) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and " + "a non-global target queue"); + } } - if (dc_flags & DISPATCH_OBJ_MACH_BARRIER) { - dispatch_assert(dc_flags & DISPATCH_OBJ_CONSUME_BIT); - dc->dc_func = _dispatch_block_async_invoke_and_release_mach_barrier; - } else if (dc_flags & DISPATCH_OBJ_CONSUME_BIT) { - dc->dc_func = _dispatch_block_async_invoke_and_release; + if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) { + // Handle discrepancies between attr and target queue, attributes win + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + overcommit = _dispatch_queue_attr_overcommit_enabled; + } else { + overcommit = _dispatch_queue_attr_overcommit_disabled; + } + } + if (qos == DISPATCH_QOS_UNSPECIFIED) { + qos = _dispatch_priority_qos(tq->dq_priority); + } + tq = NULL; + } else if (tq && !tq->do_targetq) { + // target is a pthread or runloop root queue, setting QoS or overcommit + // is disallowed + if (overcommit != _dispatch_queue_attr_overcommit_unspecified) { + DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute " + "and use this kind of target queue"); + } } else { - dc->dc_func = _dispatch_block_async_invoke; + if (overcommit == _dispatch_queue_attr_overcommit_unspecified) { + // Serial queues default to overcommit! + overcommit = dqai.dqai_concurrent ? + _dispatch_queue_attr_overcommit_disabled : + _dispatch_queue_attr_overcommit_enabled; + } + } + if (!tq) { + tq = _dispatch_get_root_queue( + qos == DISPATCH_QOS_UNSPECIFIED ? 
DISPATCH_QOS_DEFAULT : qos, + overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; + if (unlikely(!tq)) { + DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); + } } - flags |= block_flags; - if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { - _dispatch_continuation_priority_set(dc, dbpd->dbpd_priority, flags); + // + // Step 2: Initialize the queue + // + + if (legacy) { + // if any of these attributes is specified, use non legacy classes + if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) { + legacy = false; + } + } + + const void *vtable; + dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0; + if (dqai.dqai_concurrent) { + vtable = DISPATCH_VTABLE(queue_concurrent); } else { - _dispatch_continuation_priority_set(dc, dc->dc_priority, flags); + vtable = DISPATCH_VTABLE(queue_serial); } - if (block_flags & DISPATCH_BLOCK_BARRIER) { - dc_flags |= DISPATCH_OBJ_BARRIER_BIT; + switch (dqai.dqai_autorelease_frequency) { + case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: + dqf |= DQF_AUTORELEASE_NEVER; + break; + case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: + dqf |= DQF_AUTORELEASE_ALWAYS; + break; } - if (block_flags & DISPATCH_BLOCK_HAS_VOUCHER) { - voucher_t v = dbpd->dbpd_voucher; - dc->dc_voucher = v ? _voucher_retain(v) : NULL; - dc_flags |= DISPATCH_OBJ_ENFORCE_VOUCHER; - _dispatch_voucher_debug("continuation[%p] set", dc->dc_voucher, dc); - _dispatch_voucher_ktrace_dc_push(dc); - } else { - _dispatch_continuation_voucher_set(dc, oq, flags); + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } } - dc_flags |= DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT; - dc->dc_flags = dc_flags; -} -#endif // __BLOCKS__ -#pragma mark - -#pragma mark dispatch_barrier_async + dispatch_lane_t dq = _dispatch_object_alloc(vtable, + sizeof(struct dispatch_lane_s)); + _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ? 
+ DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER | + (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0)); -DISPATCH_NOINLINE -static void -_dispatch_async_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags, uintptr_t dc_flags) -{ - dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); - _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); - _dispatch_continuation_async(dq, dc); + dq->dq_label = label; + dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos, + dqai.dqai_relpri); + if (overcommit == _dispatch_queue_attr_overcommit_enabled) { + dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + } + if (!dqai.dqai_inactive) { + _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq, tq); + } + _dispatch_retain(tq); + dq->do_targetq = tq; + _dispatch_object_debug(dq, "%s", __func__); + return _dispatch_trace_queue_create(dq)._dq; } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_barrier_async_f2(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, pthread_priority_t pp, - dispatch_block_flags_t flags) +dispatch_queue_t +dispatch_queue_create_with_target(const char *label, dispatch_queue_attr_t dqa, + dispatch_queue_t tq) { - dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; - - if (!fastpath(dc)) { - return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); - } - - _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); - _dispatch_continuation_push(dq, dc); + return _dispatch_lane_create_with_target(label, dqa, tq, false); } -DISPATCH_NOINLINE -void -dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +dispatch_queue_t +dispatch_queue_create(const char *label, dispatch_queue_attr_t attr) { - _dispatch_barrier_async_f2(dq, 
ctxt, func, 0, 0); + return _dispatch_lane_create_with_target(label, attr, + DISPATCH_TARGET_QUEUE_DEFAULT, true); } -DISPATCH_NOINLINE -void -_dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +dispatch_queue_t +dispatch_queue_create_with_accounting_override_voucher(const char *label, + dispatch_queue_attr_t attr, voucher_t voucher) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dc->dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; - dc->dc_func = func; - dc->dc_ctxt = ctxt; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - dx_push(dq, dc, 0); + (void)label; (void)attr; (void)voucher; + DISPATCH_CLIENT_CRASH(0, "Unsupported interface"); } -#ifdef __BLOCKS__ -void -dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work) +DISPATCH_NOINLINE +static void +_dispatch_queue_dispose(dispatch_queue_class_t dqu, bool *allow_free) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_BARRIER_BIT; + dispatch_queue_specific_head_t dqsh; + dispatch_queue_t dq = dqu._dq; - _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); - _dispatch_continuation_push(dq, dc); -} -#endif + if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { + free((void*)dq->dq_label); + } + dqsh = os_atomic_xchg2o(dq, dq_specific_head, (void *)0x200, relaxed); + if (dqsh) _dispatch_queue_specific_head_dispose(dqsh); -#pragma mark - -#pragma mark dispatch_async + // fast path for queues that never got their storage retained + if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) { + // poison the state with something that is suspended and is easy to spot + dq->dq_state = 0xdead000000000000; + return; + } + + // Take over freeing the memory from _dispatch_object_dealloc() + // + // As soon as we call _dispatch_queue_release_storage(), we forfeit + // the possibility for the caller of 
dx_dispose() to finalize the object + // so that responsibility is ours. + _dispatch_object_finalize(dq); + *allow_free = false; + dq->dq_label = ""; + dq->do_targetq = NULL; + dq->do_finalizer = NULL; + dq->do_ctxt = NULL; + return _dispatch_queue_release_storage(dq); +} void -_dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +_dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) { - dispatch_thread_frame_s dtf; - struct dispatch_continuation_s *other_dc = dc->dc_other; - dispatch_invoke_flags_t ctxt_flags = (dispatch_invoke_flags_t)dc->dc_ctxt; - // if we went through _dispatch_root_queue_push_override, - // the "right" root queue was stuffed into dc_func - dispatch_queue_t assumed_rq = (dispatch_queue_t)dc->dc_func; - dispatch_queue_t dq = dc->dc_data, rq, old_dq; - dispatch_priority_t old_dbp; + dispatch_lane_t dq = dqu._dl; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); - if (ctxt_flags) { - flags &= ~_DISPATCH_INVOKE_AUTORELEASE_MASK; - flags |= ctxt_flags; + if (dx_hastypeflag(dq, QUEUE_ROOT)) { + initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } - old_dq = _dispatch_get_current_queue(); - if (assumed_rq) { - old_dbp = _dispatch_root_queue_identity_assume(assumed_rq); - _dispatch_set_basepri(dq->dq_priority); - } else { - old_dbp = _dispatch_set_basepri(dq->dq_priority); + dq_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + dq_state &= ~DISPATCH_QUEUE_DIRTY; + dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; + if (unlikely(dq_state != initial_state)) { + if (_dq_state_drain_locked(dq_state)) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a locked queue"); + } +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a queue with corrupt state"); } - _dispatch_thread_frame_push(&dtf, dq); - _dispatch_continuation_pop_forwarded(dc, 
DISPATCH_NO_VOUCHER, - DISPATCH_OBJ_CONSUME_BIT, { - _dispatch_continuation_pop(other_dc, dic, flags, dq); - }); - _dispatch_thread_frame_pop(&dtf); - if (assumed_rq) _dispatch_queue_set_current(old_dq); - _dispatch_reset_basepri(old_dbp); - - rq = dq->do_targetq; - while (slowpath(rq->do_targetq) && rq != old_dq) { - _dispatch_queue_non_barrier_complete(rq); - rq = rq->do_targetq; + if (unlikely(dq->dq_items_tail)) { + DISPATCH_CLIENT_CRASH(dq->dq_items_tail, + "Release of a queue while items are enqueued"); } + dq->dq_items_head = (void *)0x200; + dq->dq_items_tail = (void *)0x200; - _dispatch_queue_non_barrier_complete(dq); - _dispatch_release_tailcall(dq); // pairs with _dispatch_async_redirect_wrap + _dispatch_queue_dispose(dqu, allow_free); } -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_async_redirect_wrap(dispatch_queue_t dq, dispatch_object_t dou) +void +_dispatch_lane_dispose(dispatch_lane_t dq, bool *allow_free) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_trace_queue_dispose(dq); + _dispatch_lane_class_dispose(dq, allow_free); +} - dou._do->do_next = NULL; - dc->do_vtable = DC_VTABLE(ASYNC_REDIRECT); - dc->dc_func = NULL; - dc->dc_ctxt = (void *)(uintptr_t)_dispatch_queue_autorelease_frequency(dq); - dc->dc_data = dq; - dc->dc_other = dou._do; - dc->dc_voucher = DISPATCH_NO_VOUCHER; - dc->dc_priority = DISPATCH_NO_PRIORITY; - _dispatch_retain(dq); // released in _dispatch_async_redirect_invoke - return dc; +void +_dispatch_queue_xref_dispose(dispatch_queue_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + long state = (long)dq_state; + if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); + if (unlikely(_dq_state_is_inactive(dq_state))) { + // Arguments for and against this assert are within 6705399 + DISPATCH_CLIENT_CRASH(state, "Release of an inactive 
object"); + } + DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object"); + } + os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); } DISPATCH_NOINLINE static void -_dispatch_async_f_redirect(dispatch_queue_t dq, - dispatch_object_t dou, dispatch_qos_t qos) +_dispatch_lane_suspend_slow(dispatch_lane_t dq) { - if (!slowpath(_dispatch_object_is_redirection(dou))) { - dou._dc = _dispatch_async_redirect_wrap(dq, dou); - } - dq = dq->do_targetq; + uint64_t old_state, new_state, delta; - // Find the queue to redirect to - while (slowpath(DISPATCH_QUEUE_USES_REDIRECTION(dq->dq_width))) { - if (!fastpath(_dispatch_queue_try_acquire_async(dq))) { - break; - } - if (!dou._dc->dc_ctxt) { - // find first queue in descending target queue order that has - // an autorelease frequency set, and use that as the frequency for - // this continuation. - dou._dc->dc_ctxt = (void *) - (uintptr_t)_dispatch_queue_autorelease_frequency(dq); - } - dq = dq->do_targetq; - } - - dx_push(dq, dou, qos); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_redirect(dispatch_queue_t dq, - struct dispatch_object_s *dc) -{ - _dispatch_trace_continuation_pop(dq, dc); - // This is a re-redirect, overrides have already been applied - // by _dispatch_async_f2. 
- // However we want to end up on the root queue matching `dc` qos, so pick up - // the current override of `dq` which includes dc's overrde (and maybe more) - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - _dispatch_async_f_redirect(dq, dc, _dq_state_max_qos(dq_state)); - _dispatch_introspection_queue_item_complete(dc); -} + _dispatch_queue_sidelock_lock(dq); -DISPATCH_NOINLINE -static void -_dispatch_async_f2(dispatch_queue_t dq, dispatch_continuation_t dc) -{ - // reserving non barrier width - // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width - // equivalent), so we have to check that this thread hasn't enqueued - // anything ahead of this call or we can break ordering - if (slowpath(dq->dq_items_tail)) { - return _dispatch_continuation_push(dq, dc); + // what we want to transfer (remove from dq_state) + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a suspend so add a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (dq->dq_side_suspend_cnt == 0) { + // we substract delta from dq_state, and we want to set this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; } - if (slowpath(!_dispatch_queue_try_acquire_async(dq))) { - return _dispatch_continuation_push(dq, dc); + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + // unsigned underflow of the substraction can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. 
+ if (unlikely(os_sub_overflow(old_state, delta, &new_state))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + if (unlikely(os_add_overflow(dq->dq_side_suspend_cnt, + DISPATCH_QUEUE_SUSPEND_HALF, &dq->dq_side_suspend_cnt))) { + DISPATCH_CLIENT_CRASH(0, "Too many nested calls to dispatch_suspend()"); } + return _dispatch_queue_sidelock_unlock(dq); - return _dispatch_async_f_redirect(dq, dc, - _dispatch_continuation_override_qos(dq, dc)); +retry: + _dispatch_queue_sidelock_unlock(dq); + return _dispatch_lane_suspend(dq); } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func, - pthread_priority_t pp, dispatch_block_flags_t flags) +void +_dispatch_lane_suspend(dispatch_lane_t dq) { - dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; - - if (!fastpath(dc)) { - return _dispatch_async_f_slow(dq, ctxt, func, pp, flags, dc_flags); - } + uint64_t old_state, new_state; - _dispatch_continuation_init_f(dc, dq, ctxt, func, pp, flags, dc_flags); - _dispatch_continuation_async2(dq, dc, false); -} + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = DISPATCH_QUEUE_SUSPEND_INTERVAL; + if (unlikely(os_add_overflow(old_state, new_state, &new_state))) { + os_atomic_rmw_loop_give_up({ + return _dispatch_lane_suspend_slow(dq); + }); + } + }); -DISPATCH_NOINLINE -void -dispatch_async_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) -{ - _dispatch_async_f(dq, ctxt, func, 0, 0); + if (!_dq_state_is_suspended(old_state)) { + // rdar://8181908 we need to extend the queue life for the duration + // of the call to wakeup at _dispatch_lane_resume() time. 
+ _dispatch_retain_2(dq); + } } DISPATCH_NOINLINE -void -dispatch_async_enforce_qos_class_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_async_f(dq, ctxt, func, 0, DISPATCH_BLOCK_ENFORCE_QOS_CLASS); -} - -#ifdef __BLOCKS__ -void -dispatch_async(dispatch_queue_t dq, dispatch_block_t work) +static void +_dispatch_lane_resume_slow(dispatch_lane_t dq) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT; + uint64_t old_state, new_state, delta; - _dispatch_continuation_init(dc, dq, work, 0, 0, dc_flags); - _dispatch_continuation_async(dq, dc); -} -#endif + _dispatch_queue_sidelock_lock(dq); -#pragma mark - -#pragma mark dispatch_group_async + // what we want to transfer + delta = DISPATCH_QUEUE_SUSPEND_HALF * DISPATCH_QUEUE_SUSPEND_INTERVAL; + // but this is a resume so consume a suspend count at the same time + delta -= DISPATCH_QUEUE_SUSPEND_INTERVAL; + switch (dq->dq_side_suspend_cnt) { + case 0: + goto retry; + case DISPATCH_QUEUE_SUSPEND_HALF: + // we will transition the side count to 0, so we want to clear this bit + delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; + break; + } + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + // unsigned overflow of the addition can happen because other + // threads could have touched this value while we were trying to acquire + // the lock, or because another thread raced us to do the same operation + // and got to the lock first. 
+ if (unlikely(os_add_overflow(old_state, delta, &new_state))) { + os_atomic_rmw_loop_give_up(goto retry); + } + }); + dq->dq_side_suspend_cnt -= DISPATCH_QUEUE_SUSPEND_HALF; + return _dispatch_queue_sidelock_unlock(dq); -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq, - dispatch_continuation_t dc) -{ - dispatch_group_enter(dg); - dc->dc_data = dg; - _dispatch_continuation_async(dq, dc); +retry: + _dispatch_queue_sidelock_unlock(dq); + return _dispatch_lane_resume(dq, false); } DISPATCH_NOINLINE -void -dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +static void +_dispatch_lane_resume_activate(dispatch_lane_t dq) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; - - _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, 0, dc_flags); - _dispatch_continuation_group_async(dg, dq, dc); + bool allow_resume = true; + // Step 2: run the activation finalizer + if (dx_vtable(dq)->dq_activate) { + dx_vtable(dq)->dq_activate(dq, &allow_resume); + } + // Step 3: consume the suspend count + if (allow_resume) { + return _dispatch_lane_resume(dq, false); + } } -#ifdef __BLOCKS__ void -dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, - dispatch_block_t db) +_dispatch_lane_resume(dispatch_lane_t dq, bool activate) { - dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DISPATCH_OBJ_CONSUME_BIT | DISPATCH_OBJ_GROUP_BIT; - - _dispatch_continuation_init(dc, dq, db, 0, 0, dc_flags); - _dispatch_continuation_group_async(dg, dq, dc); -} -#endif - -#pragma mark - -#pragma mark _dispatch_sync_invoke / _dispatch_sync_complete + // covers all suspend and inactive bits, including side suspend bit + const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; + uint64_t pending_barrier_width = + (dq->dq_width - 1) * 
DISPATCH_QUEUE_WIDTH_INTERVAL; + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | + DISPATCH_QUEUE_IN_BARRIER; -DISPATCH_NOINLINE -static void -_dispatch_queue_non_barrier_complete(dispatch_queue_t dq) -{ - uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); + // backward compatibility: only dispatch sources can abuse + // dispatch_resume() to really mean dispatch_activate() + bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); + uint64_t old_state, new_state; - // see _dispatch_queue_resume() - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; - if (unlikely(_dq_state_drain_locked(old_state))) { - // make drain_try_unlock() fail and reconsider whether there's - // enough width now for a new item - new_state |= DISPATCH_QUEUE_DIRTY; - } else if (likely(_dq_state_is_runnable(new_state))) { - uint64_t full_width = new_state; - if (_dq_state_has_pending_barrier(old_state)) { - full_width -= DISPATCH_QUEUE_PENDING_BARRIER; - full_width += DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; + // Activation is a bit tricky as it needs to finalize before the wakeup. + // + // If after doing its updates to the suspend count and/or inactive bit, + // the last suspension related bit that would remain is the + // NEEDS_ACTIVATION one, then this function: + // + // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into + // a suspend count) + // 2. runs the activation finalizer + // 3. consumes the suspend count set in (1), and finishes the resume flow + // + // Concurrently, some property setters such as setting dispatch source + // handlers or _dispatch_lane_set_target_queue try to do in-place changes + // before activation. These protect their action by taking a suspend count. + // Step (1) above cannot happen if such a setter has locked the object. 
+ if (activate) { + // relaxed atomic because this doesn't publish anything, this is only + // about picking the thread that gets to finalize the activation + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + if ((old_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + new_state = old_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else if (_dq_state_is_inactive(old_state)) { + // { sc:>0 i:1 na:1 } -> { i:0 na:1 } + // simple activation because sc is not 0 + // resume will deal with na:1 later + new_state = old_state - DISPATCH_QUEUE_INACTIVE; } else { - full_width += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - full_width += DISPATCH_QUEUE_IN_BARRIER; + // object already active, this is a no-op, just exit + os_atomic_rmw_loop_give_up(return); } - if ((full_width & DISPATCH_QUEUE_WIDTH_MASK) == - DISPATCH_QUEUE_WIDTH_FULL_BIT) { - new_state = full_width; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= owner_self; - } else if (_dq_state_is_dirty(old_state)) { - new_state |= DISPATCH_QUEUE_ENQUEUED; + }); + } else { + // release barrier needed to publish the effect of + // - dispatch_set_target_queue() + // - dispatch_set_*_handler() + // - dq_activate() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if ((old_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL + + DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } + new_state = old_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; + } else if (is_source && (old_state & suspend_bits) == + DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { + // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } + new_state = old_state - DISPATCH_QUEUE_INACTIVE + - DISPATCH_QUEUE_NEEDS_ACTIVATION + + DISPATCH_QUEUE_SUSPEND_INTERVAL; + } else if (unlikely(os_sub_overflow(old_state, + DISPATCH_QUEUE_SUSPEND_INTERVAL, 
&new_state))) { + // underflow means over-resume or a suspend count transfer + // to the side count is needed + os_atomic_rmw_loop_give_up({ + if (!(old_state & DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT)) { + goto over_resume; + } + return _dispatch_lane_resume_slow(dq); + }); + // + // below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL + // + } else if (!_dq_state_is_runnable(new_state)) { + // Out of width or still suspended. + // For the former, force _dispatch_lane_non_barrier_complete + // to reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(new_state)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (!is_source && (_dq_state_has_pending_barrier(new_state) || + new_state + pending_barrier_width < + DISPATCH_QUEUE_WIDTH_FULL_BIT)) { + // if we can, acquire the full width drain lock + // and then perform a lock transfer + // + // However this is never useful for a source where there are no + // sync waiters, so never take the lock and do a plain wakeup + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width_and_in_barrier; + } else { + // clear overrides and force a wakeup + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; } + }); + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { + // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + return _dispatch_lane_resume_activate(dq); + } + + if (activate) { + // if we're still in an activate codepath here we should have + // { sc:>0 na:1 }, if not we've got a corrupt state + if (unlikely(!_dq_state_is_suspended(new_state))) { + DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); } - }); + return; + } + + if (_dq_state_is_suspended(new_state)) { + return; + } + if (_dq_state_is_dirty(old_state)) { + // + 
// dependency ordering for dq state changes that were flushed + // and not acted upon + os_atomic_thread_fence(dependency); + dq = os_atomic_force_dependency_on(dq, old_state); + } + // Balancing the retain_2 done in suspend() for rdar://8181908 + dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - if (_dq_state_is_dirty(old_state)) { - // - // dependency ordering for dq state changes that were flushed - // and not acted upon - os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; + } else if (!_dq_state_is_runnable(new_state)) { + if (_dq_state_is_base_wlh(old_state)) { + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); } - return _dispatch_queue_barrier_complete(dq, 0, 0); + return _dispatch_release_2(dq); } + dispatch_assert(!_dq_state_received_sync_wait(old_state)); + dispatch_assert(!_dq_state_in_sync_transfer(old_state)); + return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); - if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { - _dispatch_retain_2(dq); - dispatch_assert(!_dq_state_is_base_wlh(new_state)); - return dx_push(dq->do_targetq, dq, _dq_state_max_qos(new_state)); +over_resume: + if (unlikely(_dq_state_is_inactive(old_state))) { + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an inactive object"); } + DISPATCH_CLIENT_CRASH(dq, "Over-resume of an object"); } +const char * +dispatch_queue_get_label(dispatch_queue_t dq) +{ + if (unlikely(dq == DISPATCH_CURRENT_QUEUE_LABEL)) { + dq = _dispatch_queue_get_current_or_default(); + } + return dq->dq_label ? 
dq->dq_label : ""; +} -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_sync_function_invoke_inline(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +qos_class_t +dispatch_queue_get_qos_class(dispatch_queue_t dq, int *relpri_ptr) { - dispatch_thread_frame_s dtf; - _dispatch_thread_frame_push(&dtf, dq); - _dispatch_client_callout(ctxt, func); - _dispatch_perfmon_workitem_inc(); - _dispatch_thread_frame_pop(&dtf); + dispatch_priority_t pri = dq->dq_priority; + dispatch_qos_t qos = _dispatch_priority_qos(pri); + if (relpri_ptr) { + *relpri_ptr = qos ? _dispatch_priority_relpri(dq->dq_priority) : 0; + } + return _dispatch_qos_to_qos_class(qos); } -DISPATCH_NOINLINE static void -_dispatch_sync_function_invoke(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) +_dispatch_lane_set_width(void *ctxt) { - _dispatch_sync_function_invoke_inline(dq, ctxt, func); -} + int w = (int)(intptr_t)ctxt; // intentional truncation + uint32_t tmp; + dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; -DISPATCH_NOINLINE -static void -_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, - uintptr_t dc_flags) -{ - bool barrier = (dc_flags & DISPATCH_OBJ_BARRIER_BIT); - do { - if (dq == stop_dq) return; - if (barrier) { - _dispatch_queue_barrier_complete(dq, 0, 0); - } else { - _dispatch_queue_non_barrier_complete(dq); + if (w >= 0) { + tmp = w ? 
(unsigned int)w : 1; + } else { + dispatch_qos_t qos = _dispatch_qos_from_pp(_dispatch_get_priority()); + switch (w) { + case DISPATCH_QUEUE_WIDTH_MAX_PHYSICAL_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_PHYSICAL); + break; + case DISPATCH_QUEUE_WIDTH_ACTIVE_CPUS: + tmp = _dispatch_qos_max_parallelism(qos, + DISPATCH_MAX_PARALLELISM_ACTIVE); + break; + case DISPATCH_QUEUE_WIDTH_MAX_LOGICAL_CPUS: + default: + tmp = _dispatch_qos_max_parallelism(qos, 0); + break; } - dq = dq->do_targetq; - barrier = (dq->dq_width == 1); - } while (unlikely(dq->do_targetq)); -} - -DISPATCH_NOINLINE -static void -_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) -{ - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_sync_complete_recurse(dq, NULL, dc_flags); -} + } + if (tmp > DISPATCH_QUEUE_WIDTH_MAX) { + tmp = DISPATCH_QUEUE_WIDTH_MAX; + } -DISPATCH_NOINLINE -static void -_dispatch_sync_invoke_and_complete(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - _dispatch_queue_non_barrier_complete(dq); + dispatch_queue_flags_t old_dqf, new_dqf; + os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); + }); + _dispatch_lane_inherit_wlh_from_target(dq, dq->do_targetq); + _dispatch_object_debug(dq, "%s", __func__); } -/* - * For queues we can cheat and inline the unlock code, which is invalid - * for objects with a more complex state machine (sources or mach channels) - */ -DISPATCH_NOINLINE -static void -_dispatch_queue_barrier_sync_invoke_and_complete(dispatch_queue_t dq, - void *ctxt, dispatch_function_t func) +void +dispatch_queue_set_width(dispatch_queue_t dq, long width) { - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) { - return 
_dispatch_queue_barrier_complete(dq, 0, 0); + unsigned long type = dx_type(dq); + if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) { + DISPATCH_CLIENT_CRASH(type, "Unexpected dispatch object type"); + } else if (unlikely(type != DISPATCH_QUEUE_CONCURRENT_TYPE)) { + DISPATCH_CLIENT_CRASH(type, "Cannot set width of a serial queue"); } - // Presence of any of these bits requires more work that only - // _dispatch_queue_barrier_complete() handles properly - // - // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without - // checking the role is sloppy, but is a super fast check, and neither of - // these bits should be set if the lock was never contended/discovered. - const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | - DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | - DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | - DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - uint64_t old_state, new_state; - - // similar to _dispatch_queue_drain_try_unlock - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - if (unlikely(old_state & fail_unlock_mask)) { - os_atomic_rmw_loop_give_up({ - return _dispatch_queue_barrier_complete(dq, 0, 0); - }); - } - }); - if (_dq_state_is_base_wlh(old_state)) { - _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + if (likely((int)width >= 0)) { + dispatch_lane_t dl = upcast(dq)._dl; + _dispatch_barrier_trysync_or_async_f(dl, (void*)(intptr_t)width, + _dispatch_lane_set_width, DISPATCH_BARRIER_TRYSYNC_SUSPEND); + } else { + // The negative width constants need to execute on the queue to + // query the queue QoS + _dispatch_barrier_async_detached_f(dq, (void*)(intptr_t)width, + _dispatch_lane_set_width); } } -#pragma mark - -#pragma mark _dispatch_sync_wait / _dispatch_sync_waiter_wake - -#define DISPATCH_SYNC_WAITER_NO_UNLOCK 
(~0ull) - -DISPATCH_NOINLINE static void -_dispatch_sync_waiter_wake(dispatch_sync_context_t dsc, - dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) +_dispatch_lane_legacy_set_target_queue(void *ctxt) { - dispatch_wlh_t waiter_wlh = dsc->dc_data; + dispatch_lane_t dq = upcast(_dispatch_queue_get_current())._dl; + dispatch_queue_t tq = ctxt; + dispatch_queue_t otq = dq->do_targetq; - if (_dq_state_in_sync_transfer(old_state) || - _dq_state_in_sync_transfer(new_state) || - (waiter_wlh != DISPATCH_WLH_ANON)) { - _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); - } - if (waiter_wlh == DISPATCH_WLH_ANON) { - if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) { - _dispatch_wqthread_override_start(dsc->dsc_waiter, - dsc->dsc_override_qos); - } - _dispatch_thread_event_signal(&dsc->dsc_event); + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + _dispatch_ktrace3(DISPATCH_PERF_non_leaf_retarget, dq, otq, tq); + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); +#else + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); +#endif } - _dispatch_introspection_queue_item_complete(dsc->_as_dc); + + tq = _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq, tq); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_wakeup() + _dispatch_queue_sidelock_lock(dq); +#endif + dq->do_targetq = tq; +#if HAVE_PTHREAD_WORKQUEUE_QOS + // see _dispatch_queue_wakeup() + _dispatch_queue_sidelock_unlock(dq); +#endif + + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_introspection_target_queue_changed(dq->_as_dq); + _dispatch_release_tailcall(otq); } -DISPATCH_NOINLINE -static void -_dispatch_sync_waiter_redirect_or_wake(dispatch_queue_t dq, uint64_t owned, - dispatch_object_t dou) +void +_dispatch_lane_set_target_queue(dispatch_lane_t dq, 
dispatch_queue_t tq) { - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dou._dc; - uint64_t next_owner = 0, old_state, new_state; - dispatch_wlh_t wlh = NULL; + if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { + bool overcommit = (dq->dq_width == 1); + tq = _dispatch_get_default_queue(overcommit); + } - _dispatch_trace_continuation_pop(dq, dsc->_as_dc); + if (_dispatch_lane_try_inactive_suspend(dq)) { + _dispatch_object_set_target_queue_inline(dq, tq); + return _dispatch_lane_resume(dq, false); + } - if (owned == DISPATCH_SYNC_WAITER_NO_UNLOCK) { - dispatch_assert(!(dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT)); - new_state = old_state = os_atomic_load2o(dq, dq_state, relaxed); - } else { - if (dsc->dc_flags & DISPATCH_OBJ_BARRIER_BIT) { - next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); - } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = old_state - owned; - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - new_state &= ~DISPATCH_QUEUE_DIRTY; - new_state |= next_owner; - if (_dq_state_is_base_wlh(old_state)) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; - } - }); - if (_dq_state_is_base_wlh(old_state)) { - wlh = (dispatch_wlh_t)dq; - } else if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); - } +#if !DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); } +#endif - if (dsc->dc_data == DISPATCH_WLH_ANON) { - if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { - dsc->dsc_override_qos = _dq_state_max_qos(old_state); + if (unlikely(!_dispatch_queue_is_mutable(dq))) { +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of a queue " + "already targeted by other dispatch objects"); } +#endif + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); } - if (unlikely(_dq_state_is_inner_queue(old_state))) { - dispatch_queue_t tq = dq->do_targetq; - if (likely(tq->dq_width == 1)) { - dsc->dc_flags = DISPATCH_OBJ_BARRIER_BIT | - DISPATCH_OBJ_SYNC_WAITER_BIT; - } else { - dsc->dc_flags = DISPATCH_OBJ_SYNC_WAITER_BIT; + unsigned long metatype = dx_metatype(dq); + switch (metatype) { + case _DISPATCH_LANE_TYPE: +#if DISPATCH_ALLOW_NON_LEAF_RETARGET + if (_dispatch_queue_atomic_flags(dq) & DQF_TARGETED) { + _dispatch_bug_deprecated("Changing the target of a queue " + "already targeted by other dispatch objects"); } - _dispatch_introspection_queue_item_complete(dsc->_as_dc); - return _dispatch_queue_push_sync_waiter(tq, dsc, 0); +#endif + break; + case _DISPATCH_SOURCE_TYPE: + _dispatch_ktrace1(DISPATCH_PERF_post_activate_retarget, dq); + _dispatch_bug_deprecated("Changing the target of a source " + "after it has been activated"); + break; + default: + DISPATCH_CLIENT_CRASH(metatype, "Unexpected dispatch object type"); } - return _dispatch_sync_waiter_wake(dsc, wlh, old_state, new_state); + _dispatch_retain(tq); + return _dispatch_barrier_trysync_or_async_f(dq, tq, + _dispatch_lane_legacy_set_target_queue, 
+ DISPATCH_BARRIER_TRYSYNC_SUSPEND); } -DISPATCH_NOINLINE -static void -_dispatch_queue_class_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target, - uint64_t owned) +#pragma mark - +#pragma mark _dispatch_queue_debug + +size_t +_dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) { - uint64_t old_state, new_state, enqueue; - dispatch_queue_t tq; + size_t offset = 0; + dispatch_queue_t target = dq->do_targetq; + const char *tlabel = target && target->dq_label ? target->dq_label : ""; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (target == DISPATCH_QUEUE_WAKEUP_MGR) { - tq = &_dispatch_mgr_q; - enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; - } else if (target) { - tq = (target == DISPATCH_QUEUE_WAKEUP_TARGET) ? dq->do_targetq : target; - enqueue = DISPATCH_QUEUE_ENQUEUED; - } else { - tq = NULL; - enqueue = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " + "target = %s[%p], width = 0x%x, state = 0x%016llx", + dq->dq_sref_cnt + 1, tlabel, target, dq->dq_width, + (unsigned long long)dq_state); + if (_dq_state_is_suspended(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", suspended = %d", + _dq_state_suspend_cnt(dq_state)); + } + if (_dq_state_is_inactive(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); + } else if (_dq_state_needs_activation(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); + } + if (_dq_state_is_enqueued(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); + } + if (_dq_state_is_dirty(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", dirty"); + } + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + if (qos) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", max qos %d", qos); + } + mach_port_t owner = _dq_state_drain_owner(dq_state); + if 
(!_dispatch_queue_is_thread_bound(dq) && owner) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", draining on 0x%x", + owner); + } + if (_dq_state_is_in_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-barrier"); + } else { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", in-flight = %d", + _dq_state_used_width(dq_state, dq->dq_width)); + } + if (_dq_state_has_pending_barrier(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", pending-barrier"); + } + if (_dispatch_queue_is_thread_bound(dq)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", thread = 0x%x ", + owner); } + return offset; +} - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - new_state = _dq_state_merge_qos(old_state - owned, qos); - new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; - if (unlikely(_dq_state_is_suspended(old_state))) { - if (likely(_dq_state_is_base_wlh(old_state))) { - new_state &= ~DISPATCH_QUEUE_ENQUEUED; - } - } else if (enqueue) { - if (!_dq_state_is_enqueued(old_state)) { - new_state |= enqueue; - } - } else if (unlikely(_dq_state_is_dirty(old_state))) { - os_atomic_rmw_loop_give_up({ - // just renew the drain lock with an acquire barrier, to see - // what the enqueuer that set DIRTY has done. 
- // the xor generates better assembly as DISPATCH_QUEUE_DIRTY - // is already in a register - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; - return dx_wakeup(dq, qos, flags); - }); - } else if (likely(_dq_state_is_base_wlh(old_state))) { - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - new_state &= ~DISPATCH_QUEUE_ENQUEUED; - } else { - new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; - } - }); - old_state -= owned; - dispatch_assert(_dq_state_drain_locked_by_self(old_state)); - dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); +size_t +_dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz) +{ + size_t offset = 0; + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + dq->dq_label ? dq->dq_label : _dispatch_object_class_name(dq), dq); + offset += _dispatch_object_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dq, &buf[offset], bufsiz - offset); + offset += dsnprintf(&buf[offset], bufsiz - offset, "}"); + return offset; +} +#if DISPATCH_PERF_MON - if (_dq_state_received_override(old_state)) { - // Ensure that the root queue sees that this thread was overridden. 
- _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); - } +#define DISPATCH_PERF_MON_BUCKETS 8 - if (tq) { - if (likely((old_state ^ new_state) & enqueue)) { - dispatch_assert(_dq_state_is_enqueued(new_state)); - dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); - return _dispatch_queue_push_queue(tq, dq, new_state); - } -#if HAVE_PTHREAD_WORKQUEUE_QOS - // when doing sync to async handoff - // if the queue received an override we have to forecefully redrive - // the same override so that a new stealer is enqueued because - // the previous one may be gone already - if (_dq_state_should_override(new_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, new_state, - flags); - } -#endif +static struct { + uint64_t volatile time_total; + uint64_t volatile count_total; + uint64_t volatile thread_total; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; +DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; + +void +_dispatch_queue_merge_stats(uint64_t start, bool trace, perfmon_thread_type type) +{ + uint64_t delta = _dispatch_uptime() - start; + unsigned long count; + int bucket = 0; + count = (unsigned long)_dispatch_thread_getspecific(dispatch_bcounter_key); + _dispatch_thread_setspecific(dispatch_bcounter_key, NULL); + if (count == 0) { + bucket = 0; + if (trace) _dispatch_ktrace1(DISPATCH_PERF_MON_worker_useless, type); + } else { + bucket = MIN(DISPATCH_PERF_MON_BUCKETS - 1, + (int)sizeof(count) * CHAR_BIT - __builtin_clzl(count)); + os_atomic_add(&_dispatch_stats[bucket].count_total, count, relaxed); } - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); + os_atomic_add(&_dispatch_stats[bucket].time_total, delta, relaxed); + os_atomic_inc(&_dispatch_stats[bucket].thread_total, relaxed); + if (trace) { + _dispatch_ktrace3(DISPATCH_PERF_MON_worker_thread_end, count, delta, type); } } +#endif + +#pragma mark - +#pragma mark dispatch queue/lane drain & invoke + DISPATCH_NOINLINE static 
void -_dispatch_queue_barrier_complete(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) +_dispatch_return_to_kernel(void) { - dispatch_continuation_t dc_tmp, dc_start = NULL, dc_end = NULL; - dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; - struct dispatch_object_s *dc = NULL; - uint64_t owned = DISPATCH_QUEUE_IN_BARRIER + - dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - size_t count = 0; - - dispatch_assert(dx_metatype(dq) == _DISPATCH_QUEUE_TYPE); - - if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) { - dc = _dispatch_queue_head(dq); - if (!_dispatch_object_is_sync_waiter(dc)) { - // not a slow item, needs to wake up - } else if (likely(dq->dq_width == 1) || - _dispatch_object_is_barrier(dc)) { - // rdar://problem/8290662 "barrier/writer lock transfer" - dc_start = dc_end = (dispatch_continuation_t)dc; - owned = 0; - count = 1; - dc = _dispatch_queue_next(dq, dc); - } else { - // "reader lock transfer" - // we must not wake waiters immediately because our right - // for dequeuing is granted through holding the full "barrier" width - // which a signaled work item could relinquish out from our feet - dc_start = (dispatch_continuation_t)dc; - do { - // no check on width here because concurrent queues - // do not respect width for blocked readers, the thread - // is already spent anyway - dc_end = (dispatch_continuation_t)dc; - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - count++; - dc = _dispatch_queue_next(dq, dc); - } while (dc && _dispatch_object_is_sync_waiter_non_barrier(dc)); - } - - if (count) { - do { - dc_tmp = dc_start; - dc_start = dc_start->do_next; - _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc_tmp); - owned = DISPATCH_SYNC_WAITER_NO_UNLOCK; - } while (dc_tmp != dc_end); - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } - return; - } - if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) { - _dispatch_retain_2(dq); - flags |= DISPATCH_WAKEUP_CONSUME_2; - } - 
target = DISPATCH_QUEUE_WAKEUP_TARGET; +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (likely(ddi && ddi->ddi_wlh != DISPATCH_WLH_ANON)) { + dispatch_assert(ddi->ddi_wlh_servicing); + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } else { + _dispatch_clear_return_to_kernel(); } - - return _dispatch_queue_class_barrier_complete(dq, qos, flags, target,owned); +#endif } -#if DISPATCH_COCOA_COMPAT -static void -_dispatch_sync_thread_bound_invoke(void *ctxt) +void +_dispatch_poll_for_events_4launchd(void) { - dispatch_sync_context_t dsc = ctxt; - dispatch_queue_t cq = _dispatch_queue_get_current(); - dispatch_queue_t orig_dq = dsc->dc_other; - dispatch_thread_frame_s dtf; - dispatch_assert(_dispatch_queue_is_thread_bound(cq)); - - // the block runs on the thread the queue is bound to and not - // on the calling thread, but we mean to see the calling thread - // dispatch thread frames, so we fake the link, and then undo it - _dispatch_thread_frame_push_and_rebase(&dtf, orig_dq, &dsc->dsc_dtf); - _dispatch_client_callout(dsc->dsc_ctxt, dsc->dsc_func); - _dispatch_thread_frame_pop(&dtf); - - // communicate back to _dispatch_sync_wait who the thread bound queue - // was so that we skip it during _dispatch_sync_complete_recurse - dsc->dc_other = cq; - dsc->dsc_func = NULL; - _dispatch_thread_event_signal(&dsc->dsc_event); // release + _dispatch_return_to_kernel(); } + +#if DISPATCH_USE_WORKQUEUE_NARROWING +DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) +_dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); +#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS +DISPATCH_STATIC_GLOBAL(uint64_t _dispatch_narrow_check_interval_cache); #endif DISPATCH_ALWAYS_INLINE static inline uint64_t -_dispatch_sync_wait_prepare(dispatch_queue_t dq) +_dispatch_narrow_check_interval(void) { - uint64_t old_state, new_state; - - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if (_dq_state_is_suspended(old_state) || - 
!_dq_state_is_base_wlh(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - if (!_dq_state_drain_locked(old_state) || - _dq_state_in_sync_transfer(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - }); - return new_state; +#if DISPATCH_TIME_UNIT_USES_NANOSECONDS + return 50 * NSEC_PER_MSEC; +#else + if (_dispatch_narrow_check_interval_cache == 0) { + _dispatch_narrow_check_interval_cache = + _dispatch_time_nano2mach(50 * NSEC_PER_MSEC); + } + return _dispatch_narrow_check_interval_cache; +#endif } -static void -_dispatch_sync_waiter_compute_wlh(dispatch_queue_t dq, - dispatch_sync_context_t dsc) +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, + dispatch_priority_t pri) { - bool needs_locking = _dispatch_queue_is_legacy(dq); - - if (needs_locking) { - dsc->dsc_release_storage = true; - _dispatch_queue_sidelock_lock(dq); - } - - dispatch_queue_t tq = dq->do_targetq; - uint64_t dq_state = _dispatch_sync_wait_prepare(tq); - - if (_dq_state_is_suspended(dq_state) || - _dq_state_is_base_anon(dq_state)) { - dsc->dsc_release_storage = false; - dsc->dc_data = DISPATCH_WLH_ANON; - } else if (_dq_state_is_base_wlh(dq_state)) { - if (dsc->dsc_release_storage) { - _dispatch_queue_retain_storage(tq); - } - dsc->dc_data = (dispatch_wlh_t)tq; - } else { - _dispatch_sync_waiter_compute_wlh(tq, dsc); + if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + dic->dic_next_narrow_check = _dispatch_approximate_time() + + _dispatch_narrow_check_interval(); } - if (needs_locking) _dispatch_queue_sidelock_unlock(dq); } DISPATCH_NOINLINE -static void -_dispatch_sync_wait(dispatch_queue_t top_dq, void *ctxt, - dispatch_function_t func, uintptr_t top_dc_flags, - dispatch_queue_t dq, uintptr_t dc_flags) +static bool +_dispatch_queue_drain_should_narrow_slow(uint64_t now, + dispatch_invoke_context_t dic) { - pthread_priority_t 
pp = _dispatch_get_priority(); - dispatch_tid tid = _dispatch_tid_self(); - dispatch_qos_t qos; - uint64_t dq_state; - - dq_state = _dispatch_sync_wait_prepare(dq); - if (unlikely(_dq_state_drain_locked_by(dq_state, tid))) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch_sync called on queue " - "already owned by current thread"); - } + if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_qos_t qos = _dispatch_qos_from_pp(pp); + if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { + DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); + } + size_t idx = DISPATCH_QOS_BUCKET(qos); + os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; + uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); - struct dispatch_sync_context_s dsc = { - .dc_flags = dc_flags | DISPATCH_OBJ_SYNC_WAITER_BIT, - .dc_other = top_dq, - .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG, - .dc_voucher = DISPATCH_NO_VOUCHER, - .dsc_func = func, - .dsc_ctxt = ctxt, - .dsc_waiter = tid, - }; - if (_dq_state_is_suspended(dq_state) || - _dq_state_is_base_anon(dq_state)) { - dsc.dc_data = DISPATCH_WLH_ANON; - } else if (_dq_state_is_base_wlh(dq_state)) { - dsc.dc_data = (dispatch_wlh_t)dq; - } else { - _dispatch_sync_waiter_compute_wlh(dq, &dsc); - } -#if DISPATCH_COCOA_COMPAT - // It's preferred to execute synchronous blocks on the current thread - // due to thread-local side effects, etc. 
However, blocks submitted - // to the main thread MUST be run on the main thread - // - // Since we don't know whether that will happen, save the frame linkage - // for the sake of _dispatch_sync_thread_bound_invoke - _dispatch_thread_frame_save_state(&dsc.dsc_dtf); - - // Since the continuation doesn't have the CONSUME bit, the voucher will be - // retained on adoption on the thread bound queue if it happens so we can - // borrow this thread's reference - dsc.dc_voucher = _voucher_get(); - dsc.dc_func = _dispatch_sync_thread_bound_invoke; - dsc.dc_ctxt = &dsc; -#endif + dic->dic_next_narrow_check = newval; + os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { + if (now < oldval) { + os_atomic_rmw_loop_give_up(return false); + } + }); - if (dsc.dc_data == DISPATCH_WLH_ANON) { - dsc.dsc_override_qos_floor = dsc.dsc_override_qos = - _dispatch_get_basepri_override_qos_floor(); - qos = _dispatch_qos_from_pp(pp); - _dispatch_thread_event_init(&dsc.dsc_event); - } else { - qos = 0; - } - _dispatch_queue_push_sync_waiter(dq, &dsc, qos); - if (dsc.dc_data == DISPATCH_WLH_ANON) { - _dispatch_thread_event_wait(&dsc.dsc_event); // acquire - _dispatch_thread_event_destroy(&dsc.dsc_event); - // If _dispatch_sync_waiter_wake() gave this thread an override, - // ensure that the root queue sees it. 
- if (dsc.dsc_override_qos > dsc.dsc_override_qos_floor) { - _dispatch_set_basepri_override_qos(dsc.dsc_override_qos); + if (!_pthread_workqueue_should_narrow(pp)) { + return false; } - } else { - _dispatch_event_loop_wait_for_ownership(&dsc); - } - _dispatch_introspection_sync_begin(top_dq); -#if DISPATCH_COCOA_COMPAT - if (unlikely(dsc.dsc_func == NULL)) { - // Queue bound to a non-dispatch thread, the continuation already ran - // so just unlock all the things, except for the thread bound queue - dispatch_queue_t bound_dq = dsc.dc_other; - return _dispatch_sync_complete_recurse(top_dq, bound_dq, top_dc_flags); + dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; } -#endif - _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags); + return true; } -DISPATCH_NOINLINE -static void -_dispatch_sync_f_slow(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) { - if (unlikely(!dq->do_targetq)) { - return _dispatch_sync_function_invoke(dq, ctxt, func); + uint64_t next_check = dic->dic_next_narrow_check; + if (unlikely(next_check)) { + uint64_t now = _dispatch_approximate_time(); + if (unlikely(next_check < now)) { + return _dispatch_queue_drain_should_narrow_slow(now, dic); + } } - _dispatch_sync_wait(dq, ctxt, func, dc_flags, dq, dc_flags); + return false; } +#else +#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) +#define _dispatch_queue_drain_should_narrow(dic) false +#endif -#pragma mark - -#pragma mark dispatch_sync / dispatch_barrier_sync - -DISPATCH_NOINLINE -static void -_dispatch_sync_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uintptr_t dc_flags) +/* + * Drain comes in 2 flavours (serial/concurrent) and 2 modes + * (redirecting or not). + * + * Serial + * ~~~~~~ + * Serial drain is about serial queues (width == 1). 
It doesn't support + * the redirecting mode, which doesn't make sense, and treats all continuations + * as barriers. Bookkeeping is minimal in serial flavour, most of the loop + * is optimized away. + * + * Serial drain stops if the width of the queue grows to larger than 1. + * Going through a serial drain prevents any recursive drain from being + * redirecting. + * + * Concurrent + * ~~~~~~~~~~ + * When in non-redirecting mode (meaning one of the target queues is serial), + * non-barriers and barriers alike run in the context of the drain thread. + * Slow non-barrier items are still all signaled so that they can make progress + * toward the dispatch_sync() that will serialize them all . + * + * In redirecting mode, non-barrier work items are redirected downward. + * + * Concurrent drain stops if the width of the queue becomes 1, so that the + * queue drain moves to the more efficient serial mode. + */ +DISPATCH_ALWAYS_INLINE +static dispatch_queue_wakeup_target_t +_dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) { - dispatch_tid tid = _dispatch_tid_self(); - dispatch_queue_t tq = dq->do_targetq; + dispatch_queue_t orig_tq = dq->do_targetq; + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + uint64_t dq_state, owned = *owned_ptr; - do { - if (likely(tq->dq_width == 1)) { - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { - return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, - DISPATCH_OBJ_BARRIER_BIT); + if (unlikely(!dq->dq_items_tail)) return NULL; + + _dispatch_thread_frame_push(&dtf, dq); + if (serial_drain || _dq_state_is_in_barrier(owned)) { + // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` + // but width can change while draining barrier work items, so we only + // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` + owned = DISPATCH_QUEUE_IN_BARRIER; + } else { + owned &= 
DISPATCH_QUEUE_WIDTH_MASK; + } + + dc = _dispatch_queue_get_head(dq); + goto first_iteration; + + for (;;) { + dispatch_assert(dic->dic_barrier_waiter == NULL); + dc = next_dc; + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (unlikely(!dc)) { + if (!dq->dq_items_tail) { + break; } - } else { - if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { - return _dispatch_sync_wait(dq, ctxt, func, dc_flags, tq, 0); + dc = _dispatch_queue_get_head(dq); + } + if (unlikely(serial_drain != (dq->dq_width == 1))) { + break; + } + if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + break; + } + if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + break; } } - tq = tq->do_targetq; - } while (unlikely(tq->do_targetq)); - return _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags); -} +first_iteration: + dq_state = os_atomic_load(&dq->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + break; + } + if (unlikely(orig_tq != dq->do_targetq)) { + break; + } -DISPATCH_NOINLINE -void -dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func) -{ - dispatch_tid tid = _dispatch_tid_self(); + if (serial_drain || _dispatch_object_is_barrier(dc)) { + if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { + if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_IN_BARRIER; + } + if (_dispatch_object_is_sync_waiter(dc) && + !(flags & DISPATCH_INVOKE_THREAD_BOUND)) { + dic->dic_barrier_waiter = dc; + goto out_with_barrier_waiter; + } + next_dc = _dispatch_queue_pop_head(dq, dc); + } else { + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // we just ran barrier work items, we have to make their + // effect visible to other sync work items on other threads + // that may 
start coming in after this point, hence the + // release barrier + os_atomic_xor2o(dq, dq_state, owned, release); + owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } else if (unlikely(owned == 0)) { + if (_dispatch_object_is_waiter(dc)) { + // sync "readers" don't observe the limit + _dispatch_queue_reserve_sync_width(dq); + } else if (!_dispatch_queue_try_acquire_async(dq)) { + goto out_with_no_width; + } + owned = DISPATCH_QUEUE_WIDTH_INTERVAL; + } - // The more correct thing to do would be to merge the qos of the thread - // that just acquired the barrier lock into the queue state. - // - // However this is too expensive for the fastpath, so skip doing it. - // The chosen tradeoff is that if an enqueue on a lower priority thread - // contends with this fastpath, this thread may receive a useless override. - // - // Global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { - return _dispatch_sync_f_slow(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); - } + next_dc = _dispatch_queue_pop_head(dq, dc); + if (_dispatch_object_is_waiter(dc)) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_non_barrier_waiter_redirect_or_wake(dq, dc); + continue; + } - _dispatch_introspection_sync_begin(dq); - if (unlikely(dq->do_targetq->do_targetq)) { - return _dispatch_sync_recurse(dq, ctxt, func, DISPATCH_OBJ_BARRIER_BIT); - } - _dispatch_queue_barrier_sync_invoke_and_complete(dq, ctxt, func); -} + if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { + owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; + // This is a re-redirect, overrides have already been applied by + // _dispatch_continuation_async* + // However we want to end up on the root queue matching `dc` + // qos, so pick up the current override of `dq` which includes + // dc's override (and maybe more) + _dispatch_continuation_redirect_push(dq, dc, + 
_dispatch_queue_max_qos(dq)); + continue; + } + } -DISPATCH_NOINLINE -void -dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func) -{ - if (likely(dq->dq_width == 1)) { - return dispatch_barrier_sync_f(dq, ctxt, func); + _dispatch_continuation_pop_inline(dc, dic, flags, dq); } - // Global concurrent queues and queues bound to non-dispatch threads - // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE - if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { - return _dispatch_sync_f_slow(dq, ctxt, func, 0); + if (owned == DISPATCH_QUEUE_IN_BARRIER) { + // if we're IN_BARRIER we really own the full width too + owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + } + if (dc) { + owned = _dispatch_queue_adjust_owned(dq, owned, dc); } + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; + *owned_ptr |= owned; + _dispatch_thread_frame_pop(&dtf); + return dc ? dq->do_targetq : NULL; - _dispatch_introspection_sync_begin(dq); - if (unlikely(dq->do_targetq->do_targetq)) { - return _dispatch_sync_recurse(dq, ctxt, func, 0); +out_with_no_width: + *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; + _dispatch_thread_frame_pop(&dtf); + return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + +out_with_barrier_waiter: + if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) { + DISPATCH_INTERNAL_CRASH(0, + "Deferred continuation on source, mach channel or mgr"); } - _dispatch_sync_invoke_and_complete(dq, ctxt, func); + _dispatch_thread_frame_pop(&dtf); + return dq->do_targetq; } -#ifdef __BLOCKS__ DISPATCH_NOINLINE -static void -_dispatch_sync_block_with_private_data(dispatch_queue_t dq, - dispatch_block_t work, dispatch_block_flags_t flags) +static dispatch_queue_wakeup_target_t +_dispatch_lane_concurrent_drain(dispatch_lane_class_t dqu, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) { - dispatch_block_private_data_t dbpd = 
_dispatch_block_get_data(work); - pthread_priority_t op = 0, p = 0; - - flags |= dbpd->dbpd_flags; - op = _dispatch_block_invoke_should_set_priority(flags, dbpd->dbpd_priority); - if (op) { - p = dbpd->dbpd_priority; - } - voucher_t ov, v = DISPATCH_NO_VOUCHER; - if (flags & DISPATCH_BLOCK_HAS_VOUCHER) { - v = dbpd->dbpd_voucher; - } - ov = _dispatch_set_priority_and_voucher(p, v, 0); + return _dispatch_lane_drain(dqu._dl, dic, flags, owned, false); +} - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq->_as_oq, relaxed)) { - _dispatch_retain_2(dq); - } - if (flags & DISPATCH_BLOCK_BARRIER) { - dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke); - } else { - dispatch_sync_f(dq, work, _dispatch_block_sync_invoke); - } - _dispatch_reset_priority_and_voucher(op, ov); +DISPATCH_NOINLINE +dispatch_queue_wakeup_target_t +_dispatch_lane_serial_drain(dispatch_lane_class_t dqu, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) +{ + flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; + return _dispatch_lane_drain(dqu._dl, dic, flags, owned, true); } void -dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work) +_dispatch_queue_invoke_finish(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned) { - if (unlikely(_dispatch_block_has_private_data(work))) { - dispatch_block_flags_t flags = DISPATCH_BLOCK_BARRIER; - return _dispatch_sync_block_with_private_data(dq, work, flags); + struct dispatch_object_s *dc = dic->dic_barrier_waiter; + dispatch_qos_t qos = dic->dic_barrier_waiter_bucket; + if (dc) { + dic->dic_barrier_waiter = NULL; + dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED; + owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; +#if DISPATCH_INTROSPECTION + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + dsc->dsc_from_async = true; +#endif + if (qos) { + return 
_dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl, + dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned); + } + return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc, + DISPATCH_WAKEUP_CONSUME_2, owned); + } + + uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED; + if (tq == DISPATCH_QUEUE_WAKEUP_MGR) { + enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR; + } + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - owned; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state |= DISPATCH_QUEUE_DIRTY; + if (_dq_state_is_runnable(new_state) && + !_dq_state_is_enqueued(new_state)) { + // drain was not interupted for suspension + // we will reenqueue right away, just put ENQUEUED back + new_state |= enqueued; + } + }); + old_state -= owned; + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. + _dispatch_set_basepri_override_qos(_dq_state_max_qos(new_state)); + } + if ((old_state ^ new_state) & enqueued) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); } - dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work)); + return _dispatch_release_2_tailcall(dq); } void -dispatch_sync(dispatch_queue_t dq, dispatch_block_t work) +_dispatch_lane_activate(dispatch_lane_class_t dq, + DISPATCH_UNUSED bool *allow_resume) { - if (unlikely(_dispatch_block_has_private_data(work))) { - return _dispatch_sync_block_with_private_data(dq, work, 0); + dispatch_queue_t tq = dq._dl->do_targetq; + dispatch_priority_t pri = dq._dl->dq_priority; + + // Normalize priority: keep the fallback only when higher than the floor + if (_dispatch_priority_fallback_qos(pri) <= _dispatch_priority_qos(pri) || + (_dispatch_priority_qos(pri) && + !(pri & DISPATCH_PRIORITY_FLAG_FLOOR))) { + pri &= ~DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + pri &= ~DISPATCH_PRIORITY_FLAG_FALLBACK; + dq._dl->dq_priority = pri; } - 
dispatch_sync_f(dq, work, _dispatch_Block_invoke(work)); + tq = _dispatch_queue_priority_inherit_from_target(dq, tq); + _dispatch_lane_inherit_wlh_from_target(dq._dl, tq); } -#endif // __BLOCKS__ - -#pragma mark - -#pragma mark dispatch_trysync -DISPATCH_NOINLINE -static void -_dispatch_barrier_trysync_or_async_f_complete(dispatch_queue_t dq, - void *ctxt, dispatch_function_t func, uint32_t flags) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_lane_invoke2(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned) { - dispatch_wakeup_flags_t wflags = DISPATCH_WAKEUP_BARRIER_COMPLETE; + dispatch_queue_t otq = dq->do_targetq; + dispatch_queue_t cq = _dispatch_queue_get_current(); - _dispatch_sync_function_invoke_inline(dq, ctxt, func); - if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { - uint64_t dq_state = os_atomic_sub2o(dq, dq_state, - DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); - if (!_dq_state_is_suspended(dq_state)) { - wflags |= DISPATCH_WAKEUP_CONSUME_2; - } + if (unlikely(cq != otq)) { + return otq; } - dx_wakeup(dq, 0, wflags); + if (dq->dq_width == 1) { + return _dispatch_lane_serial_drain(dq, dic, flags, owned); + } + return _dispatch_lane_concurrent_drain(dq, dic, flags, owned); } -// Use for mutation of queue-/source-internal state only -// ignores target queue hierarchy! DISPATCH_NOINLINE void -_dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func, uint32_t flags) +_dispatch_lane_invoke(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags) { - dispatch_tid tid = _dispatch_tid_self(); - uint64_t suspend_count = (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) ? 
1 : 0; - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync_and_suspend(dq, tid, - suspend_count))) { - return _dispatch_barrier_async_detached_f(dq, ctxt, func); - } - if (flags & DISPATCH_BARRIER_TRYSYNC_SUSPEND) { - _dispatch_retain_2(dq); // see _dispatch_queue_suspend + _dispatch_queue_class_invoke(dq, dic, flags, 0, _dispatch_lane_invoke2); +} + +#pragma mark - +#pragma mark dispatch_workloop_t + +#define _dispatch_wl(dwl, qos) os_mpsc(dwl, dwl, s[DISPATCH_QOS_BUCKET(qos)]) +#define _dispatch_workloop_looks_empty(dwl, qos) \ + os_mpsc_looks_empty(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_get_head(dwl, qos) \ + os_mpsc_get_head(_dispatch_wl(dwl, qos)) +#define _dispatch_workloop_pop_head(dwl, qos, dc) \ + os_mpsc_pop_head(_dispatch_wl(dwl, qos), dc, do_next) +#define _dispatch_workloop_push_update_tail(dwl, qos, dou) \ + os_mpsc_push_update_tail(_dispatch_wl(dwl, qos), dou, do_next) +#define _dispatch_workloop_push_update_prev(dwl, qos, prev, dou) \ + os_mpsc_push_update_prev(_dispatch_wl(dwl, qos), prev, dou, do_next) + +dispatch_workloop_t +dispatch_workloop_copy_current(void) +{ + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + _os_object_retain_with_resurrect(dwl->_as_os_obj); + return dwl; } - _dispatch_barrier_trysync_or_async_f_complete(dq, ctxt, func, flags); + return NULL; } -DISPATCH_NOINLINE -static long -_dispatch_trysync_recurse(dispatch_queue_t dq, void *ctxt, - dispatch_function_t f, uintptr_t dc_flags) +bool +dispatch_workloop_is_current(dispatch_workloop_t dwl) { - dispatch_tid tid = _dispatch_tid_self(); - dispatch_queue_t q, tq = dq->do_targetq; - - for (;;) { - if (likely(tq->do_targetq == NULL)) { - _dispatch_sync_invoke_and_complete_recurse(dq, ctxt, f, dc_flags); - return true; - } - if (unlikely(_dispatch_queue_cannot_trysync(tq))) { - for (q = dq; q != tq; q = q->do_targetq) { - _dispatch_queue_atomic_flags_set(q, DQF_CANNOT_TRYSYNC); - } - break; - } - if 
(likely(tq->dq_width == 1)) { - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) { - break; - } - } else { - if (unlikely(!_dispatch_queue_try_reserve_sync_width(tq))) { - break; - } - } - tq = tq->do_targetq; - } - - _dispatch_sync_complete_recurse(dq, tq, dc_flags); - return false; + return _dispatch_get_wlh() == (dispatch_wlh_t)dwl; } -DISPATCH_NOINLINE -long -_dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t f) +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_workloop_role_bits(void) { - dispatch_tid tid = _dispatch_tid_self(); - if (unlikely(!dq->do_targetq)) { - DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); - } - if (unlikely(_dispatch_queue_cannot_trysync(dq))) { - return false; +#if DISPATCH_USE_KEVENT_WORKLOOP + if (likely(_dispatch_kevent_workqueue_enabled)) { + return DISPATCH_QUEUE_ROLE_BASE_WLH; } - if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dq, tid))) { - return false; - } - return _dispatch_trysync_recurse(dq, ctxt, f, DISPATCH_OBJ_BARRIER_BIT); +#endif + return DISPATCH_QUEUE_ROLE_BASE_ANON; } -DISPATCH_NOINLINE -long -_dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t f) +bool +_dispatch_workloop_should_yield_4NW(void) { - if (likely(dq->dq_width == 1)) { - return _dispatch_barrier_trysync_f(dq, ctxt, f); - } - if (unlikely(!dq->do_targetq)) { - DISPATCH_CLIENT_CRASH(dq, "_dispatch_trsync called on a root queue"); - } - if (unlikely(_dispatch_queue_cannot_trysync(dq))) { - return false; - } - if (unlikely(!_dispatch_queue_try_reserve_sync_width(dq))) { - return false; + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(_dispatch_get_wlh()); + if (likely(dwl)) { + return _dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos; } - return _dispatch_trysync_recurse(dq, ctxt, f, 0); + return false; } -#pragma mark - -#pragma mark dispatch_queue_wakeup - DISPATCH_NOINLINE -void -_dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t 
qos, - dispatch_wakeup_flags_t flags) +static dispatch_workloop_t +_dispatch_workloop_create(const char *label, uint64_t dq_state) { - dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_flags_t dqf = DQF_AUTORELEASE_ALWAYS; + dispatch_workloop_t dwl; - if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { - return _dispatch_queue_barrier_complete(dq, qos, flags); + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } } - if (_dispatch_queue_class_probe(dq)) { - target = DISPATCH_QUEUE_WAKEUP_TARGET; + + dq_state |= _dispatch_workloop_role_bits(); + + dwl = _dispatch_queue_alloc(workloop, dqf, 1, dq_state)._dwl; + dwl->dq_label = label; + dwl->do_targetq = _dispatch_get_default_queue(true); + if (!(dq_state & DISPATCH_QUEUE_INACTIVE)) { + dwl->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT | + _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); } - return _dispatch_queue_class_wakeup(dq, qos, flags, target); + _dispatch_object_debug(dwl, "%s", __func__); + return _dispatch_introspection_queue_create(dwl)._dwl; } -#if DISPATCH_COCOA_COMPAT -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) +dispatch_workloop_t +dispatch_workloop_create(const char *label) { -#if TARGET_OS_MAC - return MACH_PORT_VALID(handle); -#elif defined(__linux__) - return handle >= 0; -#else -#error "runloop support not implemented on this platform" -#endif + return _dispatch_workloop_create(label, 0); } -DISPATCH_ALWAYS_INLINE -static inline dispatch_runloop_handle_t -_dispatch_runloop_queue_get_handle(dispatch_queue_t dq) +dispatch_workloop_t +dispatch_workloop_create_inactive(const char *label) { -#if TARGET_OS_MAC - return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); -#elif defined(__linux__) - // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL - return 
((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; -#else -#error "runloop support not implemented on this platform" -#endif + return _dispatch_workloop_create(label, DISPATCH_QUEUE_INACTIVE); +} + +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t dwl, + dispatch_autorelease_frequency_t frequency) +{ + if (frequency == DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM) { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_ALWAYS, DQF_AUTORELEASE_NEVER); + } else { + _dispatch_queue_atomic_flags_set_and_clear(dwl, + DQF_AUTORELEASE_NEVER, DQF_AUTORELEASE_ALWAYS); + } + _dispatch_queue_setter_assert_inactive(dwl); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_runloop_queue_set_handle(dispatch_queue_t dq, dispatch_runloop_handle_t handle) +static void +_dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) { -#if TARGET_OS_MAC - dq->do_ctxt = (void *)(uintptr_t)handle; -#elif defined(__linux__) - // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL - dq->do_ctxt = (void *)(uintptr_t)(handle + 1); -#else -#error "runloop support not implemented on this platform" -#endif + if (dwl->dwl_attr) { + free(dwl->dwl_attr); + } } -#endif // DISPATCH_COCOA_COMPAT DISPATCH_ALWAYS_INLINE -static inline dispatch_qos_t -_dispatch_runloop_queue_reset_max_qos(dispatch_queue_class_t dqu) +static bool +_dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) { - uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | - DISPATCH_QUEUE_RECEIVED_OVERRIDE; - old_state = os_atomic_and_orig2o(dqu._dq, dq_state, ~clear_bits, relaxed); - return _dq_state_max_qos(old_state); + return dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & + (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_POLICY | + DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT)); } void -_dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) 
+dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, + uint64_t flags) { -#if DISPATCH_COCOA_COMPAT - if (slowpath(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { - // - return _dispatch_queue_wakeup(dq, qos, flags); - } + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); - if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { - os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + if (priority) { + dwl->dwl_attr->dwla_sched.sched_priority = priority; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_SCHED; + } else { + dwl->dwl_attr->dwla_sched.sched_priority = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_SCHED; } - if (_dispatch_queue_class_probe(dq)) { - return _dispatch_runloop_queue_poke(dq, qos, flags); + + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } +} + +void +dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, + qos_class_t cls, int relpri, uint64_t flags) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dispatch_qos_t qos = _dispatch_qos_from_qos_class(cls); - qos = _dispatch_runloop_queue_reset_max_qos(dq); if (qos) { - mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); - if (_dispatch_queue_class_probe(dq)) { - _dispatch_runloop_queue_poke(dq, qos, flags); - } - _dispatch_thread_override_end(owner, dq); - return; + dwl->dwl_attr->dwla_pri = _dispatch_priority_make(qos, relpri); + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; + } else { + dwl->dwl_attr->dwla_pri = 0; + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; } - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); + + if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { + 
dwl->dwl_attr->dwla_policy = POLICY_RR; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; + } else { + dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } -#else - return _dispatch_queue_wakeup(dq, qos, flags); -#endif } void -_dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) +dispatch_workloop_set_qos_class(dispatch_workloop_t dwl, + qos_class_t cls, uint64_t flags) { -#if DISPATCH_COCOA_COMPAT - if (_dispatch_queue_is_thread_bound(dq)) { - return _dispatch_runloop_queue_wakeup(dq, qos, flags); - } -#endif - return _dispatch_queue_wakeup(dq, qos, flags); + dispatch_workloop_set_qos_class_floor(dwl, cls, 0, flags); } -#pragma mark - -#pragma mark dispatch root queues poke - -#if DISPATCH_COCOA_COMPAT -static inline void -_dispatch_runloop_queue_class_poke(dispatch_queue_t dq) +void +dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, + uint32_t refillms) { - dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); - if (!_dispatch_runloop_handle_is_valid(handle)) { - return; - } + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); -#if HAVE_MACH - mach_port_t mp = handle; - kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); - switch (kr) { - case MACH_SEND_TIMEOUT: - case MACH_SEND_TIMED_OUT: - case MACH_SEND_INVALID_DEST: - break; - default: - (void)dispatch_assume_zero(kr); - break; + if ((dwl->dwl_attr->dwla_flags & (DISPATCH_WORKLOOP_ATTR_HAS_SCHED | + DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS)) == 0) { + DISPATCH_CLIENT_CRASH(0, "workloop qos class or priority must be " + "set before cpupercent"); } -#elif defined(__linux__) - int result; - do { - result = eventfd_write(handle, 1); - } while (result == -1 && errno == EINTR); - (void)dispatch_assume_zero(result); -#else -#error "runloop support not implemented on this platform" -#endif + + dwl->dwl_attr->dwla_cpupercent.percent = 
percent; + dwl->dwl_attr->dwla_cpupercent.refillms = refillms; + dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; } -DISPATCH_NOINLINE static void -_dispatch_runloop_queue_poke(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags) +_dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, + pthread_attr_t *attr) { - // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have - // a release barrier and that when runloop queues stop being thread-bound - // they have a non optional wake-up to start being a "normal" queue - // either in _dispatch_runloop_queue_xref_dispose, - // or in _dispatch_queue_cleanup2() for the main thread. uint64_t old_state, new_state; + dispatch_queue_global_t dprq; - if (dq == &_dispatch_main_q) { - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - } + dprq = dispatch_pthread_root_queue_create( + "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - new_state = _dq_state_merge_qos(old_state, qos); - if (old_state == new_state) { - os_atomic_rmw_loop_give_up(goto no_change); - } + dwl->do_targetq = dprq->_as_dq; + _dispatch_retain(dprq); + dispatch_release(dprq); + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; + new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); +} - dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); - if (qos > dq_qos) { - mach_port_t owner = _dq_state_drain_owner(new_state); - pthread_priority_t pp = _dispatch_qos_to_pp(qos); - _dispatch_thread_override_start(owner, pp, dq); - if (_dq_state_max_qos(old_state) > dq_qos) { - _dispatch_thread_override_end(owner, dq); +static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + 
.do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; + +static void +_dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) +{ + dispatch_workloop_attr_t dwla = dwl->dwl_attr; + pthread_attr_t attr; + + pthread_attr_init(&attr); + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { + dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { + pthread_attr_setschedparam(&attr, &dwla->dwla_sched); + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. We want this path to always + // be taken on workloops that have a scheduler priority set. 
+ dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { + pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); + } + if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { + pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, + (unsigned long)dwla->dwla_cpupercent.refillms); + } + if (_dispatch_workloop_has_kernel_attributes(dwl)) { + int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); + switch (rv) { + case 0: + dwla->dwla_flags |= DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY; + break; + case ENOTSUP: + /* simulator fallback */ + _dispatch_workloop_activate_simulator_fallback(dwl, &attr); + break; + default: + dispatch_assert_zero(rv); } } -no_change: - _dispatch_runloop_queue_class_poke(dq); - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } + pthread_attr_destroy(&attr); } -#endif -DISPATCH_NOINLINE -static void -_dispatch_global_queue_poke_slow(dispatch_queue_t dq, int n, int floor) +void +_dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) { - dispatch_root_queue_context_t qc = dq->do_ctxt; - int remaining = n; - int r = ENOSYS; + uint64_t dq_state = os_atomic_load2o(dwl, dq_state, relaxed); + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1); - _dispatch_root_queues_init(); - _dispatch_debug_root_queue(dq, __func__); -#if DISPATCH_USE_WORKQUEUES -#if DISPATCH_USE_PTHREAD_POOL - if (qc->dgq_kworkqueue != (void*)(~0ul)) -#endif - { - _dispatch_root_queue_debug("requesting new worker thread for global " - "queue: %p", dq); -#if DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK - if (qc->dgq_kworkqueue) { - pthread_workitem_handle_t wh; - unsigned int gen_cnt; - do { - r = pthread_workqueue_additem_np(qc->dgq_kworkqueue, - _dispatch_worker_thread4, dq, &wh, &gen_cnt); - (void)dispatch_assume_zero(r); - } while (--remaining); - return; + initial_state |= _dispatch_workloop_role_bits(); + + 
if (unlikely(dq_state != initial_state)) { + if (_dq_state_drain_locked(dq_state)) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a locked workloop"); } -#endif // DISPATCH_USE_LEGACY_WORKQUEUE_FALLBACK -#if HAVE_PTHREAD_WORKQUEUE_QOS - r = _pthread_workqueue_addthreads(remaining, - _dispatch_priority_to_pp(dq->dq_priority)); -#elif DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP - r = pthread_workqueue_addthreads_np(qc->dgq_wq_priority, - qc->dgq_wq_options, remaining); +#ifndef __LP64__ + dq_state >>= 32; #endif - (void)dispatch_assume_zero(r); - return; + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "Release of a workloop with corrupt state"); } -#endif // DISPATCH_USE_WORKQUEUES -#if DISPATCH_USE_PTHREAD_POOL - dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; - if (fastpath(pqc->dpq_thread_mediator.do_vtable)) { - while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { - _dispatch_root_queue_debug("signaled sleeping worker for " - "global queue: %p", dq); - if (!--remaining) { - return; - } + + _dispatch_object_debug(dwl, "%s", __func__); + _dispatch_introspection_queue_dispose(dwl); + + for (size_t i = 0; i < countof(dwl->dwl_tails); i++) { + if (unlikely(dwl->dwl_tails[i])) { + DISPATCH_CLIENT_CRASH(dwl->dwl_tails[i], + "Release of a workloop while items are enqueued"); } + // trash the queue so that use after free will crash + dwl->dwl_tails[i] = (void *)0x200; + dwl->dwl_heads[i] = (void *)0x200; } - bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - if (overcommit) { - os_atomic_add2o(qc, dgq_pending, remaining, relaxed); - } else { - if (!os_atomic_cmpxchg2o(qc, dgq_pending, 0, remaining, relaxed)) { - _dispatch_root_queue_debug("worker thread request still pending for " - "global queue: %p", dq); - return; + if (dwl->dwl_timer_heap) { + for (size_t i = 0; i < DISPATCH_TIMER_WLH_COUNT; i++) { + dispatch_assert(dwl->dwl_timer_heap[i].dth_count == 0); } + free(dwl->dwl_timer_heap); + dwl->dwl_timer_heap = 
NULL; } - int32_t can_request, t_count; - // seq_cst with atomic store to tail - t_count = os_atomic_load2o(qc, dgq_thread_pool_size, ordered); - do { - can_request = t_count < floor ? 0 : t_count - floor; - if (remaining > can_request) { - _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", - remaining, can_request); - os_atomic_sub2o(qc, dgq_pending, remaining - can_request, relaxed); - remaining = can_request; - } - if (remaining == 0) { - _dispatch_root_queue_debug("pthread pool is full for root queue: " - "%p", dq); - return; - } - } while (!os_atomic_cmpxchgvw2o(qc, dgq_thread_pool_size, t_count, - t_count - remaining, &t_count, acquire)); - - pthread_attr_t *attr = &pqc->dpq_thread_attr; - pthread_t tid, *pthr = &tid; -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES - if (slowpath(dq == &_dispatch_mgr_root_queue)) { - pthr = _dispatch_mgr_root_queue_init(); + if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & + DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) { + (void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl)); } -#endif - do { - _dispatch_retain(dq); // released in _dispatch_worker_thread - while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { - if (r != EAGAIN) { - (void)dispatch_assume_zero(r); - } - _dispatch_temporary_resource_shortage(); - } - } while (--remaining); -#endif // DISPATCH_USE_PTHREAD_POOL + _dispatch_workloop_attributes_dispose(dwl); + _dispatch_queue_dispose(dwl, allow_free); } -DISPATCH_NOINLINE void -_dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor) -{ - if (!_dispatch_queue_class_probe(dq)) { - return; - } -#if DISPATCH_USE_WORKQUEUES - dispatch_root_queue_context_t qc = dq->do_ctxt; - if ( -#if DISPATCH_USE_PTHREAD_POOL - (qc->dgq_kworkqueue != (void*)(~0ul)) && -#endif - !os_atomic_cmpxchg2o(qc, dgq_pending, 0, n, relaxed)) { - _dispatch_root_queue_debug("worker thread request still pending for " - "global queue: %p", dq); 
+_dispatch_workloop_activate(dispatch_workloop_t dwl) +{ + uint64_t dq_state = os_atomic_and_orig2o(dwl, dq_state, + ~DISPATCH_QUEUE_INACTIVE, relaxed); + + if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) { + if (dwl->dwl_attr) { + // Activation of a workloop with attributes forces us to create + // the workloop up front and register the attributes with the + // kernel. + _dispatch_workloop_activate_attributes(dwl); + } + if (!dwl->dq_priority) { + dwl->dq_priority = + _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); + } + dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_NEEDS_ACTIVATION, + relaxed); + _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); return; } -#endif // DISPATCH_USE_WORKQUEUES - return _dispatch_global_queue_poke_slow(dq, n, floor); } -#pragma mark - -#pragma mark dispatch_queue_drain - -void -_dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, dispatch_queue_t dq) +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, + dispatch_qos_t qos) { - _dispatch_continuation_pop_inline(dou, dic, flags, dq); -} + uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); -void -_dispatch_continuation_invoke(dispatch_object_t dou, voucher_t ov, - dispatch_invoke_flags_t flags) -{ - _dispatch_continuation_invoke_inline(dou, ov, flags); -} + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { + os_atomic_rmw_loop_give_up(return true); + } -DISPATCH_NOINLINE -static void -_dispatch_return_to_kernel(void) -{ - if (unlikely(_dispatch_get_wlh() == DISPATCH_WLH_ANON)) { - _dispatch_clear_return_to_kernel(); - } else { - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dwl, dq_state, 
DISPATCH_QUEUE_DIRTY, acquire); + return false; + }); + } + + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state |= qos_bits; + }); + + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (likely(ddi)) { + ddi->ddi_wlh_needs_update = true; + _dispatch_return_to_kernel(); } + return true; } -void -_dispatch_poll_for_events_4launchd(void) +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_workloop_invoke2(dispatch_workloop_t dwl, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, + uint64_t *owned) { -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_get_wlh()) { - dispatch_assert(_dispatch_deferred_items_get()->ddi_wlh_servicing); - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + dispatch_thread_frame_s dtf; + struct dispatch_object_s *dc = NULL, *next_dc; + + _dispatch_thread_frame_push(&dtf, dwl); + + for (;;) { + dispatch_qos_t qos; + for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) { + if (!_dispatch_workloop_looks_empty(dwl, qos)) break; + } + if (qos < DISPATCH_QOS_MIN) { + break; + } + if (unlikely(!_dispatch_workloop_try_lower_max_qos(dwl, qos))) { + continue; + } + dwl->dwl_drained_qos = (uint8_t)qos; + + dc = _dispatch_workloop_get_head(dwl, qos); + do { + if (_dispatch_object_is_sync_waiter(dc)) { + dic->dic_barrier_waiter_bucket = qos; + dic->dic_barrier_waiter = dc; + dwl->dwl_drained_qos = DISPATCH_QOS_UNSPECIFIED; + goto out_with_barrier_waiter; + } + next_dc = _dispatch_workloop_pop_head(dwl, qos, dc); + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + + _dispatch_continuation_pop_inline(dc, dic, flags, dwl); + qos = dwl->dwl_drained_qos; + } while ((dc = next_dc) && (_dispatch_queue_max_qos(dwl) <= qos)); } -#endif -} -#if HAVE_PTHREAD_WORKQUEUE_NARROWING -static os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_MAX]; -#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS -static uint64_t 
_dispatch_narrow_check_interval_cache; -#endif + *owned = (*owned & DISPATCH_QUEUE_ENQUEUED) + + DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; + _dispatch_thread_frame_pop(&dtf); + return NULL; -DISPATCH_ALWAYS_INLINE -static inline uint64_t -_dispatch_narrow_check_interval(void) +out_with_barrier_waiter: + _dispatch_thread_frame_pop(&dtf); + return dwl->do_targetq; +} + +void +_dispatch_workloop_invoke(dispatch_workloop_t dwl, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { -#if DISPATCH_TIME_UNIT_USES_NANOSECONDS - return 50 * NSEC_PER_MSEC; -#else - if (_dispatch_narrow_check_interval_cache == 0) { - _dispatch_narrow_check_interval_cache = - _dispatch_time_nano2mach(50 * NSEC_PER_MSEC); - } - return _dispatch_narrow_check_interval_cache; -#endif + flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; + flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN; + _dispatch_queue_class_invoke(dwl, dic, flags, 0,_dispatch_workloop_invoke2); } DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_queue_drain_init_narrowing_check_deadline(dispatch_invoke_context_t dic, - dispatch_priority_t pri) +static bool +_dispatch_workloop_probe(dispatch_workloop_t dwl) { - if (_dispatch_priority_qos(pri) && - !(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { - dic->dic_next_narrow_check = _dispatch_approximate_time() + - _dispatch_narrow_check_interval(); + dispatch_qos_t qos; + for (qos = DISPATCH_QOS_MAX; qos >= DISPATCH_QOS_MIN; qos--) { + if (!_dispatch_workloop_looks_empty(dwl, qos)) return true; } + return false; } DISPATCH_NOINLINE -static bool -_dispatch_queue_drain_should_narrow_slow(uint64_t now, - dispatch_invoke_context_t dic) +static void +_dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, + struct dispatch_object_s *dc, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags, uint64_t enqueued_bits) { - if (dic->dic_next_narrow_check != DISPATCH_THREAD_IS_NARROWING) { - pthread_priority_t pp = _dispatch_get_priority(); - 
dispatch_qos_t qos = _dispatch_qos_from_pp(pp); - if (unlikely(!qos || qos > countof(_dispatch_narrowing_deadlines))) { - DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); - } - size_t idx = qos - 1; // no entry needed for DISPATCH_QOS_UNSPECIFIED - os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; - uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + uint64_t next_owner = 0, old_state, new_state; + bool has_more_work; - dic->dic_next_narrow_check = newval; - os_atomic_rmw_loop(deadline, oldval, newval, relaxed, { - if (now < oldval) { - os_atomic_rmw_loop_give_up(return false); - } - }); + next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter); + has_more_work = (_dispatch_workloop_pop_head(dwl, qos, dc) != NULL); - if (!_pthread_workqueue_should_narrow(pp)) { - return false; - } - dic->dic_next_narrow_check = DISPATCH_THREAD_IS_NARROWING; +transfer_lock_again: + if (!has_more_work) { + has_more_work = _dispatch_workloop_probe(dwl); } - return true; -} -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) -{ - uint64_t next_check = dic->dic_next_narrow_check; - if (unlikely(next_check)) { - uint64_t now = _dispatch_approximate_time(); - if (unlikely(next_check < now)) { - return _dispatch_queue_drain_should_narrow_slow(now, dic); + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = old_state; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_DIRTY; + new_state |= next_owner; + + if (likely(_dq_state_is_base_wlh(old_state))) { + new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; + if (has_more_work) { + // we know there's a next item, keep the enqueued bit if any + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + goto transfer_lock_again; + }); + } 
else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } + } else { + new_state -= enqueued_bits; } - } - return false; + }); + + return _dispatch_barrier_waiter_redirect_or_wake(dwl, dc, flags, + old_state, new_state); } -#else -#define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) -#define _dispatch_queue_drain_should_narrow(dic) false -#endif -/* - * Drain comes in 2 flavours (serial/concurrent) and 2 modes - * (redirecting or not). - * - * Serial - * ~~~~~~ - * Serial drain is about serial queues (width == 1). It doesn't support - * the redirecting mode, which doesn't make sense, and treats all continuations - * as barriers. Bookkeeping is minimal in serial flavour, most of the loop - * is optimized away. - * - * Serial drain stops if the width of the queue grows to larger than 1. - * Going through a serial drain prevents any recursive drain from being - * redirecting. - * - * Concurrent - * ~~~~~~~~~~ - * When in non-redirecting mode (meaning one of the target queues is serial), - * non-barriers and barriers alike run in the context of the drain thread. - * Slow non-barrier items are still all signaled so that they can make progress - * toward the dispatch_sync() that will serialize them all . - * - * In redirecting mode, non-barrier work items are redirected downward. - * - * Concurrent drain stops if the width of the queue becomes 1, so that the - * queue drain moves to the more efficient serial mode. 
- */ -DISPATCH_ALWAYS_INLINE -static dispatch_queue_wakeup_target_t -_dispatch_queue_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, uint64_t *owned_ptr, bool serial_drain) +static void +_dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { - dispatch_queue_t orig_tq = dq->do_targetq; - dispatch_thread_frame_s dtf; - struct dispatch_object_s *dc = NULL, *next_dc; - uint64_t dq_state, owned = *owned_ptr; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_qos_t wl_qos; - if (unlikely(!dq->dq_items_tail)) return NULL; +again: + for (wl_qos = DISPATCH_QOS_MAX; wl_qos >= DISPATCH_QOS_MIN; wl_qos--) { + struct dispatch_object_s *dc; - _dispatch_thread_frame_push(&dtf, dq); - if (serial_drain || _dq_state_is_in_barrier(owned)) { - // we really own `IN_BARRIER + dq->dq_width * WIDTH_INTERVAL` - // but width can change while draining barrier work items, so we only - // convert to `dq->dq_width * WIDTH_INTERVAL` when we drop `IN_BARRIER` - owned = DISPATCH_QUEUE_IN_BARRIER; - } else { - owned &= DISPATCH_QUEUE_WIDTH_MASK; + if (_dispatch_workloop_looks_empty(dwl, wl_qos)) continue; + dc = _dispatch_workloop_get_head(dwl, wl_qos); + + if (_dispatch_object_is_waiter(dc)) { + return _dispatch_workloop_drain_barrier_waiter(dwl, dc, wl_qos, + flags, 0); + } + + // We have work to do, we need to wake up + target = DISPATCH_QUEUE_WAKEUP_TARGET; } - dc = _dispatch_queue_head(dq); - goto first_iteration; + if (unlikely(target && !(flags & DISPATCH_WAKEUP_CONSUME_2))) { + _dispatch_retain_2(dwl); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } - for (;;) { - dc = next_dc; - if (unlikely(dic->dic_deferred)) { - goto out_with_deferred_compute_owned; - } - if (unlikely(_dispatch_needs_to_return_to_kernel())) { - _dispatch_return_to_kernel(); - } - if (unlikely(!dc)) { - if (!dq->dq_items_tail) { - break; - } - dc = _dispatch_queue_head(dq); - } - if 
(unlikely(serial_drain != (dq->dq_width == 1))) { - break; - } - if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { - break; - } + uint64_t old_state, new_state; -first_iteration: - dq_state = os_atomic_load(&dq->dq_state, relaxed); - if (unlikely(_dq_state_is_suspended(dq_state))) { - break; - } - if (unlikely(orig_tq != dq->do_targetq)) { - break; - } - - if (serial_drain || _dispatch_object_is_barrier(dc)) { - if (!serial_drain && owned != DISPATCH_QUEUE_IN_BARRIER) { - if (!_dispatch_queue_try_upgrade_full_width(dq, owned)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_IN_BARRIER; - } - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_sync_waiter(dc)) { - owned = 0; - dic->dic_deferred = dc; - goto out_with_deferred; - } - } else { - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // we just ran barrier work items, we have to make their - // effect visible to other sync work items on other threads - // that may start coming in after this point, hence the - // release barrier - os_atomic_xor2o(dq, dq_state, owned, release); - owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } else if (unlikely(owned == 0)) { - if (_dispatch_object_is_sync_waiter(dc)) { - // sync "readers" don't observe the limit - _dispatch_queue_reserve_sync_width(dq); - } else if (!_dispatch_queue_try_acquire_async(dq)) { - goto out_with_no_width; - } - owned = DISPATCH_QUEUE_WIDTH_INTERVAL; - } - - next_dc = _dispatch_queue_next(dq, dc); - if (_dispatch_object_is_sync_waiter(dc)) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_sync_waiter_redirect_or_wake(dq, - DISPATCH_SYNC_WAITER_NO_UNLOCK, dc); - continue; - } - - if (flags & DISPATCH_INVOKE_REDIRECTING_DRAIN) { - owned -= DISPATCH_QUEUE_WIDTH_INTERVAL; - _dispatch_continuation_redirect(dq, dc); - continue; - } + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + new_state -= DISPATCH_QUEUE_IN_BARRIER; + new_state -= 
DISPATCH_QUEUE_WIDTH_INTERVAL; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + if (target) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } else if (unlikely(_dq_state_is_dirty(old_state))) { + os_atomic_rmw_loop_give_up({ + // just renew the drain lock with an acquire barrier, to see + // what the enqueuer that set DIRTY has done. + // the xor generates better assembly as DISPATCH_QUEUE_DIRTY + // is already in a register + os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + goto again; + }); + } else if (likely(_dq_state_is_base_wlh(old_state))) { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + new_state &= ~DISPATCH_QUEUE_ENQUEUED; + } else { + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; } + }); + dispatch_assert(_dq_state_drain_locked_by_self(old_state)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(old_state)); - _dispatch_continuation_pop_inline(dc, dic, flags, dq); + if (_dq_state_is_enqueued(new_state)) { + _dispatch_trace_runtime_event(sync_async_handoff, dwl, 0); } - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // if we're IN_BARRIER we really own the full width too - owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; - } - if (dc) { - owned = _dispatch_queue_adjust_owned(dq, owned, dc); - } - *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; - *owned_ptr |= owned; - _dispatch_thread_frame_pop(&dtf); - return dc ? dq->do_targetq : NULL; -out_with_no_width: - *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; - _dispatch_thread_frame_pop(&dtf); - return DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + if (_dq_state_received_override(old_state)) { + // Ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_basepri_override_qos(_dq_state_max_qos(old_state)); + } -out_with_deferred_compute_owned: - if (serial_drain) { - owned = DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; - } else { - if (owned == DISPATCH_QUEUE_IN_BARRIER) { - // if we're IN_BARRIER we really own the full width too - owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; + if (target) { + if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) { + dispatch_assert(_dq_state_is_enqueued(new_state)); + dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); + return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state); } - if (dc) { - owned = _dispatch_queue_adjust_owned(dq, owned, dc); +#if HAVE_PTHREAD_WORKQUEUE_QOS + // when doing sync to async handoff + // if the queue received an override we have to forecefully redrive + // the same override so that a new stealer is enqueued because + // the previous one may be gone already + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dwl, new_state, flags); } +#endif } -out_with_deferred: - *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; - *owned_ptr |= owned; - if (unlikely(flags & DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS)) { - DISPATCH_INTERNAL_CRASH(dc, - "Deferred continuation on source, mach channel or mgr"); - } - _dispatch_thread_frame_pop(&dtf); - return dq->do_targetq; -} -DISPATCH_NOINLINE -static dispatch_queue_wakeup_target_t -_dispatch_queue_concurrent_drain(dispatch_queue_t dq, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t *owned) -{ - return _dispatch_queue_drain(dq, dic, flags, owned, false); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dwl); + } } -DISPATCH_NOINLINE -dispatch_queue_wakeup_target_t -_dispatch_queue_serial_drain(dispatch_queue_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, uint64_t *owned) +#if HAVE_PTHREAD_WORKQUEUE_QOS +static void 
+_dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { - flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; - return _dispatch_queue_drain(dq, dic, flags, owned, true); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_NO_INTROSPECTION; + _dispatch_continuation_pop_forwarded(dc, dc_flags, NULL, { + dispatch_queue_t dq = dc->dc_data; + dx_invoke(dq, dic, flags | DISPATCH_INVOKE_STEALING); + }); } -#if DISPATCH_COCOA_COMPAT DISPATCH_NOINLINE static void -_dispatch_main_queue_update_priority_from_thread(void) +_dispatch_workloop_push_stealer(dispatch_workloop_t dwl, dispatch_queue_t dq, + dispatch_qos_t qos) { - dispatch_queue_t dq = &_dispatch_main_q; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - mach_port_t owner = _dq_state_drain_owner(dq_state); + dispatch_continuation_t dc = _dispatch_continuation_alloc(); - dispatch_priority_t main_pri = - _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); - dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); - dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); - dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); + dc->do_vtable = DC_VTABLE(WORKLOOP_STEALING); + _dispatch_retain_2(dq); + dc->dc_func = NULL; + dc->dc_ctxt = dc; + dc->dc_other = NULL; + dc->dc_data = dq; + dc->dc_priority = DISPATCH_NO_PRIORITY; + dc->dc_voucher = DISPATCH_NO_VOUCHER; + _dispatch_workloop_push(dwl, dc, qos); +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS - // the main thread QoS was adjusted by someone else, learn the new QoS - // and reinitialize _dispatch_main_q.dq_priority - dq->dq_priority = _dispatch_priority_with_override_qos(main_pri, main_qos); +void +_dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + return _dispatch_workloop_barrier_complete(dwl, qos, flags); + } - if (old_qos < 
max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { - // main thread is opted out of QoS and we had an override - return _dispatch_thread_override_end(owner, dq); + if (unlikely(!(flags & DISPATCH_WAKEUP_CONSUME_2))) { + DISPATCH_INTERNAL_CRASH(flags, "Invalid way to wake up a workloop"); } - if (old_qos < max_qos && max_qos <= main_qos) { - // main QoS was raised, and we had an override which is now useless - return _dispatch_thread_override_end(owner, dq); + if (unlikely(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + goto done; } - if (main_qos < max_qos && max_qos <= old_qos) { - // main thread QoS was lowered, and we actually need an override - pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); - return _dispatch_thread_override_start(owner, pp, dq); + uint64_t old_state, new_state; + + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + if (_dq_state_max_qos(new_state)) { + new_state |= DISPATCH_QUEUE_ENQUEUED; + } + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (new_state == old_state) { + os_atomic_rmw_loop_give_up(goto done); + } + }); + + if (unlikely(_dq_state_is_suspended(old_state))) { +#ifndef __LP64__ + old_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); + } + if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) { + return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (likely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + return _dispatch_queue_wakeup_with_override(dwl, new_state, flags); } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS +done: + return _dispatch_release_2_tailcall(dwl); } +DISPATCH_NOINLINE static void -_dispatch_main_queue_drain(void) +_dispatch_workloop_push_waiter(dispatch_workloop_t dwl, + dispatch_sync_context_t dsc, dispatch_qos_t qos) { - dispatch_queue_t dq = &_dispatch_main_q; - dispatch_thread_frame_s 
dtf; - - if (!dq->dq_items_tail) { - return; - } + struct dispatch_object_s *prev, *dc = (struct dispatch_object_s *)dsc; - _dispatch_perfmon_start_notrace(); - if (!fastpath(_dispatch_queue_is_thread_bound(dq))) { - DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" - " after dispatch_main()"); + dispatch_priority_t p = _dispatch_priority_from_pp(dsc->dc_priority); + if (qos < _dispatch_priority_qos(p)) { + qos = _dispatch_priority_qos(p); } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "_dispatch_main_queue_callback_4CF called" - " from the wrong thread"); + if (qos == DISPATCH_QOS_UNSPECIFIED) { + qos = DISPATCH_QOS_DEFAULT; } - dispatch_once_f(&_dispatch_main_q_handle_pred, dq, - _dispatch_runloop_queue_handle_init); - - // hide the frame chaining when CFRunLoop - // drains the main runloop, as this should not be observable that way - _dispatch_adopt_wlh_anon(); - _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); - - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_priority_t pri = _dispatch_priority_from_pp(pp); - dispatch_qos_t qos = _dispatch_priority_qos(pri); - voucher_t voucher = _voucher_copy(); + prev = _dispatch_workloop_push_update_tail(dwl, qos, dc); + _dispatch_workloop_push_update_prev(dwl, qos, prev, dc); + if (likely(!os_mpsc_push_was_empty(prev))) return; - if (unlikely(qos != _dispatch_priority_qos(dq->dq_priority))) { - _dispatch_main_queue_update_priority_from_thread(); - } - dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + uint64_t set_owner_and_set_full_width_and_in_barrier = + _dispatch_lock_value_for_self() | + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + uint64_t old_state, new_state; - dispatch_invoke_context_s dic = { }; - struct dispatch_object_s *dc, *next_dc, *tail; - dc = 
os_mpsc_capture_snapshot(dq, dq_items, &tail); - do { - next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); - _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); - } while ((dc = next_dc)); + os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + new_state = _dq_state_merge_qos(old_state, qos); + new_state |= DISPATCH_QUEUE_DIRTY; + if (unlikely(_dq_state_drain_locked(old_state))) { + // not runnable, so we should just handle overrides + } else if (_dq_state_is_enqueued(old_state)) { + // 32123779 let the event thread redrive since it's out already + } else { + // see _dispatch_queue_drain_try_lock + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width_and_in_barrier; + } + }); - dx_wakeup(dq, 0, 0); - _dispatch_voucher_debug("main queue restore", voucher); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_reset_priority_and_voucher(pp, voucher); - _dispatch_thread_frame_pop(&dtf); - _dispatch_reset_wlh(); - _dispatch_force_cache_cleanup(); - _dispatch_perfmon_end_notrace(); -} + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); -static bool -_dispatch_runloop_queue_drain_one(dispatch_queue_t dq) -{ - if (!dq->dq_items_tail) { - return false; + if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + return _dispatch_workloop_barrier_complete(dwl, qos, 0); } - _dispatch_perfmon_start_notrace(); - dispatch_thread_frame_s dtf; - bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse(); - _dispatch_thread_frame_push(&dtf, dq); - pthread_priority_t pp = _dispatch_get_priority(); - dispatch_priority_t pri = _dispatch_priority_from_pp(pp); - voucher_t voucher = _voucher_copy(); - dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - - dispatch_invoke_context_s dic = { }; - struct dispatch_object_s *dc, *next_dc; - dc = _dispatch_queue_head(dq); - next_dc = 
_dispatch_queue_next(dq, dc); - _dispatch_continuation_pop_inline(dc, &dic, DISPATCH_INVOKE_NONE, dq); - - if (!next_dc) { - dx_wakeup(dq, 0, 0); +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { + if (_dq_state_should_override(new_state)) { + return _dispatch_queue_wakeup_with_override(dwl, new_state, 0); + } } - - _dispatch_voucher_debug("runloop queue restore", voucher); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_reset_priority_and_voucher(pp, voucher); - _dispatch_thread_frame_pop(&dtf); - if (should_reset_wlh) _dispatch_reset_wlh(); - _dispatch_force_cache_cleanup(); - _dispatch_perfmon_end_notrace(); - return next_dc; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS } -#endif void -_dispatch_mgr_queue_drain(void) +_dispatch_workloop_push(dispatch_workloop_t dwl, dispatch_object_t dou, + dispatch_qos_t qos) { - const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; - dispatch_invoke_context_s dic = { }; - dispatch_queue_t dq = &_dispatch_mgr_q; - uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + struct dispatch_object_s *prev; - if (dq->dq_items_tail) { - _dispatch_perfmon_start(); - _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - if (slowpath(_dispatch_queue_serial_drain(dq, &dic, flags, &owned))) { - DISPATCH_INTERNAL_CRASH(0, "Interrupted drain on manager queue"); - } - _dispatch_voucher_debug("mgr queue clear", NULL); - _voucher_clear(); - _dispatch_reset_basepri_override(); - _dispatch_perfmon_end(perfmon_thread_manager); + if (unlikely(_dispatch_object_is_waiter(dou))) { + return _dispatch_workloop_push_waiter(dwl, dou._dsc, qos); } -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (!_dispatch_kevent_workqueue_enabled) -#endif - { - _dispatch_force_cache_cleanup(); + if (qos < _dispatch_priority_qos(dwl->dq_priority)) { + qos = _dispatch_priority_qos(dwl->dq_priority); } -} - -#pragma mark - -#pragma mark dispatch_queue_invoke - -void 
-_dispatch_queue_drain_sync_waiter(dispatch_queue_t dq, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t owned) -{ - struct dispatch_object_s *dc = dic->dic_deferred; - dispatch_assert(_dispatch_object_is_sync_waiter(dc)); - dic->dic_deferred = NULL; - if (flags & DISPATCH_INVOKE_WLH) { - // Leave the enqueued bit in place, completion of the last sync waiter - // in the handoff chain is responsible for dequeuing - // - // We currently have a +2 to consume, but we need to keep a +1 - // for the thread request - dispatch_assert(_dq_state_is_enqueued_on_target(owned)); - dispatch_assert(!_dq_state_is_enqueued_on_manager(owned)); - owned &= ~DISPATCH_QUEUE_ENQUEUED; - _dispatch_release_no_dispose(dq); - } else { - // The sync waiter must own a reference - _dispatch_release_2_no_dispose(dq); + if (qos == DISPATCH_QOS_UNSPECIFIED) { + qos = _dispatch_priority_fallback_qos(dwl->dq_priority); } - return _dispatch_sync_waiter_redirect_or_wake(dq, owned, dc); -} - -void -_dispatch_queue_finalize_activation(dispatch_queue_t dq, - DISPATCH_UNUSED bool *allow_resume) -{ - dispatch_queue_t tq = dq->do_targetq; - _dispatch_queue_priority_inherit_from_target(dq, tq); - _dispatch_queue_inherit_wlh_from_target(dq, tq); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_queue_wakeup_target_t -dispatch_queue_invoke2(dispatch_queue_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags, uint64_t *owned) -{ - dispatch_queue_t otq = dq->do_targetq; - dispatch_queue_t cq = _dispatch_queue_get_current(); - - if (slowpath(cq != otq)) { - return otq; + prev = _dispatch_workloop_push_update_tail(dwl, qos, dou._do); + if (unlikely(os_mpsc_push_was_empty(prev))) { + _dispatch_retain_2_unsafe(dwl); } - if (dq->dq_width == 1) { - return _dispatch_queue_serial_drain(dq, dic, flags, owned); + _dispatch_workloop_push_update_prev(dwl, qos, prev, dou._do); + if (unlikely(os_mpsc_push_was_empty(prev))) { + return _dispatch_workloop_wakeup(dwl, qos, 
DISPATCH_WAKEUP_CONSUME_2 | + DISPATCH_WAKEUP_MAKE_DIRTY); } - return _dispatch_queue_concurrent_drain(dq, dic, flags, owned); -} - -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -DISPATCH_NOINLINE -void -_dispatch_queue_invoke(dispatch_queue_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags) -{ - _dispatch_queue_class_invoke(dq, dic, flags, 0, dispatch_queue_invoke2); } #pragma mark - -#pragma mark dispatch_queue_class_wakeup +#pragma mark dispatch queue/lane push & wakeup #if HAVE_PTHREAD_WORKQUEUE_QOS -void +static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) { dispatch_queue_t old_rq = _dispatch_queue_get_current(); - dispatch_queue_t assumed_rq = dc->dc_other; + dispatch_queue_global_t assumed_rq = dc->dc_other; dispatch_priority_t old_dp; - voucher_t ov = DISPATCH_NO_VOUCHER; dispatch_object_t dou; + uintptr_t dc_flags = DC_FLAG_CONSUME; dou._do = dc->dc_data; old_dp = _dispatch_root_queue_identity_assume(assumed_rq); if (dc_type(dc) == DISPATCH_CONTINUATION_TYPE(OVERRIDE_STEALING)) { flags |= DISPATCH_INVOKE_STEALING; - } else { - // balance the fake continuation push in - // _dispatch_root_queue_push_override - _dispatch_trace_continuation_pop(assumed_rq, dou._do); + dc_flags |= DC_FLAG_NO_INTROSPECTION; } - _dispatch_continuation_pop_forwarded(dc, ov, DISPATCH_OBJ_CONSUME_BIT, { + _dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._do, dic, flags); + dx_invoke(dou._dq, dic, flags); } else { - _dispatch_continuation_invoke_inline(dou, ov, flags); + _dispatch_continuation_invoke_inline(dou, flags, assumed_rq); } }); _dispatch_reset_basepri(old_dp); @@ -5227,33 +4527,25 @@ _dispatch_queue_override_invoke(dispatch_continuation_t dc, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_root_queue_push_needs_override(dispatch_queue_t rq, 
+_dispatch_root_queue_push_needs_override(dispatch_queue_global_t rq, dispatch_qos_t qos) { - dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority); - bool defaultqueue = rq->dq_priority & DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE; - - if (unlikely(!rqos)) return false; - - return defaultqueue ? qos && qos != rqos : qos > rqos; -} + dispatch_qos_t fallback = _dispatch_priority_fallback_qos(rq->dq_priority); + if (fallback) { + return qos && qos != fallback; + } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_root_queue_push_queue_override_needed(dispatch_queue_t rq, - dispatch_qos_t qos) -{ - // for root queues, the override is the guaranteed minimum override level - return qos > _dispatch_priority_override_qos(rq->dq_priority); + dispatch_qos_t rqos = _dispatch_priority_qos(rq->dq_priority); + return rqos && qos > rqos; } DISPATCH_NOINLINE static void -_dispatch_root_queue_push_override(dispatch_queue_t orig_rq, +_dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq, dispatch_object_t dou, dispatch_qos_t qos) { bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = dou._dc; if (_dispatch_object_is_redirection(dc)) { @@ -5263,8 +4555,6 @@ _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, } else { dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING); - // fake that we queued `dou` on `orig_rq` for introspection purposes - _dispatch_trace_continuation_push(orig_rq, dou); dc->dc_ctxt = dc; dc->dc_other = orig_rq; dc->dc_data = dou._do; @@ -5276,11 +4566,11 @@ _dispatch_root_queue_push_override(dispatch_queue_t orig_rq, DISPATCH_NOINLINE static void -_dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, +_dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq, dispatch_queue_t dq, 
dispatch_qos_t qos) { bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_t rq = _dispatch_get_root_queue(qos, overcommit); + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); @@ -5296,7 +4586,7 @@ _dispatch_root_queue_push_override_stealer(dispatch_queue_t orig_rq, DISPATCH_NOINLINE static void -_dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, +_dispatch_queue_wakeup_with_override_slow(dispatch_queue_t dq, uint64_t dq_state, dispatch_wakeup_flags_t flags) { dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state); @@ -5314,14 +4604,14 @@ _dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, tq = dq->do_targetq; - if (likely(!_dispatch_queue_is_legacy(dq))) { + if (likely(!_dispatch_queue_is_mutable(dq))) { locked = false; } else if (_dispatch_is_in_root_queues_array(tq)) { // avoid locking when we recognize the target queue as a global root // queue it is gross, but is a very common case. The locking isn't // needed because these target queues cannot go away. locked = false; - } else if (_dispatch_queue_sidelock_trylock(dq, qos)) { + } else if (_dispatch_queue_sidelock_trylock(upcast(dq)._dl, qos)) { // to traverse the tq chain safely we must // lock it to ensure it cannot change locked = true; @@ -5357,7 +4647,7 @@ _dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, // This drainer must have seen the effects of (2) and that guy has // applied our override. Our job is done. // - // - Another instance of _dispatch_queue_class_wakeup_with_override(), + // - Another instance of _dispatch_queue_wakeup_with_override_slow(), // which is fine because trylock leaves a hint that we failed our // trylock, causing the tryunlock below to fail and reassess whether // a better override needs to be applied. 
@@ -5368,20 +4658,26 @@ _dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, apply_again: if (dx_hastypeflag(tq, QUEUE_ROOT)) { - if (_dispatch_root_queue_push_queue_override_needed(tq, qos)) { - _dispatch_root_queue_push_override_stealer(tq, dq, qos); + dispatch_queue_global_t rq = upcast(tq)._dgq; + if (qos > _dispatch_priority_qos(rq->dq_priority)) { + _dispatch_root_queue_push_override_stealer(rq, dq, qos); } + } else if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { + _dispatch_workloop_push_stealer(upcast(tq)._dwl, dq, qos); } else if (_dispatch_queue_need_override(tq, qos)) { dx_wakeup(tq, qos, 0); } - while (unlikely(locked && !_dispatch_queue_sidelock_tryunlock(dq))) { + if (likely(!locked)) { + goto out; + } + while (unlikely(!_dispatch_queue_sidelock_tryunlock(upcast(dq)._dl))) { // rdar://problem/24081326 // - // Another instance of _dispatch_queue_class_wakeup_with_override() - // tried to acquire the side lock while we were running, and could have + // Another instance of _dispatch_queue_wakeup_with_override() tried + // to acquire the side lock while we were running, and could have // had a better override than ours to apply. 
// - oqos = _dq_state_max_qos(os_atomic_load2o(dq, dq_state, relaxed)); + oqos = _dispatch_queue_max_qos(dq); if (oqos > qos) { qos = oqos; // The other instance had a better priority than ours, override @@ -5397,87 +4693,23 @@ _dispatch_queue_class_wakeup_with_override_slow(dispatch_queue_t dq, } } - DISPATCH_ALWAYS_INLINE static inline void -_dispatch_queue_class_wakeup_with_override(dispatch_queue_t dq, +_dispatch_queue_wakeup_with_override(dispatch_queue_class_t dq, uint64_t dq_state, dispatch_wakeup_flags_t flags) { dispatch_assert(_dq_state_should_override(dq_state)); - return _dispatch_queue_class_wakeup_with_override_slow(dq, dq_state, flags); + return _dispatch_queue_wakeup_with_override_slow(dq._dq, dq_state, flags); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS DISPATCH_NOINLINE void -_dispatch_root_queue_push(dispatch_queue_t rq, dispatch_object_t dou, - dispatch_qos_t qos) -{ -#if DISPATCH_USE_KEVENT_WORKQUEUE - dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); - if (unlikely(ddi && ddi->ddi_can_stash)) { - dispatch_object_t old_dou = ddi->ddi_stashed_dou; - dispatch_priority_t rq_overcommit; - rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - - if (likely(!old_dou._do || rq_overcommit)) { - dispatch_queue_t old_rq = ddi->ddi_stashed_rq; - dispatch_qos_t old_qos = ddi->ddi_stashed_qos; - ddi->ddi_stashed_rq = rq; - ddi->ddi_stashed_dou = dou; - ddi->ddi_stashed_qos = qos; - _dispatch_debug("deferring item %p, rq %p, qos %d", - dou._do, rq, qos); - if (rq_overcommit) { - ddi->ddi_can_stash = false; - } - if (likely(!old_dou._do)) { - return; - } - // push the previously stashed item - qos = old_qos; - rq = old_rq; - dou = old_dou; - } - } -#endif -#if HAVE_PTHREAD_WORKQUEUE_QOS - if (_dispatch_root_queue_push_needs_override(rq, qos)) { - return _dispatch_root_queue_push_override(rq, dou, qos); - } -#else - (void)qos; -#endif - _dispatch_root_queue_push_inline(rq, dou, dou, 1); -} - -void 
-_dispatch_root_queue_wakeup(dispatch_queue_t dq, - DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) -{ - if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { - DISPATCH_INTERNAL_CRASH(dq->dq_priority, - "Don't try to wake up or override a root queue"); - } - if (flags & DISPATCH_WAKEUP_CONSUME_2) { - return _dispatch_release_2_tailcall(dq); - } -} - -DISPATCH_NOINLINE -void -_dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_qos_t qos) -{ - _dispatch_queue_push_inline(dq, dou, qos); -} - -DISPATCH_NOINLINE -void -_dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, +_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { + dispatch_queue_t dq = dqu._dq; dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT); if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) { @@ -5487,19 +4719,19 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { // - // _dispatch_queue_class_barrier_complete() is about what both regular + // _dispatch_lane_class_barrier_complete() is about what both regular // queues and sources needs to evaluate, but the former can have sync - // handoffs to perform which _dispatch_queue_class_barrier_complete() - // doesn't handle, only _dispatch_queue_barrier_complete() does. + // handoffs to perform which _dispatch_lane_class_barrier_complete() + // doesn't handle, only _dispatch_lane_barrier_complete() does. // - // _dispatch_queue_wakeup() is the one for plain queues that calls - // _dispatch_queue_barrier_complete(), and this is only taken for non + // _dispatch_lane_wakeup() is the one for plain queues that calls + // _dispatch_lane_barrier_complete(), and this is only taken for non // queue types. 
// - dispatch_assert(dx_metatype(dq) != _DISPATCH_QUEUE_TYPE); - qos = _dispatch_queue_override_qos(dq, qos); - return _dispatch_queue_class_barrier_complete(dq, qos, flags, target, - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED); + dispatch_assert(dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); + qos = _dispatch_queue_wakeup_qos(dq, qos); + return _dispatch_lane_class_barrier_complete(upcast(dq)._dl, qos, + flags, target, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED); } if (target) { @@ -5507,7 +4739,7 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, if (target == DISPATCH_QUEUE_WAKEUP_MGR) { enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } - qos = _dispatch_queue_override_qos(dq, qos); + qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (likely(!_dq_state_is_suspended(old_state) && @@ -5545,7 +4777,7 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { if (_dq_state_should_override(new_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, new_state, + return _dispatch_queue_wakeup_with_override(dq, new_state, flags); } } @@ -5565,8 +4797,7 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, } }); if (_dq_state_should_override(new_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, new_state, - flags); + return _dispatch_queue_wakeup_with_override(dq, new_state, flags); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS } @@ -5576,25 +4807,51 @@ _dispatch_queue_class_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, } } +DISPATCH_NOINLINE +void +_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + + if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { + return 
_dispatch_lane_barrier_complete(dqu, qos, flags); + } + if (_dispatch_queue_class_probe(dqu)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + return _dispatch_queue_wakeup(dqu, qos, flags, target); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_lane_push_waiter_should_wakeup(dispatch_lane_t dq, + dispatch_sync_context_t dsc) +{ + if (_dispatch_queue_is_thread_bound(dq)) { + return true; + } + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dispatch_async_and_wait_should_always_async(dq, dq_state); + } + return false; +} + DISPATCH_NOINLINE static void -_dispatch_queue_push_sync_waiter(dispatch_queue_t dq, - dispatch_sync_context_t dsc, dispatch_qos_t qos) +_dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, + dispatch_qos_t qos) { uint64_t old_state, new_state; - if (unlikely(dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE)) { - DISPATCH_CLIENT_CRASH(0, - "dispatch_sync onto a network event queue"); + if (dsc->dc_data != DISPATCH_WLH_ANON) { + // The kernel will handle all the overrides / priorities on our behalf. 
+ qos = 0; } - _dispatch_trace_continuation_push(dq, dsc->_as_dc); - - if (unlikely(_dispatch_queue_push_update_tail(dq, dsc->_as_do))) { - // for slow waiters, we borrow the reference of the caller - // so we don't need to protect the wakeup with a temporary retain - _dispatch_queue_push_update_head(dq, dsc->_as_do); - if (unlikely(_dispatch_queue_is_thread_bound(dq))) { + if (unlikely(_dispatch_queue_push_item(dq, dsc))) { + if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -5603,7 +4860,6 @@ _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; - // similar to _dispatch_queue_drain_try_unlock() os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; @@ -5622,19 +4878,17 @@ _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, } }); - if (_dq_state_is_base_wlh(old_state) && - (dsc->dsc_waiter == _dispatch_tid_self())) { - dsc->dsc_wlh_was_first = true; + if (_dq_state_is_base_wlh(old_state)) { + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); } if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_queue_barrier_complete(dq, qos, 0); + return _dispatch_lane_barrier_complete(dq, qos, 0); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { if (_dq_state_should_override(new_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, - new_state, 0); + return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } } } else if (unlikely(qos)) { @@ -5645,414 +4899,1683 @@ _dispatch_queue_push_sync_waiter(dispatch_queue_t dq, } }); if (_dq_state_should_override(new_state)) { - return _dispatch_queue_class_wakeup_with_override(dq, new_state, 0); + return 
_dispatch_queue_wakeup_with_override(dq, new_state, 0); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS } } +DISPATCH_NOINLINE +void +_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou, + dispatch_qos_t qos) +{ + dispatch_wakeup_flags_t flags = 0; + struct dispatch_object_s *prev; + + if (unlikely(_dispatch_object_is_waiter(dou))) { + return _dispatch_lane_push_waiter(dq, dou._dsc, qos); + } + + dispatch_assert(!_dispatch_object_is_global(dq)); + qos = _dispatch_queue_push_qos(dq, qos); + + // If we are going to call dx_wakeup(), the queue must be retained before + // the item we're pushing can be dequeued, which means: + // - before we exchange the tail if we have to override + // - before we set the head if we made the queue non empty. + // Otherwise, if preempted between one of these and the call to dx_wakeup() + // the blocks submitted to the queue may release the last reference to the + // queue when invoked by _dispatch_lane_drain. + + prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next); + if (unlikely(os_mpsc_push_was_empty(prev))) { + _dispatch_retain_2_unsafe(dq); + flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY; + } else if (unlikely(_dispatch_queue_need_override(dq, qos))) { + // There's a race here, _dispatch_queue_need_override may read a stale + // dq_state value. + // + // If it's a stale load from the same drain streak, given that + // the max qos is monotonic, too old a read can only cause an + // unnecessary attempt at overriding which is harmless. + // + // We'll assume here that a stale load from an a previous drain streak + // never happens in practice. 
+ _dispatch_retain_2_unsafe(dq); + flags = DISPATCH_WAKEUP_CONSUME_2; + } + os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next); + if (flags) { + return dx_wakeup(dq, qos, flags); + } +} + +DISPATCH_NOINLINE +void +_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, + dispatch_qos_t qos) +{ + // reserving non barrier width + // doesn't fail if only the ENQUEUED bit is set (unlike its barrier + // width equivalent), so we have to check that this thread hasn't + // enqueued anything ahead of this call or we can break ordering + if (dq->dq_items_tail == NULL && + !_dispatch_object_is_waiter(dou) && + !_dispatch_object_is_barrier(dou) && + _dispatch_queue_try_acquire_async(dq)) { + return _dispatch_continuation_redirect_push(dq, dou, qos); + } + + _dispatch_lane_push(dq, dou, qos); +} + #pragma mark - -#pragma mark dispatch_root_queue_drain +#pragma mark dispatch_mgr_queue + +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +struct _dispatch_mgr_sched_s { + volatile int prio; + volatile qos_class_t qos; + int default_prio; + int policy; + pthread_t tid; +}; + +DISPATCH_STATIC_GLOBAL(struct _dispatch_mgr_sched_s _dispatch_mgr_sched); +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred); + +#if HAVE_PTHREAD_WORKQUEUE_QOS +// TODO: switch to "event-reflector thread" property +// Must be kept in sync with list of qos classes in sys/qos.h +static int +_dispatch_mgr_sched_qos2prio(qos_class_t qos) +{ + switch (qos) { + case QOS_CLASS_MAINTENANCE: return 4; + case QOS_CLASS_BACKGROUND: return 4; + case QOS_CLASS_UTILITY: return 20; + case QOS_CLASS_DEFAULT: return 31; + case QOS_CLASS_USER_INITIATED: return 37; + case QOS_CLASS_USER_INTERACTIVE: return 47; + } + return 0; +} +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + +static void +_dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) +{ + struct sched_param param; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + 
dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; + pthread_attr_t *attr = &pqc->dpq_thread_attr; +#else + pthread_attr_t a, *attr = &a; +#endif + (void)dispatch_assume_zero(pthread_attr_init(attr)); + (void)dispatch_assume_zero(pthread_attr_getschedpolicy(attr, + &_dispatch_mgr_sched.policy)); + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = qos_class_main(); + if (qos == QOS_CLASS_DEFAULT) { + qos = QOS_CLASS_USER_INITIATED; // rdar://problem/17279292 + } + if (qos) { + _dispatch_mgr_sched.qos = qos; + param.sched_priority = _dispatch_mgr_sched_qos2prio(qos); + } +#endif + _dispatch_mgr_sched.default_prio = param.sched_priority; + _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; +} +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +#if DISPATCH_USE_MGR_THREAD DISPATCH_NOINLINE -static bool -_dispatch_root_queue_drain_one_slow(dispatch_queue_t dq) +static pthread_t * +_dispatch_mgr_root_queue_init(void) { - dispatch_root_queue_context_t qc = dq->do_ctxt; - struct dispatch_object_s *const mediator = (void *)~0ul; - bool pending = false, available = true; - unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; + pthread_attr_t *attr = &pqc->dpq_thread_attr; + struct sched_param param; + (void)dispatch_assume_zero(pthread_attr_setdetachstate(attr, + PTHREAD_CREATE_DETACHED)); +#if !DISPATCH_DEBUG + (void)dispatch_assume_zero(pthread_attr_setstacksize(attr, 64 * 1024)); +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = _dispatch_mgr_sched.qos; + if (qos) { + if (_dispatch_set_qos_class_enabled) { + (void)dispatch_assume_zero(pthread_attr_set_qos_class_np(attr, + qos, 0)); + } + } +#endif + param.sched_priority = 
_dispatch_mgr_sched.prio; + if (param.sched_priority > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(pthread_attr_setschedparam(attr, ¶m)); + } + return &_dispatch_mgr_sched.tid; +} +static inline void +_dispatch_mgr_priority_apply(void) +{ + struct sched_param param; do { - // Spin for a short while in case the contention is temporary -- e.g. - // when starting up after dispatch_apply, or when executing a few - // short continuations in a row. - if (_dispatch_contention_wait_until(dq->dq_items_head != mediator)) { - goto out; + param.sched_priority = _dispatch_mgr_sched.prio; + if (param.sched_priority > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(pthread_setschedparam( + _dispatch_mgr_sched.tid, _dispatch_mgr_sched.policy, + ¶m)); } - // Since we have serious contention, we need to back off. - if (!pending) { - // Mark this queue as pending to avoid requests for further threads - (void)os_atomic_inc2o(qc, dgq_pending, relaxed); - pending = true; + } while (_dispatch_mgr_sched.prio > param.sched_priority); +} + +DISPATCH_NOINLINE +static void +_dispatch_mgr_priority_init(void) +{ + dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; + pthread_attr_t *attr = &pqc->dpq_thread_attr; + struct sched_param param; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t qos = 0; + (void)pthread_attr_get_qos_class_np(attr, &qos, NULL); + if (_dispatch_mgr_sched.qos > qos && _dispatch_set_qos_class_enabled) { + (void)pthread_set_qos_class_self_np(_dispatch_mgr_sched.qos, 0); + int p = _dispatch_mgr_sched_qos2prio(_dispatch_mgr_sched.qos); + if (p > param.sched_priority) { + param.sched_priority = p; + } + } +#endif + if (unlikely(_dispatch_mgr_sched.prio > param.sched_priority)) { + return _dispatch_mgr_priority_apply(); + } +} +#endif // DISPATCH_USE_MGR_THREAD + +DISPATCH_NOINLINE +static void +_dispatch_mgr_priority_raise(const pthread_attr_t *attr) 
+{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + struct sched_param param; + (void)dispatch_assume_zero(pthread_attr_getschedparam(attr, ¶m)); +#if HAVE_PTHREAD_WORKQUEUE_QOS + qos_class_t q, qos = 0; + (void)pthread_attr_get_qos_class_np((pthread_attr_t *)attr, &qos, NULL); + if (qos) { + param.sched_priority = _dispatch_mgr_sched_qos2prio(qos); + os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, { + if (q >= qos) os_atomic_rmw_loop_give_up(break); + }); + } +#endif + int p, prio = param.sched_priority; + os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, { + if (p >= prio) os_atomic_rmw_loop_give_up(return); + }); +#if DISPATCH_USE_KEVENT_WORKQUEUE + _dispatch_root_queues_init(); + if (_dispatch_kevent_workqueue_enabled) { + pthread_priority_t pp = 0; + if (prio > _dispatch_mgr_sched.default_prio) { + // The values of _PTHREAD_PRIORITY_SCHED_PRI_FLAG and + // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG overlap, but that is not + // problematic in this case, since it the second one is only ever + // used on dq_priority fields. + // We never pass the _PTHREAD_PRIORITY_ROOTQUEUE_FLAG to a syscall, + // it is meaningful to libdispatch only. 
+ pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } else if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } + return; + } +#endif +#if DISPATCH_USE_MGR_THREAD + if (_dispatch_mgr_sched.tid) { + return _dispatch_mgr_priority_apply(); + } +#endif +} +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) +{ + uint64_t old_state, new_state, set_owner_and_set_full_width = + _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + new_state = old_state; + if (unlikely(!_dq_state_is_runnable(old_state) || + _dq_state_drain_locked(old_state))) { + DISPATCH_INTERNAL_CRASH((uintptr_t)old_state, + "Locking the manager should not fail"); + } + new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; + new_state |= set_owner_and_set_full_width; + }); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) +{ + uint64_t old_state, new_state; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + }); + return _dq_state_is_dirty(old_state); +} + +static void +_dispatch_mgr_queue_drain(void) +{ + const dispatch_invoke_flags_t flags = DISPATCH_INVOKE_MANAGER_DRAIN; + dispatch_invoke_context_s dic = { }; + struct dispatch_queue_static_s *dq = &_dispatch_mgr_q; + uint64_t owned = DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; + + if (dq->dq_items_tail) { + _dispatch_perfmon_start(); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); + if (unlikely(_dispatch_lane_serial_drain(dq, &dic, flags, &owned))) { + DISPATCH_INTERNAL_CRASH(0, "Interrupted 
drain on manager queue"); + } + _dispatch_voucher_debug("mgr queue clear", NULL); + _voucher_clear(); + _dispatch_reset_basepri_override(); + _dispatch_perfmon_end(perfmon_thread_manager); + } + +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (!_dispatch_kevent_workqueue_enabled) +#endif + { + _dispatch_force_cache_cleanup(); + } +} + +void +_dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou, + DISPATCH_UNUSED dispatch_qos_t qos) +{ + uint64_t dq_state; + + if (unlikely(_dispatch_object_is_waiter(dou))) { + DISPATCH_CLIENT_CRASH(0, "Waiter pushed onto manager"); + } + + if (unlikely(_dispatch_queue_push_item(dq, dou))) { + dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + if (!_dq_state_drain_locked_by_self(dq_state)) { + _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1); + _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); + } + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_lane_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, + DISPATCH_UNUSED dispatch_wakeup_flags_t flags) +{ + DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager"); +} + +#if DISPATCH_USE_MGR_THREAD +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_mgr_invoke(void) +{ +#if DISPATCH_EVENT_BACKEND_KEVENT + dispatch_kevent_s evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; +#endif + dispatch_deferred_items_s ddi = { + .ddi_wlh = DISPATCH_WLH_ANON, +#if DISPATCH_EVENT_BACKEND_KEVENT + .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + .ddi_eventlist = evbuf, +#endif + }; + + _dispatch_deferred_items_set(&ddi); + for (;;) { + bool poll = false; + _dispatch_mgr_queue_drain(); + _dispatch_event_loop_drain_anon_timers(); + poll = _dispatch_queue_class_probe(&_dispatch_mgr_q); + _dispatch_event_loop_drain(poll ? 
KEVENT_FLAG_IMMEDIATE : 0); + } +} + +DISPATCH_NORETURN +void +_dispatch_mgr_thread(dispatch_lane_t dq DISPATCH_UNUSED, + dispatch_invoke_context_t dic DISPATCH_UNUSED, + dispatch_invoke_flags_t flags DISPATCH_UNUSED) +{ +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (_dispatch_kevent_workqueue_enabled) { + DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " + "kevent workqueue enabled"); + } +#endif + _dispatch_queue_set_current(&_dispatch_mgr_q); +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + _dispatch_mgr_priority_init(); +#endif + _dispatch_queue_mgr_lock(&_dispatch_mgr_q); + // never returns, so burn bridges behind us & clear stack 2k ahead + _dispatch_clear_stack(2048); + _dispatch_mgr_invoke(); +} +#endif // DISPATCH_USE_MGR_THREAD + +#if DISPATCH_USE_KEVENT_WORKQUEUE + +dispatch_static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= + DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, + "our list should not be longer than the kernel's"); + +static void _dispatch_root_queue_drain_deferred_item( + dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); +static void _dispatch_root_queue_drain_deferred_wlh( + dispatch_deferred_items_t ddi DISPATCH_PERF_MON_ARGS_PROTO); + +void +_dispatch_kevent_workqueue_init(void) +{ + // Initialize kevent workqueue support + _dispatch_root_queues_init(); + if (!_dispatch_kevent_workqueue_enabled) return; + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + qos_class_t qos = _dispatch_mgr_sched.qos; + int prio = _dispatch_mgr_sched.prio; + pthread_priority_t pp = 0; + if (qos) { + pp = _pthread_qos_class_encode(qos, 0, 0); + } + if (prio > _dispatch_mgr_sched.default_prio) { + pp = (pthread_priority_t)prio | _PTHREAD_PRIORITY_SCHED_PRI_FLAG; + } + if (pp) { + int r = _pthread_workqueue_set_event_manager_priority(pp); + (void)dispatch_assume_zero(r); + } +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) +{ + dispatch_assert(ddi->ddi_wlh); + + pthread_priority_t pp 
= _dispatch_get_priority(); + if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { + // If this thread does not have the event manager flag set, don't setup + // as the dispatch manager and let the caller know to only process + // the delivered events. + // + // Also add the NEEDS_UNBIND flag so that + // _dispatch_priority_compute_update knows it has to unbind + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + if (ddi->ddi_wlh == DISPATCH_WLH_ANON) { + pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } else { + // pthread sets the flag when it is an event delivery thread + // so we need to explicitly clear it + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + } + _dispatch_thread_setspecific(dispatch_priority_key, + (void *)(uintptr_t)pp); + if (ddi->ddi_wlh != DISPATCH_WLH_ANON) { + _dispatch_debug("wlh[%p]: handling events", ddi->ddi_wlh); + } else { + ddi->ddi_can_stash = true; + } + return false; + } + + if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) || + !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + // When the phtread kext is delivering kevents to us, and pthread + // root queues are in use, then the pthread priority TSD is set + // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set. + // + // Given that this isn't a valid QoS we need to fixup the TSD, + // and the best option is to clear the qos/priority bits which tells + // us to not do any QoS related calls on this thread. + // + // However, in that case the manager thread is opted out of QoS, + // as far as pthread is concerned, and can't be turned into + // something else, so we can't stash. + pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK; + } + // Managers always park without mutating to a regular worker thread, and + // hence never need to unbind from userland, and when draining a manager, + // the NEEDS_UNBIND flag would cause the mutation to happen. 
+ // So we need to strip this flag + pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + + // ensure kevents registered from this thread are registered at manager QoS + _dispatch_init_basepri_wlh(DISPATCH_PRIORITY_FLAG_MANAGER); + _dispatch_queue_set_current(&_dispatch_mgr_q); + _dispatch_queue_mgr_lock(&_dispatch_mgr_q); + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_wlh_worker_thread_reset(void) +{ + bool needs_poll = _dispatch_queue_mgr_unlock(&_dispatch_mgr_q); + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); + if (needs_poll) { + _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1); + _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); + } +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, + int *nevents) +{ + _dispatch_introspection_thread_add(); + + DISPATCH_PERF_MON_VAR_INIT + + dispatch_deferred_items_s ddi = { + .ddi_wlh = wlh, + .ddi_eventlist = events, + }; + bool is_manager; + + is_manager = _dispatch_wlh_worker_thread_init(&ddi); + if (!is_manager) { + _dispatch_trace_runtime_event(worker_event_delivery, + wlh == DISPATCH_WLH_ANON ? 
NULL : wlh, (uint64_t)*nevents); + _dispatch_perfmon_start_impl(true); + } else { + _dispatch_trace_runtime_event(worker_event_delivery, + &_dispatch_mgr_q, (uint64_t)*nevents); + ddi.ddi_wlh = DISPATCH_WLH_ANON; + } + _dispatch_deferred_items_set(&ddi); + _dispatch_event_loop_merge(events, *nevents); + + if (is_manager) { + _dispatch_trace_runtime_event(worker_unpark, &_dispatch_mgr_q, 0); + _dispatch_mgr_queue_drain(); + _dispatch_event_loop_drain_anon_timers(); + _dispatch_wlh_worker_thread_reset(); + } else if (ddi.ddi_stashed_dou._do) { + _dispatch_debug("wlh[%p]: draining deferred item %p", ddi.ddi_wlh, + ddi.ddi_stashed_dou._do); + if (ddi.ddi_wlh == DISPATCH_WLH_ANON) { + dispatch_assert(ddi.ddi_nevents == 0); + _dispatch_deferred_items_set(NULL); + _dispatch_trace_runtime_event(worker_unpark, ddi.ddi_stashed_rq, 0); + _dispatch_root_queue_drain_deferred_item(&ddi + DISPATCH_PERF_MON_ARGS); + } else { + _dispatch_trace_runtime_event(worker_unpark, wlh, 0); + _dispatch_root_queue_drain_deferred_wlh(&ddi + DISPATCH_PERF_MON_ARGS); + } + } + + _dispatch_deferred_items_set(NULL); + if (!is_manager && !ddi.ddi_stashed_dou._do) { + _dispatch_perfmon_end(perfmon_thread_event_no_steal); + } + _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); + _dispatch_clear_return_to_kernel(); + *nevents = ddi.ddi_nevents; + + _dispatch_trace_runtime_event(worker_park, NULL, 0); +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) +{ + if (!events || !nevents) { + // events for worker thread request have already been delivered earlier + return; + } + if (!dispatch_assume(*nevents && *events)) return; + _dispatch_adopt_wlh_anon(); + _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); + _dispatch_reset_wlh(); +} + +#endif // DISPATCH_USE_KEVENT_WORKQUEUE +#pragma mark - +#pragma mark dispatch_root_queue + +#if DISPATCH_USE_PTHREAD_POOL +static void *_dispatch_worker_thread(void *context); 
+#endif // DISPATCH_USE_PTHREAD_POOL + +#if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG +#define _dispatch_root_queue_debug(...) _dispatch_debug(__VA_ARGS__) +static void +_dispatch_debug_root_queue(dispatch_queue_class_t dqu, const char *str) +{ + if (likely(dqu._dq)) { + _dispatch_object_debug(dqu._dq, "%s", str); + } else { + _dispatch_log("queue[NULL]: %s", str); + } +} +#else +#define _dispatch_root_queue_debug(...) +#define _dispatch_debug_root_queue(...) +#endif // DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG + +DISPATCH_NOINLINE +static void +_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) +{ + int remaining = n; + int r = ENOSYS; + + _dispatch_root_queues_init(); + _dispatch_debug_root_queue(dq, __func__); + _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n); + +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) +#endif + { + _dispatch_root_queue_debug("requesting new worker thread for global " + "queue: %p", dq); + r = _pthread_workqueue_addthreads(remaining, + _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); + (void)dispatch_assume_zero(r); + return; + } +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_POOL + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + if (likely(pqc->dpq_thread_mediator.do_vtable)) { + while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) { + _dispatch_root_queue_debug("signaled sleeping worker for " + "global queue: %p", dq); + if (!--remaining) { + return; + } + } + } + + bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (overcommit) { + os_atomic_add2o(dq, dgq_pending, remaining, relaxed); + } else { + if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) { + _dispatch_root_queue_debug("worker thread request still pending for " + "global queue: %p", dq); + return; + } + } + + int can_request, t_count; + // seq_cst with atomic 
store to tail + t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered); + do { + can_request = t_count < floor ? 0 : t_count - floor; + if (remaining > can_request) { + _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", + remaining, can_request); + os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed); + remaining = can_request; + } + if (remaining == 0) { + _dispatch_root_queue_debug("pthread pool is full for root queue: " + "%p", dq); + return; + } + } while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count, + t_count - remaining, &t_count, acquire)); + + pthread_attr_t *attr = &pqc->dpq_thread_attr; + pthread_t tid, *pthr = &tid; +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (unlikely(dq == &_dispatch_mgr_root_queue)) { + pthr = _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); // released in _dispatch_worker_thread + while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) { + if (r != EAGAIN) { + (void)dispatch_assume_zero(r); + } + _dispatch_temporary_resource_shortage(); + } + } while (--remaining); +#else + (void)floor; +#endif // DISPATCH_USE_PTHREAD_POOL +} + +DISPATCH_NOINLINE +void +_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) +{ + if (!_dispatch_queue_class_probe(dq)) { + return; + } +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_POOL + if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) +#endif + { + if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { + _dispatch_root_queue_debug("worker thread request still pending " + "for global queue: %p", dq); + return; + } + } +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE + return _dispatch_root_queue_poke_slow(dq, n, floor); +} + +#define DISPATCH_ROOT_QUEUE_MEDIATOR ((struct dispatch_object_s *)~0ul) + +enum { + DISPATCH_ROOT_QUEUE_DRAIN_WAIT, + DISPATCH_ROOT_QUEUE_DRAIN_READY, + DISPATCH_ROOT_QUEUE_DRAIN_ABORT, +}; + +static int 
+_dispatch_root_queue_mediator_is_gone(dispatch_queue_global_t dq) +{ + return os_atomic_load2o(dq, dq_items_head, relaxed) != + DISPATCH_ROOT_QUEUE_MEDIATOR; +} + +static int +_dispatch_root_queue_head_tail_quiesced(dispatch_queue_global_t dq) +{ + // Wait for queue head and tail to be both non-empty or both empty + struct dispatch_object_s *head, *tail; + head = os_atomic_load2o(dq, dq_items_head, relaxed); + tail = os_atomic_load2o(dq, dq_items_tail, relaxed); + if ((head == NULL) == (tail == NULL)) { + if (tail == NULL) { // + return DISPATCH_ROOT_QUEUE_DRAIN_ABORT; + } + return DISPATCH_ROOT_QUEUE_DRAIN_READY; + } + return DISPATCH_ROOT_QUEUE_DRAIN_WAIT; +} + +DISPATCH_NOINLINE +static bool +__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, + int (*predicate)(dispatch_queue_global_t dq)) +{ + unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; + int status = DISPATCH_ROOT_QUEUE_DRAIN_READY; + bool pending = false; + + do { + // Spin for a short while in case the contention is temporary -- e.g. + // when starting up after dispatch_apply, or when executing a few + // short continuations in a row. + if (_dispatch_contention_wait_until(status = predicate(dq))) { + goto out; + } + // Since we have serious contention, we need to back off. + if (!pending) { + // Mark this queue as pending to avoid requests for further threads + (void)os_atomic_inc2o(dq, dgq_pending, relaxed); + pending = true; + } + _dispatch_contention_usleep(sleep_time); + if (likely(status = predicate(dq))) goto out; + sleep_time *= 2; + } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); + + // The ratio of work to libdispatch overhead must be bad. This + // scenario implies that there are too many threads in the pool. + // Create a new pending thread and then exit this thread. + // The kernel will grant a new thread when the load subsides. 
+ _dispatch_debug("contention on global queue: %p", dq); +out: + if (pending) { + (void)os_atomic_dec2o(dq, dgq_pending, relaxed); + } + if (status == DISPATCH_ROOT_QUEUE_DRAIN_WAIT) { + _dispatch_root_queue_poke(dq, 1, 0); + } + return status == DISPATCH_ROOT_QUEUE_DRAIN_READY; +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static inline struct dispatch_object_s * +_dispatch_root_queue_drain_one(dispatch_queue_global_t dq) +{ + struct dispatch_object_s *head, *next; + +start: + // The MEDIATOR value acts both as a "lock" and a signal + head = os_atomic_xchg2o(dq, dq_items_head, + DISPATCH_ROOT_QUEUE_MEDIATOR, relaxed); + + if (unlikely(head == NULL)) { + // The first xchg on the tail will tell the enqueueing thread that it + // is safe to blindly write out to the head pointer. A cmpxchg honors + // the algorithm. + if (unlikely(!os_atomic_cmpxchg2o(dq, dq_items_head, + DISPATCH_ROOT_QUEUE_MEDIATOR, NULL, relaxed))) { + goto start; + } + if (unlikely(dq->dq_items_tail)) { // + if (__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, + _dispatch_root_queue_head_tail_quiesced)) { + goto start; + } + } + _dispatch_root_queue_debug("no work on global queue: %p", dq); + return NULL; + } + + if (unlikely(head == DISPATCH_ROOT_QUEUE_MEDIATOR)) { + // This thread lost the race for ownership of the queue. + if (likely(__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, + _dispatch_root_queue_mediator_is_gone))) { + goto start; + } + return NULL; + } + + // Restore the head pointer to a sane value before returning. + // If 'next' is NULL, then this item _might_ be the last item. + next = head->do_next; + + if (unlikely(!next)) { + os_atomic_store2o(dq, dq_items_head, NULL, relaxed); + // 22708742: set tail to NULL with release, so that NULL write to head + // above doesn't clobber head from concurrent enqueuer + if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { + // both head and tail are NULL now + goto out; + } + // There must be a next item now. 
+ next = os_mpsc_get_next(head, do_next); + } + + os_atomic_store2o(dq, dq_items_head, next, relaxed); + _dispatch_root_queue_poke(dq, 1, 0); +out: + return head; +} + +#if DISPATCH_USE_KEVENT_WORKQUEUE +static void +_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_global_t rq = ddi->ddi_stashed_rq; + dispatch_queue_t dq = ddi->ddi_stashed_dou._dq; + _dispatch_queue_set_current(rq); + + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH; + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + uint64_t dq_state; + + _dispatch_init_basepri_wlh(rq->dq_priority); + ddi->ddi_wlh_servicing = true; + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } +retry: + dispatch_assert(ddi->ddi_wlh_needs_delete); + _dispatch_trace_item_pop(rq, dq); + + if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { + dx_invoke(dq, &dic, flags); + if (!ddi->ddi_wlh_needs_delete) { + goto park; + } + dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 + goto park; + } + if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { + _dispatch_retain(dq); + _dispatch_trace_item_push(dq->do_targetq, dq); + goto retry; + } + } else { + if (_dq_state_is_suspended(dq_state)) { + dispatch_assert(!_dq_state_is_enqueued(dq_state)); + _dispatch_release_2_no_dispose(dq); + } else { + dispatch_assert(_dq_state_is_enqueued(dq_state)); + dispatch_assert(_dq_state_drain_locked(dq_state)); + _dispatch_release_no_dispose(dq); + } + } + + _dispatch_event_loop_leave_deferred(ddi, dq_state); + +park: + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", 
NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} + +static void +_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi + DISPATCH_PERF_MON_ARGS_PROTO) +{ + dispatch_queue_global_t rq = ddi->ddi_stashed_rq; + _dispatch_queue_set_current(rq); + _dispatch_trace_runtime_event(worker_unpark, NULL, 0); + + dispatch_invoke_context_s dic = { }; + dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | + DISPATCH_INVOKE_REDIRECTING_DRAIN; +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_push(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); + _dispatch_init_basepri(rq->dq_priority); + + _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq); + + // event thread that could steal + _dispatch_perfmon_end(perfmon_thread_event_steal); +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); + + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +} +#endif + +DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) +static void +_dispatch_root_queue_drain(dispatch_queue_global_t dq, + dispatch_priority_t pri, dispatch_invoke_flags_t flags) +{ +#if DISPATCH_DEBUG + dispatch_queue_t cq; + if (unlikely(cq = _dispatch_queue_get_current())) { + DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); + } +#endif + _dispatch_queue_set_current(dq); + _dispatch_init_basepri(pri); + _dispatch_adopt_wlh_anon(); + + struct dispatch_object_s *item; + bool reset = false; + dispatch_invoke_context_s dic = { }; +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_push(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); + _dispatch_perfmon_start(); + while (likely(item = 
_dispatch_root_queue_drain_one(dq))) { + if (reset) _dispatch_wqthread_override_reset(); + _dispatch_continuation_pop_inline(item, &dic, flags, dq); + reset = _dispatch_reset_basepri_override(); + if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { + break; + } + } + + // overcommit or not. worker thread + if (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + _dispatch_perfmon_end(perfmon_thread_worker_oc); + } else { + _dispatch_perfmon_end(perfmon_thread_worker_non_oc); + } + +#if DISPATCH_COCOA_COMPAT + _dispatch_last_resort_autorelease_pool_pop(&dic); +#endif // DISPATCH_COCOA_COMPAT + _dispatch_reset_wlh(); + _dispatch_clear_basepri(); + _dispatch_queue_set_current(NULL); +} + +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +static void +_dispatch_worker_thread2(pthread_priority_t pp) +{ + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + dispatch_queue_global_t dq; + + pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); + + _dispatch_introspection_thread_add(); + _dispatch_trace_runtime_event(worker_unpark, dq, 0); + + int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + dispatch_assert(pending >= 0); + _dispatch_root_queue_drain(dq, dq->dq_priority, + DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_voucher_debug("root queue clear", NULL); + _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + _dispatch_trace_runtime_event(worker_park, NULL, 0); +} +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE + +#if DISPATCH_USE_PTHREAD_POOL +static inline void +_dispatch_root_queue_init_pthread_pool(dispatch_queue_global_t dq, + int pool_size, dispatch_priority_t pri) +{ + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + int thread_pool_size = DISPATCH_WORKQ_MAX_PTHREAD_COUNT; + if (!(pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)) { + thread_pool_size = 
(int32_t)dispatch_hw_config(active_cpus); + } + if (pool_size && pool_size < thread_pool_size) thread_pool_size = pool_size; + dq->dgq_thread_pool_size = thread_pool_size; + qos_class_t cls = _dispatch_qos_to_qos_class(_dispatch_priority_qos(pri) ?: + _dispatch_priority_fallback_qos(pri)); + if (cls) { + pthread_attr_t *attr = &pqc->dpq_thread_attr; + int r = pthread_attr_init(attr); + dispatch_assume_zero(r); + r = pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED); + dispatch_assume_zero(r); +#if HAVE_PTHREAD_WORKQUEUE_QOS + r = pthread_attr_set_qos_class_np(attr, cls, 0); + dispatch_assume_zero(r); + } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; + pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); + _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); + _dispatch_sema4_create(sema, _DSEMA4_POLICY_LIFO); +} + +// 6618342 Contact the team that owns the Instrument DTrace probe before +// renaming this symbol +static void * +_dispatch_worker_thread(void *context) +{ + dispatch_queue_global_t dq = context; + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + + int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + if (unlikely(pending < 0)) { + DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); + } + + if (pqc->dpq_observer_hooks.queue_will_execute) { + _dispatch_set_pthread_root_queue_observer_hooks( + &pqc->dpq_observer_hooks); + } + if (pqc->dpq_thread_configure) { + pqc->dpq_thread_configure(); + } + + // workaround tweaks the kernel workqueue does for us + _dispatch_sigmask(); + _dispatch_introspection_thread_add(); + + const int64_t timeout = 5ull * NSEC_PER_SEC; + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = dq->dq_priority; + + // If the queue is neither + // - the manager + // - with a fallback set + // - with a requested QoS or QoS floor + // then infer the basepri from the current priority. 
+ if ((pri & (DISPATCH_PRIORITY_FLAG_MANAGER | + DISPATCH_PRIORITY_FLAG_FALLBACK | + DISPATCH_PRIORITY_FLAG_FLOOR | + DISPATCH_PRIORITY_REQUESTED_MASK)) == 0) { + pri &= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + pri |= _dispatch_priority_from_pp(pp); + } else { + pri |= _dispatch_priority_make_override(DISPATCH_QOS_SATURATED); + } + } + +#if DISPATCH_USE_INTERNAL_WORKQUEUE + bool monitored = ((pri & (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | + DISPATCH_PRIORITY_FLAG_MANAGER)) == 0); + if (monitored) _dispatch_workq_worker_register(dq); +#endif + + do { + _dispatch_trace_runtime_event(worker_unpark, dq, 0); + _dispatch_root_queue_drain(dq, pri, DISPATCH_INVOKE_REDIRECTING_DRAIN); + _dispatch_reset_priority_and_voucher(pp, NULL); + _dispatch_trace_runtime_event(worker_park, NULL, 0); + } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, + dispatch_time(0, timeout)) == 0); + +#if DISPATCH_USE_INTERNAL_WORKQUEUE + if (monitored) _dispatch_workq_worker_unregister(dq); +#endif + (void)os_atomic_inc2o(dq, dgq_thread_pool_size, release); + _dispatch_root_queue_poke(dq, 1, 0); + _dispatch_release(dq); // retained in _dispatch_root_queue_poke_slow + return NULL; +} +#endif // DISPATCH_USE_PTHREAD_POOL + +DISPATCH_NOINLINE +void +_dispatch_root_queue_wakeup(dispatch_queue_global_t dq, + DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags) +{ + if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) { + DISPATCH_INTERNAL_CRASH(dq->dq_priority, + "Don't try to wake up or override a root queue"); + } + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } +} + +DISPATCH_NOINLINE +void +_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, + dispatch_qos_t qos) +{ +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (unlikely(ddi && ddi->ddi_can_stash)) { + dispatch_object_t old_dou = ddi->ddi_stashed_dou; + dispatch_priority_t 
rq_overcommit; + rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + + if (likely(!old_dou._do || rq_overcommit)) { + dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; + dispatch_qos_t old_qos = ddi->ddi_stashed_qos; + ddi->ddi_stashed_rq = rq; + ddi->ddi_stashed_dou = dou; + ddi->ddi_stashed_qos = qos; + _dispatch_debug("deferring item %p, rq %p, qos %d", + dou._do, rq, qos); + if (rq_overcommit) { + ddi->ddi_can_stash = false; + } + if (likely(!old_dou._do)) { + return; + } + // push the previously stashed item + qos = old_qos; + rq = old_rq; + dou = old_dou; + } + } +#endif +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (_dispatch_root_queue_push_needs_override(rq, qos)) { + return _dispatch_root_queue_push_override(rq, dou, qos); + } +#else + (void)qos; +#endif + _dispatch_root_queue_push_inline(rq, dou, dou, 1); +} + +#pragma mark - +#pragma mark dispatch_pthread_root_queue +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES + +static dispatch_queue_global_t +_dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + dispatch_queue_pthread_root_t dpq; + dispatch_queue_flags_t dqf = 0; + int32_t pool_size = flags & _DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE ? 
+ (int8_t)(flags & ~_DISPATCH_PTHREAD_ROOT_QUEUE_FLAG_POOL_SIZE) : 0; + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + dpq = _dispatch_queue_alloc(queue_pthread_root, dqf, + DISPATCH_QUEUE_WIDTH_POOL, 0)._dpq; + dpq->dq_label = label; + dpq->dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; + dpq->dq_priority = DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + dpq->do_ctxt = &dpq->dpq_ctxt; + + dispatch_pthread_root_queue_context_t pqc = &dpq->dpq_ctxt; + _dispatch_root_queue_init_pthread_pool(dpq->_as_dgq, pool_size, + DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + + if (attr) { + memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); + _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); + } else { + (void)dispatch_assume_zero(pthread_attr_init(&pqc->dpq_thread_attr)); + } + (void)dispatch_assume_zero(pthread_attr_setdetachstate( + &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); + if (configure) { + pqc->dpq_thread_configure = _dispatch_Block_copy(configure); + } + if (observer_hooks) { + pqc->dpq_observer_hooks = *observer_hooks; + } + _dispatch_object_debug(dpq, "%s", __func__); + return _dispatch_trace_queue_create(dpq)._dgq; +} + +dispatch_queue_global_t +dispatch_pthread_root_queue_create(const char *label, unsigned long flags, + const pthread_attr_t *attr, dispatch_block_t configure) +{ + return _dispatch_pthread_root_queue_create(label, flags, attr, configure, + NULL); +} + +#if DISPATCH_IOHID_SPI +dispatch_queue_global_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID(const char *label, + unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure) +{ + if (!observer_hooks->queue_will_execute || + !observer_hooks->queue_did_execute) { + DISPATCH_CLIENT_CRASH(0, "Invalid pthread root queue observer hooks"); + } + return _dispatch_pthread_root_queue_create(label, flags, 
attr, configure, + observer_hooks); +} + +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t dq) // rdar://problem/18033810 +{ + if (dq->dq_width != 1) { + DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); + } + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + return _dq_state_drain_locked_by_self(dq_state); +} +#endif + +dispatch_queue_global_t +dispatch_pthread_root_queue_copy_current(void) +{ + dispatch_queue_t dq = _dispatch_queue_get_current(); + if (!dq) return NULL; + while (unlikely(dq->do_targetq)) { + dq = dq->do_targetq; + } + if (dx_type(dq) != DISPATCH_QUEUE_PTHREAD_ROOT_TYPE) { + return NULL; + } + _os_object_retain_with_resurrect(dq->_as_os_obj); + return upcast(dq)._dgq; +} + +void +_dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, + bool *allow_free) +{ + dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; + + _dispatch_object_debug(dq, "%s", __func__); + _dispatch_trace_queue_dispose(dq); + + pthread_attr_destroy(&pqc->dpq_thread_attr); + _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); + if (pqc->dpq_thread_configure) { + Block_release(pqc->dpq_thread_configure); + } + dq->do_targetq = _dispatch_get_default_queue(false); + _dispatch_lane_class_dispose(dq, allow_free); +} + +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES +#pragma mark - +#pragma mark dispatch_runloop_queue + +DISPATCH_STATIC_GLOBAL(bool _dispatch_program_is_probably_callback_driven); + +#if DISPATCH_COCOA_COMPAT +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_main_q_handle_pred); + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) +{ +#if TARGET_OS_MAC + return MACH_PORT_VALID(handle); +#elif defined(__linux__) + return handle >= 0; +#else +#error "runloop support not implemented on this platform" +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_runloop_handle_t +_dispatch_runloop_queue_get_handle(dispatch_lane_t dq) 
+{ +#if TARGET_OS_MAC + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); +#elif defined(__linux__) + // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#else +#error "runloop support not implemented on this platform" +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_runloop_queue_set_handle(dispatch_lane_t dq, + dispatch_runloop_handle_t handle) +{ +#if TARGET_OS_MAC + dq->do_ctxt = (void *)(uintptr_t)handle; +#elif defined(__linux__) + // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL + dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#else +#error "runloop support not implemented on this platform" +#endif +} + +static void +_dispatch_runloop_queue_handle_init(void *ctxt) +{ + dispatch_lane_t dq = (dispatch_lane_t)ctxt; + dispatch_runloop_handle_t handle; + + _dispatch_fork_becomes_unsafe(); + +#if TARGET_OS_MAC + mach_port_options_t opts = { + .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT, + }; + mach_port_context_t guard = (uintptr_t)dq; + kern_return_t kr; + mach_port_t mp; + + if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { + opts.flags |= MPO_QLIMIT; + opts.mpl.mpl_qlimit = 1; + } + + kr = mach_port_construct(mach_task_self(), &opts, guard, &mp); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); + + handle = mp; +#elif defined(__linux__) + int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + if (fd == -1) { + int err = errno; + switch (err) { + case EMFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "process is out of file descriptors"); + break; + case ENFILE: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "system is out of file descriptors"); + break; + case ENOMEM: + DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " + "kernel is out of memory"); + break; + default: + DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); + break; } - _dispatch_contention_usleep(sleep_time); - if 
(fastpath(dq->dq_items_head != mediator)) goto out; - sleep_time *= 2; - } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); - - // The ratio of work to libdispatch overhead must be bad. This - // scenario implies that there are too many threads in the pool. - // Create a new pending thread and then exit this thread. - // The kernel will grant a new thread when the load subsides. - _dispatch_debug("contention on global queue: %p", dq); - available = false; -out: - if (pending) { - (void)os_atomic_dec2o(qc, dgq_pending, relaxed); } - if (!available) { - _dispatch_global_queue_poke(dq, 1, 0); - } - return available; -} + handle = fd; +#else +#error "runloop support not implemented on this platform" +#endif + _dispatch_runloop_queue_set_handle(dq, handle); -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_root_queue_drain_one2(dispatch_queue_t dq) -{ - // Wait for queue head and tail to be both non-empty or both empty - bool available; // - _dispatch_wait_until((dq->dq_items_head != NULL) == - (available = (dq->dq_items_tail != NULL))); - return available; + _dispatch_program_is_probably_callback_driven = true; } -DISPATCH_ALWAYS_INLINE_NDEBUG -static inline struct dispatch_object_s * -_dispatch_root_queue_drain_one(dispatch_queue_t dq) +static void +_dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) { - struct dispatch_object_s *head, *next, *const mediator = (void *)~0ul; - -start: - // The mediator value acts both as a "lock" and a signal - head = os_atomic_xchg2o(dq, dq_items_head, mediator, relaxed); - - if (slowpath(head == NULL)) { - // The first xchg on the tail will tell the enqueueing thread that it - // is safe to blindly write out to the head pointer. A cmpxchg honors - // the algorithm. 
- if (slowpath(!os_atomic_cmpxchg2o(dq, dq_items_head, mediator, - NULL, relaxed))) { - goto start; - } - if (slowpath(dq->dq_items_tail) && // - _dispatch_root_queue_drain_one2(dq)) { - goto start; - } - _dispatch_root_queue_debug("no work on global queue: %p", dq); - return NULL; + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { + return; } + dq->do_ctxt = NULL; +#if TARGET_OS_MAC + mach_port_t mp = (mach_port_t)handle; + mach_port_context_t guard = (uintptr_t)dq; + kern_return_t kr; + kr = mach_port_destruct(mach_task_self(), mp, -1, guard); + DISPATCH_VERIFY_MIG(kr); + (void)dispatch_assume_zero(kr); +#elif defined(__linux__) + int rc = close(handle); + (void)dispatch_assume_zero(rc); +#else +#error "runloop support not implemented on this platform" +#endif +} - if (slowpath(head == mediator)) { - // This thread lost the race for ownership of the queue. - if (fastpath(_dispatch_root_queue_drain_one_slow(dq))) { - goto start; - } - return NULL; +static inline void +_dispatch_runloop_queue_class_poke(dispatch_lane_t dq) +{ + dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); + if (!_dispatch_runloop_handle_is_valid(handle)) { + return; } - // Restore the head pointer to a sane value before returning. - // If 'next' is NULL, then this item _might_ be the last item. - next = fastpath(head->do_next); - - if (slowpath(!next)) { - os_atomic_store2o(dq, dq_items_head, NULL, relaxed); - // 22708742: set tail to NULL with release, so that NULL write to head - // above doesn't clobber head from concurrent enqueuer - if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { - // both head and tail are NULL now - goto out; - } - // There must be a next item now. 
- next = os_mpsc_get_next(head, do_next); + _dispatch_trace_runtime_event(worker_request, dq, 1); +#if HAVE_MACH + mach_port_t mp = handle; + kern_return_t kr = _dispatch_send_wakeup_runloop_thread(mp, 0); + switch (kr) { + case MACH_SEND_TIMEOUT: + case MACH_SEND_TIMED_OUT: + case MACH_SEND_INVALID_DEST: + break; + default: + (void)dispatch_assume_zero(kr); + break; } - - os_atomic_store2o(dq, dq_items_head, next, relaxed); - _dispatch_global_queue_poke(dq, 1, 0); -out: - return head; +#elif defined(__linux__) + int result; + do { + result = eventfd_write(handle, 1); + } while (result == -1 && errno == EINTR); + (void)dispatch_assume_zero(result); +#else +#error "runloop support not implemented on this platform" +#endif } -#if DISPATCH_USE_KEVENT_WORKQUEUE -void -_dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO) +DISPATCH_NOINLINE +static void +_dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { - dispatch_queue_t rq = ddi->ddi_stashed_rq; - dispatch_queue_t dq = ddi->ddi_stashed_dou._dq; - _dispatch_queue_set_current(rq); - dispatch_priority_t old_pri = _dispatch_set_basepri_wlh(rq->dq_priority); - dispatch_invoke_context_s dic = { }; - dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | - DISPATCH_INVOKE_REDIRECTING_DRAIN | DISPATCH_INVOKE_WLH; - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); - uint64_t dq_state; + // it's not useful to handle WAKEUP_MAKE_DIRTY because mach_msg() will have + // a release barrier and that when runloop queues stop being thread-bound + // they have a non optional wake-up to start being a "normal" queue + // either in _dispatch_runloop_queue_xref_dispose, + // or in _dispatch_queue_cleanup2() for the main thread. 
+ uint64_t old_state, new_state; - ddi->ddi_wlh_servicing = true; - if (unlikely(_dispatch_needs_to_return_to_kernel())) { - _dispatch_return_to_kernel(); + if (dx_type(dq) == DISPATCH_QUEUE_MAIN_TYPE) { + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); } -retry: - dispatch_assert(ddi->ddi_wlh_needs_delete); - _dispatch_trace_continuation_pop(rq, dq); - if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { - dx_invoke(dq, &dic, flags); - if (!ddi->ddi_wlh_needs_delete) { - goto park; - } - dq_state = os_atomic_load2o(dq, dq_state, relaxed); - if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 - goto park; - } - if (unlikely(_dq_state_is_enqueued_on_target(dq_state))) { - _dispatch_retain(dq); - _dispatch_trace_continuation_push(dq->do_targetq, dq); - goto retry; + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + new_state = _dq_state_merge_qos(old_state, qos); + if (old_state == new_state) { + os_atomic_rmw_loop_give_up(goto no_change); } - } else { - if (_dq_state_is_suspended(dq_state)) { - dispatch_assert(!_dq_state_is_enqueued(dq_state)); - _dispatch_release_2_no_dispose(dq); - } else { - dispatch_assert(_dq_state_is_enqueued(dq_state)); - dispatch_assert(_dq_state_drain_locked(dq_state)); - _dispatch_release_no_dispose(dq); + }); + + dispatch_qos_t dq_qos = _dispatch_priority_qos(dq->dq_priority); + if (qos > dq_qos) { + mach_port_t owner = _dq_state_drain_owner(new_state); + pthread_priority_t pp = _dispatch_qos_to_pp(qos); + _dispatch_thread_override_start(owner, pp, dq); + if (_dq_state_max_qos(old_state) > dq_qos) { + _dispatch_thread_override_end(owner, dq); } } - - _dispatch_event_loop_leave_deferred((dispatch_wlh_t)dq, dq_state); - -park: - // event thread that could steal - _dispatch_perfmon_end(perfmon_thread_event_steal); - _dispatch_reset_basepri(old_pri); - _dispatch_reset_basepri_override(); - _dispatch_queue_set_current(NULL); - - _dispatch_voucher_debug("root 
queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); +no_change: + _dispatch_runloop_queue_class_poke(dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } } -void -_dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO) +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) { - dispatch_queue_t rq = ddi->ddi_stashed_rq; - _dispatch_queue_set_current(rq); - dispatch_priority_t old_pri = _dispatch_set_basepri(rq->dq_priority); - - dispatch_invoke_context_s dic = { }; - dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | - DISPATCH_INVOKE_REDIRECTING_DRAIN; -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_push(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, rq->dq_priority); - _dispatch_continuation_pop_inline(ddi->ddi_stashed_dou, &dic, flags, rq); - - // event thread that could steal - _dispatch_perfmon_end(perfmon_thread_event_steal); -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_basepri(old_pri); - _dispatch_reset_basepri_override(); - _dispatch_queue_set_current(NULL); - - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | + DISPATCH_QUEUE_RECEIVED_OVERRIDE; + old_state = os_atomic_and_orig2o(dq, dq_state, ~clear_bits, relaxed); + return _dq_state_max_qos(old_state); } -#endif -DISPATCH_NOT_TAIL_CALLED // prevent tailcall (for Instrument DTrace probe) -static void -_dispatch_root_queue_drain(dispatch_queue_t dq, pthread_priority_t pp) +void +_dispatch_runloop_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) { -#if DISPATCH_DEBUG - dispatch_queue_t cq; - if (slowpath(cq = 
_dispatch_queue_get_current())) { - DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling"); + if (unlikely(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { + // + return _dispatch_lane_wakeup(dq, qos, flags); } -#endif - _dispatch_queue_set_current(dq); - dispatch_priority_t pri = dq->dq_priority; - if (!pri) pri = _dispatch_priority_from_pp(pp); - dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); - _dispatch_adopt_wlh_anon(); - struct dispatch_object_s *item; - bool reset = false; - dispatch_invoke_context_s dic = { }; -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_push(&dic); -#endif // DISPATCH_COCOA_COMPAT - dispatch_invoke_flags_t flags = DISPATCH_INVOKE_WORKER_DRAIN | - DISPATCH_INVOKE_REDIRECTING_DRAIN; - _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri); - _dispatch_perfmon_start(); - while ((item = fastpath(_dispatch_root_queue_drain_one(dq)))) { - if (reset) _dispatch_wqthread_override_reset(); - _dispatch_continuation_pop_inline(item, &dic, flags, dq); - reset = _dispatch_reset_basepri_override(); - if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { - break; - } + if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); } - - // overcommit or not. 
worker thread - if (pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) { - _dispatch_perfmon_end(perfmon_thread_worker_oc); - } else { - _dispatch_perfmon_end(perfmon_thread_worker_non_oc); + if (_dispatch_queue_class_probe(dq)) { + return _dispatch_runloop_queue_poke(dq, qos, flags); } -#if DISPATCH_COCOA_COMPAT - _dispatch_last_resort_autorelease_pool_pop(&dic); -#endif // DISPATCH_COCOA_COMPAT - _dispatch_reset_wlh(); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_queue_set_current(NULL); + qos = _dispatch_runloop_queue_reset_max_qos(dq); + if (qos) { + mach_port_t owner = DISPATCH_QUEUE_DRAIN_OWNER(dq); + if (_dispatch_queue_class_probe(dq)) { + _dispatch_runloop_queue_poke(dq, qos, flags); + } + _dispatch_thread_override_end(owner, dq); + return; + } + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } } -#pragma mark - -#pragma mark dispatch_worker_thread - -#if HAVE_PTHREAD_WORKQUEUES +DISPATCH_NOINLINE static void -_dispatch_worker_thread4(void *context) +_dispatch_main_queue_update_priority_from_thread(void) { - dispatch_queue_t dq = context; - dispatch_root_queue_context_t qc = dq->do_ctxt; + dispatch_queue_main_t dq = &_dispatch_main_q; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + mach_port_t owner = _dq_state_drain_owner(dq_state); - _dispatch_introspection_thread_add(); - int pending = os_atomic_dec2o(qc, dgq_pending, relaxed); - dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq, _dispatch_get_priority()); - _dispatch_voucher_debug("root queue clear", NULL); - _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); -} + dispatch_priority_t main_pri = + _dispatch_priority_from_pp_strip_flags(_dispatch_get_priority()); + dispatch_qos_t main_qos = _dispatch_priority_qos(main_pri); + dispatch_qos_t max_qos = _dq_state_max_qos(dq_state); + dispatch_qos_t old_qos = _dispatch_priority_qos(dq->dq_priority); -#if HAVE_PTHREAD_WORKQUEUE_QOS -static void 
-_dispatch_worker_thread3(pthread_priority_t pp) -{ - bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_t dq; - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; - _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); - return _dispatch_worker_thread4(dq); -} -#endif // HAVE_PTHREAD_WORKQUEUE_QOS + // the main thread QoS was adjusted by someone else, learn the new QoS + // and reinitialize _dispatch_main_q.dq_priority + dq->dq_priority = main_pri; -#if DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -static void -_dispatch_worker_thread2(int priority, int options, - void *context DISPATCH_UNUSED) -{ - dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIOQUEUE); - dispatch_assert(!(options & ~WORKQ_ADDTHREADS_OPTION_OVERCOMMIT)); - dispatch_queue_t dq = _dispatch_wq2root_queues[priority][options]; + if (old_qos < max_qos && main_qos == DISPATCH_QOS_UNSPECIFIED) { + // main thread is opted out of QoS and we had an override + return _dispatch_thread_override_end(owner, dq); + } + + if (old_qos < max_qos && max_qos <= main_qos) { + // main QoS was raised, and we had an override which is now useless + return _dispatch_thread_override_end(owner, dq); + } - return _dispatch_worker_thread4(dq); + if (main_qos < max_qos && max_qos <= old_qos) { + // main thread QoS was lowered, and we actually need an override + pthread_priority_t pp = _dispatch_qos_to_pp(max_qos); + return _dispatch_thread_override_start(owner, pp, dq); + } } -#endif // DISPATCH_USE_PTHREAD_WORKQUEUE_SETDISPATCH_NP -#endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_USE_PTHREAD_POOL -// 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol -static void * -_dispatch_worker_thread(void *context) +static void 
+_dispatch_main_queue_drain(dispatch_queue_main_t dq) { - dispatch_queue_t dq = context; - dispatch_root_queue_context_t qc = dq->do_ctxt; - dispatch_pthread_root_queue_context_t pqc = qc->dgq_ctxt; + dispatch_thread_frame_s dtf; - int pending = os_atomic_dec2o(qc, dgq_pending, relaxed); - if (unlikely(pending < 0)) { - DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); + if (!dq->dq_items_tail) { + return; } - if (pqc->dpq_observer_hooks.queue_will_execute) { - _dispatch_set_pthread_root_queue_observer_hooks( - &pqc->dpq_observer_hooks); + _dispatch_perfmon_start_notrace(); + if (unlikely(!_dispatch_queue_is_thread_bound(dq))) { + DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" + " after dispatch_main()"); } - if (pqc->dpq_thread_configure) { - pqc->dpq_thread_configure(); + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "_dispatch_main_queue_callback_4CF called" + " from the wrong thread"); } - // workaround tweaks the kernel workqueue does for us - _dispatch_sigmask(); - _dispatch_introspection_thread_add(); + dispatch_once_f(&_dispatch_main_q_handle_pred, dq, + _dispatch_runloop_queue_handle_init); -#if DISPATCH_USE_INTERNAL_WORKQUEUE - bool overcommit = (qc->dgq_wq_options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT); - bool manager = (dq == &_dispatch_mgr_root_queue); - bool monitored = !(overcommit || manager); - if (monitored) { - _dispatch_workq_worker_register(dq, qc->dgq_qos); + // hide the frame chaining when CFRunLoop + // drains the main runloop, as this should not be observable that way + _dispatch_adopt_wlh_anon(); + _dispatch_thread_frame_push_and_rebase(&dtf, dq, NULL); + + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + dispatch_qos_t qos = _dispatch_priority_qos(pri); + voucher_t voucher = _voucher_copy(); + + if (unlikely(qos != 
_dispatch_priority_qos(dq->dq_priority))) { + _dispatch_main_queue_update_priority_from_thread(); } -#endif + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); - const int64_t timeout = 5ull * NSEC_PER_SEC; - pthread_priority_t old_pri = _dispatch_get_priority(); + dispatch_invoke_context_s dic = { }; + struct dispatch_object_s *dc, *next_dc, *tail; + dc = os_mpsc_capture_snapshot(os_mpsc(dq, dq_items), &tail); do { - _dispatch_root_queue_drain(dq, old_pri); - _dispatch_reset_priority_and_voucher(old_pri, NULL); - } while (dispatch_semaphore_wait(&pqc->dpq_thread_mediator, - dispatch_time(0, timeout)) == 0); + next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); + _dispatch_continuation_pop_inline(dc, &dic, + DISPATCH_INVOKE_THREAD_BOUND, dq); + } while ((dc = next_dc)); -#if DISPATCH_USE_INTERNAL_WORKQUEUE - if (monitored) { - _dispatch_workq_worker_unregister(dq, qc->dgq_qos); - } -#endif - (void)os_atomic_inc2o(qc, dgq_thread_pool_size, release); - _dispatch_global_queue_poke(dq, 1, 0); - _dispatch_release(dq); // retained in _dispatch_global_queue_poke_slow - return NULL; + dx_wakeup(dq->_as_dq, 0, 0); + _dispatch_voucher_debug("main queue restore", voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); + _dispatch_thread_frame_pop(&dtf); + _dispatch_reset_wlh(); + _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); } -#endif // DISPATCH_USE_PTHREAD_POOL - -#pragma mark - -#pragma mark dispatch_network_root_queue -#if TARGET_OS_MAC -dispatch_queue_t -_dispatch_network_root_queue_create_4NW(const char *label, - const pthread_attr_t *attrs, dispatch_block_t configure) +static bool +_dispatch_runloop_queue_drain_one(dispatch_lane_t dq) { - unsigned long flags = dispatch_pthread_root_queue_flags_pool_size(1); - return dispatch_pthread_root_queue_create(label, flags, attrs, configure); -} + if 
(!dq->dq_items_tail) { + return false; + } + _dispatch_perfmon_start_notrace(); + dispatch_thread_frame_s dtf; + bool should_reset_wlh = _dispatch_adopt_wlh_anon_recurse(); + _dispatch_thread_frame_push(&dtf, dq); + pthread_priority_t pp = _dispatch_get_priority(); + dispatch_priority_t pri = _dispatch_priority_from_pp(pp); + voucher_t voucher = _voucher_copy(); + dispatch_priority_t old_dbp = _dispatch_set_basepri(pri); + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); -#endif // TARGET_OS_MAC -#pragma mark - -#pragma mark dispatch_runloop_queue + dispatch_invoke_context_s dic = { }; + struct dispatch_object_s *dc, *next_dc; + dc = _dispatch_queue_get_head(dq); + next_dc = _dispatch_queue_pop_head(dq, dc); + _dispatch_continuation_pop_inline(dc, &dic, + DISPATCH_INVOKE_THREAD_BOUND, dq); -static bool _dispatch_program_is_probably_callback_driven; + if (!next_dc) { + dx_wakeup(dq, 0, 0); + } -#if DISPATCH_COCOA_COMPAT + _dispatch_voucher_debug("runloop queue restore", voucher); + _dispatch_reset_basepri(old_dbp); + _dispatch_reset_basepri_override(); + _dispatch_reset_priority_and_voucher(pp, voucher); + _dispatch_thread_frame_pop(&dtf); + if (should_reset_wlh) _dispatch_reset_wlh(); + _dispatch_force_cache_cleanup(); + _dispatch_perfmon_end_notrace(); + return next_dc; +} -dispatch_queue_t +dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) { - dispatch_queue_t dq; - size_t dqs; + dispatch_lane_t dq; - if (slowpath(flags)) { + if (unlikely(flags)) { return DISPATCH_BAD_INPUT; } - dqs = sizeof(struct dispatch_queue_s) - DISPATCH_QUEUE_CACHELINE_PAD; - dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), dqs); - _dispatch_queue_init(dq, DQF_THREAD_BOUND | DQF_CANNOT_TRYSYNC, 1, + dq = _dispatch_object_alloc(DISPATCH_VTABLE(queue_runloop), + sizeof(struct dispatch_lane_s)); + _dispatch_queue_init(dq, DQF_THREAD_BOUND, 1, DISPATCH_QUEUE_ROLE_BASE_ANON); - dq->do_targetq = 
_dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + dq->do_targetq = _dispatch_get_default_queue(true); dq->dq_label = label ? label : "runloop-queue"; // no-copy contract _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); _dispatch_object_debug(dq, "%s", __func__); - return _dispatch_introspection_queue_create(dq); + return _dispatch_trace_queue_create(dq)._dl; } void -_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) +_dispatch_runloop_queue_xref_dispose(dispatch_lane_t dq) { _dispatch_object_debug(dq, "%s", __func__); @@ -6063,22 +6586,22 @@ _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq) } void -_dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free) +_dispatch_runloop_queue_dispose(dispatch_lane_t dq, bool *allow_free) { _dispatch_object_debug(dq, "%s", __func__); - _dispatch_introspection_queue_dispose(dq); + _dispatch_trace_queue_dispose(dq); _dispatch_runloop_queue_handle_dispose(dq); - _dispatch_queue_destroy(dq, allow_free); + _dispatch_lane_class_dispose(dq, allow_free); } bool _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) { - if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); } dispatch_retain(dq); - bool r = _dispatch_runloop_queue_drain_one(dq); + bool r = _dispatch_runloop_queue_drain_one(upcast(dq)._dl); dispatch_release(dq); return r; } @@ -6086,117 +6609,35 @@ _dispatch_runloop_root_queue_perform_4CF(dispatch_queue_t dq) void _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) { - if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); } - 
_dispatch_runloop_queue_wakeup(dq, 0, false); + _dispatch_runloop_queue_wakeup(upcast(dq)._dl, 0, false); } #if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { - if (slowpath(dq->do_vtable != DISPATCH_VTABLE(queue_runloop))) { - DISPATCH_CLIENT_CRASH(dq->do_vtable, "Not a runloop queue"); - } - return _dispatch_runloop_queue_get_handle(dq); -} -#endif - -static void -_dispatch_runloop_queue_handle_init(void *ctxt) -{ - dispatch_queue_t dq = (dispatch_queue_t)ctxt; - dispatch_runloop_handle_t handle; - - _dispatch_fork_becomes_unsafe(); - -#if TARGET_OS_MAC - mach_port_t mp; - kern_return_t kr; - kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &mp); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - kr = mach_port_insert_right(mach_task_self(), mp, mp, - MACH_MSG_TYPE_MAKE_SEND); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - if (dq != &_dispatch_main_q) { - struct mach_port_limits limits = { - .mpl_qlimit = 1, - }; - kr = mach_port_set_attributes(mach_task_self(), mp, - MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits, - sizeof(limits)); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - } - handle = mp; -#elif defined(__linux__) - int fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - if (fd == -1) { - int err = errno; - switch (err) { - case EMFILE: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "process is out of file descriptors"); - break; - case ENFILE: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "system is out of file descriptors"); - break; - case ENOMEM: - DISPATCH_CLIENT_CRASH(err, "eventfd() failure: " - "kernel is out of memory"); - break; - default: - DISPATCH_INTERNAL_CRASH(err, "eventfd() failure"); - break; - } + if (unlikely(dx_type(dq) != DISPATCH_QUEUE_RUNLOOP_TYPE)) { + DISPATCH_CLIENT_CRASH(dx_type(dq), "Not a runloop queue"); } - handle = fd; -#else -#error "runloop support not implemented on this platform" -#endif - 
_dispatch_runloop_queue_set_handle(dq, handle); - - _dispatch_program_is_probably_callback_driven = true; + return _dispatch_runloop_queue_get_handle(upcast(dq)._dl); } - -static void -_dispatch_runloop_queue_handle_dispose(dispatch_queue_t dq) -{ - dispatch_runloop_handle_t handle = _dispatch_runloop_queue_get_handle(dq); - if (!_dispatch_runloop_handle_is_valid(handle)) { - return; - } - dq->do_ctxt = NULL; -#if TARGET_OS_MAC - mach_port_t mp = handle; - kern_return_t kr = mach_port_deallocate(mach_task_self(), mp); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); - kr = mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_RECEIVE, -1); - DISPATCH_VERIFY_MIG(kr); - (void)dispatch_assume_zero(kr); -#elif defined(__linux__) - int rc = close(handle); - (void)dispatch_assume_zero(rc); -#else -#error "runloop support not implemented on this platform" #endif -} +#endif // DISPATCH_COCOA_COMPAT #pragma mark - #pragma mark dispatch_main_queue +#if DISPATCH_COCOA_COMPAT dispatch_runloop_handle_t _dispatch_get_main_queue_handle_4CF(void) { - dispatch_queue_t dq = &_dispatch_main_q; + dispatch_queue_main_t dq = &_dispatch_main_q; dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); - return _dispatch_runloop_queue_get_handle(dq); + return _dispatch_runloop_queue_get_handle(dq->_as_dl); } #if TARGET_OS_MAC @@ -6207,30 +6648,71 @@ _dispatch_get_main_queue_port_4CF(void) } #endif -static bool main_q_is_draining; +void +_dispatch_main_queue_callback_4CF( + void *ignored DISPATCH_UNUSED) +{ + // the main queue cannot be suspended and no-one looks at this bit + // so abuse it to avoid dirtying more memory + + if (_dispatch_main_q.dq_side_suspend_cnt) { + return; + } + _dispatch_main_q.dq_side_suspend_cnt = true; + _dispatch_main_queue_drain(&_dispatch_main_q); + _dispatch_main_q.dq_side_suspend_cnt = false; +} + +#endif // DISPATCH_COCOA_COMPAT -// 6618342 Contact the team that owns the Instrument DTrace probe before -// 
renaming this symbol DISPATCH_NOINLINE -static void -_dispatch_queue_set_mainq_drain_state(bool arg) +void +_dispatch_main_queue_push(dispatch_queue_main_t dq, dispatch_object_t dou, + dispatch_qos_t qos) { - main_q_is_draining = arg; + // Same as _dispatch_lane_push() but without the refcounting due to being + // a global object + if (_dispatch_queue_push_item(dq, dou)) { + return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); + } + + qos = _dispatch_queue_push_qos(dq, qos); + if (_dispatch_queue_need_override(dq, qos)) { + return dx_wakeup(dq, qos, 0); + } } void -_dispatch_main_queue_callback_4CF( - void *ignored DISPATCH_UNUSED) +_dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ +#if DISPATCH_COCOA_COMPAT + if (_dispatch_queue_is_thread_bound(dq)) { + return _dispatch_runloop_queue_wakeup(dq->_as_dl, qos, flags); + } +#endif + return _dispatch_lane_wakeup(dq, qos, flags); +} + +DISPATCH_NOINLINE DISPATCH_NORETURN +static void +_dispatch_sigsuspend(void) { - if (main_q_is_draining) { - return; + static const sigset_t mask; + + for (;;) { + sigsuspend(&mask); } - _dispatch_queue_set_mainq_drain_state(true); - _dispatch_main_queue_drain(); - _dispatch_queue_set_mainq_drain_state(false); } -#endif +DISPATCH_NORETURN +static void +_dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) +{ + // never returns, so burn bridges behind us + _dispatch_clear_stack(0); + _dispatch_sigsuspend(); +} void dispatch_main(void) @@ -6262,31 +6744,11 @@ dispatch_main(void) #endif } -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_sigsuspend(void) -{ - static const sigset_t mask; - - for (;;) { - sigsuspend(&mask); - } -} - -DISPATCH_NORETURN -static void -_dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) -{ - // never returns, so burn bridges behind us - _dispatch_clear_stack(0); - _dispatch_sigsuspend(); -} - DISPATCH_NOINLINE static void _dispatch_queue_cleanup2(void) { - dispatch_queue_t dq = &_dispatch_main_q; + 
dispatch_queue_main_t dq = &_dispatch_main_q; uint64_t old_state, new_state; // Turning the main queue from a runloop queue into an ordinary serial queue @@ -6304,8 +6766,8 @@ _dispatch_queue_cleanup2(void) new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; new_state += DISPATCH_QUEUE_IN_BARRIER; }); - _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND|DQF_CANNOT_TRYSYNC); - _dispatch_queue_barrier_complete(dq, 0, 0); + _dispatch_queue_atomic_flags_clear(dq, DQF_THREAD_BOUND); + _dispatch_lane_barrier_complete(dq, 0, 0); // overload the "probably" variable to mean that dispatch_main() or // similar non-POSIX API was called @@ -6313,8 +6775,8 @@ _dispatch_queue_cleanup2(void) // See dispatch_main for call to _dispatch_sig_thread on linux. #ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { - _dispatch_barrier_async_detached_f(_dispatch_get_root_queue( - DISPATCH_QOS_DEFAULT, true), NULL, _dispatch_sig_thread); + _dispatch_barrier_async_detached_f(_dispatch_get_default_queue(true), + NULL, _dispatch_sig_thread); sleep(1); // workaround 6778970 } #endif @@ -6322,7 +6784,7 @@ _dispatch_queue_cleanup2(void) #if DISPATCH_COCOA_COMPAT dispatch_once_f(&_dispatch_main_q_handle_pred, dq, _dispatch_runloop_queue_handle_init); - _dispatch_runloop_queue_handle_dispose(dq); + _dispatch_runloop_queue_handle_dispose(dq->_as_dl); #endif } @@ -6372,3 +6834,261 @@ _dispatch_context_cleanup(void *ctxt) DISPATCH_INTERNAL_CRASH(ctxt, "Premature thread exit while a dispatch context is set"); } +#pragma mark - +#pragma mark dispatch_init + +static void +_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) +{ + _dispatch_fork_becomes_unsafe(); +#if DISPATCH_USE_INTERNAL_WORKQUEUE + size_t i; + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0, + _dispatch_root_queues[i].dq_priority); + } +#else + int wq_supported = _pthread_workqueue_supported(); + int r = ENOTSUP; + + if (!(wq_supported & 
WORKQ_FEATURE_MAINTENANCE)) { + DISPATCH_INTERNAL_CRASH(wq_supported, + "QoS Maintenance support required"); + } + + if (unlikely(!_dispatch_kevent_workqueue_enabled)) { + r = _pthread_workqueue_init(_dispatch_worker_thread2, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#if DISPATCH_USE_KEVENT_WORKQUEUE + } else if (wq_supported & WORKQ_FEATURE_KEVENT) { + r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif + } else { + DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); + } + + if (r != 0) { + DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, + "Root queue initialization failed"); + } +#endif // DISPATCH_USE_INTERNAL_WORKQUEUE +} + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_root_queues_pred); +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_root_queues_init(void) +{ + dispatch_once_f(&_dispatch_root_queues_pred, NULL, + _dispatch_root_queues_init_once); +} + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +libdispatch_init(void) +{ + dispatch_assert(sizeof(struct dispatch_apply_s) <= + DISPATCH_CONTINUATION_SIZE); + + if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) { + _dispatch_mode |= DISPATCH_MODE_STRICT; + } +#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { + _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; + } else if (getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { + _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; + } +#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + + +#if DISPATCH_DEBUG || DISPATCH_PROFILE +#if DISPATCH_USE_KEVENT_WORKQUEUE + if (getenv("LIBDISPATCH_DISABLE_KEVENT_WQ")) { + _dispatch_kevent_workqueue_enabled = false; + } +#endif +#endif + +#if HAVE_PTHREAD_WORKQUEUE_QOS + dispatch_qos_t qos = 
_dispatch_qos_from_qos_class(qos_class_main()); + _dispatch_main_q.dq_priority = _dispatch_priority_make(qos, 0); +#if DISPATCH_DEBUG + if (!getenv("LIBDISPATCH_DISABLE_SET_QOS")) { + _dispatch_set_qos_class_enabled = 1; + } +#endif +#endif + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE + _dispatch_thread_key_create(&__dispatch_tsd_key, _libdispatch_tsd_cleanup); +#else + _dispatch_thread_key_create(&dispatch_priority_key, NULL); + _dispatch_thread_key_create(&dispatch_r2k_key, NULL); + _dispatch_thread_key_create(&dispatch_queue_key, _dispatch_queue_cleanup); + _dispatch_thread_key_create(&dispatch_frame_key, _dispatch_frame_cleanup); + _dispatch_thread_key_create(&dispatch_cache_key, _dispatch_cache_cleanup); + _dispatch_thread_key_create(&dispatch_context_key, _dispatch_context_cleanup); + _dispatch_thread_key_create(&dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _dispatch_thread_key_create(&dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _dispatch_thread_key_create(&dispatch_introspection_key , NULL); +#elif DISPATCH_PERF_MON + _dispatch_thread_key_create(&dispatch_bcounter_key, NULL); +#endif + _dispatch_thread_key_create(&dispatch_wlh_key, _dispatch_wlh_cleanup); + _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); + _dispatch_thread_key_create(&dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); +#endif + +#if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 + _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); +#endif + + _dispatch_queue_set_current(&_dispatch_main_q); + _dispatch_queue_set_bound_thread(&_dispatch_main_q); + +#if DISPATCH_USE_PTHREAD_ATFORK + (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, + dispatch_atfork_parent, dispatch_atfork_child)); +#endif + _dispatch_hw_config_init(); + _dispatch_time_init(); + _dispatch_vtable_init(); + _os_object_init(); + _voucher_init(); + _dispatch_introspection_init(); +} + +#if DISPATCH_USE_THREAD_LOCAL_STORAGE 
+#include +#include + +#ifndef __ANDROID__ +#ifdef SYS_gettid +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t) syscall(SYS_gettid); +} +#else +#error "SYS_gettid unavailable on this system" +#endif /* SYS_gettid */ +#endif /* ! __ANDROID__ */ + +#define _tsd_call_cleanup(k, f) do { \ + if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ + } while (0) + +#ifdef __ANDROID__ +static void (*_dispatch_thread_detach_callback)(void); + +void +_dispatch_install_thread_detach_callback(dispatch_function_t cb) +{ + if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { + DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); + } +} +#endif + +void +_libdispatch_tsd_cleanup(void *ctx) +{ + struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; + + _tsd_call_cleanup(dispatch_priority_key, NULL); + _tsd_call_cleanup(dispatch_r2k_key, NULL); + + _tsd_call_cleanup(dispatch_queue_key, _dispatch_queue_cleanup); + _tsd_call_cleanup(dispatch_frame_key, _dispatch_frame_cleanup); + _tsd_call_cleanup(dispatch_cache_key, _dispatch_cache_cleanup); + _tsd_call_cleanup(dispatch_context_key, _dispatch_context_cleanup); + _tsd_call_cleanup(dispatch_pthread_root_queue_observer_hooks_key, + NULL); + _tsd_call_cleanup(dispatch_basepri_key, NULL); +#if DISPATCH_INTROSPECTION + _tsd_call_cleanup(dispatch_introspection_key, NULL); +#elif DISPATCH_PERF_MON + _tsd_call_cleanup(dispatch_bcounter_key, NULL); +#endif + _tsd_call_cleanup(dispatch_wlh_key, _dispatch_wlh_cleanup); + _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); + _tsd_call_cleanup(dispatch_deferred_items_key, + _dispatch_deferred_items_cleanup); +#ifdef __ANDROID__ + if (_dispatch_thread_detach_callback) { + _dispatch_thread_detach_callback(); + } +#endif + tsd->tid = 0; +} + +DISPATCH_NOINLINE +void +libdispatch_tsd_init(void) +{ + pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); + __dispatch_tsd.tid = gettid(); +} +#endif + +DISPATCH_NOTHROW +void 
+_dispatch_queue_atfork_child(void) +{ + dispatch_queue_main_t main_q = &_dispatch_main_q; + void *crash = (void *)0x100; + size_t i; + + if (_dispatch_queue_is_thread_bound(main_q)) { + _dispatch_queue_set_bound_thread(main_q); + } + + if (!_dispatch_is_multithreaded_inline()) return; + + main_q->dq_items_head = crash; + main_q->dq_items_tail = crash; + + _dispatch_mgr_q.dq_items_head = crash; + _dispatch_mgr_q.dq_items_tail = crash; + + for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) { + _dispatch_root_queues[i].dq_items_head = crash; + _dispatch_root_queues[i].dq_items_tail = crash; + } +} + +DISPATCH_NOINLINE +void +_dispatch_fork_becomes_unsafe_slow(void) +{ + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { + DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); + } +} + +DISPATCH_NOINLINE +void +_dispatch_prohibit_transition_to_multithreaded(bool prohibit) +{ + if (prohibit) { + uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { + DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); + } + } else { + os_atomic_and(&_dispatch_unsafe_fork, + (uint8_t)~_DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); + } +} diff --git a/src/queue_internal.h b/src/queue_internal.h index f70356a2c..a627f0e9c 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -32,24 +32,8 @@ #include // for HeaderDoc #endif -#if defined(__BLOCKS__) && !defined(DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES) -#define DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES 1 // -#endif - -/* x86 & cortex-a8 have a 64 byte cacheline */ -#define DISPATCH_CACHELINE_SIZE 64u -#define ROUND_UP_TO_CACHELINE_SIZE(x) \ - (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \ - ~(DISPATCH_CACHELINE_SIZE - 1u)) -#define DISPATCH_CACHELINE_ALIGN \ - __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) - -#define 
DISPATCH_CACHELINE_PAD_SIZE(type) \ - (roundup(sizeof(type), DISPATCH_CACHELINE_SIZE) - sizeof(type)) - - #pragma mark - -#pragma mark dispatch_queue_t +#pragma mark dispatch_queue_flags, dq_state DISPATCH_ENUM(dispatch_queue_flags, uint32_t, DQF_NONE = 0x00000000, @@ -59,94 +43,61 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, DQF_THREAD_BOUND = 0x00040000, // queue is bound to a thread DQF_BARRIER_BIT = 0x00080000, // queue is a barrier on its target DQF_TARGETED = 0x00100000, // queue is targeted by another object - DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was strduped; need to free it - DQF_CANNOT_TRYSYNC = 0x00400000, + DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was strdup()ed + DQF_MUTABLE = 0x00400000, DQF_RELEASED = 0x00800000, // xref_cnt == -1 - DQF_LEGACY = 0x01000000, - // only applies to sources // - // Assuming DSF_ARMED (a), DSF_DEFERRED_DELETE (p), DSF_DELETED (d): + // Only applies to sources + // + // @const DSF_STRICT + // Semantics of the source are strict (implies DQF_MUTABLE being unset): + // - handlers can't be changed past activation + // - EV_VANISHED causes a hard failure + // - source can't change WLH + // + // @const DSF_WLH_CHANGED + // The wlh for the source changed (due to retarget past activation). + // Only used for debugging and diagnostics purposes. // - // --- - // a-- - // source states for regular operations - // (delivering event / waiting for event) + // @const DSF_CANCELED + // Explicit cancelation has been requested. // - // ap- - // Either armed for deferred deletion delivery, waiting for an EV_DELETE, - // and the next state will be -pd (EV_DELETE delivered), - // Or, a cancellation raced with an event delivery and failed - // (EINPROGRESS), and when the event delivery happens, the next state - // will be -p-. + // @const DSF_CANCEL_WAITER + // At least one caller of dispatch_source_cancel_and_wait() is waiting on + // the cancelation to finish. DSF_CANCELED must be set if this bit is set. 
// - // -pd - // Received EV_DELETE (from ap-), needs to unregister ds_refs, the muxnote - // is gone from the kernel. Next state will be --d. + // @const DSF_NEEDS_EVENT + // The source has started to delete its unotes due to cancelation, but + // couldn't finish its unregistration and is waiting for some asynchronous + // events to fire to be able to. // - // -p- - // Received an EV_ONESHOT event (from a--), or the delivery of an event - // causing the cancellation to fail with EINPROGRESS was delivered - // (from ap-). The muxnote still lives, next state will be --d. + // This flag prevents spurious wakeups when the source state machine + // requires specific events to make progress. Events that are likely + // to unblock a source state machine pass DISPATCH_WAKEUP_EVENT + // which neuters the effect of DSF_NEEDS_EVENT. // - // --d - // Final state of the source, the muxnote is gone from the kernel and - // ds_refs is unregistered. The source can safely be released. + // @const DSF_DELETED + // The source can now only be used as a queue and is not allowed to register + // any new unote anymore. All the previously registered unotes are inactive + // and their knote is gone. However, these previously registered unotes may + // still be in the process of delivering their last event. // - // a-d (INVALID) - // apd (INVALID) - // Setting DSF_DELETED should also always atomically clear DSF_ARMED. If - // the muxnote is gone from the kernel, it makes no sense whatsoever to - // have it armed. And generally speaking, once `d` or `p` has been set, - // `a` cannot do a cleared -> set transition anymore - // (see _dispatch_source_try_set_armed). + // Sources have an internal refcount taken always while they use eventing + // subsystems which is consumed when this bit is set. 
// - DSF_WLH_CHANGED = 0x04000000, - DSF_CANCEL_WAITER = 0x08000000, // synchronous waiters for cancel - DSF_CANCELED = 0x10000000, // cancellation has been requested - DSF_ARMED = 0x20000000, // source is armed - DSF_DEFERRED_DELETE = 0x40000000, // source is pending delete - DSF_DELETED = 0x80000000, // source muxnote is deleted -#define DSF_STATE_MASK (DSF_ARMED | DSF_DEFERRED_DELETE | DSF_DELETED) + DSF_STRICT = 0x04000000, + DSF_WLH_CHANGED = 0x08000000, + DSF_CANCELED = 0x10000000, + DSF_CANCEL_WAITER = 0x20000000, + DSF_NEEDS_EVENT = 0x40000000, + DSF_DELETED = 0x80000000, #define DQF_FLAGS_MASK ((dispatch_queue_flags_t)0xffff0000) #define DQF_WIDTH_MASK ((dispatch_queue_flags_t)0x0000ffff) #define DQF_WIDTH(n) ((dispatch_queue_flags_t)(uint16_t)(n)) ); -#define _DISPATCH_QUEUE_HEADER(x) \ - struct os_mpsc_queue_s _as_oq[0]; \ - DISPATCH_OBJECT_HEADER(x); \ - _OS_MPSC_QUEUE_FIELDS(dq, dq_state); \ - uint32_t dq_side_suspend_cnt; \ - dispatch_unfair_lock_s dq_sidelock; \ - union { \ - dispatch_queue_t dq_specific_q; \ - struct dispatch_source_refs_s *ds_refs; \ - struct dispatch_timer_source_refs_s *ds_timer_refs; \ - struct dispatch_mach_recv_refs_s *dm_recv_refs; \ - }; \ - DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \ - const uint16_t dq_width, \ - const uint16_t __dq_opaque \ - ); \ - DISPATCH_INTROSPECTION_QUEUE_HEADER - /* LP64: 32bit hole */ - -#define DISPATCH_QUEUE_HEADER(x) \ - struct dispatch_queue_s _as_dq[0]; \ - _DISPATCH_QUEUE_HEADER(x) - -struct _dispatch_unpadded_queue_s { - _DISPATCH_QUEUE_HEADER(dummy); -}; - -#define DISPATCH_QUEUE_CACHELINE_PAD \ - DISPATCH_CACHELINE_PAD_SIZE(struct _dispatch_unpadded_queue_s) - -#define DISPATCH_QUEUE_CACHELINE_PADDING \ - char _dq_pad[DISPATCH_QUEUE_CACHELINE_PAD] - /* * dispatch queues `dq_state` demystified * @@ -240,12 +191,12 @@ struct _dispatch_unpadded_queue_s { * * When done, any "Drainer", in particular for dispatch_*_sync() handoff * paths, exits in 3 steps, and the point of the DIRTY 
bit is to make - * the Drainers take the slowpath at step 2 to take into account enqueuers + * the Drainers take the slow path at step 2 to take into account enqueuers * that could have made the queue non idle concurrently. * * * // drainer-exit step 1 - * if (slowpath(dq->dq_items_tail)) { // speculative test + * if (unlikely(dq->dq_items_tail)) { // speculative test * return handle_non_empty_queue_or_wakeup(dq); * } * // drainer-exit step 2 @@ -487,7 +438,7 @@ struct _dispatch_unpadded_queue_s { ((DISPATCH_QUEUE_WIDTH_FULL - (width)) << DISPATCH_QUEUE_WIDTH_SHIFT) /* Magic dq_state values for global queues: they have QUEUE_FULL and IN_BARRIER - * set to force the slowpath in both dispatch_barrier_sync() and dispatch_sync() + * set to force the slow path in dispatch_barrier_sync() and dispatch_sync() */ #define DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE \ (DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER) @@ -495,43 +446,269 @@ struct _dispatch_unpadded_queue_s { #define DISPATCH_QUEUE_SERIAL_DRAIN_OWNED \ (DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_WIDTH_INTERVAL) -DISPATCH_CLASS_DECL(queue); +#pragma mark - +#pragma mark dispatch_queue_t + +typedef struct dispatch_queue_specific_s { + const void *dqs_key; + void *dqs_ctxt; + dispatch_function_t dqs_destructor; + TAILQ_ENTRY(dispatch_queue_specific_s) dqs_entry; +} *dispatch_queue_specific_t; + +typedef struct dispatch_queue_specific_head_s { + dispatch_unfair_lock_s dqsh_lock; + TAILQ_HEAD(, dispatch_queue_specific_s) dqsh_entries; +} *dispatch_queue_specific_head_t; + +#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x1u +#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x2u +#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x4u +#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x8u +#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x10u +typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; +typedef struct dispatch_workloop_attr_s { + uint32_t dwla_flags; + dispatch_priority_t dwla_pri; + struct sched_param dwla_sched; 
+ int dwla_policy; + struct { + uint8_t percent; + uint32_t refillms; + } dwla_cpupercent; +} dispatch_workloop_attr_s; + +/* + * Dispatch Queue cluster related types + * + * The dispatch queue cluster uses aliasing structs, and loosely follows the + * external types exposed in + * + * The API types pretend to have this hierarchy: + * + * dispatch_queue_t + * +--> dispatch_workloop_t + * +--> dispatch_queue_serial_t --> dispatch_queue_main_t + * +--> dispatch_queue_concurrent_t + * '--> dispatch_queue_global_t + * + * + * However, in the library itself, there are more types and a finer grained + * hierarchy when it comes to the struct members. + * + * dispatch_queue_class_t / struct dispatch_queue_s + * +--> struct dispatch_workloop_s + * '--> dispatch_lane_class_t + * +--> struct dispatch_lane_s + * | +--> struct dispatch_source_s + * | '--> struct dispatch_mach_s + * +--> struct dispatch_queue_static_s + * '--> struct dispatch_queue_global_s + * +--> struct dispatch_queue_pthread_root_s + * + * + * dispatch_queue_class_t && struct dispatch_queue_s + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * The queue class type is a transparent union of all queue types, which allows + * cutting down the explicit downcasts to `dispatch_queue_t` when calling + * a function working on any dispatch_queue_t type. + * + * The concrete struct layout is struct dispatch_queue_s + * it provides: + * - dispatch object fields + * - dq_state + * - dq_serialnum + * - dq_label + * - dq_atomic_flags + * - dq_sref_cnt + * - an auxiliary pointer used by sub-classes (dq_specific_head, ds_refs, ...) + * - dq_priority (XXX: we should push it down to lanes) + * + * It also provides storage for one opaque pointer sized field. + * + * dispatch_lane_class_t + * ~~~~~~~~~~~~~~~~~~~~~ + * + * The lane class type is a transparent union of all "lane" types, which have + * a single head/tail pair. 
+ * + * There's no proper concrete struct layout associated, `struct dispatch_lane_s` + * is used most of the time instead. The lane class adds: + * - dq_items_head + * - dq_items_tail (allocated in the hole the queue class carves out) + * + * + * struct dispatch_lane_s and variants + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This is the concrete type used for: + * - API serial/concurrent/runloop queues + * - sources and mach channels + * - the main and manager queues, as struct dispatch_queue_static_s which is + * a cacheline aligned variant of struct dispatch_lane_s. + * + * It also provides: + * - dq_sidelock, used for suspension & target queue handling, + * - dq_side_suspend_cnt. + * + * Sources (struct dispatch_source_s) and mach channels (struct dispatch_mach_s) + * use the last 32bit word for flags private to their use. + * + * struct dispatch_queue_global_s is used for all dispatch root queues: + * - global concurent queues + * - pthread root queues + * - the network event thread + * + * These pretend to derive from dispatch_lane_s but use the dq_sidelock, + * dq_side_suspend_cnt differently, which is possible because root queues cannot + * be targetted or suspended and hence have no use for these. 
+ */ + +#if OS_OBJECT_HAVE_OBJC1 +#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \ + DISPATCH_OBJECT_HEADER(x); \ + DISPATCH_UNION_LE(uint64_t volatile dq_state, \ + dispatch_lock dq_state_lock, \ + uint32_t dq_state_bits \ + ); \ + __pointer_sized_field__ +#else +#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \ + DISPATCH_OBJECT_HEADER(x); \ + __pointer_sized_field__; \ + DISPATCH_UNION_LE(uint64_t volatile dq_state, \ + dispatch_lock dq_state_lock, \ + uint32_t dq_state_bits \ + ) +#endif + +#define DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \ + _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__); \ + /* LP64 global queue cacheline boundary */ \ + unsigned long dq_serialnum; \ + const char *dq_label; \ + DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \ + const uint16_t dq_width, \ + const uint16_t __dq_opaque2 \ + ); \ + dispatch_priority_t dq_priority; \ + union { \ + struct dispatch_queue_specific_head_s *dq_specific_head; \ + struct dispatch_source_refs_s *ds_refs; \ + struct dispatch_timer_source_refs_s *ds_timer_refs; \ + struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + }; \ + int volatile dq_sref_cnt -#if !defined(__cplusplus) || !DISPATCH_INTROSPECTION struct dispatch_queue_s { - _DISPATCH_QUEUE_HEADER(queue); - DISPATCH_QUEUE_CACHELINE_PADDING; // for static queues only + DISPATCH_QUEUE_CLASS_HEADER(queue, void *__dq_opaque1); + /* 32bit hole on LP64 */ } DISPATCH_ATOMIC64_ALIGN; -#if __has_feature(c_static_assert) && !DISPATCH_INTROSPECTION -_Static_assert(sizeof(struct dispatch_queue_s) <= 128, "dispatch queue size"); +struct dispatch_workloop_s { + struct dispatch_queue_s _as_dq[0]; + DISPATCH_QUEUE_CLASS_HEADER(workloop, dispatch_timer_heap_t dwl_timer_heap); + uint8_t dwl_drained_qos; + /* 24 bits hole */ + struct dispatch_object_s *dwl_heads[DISPATCH_QOS_NBUCKETS]; + struct dispatch_object_s *dwl_tails[DISPATCH_QOS_NBUCKETS]; + dispatch_workloop_attr_t dwl_attr; +} 
DISPATCH_ATOMIC64_ALIGN; + +#define DISPATCH_LANE_CLASS_HEADER(x) \ + struct dispatch_queue_s _as_dq[0]; \ + DISPATCH_QUEUE_CLASS_HEADER(x, \ + struct dispatch_object_s *volatile dq_items_tail); \ + dispatch_unfair_lock_s dq_sidelock; \ + struct dispatch_object_s *volatile dq_items_head; \ + uint32_t dq_side_suspend_cnt + +typedef struct dispatch_lane_s { + DISPATCH_LANE_CLASS_HEADER(lane); + /* 32bit hole on LP64 */ +} DISPATCH_ATOMIC64_ALIGN *dispatch_lane_t; + +// Cache aligned type for static queues (main queue, manager) +struct dispatch_queue_static_s { + struct dispatch_lane_s _as_dl[0]; \ + DISPATCH_LANE_CLASS_HEADER(lane); +} DISPATCH_CACHELINE_ALIGN; + +#define DISPATCH_QUEUE_ROOT_CLASS_HEADER(x) \ + struct dispatch_queue_s _as_dq[0]; \ + DISPATCH_QUEUE_CLASS_HEADER(x, \ + struct dispatch_object_s *volatile dq_items_tail); \ + int volatile dgq_thread_pool_size; \ + struct dispatch_object_s *volatile dq_items_head; \ + int volatile dgq_pending + +struct dispatch_queue_global_s { + DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane); +} DISPATCH_CACHELINE_ALIGN; + +#if DISPATCH_USE_PTHREAD_POOL +typedef struct dispatch_pthread_root_queue_context_s { + pthread_attr_t dpq_thread_attr; + dispatch_block_t dpq_thread_configure; + struct dispatch_semaphore_s dpq_thread_mediator; + dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks; +} *dispatch_pthread_root_queue_context_t; +#endif // DISPATCH_USE_PTHREAD_POOL + +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +typedef struct dispatch_queue_pthread_root_s { + struct dispatch_queue_global_s _as_dgq[0]; + DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane); + struct dispatch_pthread_root_queue_context_s dpq_ctxt; +} *dispatch_queue_pthread_root_t; +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES + +dispatch_static_assert(sizeof(struct dispatch_queue_s) <= 128); +dispatch_static_assert(sizeof(struct dispatch_lane_s) <= 128); +dispatch_static_assert(sizeof(struct dispatch_queue_global_s) <= 128); +dispatch_static_assert(offsetof(struct 
dispatch_queue_s, dq_state) % + sizeof(uint64_t) == 0, "dq_state must be 8-byte aligned"); + +#define dispatch_assert_valid_queue_type(type) \ + dispatch_static_assert(sizeof(struct dispatch_queue_s) <= \ + sizeof(struct type), #type " smaller than dispatch_queue_s"); \ + dispatch_static_assert(_Alignof(struct type) >= sizeof(uint64_t), \ + #type " is not 8-byte aligned"); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_state); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_serialnum); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_label); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_atomic_flags); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_sref_cnt); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_specific_head); \ + dispatch_assert_aliases(dispatch_queue_s, type, dq_priority) + +#define dispatch_assert_valid_lane_type(type) \ + dispatch_assert_valid_queue_type(type); \ + dispatch_assert_aliases(dispatch_lane_s, type, dq_items_head); \ + dispatch_assert_aliases(dispatch_lane_s, type, dq_items_tail) + +dispatch_assert_valid_queue_type(dispatch_lane_s); +dispatch_assert_valid_lane_type(dispatch_queue_static_s); +dispatch_assert_valid_lane_type(dispatch_queue_global_s); +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +dispatch_assert_valid_lane_type(dispatch_queue_pthread_root_s); #endif -#endif // !defined(__cplusplus) || !DISPATCH_INTROSPECTION - -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_serial, queue); -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_concurrent, queue); -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_main, queue); -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_root, queue); -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue); -DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue); - -OS_OBJECT_INTERNAL_CLASS_DECL(dispatch_queue_specific_queue, dispatch_queue, - DISPATCH_OBJECT_VTABLE_HEADER(dispatch_queue_specific_queue)); - -typedef union { - struct os_mpsc_queue_s *_oq; - struct dispatch_queue_s *_dq; - struct dispatch_source_s *_ds; 
- struct dispatch_mach_s *_dm; - struct dispatch_queue_specific_queue_s *_dqsq; -#if USE_OBJC - os_mpsc_queue_t _ojbc_oq; - dispatch_queue_t _objc_dq; - dispatch_source_t _objc_ds; - dispatch_mach_t _objc_dm; - dispatch_queue_specific_queue_t _objc_dqsq; + +DISPATCH_CLASS_DECL(queue, QUEUE); +DISPATCH_CLASS_DECL_BARE(lane, QUEUE); +DISPATCH_CLASS_DECL(workloop, QUEUE); +DISPATCH_SUBCLASS_DECL(queue_serial, queue, lane); +DISPATCH_SUBCLASS_DECL(queue_main, queue_serial, lane); +DISPATCH_SUBCLASS_DECL(queue_concurrent, queue, lane); +DISPATCH_SUBCLASS_DECL(queue_global, queue, lane); +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_pthread_root, queue, lane); #endif -} dispatch_queue_class_t DISPATCH_TRANSPARENT_UNION; +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue_serial, lane); +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue_serial, lane); + +struct firehose_client_s; typedef struct dispatch_thread_context_s *dispatch_thread_context_t; typedef struct dispatch_thread_context_s { @@ -540,99 +717,109 @@ typedef struct dispatch_thread_context_s { union { size_t dtc_apply_nesting; dispatch_io_t dtc_io_in_barrier; + union firehose_buffer_u *dtc_fb; + void *dtc_mig_demux_ctx; }; } dispatch_thread_context_s; -typedef struct dispatch_thread_frame_s *dispatch_thread_frame_t; -typedef struct dispatch_thread_frame_s { - // must be in the same order as our TSD keys! - dispatch_queue_t dtf_queue; - dispatch_thread_frame_t dtf_prev; +typedef union dispatch_thread_frame_s *dispatch_thread_frame_t; +typedef union dispatch_thread_frame_s { + struct { + // must be in the same order as our TSD keys! 
+ dispatch_queue_t dtf_queue; + dispatch_thread_frame_t dtf_prev; + }; + void *dtf_pair[2]; } dispatch_thread_frame_s; typedef dispatch_queue_t dispatch_queue_wakeup_target_t; #define DISPATCH_QUEUE_WAKEUP_NONE ((dispatch_queue_wakeup_target_t)0) #define DISPATCH_QUEUE_WAKEUP_TARGET ((dispatch_queue_wakeup_target_t)1) -#define DISPATCH_QUEUE_WAKEUP_MGR (&_dispatch_mgr_q) +#define DISPATCH_QUEUE_WAKEUP_MGR (_dispatch_mgr_q._as_dq) #define DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT ((dispatch_queue_wakeup_target_t)-1) -void _dispatch_queue_class_wakeup(dispatch_queue_t dqu, dispatch_qos_t qos, +void _dispatch_queue_xref_dispose(dispatch_queue_class_t dq); +void _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target); +void _dispatch_queue_invoke_finish(dispatch_queue_t dq, + dispatch_invoke_context_t dic, dispatch_queue_t tq, uint64_t owned); + dispatch_priority_t _dispatch_queue_compute_priority_and_wlh( - dispatch_queue_t dq, dispatch_wlh_t *wlh_out); -void _dispatch_queue_destroy(dispatch_queue_t dq, bool *allow_free); -void _dispatch_queue_dispose(dispatch_queue_t dq, bool *allow_free); -void _dispatch_queue_xref_dispose(struct dispatch_queue_s *dq); -void _dispatch_queue_set_target_queue(dispatch_queue_t dq, dispatch_queue_t tq); -void _dispatch_queue_suspend(dispatch_queue_t dq); -void _dispatch_queue_resume(dispatch_queue_t dq, bool activate); -void _dispatch_queue_finalize_activation(dispatch_queue_t dq, - bool *allow_resume); -void _dispatch_queue_invoke(dispatch_queue_t dq, + dispatch_queue_class_t dq, dispatch_wlh_t *wlh_out); + +void _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq); +void _dispatch_lane_class_dispose(dispatch_queue_class_t dq, bool *allow_free); +void _dispatch_lane_dispose(dispatch_lane_class_t dq, bool *allow_free); +void _dispatch_lane_suspend(dispatch_lane_class_t dq); +void _dispatch_lane_resume(dispatch_lane_class_t dq, bool 
activate); +void _dispatch_lane_activate(dispatch_lane_class_t dq, bool *allow_resume); +void _dispatch_lane_invoke(dispatch_lane_class_t dq, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -void _dispatch_global_queue_poke(dispatch_queue_t dq, int n, int floor); -void _dispatch_queue_push(dispatch_queue_t dq, dispatch_object_t dou, +void _dispatch_lane_push(dispatch_lane_class_t dq, dispatch_object_t dou, dispatch_qos_t qos); -void _dispatch_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags); -dispatch_queue_wakeup_target_t _dispatch_queue_serial_drain(dispatch_queue_t dq, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t *owned); -void _dispatch_queue_drain_sync_waiter(dispatch_queue_t dq, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - uint64_t owned); -void _dispatch_queue_specific_queue_dispose( - dispatch_queue_specific_queue_t dqsq, bool *allow_free); -void _dispatch_root_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, +void _dispatch_lane_concurrent_push(dispatch_lane_class_t dq, + dispatch_object_t dou, dispatch_qos_t qos); +void _dispatch_lane_wakeup(dispatch_lane_class_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_root_queue_push(dispatch_queue_t dq, dispatch_object_t dou, +dispatch_queue_wakeup_target_t _dispatch_lane_serial_drain( + dispatch_lane_class_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned); + +void _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free); +void _dispatch_workloop_activate(dispatch_workloop_t dwl); +void _dispatch_workloop_invoke(dispatch_workloop_t dwl, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_workloop_push(dispatch_workloop_t dwl, dispatch_object_t dou, dispatch_qos_t qos); +void _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); + +void 
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor); +void _dispatch_root_queue_wakeup(dispatch_queue_global_t dq, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +void _dispatch_root_queue_push(dispatch_queue_global_t dq, + dispatch_object_t dou, dispatch_qos_t qos); #if DISPATCH_USE_KEVENT_WORKQUEUE -void _dispatch_root_queue_drain_deferred_item(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO); -void _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi - DISPATCH_PERF_MON_ARGS_PROTO); +void _dispatch_kevent_workqueue_init(void); #endif -void _dispatch_pthread_root_queue_dispose(dispatch_queue_t dq, +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES +void _dispatch_pthread_root_queue_dispose(dispatch_lane_class_t dq, bool *allow_free); -void _dispatch_main_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, +#endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES +void _dispatch_main_queue_push(dispatch_queue_main_t dq, dispatch_object_t dou, + dispatch_qos_t qos); +void _dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_runloop_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, +#if DISPATCH_COCOA_COMPAT +void _dispatch_runloop_queue_wakeup(dispatch_lane_t dq, + dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +void _dispatch_runloop_queue_xref_dispose(dispatch_lane_t dq); +void _dispatch_runloop_queue_dispose(dispatch_lane_t dq, bool *allow_free); +#endif // DISPATCH_COCOA_COMPAT +void _dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou, + dispatch_qos_t qos); +void _dispatch_mgr_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); -void _dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq); -void _dispatch_runloop_queue_dispose(dispatch_queue_t dq, bool *allow_free); -void _dispatch_mgr_queue_drain(void); -#if DISPATCH_USE_MGR_THREAD && DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES -void 
_dispatch_mgr_priority_init(void); -#else -static inline void _dispatch_mgr_priority_init(void) {} -#endif -#if DISPATCH_USE_KEVENT_WORKQUEUE -void _dispatch_kevent_workqueue_init(void); -#else -static inline void _dispatch_kevent_workqueue_init(void) {} +#if DISPATCH_USE_MGR_THREAD +void _dispatch_mgr_thread(dispatch_lane_t dq, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags); #endif + void _dispatch_apply_invoke(void *ctxt); void _dispatch_apply_redirect_invoke(void *ctxt); -void _dispatch_barrier_async_detached_f(dispatch_queue_t dq, void *ctxt, +void _dispatch_barrier_async_detached_f(dispatch_queue_class_t dq, void *ctxt, dispatch_function_t func); #define DISPATCH_BARRIER_TRYSYNC_SUSPEND 0x1 -void _dispatch_barrier_trysync_or_async_f(dispatch_queue_t dq, void *ctxt, +void _dispatch_barrier_trysync_or_async_f(dispatch_lane_class_t dq, void *ctxt, dispatch_function_t func, uint32_t flags); void _dispatch_queue_atfork_child(void); -#if DISPATCH_DEBUG -void dispatch_debug_queue(dispatch_queue_t dq, const char* str); -#else -static inline void dispatch_debug_queue(dispatch_queue_t dq DISPATCH_UNUSED, - const char* str DISPATCH_UNUSED) {} -#endif - -size_t dispatch_queue_debug(dispatch_queue_t dq, char* buf, size_t bufsiz); -size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, - size_t bufsiz); +size_t _dispatch_queue_debug(dispatch_queue_class_t dq, + char *buf, size_t bufsiz); +size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, + char *buf, size_t bufsiz); -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_MAX * 2) +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2) // must be in lowest to highest qos order (as encoded in dispatch_qos_t) // overcommit qos index values need bit 1 set @@ -657,16 +844,24 @@ enum { // 2 - mgr_q // 3 - mgr_root_q // 4,5,6,7,8,9,10,11,12,13,14,15 - global queues +// 17 - workloop_fallback_q // we use 'xadd' on Intel, so the initial value == next assigned -#define 
DISPATCH_QUEUE_SERIAL_NUMBER_INIT 16 +#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17 extern unsigned long volatile _dispatch_queue_serial_numbers; -extern struct dispatch_queue_s _dispatch_root_queues[]; -extern struct dispatch_queue_s _dispatch_mgr_q; -void _dispatch_root_queues_init(void); + +// mark the workloop fallback queue to avoid finalizing objects on the base +// queue of custom outside-of-qos workloops +#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 16 + +extern struct dispatch_queue_static_s _dispatch_mgr_q; // serial 2 +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES +extern struct dispatch_queue_global_s _dispatch_mgr_root_queue; // serial 3 +#endif +extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 15 #if DISPATCH_DEBUG #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ - dispatch_assert_queue(&_dispatch_mgr_q) + dispatch_assert_queue(_dispatch_mgr_q._as_dq) #else #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() #endif @@ -674,75 +869,50 @@ void _dispatch_root_queues_init(void); #pragma mark - #pragma mark dispatch_queue_attr_t +DISPATCH_CLASS_DECL(queue_attr, OBJECT); +struct dispatch_queue_attr_s { + OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); +}; + +typedef struct dispatch_queue_attr_info_s { + dispatch_qos_t dqai_qos : 8; + int dqai_relpri : 8; + uint16_t dqai_overcommit:2; + uint16_t dqai_autorelease_frequency:2; + uint16_t dqai_concurrent:1; + uint16_t dqai_inactive:1; +} dispatch_queue_attr_info_t; + typedef enum { _dispatch_queue_attr_overcommit_unspecified = 0, _dispatch_queue_attr_overcommit_enabled, _dispatch_queue_attr_overcommit_disabled, } _dispatch_queue_attr_overcommit_t; -DISPATCH_CLASS_DECL(queue_attr); -struct dispatch_queue_attr_s { - OS_OBJECT_STRUCT_HEADER(dispatch_queue_attr); - dispatch_priority_requested_t dqa_qos_and_relpri; - uint16_t dqa_overcommit:2; - uint16_t dqa_autorelease_frequency:2; - uint16_t dqa_concurrent:1; - uint16_t dqa_inactive:1; -}; - -enum { - DQA_INDEX_UNSPECIFIED_OVERCOMMIT = 
0, - DQA_INDEX_NON_OVERCOMMIT, - DQA_INDEX_OVERCOMMIT, -}; - #define DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT 3 -enum { - DQA_INDEX_AUTORELEASE_FREQUENCY_INHERIT = - DISPATCH_AUTORELEASE_FREQUENCY_INHERIT, - DQA_INDEX_AUTORELEASE_FREQUENCY_WORK_ITEM = - DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM, - DQA_INDEX_AUTORELEASE_FREQUENCY_NEVER = - DISPATCH_AUTORELEASE_FREQUENCY_NEVER, -}; - #define DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT 3 -enum { - DQA_INDEX_CONCURRENT = 0, - DQA_INDEX_SERIAL, -}; +#define DISPATCH_QUEUE_ATTR_QOS_COUNT (DISPATCH_QOS_MAX + 1) -#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2 +#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY) -enum { - DQA_INDEX_ACTIVE = 0, - DQA_INDEX_INACTIVE, -}; +#define DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT 2 #define DISPATCH_QUEUE_ATTR_INACTIVE_COUNT 2 -typedef enum { - DQA_INDEX_QOS_CLASS_UNSPECIFIED = 0, - DQA_INDEX_QOS_CLASS_MAINTENANCE, - DQA_INDEX_QOS_CLASS_BACKGROUND, - DQA_INDEX_QOS_CLASS_UTILITY, - DQA_INDEX_QOS_CLASS_DEFAULT, - DQA_INDEX_QOS_CLASS_USER_INITIATED, - DQA_INDEX_QOS_CLASS_USER_INTERACTIVE, -} _dispatch_queue_attr_index_qos_class_t; - -#define DISPATCH_QUEUE_ATTR_PRIO_COUNT (1 - QOS_MIN_RELATIVE_PRIORITY) +#define DISPATCH_QUEUE_ATTR_COUNT ( \ + DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT * \ + DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT * \ + DISPATCH_QUEUE_ATTR_QOS_COUNT * \ + DISPATCH_QUEUE_ATTR_PRIO_COUNT * \ + DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT * \ + DISPATCH_QUEUE_ATTR_INACTIVE_COUNT ) -extern const struct dispatch_queue_attr_s _dispatch_queue_attrs[] - [DISPATCH_QUEUE_ATTR_PRIO_COUNT] - [DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT] - [DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT] - [DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT] - [DISPATCH_QUEUE_ATTR_INACTIVE_COUNT]; +extern const struct dispatch_queue_attr_s +_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]; -dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); +dispatch_queue_attr_info_t 
_dispatch_queue_attr_to_info(dispatch_queue_attr_t); #pragma mark - #pragma mark dispatch_continuation_t @@ -817,54 +987,64 @@ dispatch_queue_attr_t _dispatch_get_default_queue_attr(void); ~(DISPATCH_CONTINUATION_SIZE - 1u)) // continuation is a dispatch_sync or dispatch_barrier_sync -#define DISPATCH_OBJ_SYNC_WAITER_BIT 0x001ul +#define DC_FLAG_SYNC_WAITER 0x001ul // continuation acts as a barrier -#define DISPATCH_OBJ_BARRIER_BIT 0x002ul +#define DC_FLAG_BARRIER 0x002ul // continuation resources are freed on run // this is set on async or for non event_handler source handlers -#define DISPATCH_OBJ_CONSUME_BIT 0x004ul +#define DC_FLAG_CONSUME 0x004ul // continuation has a group in dc_data -#define DISPATCH_OBJ_GROUP_BIT 0x008ul +#define DC_FLAG_GROUP_ASYNC 0x008ul // continuation function is a block (copied in dc_ctxt) -#define DISPATCH_OBJ_BLOCK_BIT 0x010ul +#define DC_FLAG_BLOCK 0x010ul // continuation function is a block with private data, implies BLOCK_BIT -#define DISPATCH_OBJ_BLOCK_PRIVATE_DATA_BIT 0x020ul +#define DC_FLAG_BLOCK_WITH_PRIVATE_DATA 0x020ul // source handler requires fetching context from source -#define DISPATCH_OBJ_CTXT_FETCH_BIT 0x040ul -// use the voucher from the continuation even if the queue has voucher set -#define DISPATCH_OBJ_ENFORCE_VOUCHER 0x080ul +#define DC_FLAG_FETCH_CONTEXT 0x040ul +// continuation is a dispatch_async_and_wait +#define DC_FLAG_ASYNC_AND_WAIT 0x080ul +// bit used to make sure dc_flags is never 0 for allocated continuations +#define DC_FLAG_ALLOCATED 0x100ul +// continuation is an internal implementation detail that should not be +// introspected +#define DC_FLAG_NO_INTROSPECTION 0x200ul // never set on continuations, used by mach.c only -#define DISPATCH_OBJ_MACH_BARRIER 0x1000000ul +#define DC_FLAG_MACH_BARRIER 0x1000000ul typedef struct dispatch_continuation_s { - struct dispatch_object_s _as_do[0]; DISPATCH_CONTINUATION_HEADER(continuation); } *dispatch_continuation_t; 
+dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_next); +dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_vtable); + typedef struct dispatch_sync_context_s { - struct dispatch_object_s _as_do[0]; struct dispatch_continuation_s _as_dc[0]; DISPATCH_CONTINUATION_HEADER(continuation); dispatch_function_t dsc_func; void *dsc_ctxt; -#if DISPATCH_COCOA_COMPAT dispatch_thread_frame_s dsc_dtf; -#endif dispatch_thread_event_s dsc_event; dispatch_tid dsc_waiter; - dispatch_qos_t dsc_override_qos_floor; - dispatch_qos_t dsc_override_qos; - bool dsc_wlh_was_first; - bool dsc_release_storage; + uint8_t dsc_override_qos_floor; + uint8_t dsc_override_qos; + uint16_t dsc_autorelease : 2; + uint16_t dsc_wlh_was_first : 1; + uint16_t dsc_wlh_is_workloop : 1; + uint16_t dsc_waiter_needs_cancel : 1; + uint16_t dsc_release_storage : 1; +#if DISPATCH_INTROSPECTION + uint16_t dsc_from_async : 1; +#endif } *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { _OS_OBJECT_CLASS_HEADER(); - DISPATCH_INVOKABLE_VTABLE_HEADER(dispatch_continuation); + DISPATCH_OBJECT_VTABLE_HEADER(dispatch_continuation); } const *dispatch_continuation_vtable_t; #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16 #else @@ -875,13 +1055,9 @@ typedef struct dispatch_continuation_vtable_s { dispatch_continuation_t _dispatch_continuation_alloc_from_heap(void); void _dispatch_continuation_free_to_heap(dispatch_continuation_t c); -void _dispatch_continuation_async(dispatch_queue_t dq, - dispatch_continuation_t dc); void _dispatch_continuation_pop(dispatch_object_t dou, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, - dispatch_queue_t dq); -void _dispatch_continuation_invoke(dispatch_object_t dou, - voucher_t override_voucher, 
dispatch_invoke_flags_t flags); + dispatch_queue_class_t dqu); #if DISPATCH_USE_MEMORYPRESSURE_SOURCE extern int _dispatch_continuation_cache_limit; @@ -903,6 +1079,7 @@ enum { DC_MACH_RECV_BARRIER_TYPE, DC_MACH_ASYNC_REPLY_TYPE, #if HAVE_PTHREAD_WORKQUEUE_QOS + DC_WORKLOOP_STEALING_TYPE, DC_OVERRIDE_STEALING_TYPE, DC_OVERRIDE_OWNING_TYPE, #endif @@ -913,29 +1090,12 @@ DISPATCH_ALWAYS_INLINE static inline unsigned long dc_type(dispatch_continuation_t dc) { - return dx_type(dc->_as_do); -} - -DISPATCH_ALWAYS_INLINE -static inline unsigned long -dc_subtype(dispatch_continuation_t dc) -{ - return dx_subtype(dc->_as_do); + return dx_type((struct dispatch_object_s *)dc); } extern const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[_DC_MAX_TYPE]; -void -_dispatch_async_redirect_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); - -#if HAVE_PTHREAD_WORKQUEUE_QOS -void -_dispatch_queue_override_invoke(dispatch_continuation_t dc, - dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); -#endif - #define DC_VTABLE(name) (&_dispatch_continuation_vtables[DC_##name##_TYPE]) #define DC_VTABLE_ENTRY(name, ...) 
\ @@ -964,13 +1124,22 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pri, #pragma mark dispatch_apply_t struct dispatch_apply_s { +#if !OS_OBJECT_HAVE_OBJC1 + dispatch_continuation_t da_dc; +#endif size_t volatile da_index, da_todo; - size_t da_iterations, da_nested; + size_t da_iterations; +#if OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; +#endif + size_t da_nested; dispatch_thread_event_s da_event; dispatch_invoke_flags_t da_flags; int32_t da_thr_cnt; }; +dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) == + offsetof(struct dispatch_apply_s, da_dc), + "These fields must alias so that leaks instruments work"); typedef struct dispatch_apply_s *dispatch_apply_t; #pragma mark - @@ -991,7 +1160,7 @@ typedef struct dispatch_apply_s *dispatch_apply_t; voucher_t dbpd_voucher; \ dispatch_block_t dbpd_block; \ dispatch_group_t dbpd_group; \ - os_mpsc_queue_t volatile dbpd_queue; \ + dispatch_queue_t dbpd_queue; \ mach_port_t dbpd_thread; #if !defined(__cplusplus) @@ -1010,30 +1179,26 @@ typedef struct dispatch_block_private_data_s *dispatch_block_private_data_t; #define DISPATCH_BLOCK_PRIVATE_DATA_MAGIC 0xD159B10C // 0xDISPatch_BLOCk // struct for synchronous perform: no group_leave at end of invoke -#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block) \ +#define DISPATCH_BLOCK_PRIVATE_DATA_PERFORM_INITIALIZER(flags, block, voucher) \ { \ .dbpd_magic = DISPATCH_BLOCK_PRIVATE_DATA_MAGIC, \ .dbpd_flags = (flags), \ .dbpd_atomic_flags = DBF_PERFORM, \ .dbpd_block = (block), \ + .dbpd_voucher = (voucher), \ } +extern void (*const _dispatch_block_special_invoke)(void*); + dispatch_block_t _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t priority, dispatch_block_t block); void _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd); void _dispatch_block_sync_invoke(void *block); -void _dispatch_continuation_init_slow(dispatch_continuation_t dc, 
+void *_dispatch_continuation_get_function_symbol(dispatch_continuation_t dc); +dispatch_qos_t _dispatch_continuation_init_slow(dispatch_continuation_t dc, dispatch_queue_class_t dqu, dispatch_block_flags_t flags); -long _dispatch_barrier_trysync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t func); - -/* exported for tests in dispatch_trysync.c */ -DISPATCH_EXPORT DISPATCH_NOTHROW -long _dispatch_trysync_f(dispatch_queue_t dq, void *ctxt, - dispatch_function_t f); - #endif /* __BLOCKS__ */ typedef struct dispatch_pthread_root_queue_observer_hooks_s { diff --git a/src/semaphore.c b/src/semaphore.c index 3fe94c6e3..30cde9278 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -23,20 +23,6 @@ DISPATCH_WEAK // rdar://problem/8503746 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); -#pragma mark - -#pragma mark dispatch_semaphore_class_t - -static void -_dispatch_semaphore_class_init(long value, dispatch_semaphore_class_t dsemau) -{ - struct dispatch_semaphore_header_s *dsema = dsemau._dsema_hdr; - - dsema->do_next = DISPATCH_OBJECT_LISTLESS; - dsema->do_targetq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false); - dsema->dsema_value = value; - _dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); -} - #pragma mark - #pragma mark dispatch_semaphore_t @@ -52,9 +38,12 @@ dispatch_semaphore_create(long value) return DISPATCH_BAD_INPUT; } - dsema = (dispatch_semaphore_t)_dispatch_object_alloc( - DISPATCH_VTABLE(semaphore), sizeof(struct dispatch_semaphore_s)); - _dispatch_semaphore_class_init(value, dsema); + dsema = _dispatch_object_alloc(DISPATCH_VTABLE(semaphore), + sizeof(struct dispatch_semaphore_s)); + dsema->do_next = DISPATCH_OBJECT_LISTLESS; + dsema->do_targetq = _dispatch_get_default_queue(false); + dsema->dsema_value = value; + _dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); dsema->dsema_orig = value; return dsema; } @@ -80,7 +69,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t 
bufsiz) size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(dsema), dsema); + _dispatch_object_class_name(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", @@ -104,10 +93,10 @@ long dispatch_semaphore_signal(dispatch_semaphore_t dsema) { long value = os_atomic_inc2o(dsema, dsema_value, release); - if (fastpath(value > 0)) { + if (likely(value > 0)) { return 0; } - if (slowpath(value == LONG_MIN)) { + if (unlikely(value == LONG_MIN)) { DISPATCH_CLIENT_CRASH(value, "Unbalanced call to dispatch_semaphore_signal()"); } @@ -150,7 +139,7 @@ long dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { long value = os_atomic_dec2o(dsema, dsema_value, acquire); - if (fastpath(value >= 0)) { + if (likely(value >= 0)) { return 0; } return _dispatch_semaphore_wait_slow(dsema, timeout); @@ -161,13 +150,16 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) DISPATCH_ALWAYS_INLINE static inline dispatch_group_t -_dispatch_group_create_with_count(long count) +_dispatch_group_create_with_count(uint32_t n) { - dispatch_group_t dg = (dispatch_group_t)_dispatch_object_alloc( - DISPATCH_VTABLE(group), sizeof(struct dispatch_group_s)); - _dispatch_semaphore_class_init(count, dg); - if (count) { - os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // + dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group), + sizeof(struct dispatch_group_s)); + dg->do_next = DISPATCH_OBJECT_LISTLESS; + dg->do_targetq = _dispatch_get_default_queue(false); + if (n) { + os_atomic_store2o(dg, dg_bits, + -n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); + os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // } return dg; } @@ -184,157 +176,150 @@ _dispatch_group_create_and_enter(void) return _dispatch_group_create_with_count(1); } -void -dispatch_group_enter(dispatch_group_t dg) -{ - long 
value = os_atomic_inc_orig2o(dg, dg_value, acquire); - if (slowpath((unsigned long)value >= (unsigned long)LONG_MAX)) { - DISPATCH_CLIENT_CRASH(value, - "Too many nested calls to dispatch_group_enter()"); - } - if (value == 0) { - _dispatch_retain(dg); // - } -} - -DISPATCH_NOINLINE -static long -_dispatch_group_wake(dispatch_group_t dg, bool needs_release) -{ - dispatch_continuation_t next, head, tail = NULL; - long rval; - - // cannot use os_mpsc_capture_snapshot() because we can have concurrent - // _dispatch_group_wake() calls - head = os_atomic_xchg2o(dg, dg_notify_head, NULL, relaxed); - if (head) { - // snapshot before anything is notified/woken - tail = os_atomic_xchg2o(dg, dg_notify_tail, NULL, release); - } - rval = (long)os_atomic_xchg2o(dg, dg_waiters, 0, relaxed); - if (rval) { - // wake group waiters - _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); - _dispatch_sema4_signal(&dg->dg_sema, rval); - } - uint16_t refs = needs_release ? 1 : 0; // - if (head) { - // async group notify blocks - do { - next = os_mpsc_pop_snapshot_head(head, tail, do_next); - dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data; - _dispatch_continuation_async(dsn_queue, head); - _dispatch_release(dsn_queue); - } while ((head = next)); - refs++; - } - if (refs) _dispatch_release_n(dg, refs); - return 0; -} - -void -dispatch_group_leave(dispatch_group_t dg) -{ - long value = os_atomic_dec2o(dg, dg_value, release); - if (slowpath(value == 0)) { - return (void)_dispatch_group_wake(dg, true); - } - if (slowpath(value < 0)) { - DISPATCH_CLIENT_CRASH(value, - "Unbalanced call to dispatch_group_leave()"); - } -} - void _dispatch_group_dispose(dispatch_object_t dou, DISPATCH_UNUSED bool *allow_free) { - dispatch_group_t dg = dou._dg; + uint64_t dg_state = os_atomic_load2o(dou._dg, dg_state, relaxed); - if (dg->dg_value) { - DISPATCH_CLIENT_CRASH(dg->dg_value, + if (unlikely((uint32_t)dg_state)) { + DISPATCH_CLIENT_CRASH((uintptr_t)dg_state, "Group object deallocated 
while in use"); } - - _dispatch_sema4_dispose(&dg->dg_sema, _DSEMA4_POLICY_FIFO); } size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) { dispatch_group_t dg = dou._dg; + uint64_t dg_state = os_atomic_load2o(dg, dg_state, relaxed); size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(dg), dg); + _dispatch_object_class_name(dg), dg); offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); -#if USE_MACH_SEM - offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", - dg->dg_sema); -#endif offset += dsnprintf(&buf[offset], bufsiz - offset, - "count = %ld, waiters = %d }", dg->dg_value, dg->dg_waiters); + "count = %d, gen = %d, waiters = %d, notifs = %d }", + _dg_state_value(dg_state), _dg_state_gen(dg_state), + (bool)(dg_state & DISPATCH_GROUP_HAS_WAITERS), + (bool)(dg_state & DISPATCH_GROUP_HAS_NOTIFS)); return offset; } DISPATCH_NOINLINE static long -_dispatch_group_wait_slow(dispatch_group_t dg, dispatch_time_t timeout) +_dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, + dispatch_time_t timeout) { - long value; - int orig_waiters; - - // check before we cause another signal to be sent by incrementing - // dg->dg_waiters - value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 - if (value == 0) { - return _dispatch_group_wake(dg, false); + for (;;) { + int rc = _dispatch_wait_on_address(&dg->dg_gen, gen, timeout, 0); + if (likely(gen != os_atomic_load2o(dg, dg_gen, acquire))) { + return 0; + } + if (rc == ETIMEDOUT) { + return _DSEMA4_TIMEOUT(); + } } +} - (void)os_atomic_inc2o(dg, dg_waiters, relaxed); - // check the values again in case we need to wake any threads - value = os_atomic_load2o(dg, dg_value, ordered); // 19296565 - if (value == 0) { - _dispatch_group_wake(dg, false); - // Fall through to consume the extra signal, forcing timeout to avoid - // useless setups as it won't block - timeout = DISPATCH_TIME_FOREVER; - } +long 
+dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) +{ + uint64_t old_state, new_state; - _dispatch_sema4_create(&dg->dg_sema, _DSEMA4_POLICY_FIFO); - switch (timeout) { - default: - if (!_dispatch_sema4_timedwait(&dg->dg_sema, timeout)) { - break; + os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, relaxed, { + if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) { + os_atomic_rmw_loop_give_up_with_fence(acquire, return 0); } - // Fall through and try to undo the earlier change to - // dg->dg_waiters - case DISPATCH_TIME_NOW: - orig_waiters = dg->dg_waiters; - while (orig_waiters) { - if (os_atomic_cmpxchgvw2o(dg, dg_waiters, orig_waiters, - orig_waiters - 1, &orig_waiters, relaxed)) { - return _DSEMA4_TIMEOUT(); - } + if (unlikely(timeout == 0)) { + os_atomic_rmw_loop_give_up(return _DSEMA4_TIMEOUT()); } - // Another thread is running _dispatch_group_wake() - // Fall through and drain the wakeup. - case DISPATCH_TIME_FOREVER: - _dispatch_sema4_wait(&dg->dg_sema); - break; + new_state = old_state | DISPATCH_GROUP_HAS_WAITERS; + if (unlikely(old_state & DISPATCH_GROUP_HAS_WAITERS)) { + os_atomic_rmw_loop_give_up(break); + } + }); + + return _dispatch_group_wait_slow(dg, _dg_state_gen(new_state), timeout); +} + +DISPATCH_NOINLINE +static void +_dispatch_group_wake(dispatch_group_t dg, uint64_t dg_state, bool needs_release) +{ + uint16_t refs = needs_release ? 
1 : 0; // + + if (dg_state & DISPATCH_GROUP_HAS_NOTIFS) { + dispatch_continuation_t dc, next_dc, tail; + + // Snapshot before anything is notified/woken + dc = os_mpsc_capture_snapshot(os_mpsc(dg, dg_notify), &tail); + do { + dispatch_queue_t dsn_queue = (dispatch_queue_t)dc->dc_data; + next_dc = os_mpsc_pop_snapshot_head(dc, tail, do_next); + _dispatch_continuation_async(dsn_queue, dc, + _dispatch_qos_from_pp(dc->dc_priority), dc->dc_flags); + _dispatch_release(dsn_queue); + } while ((dc = next_dc)); + + refs++; } - return 0; + + if (dg_state & DISPATCH_GROUP_HAS_WAITERS) { + _dispatch_wake_by_address(&dg->dg_gen); + } + + if (refs) _dispatch_release_n(dg, refs); } -long -dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) +void +dispatch_group_leave(dispatch_group_t dg) { - if (dg->dg_value == 0) { - return 0; + // The value is incremented on a 64bits wide atomic so that the carry for + // the -1 -> 0 transition increments the generation atomically. + uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state, + DISPATCH_GROUP_VALUE_INTERVAL, release); + uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK); + + if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) { + old_state += DISPATCH_GROUP_VALUE_INTERVAL; + do { + new_state = old_state; + if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) { + new_state &= ~DISPATCH_GROUP_HAS_WAITERS; + new_state &= ~DISPATCH_GROUP_HAS_NOTIFS; + } else { + // If the group was entered again since the atomic_add above, + // we can't clear the waiters bit anymore as we don't know for + // which generation the waiters are for + new_state &= ~DISPATCH_GROUP_HAS_NOTIFS; + } + if (old_state == new_state) break; + } while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state, + old_state, new_state, &old_state, relaxed))); + return _dispatch_group_wake(dg, old_state, true); + } + + if (unlikely(old_value == 0)) { + DISPATCH_CLIENT_CRASH((uintptr_t)old_value, + "Unbalanced call to dispatch_group_leave()"); } 
- if (timeout == 0) { - return _DSEMA4_TIMEOUT(); +} + +void +dispatch_group_enter(dispatch_group_t dg) +{ + // The value is decremented on a 32bits wide atomic so that the carry + // for the 0 -> -1 transition is not propagated to the upper 32bits. + uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits, + DISPATCH_GROUP_VALUE_INTERVAL, acquire); + uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK; + if (unlikely(old_value == 0)) { + _dispatch_retain(dg); // + } + if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) { + DISPATCH_CLIENT_CRASH(old_bits, + "Too many nested calls to dispatch_group_enter()"); } - return _dispatch_group_wait_slow(dg, timeout); } DISPATCH_ALWAYS_INLINE @@ -342,16 +327,24 @@ static inline void _dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_continuation_t dsn) { + uint64_t old_state, new_state; + dispatch_continuation_t prev; + dsn->dc_data = dq; - dsn->do_next = NULL; _dispatch_retain(dq); - if (os_mpsc_push_update_tail(dg, dg_notify, dsn, do_next)) { - _dispatch_retain(dg); - os_atomic_store2o(dg, dg_notify_head, dsn, ordered); - // seq_cst with atomic store to notify_head - if (os_atomic_load2o(dg, dg_value, ordered) == 0) { - _dispatch_group_wake(dg, false); - } + + prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next); + if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg); + os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next); + if (os_mpsc_push_was_empty(prev)) { + os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, { + new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS; + if ((uint32_t)old_state == 0) { + os_atomic_rmw_loop_give_up({ + return _dispatch_group_wake(dg, new_state, false); + }); + } + }); } } @@ -361,8 +354,7 @@ dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, dispatch_function_t func) { dispatch_continuation_t dsn = _dispatch_continuation_alloc(); - _dispatch_continuation_init_f(dsn, dq, ctxt, func, 0, 0, - 
DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_init_f(dsn, dq, ctxt, func, 0, DC_FLAG_CONSUME); _dispatch_group_notify(dg, dq, dsn); } @@ -372,7 +364,44 @@ dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, dispatch_block_t db) { dispatch_continuation_t dsn = _dispatch_continuation_alloc(); - _dispatch_continuation_init(dsn, dq, db, 0, 0, DISPATCH_OBJ_CONSUME_BIT); + _dispatch_continuation_init(dsn, dq, db, 0, DC_FLAG_CONSUME); _dispatch_group_notify(dg, dq, dsn); } #endif + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_continuation_t dc, dispatch_qos_t qos) +{ + dispatch_group_enter(dg); + dc->dc_data = dg; + _dispatch_continuation_async(dq, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_group_async_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt, + dispatch_function_t func) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC; + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc, qos); +} + +#ifdef __BLOCKS__ +void +dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq, + dispatch_block_t db) +{ + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC; + dispatch_qos_t qos; + + qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags); + _dispatch_continuation_group_async(dg, dq, dc, qos); +} +#endif diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index f9d0983aa..b38dd25da 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -29,38 +29,71 @@ struct dispatch_queue_s; -#define DISPATCH_SEMAPHORE_HEADER(cls, ns) \ - DISPATCH_OBJECT_HEADER(cls); \ - long volatile ns##_value; \ - _dispatch_sema4_t ns##_sema - -struct dispatch_semaphore_header_s { - 
DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); -}; - -DISPATCH_CLASS_DECL(semaphore); +DISPATCH_CLASS_DECL(semaphore, OBJECT); struct dispatch_semaphore_s { - DISPATCH_SEMAPHORE_HEADER(semaphore, dsema); + DISPATCH_OBJECT_HEADER(semaphore); + long volatile dsema_value; long dsema_orig; + _dispatch_sema4_t dsema_sema; }; -DISPATCH_CLASS_DECL(group); +/* + * Dispatch Group State: + * + * Generation (32 - 63): + * 32 bit counter that is incremented each time the group value reaaches + * 0 after a dispatch_group_leave. This 32bit word is used to block waiters + * (threads in dispatch_group_wait) in _dispatch_wait_on_address() until the + * generation changes. + * + * Value (2 - 31): + * 30 bit value counter of the number of times the group was entered. + * dispatch_group_enter counts downward on 32bits, and dispatch_group_leave + * upward on 64bits, which causes the generation to bump each time the value + * reaches 0 again due to carry propagation. + * + * Has Notifs (1): + * This bit is set when the list of notifications on the group becomes non + * empty. It is also used as a lock as the thread that successfuly clears this + * bit is the thread responsible for firing the notifications. + * + * Has Waiters (0): + * This bit is set when there are waiters (threads in dispatch_group_wait) + * that need to be woken up the next time the value reaches 0. Waiters take + * a snapshot of the generation before waiting and will wait for the + * generation to change before they return. 
+ */ +#define DISPATCH_GROUP_GEN_MASK 0xffffffff00000000ULL +#define DISPATCH_GROUP_VALUE_MASK 0x00000000fffffffcULL +#define DISPATCH_GROUP_VALUE_INTERVAL 0x0000000000000004ULL +#define DISPATCH_GROUP_VALUE_1 DISPATCH_GROUP_VALUE_MASK +#define DISPATCH_GROUP_VALUE_MAX DISPATCH_GROUP_VALUE_INTERVAL +#define DISPATCH_GROUP_HAS_NOTIFS 0x0000000000000002ULL +#define DISPATCH_GROUP_HAS_WAITERS 0x0000000000000001ULL +DISPATCH_CLASS_DECL(group, OBJECT); struct dispatch_group_s { - DISPATCH_SEMAPHORE_HEADER(group, dg); - int volatile dg_waiters; + DISPATCH_OBJECT_HEADER(group); + DISPATCH_UNION_LE(uint64_t volatile dg_state, + uint32_t dg_bits, + uint32_t dg_gen + ) DISPATCH_ATOMIC64_ALIGN; struct dispatch_continuation_s *volatile dg_notify_head; struct dispatch_continuation_s *volatile dg_notify_tail; }; -typedef union { - struct dispatch_semaphore_header_s *_dsema_hdr; - struct dispatch_semaphore_s *_dsema; - struct dispatch_group_s *_dg; -#if USE_OBJC - dispatch_semaphore_t _objc_dsema; - dispatch_group_t _objc_dg; -#endif -} dispatch_semaphore_class_t DISPATCH_TRANSPARENT_UNION; +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dg_state_value(uint64_t dg_state) +{ + return (uint32_t)(-((uint32_t)dg_state & DISPATCH_GROUP_VALUE_MASK)) >> 2; +} + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dg_state_gen(uint64_t dg_state) +{ + return (uint32_t)(dg_state >> 32); +} dispatch_group_t _dispatch_group_create_and_enter(void); void _dispatch_group_dispose(dispatch_object_t dou, bool *allow_free); diff --git a/src/shims.h b/src/shims.h index 28e1c53a9..fd8b3542d 100644 --- a/src/shims.h +++ b/src/shims.h @@ -28,32 +28,31 @@ #define __DISPATCH_OS_SHIMS__ #include + #ifdef __linux__ #include "shims/linux_stubs.h" #endif - #ifdef __ANDROID__ #include "shims/android_stubs.h" #endif -#include "shims/hw_config.h" -#include "shims/priority.h" +#include "shims/target.h" -#if HAVE_PTHREAD_WORKQUEUES -#if __has_include() +#if DISPATCH_USE_INTERNAL_WORKQUEUE +#include 
"event/workqueue_internal.h" +#elif HAVE_PTHREAD_WORKQUEUES #include #else -#include +#error Unsupported configuration #endif -#ifndef WORKQ_FEATURE_MAINTENANCE -#define WORKQ_FEATURE_MAINTENANCE 0x10 -#endif -#endif // HAVE_PTHREAD_WORKQUEUES -#if DISPATCH_USE_INTERNAL_WORKQUEUE -#include "event/workqueue_internal.h" +#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 #endif +#include "shims/hw_config.h" +#include "shims/priority.h" + #if HAVE_PTHREAD_NP_H #include #endif diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 64af8b272..cb9b95f48 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -67,7 +67,7 @@ #define _os_atomic_c11_op(p, v, m, o, op) \ ({ _os_atomic_basetypeof(p) _v = (v), _r = \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(*(p)))(_r op _v); }) + memory_order_##m); (typeof(_r))(_r op _v); }) #define _os_atomic_c11_op_orig(p, v, m, o, op) \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \ memory_order_##m) diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index de074a444..67172dbc6 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -90,11 +90,11 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max_index) os_atomic_rmw_loop(p, b, b_masked, relaxed, { // ffs returns 1 + index, or 0 if none set index = (unsigned int)__builtin_ffsl((long)~b); - if (slowpath(index == 0)) { + if (unlikely(index == 0)) { os_atomic_rmw_loop_give_up(return UINT_MAX); } index--; - if (slowpath(index > max_index)) { + if (unlikely(index > max_index)) { os_atomic_rmw_loop_give_up(return UINT_MAX); } b_masked = b | (1UL << index); diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 485dad663..6de0394b6 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -43,6 +43,13 @@ #error "could not determine pointer size as a constant int" #endif // __SIZEOF_POINTER__ +#define DISPATCH_CACHELINE_SIZE 64u +#define 
ROUND_UP_TO_CACHELINE_SIZE(x) \ + (((x) + (DISPATCH_CACHELINE_SIZE - 1u)) & \ + ~(DISPATCH_CACHELINE_SIZE - 1u)) +#define DISPATCH_CACHELINE_ALIGN \ + __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) + #if !TARGET_OS_WIN32 typedef enum { diff --git a/src/shims/lock.c b/src/shims/lock.c index 24af953c3..3f430238d 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -31,8 +31,8 @@ } #if TARGET_OS_MAC -_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION, - "values should be the same"); +dispatch_static_assert(DLOCK_LOCK_DATA_CONTENTION == + ULF_WAIT_WORKQ_DATA_CONTENTION); #if !HAVE_UL_UNFAIR_LOCK DISPATCH_ALWAYS_INLINE @@ -146,8 +146,8 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) uint64_t nsec = _dispatch_timeout(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - kr = slowpath(semaphore_timedwait(*sema, _timeout)); - } while (kr == KERN_ABORTED); + kr = semaphore_timedwait(*sema, _timeout); + } while (unlikely(kr == KERN_ABORTED)); if (kr == KERN_OPERATION_TIMED_OUT) { return true; @@ -202,8 +202,8 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); - ret = slowpath(sem_timedwait(sema, &_timeout)); - } while (ret == -1 && errno == EINTR); + ret = sem_timedwait(sema, &_timeout); + } while (unlikely(ret == -1 && errno == EINTR)); if (ret == -1 && errno == ETIMEDOUT) { return true; @@ -322,8 +322,6 @@ _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout), case 0: return rc > 0 ? 
ENOTEMPTY : 0; case ETIMEDOUT: case EFAULT: return err; - case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr, - "corruption of lock owner"); default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); ); } @@ -432,21 +430,40 @@ _dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) #endif #pragma mark - wait for address -void -_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, - dispatch_lock_options_t flags) +int +_dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, + dispatch_time_t timeout, dispatch_lock_options_t flags) { + uint32_t *address = (uint32_t *)_address; + uint64_t nsecs = _dispatch_timeout(timeout); + if (nsecs == 0) { + return ETIMEDOUT; + } #if HAVE_UL_COMPARE_AND_WAIT - _dispatch_ulock_wait((uint32_t *)address, value, 0, flags); + uint64_t usecs = 0; + int rc; + if (nsecs == DISPATCH_TIME_FOREVER) { + return _dispatch_ulock_wait(address, value, 0, flags); + } + do { + usecs = howmany(nsecs, NSEC_PER_USEC); + if (usecs > UINT32_MAX) usecs = UINT32_MAX; + rc = _dispatch_ulock_wait(address, value, (uint32_t)usecs, flags); + } while (usecs == UINT32_MAX && rc == ETIMEDOUT && + (nsecs = _dispatch_timeout(timeout)) != 0); + return rc; #elif HAVE_FUTEX - _dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG); -#else - mach_msg_timeout_t timeout = 1; - while (os_atomic_load(address, relaxed) == value) { - thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++); + if (nsecs != DISPATCH_TIME_FOREVER) { + struct timespec ts = { + .tv_sec = (typeof(ts.tv_sec))(nsec / NSEC_PER_SEC), + .tv_nsec = (typeof(ts.tv_nsec))(nsec % NSEC_PER_SEC), + }; + return _dispatch_futex_wait(address, value, &ts, FUTEX_PRIVATE_FLAG); } + return _dispatch_futex_wait(address, value, NULL, FUTEX_PRIVATE_FLAG); +#else +#error _dispatch_wait_on_address unimplemented for this platform #endif - (void)flags; } void @@ -580,33 +597,42 @@ _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, dispatch_lock cur) #pragma mark 
- gate lock void -_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value, - dispatch_lock_options_t flags) +_dispatch_once_wait(dispatch_once_gate_t dgo) { dispatch_lock self = _dispatch_lock_value_for_self(); - dispatch_lock old_value, new_value; + uintptr_t old_v, new_v; + dispatch_lock *lock = &dgo->dgo_gate.dgl_lock; uint32_t timeout = 1; for (;;) { - os_atomic_rmw_loop(&dgl->dgl_lock, old_value, new_value, acquire, { - if (likely(old_value == value)) { - os_atomic_rmw_loop_give_up_with_fence(acquire, return); + os_atomic_rmw_loop(&dgo->dgo_once, old_v, new_v, relaxed, { + if (likely(old_v == DLOCK_ONCE_DONE)) { + os_atomic_rmw_loop_give_up(return); + } +#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER + if (DISPATCH_ONCE_IS_GEN(old_v)) { + os_atomic_rmw_loop_give_up({ + os_atomic_thread_fence(acquire); + return _dispatch_once_mark_done_if_quiesced(dgo, old_v); + }); } - new_value = old_value | DLOCK_WAITERS_BIT; - if (new_value == old_value) os_atomic_rmw_loop_give_up(break); +#endif + new_v = old_v | (uintptr_t)DLOCK_WAITERS_BIT; + if (new_v == old_v) os_atomic_rmw_loop_give_up(break); }); - if (unlikely(_dispatch_lock_is_locked_by(old_value, self))) { + if (unlikely(_dispatch_lock_is_locked_by((dispatch_lock)old_v, self))) { DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); } #if HAVE_UL_UNFAIR_LOCK - _dispatch_unfair_lock_wait(&dgl->dgl_lock, new_value, 0, flags); + _dispatch_unfair_lock_wait(lock, (dispatch_lock)new_v, 0, + DLOCK_LOCK_NONE); #elif HAVE_FUTEX - _dispatch_futex_wait(&dgl->dgl_lock, new_value, NULL, FUTEX_PRIVATE_FLAG); + _dispatch_futex_wait(lock, (dispatch_lock)new_v, NULL, + FUTEX_PRIVATE_FLAG); #else - _dispatch_thread_switch(new_value, flags, timeout++); + _dispatch_thread_switch(new_v, flags, timeout++); #endif (void)timeout; - (void)flags; } } @@ -625,3 +651,14 @@ _dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock cur) (void)dgl; #endif } + +#if TARGET_OS_MAC + +void +_dispatch_firehose_gate_wait(dispatch_gate_t 
dgl, uint32_t owner, + uint32_t flags) +{ + _dispatch_unfair_lock_wait(&dgl->dgl_lock, owner, 0, flags); +} + +#endif diff --git a/src/shims/lock.h b/src/shims/lock.h index 37a3ecfc8..cc75852d3 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -153,6 +153,14 @@ _dispatch_lock_has_failed_trylock(dispatch_lock lock_value) #endif #endif // HAVE_FUTEX +#if defined(__x86_64__) || defined(__i386__) || defined(__s390x__) +#define DISPATCH_ONCE_USE_QUIESCENT_COUNTER 0 +#elif __APPLE__ +#define DISPATCH_ONCE_USE_QUIESCENT_COUNTER 1 +#else +#define DISPATCH_ONCE_USE_QUIESCENT_COUNTER 0 +#endif + #pragma mark - semaphores #if USE_MACH_SEM @@ -218,8 +226,8 @@ _dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy) #pragma mark - compare and wait DISPATCH_NOT_TAIL_CALLED -void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, - dispatch_lock_options_t flags); +int _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, + dispatch_time_t timeout, dispatch_lock_options_t flags); void _dispatch_wake_by_address(uint32_t volatile *address); #pragma mark - thread event @@ -292,7 +300,7 @@ _dispatch_thread_event_wait(dispatch_thread_event_t dte) #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX if (os_atomic_dec(&dte->dte_value, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return - // for any other value, go to the slowpath which checks it's not corrupt + // for any other value, take the slow path which checks it's not corrupt return; } #else @@ -334,7 +342,7 @@ _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l) DLOCK_OWNER_NULL, value_self, acquire))) { return; } - return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE); + return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_DATA_CONTENTION); } DISPATCH_ALWAYS_INLINE @@ -406,16 +414,10 @@ _dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l) #pragma mark - gate lock -#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX -#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1 -#else -#define 
DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0 -#endif - #define DLOCK_GATE_UNLOCKED ((dispatch_lock)0) -#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0) -#define DLOCK_ONCE_DONE (~(dispatch_once_t)0) +#define DLOCK_ONCE_UNLOCKED ((uintptr_t)0) +#define DLOCK_ONCE_DONE (~(uintptr_t)0) typedef struct dispatch_gate_s { dispatch_lock dgl_lock; @@ -424,13 +426,210 @@ typedef struct dispatch_gate_s { typedef struct dispatch_once_gate_s { union { dispatch_gate_s dgo_gate; - dispatch_once_t dgo_once; + uintptr_t dgo_once; }; } dispatch_once_gate_s, *dispatch_once_gate_t; -DISPATCH_NOT_TAIL_CALLED -void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value, - uint32_t flags); +#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER +#define DISPATCH_ONCE_MAKE_GEN(gen) (((gen) << 2) + DLOCK_FAILED_TRYLOCK_BIT) +#define DISPATCH_ONCE_IS_GEN(gen) (((gen) & 3) == DLOCK_FAILED_TRYLOCK_BIT) + +/* + * the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time + * all CPUs have performed a context switch. + * + * A counter update algorithm is: + * + * // atomic_or acq_rel is marked as ======== below + * if (atomic_or(&mask, acq_rel) == full_mask) { + * + * tmp = atomic_load(&generation, relaxed); + * atomic_store(&generation, gen + 1, relaxed); + * + * // atomic_store release is marked as -------- below + * atomic_store(&mask, 0, release); + * } + * + * This enforces boxes delimited by the acq_rel/release barriers to only be able + * to observe two possible values for the counter which have been marked below. + * + * Lemma 1 + * ~~~~~~~ + * + * Between two acq_rel barriers, a thread can only observe two possible values + * of the generation counter G maintained by the kernel. + * + * The Figure below, adds the happens-before-relationships and assertions: + * + * | Thread A | Thread B | Thread C | + * | | | | + * |==================| | | + * | G = N | | | + * |------------------|--------. 
| | + * | | | | | + * | | v | | + * | |==================| | + * | | assert(G >= N) | | + * | | | | + * | | | | + * | | | | + * | | assert(G < N+2) | | + * | |==================|--------. | + * | | | | | + * | | | v | + * | | |==================| + * | | | G = N + 2 | + * | | |------------------| + * | | | | + * + * + * This allows us to name the area delimited by two consecutive acq_rel + * barriers { N, N+1 } after the two possible values of G they can observe, + * which we'll use from now on. + * + * + * Lemma 2 + * ~~~~~~~ + * + * Any operation that a thread does while observing G in { N-2, N-1 } will be + * visible to a thread that can observe G in { N, N + 1 }. + * + * Any operation that a thread does while observing G in { N, N + 1 } cannot + * possibly be visible to a thread observing G in { N-2, N-1 } + * + * This is a corollary of Lemma 1: the only possibility is for the update + * of G to N to have happened between two acq_rel barriers of the considered + * threads. + * + * Below is a figure of why instantiated with N = 2 + * + * | Thread A | Thread B | Thread C | + * | | | | + * | G ∈ { 0, 1 } | | | + * | | | | + * | | | | + * | store(X, 1) | | | + * | assert(!Z) | | | + * | | | | + * |==================|--------. | | + * | G ∈ { 1, 2 } | | | | + * | | v | | + * | |==================|--------. | + * | | G = 2 | | | + * | |------------------| | | + * | | | | | + * | | | v | + * | | |==================| + * | | | G ∈ { 2, 3 } | + * | | | | + * | | | | + * | | | store(Z, 1) | + * | | | assert(X) | + * | | | | + * | | | | + * + * + * Theorem + * ~~~~~~~ + * + * The optimal number of increments to observe for the dispatch once algorithm + * to be safe is 4. + * + * Proof (correctness): + * + * Consider a dispatch once initializer thread in its { N, N+1 } "zone". + * + * Per Lemma 2, any observer thread in its { N+2, N+3 } zone will see the + * effect of the dispatch once initialization. 
+ * + * Per Lemma 2, when the DONE transition happens in a thread zone { N+3, N+4 }, + * then threads can observe this transiton in their { N+2, N+3 } zone at the + * earliest. + * + * Hence for an initializer bracket of { N, N+1 }, the first safe bracket for + * the DONE transition is { N+3, N+4 }. + * + * + * Proof (optimal): + * + * The following ordering is possible if waiting only for three periods: + * + * | Thread A | Thread B | Thread C | + * | | | | + * | | | | + * | | |==================| + * | | | G ∈ { 1, 2 } | + * | | | | + * | | | | + * | | | R(once == -1) <-+--. + * | | | | | + * | -------+------------------+---------. | | + * | | | | | | + * | W(global, 42) | | | | | + * | WRel(once, G:0) | | | | | + * | | | | | | + * | | | v | | + * | | | R(global == 0) | | + * | | | | | + * | | | | | + * |==================| | | | + * | G ∈ { 1, 2 } | | | | + * | |==================| | | + * | | G = 2 | | | + * | |------------------| | | + * | | | | | + * |==================| | | | + * | G ∈ { 2, 3 } | | | | + * | | | | | + * | | | | | + * | W(once, -1) ---+------------------+------------------+--' + * | | | | + * | | |==================| + * | | | G ∈ { 2, 3 } | + * | | | | + * + */ +#define DISPATCH_ONCE_GEN_SAFE_DELTA (4 << 2) + +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_once_generation(void) +{ + uintptr_t value; + value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER; + return (uintptr_t)DISPATCH_ONCE_MAKE_GEN(value); +} + +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_once_mark_quiescing(dispatch_once_gate_t dgo) +{ + return os_atomic_xchg(&dgo->dgo_once, _dispatch_once_generation(), release); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_once_mark_done_if_quiesced(dispatch_once_gate_t dgo, uintptr_t gen) +{ + if (_dispatch_once_generation() - gen >= DISPATCH_ONCE_GEN_SAFE_DELTA) { + /* + * See explanation above, when the quiescing counter approach is taken + * then this store needs only to be relaxed as 
it is used as a witness + * that the required barriers have happened. + */ + os_atomic_store(&dgo->dgo_once, DLOCK_ONCE_DONE, relaxed); + } +} +#else +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_once_mark_done(dispatch_once_gate_t dgo) +{ + return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release); +} +#endif // DISPATCH_ONCE_USE_QUIESCENT_COUNTER + +void _dispatch_once_wait(dispatch_once_gate_t l); void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur); DISPATCH_ALWAYS_INLINE @@ -441,9 +640,6 @@ _dispatch_gate_tryenter(dispatch_gate_t l) _dispatch_lock_value_for_self(), acquire); } -#define _dispatch_gate_wait(l, flags) \ - _dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags) - DISPATCH_ALWAYS_INLINE static inline void _dispatch_gate_broadcast(dispatch_gate_t l) @@ -459,18 +655,7 @@ static inline bool _dispatch_once_gate_tryenter(dispatch_once_gate_t l) { return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED, - (dispatch_once_t)_dispatch_lock_value_for_self(), acquire); -} - -#define _dispatch_once_gate_wait(l) \ - _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \ - DLOCK_LOCK_NONE) - -DISPATCH_ALWAYS_INLINE -static inline dispatch_once_t -_dispatch_once_xchg_done(dispatch_once_t *pred) -{ - return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release); + (uintptr_t)_dispatch_lock_value_for_self(), relaxed); } DISPATCH_ALWAYS_INLINE @@ -478,9 +663,22 @@ static inline void _dispatch_once_gate_broadcast(dispatch_once_gate_t l) { dispatch_lock value_self = _dispatch_lock_value_for_self(); - dispatch_once_t cur = _dispatch_once_xchg_done(&l->dgo_once); - if (likely(cur == (dispatch_once_t)value_self)) return; - _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)cur); + uintptr_t v; +#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER + v = _dispatch_once_mark_quiescing(l); +#else + v = _dispatch_once_mark_done(l); +#endif + if (likely((dispatch_lock)v == value_self)) return; + 
_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v); } +#if TARGET_OS_MAC + +DISPATCH_NOT_TAIL_CALLED +void _dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t owner, + uint32_t flags); + +#endif // TARGET_OS_MAC + #endif // __DISPATCH_SHIMS_LOCK__ diff --git a/src/shims/perfmon.h b/src/shims/perfmon.h index be9327baf..af6183f8d 100644 --- a/src/shims/perfmon.h +++ b/src/shims/perfmon.h @@ -67,7 +67,7 @@ _dispatch_perfmon_workitem_dec(void) #define _dispatch_perfmon_start_impl(trace) ({ \ if (trace) _dispatch_ktrace0(DISPATCH_PERF_MON_worker_thread_start); \ - perfmon_start = _dispatch_absolute_time(); \ + perfmon_start = _dispatch_uptime(); \ }) #define _dispatch_perfmon_start() \ DISPATCH_PERF_MON_VAR _dispatch_perfmon_start_impl(true) diff --git a/src/shims/priority.h b/src/shims/priority.h index 3e85ff54c..56ea5ce09 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -36,8 +36,8 @@ #ifndef _PTHREAD_PRIORITY_SCHED_PRI_FLAG #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #endif -#ifndef _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#ifndef _PTHREAD_PRIORITY_FALLBACK_FLAG +#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 #endif #ifndef _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 @@ -63,7 +63,7 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 @@ -72,41 +72,64 @@ typedef unsigned long pthread_priority_t; typedef uint32_t dispatch_qos_t; typedef uint32_t dispatch_priority_t; -typedef uint32_t dispatch_priority_t; -typedef 
uint16_t dispatch_priority_requested_t; - -#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0) -#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1) -#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2) -#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3) -#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4) -#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5) -#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6) -#define DISPATCH_QOS_MAX DISPATCH_QOS_USER_INTERACTIVE -#define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) + +#define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0) +#define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1) +#define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2) +#define DISPATCH_QOS_UTILITY ((dispatch_qos_t)3) +#define DISPATCH_QOS_DEFAULT ((dispatch_qos_t)4) +#define DISPATCH_QOS_USER_INITIATED ((dispatch_qos_t)5) +#define DISPATCH_QOS_USER_INTERACTIVE ((dispatch_qos_t)6) +#define DISPATCH_QOS_MIN DISPATCH_QOS_MAINTENANCE +#define DISPATCH_QOS_MAX DISPATCH_QOS_USER_INTERACTIVE +#define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) + +#define DISPATCH_QOS_NBUCKETS (DISPATCH_QOS_MAX - DISPATCH_QOS_MIN + 1) +#define DISPATCH_QOS_BUCKET(qos) ((qos) - DISPATCH_QOS_MIN) #define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) #define DISPATCH_PRIORITY_RELPRI_SHIFT 0 -#define DISPATCH_PRIORITY_QOS_MASK ((dispatch_priority_t)0x0000ff00) +#define DISPATCH_PRIORITY_QOS_MASK ((dispatch_priority_t)0x00000f00) #define DISPATCH_PRIORITY_QOS_SHIFT 8 -#define DISPATCH_PRIORITY_REQUESTED_MASK ((dispatch_priority_t)0x0000ffff) -#define DISPATCH_PRIORITY_OVERRIDE_MASK ((dispatch_priority_t)0x00ff0000) +#define DISPATCH_PRIORITY_REQUESTED_MASK ((dispatch_priority_t)0x00000fff) +#define DISPATCH_PRIORITY_FALLBACK_QOS_MASK ((dispatch_priority_t)0x0000f000) +#define DISPATCH_PRIORITY_FALLBACK_QOS_SHIFT 12 +#define DISPATCH_PRIORITY_OVERRIDE_MASK ((dispatch_priority_t)0x000f0000) #define DISPATCH_PRIORITY_OVERRIDE_SHIFT 16 #define 
DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) -#define DISPATCH_PRIORITY_SATURATED_OVERRIDE ((dispatch_priority_t)0x000f0000) +#define DISPATCH_PRIORITY_SATURATED_OVERRIDE DISPATCH_PRIORITY_OVERRIDE_MASK #define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG -#define DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG +#define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_FALLBACK_FLAG #define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG #define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ - (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_DEFAULTQUEUE | \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_FALLBACK | \ DISPATCH_PRIORITY_FLAG_MANAGER) // not passed to pthread -#define DISPATCH_PRIORITY_FLAG_INHERIT ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG +#define DISPATCH_PRIORITY_FLAG_FLOOR ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG #define DISPATCH_PRIORITY_FLAG_ENFORCE ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG -#define DISPATCH_PRIORITY_FLAG_ROOTQUEUE ((dispatch_priority_t)0x20000000) // _PTHREAD_PRIORITY_ROOTQUEUE_FLAG +#define DISPATCH_PRIORITY_FLAG_INHERITED ((dispatch_priority_t)0x20000000) + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_qos_class_valid(qos_class_t cls, int relpri) +{ + switch ((unsigned int)cls) { + case QOS_CLASS_MAINTENANCE: + case QOS_CLASS_BACKGROUND: + case QOS_CLASS_UTILITY: + case QOS_CLASS_DEFAULT: + case QOS_CLASS_USER_INITIATED: + case QOS_CLASS_USER_INTERACTIVE: + case QOS_CLASS_UNSPECIFIED: + break; + default: + return false; + } + return QOS_MIN_RELATIVE_PRIORITY <= relpri && relpri <= 0; +} #pragma mark dispatch_qos @@ -163,6 +186,16 @@ _dispatch_qos_from_pp(pthread_priority_t pp) return 
(dispatch_qos_t)__builtin_ffs((int)pp); } +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_from_pp_unsafe(pthread_priority_t pp) +{ + // this assumes we know there is a QOS and pp has been masked off properly + pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; + DISPATCH_COMPILER_CAN_ASSUME(pp); + return (dispatch_qos_t)__builtin_ffs((int)pp); +} + DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_qos_to_pp(dispatch_qos_t qos) @@ -186,15 +219,16 @@ _dispatch_qos_is_background(dispatch_qos_t qos) (qos ? ((((qos) << DISPATCH_PRIORITY_QOS_SHIFT) & DISPATCH_PRIORITY_QOS_MASK) | \ ((dispatch_priority_t)(relpri - 1) & DISPATCH_PRIORITY_RELPRI_MASK)) : 0) -DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_priority_with_override_qos(dispatch_priority_t pri, - dispatch_qos_t oqos) -{ - pri &= ~DISPATCH_PRIORITY_OVERRIDE_MASK; - pri |= oqos << DISPATCH_PRIORITY_OVERRIDE_SHIFT; - return pri; -} +#define _dispatch_priority_make_override(qos) \ + (((qos) << DISPATCH_PRIORITY_OVERRIDE_SHIFT) & \ + DISPATCH_PRIORITY_OVERRIDE_MASK) + +#define _dispatch_priority_make_floor(qos) \ + (qos ? (_dispatch_priority_make(qos) | DISPATCH_PRIORITY_FLAG_FLOOR) : 0) + +#define _dispatch_priority_make_fallback(qos) \ + (qos ? 
((((qos) << DISPATCH_PRIORITY_FALLBACK_QOS_SHIFT) & \ + DISPATCH_PRIORITY_FALLBACK_QOS_MASK) | DISPATCH_PRIORITY_FLAG_FALLBACK) : 0) DISPATCH_ALWAYS_INLINE static inline int @@ -214,6 +248,14 @@ _dispatch_priority_qos(dispatch_priority_t dbp) return dbp >> DISPATCH_PRIORITY_QOS_SHIFT; } +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_priority_fallback_qos(dispatch_priority_t dbp) +{ + dbp &= DISPATCH_PRIORITY_FALLBACK_QOS_MASK; + return dbp >> DISPATCH_PRIORITY_FALLBACK_QOS_SHIFT; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dispatch_priority_override_qos(dispatch_priority_t dbp) @@ -222,6 +264,16 @@ _dispatch_priority_override_qos(dispatch_priority_t dbp) return dbp >> DISPATCH_PRIORITY_OVERRIDE_SHIFT; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_priority_manually_selected(dispatch_priority_t pri) +{ + return !(pri & DISPATCH_PRIORITY_FLAG_INHERITED) && + (pri & (DISPATCH_PRIORITY_FLAG_FALLBACK | + DISPATCH_PRIORITY_FLAG_FLOOR | + DISPATCH_PRIORITY_REQUESTED_MASK)); +} + DISPATCH_ALWAYS_INLINE static inline dispatch_priority_t _dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) @@ -244,26 +296,40 @@ _dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) #define _dispatch_priority_from_pp_strip_flags(pp) \ _dispatch_priority_from_pp_impl(pp, false) +#define DISPATCH_PRIORITY_TO_PP_STRIP_FLAGS 0x1 +#define DISPATCH_PRIORITY_TO_PP_PREFER_FALLBACK 0x2 + DISPATCH_ALWAYS_INLINE static inline pthread_priority_t -_dispatch_priority_to_pp_impl(dispatch_priority_t dbp, bool keep_flags) +_dispatch_priority_to_pp_strip_flags(dispatch_priority_t dbp) { - pthread_priority_t pp; - if (keep_flags) { - pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | - DISPATCH_PRIORITY_RELPRI_MASK); - } else { - pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; - } + pthread_priority_t pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; dispatch_qos_t qos = _dispatch_priority_qos(dbp); if (qos) { pp |= (1ul << ((qos - 
1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); } return pp; } -#define _dispatch_priority_to_pp(pp) \ - _dispatch_priority_to_pp_impl(pp, true) -#define _dispatch_priority_to_pp_strip_flags(pp) \ - _dispatch_priority_to_pp_impl(pp, false) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_priority_to_pp_prefer_fallback(dispatch_priority_t dbp) +{ + pthread_priority_t pp; + dispatch_qos_t qos; + + if (dbp & DISPATCH_PRIORITY_FLAG_FALLBACK) { + pp = dbp & DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK; + pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; + qos = _dispatch_priority_fallback_qos(dbp); + } else { + pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | + DISPATCH_PRIORITY_RELPRI_MASK); + qos = _dispatch_priority_qos(dbp); + if (unlikely(!qos)) return pp; + } + + return pp | (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); +} #endif // __DISPATCH_SHIMS_PRIORITY__ diff --git a/src/shims/target.h b/src/shims/target.h new file mode 100644 index 000000000..8e996aa73 --- /dev/null +++ b/src/shims/target.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +// These are the portable dispatch version requirements macros, isolated from +// the rest of the C internal headers to be suitable for inclusion in MIG defs, +// asm, etc. + +#ifndef __DISPATCH_SHIMS_TARGET__ +#define __DISPATCH_SHIMS_TARGET__ + +#ifdef __APPLE__ +#include +#include + +#if TARGET_OS_OSX +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ + (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# error "OS X hosts older than OS X 10.12 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +#elif TARGET_OS_SIMULATOR +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ + (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# error "Simulator hosts older than OS X 10.12 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +#else +# define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 +# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# error "iOS hosts older than iOS 9.0 aren't supported anymore" +# endif +#endif + +#else // !__APPLE__ +#define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 0 +#endif // !__APPLE__ + +#endif // __DISPATCH_SHIMS_TARGET__ diff --git a/src/shims/time.h b/src/shims/time.h index 0b8e92617..348e14920 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -41,9 +41,10 @@ sleep(unsigned int seconds) #endif typedef enum { + DISPATCH_CLOCK_UPTIME, + DISPATCH_CLOCK_MONOTONIC, DISPATCH_CLOCK_WALL, - DISPATCH_CLOCK_MACH, -#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_MACH + 1) +#define DISPATCH_CLOCK_COUNT (DISPATCH_CLOCK_WALL + 1) } dispatch_clock_t; void _dispatch_time_init(void); @@ -122,14 +123,35 @@ _dispatch_get_nanoseconds(void) } static inline uint64_t -_dispatch_absolute_time(void) +_dispatch_uptime(void) { #if HAVE_MACH_ABSOLUTE_TIME return mach_absolute_time(); -#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__) +#elif defined(__linux__) + struct timespec ts; + 
dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); + return _dispatch_timespec_to_nano(ts); +#elif HAVE_DECL_CLOCK_UPTIME struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); return _dispatch_timespec_to_nano(ts); +#elif TARGET_OS_WIN32 + LARGE_INTEGER now; + return QueryPerformanceCounter(&now) ? now.QuadPart : 0; +#else +#error platform needs to implement _dispatch_uptime() +#endif +} + +static inline uint64_t +_dispatch_monotonic_time(void) +{ +#if HAVE_MACH_ABSOLUTE_TIME + return mach_continuous_time(); +#elif defined(__linux__) + struct timespec ts; + dispatch_assume_zero(clock_gettime(CLOCK_BOOTTIME, &ts)); + return _dispatch_timespec_to_nano(ts); #elif HAVE_DECL_CLOCK_MONOTONIC struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); @@ -138,7 +160,7 @@ _dispatch_absolute_time(void) LARGE_INTEGER now; return QueryPerformanceCounter(&now) ? now.QuadPart : 0; #else -#error platform needs to implement _dispatch_absolute_time() +#error platform needs to implement _dispatch_monotonic_time() #endif } @@ -148,16 +170,16 @@ _dispatch_approximate_time(void) { #if HAVE_MACH_APPROXIMATE_TIME return mach_approximate_time(); -#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__) +#elif defined(__linux__) struct timespec ts; - dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts)); + dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)); return _dispatch_timespec_to_nano(ts); -#elif defined(__linux__) +#elif HAVE_DECL_CLOCK_UPTIME_FAST struct timespec ts; - dispatch_assume_zero(clock_gettime(CLOCK_REALTIME_COARSE, &ts)); + dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts)); return _dispatch_timespec_to_nano(ts); #else - return _dispatch_absolute_time(); + return _dispatch_uptime(); #endif } @@ -166,8 +188,10 @@ static inline uint64_t _dispatch_time_now(dispatch_clock_t clock) { switch (clock) { - case DISPATCH_CLOCK_MACH: - return _dispatch_absolute_time(); + case DISPATCH_CLOCK_UPTIME: + 
return _dispatch_uptime(); + case DISPATCH_CLOCK_MONOTONIC: + return _dispatch_monotonic_time(); case DISPATCH_CLOCK_WALL: return _dispatch_get_nanoseconds(); } @@ -186,7 +210,75 @@ _dispatch_time_now_cached(dispatch_clock_t clock, if (likely(cache->nows[clock])) { return cache->nows[clock]; } - return cache->nows[clock] = _dispatch_time_now(clock); +#if TARGET_OS_MAC + struct timespec ts; + mach_get_times(&cache->nows[DISPATCH_CLOCK_UPTIME], + &cache->nows[DISPATCH_CLOCK_MONOTONIC], &ts); + cache->nows[DISPATCH_CLOCK_WALL] = _dispatch_timespec_to_nano(ts); +#else + cache->nows[clock] = _dispatch_time_now(clock); +#endif + return cache->nows[clock]; +} + +// Encoding of dispatch_time_t: +// 1. Wall time has the top two bits set; negate to get the actual value. +// 2. Absolute time has the top two bits clear and is the actual value. +// 3. Continuous time has bit 63 set and bit 62 clear. Clear bit 63 to get the +// actual value. +// 4. "Forever" and "now" are encoded as ~0ULL and 0ULL respectively. +// +// The consequence of all this is that we can't have an actual time value that +// is >= 0x4000000000000000. Larger values always get silently converted to +// DISPATCH_TIME_FOREVER because the APIs that return time values have no way to +// indicate a range error. +#define DISPATCH_UP_OR_MONOTONIC_TIME_MASK (1ULL << 63) +#define DISPATCH_WALLTIME_MASK (1ULL << 62) +#define DISPATCH_TIME_MAX_VALUE (DISPATCH_WALLTIME_MASK - 1) + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_time_to_clock_and_value(dispatch_time_t time, + dispatch_clock_t *clock, uint64_t *value) +{ + uint64_t actual_value; + if ((int64_t)time < 0) { + // Wall time or mach continuous time + if (time & DISPATCH_WALLTIME_MASK) { + // Wall time (value 11 in bits 63, 62) + *clock = DISPATCH_CLOCK_WALL; + actual_value = time == DISPATCH_WALLTIME_NOW ? + _dispatch_get_nanoseconds() : (uint64_t)-time; + } else { + // Continuous time (value 10 in bits 63, 62). 
+ *clock = DISPATCH_CLOCK_MONOTONIC; + actual_value = time & ~DISPATCH_UP_OR_MONOTONIC_TIME_MASK; + } + } else { + *clock = DISPATCH_CLOCK_UPTIME; + actual_value = time; + } + + // Range-check the value before returning. + *value = actual_value > DISPATCH_TIME_MAX_VALUE ? DISPATCH_TIME_FOREVER + : actual_value; } +DISPATCH_ALWAYS_INLINE +static inline dispatch_time_t +_dispatch_clock_and_value_to_time(dispatch_clock_t clock, uint64_t value) +{ + if (value >= DISPATCH_TIME_MAX_VALUE) { + return DISPATCH_TIME_FOREVER; + } + switch (clock) { + case DISPATCH_CLOCK_WALL: + return -value; + case DISPATCH_CLOCK_UPTIME: + return value; + case DISPATCH_CLOCK_MONOTONIC: + return value | DISPATCH_UP_OR_MONOTONIC_TIME_MASK; + } + __builtin_unreachable(); +} #endif // __DISPATCH_SHIMS_TIME__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index c119e4f01..eaed362c4 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -198,7 +198,7 @@ _dispatch_thread_setspecific(pthread_key_t k, void *v) if (_pthread_has_direct_tsd()) { (void)_pthread_setspecific_direct(k, v); } else { -#if TARGET_IPHONE_SIMULATOR +#if TARGET_OS_SIMULATOR (void)_pthread_setspecific_static(k, v); // rdar://26058142 #else __builtin_trap(); // unreachable diff --git a/src/shims/yield.h b/src/shims/yield.h index 67f8679ac..121b48e61 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -30,6 +30,24 @@ #pragma mark - #pragma mark _dispatch_wait_until +// _dispatch_wait_until() is used for cases when we're waiting on a thread to +// finish a critical section that is a few instructions long and cannot fail +// (IOW has a guarantee of making forward progress). +// +// Using _dispatch_wait_until() has two implications: +// - there's a single waiter for the specified condition, +// - the thing it is waiting on has a strong guarantee of forward progress +// toward resolving the condition. 
+// +// For these reasons, we spin shortly for the likely case when the other thread +// is on core and we just caught it in the inconsistency window. If the +// condition we're waiting for doesn't resolve quickly, then we yield because +// it's very likely the other thread that can unblock us is preempted, and we +// need to wait for it to be scheduled again. +// +// Its typical client is the enqueuer/dequeuer starvation issue for the dispatch +// enqueue algorithm where there is typically a 1-10 instruction gap between the +// exchange at the tail and setting the head/prev pointer. #if DISPATCH_HW_CONFIG_UP #define _dispatch_wait_until(c) ({ \ typeof(c) _c; \ @@ -40,9 +58,8 @@ _dispatch_preemption_yield(_spins); \ } \ _c; }) -#elif TARGET_OS_EMBEDDED -// -#ifndef DISPATCH_WAIT_SPINS +#else +#ifndef DISPATCH_WAIT_SPINS // #define DISPATCH_WAIT_SPINS 1024 #endif #define _dispatch_wait_until(c) ({ \ @@ -50,21 +67,13 @@ int _spins = -(DISPATCH_WAIT_SPINS); \ for (;;) { \ if (likely(_c = (c))) break; \ - if (slowpath(_spins++ >= 0)) { \ + if (unlikely(_spins++ >= 0)) { \ _dispatch_preemption_yield(_spins); \ } else { \ dispatch_hardware_pause(); \ } \ } \ _c; }) -#else -#define _dispatch_wait_until(c) ({ \ - typeof(c) _c; \ - for (;;) { \ - if (likely(_c = (c))) break; \ - dispatch_hardware_pause(); \ - } \ - _c; }) #endif #pragma mark - @@ -79,17 +88,15 @@ #ifndef DISPATCH_CONTENTION_SPINS_MIN #define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) #endif -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #define _dispatch_contention_spins() \ ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ (DISPATCH_CONTENTION_SPINS_MIN)) / 2) #else // Use randomness to prevent threads from resonating at the same -// frequency and permanently contending. All threads sharing the same -// seed value is safe with the FreeBSD rand_r implementation. +// frequency and permanently contending. 
#define _dispatch_contention_spins() ({ \ - static unsigned int _seed; \ - ((unsigned int)rand_r(&_seed) & (DISPATCH_CONTENTION_SPINS_MAX)) | \ + ((unsigned int)rand() & (DISPATCH_CONTENTION_SPINS_MAX)) | \ (DISPATCH_CONTENTION_SPINS_MIN); }) #endif #define _dispatch_contention_wait_until(c) ({ \ @@ -97,7 +104,7 @@ unsigned int _spins = _dispatch_contention_spins(); \ while (_spins--) { \ dispatch_hardware_pause(); \ - if ((_out = fastpath(c))) break; \ + if (likely(_out = (c))) break; \ }; _out; }) #endif diff --git a/src/source.c b/src/source.c index 6f504787d..b1b893178 100644 --- a/src/source.c +++ b/src/source.c @@ -20,21 +20,24 @@ #include "internal.h" -static void _dispatch_source_handler_free(dispatch_source_t ds, long kind); -static void _dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval); - -#define DISPATCH_TIMERS_UNREGISTER 0x1 -#define DISPATCH_TIMERS_RETAIN_2 0x2 -static void _dispatch_timers_update(dispatch_unote_t du, uint32_t flags); -static void _dispatch_timers_unregister(dispatch_timer_source_refs_t dt); - -static void _dispatch_source_timer_configure(dispatch_source_t ds); -static inline unsigned long _dispatch_source_timer_data( - dispatch_source_t ds, dispatch_unote_t du); +static void _dispatch_source_handler_free(dispatch_source_refs_t ds, long kind); #pragma mark - #pragma mark dispatch_source_t +DISPATCH_ALWAYS_INLINE +static inline dispatch_continuation_t +_dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) +{ + return os_atomic_load(&dr->ds_handler[kind], relaxed); +} +#define _dispatch_source_get_event_handler(dr) \ + _dispatch_source_get_handler(dr, DS_EVENT_HANDLER) +#define _dispatch_source_get_cancel_handler(dr) \ + _dispatch_source_get_handler(dr, DS_CANCEL_HANDLER) +#define _dispatch_source_get_registration_handler(dr) \ + _dispatch_source_get_handler(dr, DS_REGISTN_HANDLER) + dispatch_source_t dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, unsigned long mask, 
dispatch_queue_t dq) @@ -47,24 +50,21 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, return DISPATCH_BAD_INPUT; } - ds = _dispatch_object_alloc(DISPATCH_VTABLE(source), - sizeof(struct dispatch_source_s)); - // Initialize as a queue first, then override some settings below. - _dispatch_queue_init(ds->_as_dq, DQF_LEGACY, 1, - DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER); + ds = _dispatch_queue_alloc(source, + dux_type(dr)->dst_strict ? DSF_STRICT : DQF_MUTABLE, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._ds; ds->dq_label = "source"; - ds->do_ref_cnt++; // the reference the manager queue holds ds->ds_refs = dr; dr->du_owner_wref = _dispatch_ptr2wref(ds); - if (slowpath(!dq)) { - dq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, true); + if (unlikely(!dq)) { + dq = _dispatch_get_default_queue(true); } else { _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; - if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_INTERVAL)) { - _dispatch_source_set_interval(ds, handle); + if (dr->du_is_timer && (dr->du_timer_flags & DISPATCH_TIMER_INTERVAL)) { + dispatch_source_set_timer(ds, DISPATCH_TIME_NOW, handle, UINT64_MAX); } _dispatch_object_debug(ds, "%s", __func__); return ds; @@ -74,19 +74,22 @@ void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free) { _dispatch_object_debug(ds, "%s", __func__); - _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); - _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); - _dispatch_source_handler_free(ds, DS_CANCEL_HANDLER); + + _dispatch_trace_source_dispose(ds); + _dispatch_source_handler_free(ds->ds_refs, DS_REGISTN_HANDLER); + _dispatch_source_handler_free(ds->ds_refs, DS_EVENT_HANDLER); + _dispatch_source_handler_free(ds->ds_refs, DS_CANCEL_HANDLER); _dispatch_unote_dispose(ds->ds_refs); ds->ds_refs = NULL; - _dispatch_queue_destroy(ds->_as_dq, allow_free); + _dispatch_lane_class_dispose(ds, allow_free); } void 
_dispatch_source_xref_dispose(dispatch_source_t ds) { - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (unlikely(!(dqf & (DQF_LEGACY|DSF_CANCELED)))) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (unlikely((dqf & DSF_STRICT) && !(dqf & DSF_CANCELED) && + _dispatch_source_get_cancel_handler(ds->ds_refs))) { DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " "cancelled, but has a mandatory cancel handler"); } @@ -110,12 +113,15 @@ dispatch_source_get_mask(dispatch_source_t ds) if (dr->du_vmpressure_override) { return NOTE_VM_PRESSURE; } -#if TARGET_IPHONE_SIMULATOR +#if TARGET_OS_SIMULATOR if (dr->du_memorypressure_override) { return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif #endif // DISPATCH_USE_MEMORYSTATUS + if (dr->du_is_timer) { + return dr->du_timer_flags; + } return dr->du_fflags; } @@ -123,11 +129,18 @@ uintptr_t dispatch_source_get_handle(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; -#if TARGET_IPHONE_SIMULATOR +#if TARGET_OS_SIMULATOR if (dr->du_memorypressure_override) { return 0; } #endif + if (dr->du_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK) { + switch (_dispatch_timer_flags_to_clock(dr->du_timer_flags)) { + case DISPATCH_CLOCK_UPTIME: return DISPATCH_CLOCKID_UPTIME; + case DISPATCH_CLOCK_MONOTONIC: return DISPATCH_CLOCKID_MONOTONIC; + case DISPATCH_CLOCK_WALL: return DISPATCH_CLOCKID_WALLTIME; + } + } return dr->du_ident; } @@ -139,29 +152,28 @@ dispatch_source_get_data(dispatch_source_t ds) if (dr->du_vmpressure_override) { return NOTE_VM_PRESSURE; } -#if TARGET_IPHONE_SIMULATOR +#if TARGET_OS_SIMULATOR if (dr->du_memorypressure_override) { return NOTE_MEMORYSTATUS_PRESSURE_WARN; } #endif #endif // DISPATCH_USE_MEMORYSTATUS - uint64_t value = os_atomic_load2o(ds, ds_data, relaxed); - return (unsigned long)( - ds->ds_refs->du_data_action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET - ? 
DISPATCH_SOURCE_GET_DATA(value) : value); + uint64_t value = os_atomic_load2o(dr, ds_data, relaxed); + return (unsigned long)(dr->du_has_extended_status ? + DISPATCH_SOURCE_GET_DATA(value) : value); } size_t dispatch_source_get_extended_data(dispatch_source_t ds, dispatch_source_extended_data_t edata, size_t size) { + dispatch_source_refs_t dr = ds->ds_refs; size_t target_size = MIN(size, sizeof(struct dispatch_source_extended_data_s)); if (size > 0) { unsigned long data, status = 0; - if (ds->ds_refs->du_data_action - == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { - uint64_t combined = os_atomic_load(&ds->ds_data, relaxed); + if (dr->du_has_extended_status) { + uint64_t combined = os_atomic_load(&dr->ds_data, relaxed); data = DISPATCH_SOURCE_GET_DATA(combined); status = DISPATCH_SOURCE_GET_STATUS(combined); } else { @@ -184,61 +196,40 @@ dispatch_source_get_extended_data(dispatch_source_t ds, return target_size; } -DISPATCH_NOINLINE void -_dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, - unsigned long val) +dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) { - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - int filter = ds->ds_refs->du_filter; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + dispatch_source_refs_t dr = ds->ds_refs; - if (unlikely(dqf & (DSF_CANCELED | DSF_DELETED))) { + if (unlikely(dqf & (DSF_CANCELED | DQF_RELEASED))) { return; } - switch (filter) { + switch (dr->du_filter) { case DISPATCH_EVFILT_CUSTOM_ADD: - os_atomic_add2o(ds, ds_pending_data, val, relaxed); + os_atomic_add2o(dr, ds_pending_data, val, relaxed); break; case DISPATCH_EVFILT_CUSTOM_OR: - os_atomic_or2o(ds, ds_pending_data, val, relaxed); + os_atomic_or2o(dr, ds_pending_data, val, relaxed); break; case DISPATCH_EVFILT_CUSTOM_REPLACE: - os_atomic_store2o(ds, ds_pending_data, val, relaxed); + os_atomic_store2o(dr, ds_pending_data, val, relaxed); break; default: - DISPATCH_CLIENT_CRASH(filter, 
"Invalid source type"); + DISPATCH_CLIENT_CRASH(dr->du_filter, "Invalid source type"); } - dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_MAKE_DIRTY); -} - -void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) -{ - _dispatch_source_merge_data(ds, 0, val); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } #pragma mark - #pragma mark dispatch_source_handler -DISPATCH_ALWAYS_INLINE -static inline dispatch_continuation_t -_dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) -{ - return os_atomic_load(&dr->ds_handler[kind], relaxed); -} -#define _dispatch_source_get_event_handler(dr) \ - _dispatch_source_get_handler(dr, DS_EVENT_HANDLER) -#define _dispatch_source_get_cancel_handler(dr) \ - _dispatch_source_get_handler(dr, DS_CANCEL_HANDLER) -#define _dispatch_source_get_registration_handler(dr) \ - _dispatch_source_get_handler(dr, DS_REGISTN_HANDLER) - DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t _dispatch_source_handler_alloc(dispatch_source_t ds, void *func, long kind, - bool block) + bool is_block) { // sources don't propagate priority by default const dispatch_block_flags_t flags = @@ -248,20 +239,19 @@ _dispatch_source_handler_alloc(dispatch_source_t ds, void *func, long kind, uintptr_t dc_flags = 0; if (kind != DS_EVENT_HANDLER) { - dc_flags |= DISPATCH_OBJ_CONSUME_BIT; + dc_flags |= DC_FLAG_CONSUME; } - if (block) { + if (is_block) { #ifdef __BLOCKS__ - _dispatch_continuation_init(dc, ds, func, 0, flags, dc_flags); + _dispatch_continuation_init(dc, ds, func, flags, dc_flags); #endif /* __BLOCKS__ */ } else { - dc_flags |= DISPATCH_OBJ_CTXT_FETCH_BIT; - _dispatch_continuation_init_f(dc, ds, ds->do_ctxt, func, - 0, flags, dc_flags); + dc_flags |= DC_FLAG_FETCH_CONTEXT; + _dispatch_continuation_init_f(dc, ds, ds->do_ctxt, func, flags, + dc_flags); } - _dispatch_trace_continuation_push(ds->_as_dq, dc); } else { - dc->dc_flags = 0; + dc->dc_flags = DC_FLAG_ALLOCATED; dc->dc_func = NULL; } return dc; @@ 
-272,7 +262,7 @@ static void _dispatch_source_handler_dispose(dispatch_continuation_t dc) { #ifdef __BLOCKS__ - if (dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { + if (dc->dc_flags & DC_FLAG_BLOCK) { Block_release(dc->dc_ctxt); } #endif /* __BLOCKS__ */ @@ -285,16 +275,16 @@ _dispatch_source_handler_dispose(dispatch_continuation_t dc) DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t -_dispatch_source_handler_take(dispatch_source_t ds, long kind) +_dispatch_source_handler_take(dispatch_source_refs_t dr, long kind) { - return os_atomic_xchg(&ds->ds_refs->ds_handler[kind], NULL, relaxed); + return os_atomic_xchg(&dr->ds_handler[kind], NULL, relaxed); } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_handler_free(dispatch_source_t ds, long kind) +_dispatch_source_handler_free(dispatch_source_refs_t dr, long kind) { - dispatch_continuation_t dc = _dispatch_source_handler_take(ds, kind); + dispatch_continuation_t dc = _dispatch_source_handler_take(dr, kind); if (dc) _dispatch_source_handler_dispose(dc); } @@ -306,7 +296,7 @@ _dispatch_source_handler_replace(dispatch_source_t ds, long kind, if (!dc->dc_func) { _dispatch_continuation_free(dc); dc = NULL; - } else if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + } else if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { dc->dc_ctxt = ds->do_ctxt; } dc = os_atomic_xchg(&ds->ds_refs->ds_handler[kind], dc, release); @@ -317,7 +307,7 @@ DISPATCH_NOINLINE static void _dispatch_source_set_handler_slow(void *context) { - dispatch_source_t ds = (dispatch_source_t)_dispatch_queue_get_current(); + dispatch_source_t ds = upcast(_dispatch_queue_get_current())._ds; dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_continuation_t dc = context; @@ -328,25 +318,36 @@ _dispatch_source_set_handler_slow(void *context) DISPATCH_NOINLINE static void -_dispatch_source_set_handler(dispatch_source_t ds, long kind, - dispatch_continuation_t dc) +_dispatch_source_set_handler(dispatch_source_t ds, void *func, + long 
kind, bool is_block) { - dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); - if (_dispatch_queue_try_inactive_suspend(ds->_as_dq)) { + dispatch_continuation_t dc; + + dc = _dispatch_source_handler_alloc(ds, func, kind, is_block); + + if (_dispatch_lane_try_inactive_suspend(ds)) { _dispatch_source_handler_replace(ds, kind, dc); - return dx_vtable(ds)->do_resume(ds, false); + return _dispatch_lane_resume(ds, false); } - if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { + + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (unlikely(dqf & DSF_STRICT)) { DISPATCH_CLIENT_CRASH(kind, "Cannot change a handler of this source " "after it has been activated"); } - _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); - if (kind == DS_REGISTN_HANDLER) { - _dispatch_bug_deprecated("Setting registration handler after " - "the source has been activated"); + // Ignore handlers mutations past cancelation, it's harmless + if ((dqf & DSF_CANCELED) == 0) { + _dispatch_ktrace1(DISPATCH_PERF_post_activate_mutation, ds); + if (kind == DS_REGISTN_HANDLER) { + _dispatch_bug_deprecated("Setting registration handler after " + "the source has been activated"); + } else if (func == NULL) { + _dispatch_bug_deprecated("Clearing handler after " + "the source has been activated"); + } } dc->dc_data = (void *)kind; - _dispatch_barrier_trysync_or_async_f(ds->_as_dq, dc, + _dispatch_barrier_trysync_or_async_f(ds, dc, _dispatch_source_set_handler_slow, 0); } @@ -355,9 +356,7 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, dispatch_block_t handler) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, true); - _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); + _dispatch_source_set_handler(ds, handler, DS_EVENT_HANDLER, true); } #endif /* __BLOCKS__ */ @@ -365,69 +364,39 @@ void dispatch_source_set_event_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_continuation_t 
dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_EVENT_HANDLER, false); - _dispatch_source_set_handler(ds, DS_EVENT_HANDLER, dc); + _dispatch_source_set_handler(ds, handler, DS_EVENT_HANDLER, false); } #ifdef __BLOCKS__ -DISPATCH_NOINLINE -static void -_dispatch_source_set_cancel_handler(dispatch_source_t ds, - dispatch_block_t handler) -{ - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, true); - _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); -} - void dispatch_source_set_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { - if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { - DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " - "this source"); - } - return _dispatch_source_set_cancel_handler(ds, handler); + _dispatch_source_set_handler(ds, handler, DS_CANCEL_HANDLER, true); } void dispatch_source_set_mandatory_cancel_handler(dispatch_source_t ds, dispatch_block_t handler) { - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); - return _dispatch_source_set_cancel_handler(ds, handler); + _dispatch_queue_atomic_flags_set_and_clear(ds, DSF_STRICT, DQF_MUTABLE); + dispatch_source_set_cancel_handler(ds, handler); } #endif /* __BLOCKS__ */ -DISPATCH_NOINLINE -static void -_dispatch_source_set_cancel_handler_f(dispatch_source_t ds, - dispatch_function_t handler) -{ - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_CANCEL_HANDLER, false); - _dispatch_source_set_handler(ds, DS_CANCEL_HANDLER, dc); -} - void dispatch_source_set_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - if (unlikely(!_dispatch_queue_is_legacy(ds->_as_dq))) { - DISPATCH_CLIENT_CRASH(0, "Cannot set a non mandatory handler on " - "this source"); - } - return _dispatch_source_set_cancel_handler_f(ds, handler); + _dispatch_source_set_handler(ds, handler, DS_CANCEL_HANDLER, false); } void 
dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DQF_LEGACY); - return _dispatch_source_set_cancel_handler_f(ds, handler); + _dispatch_queue_atomic_flags_set_and_clear(ds, DSF_STRICT, DQF_MUTABLE); + dispatch_source_set_cancel_handler_f(ds, handler); } #ifdef __BLOCKS__ @@ -435,9 +404,7 @@ void dispatch_source_set_registration_handler(dispatch_source_t ds, dispatch_block_t handler) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, true); - _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); + _dispatch_source_set_handler(ds, handler, DS_REGISTN_HANDLER, true); } #endif /* __BLOCKS__ */ @@ -445,28 +412,40 @@ void dispatch_source_set_registration_handler_f(dispatch_source_t ds, dispatch_function_t handler) { - dispatch_continuation_t dc; - dc = _dispatch_source_handler_alloc(ds, handler, DS_REGISTN_HANDLER, false); - _dispatch_source_set_handler(ds, DS_REGISTN_HANDLER, dc); + _dispatch_source_set_handler(ds, handler, DS_REGISTN_HANDLER, false); } #pragma mark - #pragma mark dispatch_source_invoke +bool +_dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) +{ + uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed); + + if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { + DISPATCH_CLIENT_CRASH(0, "_dispatch_source_will_reenable_kevent_4NW " + "not called from within the event handler"); + } + return _dispatch_unote_needs_rearm(ds->ds_refs); +} + static void _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_invoke_flags_t flags) { dispatch_continuation_t dc; - dc = _dispatch_source_handler_take(ds, DS_REGISTN_HANDLER); + dc = _dispatch_source_handler_take(ds->ds_refs, DS_REGISTN_HANDLER); if (ds->dq_atomic_flags & (DSF_CANCELED | DQF_RELEASED)) { // no registration callout if source is canceled rdar://problem/8955246 return 
_dispatch_source_handler_dispose(dc); } - if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { dc->dc_ctxt = ds->do_ctxt; } + + _dispatch_trace_source_callout_entry(ds, DS_REGISTN_HANDLER, cq, dc); _dispatch_continuation_pop(dc, NULL, flags, cq); } @@ -474,50 +453,134 @@ static void _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_invoke_flags_t flags) { + dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc; - dc = _dispatch_source_handler_take(ds, DS_CANCEL_HANDLER); - ds->ds_pending_data = 0; - ds->ds_data = 0; - _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); - _dispatch_source_handler_free(ds, DS_REGISTN_HANDLER); + dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); + dr->ds_pending_data = 0; + dr->ds_data = 0; + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); if (!dc) { return; } if (!(ds->dq_atomic_flags & DSF_CANCELED)) { return _dispatch_source_handler_dispose(dc); } - if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { dc->dc_ctxt = ds->do_ctxt; } + _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); _dispatch_continuation_pop(dc, NULL, flags, cq); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_refs_needs_configuration(dispatch_unote_t du) +{ + return du._du->du_is_timer && + os_atomic_load2o(du._dt, dt_pending_config, relaxed); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_source_refs_needs_rearm(dispatch_unote_t du) +{ + if (!du._du->du_is_timer) { + return _dispatch_unote_needs_rearm(du); + } + if (os_atomic_load2o(du._dt, dt_pending_config, relaxed)) { + return true; + } + if (_dispatch_unote_needs_rearm(du)) { + return du._dt->dt_timer.target < INT64_MAX; + } + return false; +} + +DISPATCH_ALWAYS_INLINE +static inline unsigned long +_dispatch_source_timer_data(dispatch_timer_source_refs_t dr, 
uint64_t prev) +{ + unsigned long data = (unsigned long)prev >> 1; + + // The timer may be in _dispatch_source_invoke2() already for other + // reasons such as running the registration handler when ds_pending_data + // is changed by _dispatch_timers_run2() without holding the drain lock. + // + // We hence need dependency ordering to pair with the release barrier + // done by _dispatch_timers_run2() when setting the DISARMED_MARKER bit. + os_atomic_thread_fence(dependency); + dr = os_atomic_force_dependency_on(dr, data); + + if (dr->dt_timer.target < INT64_MAX) { + uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); + if (now >= dr->dt_timer.target) { + data = _dispatch_timer_unote_compute_missed(dr, now, data); + } + } + + return data; +} + static void _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, dispatch_invoke_flags_t flags) { dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); - uint64_t prev; + uint64_t prev = os_atomic_xchg2o(dr, ds_pending_data, 0, relaxed); - if (dr->du_is_timer && !(dr->du_fflags & DISPATCH_TIMER_AFTER)) { - prev = _dispatch_source_timer_data(ds, dr); - } else { - prev = os_atomic_xchg2o(ds, ds_pending_data, 0, relaxed); + if (dr->du_is_timer && (dr->du_timer_flags & DISPATCH_TIMER_AFTER)) { + _dispatch_trace_item_pop(cq, dc); // see _dispatch_after } - if (dr->du_data_action == DISPATCH_UNOTE_ACTION_DATA_SET) { - ds->ds_data = ~prev; - } else { - ds->ds_data = prev; + switch (dux_type(dr)->dst_action) { + case DISPATCH_UNOTE_ACTION_SOURCE_TIMER: + if (prev & DISPATCH_TIMER_DISARMED_MARKER) { + dr->ds_data = _dispatch_source_timer_data(ds->ds_timer_refs, prev); + } else { + dr->ds_data = prev >> 1; + } + break; + case DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA: + dr->ds_data = ~prev; + break; + default: + if (prev == 0 && dr->du_filter == DISPATCH_EVFILT_CUSTOM_REPLACE) { + return; + } + dr->ds_data = prev; + break; + } + if 
(unlikely(!dc)) { + return _dispatch_ktrace1(DISPATCH_PERF_handlerless_source_fire, ds); } - if (!dispatch_assume(prev != 0) || !dc) { + if (!dispatch_assume(prev != 0)) { return; } + _dispatch_trace_source_callout_entry(ds, DS_EVENT_HANDLER, cq, dc); +#ifdef DBG_BSD_MEMSTAT + if (unlikely(dr->du_filter == EVFILT_MEMORYSTATUS)) { + _dispatch_ktrace2(KDBG_CODE(DBG_BSD, DBG_BSD_MEMSTAT, 0x100) | DBG_FUNC_START, + prev, _dispatch_continuation_get_function_symbol(dc)); + } +#endif _dispatch_continuation_pop(dc, NULL, flags, cq); - if (dr->du_is_timer && (dr->du_fflags & DISPATCH_TIMER_AFTER)) { - _dispatch_source_handler_free(ds, DS_EVENT_HANDLER); - dispatch_release(ds); // dispatch_after sources are one-shot +#ifdef DBG_BSD_MEMSTAT + if (unlikely(dr->du_filter == EVFILT_MEMORYSTATUS)) { + _dispatch_ktrace0(KDBG_CODE(DBG_BSD, DBG_BSD_MEMSTAT, 0x100) | DBG_FUNC_END); + } +#endif + if (dr->du_is_timer) { + if ((prev & DISPATCH_TIMER_DISARMED_MARKER) && + _dispatch_source_refs_needs_configuration(dr)) { + _dispatch_timer_unote_configure(ds->ds_timer_refs); + } + if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) { + _dispatch_trace_item_complete(dc); // see _dispatch_after + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + dispatch_release(ds); // dispatch_after sources are one-shot + } } } @@ -526,192 +589,124 @@ static void _dispatch_source_refs_finalize_unregistration(dispatch_source_t ds) { dispatch_queue_flags_t dqf; - dispatch_source_refs_t dr = ds->ds_refs; - - dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds->_as_dq, - DSF_DELETED, DSF_ARMED | DSF_DEFERRED_DELETE | DSF_CANCEL_WAITER); + dqf = _dispatch_queue_atomic_flags_set_and_clear_orig(ds, + DSF_DELETED, DSF_NEEDS_EVENT | DSF_CANCEL_WAITER); + if (dqf & DSF_DELETED) { + DISPATCH_INTERNAL_CRASH(dqf, "Source finalized twice"); + } if (dqf & DSF_CANCEL_WAITER) { _dispatch_wake_by_address(&ds->dq_atomic_flags); } - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); - 
_dispatch_release_tailcall(ds); // the retain is done at creation time + _dispatch_object_debug(ds, "%s", __func__); + return _dispatch_release_tailcall(ds); // see _dispatch_queue_alloc() } -void +static void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) { _dispatch_object_debug(ds, "%s", __func__); - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); dispatch_source_refs_t dr = ds->ds_refs; - if (dr->du_is_timer) { - // Because of the optimization to unregister fired oneshot timers - // from the target queue, we can't trust _dispatch_unote_registered() - // to tell the truth, it may not have happened yet - if (dqf & DSF_ARMED) { - _dispatch_timers_unregister(ds->ds_timer_refs); - _dispatch_release_2(ds); - } - dr->du_ident = DISPATCH_TIMER_IDENT_CANCELED; - } else { - if (_dispatch_unote_needs_rearm(dr) && !(dqf & DSF_ARMED)) { - options |= DU_UNREGISTER_IMMEDIATE_DELETE; - } - if (!_dispatch_unote_unregister(dr, options)) { - _dispatch_debug("kevent-source[%p]: deferred delete kevent[%p]", - ds, dr); - _dispatch_queue_atomic_flags_set(ds->_as_dq, DSF_DEFERRED_DELETE); - return; // deferred unregistration - } + if (_dispatch_unote_unregister(dr, options)) { + return _dispatch_source_refs_finalize_unregistration(ds); } - ds->ds_is_installed = true; - _dispatch_source_refs_finalize_unregistration(ds); -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_source_tryarm(dispatch_source_t ds) -{ + // deferred unregistration dispatch_queue_flags_t oqf, nqf; - return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { - if (oqf & (DSF_DEFERRED_DELETE | DSF_DELETED)) { - // the test is inside the loop because it's convenient but the - // result should not change for the duration of the rmw_loop + os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + if (oqf & (DSF_NEEDS_EVENT | DSF_DELETED)) { os_atomic_rmw_loop_give_up(break); } - nqf = oqf | DSF_ARMED; + nqf = oqf | DSF_NEEDS_EVENT; }); } 
-DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_source_refs_resume(dispatch_source_t ds) -{ - dispatch_source_refs_t dr = ds->ds_refs; - if (dr->du_is_timer) { - _dispatch_timers_update(dr, 0); - return true; - } - if (unlikely(!_dispatch_source_tryarm(ds))) { - return false; - } - _dispatch_unote_resume(dr); - _dispatch_debug("kevent-source[%p]: rearmed kevent[%p]", ds, dr); - return true; -} - -void -_dispatch_source_refs_register(dispatch_source_t ds, dispatch_wlh_t wlh, +static void +_dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, dispatch_priority_t pri) { dispatch_source_refs_t dr = ds->ds_refs; - dispatch_priority_t kbp; dispatch_assert(!ds->ds_is_installed); + ds->ds_is_installed = true; - if (dr->du_is_timer) { - dispatch_queue_t dq = ds->_as_dq; - kbp = _dispatch_queue_compute_priority_and_wlh(dq, NULL); - // aggressively coalesce background/maintenance QoS timers - // - if (_dispatch_qos_is_background(_dispatch_priority_qos(kbp))) { - if (dr->du_fflags & DISPATCH_TIMER_STRICT) { - _dispatch_ktrace1(DISPATCH_PERF_strict_bg_timer, ds); - } else { - dr->du_fflags |= DISPATCH_TIMER_BACKGROUND; - dr->du_ident = _dispatch_source_timer_idx(dr); - } - } - _dispatch_timers_update(dr, 0); - return; - } - - if (unlikely(!_dispatch_source_tryarm(ds) || - !_dispatch_unote_register(dr, wlh, pri))) { - // Do the parts of dispatch_source_refs_unregister() that - // are required after this partial initialization. 
- _dispatch_source_refs_finalize_unregistration(ds); - } else { - _dispatch_debug("kevent-source[%p]: armed kevent[%p]", ds, dr); - } _dispatch_object_debug(ds, "%s", __func__); -} - -static void -_dispatch_source_set_event_handler_context(void *ctxt) -{ - dispatch_source_t ds = ctxt; - dispatch_continuation_t dc = _dispatch_source_get_event_handler(ds->ds_refs); - - if (dc && (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT)) { - dc->dc_ctxt = ds->do_ctxt; + if (unlikely(!_dispatch_unote_register(dr, wlh, pri))) { + return _dispatch_source_refs_finalize_unregistration(ds); } } -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, - dispatch_priority_t pri) -{ - _dispatch_source_refs_register(ds, wlh, pri); - ds->ds_is_installed = true; -} - void -_dispatch_source_finalize_activation(dispatch_source_t ds, bool *allow_resume) +_dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) { dispatch_continuation_t dc; dispatch_source_refs_t dr = ds->ds_refs; dispatch_priority_t pri; dispatch_wlh_t wlh; - if (unlikely(dr->du_is_direct && - (_dispatch_queue_atomic_flags(ds->_as_dq) & DSF_CANCELED))) { - return _dispatch_source_refs_unregister(ds, 0); + if (unlikely(_dispatch_queue_atomic_flags(ds) & DSF_CANCELED)) { + ds->ds_is_installed = true; + return _dispatch_source_refs_finalize_unregistration(ds); } dc = _dispatch_source_get_event_handler(dr); if (dc) { if (_dispatch_object_is_barrier(dc)) { - _dispatch_queue_atomic_flags_set(ds->_as_dq, DQF_BARRIER_BIT); + _dispatch_queue_atomic_flags_set(ds, DQF_BARRIER_BIT); + } + if ((dc->dc_priority & _PTHREAD_PRIORITY_ENFORCE_FLAG) || + !_dispatch_queue_priority_manually_selected(ds->dq_priority)) { + ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority); } - ds->dq_priority = _dispatch_priority_from_pp_strip_flags(dc->dc_priority); - if (dc->dc_flags & DISPATCH_OBJ_CTXT_FETCH_BIT) { - _dispatch_barrier_async_detached_f(ds->_as_dq, ds, - 
_dispatch_source_set_event_handler_context); + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { + dc->dc_ctxt = ds->do_ctxt; } + } else { + _dispatch_bug_deprecated("dispatch source activated " + "with no event handler set"); } // call "super" - _dispatch_queue_finalize_activation(ds->_as_dq, allow_resume); + _dispatch_lane_activate(ds, allow_resume); + + if ((dr->du_is_direct || dr->du_is_timer) && !ds->ds_is_installed) { + pri = _dispatch_queue_compute_priority_and_wlh(ds, &wlh); + if (pri) { + _dispatch_source_install(ds, wlh, pri); + } + } +} + +DISPATCH_NOINLINE +static void +_dispatch_source_handle_wlh_change(dispatch_source_t ds) +{ + dispatch_queue_flags_t dqf; - if (dr->du_is_direct && !ds->ds_is_installed) { - dispatch_queue_t dq = ds->_as_dq; - pri = _dispatch_queue_compute_priority_and_wlh(dq, &wlh); - if (pri) _dispatch_source_install(ds, wlh, pri); + dqf = _dispatch_queue_atomic_flags_set_orig(ds, DSF_WLH_CHANGED); + if (!(dqf & DQF_MUTABLE)) { + DISPATCH_CLIENT_CRASH(0, "Changing target queue " + "hierarchy after source was activated"); + } + if (!(dqf & DSF_WLH_CHANGED)) { + _dispatch_bug_deprecated("Changing target queue " + "hierarchy after source was activated"); } } DISPATCH_ALWAYS_INLINE static inline dispatch_queue_wakeup_target_t -_dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, +_dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, uint64_t *owned) { - dispatch_source_t ds = dou._ds; dispatch_queue_wakeup_target_t retq = DISPATCH_QUEUE_WAKEUP_NONE; dispatch_queue_t dq = _dispatch_queue_get_current(); dispatch_source_refs_t dr = ds->ds_refs; dispatch_queue_flags_t dqf; - if (!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && - _dispatch_unote_wlh_changed(dr, _dispatch_get_wlh())) { - dqf = _dispatch_queue_atomic_flags_set_orig(ds->_as_dq, - DSF_WLH_CHANGED); - if (!(dqf & DSF_WLH_CHANGED)) { - _dispatch_bug_deprecated("Changing target queue " - "hierarchy after source 
was activated"); - } + if (unlikely(!(flags & DISPATCH_INVOKE_MANAGER_DRAIN) && + _dispatch_unote_wlh_changed(dr, _dispatch_get_event_wlh()))) { + _dispatch_source_handle_wlh_change(ds); } if (_dispatch_queue_class_probe(ds)) { @@ -719,7 +714,7 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. dispatch_with_disabled_narrowing(dic, { - retq = _dispatch_queue_serial_drain(ds->_as_dq, dic, flags, owned); + retq = _dispatch_lane_serial_drain(ds, dic, flags, owned); }); } @@ -730,32 +725,23 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, // The order of tests here in invoke and in wakeup should be consistent. - dispatch_queue_t dkq = &_dispatch_mgr_q; - bool prevent_starvation = false; + dispatch_queue_t dkq = _dispatch_mgr_q._as_dq; + bool avoid_starvation = false; if (dr->du_is_direct) { dkq = ds->do_targetq; } - if (dr->du_is_timer && - os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) { - // timer has to be configured on the kevent queue - if (dq != dkq) { - return dkq; - } - _dispatch_source_timer_configure(ds); - } - } - if (!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. 
if (dq != dkq) { return dkq; } - _dispatch_source_install(ds, _dispatch_get_wlh(), - _dispatch_get_basepri()); + dispatch_priority_t pri = DISPATCH_PRIORITY_FLAG_MANAGER; + if (likely(flags & DISPATCH_INVOKE_WORKER_DRAIN)) { + pri = _dispatch_get_basepri(); + } + _dispatch_source_install(ds, _dispatch_get_event_wlh(), pri); } if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { @@ -763,6 +749,16 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, return ds->do_targetq; } + if (_dispatch_source_refs_needs_configuration(dr)) { + dqf = _dispatch_queue_atomic_flags(ds); + if (!(dqf & (DSF_CANCELED | DQF_RELEASED))) { + if (dq != dkq) { + return dkq; + } + _dispatch_timer_unote_configure(ds->ds_timer_refs); + } + } + if (_dispatch_source_get_registration_handler(dr)) { // The source has been registered and the registration handler needs // to be delivered on the target queue. @@ -773,26 +769,19 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, _dispatch_source_registration_callout(ds, dq, flags); } - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if ((dqf & DSF_DEFERRED_DELETE) && !(dqf & DSF_ARMED)) { -unregister_event: - // DSF_DELETE: Pending source kevent unregistration has been completed - // !DSF_ARMED: event was delivered and can safely be unregistered - if (dq != dkq) { - return dkq; - } - _dispatch_source_refs_unregister(ds, DU_UNREGISTER_IMMEDIATE_DELETE); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + if (_dispatch_unote_needs_delete(dr)) { + _dispatch_source_refs_unregister(ds, DUU_DELETE_ACK | DUU_MUST_SUCCEED); } + dqf = _dispatch_queue_atomic_flags(ds); if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && - os_atomic_load2o(ds, ds_pending_data, relaxed)) { + os_atomic_load2o(dr, ds_pending_data, relaxed)) { // The source has pending data to deliver via the event handler callback // on the target queue. Some sources need to be rearmed on the kevent // queue after event delivery. 
if (dq == ds->do_targetq) { _dispatch_source_latch_and_call(ds, dq, flags); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + dqf = _dispatch_queue_atomic_flags(ds); // starvation avoidance: if the source triggers itself then force a // re-queue to give other things already queued on the target queue @@ -801,10 +790,12 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, // however, if the source is directly targeting an overcommit root // queue, this would requeue the source and ask for a new overcommit // thread right away. - prevent_starvation = dq->do_targetq || - !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); - if (prevent_starvation && - os_atomic_load2o(ds, ds_pending_data, relaxed)) { + if (!(dqf & (DSF_CANCELED | DSF_DELETED))) { + avoid_starvation = dq->do_targetq || + !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + } + if (avoid_starvation && + os_atomic_load2o(dr, ds_pending_data, relaxed)) { retq = ds->do_targetq; } } else { @@ -814,55 +805,51 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, } } - if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DEFERRED_DELETE)) { + if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DELETED)) { // The source has been cancelled and needs to be uninstalled from the // kevent queue. After uninstallation, the cancellation handler needs // to be delivered to the target queue. - if (!(dqf & DSF_DELETED)) { - if (dr->du_is_timer && !(dqf & DSF_ARMED)) { - // timers can cheat if not armed because there's nothing left - // to do on the manager queue and unregistration can happen - // on the regular target queue - } else if (dq != dkq) { - return dkq; - } - _dispatch_source_refs_unregister(ds, 0); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (unlikely(dqf & DSF_DEFERRED_DELETE)) { - if (!(dqf & DSF_ARMED)) { - goto unregister_event; - } - // we need to wait for the EV_DELETE - return retq ? 
retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; - } + if (dr->du_is_timer && !_dispatch_unote_armed(dr)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue + } else if (dq != dkq) { + return dkq; + } + uint32_t duu_options = DUU_DELETE_ACK; + if (!(dqf & DSF_NEEDS_EVENT)) duu_options |= DUU_PROBE; + _dispatch_source_refs_unregister(ds, duu_options); + dqf = _dispatch_queue_atomic_flags(ds); + if (unlikely(!(dqf & DSF_DELETED))) { + // we need to wait for the EV_DELETE + return retq ? retq : DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; } + } + + if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && (dqf & DSF_DELETED)) { if (dq != ds->do_targetq && (_dispatch_source_get_event_handler(dr) || _dispatch_source_get_cancel_handler(dr) || _dispatch_source_get_registration_handler(dr))) { retq = ds->do_targetq; } else { _dispatch_source_cancel_callout(ds, dq, flags); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + dqf = _dispatch_queue_atomic_flags(ds); } - prevent_starvation = false; + avoid_starvation = false; } - if (_dispatch_unote_needs_rearm(dr) && - !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { + if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + _dispatch_source_refs_needs_rearm(dr)) { // The source needs to be rearmed on the kevent queue. if (dq != dkq) { return dkq; } - if (unlikely(dqf & DSF_DEFERRED_DELETE)) { - // no need for resume when we can directly unregister the kevent - goto unregister_event; - } if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(ds))) { // do not try to rearm the kevent if the source is suspended // from the source handler return ds->do_targetq; } - if (prevent_starvation && dr->du_wlh == DISPATCH_WLH_ANON) { + if (avoid_starvation && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { // keep the old behavior to force re-enqueue to our target queue // for the rearm. 
// @@ -871,10 +858,8 @@ _dispatch_source_invoke2(dispatch_object_t dou, dispatch_invoke_context_t dic, // not a concern and we can rearm right away. return ds->do_targetq; } - if (unlikely(!_dispatch_source_refs_resume(ds))) { - goto unregister_event; - } - if (!prevent_starvation && _dispatch_wlh_should_poll_unote(dr)) { + _dispatch_unote_resume(dr); + if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { // try to redrive the drain from under the lock for sources // targeting an overcommit root queue to avoid parking // when the next event has already fired @@ -892,6 +877,17 @@ _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, { _dispatch_queue_class_invoke(ds, dic, flags, DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_source_invoke2); + +#if DISPATCH_EVENT_BACKEND_KEVENT + if (flags & DISPATCH_INVOKE_WORKLOOP_DRAIN) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + dispatch_timer_heap_t dth = dwl->dwl_timer_heap; + if (dth && dth[0].dth_dirty_bits) { + _dispatch_event_loop_drain_timers(dwl->dwl_timer_heap, + DISPATCH_TIMER_WLH_COUNT); + } + } +#endif // DISPATCH_EVENT_BACKEND_KEVENT } void @@ -904,51 +900,53 @@ _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_source_refs_t dr = ds->ds_refs; dispatch_queue_wakeup_target_t dkq = DISPATCH_QUEUE_WAKEUP_MGR; dispatch_queue_wakeup_target_t tq = DISPATCH_QUEUE_WAKEUP_NONE; - dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - bool deferred_delete = (dqf & DSF_DEFERRED_DELETE); + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + dispatch_unote_state_t du_state = _dispatch_unote_state(dr); if (dr->du_is_direct) { dkq = DISPATCH_QUEUE_WAKEUP_TARGET; } - if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && dr->du_is_timer && - os_atomic_load2o(ds, ds_timer_refs->dt_pending_config, relaxed)) { - // timer has to be configured on the kevent queue - tq = dkq; - } else if (!ds->ds_is_installed) { + if 
(!ds->ds_is_installed) { // The source needs to be installed on the kevent queue. tq = dkq; + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + _dispatch_source_refs_needs_configuration(dr)) { + // timer has to be configured on the kevent queue + tq = dkq; } else if (_dispatch_source_get_registration_handler(dr)) { // The registration handler needs to be delivered to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } else if (deferred_delete && !(dqf & DSF_ARMED)) { - // Pending source kevent unregistration has been completed - // or EV_ONESHOT event can be acknowledged - tq = dkq; + } else if (_du_state_needs_delete(du_state)) { + // Deferred deletion can be acknowledged which can always be done + // from the target queue + tq = DISPATCH_QUEUE_WAKEUP_TARGET; } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && - os_atomic_load2o(ds, ds_pending_data, relaxed)) { + os_atomic_load2o(dr, ds_pending_data, relaxed)) { // The source has pending data to deliver to the target queue. tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !deferred_delete) { + } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DELETED)) { // The source needs to be uninstalled from the kevent queue, or the // cancellation handler needs to be delivered to the target queue. // Note: cancellation assumes installation. 
- if (!(dqf & DSF_DELETED)) { - if (dr->du_is_timer && !(dqf & DSF_ARMED)) { - // timers can cheat if not armed because there's nothing left - // to do on the manager queue and unregistration can happen - // on the regular target queue - tq = DISPATCH_QUEUE_WAKEUP_TARGET; - } else { - tq = dkq; - } - } else if (_dispatch_source_get_event_handler(dr) || - _dispatch_source_get_cancel_handler(dr) || - _dispatch_source_get_registration_handler(dr)) { + if (dr->du_is_timer && !_dispatch_unote_armed(dr)) { + // timers can cheat if not armed because there's nothing left + // to do on the manager queue and unregistration can happen + // on the regular target queue tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if ((dqf & DSF_NEEDS_EVENT) && !(flags & DISPATCH_WAKEUP_EVENT)){ + // we're waiting for an event + } else { + // we need to initialize the deletion sequence + tq = dkq; } - } else if (_dispatch_unote_needs_rearm(dr) && - !(dqf & (DSF_ARMED|DSF_DELETED|DSF_CANCELED|DQF_RELEASED))) { + } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && (dqf & DSF_DELETED) && + (_dispatch_source_get_event_handler(dr) || + _dispatch_source_get_cancel_handler(dr) || + _dispatch_source_get_registration_handler(dr))) { + tq = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && + _dispatch_source_refs_needs_rearm(dr)) { // The source needs to be rearmed on the kevent queue. 
tq = dkq; } @@ -957,11 +955,11 @@ _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, } if ((tq == DISPATCH_QUEUE_WAKEUP_TARGET) && - ds->do_targetq == &_dispatch_mgr_q) { + ds->do_targetq == _dispatch_mgr_q._as_dq) { tq = DISPATCH_QUEUE_WAKEUP_MGR; } - return _dispatch_queue_class_wakeup(ds->_as_dq, qos, flags, tq); + return _dispatch_queue_wakeup(ds, qos, flags, tq); } void @@ -974,8 +972,7 @@ dispatch_source_cancel(dispatch_source_t ds) // need to therefore retain/release before setting the bit _dispatch_retain_2(ds); - dispatch_queue_t q = ds->_as_dq; - if (_dispatch_queue_atomic_flags_set_orig(q, DSF_CANCELED) & DSF_CANCELED) { + if (_dispatch_queue_atomic_flags_set_orig(ds, DSF_CANCELED) & DSF_CANCELED){ _dispatch_release_2_tailcall(ds); } else { dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2); @@ -985,7 +982,7 @@ dispatch_source_cancel(dispatch_source_t ds) void dispatch_source_cancel_and_wait(dispatch_source_t ds) { - dispatch_queue_flags_t old_dqf, dqf, new_dqf; + dispatch_queue_flags_t old_dqf, new_dqf; dispatch_source_refs_t dr = ds->ds_refs; if (unlikely(_dispatch_source_get_cancel_handler(dr))) { @@ -998,21 +995,21 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) if (old_dqf & DSF_CANCEL_WAITER) { os_atomic_rmw_loop_give_up(break); } - if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + if (old_dqf & DSF_DELETED) { // just add DSF_CANCELED - } else if ((old_dqf & DSF_DEFERRED_DELETE) || !dr->du_is_direct) { + } else if ((old_dqf & DSF_NEEDS_EVENT) || dr->du_is_timer || + !dr->du_is_direct) { new_dqf |= DSF_CANCEL_WAITER; } }); - dqf = new_dqf; if (old_dqf & DQF_RELEASED) { DISPATCH_CLIENT_CRASH(ds, "Dispatch source used after last release"); } - if ((old_dqf & DSF_STATE_MASK) == DSF_DELETED) { + if (old_dqf & DSF_DELETED) { return; } - if (dqf & DSF_CANCEL_WAITER) { + if (new_dqf & DSF_CANCEL_WAITER) { goto wakeup; } @@ -1048,16 +1045,17 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) if 
(likely(_dq_state_is_runnable(old_state) && !_dq_state_drain_locked(old_state))) { - // same thing _dispatch_source_invoke2() does when handling cancellation - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (!(dqf & (DSF_DEFERRED_DELETE | DSF_DELETED))) { - _dispatch_source_refs_unregister(ds, 0); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - if (likely((dqf & DSF_STATE_MASK) == DSF_DELETED)) { - _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); - } + // deletion may have proceeded concurrently while we were + // taking the lock, so we need to check we're not doing it twice. + if (likely(!(_dispatch_queue_atomic_flags(ds) & DSF_DELETED))) { + // same thing _dispatch_source_invoke2() does for cancellation + _dispatch_source_refs_unregister(ds, DUU_DELETE_ACK | DUU_PROBE); + } + if (likely(_dispatch_queue_atomic_flags(ds) & DSF_DELETED)) { + _dispatch_source_cancel_callout(ds, NULL, DISPATCH_INVOKE_NONE); } - dx_wakeup(ds, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE); + dx_wakeup(ds, 0, DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_BARRIER_COMPLETE); } else if (unlikely(_dq_state_drain_locked_by_self(old_state))) { DISPATCH_CLIENT_CRASH(ds, "dispatch_source_cancel_and_wait " "called from a source handler"); @@ -1069,8 +1067,8 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) dispatch_activate(ds); } - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - while (unlikely((dqf & DSF_STATE_MASK) != DSF_DELETED)) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + while (unlikely(!(dqf & DSF_DELETED))) { if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { if (!os_atomic_cmpxchgv2o(ds, dq_atomic_flags, dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { @@ -1078,128 +1076,54 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } dqf |= DSF_CANCEL_WAITER; } - _dispatch_wait_on_address(&ds->dq_atomic_flags, dqf, DLOCK_LOCK_NONE); - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); + _dispatch_wait_on_address(&ds->dq_atomic_flags, dqf, + 
DISPATCH_TIME_FOREVER, DLOCK_LOCK_NONE); + dqf = _dispatch_queue_atomic_flags(ds); } } void -_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, - uintptr_t status, pthread_priority_t pp) +_dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, + OS_UNUSED uintptr_t data, pthread_priority_t pp) { - dispatch_source_refs_t dr = du._dr; - dispatch_source_t ds = _dispatch_source_from_refs(dr); - dispatch_wakeup_flags_t wflags = 0; - dispatch_queue_flags_t dqf; + dispatch_source_t ds = _dispatch_source_from_refs(du._dr); - if (_dispatch_unote_needs_rearm(dr) || (flags & (EV_DELETE | EV_ONESHOT))) { - // once we modify the queue atomic flags below, it will allow concurrent - // threads running _dispatch_source_invoke2 to dispose of the source, - // so we can't safely borrow the reference we get from the muxnote udata - // anymore, and need our own - wflags = DISPATCH_WAKEUP_CONSUME_2; - _dispatch_retain_2(ds); // rdar://20382435 - } - - if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && - !(flags & EV_DELETE)) { - dqf = _dispatch_queue_atomic_flags_set_and_clear(ds->_as_dq, - DSF_DEFERRED_DELETE, DSF_ARMED); - if (flags & EV_VANISHED) { - _dispatch_bug_kevent_client("kevent", dr->du_type->dst_kind, - "monitored resource vanished before the source " - "cancel handler was invoked", 0); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + if (!(flags & EV_UDATA_SPECIFIC) && !_du_state_registered(du_state)) { + if (!du._du->du_is_timer) { + // Timers must be unregistered from their target queue, else this + // unregistration can race with the optimization in + // _dispatch_source_invoke() to unregister fired oneshot timers. + // + // Because oneshot timers dominate the world, we prefer paying an + // extra wakeup for repeating timers, and avoid the wakeup for + // oneshot timers. + _dispatch_source_refs_finalize_unregistration(ds); } - _dispatch_debug("kevent-source[%p]: %s kevent[%p]", ds, - (flags & EV_VANISHED) ? 
"vanished" : - "deferred delete oneshot", dr); - } else if (flags & (EV_DELETE | EV_ONESHOT)) { - _dispatch_source_refs_unregister(ds, DU_UNREGISTER_ALREADY_DELETED); - _dispatch_debug("kevent-source[%p]: deleted kevent[%p]", ds, dr); - if (flags & EV_DELETE) goto done; - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - } else if (_dispatch_unote_needs_rearm(dr)) { - dqf = _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - _dispatch_debug("kevent-source[%p]: disarmed kevent[%p]", ds, dr); - } else { - dqf = _dispatch_queue_atomic_flags(ds->_as_dq); - } - - if (dqf & (DSF_CANCELED | DQF_RELEASED)) { - goto done; // rdar://20204025 } - dispatch_unote_action_t action = dr->du_data_action; - if ((flags & EV_UDATA_SPECIFIC) && (flags & EV_ONESHOT) && - (flags & EV_VANISHED)) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (unlikely(flags & EV_VANISHED)) { + if (dqf & DSF_STRICT) { + DISPATCH_CLIENT_CRASH(du._du->du_ident, "Unexpected EV_VANISHED " + "(do not destroy random mach ports or file descriptors)"); + } else { + _dispatch_bug_kevent_vanished(du._du); + } // if the resource behind the ident vanished, the event handler can't // do anything useful anymore, so do not try to call it at all - // - // Note: if the kernel doesn't support EV_VANISHED we always get it - // back unchanged from the flags passed at EV_ADD (registration) time - // Since we never ask for both EV_ONESHOT and EV_VANISHED for sources, - // if we get both bits it was a real EV_VANISHED delivery - os_atomic_store2o(ds, ds_pending_data, 0, relaxed); -#if HAVE_MACH - } else if (dr->du_filter == EVFILT_MACHPORT) { - os_atomic_store2o(ds, ds_pending_data, data, relaxed); -#endif - } else if (action == DISPATCH_UNOTE_ACTION_DATA_SET) { - os_atomic_store2o(ds, ds_pending_data, data, relaxed); - } else if (action == DISPATCH_UNOTE_ACTION_DATA_ADD) { - os_atomic_add2o(ds, ds_pending_data, data, relaxed); - } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR) { - 
os_atomic_or2o(ds, ds_pending_data, data, relaxed); - } else if (data && action == DISPATCH_UNOTE_ACTION_DATA_OR_STATUS_SET) { - // We combine the data and status into a single 64-bit value. - uint64_t odata, ndata; - uint64_t value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status); - os_atomic_rmw_loop2o(ds, ds_pending_data, odata, ndata, relaxed, { - ndata = DISPATCH_SOURCE_GET_DATA(odata) | value; - }); - } else if (data) { - DISPATCH_INTERNAL_CRASH(action, "Unexpected source action value"); + os_atomic_store2o(du._dr, ds_pending_data, 0, relaxed); } - _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, dr); -done: + _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, du._dr); _dispatch_object_debug(ds, "%s", __func__); - dx_wakeup(ds, _dispatch_qos_from_pp(pp), wflags | DISPATCH_WAKEUP_MAKE_DIRTY); + dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } #pragma mark - #pragma mark dispatch_source_timer -#if DISPATCH_USE_DTRACE -static dispatch_timer_source_refs_t - _dispatch_trace_next_timer[DISPATCH_TIMER_QOS_COUNT]; -#define _dispatch_trace_next_timer_set(x, q) \ - _dispatch_trace_next_timer[(q)] = (x) -#define _dispatch_trace_next_timer_program(d, q) \ - _dispatch_trace_timer_program(_dispatch_trace_next_timer[(q)], (d)) -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_mgr_trace_timers_wakes(void) -{ - uint32_t qos; - - if (_dispatch_timers_will_wake) { - if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { - for (qos = 0; qos < DISPATCH_TIMER_QOS_COUNT; qos++) { - if (_dispatch_timers_will_wake & (1 << qos)) { - _dispatch_trace_timer_wake(_dispatch_trace_next_timer[qos]); - } - } - } - _dispatch_timers_will_wake = 0; - } -} -#else -#define _dispatch_trace_next_timer_set(x, q) -#define _dispatch_trace_next_timer_program(d, q) -#define _dispatch_mgr_trace_timers_wakes() -#endif - #define _dispatch_source_timer_telemetry_enabled() false DISPATCH_NOINLINE @@ -1224,32 
+1148,9 @@ _dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock, } } -DISPATCH_NOINLINE -static void -_dispatch_source_timer_configure(dispatch_source_t ds) -{ - dispatch_timer_source_refs_t dt = ds->ds_timer_refs; - dispatch_timer_config_t dtc; - - dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency); - if (dtc->dtc_clock == DISPATCH_CLOCK_MACH) { - dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; - } else { - dt->du_fflags &= ~(uint32_t)DISPATCH_TIMER_CLOCK_MACH; - } - dt->dt_timer = dtc->dtc_timer; - free(dtc); - if (ds->ds_is_installed) { - // Clear any pending data that might have accumulated on - // older timer params - os_atomic_store2o(ds, ds_pending_data, 0, relaxed); - _dispatch_timers_update(dt, 0); - } -} - static dispatch_timer_config_t -_dispatch_source_timer_config_create(dispatch_time_t start, - uint64_t interval, uint64_t leeway) +_dispatch_timer_config_create(dispatch_time_t start, + uint64_t interval, uint64_t leeway, dispatch_timer_source_refs_t dt) { dispatch_timer_config_t dtc; dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s)); @@ -1266,18 +1167,28 @@ _dispatch_source_timer_config_create(dispatch_time_t start, if ((int64_t)leeway < 0) { leeway = INT64_MAX; } - if (start == DISPATCH_TIME_NOW) { - start = _dispatch_absolute_time(); - } else if (start == DISPATCH_TIME_FOREVER) { - start = INT64_MAX; - } - if ((int64_t)start < 0) { - // wall clock - start = (dispatch_time_t)-((int64_t)start); - dtc->dtc_clock = DISPATCH_CLOCK_WALL; + dispatch_clock_t clock; + uint64_t target; + if (start == DISPATCH_TIME_FOREVER) { + target = INT64_MAX; + // Do not change the clock when postponing the time forever in the + // future, this will default to UPTIME if no clock was set. 
+ clock = _dispatch_timer_flags_to_clock(dt->du_timer_flags); } else { - // absolute clock + _dispatch_time_to_clock_and_value(start, &clock, &target); + if (target == DISPATCH_TIME_NOW) { + if (clock == DISPATCH_CLOCK_UPTIME) { + target = _dispatch_uptime(); + } else { + dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); + target = _dispatch_monotonic_time(); + } + } + } + + if (clock != DISPATCH_CLOCK_WALL) { + // uptime or monotonic clock interval = _dispatch_time_nano2mach(interval); if (interval < 1) { // rdar://problem/7287561 interval must be at least one in @@ -1287,22 +1198,75 @@ _dispatch_source_timer_config_create(dispatch_time_t start, interval = 1; } leeway = _dispatch_time_nano2mach(leeway); - dtc->dtc_clock = DISPATCH_CLOCK_MACH; } if (interval < INT64_MAX && leeway > interval / 2) { leeway = interval / 2; } - dtc->dtc_timer.target = start; + dtc->dtc_clock = clock; + dtc->dtc_timer.target = target; dtc->dtc_timer.interval = interval; - if (start + leeway < INT64_MAX) { - dtc->dtc_timer.deadline = start + leeway; + if (target + leeway < INT64_MAX) { + dtc->dtc_timer.deadline = target + leeway; } else { dtc->dtc_timer.deadline = INT64_MAX; } return dtc; } +static dispatch_timer_config_t +_dispatch_interval_config_create(dispatch_time_t start, + uint64_t interval, uint64_t leeway, dispatch_timer_source_refs_t dt) +{ +#define NSEC_PER_FRAME (NSEC_PER_SEC/60) +// approx 1 year (60s * 60m * 24h * 365d) +#define FOREVER_NSEC 31536000000000000ull + + const bool animation = dt->du_timer_flags & DISPATCH_INTERVAL_UI_ANIMATION; + dispatch_timer_config_t dtc; + dtc = _dispatch_calloc(1ul, sizeof(struct dispatch_timer_config_s)); + dtc->dtc_clock = DISPATCH_CLOCK_UPTIME; + + if (start == DISPATCH_TIME_FOREVER) { + dtc->dtc_timer.target = INT64_MAX; + dtc->dtc_timer.interval = INT64_MAX; + dtc->dtc_timer.deadline = INT64_MAX; + return dtc; + } + + if (start != DISPATCH_TIME_NOW) { + DISPATCH_CLIENT_CRASH(0, "Start value is not DISPATCH_TIME_NOW or " + 
"DISPATCH_TIME_FOREVER"); + } else if (unlikely(interval == 0)) { + DISPATCH_CLIENT_CRASH(0, "Setting interval to 0"); + } + + if (likely(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : + FOREVER_NSEC/NSEC_PER_MSEC))) { + interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; + } else { + interval = FOREVER_NSEC; + } + + interval = _dispatch_time_nano2mach(interval); + start = _dispatch_uptime() + interval; + start -= (start % interval); + if (leeway <= 1000) { + leeway = interval * leeway / 1000; + } else if (leeway != UINT64_MAX) { + DISPATCH_CLIENT_CRASH(0, "Passing an invalid leeway"); + } else if (animation) { + leeway = _dispatch_time_nano2mach(NSEC_PER_FRAME); + } else { + leeway = interval / 2; + } + dtc->dtc_clock = DISPATCH_CLOCK_UPTIME; + dtc->dtc_timer.target = start; + dtc->dtc_timer.deadline = start + leeway; + dtc->dtc_timer.interval = interval; + return dtc; +} + DISPATCH_NOINLINE void dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, @@ -1311,49 +1275,32 @@ dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, dispatch_timer_source_refs_t dt = ds->ds_timer_refs; dispatch_timer_config_t dtc; - if (unlikely(!dt->du_is_timer || (dt->du_fflags&DISPATCH_TIMER_INTERVAL))) { + if (unlikely(!dt->du_is_timer)) { DISPATCH_CLIENT_CRASH(ds, "Attempt to set timer on a non-timer source"); } - dtc = _dispatch_source_timer_config_create(start, interval, leeway); + if (dt->du_timer_flags & DISPATCH_TIMER_INTERVAL) { + dtc = _dispatch_interval_config_create(start, interval, leeway, dt); + } else { + dtc = _dispatch_timer_config_create(start, interval, leeway, dt); + } + if (_dispatch_timer_flags_to_clock(dt->du_timer_flags) != dtc->dtc_clock && + dt->du_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK) { + DISPATCH_CLIENT_CRASH(0, "Attempting to modify timer clock"); + } + _dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer); dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release); if (dtc) free(dtc); 
dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } -static void -_dispatch_source_set_interval(dispatch_source_t ds, uint64_t interval) -{ -#define NSEC_PER_FRAME (NSEC_PER_SEC/60) -// approx 1 year (60s * 60m * 24h * 365d) -#define FOREVER_NSEC 31536000000000000ull - - dispatch_timer_source_refs_t dr = ds->ds_timer_refs; - const bool animation = dr->du_fflags & DISPATCH_INTERVAL_UI_ANIMATION; - if (fastpath(interval <= (animation ? FOREVER_NSEC/NSEC_PER_FRAME : - FOREVER_NSEC/NSEC_PER_MSEC))) { - interval *= animation ? NSEC_PER_FRAME : NSEC_PER_MSEC; - } else { - interval = FOREVER_NSEC; - } - interval = _dispatch_time_nano2mach(interval); - uint64_t target = _dispatch_absolute_time() + interval; - target -= (target % interval); - const uint64_t leeway = animation ? - _dispatch_time_nano2mach(NSEC_PER_FRAME) : interval / 2; - dr->dt_timer.target = target; - dr->dt_timer.deadline = target + leeway; - dr->dt_timer.interval = interval; - _dispatch_source_timer_telemetry(ds, DISPATCH_CLOCK_MACH, &dr->dt_timer); -} - #pragma mark - #pragma mark dispatch_after DISPATCH_ALWAYS_INLINE static inline void -_dispatch_after(dispatch_time_t when, dispatch_queue_t queue, +_dispatch_after(dispatch_time_t when, dispatch_queue_t dq, void *ctxt, void *handler, bool block) { dispatch_timer_source_refs_t dt; @@ -1370,9 +1317,9 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t queue, delta = _dispatch_timeout(when); if (delta == 0) { if (block) { - return dispatch_async(queue, handler); + return dispatch_async(dq, handler); } - return dispatch_async_f(queue, ctxt, handler); + return dispatch_async_f(dq, ctxt, handler); } leeway = delta / 10; // @@ -1380,31 +1327,30 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t queue, if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; // this function can and should be optimized to not use a dispatch source - ds = dispatch_source_create(&_dispatch_source_type_after, 0, 0, queue); + ds = 
dispatch_source_create(&_dispatch_source_type_after, 0, 0, dq); dt = ds->ds_timer_refs; dispatch_continuation_t dc = _dispatch_continuation_alloc(); if (block) { - _dispatch_continuation_init(dc, ds, handler, 0, 0, 0); + _dispatch_continuation_init(dc, dq, handler, 0, 0); } else { - _dispatch_continuation_init_f(dc, ds, ctxt, handler, 0, 0, 0); + _dispatch_continuation_init_f(dc, dq, ctxt, handler, 0, 0); } // reference `ds` so that it doesn't show up as a leak dc->dc_data = ds; - _dispatch_trace_continuation_push(ds->_as_dq, dc); + _dispatch_trace_item_push(dq, dc); os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed); - if ((int64_t)when < 0) { - // wall clock - when = (dispatch_time_t)-((int64_t)when); - } else { - // absolute clock - dt->du_fflags |= DISPATCH_TIMER_CLOCK_MACH; + dispatch_clock_t clock; + uint64_t target; + _dispatch_time_to_clock_and_value(when, &clock, &target); + if (clock != DISPATCH_CLOCK_WALL) { leeway = _dispatch_time_nano2mach(leeway); } - dt->dt_timer.target = when; + dt->du_timer_flags |= _dispatch_timer_flags_from_clock(clock); + dt->dt_timer.target = target; dt->dt_timer.interval = UINT64_MAX; - dt->dt_timer.deadline = when + leeway; + dt->dt_timer.deadline = target + leeway; dispatch_activate(ds); } @@ -1425,1085 +1371,6 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, } #endif -#pragma mark - -#pragma mark dispatch_timers - -/* - * The dispatch_timer_heap_t structure is a double min-heap of timers, - * interleaving the by-target min-heap in the even slots, and the by-deadline - * in the odd ones. - * - * The min element of these is held inline in the dispatch_timer_heap_t - * structure, and further entries are held in segments. - * - * dth_segments is the number of allocated segments. 
- * - * Segment 0 has a size of `DISPATCH_HEAP_INIT_SEGMENT_CAPACITY` pointers - * Segment k has a size of (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (k - 1)) - * - * Segment n (dth_segments - 1) is the last segment and points its final n - * entries to previous segments. Its address is held in the `dth_heap` field. - * - * segment n [ regular timer pointers | n-1 | k | 0 ] - * | | | - * segment n-1 <---------------------------' | | - * segment k <--------------------------------' | - * segment 0 <------------------------------------' - */ -#define DISPATCH_HEAP_INIT_SEGMENT_CAPACITY 8u - -/* - * There are two min-heaps stored interleaved in a single array, - * even indices are for the by-target min-heap, and odd indices for - * the by-deadline one. - */ -#define DTH_HEAP_ID_MASK (DTH_ID_COUNT - 1) -#define DTH_HEAP_ID(idx) ((idx) & DTH_HEAP_ID_MASK) -#define DTH_IDX_FOR_HEAP_ID(idx, heap_id) \ - (((idx) & ~DTH_HEAP_ID_MASK) | (heap_id)) - -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_dispatch_timer_heap_capacity(uint32_t segments) -{ - if (segments == 0) return 2; - uint32_t seg_no = segments - 1; - // for C = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY, - // 2 + C + SUM(C << (i-1), i = 1..seg_no) - seg_no - return 2 + (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << seg_no) - seg_no; -} - -DISPATCH_NOINLINE -static void -_dispatch_timer_heap_grow(dispatch_timer_heap_t dth) -{ - uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; - uint32_t seg_no = dth->dth_segments++; - void **heap, **heap_prev = dth->dth_heap; - - if (seg_no > 0) { - seg_capacity <<= (seg_no - 1); - } - heap = _dispatch_calloc(seg_capacity, sizeof(void *)); - if (seg_no > 1) { - uint32_t prev_seg_no = seg_no - 1; - uint32_t prev_seg_capacity = seg_capacity >> 1; - memcpy(&heap[seg_capacity - prev_seg_no], - &heap_prev[prev_seg_capacity - prev_seg_no], - prev_seg_no * sizeof(void *)); - } - if (seg_no > 0) { - heap[seg_capacity - seg_no] = heap_prev; - } - dth->dth_heap = heap; -} - -DISPATCH_NOINLINE 
-static void -_dispatch_timer_heap_shrink(dispatch_timer_heap_t dth) -{ - uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; - uint32_t seg_no = --dth->dth_segments; - void **heap = dth->dth_heap, **heap_prev = NULL; - - if (seg_no > 0) { - seg_capacity <<= (seg_no - 1); - heap_prev = heap[seg_capacity - seg_no]; - } - if (seg_no > 1) { - uint32_t prev_seg_no = seg_no - 1; - uint32_t prev_seg_capacity = seg_capacity >> 1; - memcpy(&heap_prev[prev_seg_capacity - prev_seg_no], - &heap[seg_capacity - prev_seg_no], - prev_seg_no * sizeof(void *)); - } - dth->dth_heap = heap_prev; - free(heap); -} - -DISPATCH_ALWAYS_INLINE -static inline dispatch_timer_source_refs_t * -_dispatch_timer_heap_get_slot(dispatch_timer_heap_t dth, uint32_t idx) -{ - uint32_t seg_no, segments = dth->dth_segments; - void **segment; - - if (idx < DTH_ID_COUNT) { - return &dth->dth_min[idx]; - } - idx -= DTH_ID_COUNT; - - // Derive the segment number from the index. Naming - // DISPATCH_HEAP_INIT_SEGMENT_CAPACITY `C`, the segments index ranges are: - // 0: 0 .. (C - 1) - // 1: C .. 2 * C - 1 - // k: 2^(k-1) * C .. 
2^k * C - 1 - // so `k` can be derived from the first bit set in `idx` - seg_no = (uint32_t)(__builtin_clz(DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1) - - __builtin_clz(idx | (DISPATCH_HEAP_INIT_SEGMENT_CAPACITY - 1))); - if (seg_no + 1 == segments) { - segment = dth->dth_heap; - } else { - uint32_t seg_capacity = DISPATCH_HEAP_INIT_SEGMENT_CAPACITY; - seg_capacity <<= (segments - 2); - segment = dth->dth_heap[seg_capacity - seg_no - 1]; - } - if (seg_no) { - idx -= DISPATCH_HEAP_INIT_SEGMENT_CAPACITY << (seg_no - 1); - } - return (dispatch_timer_source_refs_t *)(segment + idx); -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_timer_heap_set(dispatch_timer_source_refs_t *slot, - dispatch_timer_source_refs_t dt, uint32_t idx) -{ - *slot = dt; - dt->dt_heap_entry[DTH_HEAP_ID(idx)] = idx; -} - -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_dispatch_timer_heap_parent(uint32_t idx) -{ - uint32_t heap_id = DTH_HEAP_ID(idx); - idx = (idx - DTH_ID_COUNT) / 2; // go to the parent - return DTH_IDX_FOR_HEAP_ID(idx, heap_id); -} - -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_dispatch_timer_heap_left_child(uint32_t idx) -{ - uint32_t heap_id = DTH_HEAP_ID(idx); - // 2 * (idx - heap_id) + DTH_ID_COUNT + heap_id - return 2 * idx + DTH_ID_COUNT - heap_id; -} - -#if DISPATCH_HAVE_TIMER_COALESCING -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_dispatch_timer_heap_walk_skip(uint32_t idx, uint32_t count) -{ - uint32_t heap_id = DTH_HEAP_ID(idx); - - idx -= heap_id; - if (unlikely(idx + DTH_ID_COUNT == count)) { - // reaching `count` doesn't mean we're done, but there is a weird - // corner case if the last item of the heap is a left child: - // - // /\ - // / \ - // / __\ - // /__/ - // ^ - // - // The formula below would return the sibling of `idx` which is - // out of bounds. 
Fortunately, the correct answer is the same - // as for idx's parent - idx = _dispatch_timer_heap_parent(idx); - } - - // - // When considering the index in a non interleaved, 1-based array - // representation of a heap, hence looking at (idx / DTH_ID_COUNT + 1) - // for a given idx in our dual-heaps, that index is in one of two forms: - // - // (a) 1xxxx011111 or (b) 111111111 - // d i 0 d 0 - // - // The first bit set is the row of the binary tree node (0-based). - // The following digits from most to least significant represent the path - // to that node, where `0` is a left turn and `1` a right turn. - // - // For example 0b0101 (5) is a node on row 2 accessed going left then right: - // - // row 0 1 - // / . - // row 1 2 3 - // . \ . . - // row 2 4 5 6 7 - // : : : : : : : : - // - // Skipping a sub-tree in walk order means going to the sibling of the last - // node reached after we turned left. If the node was of the form (a), - // this node is 1xxxx1, which for the above example is 0b0011 (3). - // If the node was of the form (b) then we never took a left, meaning - // we reached the last element in traversal order. - // - - // - // we want to find - // - the least significant bit set to 0 in (idx / DTH_ID_COUNT + 1) - // - which is offset by log_2(DTH_ID_COUNT) from the position of the least - // significant 0 in (idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) - // since idx is a multiple of DTH_ID_COUNT and DTH_ID_COUNT a power of 2. - // - which in turn is the same as the position of the least significant 1 in - // ~(idx + DTH_ID_COUNT + DTH_ID_COUNT - 1) - // - dispatch_static_assert(powerof2(DTH_ID_COUNT)); - idx += DTH_ID_COUNT + DTH_ID_COUNT - 1; - idx >>= __builtin_ctz(~idx); - - // - // `idx` is now either: - // - 0 if it was the (b) case above, in which case the walk is done - // - 1xxxx0 as the position in a 0 based array representation of a non - // interleaved heap, so we just have to compute the interleaved index. - // - return likely(idx) ? 
DTH_ID_COUNT * idx + heap_id : UINT32_MAX; -} - -DISPATCH_ALWAYS_INLINE -static inline uint32_t -_dispatch_timer_heap_walk_next(uint32_t idx, uint32_t count) -{ - // - // Goes to the next element in heap walk order, which is the prefix ordered - // walk of the tree. - // - // From a given node, the next item to return is the left child if it - // exists, else the first right sibling we find by walking our parent chain, - // which is exactly what _dispatch_timer_heap_walk_skip() returns. - // - uint32_t lchild = _dispatch_timer_heap_left_child(idx); - if (lchild < count) { - return lchild; - } - return _dispatch_timer_heap_walk_skip(idx, count); -} - -DISPATCH_NOINLINE -static uint64_t -_dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit) -{ - dispatch_timer_source_refs_t dri; - uint32_t idx = _dispatch_timer_heap_left_child(DTH_TARGET_ID); - uint32_t count = dth->dth_count; - uint64_t tmp, target = dth->dth_min[DTH_TARGET_ID]->dt_timer.target; - - while (idx < count) { - dri = *_dispatch_timer_heap_get_slot(dth, idx); - tmp = dri->dt_timer.target; - if (tmp > limit) { - // skip subtree since none of the targets below can be before limit - idx = _dispatch_timer_heap_walk_skip(idx, count); - } else { - target = tmp; - idx = _dispatch_timer_heap_walk_next(idx, count); - } - } - return target; -} -#endif // DISPATCH_HAVE_TIMER_COALESCING - -DISPATCH_NOINLINE -static void -_dispatch_timer_heap_resift(dispatch_timer_heap_t dth, - dispatch_timer_source_refs_t dt, uint32_t idx) -{ - dispatch_static_assert(offsetof(struct dispatch_timer_source_s, target) == - offsetof(struct dispatch_timer_source_s, heap_key[DTH_TARGET_ID])); - dispatch_static_assert(offsetof(struct dispatch_timer_source_s, deadline) == - offsetof(struct dispatch_timer_source_s, heap_key[DTH_DEADLINE_ID])); -#define dth_cmp(hid, dt1, op, dt2) \ - (((dt1)->dt_timer.heap_key)[hid] op ((dt2)->dt_timer.heap_key)[hid]) - - dispatch_timer_source_refs_t *pslot, pdt; - 
dispatch_timer_source_refs_t *cslot, cdt; - dispatch_timer_source_refs_t *rslot, rdt; - uint32_t cidx, dth_count = dth->dth_count; - dispatch_timer_source_refs_t *slot; - int heap_id = DTH_HEAP_ID(idx); - bool sifted_up = false; - - // try to sift up - - slot = _dispatch_timer_heap_get_slot(dth, idx); - while (idx >= DTH_ID_COUNT) { - uint32_t pidx = _dispatch_timer_heap_parent(idx); - pslot = _dispatch_timer_heap_get_slot(dth, pidx); - pdt = *pslot; - if (dth_cmp(heap_id, pdt, <=, dt)) { - break; - } - _dispatch_timer_heap_set(slot, pdt, idx); - slot = pslot; - idx = pidx; - sifted_up = true; - } - if (sifted_up) { - goto done; - } - - // try to sift down - - while ((cidx = _dispatch_timer_heap_left_child(idx)) < dth_count) { - uint32_t ridx = cidx + DTH_ID_COUNT; - cslot = _dispatch_timer_heap_get_slot(dth, cidx); - cdt = *cslot; - if (ridx < dth_count) { - rslot = _dispatch_timer_heap_get_slot(dth, ridx); - rdt = *rslot; - if (dth_cmp(heap_id, cdt, >, rdt)) { - cidx = ridx; - cdt = rdt; - cslot = rslot; - } - } - if (dth_cmp(heap_id, dt, <=, cdt)) { - break; - } - _dispatch_timer_heap_set(slot, cdt, idx); - slot = cslot; - idx = cidx; - } - -done: - _dispatch_timer_heap_set(slot, dt, idx); -#undef dth_cmp -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_timer_heap_insert(dispatch_timer_heap_t dth, - dispatch_timer_source_refs_t dt) -{ - uint32_t idx = (dth->dth_count += DTH_ID_COUNT) - DTH_ID_COUNT; - - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], ==, - DTH_INVALID_ID, "target idx"); - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], ==, - DTH_INVALID_ID, "deadline idx"); - - if (idx == 0) { - dt->dt_heap_entry[DTH_TARGET_ID] = DTH_TARGET_ID; - dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_DEADLINE_ID; - dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = dt; - return; - } - - if (unlikely(idx + DTH_ID_COUNT > - _dispatch_timer_heap_capacity(dth->dth_segments))) { - _dispatch_timer_heap_grow(dth); - } - 
_dispatch_timer_heap_resift(dth, dt, idx + DTH_TARGET_ID); - _dispatch_timer_heap_resift(dth, dt, idx + DTH_DEADLINE_ID); -} - -DISPATCH_NOINLINE -static void -_dispatch_timer_heap_remove(dispatch_timer_heap_t dth, - dispatch_timer_source_refs_t dt) -{ - uint32_t idx = (dth->dth_count -= DTH_ID_COUNT); - - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, - DTH_INVALID_ID, "target idx"); - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, - DTH_INVALID_ID, "deadline idx"); - - if (idx == 0) { - DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_TARGET_ID], ==, dt, - "target slot"); - DISPATCH_TIMER_ASSERT(dth->dth_min[DTH_DEADLINE_ID], ==, dt, - "deadline slot"); - dth->dth_min[DTH_TARGET_ID] = dth->dth_min[DTH_DEADLINE_ID] = NULL; - goto clear_heap_entry; - } - - for (uint32_t heap_id = 0; heap_id < DTH_ID_COUNT; heap_id++) { - dispatch_timer_source_refs_t *slot, last_dt; - slot = _dispatch_timer_heap_get_slot(dth, idx + heap_id); - last_dt = *slot; *slot = NULL; - if (last_dt != dt) { - uint32_t removed_idx = dt->dt_heap_entry[heap_id]; - _dispatch_timer_heap_resift(dth, last_dt, removed_idx); - } - } - if (unlikely(idx <= _dispatch_timer_heap_capacity(dth->dth_segments - 1))) { - _dispatch_timer_heap_shrink(dth); - } - -clear_heap_entry: - dt->dt_heap_entry[DTH_TARGET_ID] = DTH_INVALID_ID; - dt->dt_heap_entry[DTH_DEADLINE_ID] = DTH_INVALID_ID; -} - -DISPATCH_ALWAYS_INLINE -static inline void -_dispatch_timer_heap_update(dispatch_timer_heap_t dth, - dispatch_timer_source_refs_t dt) -{ - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_TARGET_ID], !=, - DTH_INVALID_ID, "target idx"); - DISPATCH_TIMER_ASSERT(dt->dt_heap_entry[DTH_DEADLINE_ID], !=, - DTH_INVALID_ID, "deadline idx"); - - - _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_TARGET_ID]); - _dispatch_timer_heap_resift(dth, dt, dt->dt_heap_entry[DTH_DEADLINE_ID]); -} - -DISPATCH_ALWAYS_INLINE -static bool -_dispatch_timer_heap_has_new_min(dispatch_timer_heap_t dth, - uint32_t count, uint32_t 
mask) -{ - dispatch_timer_source_refs_t dt; - bool changed = false; - uint64_t tmp; - uint32_t tidx; - - for (tidx = 0; tidx < count; tidx++) { - if (!(mask & (1u << tidx))) { - continue; - } - - dt = dth[tidx].dth_min[DTH_TARGET_ID]; - tmp = dt ? dt->dt_timer.target : UINT64_MAX; - if (dth[tidx].dth_target != tmp) { - dth[tidx].dth_target = tmp; - changed = true; - } - dt = dth[tidx].dth_min[DTH_DEADLINE_ID]; - tmp = dt ? dt->dt_timer.deadline : UINT64_MAX; - if (dth[tidx].dth_deadline != tmp) { - dth[tidx].dth_deadline = tmp; - changed = true; - } - } - return changed; -} - -static inline void -_dispatch_timers_unregister(dispatch_timer_source_refs_t dt) -{ - uint32_t tidx = dt->du_ident; - dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; - - _dispatch_timer_heap_remove(heap, dt); - _dispatch_timers_reconfigure = true; - _dispatch_timers_processing_mask |= 1 << tidx; - dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON); - dt->du_wlh = NULL; -} - -static inline void -_dispatch_timers_register(dispatch_timer_source_refs_t dt, uint32_t tidx) -{ - dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; - if (_dispatch_unote_registered(dt)) { - DISPATCH_TIMER_ASSERT(dt->du_ident, ==, tidx, "tidx"); - _dispatch_timer_heap_update(heap, dt); - } else { - dt->du_ident = tidx; - _dispatch_timer_heap_insert(heap, dt); - } - _dispatch_timers_reconfigure = true; - _dispatch_timers_processing_mask |= 1 << tidx; - dispatch_assert(dt->du_wlh == NULL || dt->du_wlh == DISPATCH_WLH_ANON); - dt->du_wlh = DISPATCH_WLH_ANON; -} - -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_source_timer_tryarm(dispatch_source_t ds) -{ - dispatch_queue_flags_t oqf, nqf; - return os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { - if (oqf & (DSF_CANCELED | DQF_RELEASED)) { - // do not install a cancelled timer - os_atomic_rmw_loop_give_up(break); - } - nqf = oqf | DSF_ARMED; - }); -} - -// Updates the ordered list of timers based on next fire date 
for changes to ds. -// Should only be called from the context of _dispatch_mgr_q. -static void -_dispatch_timers_update(dispatch_unote_t du, uint32_t flags) -{ - dispatch_timer_source_refs_t dr = du._dt; - dispatch_source_t ds = _dispatch_source_from_refs(dr); - const char *verb = "updated"; - bool will_register, disarm = false; - - DISPATCH_ASSERT_ON_MANAGER_QUEUE(); - - if (unlikely(dr->du_ident == DISPATCH_TIMER_IDENT_CANCELED)) { - dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0); - return; - } - - // Unregister timers that are unconfigured, disabled, suspended or have - // missed intervals. Rearm after dispatch_set_timer(), resume or source - // invoke will reenable them - will_register = !(flags & DISPATCH_TIMERS_UNREGISTER) && - dr->dt_timer.target < INT64_MAX && - !os_atomic_load2o(ds, ds_pending_data, relaxed) && - !DISPATCH_QUEUE_IS_SUSPENDED(ds) && - !os_atomic_load2o(dr, dt_pending_config, relaxed); - if (likely(!_dispatch_unote_registered(dr))) { - dispatch_assert((flags & DISPATCH_TIMERS_RETAIN_2) == 0); - if (unlikely(!will_register || !_dispatch_source_timer_tryarm(ds))) { - return; - } - verb = "armed"; - } else if (unlikely(!will_register)) { - disarm = true; - verb = "disarmed"; - } - - // The heap owns a +2 on dispatch sources it references - // - // _dispatch_timers_run2() also sometimes passes DISPATCH_TIMERS_RETAIN_2 - // when it wants to take over this +2 at the same time we are unregistering - // the timer from the heap. - // - // Compute our refcount balance according to these rules, if our balance - // would become negative we retain the source upfront, if it is positive, we - // get rid of the extraneous refcounts after we're done touching the source. - int refs = will_register ? 
-2 : 0; - if (_dispatch_unote_registered(dr) && !(flags & DISPATCH_TIMERS_RETAIN_2)) { - refs += 2; - } - if (refs < 0) { - dispatch_assert(refs == -2); - _dispatch_retain_2(ds); - } - - uint32_t tidx = _dispatch_source_timer_idx(dr); - if (unlikely(_dispatch_unote_registered(dr) && - (!will_register || dr->du_ident != tidx))) { - _dispatch_timers_unregister(dr); - } - if (likely(will_register)) { - _dispatch_timers_register(dr, tidx); - } - - if (disarm) { - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - } - _dispatch_debug("kevent-source[%p]: %s timer[%p]", ds, verb, dr); - _dispatch_object_debug(ds, "%s", __func__); - if (refs > 0) { - dispatch_assert(refs == 2); - _dispatch_release_2_tailcall(ds); - } -} - -#define DISPATCH_TIMER_MISSED_MARKER 1ul - -DISPATCH_ALWAYS_INLINE -static inline unsigned long -_dispatch_source_timer_compute_missed(dispatch_timer_source_refs_t dt, - uint64_t now, unsigned long prev) -{ - uint64_t missed = (now - dt->dt_timer.target) / dt->dt_timer.interval; - if (++missed + prev > LONG_MAX) { - missed = LONG_MAX - prev; - } - if (dt->dt_timer.interval < INT64_MAX) { - uint64_t push_by = missed * dt->dt_timer.interval; - dt->dt_timer.target += push_by; - dt->dt_timer.deadline += push_by; - } else { - dt->dt_timer.target = UINT64_MAX; - dt->dt_timer.deadline = UINT64_MAX; - } - prev += missed; - return prev; -} - -DISPATCH_ALWAYS_INLINE -static inline unsigned long -_dispatch_source_timer_data(dispatch_source_t ds, dispatch_unote_t du) -{ - dispatch_timer_source_refs_t dr = du._dt; - unsigned long data, prev, clear_prev = 0; - - os_atomic_rmw_loop2o(ds, ds_pending_data, prev, clear_prev, relaxed, { - data = prev >> 1; - if (unlikely(prev & DISPATCH_TIMER_MISSED_MARKER)) { - os_atomic_rmw_loop_give_up(goto handle_missed_intervals); - } - }); - return data; - -handle_missed_intervals: - // The timer may be in _dispatch_source_invoke2() already for other - // reasons such as running the registration handler when ds_pending_data 
- // is changed by _dispatch_timers_run2() without holding the drain lock. - // - // We hence need dependency ordering to pair with the release barrier - // done by _dispatch_timers_run2() when setting the MISSED_MARKER bit. - os_atomic_thread_fence(dependency); - dr = os_atomic_force_dependency_on(dr, data); - - uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); - if (now >= dr->dt_timer.target) { - OS_COMPILER_CAN_ASSUME(dr->dt_timer.interval < INT64_MAX); - data = _dispatch_source_timer_compute_missed(dr, now, data); - } - - // When we see the MISSED_MARKER the manager has given up on this timer - // and expects the handler to call "resume". - // - // However, it may not have reflected this into the atomic flags yet - // so make sure _dispatch_source_invoke2() sees the timer is disarmed - // - // The subsequent _dispatch_source_refs_resume() will enqueue the source - // on the manager and make the changes to `ds_timer` above visible. - _dispatch_queue_atomic_flags_clear(ds->_as_dq, DSF_ARMED); - os_atomic_store2o(ds, ds_pending_data, 0, relaxed); - return data; -} - -static inline void -_dispatch_timers_run2(dispatch_clock_now_cache_t nows, uint32_t tidx) -{ - dispatch_timer_source_refs_t dr; - dispatch_source_t ds; - uint64_t data, pending_data; - uint64_t now = _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); - - while ((dr = _dispatch_timers_heap[tidx].dth_min[DTH_TARGET_ID])) { - DISPATCH_TIMER_ASSERT(dr->du_filter, ==, DISPATCH_EVFILT_TIMER, - "invalid filter"); - DISPATCH_TIMER_ASSERT(dr->du_ident, ==, tidx, "tidx"); - DISPATCH_TIMER_ASSERT(dr->dt_timer.target, !=, 0, "missing target"); - ds = _dispatch_source_from_refs(dr); - if (dr->dt_timer.target > now) { - // Done running timers for now. 
- break; - } - if (dr->du_fflags & DISPATCH_TIMER_AFTER) { - _dispatch_trace_timer_fire(dr, 1, 1); - _dispatch_source_merge_evt(dr, EV_ONESHOT, 1, 0, 0); - _dispatch_debug("kevent-source[%p]: fired after timer[%p]", ds, dr); - _dispatch_object_debug(ds, "%s", __func__); - continue; - } - - data = os_atomic_load2o(ds, ds_pending_data, relaxed); - if (unlikely(data)) { - // the release barrier is required to make the changes - // to `ds_timer` visible to _dispatch_source_timer_data() - if (os_atomic_cmpxchg2o(ds, ds_pending_data, data, - data | DISPATCH_TIMER_MISSED_MARKER, release)) { - _dispatch_timers_update(dr, DISPATCH_TIMERS_UNREGISTER); - continue; - } - } - - data = _dispatch_source_timer_compute_missed(dr, now, 0); - _dispatch_timers_update(dr, DISPATCH_TIMERS_RETAIN_2); - pending_data = data << 1; - if (!_dispatch_unote_registered(dr) && dr->dt_timer.target < INT64_MAX){ - // if we unregistered because of suspension we have to fake we - // missed events. - pending_data |= DISPATCH_TIMER_MISSED_MARKER; - os_atomic_store2o(ds, ds_pending_data, pending_data, release); - } else { - os_atomic_store2o(ds, ds_pending_data, pending_data, relaxed); - } - _dispatch_trace_timer_fire(dr, data, data); - _dispatch_debug("kevent-source[%p]: fired timer[%p]", ds, dr); - _dispatch_object_debug(ds, "%s", __func__); - dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY | DISPATCH_WAKEUP_CONSUME_2); - } -} - -DISPATCH_NOINLINE -static void -_dispatch_timers_run(dispatch_clock_now_cache_t nows) -{ - uint32_t tidx; - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (_dispatch_timers_heap[tidx].dth_count) { - _dispatch_timers_run2(nows, tidx); - } - } -} - -#if DISPATCH_HAVE_TIMER_COALESCING -#define DISPATCH_KEVENT_COALESCING_WINDOW_INIT(qos, ms) \ - [DISPATCH_TIMER_QOS_##qos] = 2ull * (ms) * NSEC_PER_MSEC - -static const uint64_t _dispatch_kevent_coalescing_window[] = { - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(NORMAL, 75), -#if DISPATCH_HAVE_TIMER_QOS - 
DISPATCH_KEVENT_COALESCING_WINDOW_INIT(CRITICAL, 1), - DISPATCH_KEVENT_COALESCING_WINDOW_INIT(BACKGROUND, 100), -#endif -}; -#endif // DISPATCH_HAVE_TIMER_COALESCING - -static inline dispatch_timer_delay_s -_dispatch_timers_get_delay(dispatch_timer_heap_t dth, dispatch_clock_t clock, - uint32_t qos, dispatch_clock_now_cache_t nows) -{ - uint64_t target = dth->dth_target, deadline = dth->dth_deadline; - uint64_t delta = INT64_MAX, dldelta = INT64_MAX; - dispatch_timer_delay_s rc; - - dispatch_assert(target <= deadline); - if (delta == 0 || target >= INT64_MAX) { - goto done; - } - - if (qos < DISPATCH_TIMER_QOS_COUNT && dth->dth_count > 2) { -#if DISPATCH_HAVE_TIMER_COALESCING - // Timer pre-coalescing - // When we have several timers with this target/deadline bracket: - // - // Target window Deadline - // V <-------V - // t1: [...........|.................] - // t2: [......|.......] - // t3: [..|..........] - // t4: | [.............] - // ^ - // Optimal Target - // - // Coalescing works better if the Target is delayed to "Optimal", by - // picking the latest target that isn't too close to the deadline. - uint64_t window = _dispatch_kevent_coalescing_window[qos]; - if (target + window < deadline) { - uint64_t latest = deadline - window; - target = _dispatch_timer_heap_max_target_before(dth, latest); - } -#endif - } - - uint64_t now = _dispatch_time_now_cached(clock, nows); - if (target <= now) { - delta = 0; - dldelta = 0; - goto done; - } - - uint64_t tmp = target - now; - if (clock != DISPATCH_CLOCK_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < delta) { - delta = tmp; - } - - tmp = deadline - now; - if (clock != DISPATCH_CLOCK_WALL) { - tmp = _dispatch_time_mach2nano(tmp); - } - if (tmp < dldelta) { - dldelta = tmp; - } - -done: - rc.delay = delta; - rc.leeway = delta < INT64_MAX ? 
dldelta - delta : INT64_MAX; - return rc; -} - -static bool -_dispatch_timers_program2(dispatch_clock_now_cache_t nows, uint32_t tidx) -{ - uint32_t qos = DISPATCH_TIMER_QOS(tidx); - dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); - dispatch_timer_heap_t heap = &_dispatch_timers_heap[tidx]; - dispatch_timer_delay_s range; - - range = _dispatch_timers_get_delay(heap, clock, qos, nows); - if (range.delay == 0 || range.delay >= INT64_MAX) { - _dispatch_trace_next_timer_set(NULL, qos); - if (heap->dth_flags & DTH_ARMED) { - _dispatch_event_loop_timer_delete(tidx); - } - return range.delay == 0; - } - - _dispatch_trace_next_timer_set(heap->dth_min[DTH_TARGET_ID], qos); - _dispatch_trace_next_timer_program(range.delay, qos); - _dispatch_event_loop_timer_arm(tidx, range, nows); - return false; -} - -DISPATCH_NOINLINE -static bool -_dispatch_timers_program(dispatch_clock_now_cache_t nows) -{ - bool poll = false; - uint32_t tidx, timerm = _dispatch_timers_processing_mask; - - for (tidx = 0; tidx < DISPATCH_TIMER_COUNT; tidx++) { - if (timerm & (1 << tidx)) { - poll |= _dispatch_timers_program2(nows, tidx); - } - } - return poll; -} - -DISPATCH_NOINLINE -static bool -_dispatch_timers_configure(void) -{ - // Find out if there is a new target/deadline on the timer lists - return _dispatch_timer_heap_has_new_min(_dispatch_timers_heap, - countof(_dispatch_timers_heap), _dispatch_timers_processing_mask); -} - -static inline bool -_dispatch_mgr_timers(void) -{ - dispatch_clock_now_cache_s nows = { }; - bool expired = _dispatch_timers_expired; - if (unlikely(expired)) { - _dispatch_timers_run(&nows); - } - _dispatch_mgr_trace_timers_wakes(); - bool reconfigure = _dispatch_timers_reconfigure; - if (unlikely(reconfigure || expired)) { - if (reconfigure) { - reconfigure = _dispatch_timers_configure(); - _dispatch_timers_reconfigure = false; - } - if (reconfigure || expired) { - expired = _dispatch_timers_expired = _dispatch_timers_program(&nows); - } - 
_dispatch_timers_processing_mask = 0; - } - return expired; -} - -#pragma mark - -#pragma mark dispatch_mgr - -void -_dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - DISPATCH_UNUSED dispatch_qos_t qos) -{ - uint64_t dq_state; - _dispatch_trace_continuation_push(dq, dou._do); - if (unlikely(_dispatch_queue_push_update_tail(dq, dou._do))) { - _dispatch_queue_push_update_head(dq, dou._do); - dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); - if (!_dq_state_drain_locked_by_self(dq_state)) { - _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); - } - } -} - -DISPATCH_NORETURN -void -_dispatch_mgr_queue_wakeup(DISPATCH_UNUSED dispatch_queue_t dq, - DISPATCH_UNUSED dispatch_qos_t qos, - DISPATCH_UNUSED dispatch_wakeup_flags_t flags) -{ - DISPATCH_INTERNAL_CRASH(0, "Don't try to wake up or override the manager"); -} - -#if DISPATCH_USE_MGR_THREAD -DISPATCH_NOINLINE DISPATCH_NORETURN -static void -_dispatch_mgr_invoke(void) -{ -#if DISPATCH_EVENT_BACKEND_KEVENT - dispatch_kevent_s evbuf[DISPATCH_DEFERRED_ITEMS_EVENT_COUNT]; -#endif - dispatch_deferred_items_s ddi = { -#if DISPATCH_EVENT_BACKEND_KEVENT - .ddi_maxevents = DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - .ddi_eventlist = evbuf, -#endif - }; - bool poll; - - _dispatch_deferred_items_set(&ddi); - for (;;) { - _dispatch_mgr_queue_drain(); - poll = _dispatch_mgr_timers(); - poll = poll || _dispatch_queue_class_probe(&_dispatch_mgr_q); - _dispatch_event_loop_drain(poll ? 
KEVENT_FLAG_IMMEDIATE : 0); - } -} -#endif // DISPATCH_USE_MGR_THREAD - -DISPATCH_NORETURN -void -_dispatch_mgr_thread(dispatch_queue_t dq DISPATCH_UNUSED, - dispatch_invoke_context_t dic DISPATCH_UNUSED, - dispatch_invoke_flags_t flags DISPATCH_UNUSED) -{ -#if DISPATCH_USE_KEVENT_WORKQUEUE - if (_dispatch_kevent_workqueue_enabled) { - DISPATCH_INTERNAL_CRASH(0, "Manager queue invoked with " - "kevent workqueue enabled"); - } -#endif -#if DISPATCH_USE_MGR_THREAD - _dispatch_queue_set_current(&_dispatch_mgr_q); - _dispatch_mgr_priority_init(); - _dispatch_queue_mgr_lock(&_dispatch_mgr_q); - // never returns, so burn bridges behind us & clear stack 2k ahead - _dispatch_clear_stack(2048); - _dispatch_mgr_invoke(); -#endif -} - -#if DISPATCH_USE_KEVENT_WORKQUEUE - -#define DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER ((dispatch_priority_t)~0u) - -_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN >= - DISPATCH_DEFERRED_ITEMS_EVENT_COUNT, - "our list should not be longer than the kernel's"); - -DISPATCH_ALWAYS_INLINE -static inline dispatch_priority_t -_dispatch_wlh_worker_thread_init(dispatch_wlh_t wlh, - dispatch_deferred_items_t ddi) -{ - dispatch_assert(wlh); - dispatch_priority_t old_dbp; - - pthread_priority_t pp = _dispatch_get_priority(); - if (!(pp & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) { - // If this thread does not have the event manager flag set, don't setup - // as the dispatch manager and let the caller know to only process - // the delivered events. 
- // - // Also add the NEEDS_UNBIND flag so that - // _dispatch_priority_compute_update knows it has to unbind - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; - if (wlh == DISPATCH_WLH_ANON) { - pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - } else { - // pthread sets the flag when it is an event delivery thread - // so we need to explicitly clear it - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - } - _dispatch_thread_setspecific(dispatch_priority_key, - (void *)(uintptr_t)pp); - if (wlh != DISPATCH_WLH_ANON) { - _dispatch_debug("wlh[%p]: handling events", wlh); - } else { - ddi->ddi_can_stash = true; - } - return DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER; - } - - if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) || - !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - // When the phtread kext is delivering kevents to us, and pthread - // root queues are in use, then the pthread priority TSD is set - // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set. - // - // Given that this isn't a valid QoS we need to fixup the TSD, - // and the best option is to clear the qos/priority bits which tells - // us to not do any QoS related calls on this thread. - // - // However, in that case the manager thread is opted out of QoS, - // as far as pthread is concerned, and can't be turned into - // something else, so we can't stash. - pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK; - } - // Managers always park without mutating to a regular worker thread, and - // hence never need to unbind from userland, and when draining a manager, - // the NEEDS_UNBIND flag would cause the mutation to happen. 
- // So we need to strip this flag - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - - // ensure kevents registered from this thread are registered at manager QoS - old_dbp = _dispatch_set_basepri(DISPATCH_PRIORITY_FLAG_MANAGER); - _dispatch_queue_set_current(&_dispatch_mgr_q); - _dispatch_queue_mgr_lock(&_dispatch_mgr_q); - return old_dbp; -} - -DISPATCH_ALWAYS_INLINE DISPATCH_WARN_RESULT -static inline bool -_dispatch_wlh_worker_thread_reset(dispatch_priority_t old_dbp) -{ - bool needs_poll = _dispatch_queue_mgr_unlock(&_dispatch_mgr_q); - _dispatch_reset_basepri(old_dbp); - _dispatch_reset_basepri_override(); - _dispatch_queue_set_current(NULL); - return needs_poll; -} - -DISPATCH_ALWAYS_INLINE -static void -_dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, - int *nevents) -{ - _dispatch_introspection_thread_add(); - DISPATCH_PERF_MON_VAR_INIT - - dispatch_deferred_items_s ddi = { - .ddi_eventlist = events, - }; - dispatch_priority_t old_dbp; - - old_dbp = _dispatch_wlh_worker_thread_init(wlh, &ddi); - if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { - _dispatch_perfmon_start_impl(true); - } else { - dispatch_assert(wlh == DISPATCH_WLH_ANON); - wlh = DISPATCH_WLH_ANON; - } - _dispatch_deferred_items_set(&ddi); - _dispatch_event_loop_merge(events, *nevents); - - if (old_dbp != DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER) { - _dispatch_mgr_queue_drain(); - bool poll = _dispatch_mgr_timers(); - if (_dispatch_wlh_worker_thread_reset(old_dbp)) { - poll = true; - } - if (poll) _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); - } else if (ddi.ddi_stashed_dou._do) { - _dispatch_debug("wlh[%p]: draining deferred item %p", wlh, - ddi.ddi_stashed_dou._do); - if (wlh == DISPATCH_WLH_ANON) { - dispatch_assert(ddi.ddi_nevents == 0); - _dispatch_deferred_items_set(NULL); - _dispatch_root_queue_drain_deferred_item(&ddi - DISPATCH_PERF_MON_ARGS); - } 
else { - _dispatch_root_queue_drain_deferred_wlh(&ddi - DISPATCH_PERF_MON_ARGS); - } - } - - _dispatch_deferred_items_set(NULL); - if (old_dbp == DISPATCH_KEVENT_WORKER_IS_NOT_MANAGER && - !ddi.ddi_stashed_dou._do) { - _dispatch_perfmon_end(perfmon_thread_event_no_steal); - } - _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); - *nevents = ddi.ddi_nevents; -} - -DISPATCH_NOINLINE -void -_dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) -{ - if (!events && !nevents) { - // events for worker thread request have already been delivered earlier - return; - } - if (!dispatch_assume(*nevents && *events)) return; - _dispatch_adopt_wlh_anon(); - _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); - _dispatch_reset_wlh(); -} - - -#endif // DISPATCH_USE_KEVENT_WORKQUEUE #pragma mark - #pragma mark dispatch_source_debug @@ -2512,15 +1379,17 @@ _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { dispatch_queue_t target = ds->do_targetq; dispatch_source_refs_t dr = ds->ds_refs; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + dispatch_unote_state_t du_state = _dispatch_unote_state(dr); return dsnprintf(buf, bufsiz, "target = %s[%p], ident = 0x%x, " "mask = 0x%x, pending_data = 0x%llx, registered = %d, " - "armed = %d, deleted = %d%s, canceled = %d, ", + "armed = %d, %s%s%s", target && target->dq_label ? target->dq_label : "", target, - dr->du_ident, dr->du_fflags, (unsigned long long)ds->ds_pending_data, - ds->ds_is_installed, (bool)(ds->dq_atomic_flags & DSF_ARMED), - (bool)(ds->dq_atomic_flags & DSF_DELETED), - (ds->dq_atomic_flags & DSF_DEFERRED_DELETE) ? " (pending)" : "", - (bool)(ds->dq_atomic_flags & DSF_CANCELED)); + dr->du_ident, dr->du_fflags, (unsigned long long)dr->ds_pending_data, + _du_state_registered(du_state), _du_state_armed(du_state), + (dqf & DSF_CANCELED) ? "cancelled, " : "", + (dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "", + (dqf & DSF_DELETED) ? 
"deleted, " : ""); } static size_t @@ -2531,7 +1400,7 @@ _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) ", interval = 0x%llx, flags = 0x%x }, ", (unsigned long long)dr->dt_timer.target, (unsigned long long)dr->dt_timer.deadline, - (unsigned long long)dr->dt_timer.interval, dr->du_fflags); + (unsigned long long)dr->dt_timer.interval, dr->du_timer_flags); } size_t @@ -2540,7 +1409,7 @@ _dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz) dispatch_source_refs_t dr = ds->ds_refs; size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", - dx_kind(ds), ds); + _dispatch_object_class_name(ds), ds); offset += _dispatch_object_debug_attr(ds, &buf[offset], bufsiz - offset); offset += _dispatch_source_debug_attr(ds, &buf[offset], bufsiz - offset); if (dr->du_is_timer) { @@ -2548,6 +1417,6 @@ _dispatch_source_debug(dispatch_source_t ds, char *buf, size_t bufsiz) } offset += dsnprintf(&buf[offset], bufsiz - offset, "kevent = %p%s, " "filter = %s }", dr, dr->du_is_direct ? 
" (direct)" : "", - dr->du_type->dst_kind); + dux_type(dr)->dst_kind); return offset; } diff --git a/src/source_internal.h b/src/source_internal.h index 55b81e787..52ec8fd1c 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -32,100 +32,40 @@ #include // for HeaderDoc #endif -enum { - /* DISPATCH_TIMER_STRICT 0x1 */ - /* DISPATCH_TIMER_BACKGROUND = 0x2, */ - DISPATCH_TIMER_CLOCK_MACH = 0x4, - DISPATCH_TIMER_INTERVAL = 0x8, - DISPATCH_TIMER_AFTER = 0x10, - /* DISPATCH_INTERVAL_UI_ANIMATION = 0x20 */ -}; +_OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object) +DISPATCH_CLASS_DECL_BARE(source, QUEUE); -DISPATCH_ALWAYS_INLINE -static inline unsigned int -_dispatch_source_timer_idx(dispatch_unote_t du) -{ - uint32_t clock, qos = 0, fflags = du._dt->du_fflags; - - dispatch_assert(DISPATCH_CLOCK_MACH == 1); - dispatch_assert(DISPATCH_CLOCK_WALL == 0); - clock = (fflags & DISPATCH_TIMER_CLOCK_MACH) / DISPATCH_TIMER_CLOCK_MACH; - -#if DISPATCH_HAVE_TIMER_QOS - dispatch_assert(DISPATCH_TIMER_STRICT == DISPATCH_TIMER_QOS_CRITICAL); - dispatch_assert(DISPATCH_TIMER_BACKGROUND == DISPATCH_TIMER_QOS_BACKGROUND); - qos = fflags & (DISPATCH_TIMER_STRICT | DISPATCH_TIMER_BACKGROUND); - // flags are normalized so this should never happen - dispatch_assert(qos < DISPATCH_TIMER_QOS_COUNT); -#endif - - return DISPATCH_TIMER_INDEX(clock, qos); -} - -#define _DISPATCH_SOURCE_HEADER(refs) \ - DISPATCH_QUEUE_HEADER(refs); \ - unsigned int \ +#define DISPATCH_SOURCE_CLASS_HEADER(x) \ + DISPATCH_LANE_CLASS_HEADER(x); \ + uint16_t \ + /* set under the drain lock */ \ ds_is_installed:1, \ - dm_needs_mgr:1, \ dm_connect_handler_called:1, \ - dm_uninstalled:1, \ dm_cancel_handler_called:1, \ - dm_is_xpc:1 - -#define DISPATCH_SOURCE_HEADER(refs) \ - struct dispatch_source_s _as_ds[0]; \ - _DISPATCH_SOURCE_HEADER(refs) - -DISPATCH_CLASS_DECL_BARE(source); -_OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object); + dm_is_xpc:1, \ + __ds_flags_pad : 12; 
\ + uint16_t __dq_flags_separation[0]; \ + uint16_t \ + /* set under the send queue lock */ \ + dm_needs_mgr:1, \ + dm_disconnected:1, \ + __dm_flags_pad : 14 -#ifndef __cplusplus struct dispatch_source_s { - _DISPATCH_SOURCE_HEADER(source); - uint64_t ds_data DISPATCH_ATOMIC64_ALIGN; - uint64_t ds_pending_data DISPATCH_ATOMIC64_ALIGN; + DISPATCH_SOURCE_CLASS_HEADER(source); } DISPATCH_ATOMIC64_ALIGN; +dispatch_assert_valid_lane_type(dispatch_source_s); +dispatch_static_assert(sizeof(struct dispatch_source_s) <= 128); -// Extracts source data from the ds_data field -#define DISPATCH_SOURCE_GET_DATA(d) ((d) & 0xFFFFFFFF) - -// Extracts status from the ds_data field -#define DISPATCH_SOURCE_GET_STATUS(d) ((d) >> 32) - -// Combine data and status for the ds_data field -#define DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status) \ - ((((uint64_t)(status)) << 32) | (data)) - -#endif // __cplusplus - -void _dispatch_source_refs_register(dispatch_source_t ds, - dispatch_wlh_t wlh, dispatch_priority_t bp); -void _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options); void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); -void _dispatch_source_finalize_activation(dispatch_source_t ds, - bool *allow_resume); +void _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume); void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, - uintptr_t data, uintptr_t status, pthread_priority_t pp); + uintptr_t data, pthread_priority_t pp); size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); -DISPATCH_EXPORT // for firehose server -void _dispatch_source_merge_data(dispatch_source_t ds, pthread_priority_t pp, - unsigned long val); - 
-void _dispatch_mgr_queue_push(dispatch_queue_t dq, dispatch_object_t dou, - dispatch_qos_t qos); -void _dispatch_mgr_queue_wakeup(dispatch_queue_t dq, dispatch_qos_t qos, - dispatch_wakeup_flags_t flags); -void _dispatch_mgr_thread(dispatch_queue_t dq, dispatch_invoke_context_t dic, - dispatch_invoke_flags_t flags); -#if DISPATCH_USE_KEVENT_WORKQUEUE -void _dispatch_kevent_worker_thread(dispatch_kevent_t *events, - int *nevents); -#endif // DISPATCH_USE_KEVENT_WORKQUEUE - #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Data.swift b/src/swift/Data.swift index 1e7350463..5ad48aa79 100644 --- a/src/swift/Data.swift +++ b/src/swift/Data.swift @@ -318,6 +318,7 @@ public struct DispatchData : RandomAccessCollection { } public struct DispatchDataIterator : IteratorProtocol, Sequence { + public typealias Element = UInt8 /// Create an iterator over the given DispatchData public init(_data: DispatchData) { diff --git a/src/swift/Time.swift b/src/swift/Time.swift index d7d49c96b..538cd42fb 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -2,7 +2,7 @@ // // This source file is part of the Swift.org open source project // -// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors +// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See http://swift.org/LICENSE.txt for license information @@ -59,9 +59,13 @@ public struct DispatchTime : Comparable { public init(uptimeNanoseconds: UInt64) { var rawValue = uptimeNanoseconds #if HAVE_MACH - if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) { - rawValue = (rawValue * UInt64(DispatchTime.timebaseInfo.denom) - + UInt64(DispatchTime.timebaseInfo.numer - 1)) / UInt64(DispatchTime.timebaseInfo.numer) + // UInt64.max means distantFuture. Do not try to scale it. 
+ if rawValue != UInt64.max && DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom { + var (result, overflow) = rawValue.multipliedReportingOverflow(by: UInt64(DispatchTime.timebaseInfo.denom)) + if !overflow { + (result, overflow) = result.addingReportingOverflow(UInt64(DispatchTime.timebaseInfo.numer - 1)) + } + rawValue = overflow ? UInt64.max : result / UInt64(DispatchTime.timebaseInfo.numer) } #endif self.rawValue = dispatch_time_t(rawValue) @@ -70,21 +74,26 @@ public struct DispatchTime : Comparable { public var uptimeNanoseconds: UInt64 { var result = self.rawValue #if HAVE_MACH - if (DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom) { - result = result * UInt64(DispatchTime.timebaseInfo.numer) / UInt64(DispatchTime.timebaseInfo.denom) + var overflow: Bool + + // UInt64.max means distantFuture. Do not try to scale it. + if rawValue != UInt64.max && DispatchTime.timebaseInfo.numer != DispatchTime.timebaseInfo.denom { + (result, overflow) = result.multipliedReportingOverflow(by: UInt64(DispatchTime.timebaseInfo.numer)) + result = overflow ? 
UInt64.max : result / UInt64(DispatchTime.timebaseInfo.denom) } #endif return result } } -public func <(a: DispatchTime, b: DispatchTime) -> Bool { - if a.rawValue == ~0 || b.rawValue == ~0 { return false } - return a.rawValue < b.rawValue -} +extension DispatchTime { + public static func < (a: DispatchTime, b: DispatchTime) -> Bool { + return a.rawValue < b.rawValue + } -public func ==(a: DispatchTime, b: DispatchTime) -> Bool { - return a.rawValue == b.rawValue + public static func ==(a: DispatchTime, b: DispatchTime) -> Bool { + return a.rawValue == b.rawValue + } } public struct DispatchWallTime : Comparable { @@ -106,19 +115,56 @@ public struct DispatchWallTime : Comparable { } } -public func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool { - if b.rawValue == ~0 { - return a.rawValue != ~0 - } else if a.rawValue == ~0 { - return false +extension DispatchWallTime { + public static func <(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + let negativeOne: dispatch_time_t = ~0 + if b.rawValue == negativeOne { + return a.rawValue != negativeOne + } else if a.rawValue == negativeOne { + return false + } + return -Int64(bitPattern: a.rawValue) < -Int64(bitPattern: b.rawValue) + } + + public static func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool { + return a.rawValue == b.rawValue + } +} + +// Returns m1 * m2, clamped to the range [Int64.min, Int64.max]. +// Because of the way this function is used, we can always assume +// that m2 > 0. +private func clampedInt64Product(_ m1: Int64, _ m2: Int64) -> Int64 { + assert(m2 > 0, "multiplier must be positive") + let (result, overflow) = m1.multipliedReportingOverflow(by: m2) + if overflow { + return m1 > 0 ? Int64.max : Int64.min } - return -Int64(bitPattern: a.rawValue) < -Int64(bitPattern: b.rawValue) + return result } -public func ==(a: DispatchWallTime, b: DispatchWallTime) -> Bool { - return a.rawValue == b.rawValue +// Returns its argument clamped to the range [Int64.min, Int64.max]. 
+private func toInt64Clamped(_ value: Double) -> Int64 { + if value.isNaN { return Int64.max } + if value >= Double(Int64.max) { return Int64.max } + if value <= Double(Int64.min) { return Int64.min } + return Int64(value) } +/// Represents a time interval that can be used as an offset from a `DispatchTime` +/// or `DispatchWallTime`. +/// +/// For example: +/// let inOneSecond = DispatchTime.now() + DispatchTimeInterval.seconds(1) +/// +/// If the requested time interval is larger then the internal representation +/// permits, the result of adding it to a `DispatchTime` or `DispatchWallTime` +/// is `DispatchTime.distantFuture` and `DispatchWallTime.distantFuture` +/// respectively. Such time intervals compare as equal: +/// +/// let t1 = DispatchTimeInterval.seconds(Int.max) +/// let t2 = DispatchTimeInterval.milliseconds(Int.max) +/// let result = t1 == t2 // true public enum DispatchTimeInterval { case seconds(Int) case milliseconds(Int) @@ -129,9 +175,9 @@ public enum DispatchTimeInterval { internal var rawValue: Int64 { switch self { - case .seconds(let s): return Int64(s) * Int64(NSEC_PER_SEC) - case .milliseconds(let ms): return Int64(ms) * Int64(NSEC_PER_MSEC) - case .microseconds(let us): return Int64(us) * Int64(NSEC_PER_USEC) + case .seconds(let s): return clampedInt64Product(Int64(s), Int64(NSEC_PER_SEC)) + case .milliseconds(let ms): return clampedInt64Product(Int64(ms), Int64(NSEC_PER_MSEC)) + case .microseconds(let us): return clampedInt64Product(Int64(us), Int64(NSEC_PER_USEC)) case .nanoseconds(let ns): return Int64(ns) case .never: return Int64.max } @@ -158,16 +204,12 @@ public func -(time: DispatchTime, interval: DispatchTimeInterval) -> DispatchTim } public func +(time: DispatchTime, seconds: Double) -> DispatchTime { - let interval = seconds * Double(NSEC_PER_SEC) - let t = CDispatch.dispatch_time(time.rawValue, - interval.isInfinite || interval.isNaN ? 
Int64.max : Int64(interval)) + let t = CDispatch.dispatch_time(time.rawValue, toInt64Clamped(seconds * Double(NSEC_PER_SEC))); return DispatchTime(rawValue: t) } public func -(time: DispatchTime, seconds: Double) -> DispatchTime { - let interval = -seconds * Double(NSEC_PER_SEC) - let t = CDispatch.dispatch_time(time.rawValue, - interval.isInfinite || interval.isNaN ? Int64.min : Int64(interval)) + let t = CDispatch.dispatch_time(time.rawValue, toInt64Clamped(-seconds * Double(NSEC_PER_SEC))); return DispatchTime(rawValue: t) } @@ -182,15 +224,11 @@ public func -(time: DispatchWallTime, interval: DispatchTimeInterval) -> Dispatc } public func +(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { - let interval = seconds * Double(NSEC_PER_SEC) - let t = CDispatch.dispatch_time(time.rawValue, - interval.isInfinite || interval.isNaN ? Int64.max : Int64(interval)) + let t = CDispatch.dispatch_time(time.rawValue, toInt64Clamped(seconds * Double(NSEC_PER_SEC))); return DispatchWallTime(rawValue: t) } public func -(time: DispatchWallTime, seconds: Double) -> DispatchWallTime { - let interval = -seconds * Double(NSEC_PER_SEC) - let t = CDispatch.dispatch_time(time.rawValue, - interval.isInfinite || interval.isNaN ? 
Int64.min : Int64(interval)) + let t = CDispatch.dispatch_time(time.rawValue, toInt64Clamped(-seconds * Double(NSEC_PER_SEC))); return DispatchWallTime(rawValue: t) } diff --git a/src/time.c b/src/time.c index 5b0bab0bf..b70f81343 100644 --- a/src/time.c +++ b/src/time.c @@ -26,7 +26,6 @@ typedef struct _dispatch_host_time_data_s { bool ratio_1_to_1; } _dispatch_host_time_data_s; -DISPATCH_CACHELINE_ALIGN static _dispatch_host_time_data_s _dispatch_host_time_data; uint64_t (*_dispatch_host_time_mach2nano)(uint64_t machtime); @@ -96,39 +95,53 @@ dispatch_time(dispatch_time_t inval, int64_t delta) if (inval == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } - if ((int64_t)inval < 0) { + + dispatch_clock_t clock; + uint64_t value; + _dispatch_time_to_clock_and_value(inval, &clock, &value); + if (value == DISPATCH_TIME_FOREVER) { + // Out-of-range for this clock. + return value; + } + if (clock == DISPATCH_CLOCK_WALL) { // wall clock + offset = (uint64_t)delta; if (delta >= 0) { - offset = (uint64_t)delta; - if ((int64_t)(inval -= offset) >= 0) { + if ((int64_t)(value += offset) <= 0) { return DISPATCH_TIME_FOREVER; // overflow } - return inval; } else { - offset = (uint64_t)-delta; - if ((int64_t)(inval += offset) >= -1) { - // -1 is special == DISPATCH_TIME_FOREVER == forever - return (dispatch_time_t)-2ll; // underflow + if ((int64_t)(value += offset) < 1) { + // -1 is special == DISPATCH_TIME_FOREVER == forever, so + // return -2 (after conversion to dispatch_time_t) instead. + value = 2; // underflow. } - return inval; } + return _dispatch_clock_and_value_to_time(DISPATCH_CLOCK_WALL, value); } - // mach clock - if (inval == 0) { - inval = _dispatch_absolute_time(); + + // up time or monotonic time. "value" has the clock type removed, + // so the test against DISPATCH_TIME_NOW is correct for either clock. 
+ if (value == DISPATCH_TIME_NOW) { + if (clock == DISPATCH_CLOCK_UPTIME) { + value = _dispatch_uptime(); + } else { + dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); + value = _dispatch_monotonic_time(); + } } if (delta >= 0) { offset = _dispatch_time_nano2mach((uint64_t)delta); - if ((int64_t)(inval += offset) <= 0) { + if ((int64_t)(value += offset) <= 0) { return DISPATCH_TIME_FOREVER; // overflow } - return inval; + return _dispatch_clock_and_value_to_time(clock, value); } else { offset = _dispatch_time_nano2mach((uint64_t)-delta); - if ((int64_t)(inval -= offset) < 1) { - return 1; // underflow + if ((int64_t)(value -= offset) < 1) { + return _dispatch_clock_and_value_to_time(clock, 1); // underflow } - return inval; + return _dispatch_clock_and_value_to_time(clock, value); } } @@ -156,16 +169,25 @@ _dispatch_timeout(dispatch_time_t when) if (when == DISPATCH_TIME_FOREVER) { return DISPATCH_TIME_FOREVER; } - if (when == 0) { + if (when == DISPATCH_TIME_NOW) { return 0; } - if ((int64_t)when < 0) { - when = (dispatch_time_t)-(int64_t)when; + + dispatch_clock_t clock; + uint64_t value; + _dispatch_time_to_clock_and_value(when, &clock, &value); + if (clock == DISPATCH_CLOCK_WALL) { now = _dispatch_get_nanoseconds(); - return now >= when ? 0 : when - now; + return now >= value ? 0 : value - now; + } else { + if (clock == DISPATCH_CLOCK_UPTIME) { + now = _dispatch_uptime(); + } else { + dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); + now = _dispatch_monotonic_time(); + } + return now >= value ? 0 : _dispatch_time_mach2nano(value - now); } - now = _dispatch_absolute_time(); - return now >= when ? 0 : _dispatch_time_mach2nano(when - now); } uint64_t @@ -178,5 +200,7 @@ _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when) // time in nanoseconds since the POSIX epoch already return (uint64_t)-(int64_t)when; } + + // Up time or monotonic time. 
return _dispatch_get_nanoseconds() + _dispatch_timeout(when); } diff --git a/src/trace.h b/src/trace.h index c670f60b7..ed69e1b56 100644 --- a/src/trace.h +++ b/src/trace.h @@ -31,8 +31,8 @@ #if DISPATCH_USE_DTRACE_INTROSPECTION #define _dispatch_trace_callout(_c, _f, _dcc) do { \ - if (slowpath(DISPATCH_CALLOUT_ENTRY_ENABLED()) || \ - slowpath(DISPATCH_CALLOUT_RETURN_ENABLED())) { \ + if (unlikely(DISPATCH_CALLOUT_ENTRY_ENABLED() || \ + DISPATCH_CALLOUT_RETURN_ENABLED())) { \ dispatch_queue_t _dq = _dispatch_queue_get_current(); \ const char *_label = _dq && _dq->dq_label ? _dq->dq_label : ""; \ dispatch_function_t _func = (dispatch_function_t)(_f); \ @@ -75,6 +75,24 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) #define _dispatch_client_callout2 _dispatch_trace_client_callout2 #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION +#ifdef _COMM_PAGE_KDEBUG_ENABLE +#define DISPATCH_KTRACE_ENABLED \ + (*(volatile uint32_t *)_COMM_PAGE_KDEBUG_ENABLE != 0) + +#if DISPATCH_INTROSPECTION +#define _dispatch_only_if_ktrace_enabled(...) \ + if (unlikely(DISPATCH_KTRACE_ENABLED)) ({ __VA_ARGS__; }) +#else +#define _dispatch_only_if_ktrace_enabled(...) (void)0 +#endif /* DISPATCH_INTROSPECTION */ + +#else /* _COMM_PAGE_KDEBUG_ENABLE */ + +#define DISPATCH_KTRACE_ENABLED 0 +#define _dispatch_only_if_ktrace_enabled(...) 
(void)0 +#endif /* _COMM_PAGE_KDEBUG_ENABLE */ + + #if DISPATCH_USE_DTRACE_INTROSPECTION #define _dispatch_trace_continuation(_q, _o, _t) do { \ dispatch_queue_t _dq = (_q); \ @@ -85,25 +103,25 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) dispatch_function_t _func; \ void *_ctxt; \ if (_dispatch_object_has_vtable(_do)) { \ - _kind = (char*)dx_kind(_do); \ - if ((dx_type(_do) & _DISPATCH_META_TYPE_MASK) == \ - _DISPATCH_SOURCE_TYPE && (_dq) != &_dispatch_mgr_q) { \ + _kind = (char*)_dispatch_object_class_name(_do); \ + if ((dx_metatype(_do) == _DISPATCH_SOURCE_TYPE) && \ + _dq != _dispatch_mgr_q._as_dq) { \ dispatch_source_t _ds = (dispatch_source_t)_do; \ _dc = os_atomic_load(&_ds->ds_refs->ds_handler[ \ DS_EVENT_HANDLER], relaxed); \ _func = _dc ? _dc->dc_func : NULL; \ _ctxt = _dc ? _dc->dc_ctxt : NULL; \ } else { \ - _func = (dispatch_function_t)_dispatch_queue_invoke; \ + _func = (dispatch_function_t)_dispatch_lane_invoke; \ _ctxt = _do->do_ctxt; \ } \ } else { \ _dc = (void*)_do; \ _ctxt = _dc->dc_ctxt; \ - if (_dc->dc_flags & DISPATCH_OBJ_SYNC_WAITER_BIT) { \ + if (_dc->dc_flags & DC_FLAG_SYNC_WAITER) { \ _kind = "semaphore"; \ _func = (dispatch_function_t)dispatch_semaphore_signal; \ - } else if (_dc->dc_flags & DISPATCH_OBJ_BLOCK_BIT) { \ + } else if (_dc->dc_flags & DC_FLAG_BLOCK) { \ _kind = "block"; \ _func = _dispatch_Block_invoke(_dc->dc_ctxt); \ } else { \ @@ -121,62 +139,246 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION #if DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_class_t +_dispatch_trace_queue_create(dispatch_queue_class_t dqu) +{ + _dispatch_only_if_ktrace_enabled({ + uint64_t dq_label[4] = {0}; // So that we get the right null termination + dispatch_queue_t dq = dqu._dq; + strncpy((char *)dq_label, (char *)dq->dq_label ?: "", 
sizeof(dq_label)); + + _dispatch_ktrace2(DISPATCH_QOS_TRACE_queue_creation_start, + dq->dq_serialnum, + _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); + + _dispatch_ktrace4(DISPATCH_QOS_TRACE_queue_creation_end, + dq_label[0], dq_label[1], dq_label[2], dq_label[3]); + }); + + return _dispatch_introspection_queue_create(dqu); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_queue_dispose(dispatch_queue_class_t dqu) +{ + _dispatch_ktrace1(DISPATCH_QOS_TRACE_queue_dispose, (dqu._dq)->dq_serialnum); + _dispatch_introspection_queue_dispose(dqu); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_source_dispose(dispatch_source_t ds) +{ + _dispatch_ktrace1(DISPATCH_QOS_TRACE_src_dispose, (uintptr_t)ds); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_block_create_with_voucher_and_priority(dispatch_block_t db, + void *func, dispatch_block_flags_t original_flags, + pthread_priority_t original_priority, + pthread_priority_t thread_prio, pthread_priority_t final_block_prio) +{ + _dispatch_ktrace4(DISPATCH_QOS_TRACE_private_block_creation, + (uintptr_t)db, + (uintptr_t)func, + BITPACK_UINT32_PAIR(original_flags, original_priority), + BITPACK_UINT32_PAIR(thread_prio, final_block_prio)); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_firehose_reserver_gave_up(uint8_t stream, uint8_t ref, + bool waited, uint64_t old_state, uint64_t new_state) +{ + uint64_t first = ((uint64_t)ref << 8) | (uint64_t)stream; + uint64_t second = waited; + _dispatch_ktrace4(DISPATCH_FIREHOSE_TRACE_reserver_gave_up, first, second, + old_state, new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_firehose_reserver_wait(uint8_t stream, uint8_t ref, + bool waited, uint64_t old_state, uint64_t new_state, bool reliable) +{ + uint64_t first = ((uint64_t)ref << 8) | (uint64_t)stream; + uint64_t second = ((uint64_t)reliable << 1) | waited; + _dispatch_ktrace4(DISPATCH_FIREHOSE_TRACE_reserver_wait, first, 
second, + old_state, new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_firehose_allocator(uint64_t ask0, uint64_t ask1, + uint64_t old_state, uint64_t new_state) +{ + _dispatch_ktrace4(DISPATCH_FIREHOSE_TRACE_allocator, ask0, ask1, old_state, + new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_firehose_wait_for_logd(uint8_t stream, uint64_t timestamp, + uint64_t old_state, uint64_t new_state) +{ + _dispatch_ktrace4(DISPATCH_FIREHOSE_TRACE_wait_for_logd, stream, timestamp, + old_state, new_state); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_firehose_chunk_install(uint64_t ask0, uint64_t ask1, + uint64_t old_state, uint64_t new_state) +{ + _dispatch_ktrace4(DISPATCH_FIREHOSE_TRACE_chunk_install, ask0, ask1, + old_state, new_state); +} + +/* Implemented in introspection.c */ +void +_dispatch_trace_item_push_internal(dispatch_queue_t dq, dispatch_object_t dou); + +#define _dispatch_trace_item_push_inline(...) \ + _dispatch_only_if_ktrace_enabled({ \ + _dispatch_trace_item_push_internal(__VA_ARGS__); \ + }) + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_root_queue_push_list(dispatch_queue_t dq, - dispatch_object_t _head, dispatch_object_t _tail, int n) +_dispatch_trace_item_push_list(dispatch_queue_global_t dq, + dispatch_object_t _head, dispatch_object_t _tail) { - if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { + if (unlikely(DISPATCH_QUEUE_PUSH_ENABLED() || DISPATCH_KTRACE_ENABLED)) { struct dispatch_object_s *dou = _head._do; do { - _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + if (unlikely(DISPATCH_QUEUE_PUSH_ENABLED())) { + _dispatch_trace_continuation(dq->_as_dq, dou, DISPATCH_QUEUE_PUSH); + } + + _dispatch_trace_item_push_inline(dq->_as_dq, dou); } while (dou != _tail._do && (dou = dou->do_next)); } _dispatch_introspection_queue_push_list(dq, _head, _tail); - _dispatch_root_queue_push_inline(dq, _head, _tail, n); } DISPATCH_ALWAYS_INLINE static inline void 
-_dispatch_trace_queue_push_inline(dispatch_queue_t dq, dispatch_object_t _tail, - dispatch_qos_t qos) +_dispatch_trace_item_push(dispatch_queue_class_t dqu, dispatch_object_t _tail) { - if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { - struct dispatch_object_s *dou = _tail._do; - _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + if (unlikely(DISPATCH_QUEUE_PUSH_ENABLED())) { + _dispatch_trace_continuation(dqu._dq, _tail._do, DISPATCH_QUEUE_PUSH); } - _dispatch_introspection_queue_push(dq, _tail); - _dispatch_queue_push_inline(dq, _tail, qos); + + _dispatch_trace_item_push_inline(dqu._dq, _tail._do); + _dispatch_introspection_queue_push(dqu, _tail); } +/* Implemented in introspection.c */ +void +_dispatch_trace_item_pop_internal(dispatch_queue_t dq, dispatch_object_t dou); + +#define _dispatch_trace_item_pop_inline(...) \ + _dispatch_only_if_ktrace_enabled({ \ + _dispatch_trace_item_pop_internal(__VA_ARGS__); \ + }) + DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_continuation_push(dispatch_queue_t dq, dispatch_object_t _tail) +_dispatch_trace_item_pop(dispatch_queue_class_t dqu, dispatch_object_t dou) { - if (slowpath(DISPATCH_QUEUE_PUSH_ENABLED())) { - struct dispatch_object_s *dou = _tail._do; - _dispatch_trace_continuation(dq, dou, DISPATCH_QUEUE_PUSH); + if (unlikely(DISPATCH_QUEUE_POP_ENABLED())) { + _dispatch_trace_continuation(dqu._dq, dou._do, DISPATCH_QUEUE_POP); } - _dispatch_introspection_queue_push(dq, _tail); + + _dispatch_trace_item_pop_inline(dqu._dq, dou); + _dispatch_introspection_queue_pop(dqu, dou); } -#define _dispatch_root_queue_push_inline _dispatch_trace_root_queue_push_list -#define _dispatch_queue_push_inline _dispatch_trace_queue_push_inline +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_item_complete_inline(dispatch_object_t dou) +{ + _dispatch_ktrace1(DISPATCH_QOS_TRACE_queue_item_complete, dou._do_value); +} DISPATCH_ALWAYS_INLINE static inline void -_dispatch_trace_continuation_pop(dispatch_queue_t 
dq, dispatch_object_t dou) +_dispatch_trace_item_complete(dispatch_object_t dou) { - if (slowpath(DISPATCH_QUEUE_POP_ENABLED())) { - _dispatch_trace_continuation(dq, dou._do, DISPATCH_QUEUE_POP); - } - _dispatch_introspection_queue_pop(dq, dou); + _dispatch_trace_item_complete_inline(dou); + _dispatch_introspection_queue_item_complete(dou); +} + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_object_s * +_dispatch_trace_item_sync_push_pop(dispatch_queue_class_t dqu, + void *ctx, dispatch_function_t f, uintptr_t dc_flags) +{ + // No need to add tracing here since the introspection calls out to + // _trace_item_push and _trace_item_pop + return _dispatch_introspection_queue_fake_sync_push_pop(dqu._dq, ctx, + f, dc_flags); } + +/* Implemented in introspection.c */ +void +_dispatch_trace_source_callout_entry_internal(dispatch_source_t ds, long kind, + dispatch_queue_t dq, dispatch_continuation_t dc); + +#define _dispatch_trace_source_callout_entry(...) \ + _dispatch_only_if_ktrace_enabled({ \ + _dispatch_trace_source_callout_entry_internal(__VA_ARGS__); \ + }) + +#define _dispatch_trace_runtime_event(evt, ptr, value) \ + _dispatch_introspection_runtime_event(\ + dispatch_introspection_runtime_event_##evt, ptr, value) + +#define DISPATCH_TRACE_ARG(arg) , arg #else -#define _dispatch_trace_continuation_push(dq, dou) \ +#define _dispatch_trace_queue_create _dispatch_introspection_queue_create +#define _dispatch_trace_queue_dispose _dispatch_introspection_queue_dispose +#define _dispatch_trace_source_dispose(ds) ((void)0) +#define _dispatch_trace_block_create_with_voucher_and_priority(_db, _func, \ + _flags, _pri, _tpri, _bpri) \ + do { (void)_db; (void)_func; (void) _flags; (void) _pri; (void) _tpri; \ + (void) _bpri; } while (0) +#define _dispatch_trace_firehose_reserver_gave_up(stream, ref, waited, \ + old_state, new_state) \ + do { (void)(stream); (void)(ref); (void)(waited); (void)(old_state); \ + (void)(new_state); } while (0) +#define 
_dispatch_trace_firehose_reserver_wait(stream, ref, waited, \ + old_state, new_state, reliable) \ + do { (void)(stream); (void)(ref); (void)(waited); (void)(old_state); \ + (void)(new_state); (void)(reliable); } while (0) +#define _dispatch_trace_firehose_allocator(ask0, ask1, old_state, new_state) \ + do { (void)(ask0); (void)(ask1); (void)(old_state); \ + (void)(new_state); } while (0) +#define _dispatch_trace_firehose_wait_for_logd(stream, timestamp, old_state, \ + new_state) \ + do { (void)(stream); (void)(timestamp); (void)(old_state); \ + (void)(new_state); } while (0) +#define _dispatch_trace_firehose_chunk_install(ask0, ask1, old_state, \ + new_state) \ + do { (void)(ask0); (void)(ask1); (void)(old_state); \ + (void)(new_state); } while (0) +#define _dispatch_trace_item_push(dq, dou) \ do { (void)(dq); (void)(dou); } while(0) -#define _dispatch_trace_continuation_pop(dq, dou) \ +#define _dispatch_trace_item_push_list(dq, head, tail) \ + do { (void)(dq); (void)(head); (void)tail; } while(0) +#define _dispatch_trace_item_pop(dq, dou) \ do { (void)(dq); (void)(dou); } while(0) +#define _dispatch_trace_item_complete(dou) ((void)0) +#define _dispatch_trace_item_sync_push_pop(dq, ctxt, func, flags) \ + do { (void)(dq); (void)(ctxt); (void)(func); (void)(flags); } while(0) +#define _dispatch_trace_source_callout_entry(ds, k, dq, dc) ((void)0) +#define _dispatch_trace_runtime_event(evt, ptr, value) \ + do { (void)(ptr); (void)(value); } while(0) +#define DISPATCH_TRACE_ARG(arg) #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION #if DISPATCH_USE_DTRACE @@ -188,6 +390,24 @@ _dispatch_trace_timer_function(dispatch_timer_source_refs_t dr) return dc ? 
dc->dc_func : NULL; } +DISPATCH_ALWAYS_INLINE +static inline uint64_t +_dispatch_time_clock_to_nsecs(dispatch_clock_t clock, uint64_t t) +{ +#if !DISPATCH_TIME_UNIT_USES_NANOSECONDS + switch (clock) { + case DISPATCH_CLOCK_MONOTONIC: + case DISPATCH_CLOCK_UPTIME: + return _dispatch_time_mach2nano(t); + case DISPATCH_CLOCK_WALL: + return t; + } +#else + (void)clock; + return t; +#endif +} + DISPATCH_ALWAYS_INLINE static inline dispatch_trace_timer_params_t _dispatch_trace_timer_params(dispatch_clock_t clock, @@ -195,7 +415,7 @@ _dispatch_trace_timer_params(dispatch_clock_t clock, dispatch_trace_timer_params_t params) { #define _dispatch_trace_time2nano3(t) \ - (clock == DISPATCH_CLOCK_MACH ? _dispatch_time_mach2nano(t) : (t)) + (_dispatch_time_clock_to_nsecs(clock, t)) #define _dispatch_trace_time2nano2(v, t) ({ uint64_t _t = (t); \ (v) >= INT64_MAX ? -1ll : (int64_t)_dispatch_trace_time2nano3(_t);}) #define _dispatch_trace_time2nano(v) ({ uint64_t _t; \ @@ -218,7 +438,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _dispatch_trace_timer_configure_enabled(void) { - return slowpath(DISPATCH_TIMER_CONFIGURE_ENABLED()); + return DISPATCH_TIMER_CONFIGURE_ENABLED(); } DISPATCH_ALWAYS_INLINE @@ -236,7 +456,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_timer_program(dispatch_timer_source_refs_t dr, uint64_t deadline) { - if (slowpath(DISPATCH_TIMER_PROGRAM_ENABLED())) { + if (unlikely(DISPATCH_TIMER_PROGRAM_ENABLED())) { if (deadline && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(dr->du_ident); @@ -252,7 +472,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_trace_timer_wake(dispatch_timer_source_refs_t dr) { - if (slowpath(DISPATCH_TIMER_WAKE_ENABLED())) { + if (unlikely(DISPATCH_TIMER_WAKE_ENABLED())) { if (dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); DISPATCH_TIMER_WAKE(ds, _dispatch_trace_timer_function(dr)); @@ -265,7 +485,7 @@ static inline void 
_dispatch_trace_timer_fire(dispatch_timer_source_refs_t dr, uint64_t data, uint64_t missed) { - if (slowpath(DISPATCH_TIMER_FIRE_ENABLED())) { + if (unlikely(DISPATCH_TIMER_FIRE_ENABLED())) { if (!(data - missed) && dr) { dispatch_source_t ds = _dispatch_source_from_refs(dr); DISPATCH_TIMER_FIRE(ds, _dispatch_trace_timer_function(dr)); diff --git a/src/transform.c b/src/transform.c index 2c885ca36..93d600355 100644 --- a/src/transform.c +++ b/src/transform.c @@ -59,7 +59,7 @@ enum { static const unsigned char base32_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"; -static const char base32_decode_table[] = { +static const signed char base32_decode_table[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, @@ -67,13 +67,13 @@ static const char base32_decode_table[] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 }; -static const ssize_t base32_decode_table_size = sizeof(base32_decode_table) - / sizeof(*base32_decode_table); +static const ssize_t base32_decode_table_size = + sizeof(base32_decode_table) / sizeof(*base32_decode_table); static const unsigned char base32hex_encode_table[] = "0123456789ABCDEFGHIJKLMNOPQRSTUV"; -static const char base32hex_decode_table[] = { +static const signed char base32hex_decode_table[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, @@ -87,7 +87,7 @@ static const ssize_t base32hex_decode_table_size = static const unsigned char base64_encode_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; -static const char base64_decode_table[] = { +static const signed char base64_decode_table[] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -99,8 +99,8 @@ static const char base64_decode_table[] = { 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 }; -static const ssize_t base64_decode_table_size = sizeof(base64_decode_table) - / sizeof(*base64_decode_table); +static const ssize_t base64_decode_table_size = + sizeof(base64_decode_table) / sizeof(*base64_decode_table); #pragma mark - #pragma mark dispatch_transform_buffer @@ -344,7 +344,9 @@ _dispatch_transform_to_utf16(dispatch_data_t data, int32_t byteOrder) if (os_mul_overflow(size - i, sizeof(uint16_t), &next)) { return (bool)false; } - if (wch >= 0xd800 && wch < 0xdfff) { + if (wch == 0xfeff && offset + i == 3) { + // skip the BOM if any, as we already inserted one ourselves + } else if (wch >= 0xd800 && wch < 0xdfff) { // Illegal range (surrogate pair) return (bool)false; } else if (wch >= 0x10000) { @@ -550,12 +552,32 @@ _dispatch_transform_to_utf16be(dispatch_data_t data) return _dispatch_transform_to_utf16(data, OSBigEndian); } +static dispatch_data_t +_dispatch_transform_to_utf8_without_bom(dispatch_data_t data) +{ + static uint8_t const utf8_bom[] = { 0xef, 0xbb, 0xbf }; + const void *p; + dispatch_data_t subrange = _dispatch_data_subrange_map(data, &p, 0, 3); + bool has_bom = false; + + if (subrange) { + has_bom = (memcmp(p, utf8_bom, sizeof(utf8_bom)) == 0); + dispatch_release(subrange); + } + if (has_bom) { + return dispatch_data_create_subrange(data, 3, + dispatch_data_get_size(data) - 3); + } + dispatch_retain(data); + return data; +} + #pragma mark - #pragma mark base32 static dispatch_data_t _dispatch_transform_from_base32_with_table(dispatch_data_t data, - const char* table, ssize_t table_size) + const signed char* table, ssize_t table_size) { __block uint64_t x = 0, count = 0, pad = 0; @@ -585,7 +607,7 @@ _dispatch_transform_from_base32_with_table(dispatch_data_t data, } count++; - char value = table[index]; + signed char value = 
table[index]; if (value == -2) { value = 0; pad++; @@ -830,7 +852,7 @@ _dispatch_transform_from_base64(dispatch_data_t data) } count++; - char value = base64_decode_table[index]; + signed char value = base64_decode_table[index]; if (value == -2) { value = 0; pad++; @@ -1081,7 +1103,7 @@ const struct dispatch_data_format_type_s _dispatch_data_format_type_utf8 = { .output_mask = (_DISPATCH_DATA_FORMAT_UTF8 | _DISPATCH_DATA_FORMAT_UTF16BE | _DISPATCH_DATA_FORMAT_UTF16LE), .decode = NULL, - .encode = NULL, + .encode = _dispatch_transform_to_utf8_without_bom, }; const struct dispatch_data_format_type_s _dispatch_data_format_type_utf_any = { diff --git a/src/voucher.c b/src/voucher.c index 458e2f0a4..39a8cbacf 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -46,12 +46,12 @@ #define FIREHOSE_ACTIVITY_ID_MAKE(aid, flags) \ FIREHOSE_ACTIVITY_ID_MERGE_FLAGS((aid) & MACH_ACTIVITY_ID_MASK, flags) -static volatile uint64_t _voucher_aid_next; +DISPATCH_STATIC_GLOBAL(volatile uint64_t _voucher_aid_next); #pragma mark - #pragma mark voucher_t -OS_OBJECT_CLASS_DECL(voucher, object); +OS_OBJECT_CLASS_DECL(voucher); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(voucher, (void (*)(_os_object_t))_voucher_xref_dispose, @@ -169,49 +169,43 @@ _voucher_thread_cleanup(void *voucher) #pragma mark - #pragma mark voucher_hash -DISPATCH_CACHELINE_ALIGN -static voucher_hash_head_s _voucher_hash[VL_HASH_SIZE]; +extern voucher_hash_head_s _voucher_hash[VL_HASH_SIZE]; +DISPATCH_GLOBAL_INIT(voucher_hash_head_s _voucher_hash[VL_HASH_SIZE], { + [0 ... 
VL_HASH_SIZE - 1] = { ~(uintptr_t)VOUCHER_NULL }, +}); +DISPATCH_STATIC_GLOBAL(dispatch_unfair_lock_s _voucher_hash_lock); #define _voucher_hash_head(kv) (&_voucher_hash[VL_HASH((kv))]) -static dispatch_unfair_lock_s _voucher_hash_lock; #define _voucher_hash_lock_lock() \ _dispatch_unfair_lock_lock(&_voucher_hash_lock) #define _voucher_hash_lock_unlock() \ _dispatch_unfair_lock_unlock(&_voucher_hash_lock) -DISPATCH_ALWAYS_INLINE -static inline void -_voucher_hash_head_init(voucher_hash_head_s *head) -{ - _voucher_hash_set_next(&head->vhh_first, VOUCHER_NULL); - _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &head->vhh_first); -} - DISPATCH_ALWAYS_INLINE static inline void _voucher_hash_enqueue(mach_voucher_t kv, voucher_t v) { - // same as TAILQ_INSERT_TAIL + // same as LIST_INSERT_HEAD voucher_hash_head_s *head = _voucher_hash_head(kv); - uintptr_t prev_ptr = head->vhh_last_ptr; - _voucher_hash_set_next(&v->v_list.vhe_next, VOUCHER_NULL); - v->v_list.vhe_prev_ptr = prev_ptr; - _voucher_hash_store_to_prev_ptr(prev_ptr, v); - _voucher_hash_set_prev_ptr(&head->vhh_last_ptr, &v->v_list.vhe_next); + voucher_t next = _voucher_hash_get_next(head->vhh_first); + v->v_list.vhe_next = head->vhh_first; + if (next) { + _voucher_hash_set_prev_ptr(&next->v_list.vhe_prev_ptr, + &v->v_list.vhe_next); + } + _voucher_hash_set_next(&head->vhh_first, v); + _voucher_hash_set_prev_ptr(&v->v_list.vhe_prev_ptr, &head->vhh_first); } DISPATCH_ALWAYS_INLINE static inline void -_voucher_hash_remove(mach_voucher_t kv, voucher_t v) +_voucher_hash_remove(voucher_t v) { - // same as TAILQ_REMOVE - voucher_hash_head_s *head = _voucher_hash_head(kv); + // same as LIST_REMOVE voucher_t next = _voucher_hash_get_next(v->v_list.vhe_next); uintptr_t prev_ptr = v->v_list.vhe_prev_ptr; if (next) { next->v_list.vhe_prev_ptr = prev_ptr; - } else { - head->vhh_last_ptr = prev_ptr; } _voucher_hash_store_to_prev_ptr(prev_ptr, next); _voucher_hash_mark_not_enqueued(v); @@ -270,7 +264,7 @@ 
_voucher_remove(voucher_t v) } // check for resurrection race with _voucher_find_and_retain if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0) { - if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(kv, v); + if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(v); } _voucher_hash_lock_unlock(); } @@ -321,7 +315,7 @@ _voucher_task_mach_voucher_init(void* ctxt DISPATCH_UNUSED) }; kr = _voucher_create_mach_voucher(&task_create_recipe, sizeof(task_create_recipe), &kv); - if (slowpath(kr)) { + if (unlikely(kr)) { DISPATCH_CLIENT_CRASH(kr, "Could not create task mach voucher"); } _voucher_default_task_mach_voucher = kv; @@ -755,7 +749,7 @@ voucher_decrement_importance_count4CF(voucher_t v) _dispatch_voucher_debug("kvoucher[0x%08x] decrement importance count to %u:" " %s - 0x%x", v, kv, count, mach_error_string(kr), kr); #endif - if (slowpath(dispatch_assume_zero(kr) == KERN_FAILURE)) { + if (unlikely(dispatch_assume_zero(kr) == KERN_FAILURE)) { DISPATCH_CLIENT_CRASH(kr, "Voucher importance count underflow"); } } @@ -781,7 +775,7 @@ _voucher_dispose(voucher_t voucher) { _voucher_trace(DISPOSE, voucher); _dispatch_voucher_debug("dispose", voucher); - if (slowpath(_voucher_hash_is_enqueued(voucher))) { + if (unlikely(_voucher_hash_is_enqueued(voucher))) { _dispatch_voucher_debug("corruption", voucher); DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } @@ -1020,10 +1014,6 @@ void _voucher_init(void) { _voucher_libkernel_init(); - unsigned int i; - for (i = 0; i < VL_HASH_SIZE; i++) { - _voucher_hash_head_init(&_voucher_hash[i]); - } } #pragma mark - @@ -1077,9 +1067,10 @@ voucher_activity_id_allocate(firehose_activity_flags_t flags) return _voucher_activity_id_allocate(flags); } -#define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, privbuf) \ +#define _voucher_activity_tracepoint_reserve(stamp, stream, pub, priv, \ + privbuf, reliable) \ firehose_buffer_tracepoint_reserve(_firehose_task_buffer, stamp, \ - stream, pub, priv, privbuf) + stream, pub, 
priv, privbuf, reliable) #define _voucher_activity_tracepoint_flush(ft, ftid) \ firehose_buffer_tracepoint_flush(_firehose_task_buffer, ft, ftid) @@ -1096,7 +1087,7 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) info_size = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 1, &p_uniqinfo, PROC_PIDUNIQIDENTIFIERINFO_SIZE); - if (slowpath(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) { + if (unlikely(info_size != PROC_PIDUNIQIDENTIFIERINFO_SIZE)) { if (info_size == 0) { DISPATCH_INTERNAL_CRASH(errno, "Unable to get the unique pid (error)"); @@ -1108,11 +1099,7 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) _voucher_unique_pid = p_uniqinfo.p_uniqueid; - if (!fastpath(_voucher_libtrace_hooks)) { - if (0) { // - DISPATCH_CLIENT_CRASH(0, - "Activity subsystem isn't initialized yet"); - } + if (unlikely(!_voucher_libtrace_hooks)) { return; } logd_port = _voucher_libtrace_hooks->vah_get_logd_port(); @@ -1144,13 +1131,21 @@ _voucher_activity_disabled(void) NULL, _firehose_task_buffer_init); firehose_buffer_t fb = _firehose_task_buffer; - if (fastpath(fb)) { - return slowpath(fb->fb_header.fbh_sendp == MACH_PORT_DEAD); + return !fb || fb->fb_header.fbh_sendp[false] == MACH_PORT_DEAD; +} + +void * +voucher_activity_get_logging_preferences(size_t *length) +{ + if (unlikely(_voucher_activity_disabled())) { + *length = 0; + return NULL; } - return true; + + return firehose_buffer_get_logging_prefs(_firehose_task_buffer, length); } -void* +void * voucher_activity_get_metadata_buffer(size_t *length) { if (_voucher_activity_disabled()) { @@ -1224,8 +1219,8 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, for (size_t i = 0; i < countof(streams); i++) { ft = _voucher_activity_tracepoint_reserve(stamp, streams[i], pubsize, - 0, NULL); - if (!fastpath(ft)) continue; + 0, NULL, true); + if (unlikely(!ft)) continue; uint8_t *pubptr = ft->ft_data; if (current_id) { @@ -1285,8 +1280,9 @@ _voucher_activity_swap(firehose_activity_id_t old_id, 
_dispatch_voucher_ktrace_activity_adopt(new_id); - ft = _voucher_activity_tracepoint_reserve(stamp, stream, pubsize, 0, NULL); - if (!fastpath(ft)) return; + ft = _voucher_activity_tracepoint_reserve(stamp, stream, pubsize, 0, NULL, + true); + if (unlikely(!ft)) return; uint8_t *pubptr = ft->ft_data; if (old_id) pubptr = _dispatch_memappend(pubptr, &old_id); if (new_id) pubptr = _dispatch_memappend(pubptr, &new_id); @@ -1326,14 +1322,15 @@ voucher_activity_flush(firehose_stream_t stream) DISPATCH_NOINLINE firehose_tracepoint_id_t -voucher_activity_trace_v(firehose_stream_t stream, +voucher_activity_trace_v_2(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, uint64_t stamp, - const struct iovec *iov, size_t publen, size_t privlen) + const struct iovec *iov, size_t publen, size_t privlen, uint32_t flags) { firehose_tracepoint_id_u ftid = { .ftid_value = trace_id }; const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); const size_t _firehose_chunk_payload_size = sizeof(((struct firehose_chunk_s *)0)->fc_data); + bool reliable = !(flags & VOUCHER_ACTIVITY_TRACE_FLAG_UNRELIABLE); if (_voucher_activity_disabled()) return 0; @@ -1364,13 +1361,13 @@ voucher_activity_trace_v(firehose_stream_t stream, pubsize += sizeof(struct firehose_buffer_range_s); } - if (slowpath(ft_size + pubsize + privlen > _firehose_chunk_payload_size)) { + if (unlikely(ft_size + pubsize + privlen > _firehose_chunk_payload_size)) { DISPATCH_CLIENT_CRASH(ft_size + pubsize + privlen, "Log is too large"); } ft = _voucher_activity_tracepoint_reserve(stamp, stream, (uint16_t)pubsize, - (uint16_t)privlen, &privptr); - if (!fastpath(ft)) return 0; + (uint16_t)privlen, &privptr, reliable); + if (unlikely(!ft)) return 0; pubptr = ft->ft_data; if (va_id) { pubptr = _dispatch_memappend(pubptr, &va_id); @@ -1404,38 +1401,41 @@ voucher_activity_trace_v(firehose_stream_t stream, return ftid.ftid_value; } +DISPATCH_NOINLINE firehose_tracepoint_id_t 
-voucher_activity_trace(firehose_stream_t stream, +voucher_activity_trace_v(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, uint64_t stamp, - const void *pubdata, size_t publen) + const struct iovec *iov, size_t publen, size_t privlen) { - struct iovec iov = { (void *)pubdata, publen }; - return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0); + return voucher_activity_trace_v_2(stream, trace_id, stamp, iov, publen, + privlen, 0); } firehose_tracepoint_id_t -voucher_activity_trace_with_private_strings(firehose_stream_t stream, +voucher_activity_trace(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, uint64_t stamp, - const void *pubdata, size_t publen, - const void *privdata, size_t privlen) + const void *pubdata, size_t publen) { - struct iovec iov[2] = { - { (void *)pubdata, publen }, - { (void *)privdata, privlen }, - }; - return voucher_activity_trace_v(stream, trace_id, stamp, - iov, publen, privlen); + struct iovec iov = { (void *)pubdata, publen }; + return voucher_activity_trace_v(stream, trace_id, stamp, &iov, publen, 0); } #pragma mark - #pragma mark _voucher_debug +#define bufprintf(...) \ + offset += dsnprintf(&buf[offset], bufsiz > offset ? bufsiz - offset : 0, ##__VA_ARGS__) +#define bufprintprefix() \ + if (prefix) bufprintf("%s", prefix) +#define VOUCHER_DETAIL_PREFIX " " +#define IKOT_VOUCHER 37U +#define VOUCHER_CONTENTS_SIZE 8192 +#define MAX_HEX_DATA_SIZE 1024 + size_t -_voucher_debug(voucher_t v, char* buf, size_t bufsiz) +_voucher_debug(voucher_t v, char *buf, size_t bufsiz) { size_t offset = 0; - #define bufprintf(...) \ - offset += dsnprintf(&buf[offset], bufsiz - offset, ##__VA_ARGS__) bufprintf("voucher[%p] = { xref = %d, ref = %d", v, v->os_obj_xref_cnt + 1, v->os_obj_ref_cnt + 1); @@ -1443,11 +1443,17 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) bufprintf(", base voucher %p", v->v_kvbase); } if (v->v_kvoucher) { - bufprintf(", kvoucher%s 0x%x", v->v_kvoucher == v->v_ipc_kvoucher ? 
+ bufprintf(", kvoucher%s 0x%x [\n", v->v_kvoucher == v->v_ipc_kvoucher ? " & ipc kvoucher" : "", v->v_kvoucher); + offset = voucher_kvoucher_debug(mach_task_self(), v->v_kvoucher, buf, + bufsiz, offset, VOUCHER_DETAIL_PREFIX, MAX_HEX_DATA_SIZE); + bufprintf("]"); } if (v->v_ipc_kvoucher && v->v_ipc_kvoucher != v->v_kvoucher) { - bufprintf(", ipc kvoucher 0x%x", v->v_ipc_kvoucher); + bufprintf(", ipc kvoucher 0x%x [\n", v->v_ipc_kvoucher); + offset = voucher_kvoucher_debug(mach_task_self(), v->v_ipc_kvoucher, + buf, bufsiz, offset, VOUCHER_DETAIL_PREFIX, MAX_HEX_DATA_SIZE); + bufprintf("]"); } if (v->v_priority) { bufprintf(", QOS 0x%x", v->v_priority); @@ -1457,6 +1463,128 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) v->v_activity, v->v_activity_creator, v->v_parent_activity); } bufprintf(" }"); + + return offset; +} + +static size_t +format_hex_data(char *prefix, char *desc, uint8_t *data, size_t data_len, + char *buf, size_t bufsiz, size_t offset) +{ + size_t i; + uint8_t chars[17]; + uint8_t *pc = data; + + if (desc) { + bufprintf("%s%s:\n", prefix, desc); + } + + ssize_t offset_in_row = -1; + for (i = 0; i < data_len; i++) { + offset_in_row = i % 16; + if (offset_in_row == 0) { + if (i != 0) { + bufprintf(" %s\n", chars); + } + bufprintf("%s %04lx ", prefix, i); + } + bufprintf(" %02x", pc[i]); + chars[offset_in_row] = (pc[i] < 0x20) || (pc[i] > 0x7e) ? '.' 
: pc[i]; + } + chars[offset_in_row + 1] = '\0'; + + if ((i % 16) != 0) { + while ((i % 16) != 0) { + bufprintf(" "); + i++; + } + bufprintf(" %s\n", chars); + } + return offset; +} + +static size_t +format_recipe_detail(mach_voucher_attr_recipe_t recipe, char *buf, + size_t bufsiz, size_t offset, char *prefix, size_t max_hex_data) +{ + bufprintprefix(); + bufprintf("Key: %u, ", recipe->key); + bufprintf("Command: %u, ", recipe->command); + bufprintf("Previous voucher: 0x%x, ", recipe->previous_voucher); + bufprintf("Content size: %u\n", recipe->content_size); + + switch (recipe->key) { + case MACH_VOUCHER_ATTR_KEY_ATM: + bufprintprefix(); + bufprintf("ATM ID: %llu", *(uint64_t *)(uintptr_t)recipe->content); + break; + case MACH_VOUCHER_ATTR_KEY_IMPORTANCE: + bufprintprefix(); + bufprintf("IMPORTANCE INFO: %s", (char *)recipe->content); + break; + case MACH_VOUCHER_ATTR_KEY_BANK: + bufprintprefix(); + bufprintf("RESOURCE ACCOUNTING INFO: %s", (char *)recipe->content); + break; + default: + offset = format_hex_data(prefix, "Recipe Contents", recipe->content, + MIN(recipe->content_size, max_hex_data), buf, bufsiz, offset); + break; + } + if (buf[offset - 1] != '\n') { + bufprintf("\n"); + } + return offset; +} + +size_t +voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, + size_t bufsiz, size_t offset, char *prefix, size_t max_hex_data) +{ + uint8_t voucher_contents[VOUCHER_CONTENTS_SIZE]; + bzero(voucher_contents, VOUCHER_CONTENTS_SIZE); + size_t recipe_size = VOUCHER_CONTENTS_SIZE; + unsigned v_kobject = 0; + unsigned v_kotype = 0; + + kern_return_t kr = mach_port_kernel_object(task, voucher, &v_kotype, + &v_kobject); + if (kr == KERN_SUCCESS && v_kotype == IKOT_VOUCHER) { + kr = mach_voucher_debug_info(task, voucher, + (mach_voucher_attr_raw_recipe_array_t)voucher_contents, + (mach_msg_type_number_t *)&recipe_size); + if (kr != KERN_SUCCESS && kr != KERN_NOT_SUPPORTED) { + bufprintprefix(); + bufprintf("Voucher: 0x%x Failed to get 
contents %s\n", v_kobject, + mach_error_string(kr)); + goto done; + } + + if (recipe_size == 0) { + bufprintprefix(); + bufprintf("Voucher: 0x%x has no contents\n", v_kobject); + goto done; + } + + bufprintprefix(); + bufprintf("Voucher: 0x%x\n", v_kobject); + unsigned int used_size = 0; + mach_voucher_attr_recipe_t recipe = NULL; + while (recipe_size > used_size) { + recipe = (mach_voucher_attr_recipe_t)&voucher_contents[used_size]; + if (recipe->key) { + offset = format_recipe_detail(recipe, buf, bufsiz, offset, + prefix, max_hex_data); + } + used_size += sizeof(mach_voucher_attr_recipe_data_t) + + recipe->content_size; + } + } else { + bufprintprefix(); + bufprintf("Invalid voucher: 0x%x\n", voucher); + } + +done: return offset; } diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 772c8c434..d1a5e70e8 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -177,7 +177,6 @@ typedef struct voucher_s { typedef struct voucher_hash_head_s { uintptr_t vhh_first; - uintptr_t vhh_last_ptr; } voucher_hash_head_s; DISPATCH_ALWAYS_INLINE @@ -243,7 +242,7 @@ typedef struct voucher_recipe_s { } voucher_recipe_s; #endif -#if TARGET_OS_EMBEDDED +#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #define VL_HASH_SIZE 64u // must be a power of two #else #define VL_HASH_SIZE 256u // must be a power of two @@ -262,7 +261,7 @@ typedef struct voucher_recipe_s { #define _dispatch_voucher_debug_machport(name) ((void)(name)) #endif -#if DISPATCH_USE_DTRACE +#if DISPATCH_USE_DTRACE_INTROSPECTION && defined(__APPLE__) // rdar://33642820 #define _voucher_trace(how, ...) 
({ \ if (unlikely(VOUCHER_##how##_ENABLED())) { \ VOUCHER_##how(__VA_ARGS__); \ @@ -576,11 +575,10 @@ _dispatch_voucher_ktrace(uint32_t code, voucher_t v, const void *container) DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - dispatch_queue_class_t dqu, dispatch_block_flags_t flags) + dispatch_block_flags_t flags) { voucher_t v = NULL; - (void)dqu; // _dispatch_continuation_voucher_set is never called for blocks with // private data or with the DISPATCH_BLOCK_HAS_VOUCHER flag set. // only _dispatch_continuation_init_slow handles this bit. @@ -594,16 +592,14 @@ _dispatch_continuation_voucher_set(dispatch_continuation_t dc, _dispatch_voucher_ktrace_dc_push(dc); } -static inline dispatch_queue_t _dispatch_queue_get_current(void); - DISPATCH_ALWAYS_INLINE static inline void _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, - voucher_t ov, uintptr_t dc_flags) + uintptr_t dc_flags) { voucher_t v = dc->dc_voucher; - dispatch_thread_set_self_t consume = (dc_flags & DISPATCH_OBJ_CONSUME_BIT); - dispatch_assert(DISPATCH_OBJ_CONSUME_BIT == DISPATCH_VOUCHER_CONSUME); + dispatch_thread_set_self_t consume = (dc_flags & DC_FLAG_CONSUME); + dispatch_assert(DC_FLAG_CONSUME == DISPATCH_VOUCHER_CONSUME); if (consume) { dc->dc_voucher = VOUCHER_INVALID; @@ -611,17 +607,6 @@ _dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, if (likely(v != DISPATCH_NO_VOUCHER)) { _dispatch_voucher_ktrace_dc_pop(dc, v); _dispatch_voucher_debug("continuation[%p] adopt", v, dc); - - if (likely(!(dc_flags & DISPATCH_OBJ_ENFORCE_VOUCHER))) { - if (unlikely(ov != DISPATCH_NO_VOUCHER && v != ov)) { - if (consume && v) _voucher_release(v); - consume = 0; - v = ov; - } - } - } else { - consume = 0; - v = ov; } (void)_dispatch_adopt_priority_and_set_voucher(dc->dc_priority, v, consume | DISPATCH_VOUCHER_REPLACE); @@ -759,17 +744,17 @@ _voucher_mach_msg_clear(mach_msg_header_t *msg, bool move_send) DISPATCH_ALWAYS_INLINE 
static inline void _dispatch_continuation_voucher_set(dispatch_continuation_t dc, - dispatch_queue_class_t dqu, dispatch_block_flags_t flags) + dispatch_block_flags_t flags) { - (void)dc; (void)dqu; (void)flags; + (void)dc; (void)flags; } DISPATCH_ALWAYS_INLINE static inline void -_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, voucher_t ov, +_dispatch_continuation_voucher_adopt(dispatch_continuation_t dc, uintptr_t dc_flags) { - (void)dc; (void)ov; (void)dc_flags; + (void)dc; (void)dc_flags; } #endif // VOUCHER_USE_MACH_VOUCHER diff --git a/tools/firehose_trace.lua b/tools/firehose_trace.lua new file mode 100755 index 000000000..90b969dec --- /dev/null +++ b/tools/firehose_trace.lua @@ -0,0 +1,83 @@ +#!/usr/local/bin/luatrace -s + +trace_codename = function(codename, callback) + local debugid = trace.debugid(codename) + if debugid ~= 0 then + trace.single(debugid,callback) + else + printf("WARNING: Cannot locate debugid for '%s'\n", codename) + end +end + +initial_timestamp = 0 +get_prefix = function(buf) + if initial_timestamp == 0 then + initial_timestamp = buf.timestamp + end + local secs = trace.convert_timestamp_to_nanoseconds(buf.timestamp - initial_timestamp) / 1000000000 + + local prefix + if trace.debugid_is_start(buf.debugid) then + prefix = "→" + elseif trace.debugid_is_end(buf.debugid) then + prefix = "â†" + else + prefix = "↔" + end + + local proc + proc = buf.command + + return string.format("%s %6.9f %-17s [%05d.%06x] %-35s", + prefix, secs, proc, buf.pid, buf.threadid, buf.debugname) +end + +decode_stream_state = function(state) + local reliable_waiters = "-" + if (state & 0x1) ~= 0 then + reliable_waiters = "R" + end + local unreliable_waiters = "-" + if (state & 0x2) ~= 0 then + unreliable_waiters = "U" + end + local allocator = state & 0x00000000fffffffc + local ref = (state & 0x000000ff00000000) >> 32 + local loss = (state & 0x00003f0000000000) >> 40 + local timestamped = "-" + if (state & 0x0000400000000000) ~= 0 then + 
timestamped = "T" + end + local waiting_for_logd = "-" + if (state & 0x0000800000000000) ~= 0 then + waiting_for_logd = "W" + end + local gen = (state & 0xffff000000000000) >> 48 + return string.format("[stream: alloc=0x%08x ref=%u loss=%u gen=%u %s%s%s%s]", + allocator, ref, loss, gen, reliable_waiters, unreliable_waiters, + timestamped, waiting_for_logd) +end + +trace_codename("DISPATCH_FIREHOSE_TRACE_reserver_gave_up", function(buf) + printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), + decode_stream_state(buf[4])) +end) + +trace_codename("DISPATCH_FIREHOSE_TRACE_reserver_wait", function(buf) + printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), + decode_stream_state(buf[4])) +end) + +trace_codename("DISPATCH_FIREHOSE_TRACE_allocator", function(buf) + printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), + decode_stream_state(buf[4])) +end) + +trace_codename("DISPATCH_FIREHOSE_TRACE_wait_for_logd", function(buf) + printf("%s %s\n", get_prefix(buf), decode_stream_state(buf[2])) +end) + +trace_codename("DISPATCH_FIREHOSE_TRACE_chunk_install", function(buf) + printf("%s %s -> %s, waited=%u\n", get_prefix(buf), decode_stream_state(buf[3]), + decode_stream_state(buf[4]), buf[2] >> 63) +end) diff --git a/xcodeconfig/libdispatch-resolved.xcconfig b/xcodeconfig/libdispatch-resolved.xcconfig index 2f2e273e1..2d509c5f4 100644 --- a/xcodeconfig/libdispatch-resolved.xcconfig +++ b/xcodeconfig/libdispatch-resolved.xcconfig @@ -18,7 +18,7 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -SUPPORTED_PLATFORMS = iphoneos appletvos watchos +SUPPORTED_PLATFORMS = iphoneos PRODUCT_NAME = libdispatch_$(DISPATCH_RESOLVED_VARIANT) OTHER_LDFLAGS = SKIP_INSTALL = YES diff --git a/xcodeconfig/libdispatch-up-static.xcconfig b/xcodeconfig/libdispatch-up-static.xcconfig deleted file mode 100644 index 170c5b356..000000000 --- a/xcodeconfig/libdispatch-up-static.xcconfig +++ /dev/null @@ -1,28 +0,0 @@ -// -// Copyright (c) 2012-2013 Apple Inc. 
All rights reserved. -// -// @APPLE_APACHE_LICENSE_HEADER_START@ -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// @APPLE_APACHE_LICENSE_HEADER_END@ -// - -// skip simulator -SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos -PRODUCT_NAME = libdispatch_up -BUILD_VARIANTS = normal -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_HW_CONFIG_UP=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) -OTHER_LDFLAGS = -SKIP_INSTALL = YES -EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean new file mode 100644 index 000000000..35476a711 --- /dev/null +++ b/xcodeconfig/libdispatch.clean @@ -0,0 +1,46 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +__dispatch_bug.last_seen +__dispatch_bug_deprecated.last_seen +__dispatch_bug_kevent_client.last_seen +__dispatch_bug_kevent_client.last_seen.37 +__dispatch_bug_kevent_client.last_seen.39 +__dispatch_bug_kevent_vanished.last_seen +__dispatch_bug_mach_client.last_seen + +__dispatch_build_pred +__dispatch_build + +__dispatch_child_of_unsafe_fork +__dispatch_continuation_cache_limit +__dispatch_data_empty +__dispatch_host_time_data.0 +__dispatch_host_time_data.1 +__dispatch_host_time_mach2nano +__dispatch_host_time_nano2mach +__dispatch_source_timer_use_telemetry +__dispatch_timers_force_max_leeway +__os_object_debug_missing_pools +_dispatch_benchmark_f.bdata +_dispatch_benchmark_f.pred +_dispatch_io_defaults +_dispatch_log_disabled +_dispatch_logfile diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty new file mode 100644 index 000000000..d8d1a0d6e --- /dev/null +++ b/xcodeconfig/libdispatch.dirty @@ -0,0 +1,153 @@ +# +# Copyright (c) 2013 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# + +# Must be kept in sync with ObjC TFB checks in object_internal.h + +# dispatch_object_t classes +_OBJC_CLASS_$_OS_dispatch_object +_OBJC_CLASS_$_OS_dispatch_semaphore +__OS_dispatch_semaphore_vtable +_OBJC_CLASS_$_OS_dispatch_group +__OS_dispatch_group_vtable +_OBJC_CLASS_$_OS_dispatch_queue +__OS_dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_workloop +__OS_dispatch_workloop_vtable +_OBJC_CLASS_$_OS_dispatch_queue_serial +__OS_dispatch_queue_serial_vtable +_OBJC_CLASS_$_OS_dispatch_queue_concurrent +__OS_dispatch_queue_concurrent_vtable +_OBJC_CLASS_$_OS_dispatch_queue_global +__OS_dispatch_queue_global_vtable +_OBJC_CLASS_$_OS_dispatch_queue_pthread_root +__OS_dispatch_queue_pthread_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_main +__OS_dispatch_queue_main_vtable +_OBJC_CLASS_$_OS_dispatch_queue_runloop +__OS_dispatch_queue_runloop_vtable +_OBJC_CLASS_$_OS_dispatch_queue_mgr +__OS_dispatch_queue_mgr_vtable +_OBJC_CLASS_$_OS_dispatch_queue_attr +__OS_dispatch_queue_attr_vtable +_OBJC_CLASS_$_OS_dispatch_source +__OS_dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_mach +__OS_dispatch_mach_vtable +_OBJC_CLASS_$_OS_dispatch_mach_msg +__OS_dispatch_mach_msg_vtable +_OBJC_CLASS_$_OS_dispatch_io +__OS_dispatch_io_vtable +_OBJC_CLASS_$_OS_dispatch_operation +__OS_dispatch_operation_vtable +_OBJC_CLASS_$_OS_dispatch_disk +__OS_dispatch_disk_vtable +# os_object_t classes +_OBJC_CLASS_$_OS_object +_OBJC_CLASS_$_OS_voucher +#_OBJC_CLASS_$_OS_voucher_recipe +# non-os_object_t classes +_OBJC_CLASS_$_OS_dispatch_data +_OBJC_CLASS_$_OS_dispatch_data_empty +# metaclasses +_OBJC_METACLASS_$_OS_dispatch_object +_OBJC_METACLASS_$_OS_dispatch_semaphore +_OBJC_METACLASS_$_OS_dispatch_group +_OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_workloop +_OBJC_METACLASS_$_OS_dispatch_queue_serial +_OBJC_METACLASS_$_OS_dispatch_queue_concurrent +_OBJC_METACLASS_$_OS_dispatch_queue_global 
+_OBJC_METACLASS_$_OS_dispatch_queue_pthread_root +_OBJC_METACLASS_$_OS_dispatch_queue_main +_OBJC_METACLASS_$_OS_dispatch_queue_runloop +_OBJC_METACLASS_$_OS_dispatch_queue_mgr +_OBJC_METACLASS_$_OS_dispatch_queue_attr +_OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_mach +_OBJC_METACLASS_$_OS_dispatch_mach_msg +_OBJC_METACLASS_$_OS_dispatch_io +_OBJC_METACLASS_$_OS_dispatch_operation +_OBJC_METACLASS_$_OS_dispatch_disk +_OBJC_METACLASS_$_OS_object +_OBJC_METACLASS_$_OS_voucher +#_OBJC_METACLASS_$_OS_voucher_recipe +_OBJC_METACLASS_$_OS_dispatch_data +_OBJC_METACLASS_$_OS_dispatch_data_empty + +# Other dirty symbols +# large structs / hashes +__dispatch_main_q +__dispatch_mgr_q +__dispatch_mgr_sched +__dispatch_root_queues +__dispatch_sources +__dispatch_timers_heap +__dispatch_trace_next_timer +__voucher_hash + +# 64 bits +__dispatch_narrow_check_interval_cache +__dispatch_narrowing_deadlines +__voucher_aid_next +__voucher_unique_pid + +# pointer sized +__dispatch_begin_NSAutoReleasePool +__dispatch_continuation_alloc_init_pred +__dispatch_end_NSAutoReleasePool +__dispatch_is_daemon_pred +__dispatch_kq_poll_pred +__dispatch_logv_pred +__dispatch_mach_calendar_pred +__dispatch_mach_host_port_pred +__dispatch_mach_notify_port_pred +__dispatch_mach_xpc_hooks +__dispatch_main_heap +__dispatch_main_q_handle_pred +__dispatch_mgr_sched_pred +__dispatch_queue_serial_numbers +__dispatch_root_queues_pred +__dispatch_source_timer_telemetry_pred +__firehose_task_buffer +__firehose_task_buffer_pred +__voucher_activity_debug_channel +__voucher_libtrace_hooks +__voucher_task_mach_voucher_pred + +# 32bits +__dispatch_mach_host_port +__dispatch_mach_notify_port +__voucher_default_task_mach_voucher +__voucher_hash_lock +__voucher_task_mach_voucher + +# byte-sized +__dispatch_is_daemon +__dispatch_memory_warn +__dispatch_mode +__dispatch_program_is_probably_callback_driven +__dispatch_unsafe_fork +__dispatch_use_dispatch_alloc + +__dispatch_io_devs 
+__dispatch_io_fds +__dispatch_io_devs_lockq +__dispatch_io_fds_lockq +__dispatch_io_init_pred diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 9642ca4dd..b586837d5 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -28,20 +28,22 @@ _OBJC_CLASS_$_OS_dispatch_group __OS_dispatch_group_vtable _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable +_OBJC_CLASS_$_OS_dispatch_workloop +__OS_dispatch_workloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent __OS_dispatch_queue_concurrent_vtable -_OBJC_CLASS_$_OS_dispatch_queue_root -__OS_dispatch_queue_root_vtable +_OBJC_CLASS_$_OS_dispatch_queue_global +__OS_dispatch_queue_global_vtable +_OBJC_CLASS_$_OS_dispatch_queue_pthread_root +__OS_dispatch_queue_pthread_root_vtable _OBJC_CLASS_$_OS_dispatch_queue_main __OS_dispatch_queue_main_vtable _OBJC_CLASS_$_OS_dispatch_queue_runloop __OS_dispatch_queue_runloop_vtable _OBJC_CLASS_$_OS_dispatch_queue_mgr __OS_dispatch_queue_mgr_vtable -_OBJC_CLASS_$_OS_dispatch_queue_specific_queue -__OS_dispatch_queue_specific_queue_vtable _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source @@ -68,13 +70,14 @@ _OBJC_METACLASS_$_OS_dispatch_object _OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_METACLASS_$_OS_dispatch_group _OBJC_METACLASS_$_OS_dispatch_queue +_OBJC_METACLASS_$_OS_dispatch_workloop _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent -_OBJC_METACLASS_$_OS_dispatch_queue_root +_OBJC_METACLASS_$_OS_dispatch_queue_global +_OBJC_METACLASS_$_OS_dispatch_queue_pthread_root _OBJC_METACLASS_$_OS_dispatch_queue_main _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr -_OBJC_METACLASS_$_OS_dispatch_queue_specific_queue _OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_METACLASS_$_OS_dispatch_source _OBJC_METACLASS_$_OS_dispatch_mach 
diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 643e1d38b..2c825f7f6 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -73,13 +73,14 @@ CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES CLANG_WARN_SUSPICIOUS_MOVE = YES CLANG_WARN_UNREACHABLE_CODE = YES +CLANG_WARN_UNGUARDED_AVAILABILITY = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s GCC_NO_COMMON_BLOCKS = YES GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 -WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunguarded-availability -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS) -NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla +WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal 
-Widiomatic-parentheses -Wignored-qualifiers -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS) +NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla -Wno-unguarded-availability-new OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS) OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer @@ -89,7 +90,14 @@ GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) SIM_SUFFIX[sdk=*simulator*] = _sim DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -lunwind -OBJC_LDFLAGS = -Wl,-upward-lobjc -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order +OBJC_LDFLAGS = -Wl,-upward-lobjc +LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin +LIBDARWIN_LDFLAGS[sdk=*simulator*] = +ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty +ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) 
$(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) OTHER_MIGFLAGS = -novouchers + +COPY_HEADERS_RUN_UNIFDEF = YES +COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__ diff --git a/xcodescripts/check-order.sh b/xcodescripts/check-order.sh new file mode 100644 index 000000000..62133485d --- /dev/null +++ b/xcodescripts/check-order.sh @@ -0,0 +1,71 @@ +#! /bin/bash -e + +test "$ACTION" = install || exit 0 + +list_objc_syms () +{ + nm -arch $1 -nU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep _OBJC | cut -d' ' -f3 +} + +list_mutable_data_syms () +{ + nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib |grep __DATA|egrep -v '(__const|__crash_info)'|sed 's/^.* //' +} + +list_objc_order () +{ + grep '^_OBJC' "${SCRIPT_INPUT_FILE_0}" +} + +list_dirty_order () +{ + grep '^[^#]' "${SCRIPT_INPUT_FILE_1}" +} + +list_clean_order () +{ + grep '^[^#]' "${SCRIPT_INPUT_FILE_2}" +} + +fail= + +case "$PLATFORM_NAME" in + *simulator) exit 0;; + *) ;; +esac + +if comm -12 <(list_dirty_order | sort) <(list_clean_order | sort) | grep .; then + echo 1>&2 "error: *** SYMBOLS CAN'T BE BOTH CLEAN AND DIRTY ***" + comm 1>&2 -12 <(list_dirty_order | sort) <(list_clean_order | sort) + fail=t +fi + +for arch in $ARCHS; do + if test "$PLATFORM_NAME" = macosx -a "$arch" = i386; then + continue + fi + + if list_mutable_data_syms $arch | sort | uniq -c | grep -qvw 1; then + echo 1>&2 "error: *** DUPLICATED SYMBOL NAMES FOR SLICE $arch ***" + list_mutable_data_syms $arch | sort | uniq -c | grep -qw 1 1>&2 + fail=t + fi + + if comm -23 <(list_mutable_data_syms $arch | sort) <((list_dirty_order; list_clean_order) | sort) | grep -q .; then + echo 1>&2 "error: *** SYMBOLS NOT MARKED CLEAN OR DIRTY FOR SLICE $arch ***" + comm 1>&2 -23 <(list_mutable_data_syms $arch | sort) 
<((list_dirty_order; list_clean_order) | sort) + fail=t + fi + + if comm -13 <(list_mutable_data_syms $arch | sort) <((list_dirty_order; list_clean_order) | sort) | grep -q .; then + echo 1>&2 "warning: *** Found unknown symbols in dirty/clean files for slice $arch ***" + comm 1>&2 -13 <(list_mutable_data_syms $arch | sort) <((list_dirty_order; list_clean_order) | sort) + fi + + if ! cmp -s <(list_objc_syms $arch) <(list_objc_order); then + echo 1>&2 "error: *** SYMBOL ORDER IS NOT WHAT IS EXPECTED FOR SLICE $arch ***" + diff 1>&2 -U100 <(list_objc_syms $arch) <(list_objc_order) || fail=t + fi +done + +test -z "$fail" diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh index 003e9f218..bd477c027 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -22,6 +22,11 @@ export MIGCC="$(xcrun -find cc)" export MIGCOM="$(xcrun -find migcom)" export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}" + +for p in ${HEADER_SEARCH_PATHS}; do + OTHER_MIGFLAGS="${OTHER_MIGFLAGS} -I${p}" +done + for a in ${ARCHS}; do xcrun mig ${OTHER_MIGFLAGS} -arch $a -header "${SCRIPT_OUTPUT_FILE_0}" \ -sheader "${SCRIPT_OUTPUT_FILE_1}" -user /dev/null \ From 1bb84cfa3130206e6959d00617cccce4e4d3eff0 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Fri, 31 Jan 2020 02:05:50 +0000 Subject: [PATCH 11/18] libdispatch-1173.0.3 Imported from libdispatch-1173.0.3.tar.gz --- .gitmodules | 0 CMakeLists.txt | 177 ++- INSTALL.md | 2 +- Makefile.am | 29 - PATCHES | 60 + autogen.sh | 2 - cmake/config.h.in | 19 +- cmake/modules/ClangClCompileRules.cmake | 5 + cmake/modules/DispatchCompilerWarnings.cmake | 37 +- cmake/modules/DispatchUtilities.cmake | 15 + cmake/modules/DispatchWindowsSupport.cmake | 74 ++ cmake/modules/FindBlocksRuntime.cmake | 48 + cmake/modules/SwiftSupport.cmake | 41 +- config/config.h | 2 + configure.ac | 526 -------- dispatch/CMakeLists.txt | 4 +- 
dispatch/Makefile.am | 28 - dispatch/base.h | 94 +- dispatch/block.h | 2 +- dispatch/data.h | 5 - dispatch/dispatch.h | 13 +- dispatch/io.h | 4 + dispatch/object.h | 70 +- dispatch/once.h | 2 +- dispatch/queue.h | 184 ++- dispatch/source.h | 17 +- dispatch/workloop.h | 140 ++ libdispatch.xcodeproj/project.pbxproj | 488 ++++++- m4/atomic.m4 | 21 - m4/blocks.m4 | 132 -- m4/pkg.m4 | 155 --- man/Makefile.am | 152 --- os/CMakeLists.txt | 4 +- os/Makefile.am | 18 - os/firehose_server_private.h | 36 +- os/{linux_base.h => generic_unix_base.h} | 15 +- os/generic_win_base.h | 132 ++ os/object.h | 13 +- os/object_private.h | 33 +- os/voucher_activity_private.h | 21 +- os/voucher_private.h | 122 +- private/CMakeLists.txt | 14 + private/Makefile.am | 15 - private/channel_private.h | 567 +++++++++ private/data_private.h | 29 +- private/layout_private.h | 2 - private/mach_private.h | 144 ++- private/private.h | 25 +- private/queue_private.h | 51 +- private/source_private.h | 34 +- private/workloop_private.h | 256 +--- src/BlocksRuntime/Block.h | 4 +- src/BlocksRuntime/Block_private.h | 8 +- src/BlocksRuntime/data.c | 4 +- src/BlocksRuntime/runtime.c | 53 +- src/CMakeLists.txt | 94 +- src/Makefile.am | 200 --- src/allocator_internal.h | 2 +- src/apply.c | 2 +- src/block.cpp | 17 +- src/data.c | 4 +- src/data.m | 9 +- src/data_internal.h | 2 +- src/event/event.c | 4 +- src/event/event_config.h | 48 +- src/event/event_epoll.c | 5 +- src/event/event_internal.h | 8 +- src/event/event_kevent.c | 1131 ++++++++++++++++- src/event/event_windows.c | 117 ++ src/event/workqueue.c | 8 + src/firehose/firehose.defs | 48 +- src/firehose/firehose_buffer.c | 48 +- src/firehose/firehose_buffer_internal.h | 3 + src/firehose/firehose_inline_internal.h | 9 +- src/firehose/firehose_reply.defs | 26 +- src/firehose/firehose_server.c | 176 ++- src/firehose/firehose_server_internal.h | 11 +- src/firehose/firehose_server_object.m | 13 +- src/firehose/firehose_types.defs | 1 + src/init.c | 101 +- 
src/inline_internal.h | 96 +- src/internal.h | 141 +- src/introspection.c | 19 +- src/io.c | 191 ++- src/io_internal.h | 10 + src/libdispatch.plist | 99 -- src/mach.c | 514 ++++++-- src/mach_internal.h | 6 +- src/object.c | 66 +- src/object.m | 76 +- src/object_internal.h | 31 +- src/protocol.defs | 3 + src/queue.c | 1090 ++++++++++++++-- src/queue_internal.h | 110 +- src/semaphore.c | 4 +- src/semaphore_internal.h | 2 + src/shims.c | 35 + src/shims.h | 42 +- src/shims/android_stubs.h | 4 +- src/shims/atomic.h | 14 +- src/shims/atomic_sfb.h | 6 +- src/shims/generic_sys_queue.h | 92 ++ src/shims/generic_win_stubs.c | 24 + src/shims/generic_win_stubs.h | 37 + src/shims/getprogname.c | 72 ++ src/shims/getprogname.h | 7 + src/shims/hw_config.h | 87 +- src/shims/linux_stubs.c | 57 - src/shims/linux_stubs.h | 66 - src/shims/lock.c | 132 +- src/shims/lock.h | 25 +- src/shims/mach.h | 45 + src/shims/target.h | 2 +- src/shims/time.h | 34 +- src/shims/tsd.h | 38 +- src/shims/yield.c | 61 + src/shims/yield.h | 32 +- src/source.c | 108 +- src/source_internal.h | 27 +- src/swift/Block.swift | 24 +- src/swift/Data.swift | 70 +- src/swift/Dispatch.swift | 44 +- src/swift/DispatchStubs.cc | 172 +-- src/swift/IO.swift | 20 +- src/swift/Private.swift | 4 +- src/swift/Queue.swift | 233 +++- src/swift/Source.swift | 123 +- src/swift/Time.swift | 5 +- src/swift/Wrapper.swift | 21 +- src/transform.c | 19 +- src/voucher.c | 241 +++- src/voucher_internal.h | 4 +- tools/firehose_trace.lua | 83 -- xcodeconfig/libdispatch-dyld-stub.xcconfig | 2 +- .../libdispatch-introspection.xcconfig | 2 +- xcodeconfig/libdispatch-mp-static.xcconfig | 2 +- xcodeconfig/libdispatch.aliases | 4 + xcodeconfig/libdispatch.clean | 5 +- xcodeconfig/libdispatch.dirty | 3 + xcodeconfig/libdispatch.order | 3 + xcodeconfig/libdispatch.xcconfig | 106 +- xcodeconfig/libfirehose.xcconfig | 2 +- xcodeconfig/libfirehose_kernel.xcconfig | 12 +- xcodescripts/check-order.sh | 29 +- xcodescripts/mig-headers.sh | 1 + 145 files 
changed, 7527 insertions(+), 3447 deletions(-) delete mode 100644 .gitmodules delete mode 100644 Makefile.am delete mode 100755 autogen.sh create mode 100644 cmake/modules/ClangClCompileRules.cmake create mode 100644 cmake/modules/DispatchUtilities.cmake create mode 100644 cmake/modules/DispatchWindowsSupport.cmake create mode 100644 cmake/modules/FindBlocksRuntime.cmake delete mode 100644 configure.ac delete mode 100644 dispatch/Makefile.am create mode 100644 dispatch/workloop.h delete mode 100644 m4/atomic.m4 delete mode 100644 m4/blocks.m4 delete mode 100644 m4/pkg.m4 delete mode 100644 man/Makefile.am delete mode 100644 os/Makefile.am rename os/{linux_base.h => generic_unix_base.h} (89%) create mode 100644 os/generic_win_base.h delete mode 100644 private/Makefile.am create mode 100644 private/channel_private.h delete mode 100644 src/Makefile.am create mode 100644 src/event/event_windows.c delete mode 100644 src/libdispatch.plist create mode 100644 src/shims.c create mode 100644 src/shims/generic_sys_queue.h create mode 100644 src/shims/generic_win_stubs.c create mode 100644 src/shims/generic_win_stubs.h create mode 100644 src/shims/getprogname.c delete mode 100644 src/shims/linux_stubs.c delete mode 100644 src/shims/linux_stubs.h create mode 100644 src/shims/mach.h create mode 100644 src/shims/yield.c delete mode 100755 tools/firehose_trace.lua diff --git a/.gitmodules b/.gitmodules deleted file mode 100644 index e69de29bb..000000000 diff --git a/CMakeLists.txt b/CMakeLists.txt index ef736629d..89e985951 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -8,9 +8,17 @@ project(dispatch LANGUAGES C CXX) enable_testing() -set(CMAKE_C_VISIBILITY_PRESET hidden) +if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + include(ClangClCompileRules) +endif() + +set(CMAKE_C_STANDARD 11) +set(CMAKE_C_STANDARD_REQUIRED YES) + set(CMAKE_CXX_STANDARD 11) +set(CMAKE_C_VISIBILITY_PRESET hidden) + set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) 
find_package(Threads REQUIRED) @@ -21,8 +29,11 @@ include(CheckIncludeFiles) include(CheckLibraryExists) include(CheckSymbolExists) include(GNUInstallDirs) +include(SwiftSupport) +include(DispatchUtilities) -set(WITH_BLOCKS_RUNTIME "" CACHE PATH "Path to blocks runtime") +set(SWIFT_LIBDIR "lib" CACHE PATH "Library folder name, defined by swift main buildscript") +set(INSTALL_LIBDIR "${SWIFT_LIBDIR}" CACHE PATH "Path where the libraries should be installed") include(DispatchAppleOptions) include(DispatchSanitization) @@ -43,7 +54,9 @@ if(ENABLE_SWIFT) get_filename_component(SWIFT_TOOLCHAIN ${SWIFT_TOOLCHAIN} DIRECTORY) string(TOLOWER ${CMAKE_SYSTEM_NAME} SWIFT_OS) - set(SWIFT_RUNTIME_LIBDIR ${SWIFT_TOOLCHAIN}/lib/swift/${SWIFT_OS}/${CMAKE_SYSTEM_PROCESSOR}) + get_swift_host_arch(SWIFT_HOST_ARCH) + + set(SWIFT_RUNTIME_LIBDIR ${SWIFT_TOOLCHAIN}/${SWIFT_LIBDIR}/swift/${SWIFT_OS}/${SWIFT_HOST_ARCH}) add_library(swiftCore SHARED IMPORTED GLOBAL) @@ -58,25 +71,44 @@ if(ENABLE_SWIFT) PROPERTIES IMPORTED_LOCATION ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftSwiftOnoneSupport${CMAKE_SHARED_LIBRARY_SUFFIX}) + + set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/swift/${SWIFT_OS}" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/swift/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") + set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/swift/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") + set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/swift/os" CACHE PATH "Path where the os/ headers will be installed") endif() +if(NOT ENABLE_SWIFT) + set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "include/dispatch" CACHE PATH "Path where the headers will be installed") + set(INSTALL_BLOCK_HEADERS_DIR "include" CACHE PATH "Path where the headers will be installed for the 
blocks runtime") + set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") +endif() + +option(ENABLE_DTRACE "enable dtrace support" "") + option(BUILD_SHARED_LIBS "build shared libraries" ON) option(ENABLE_TESTING "build libdispatch tests" ON) -if(CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL Android) - set(USE_GOLD_LINKER_DEFAULT ON) +option(USE_LLD_LINKER "use the lld linker" FALSE) + +if(NOT USE_LLD_LINKER AND + (CMAKE_SYSTEM_NAME STREQUAL Linux OR + CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR + CMAKE_SYSTEM_NAME STREQUAL Android)) + set(USE_GOLD_LINKER_DEFAULT TRUE) else() - set(USE_GOLD_LINKER_DEFAULT OFF) + set(USE_GOLD_LINKER_DEFAULT FALSE) endif() option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT}) -option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via __thread" ON) +option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) if(CMAKE_SYSTEM_NAME STREQUAL Linux OR CMAKE_SYSTEM_NAME STREQUAL Android OR + CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR CMAKE_SYSTEM_NAME STREQUAL Windows) set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) else() @@ -97,13 +129,16 @@ else() endif() endif() -if(CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL Android OR - CMAKE_SYSTEM_NAME STREQUAL Windows) +option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) + +find_package(BlocksRuntime QUIET) +if(NOT BlocksRuntime_FOUND) + set(BlocksRuntime_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/src/BlocksRuntime) + add_library(BlocksRuntime STATIC - ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/data.c - ${CMAKE_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) set_target_properties(BlocksRuntime PROPERTIES POSITION_INDEPENDENT_CODE TRUE) @@ -112,10 +147,19 @@ 
if(CMAKE_SYSTEM_NAME STREQUAL Linux OR PROPERTIES INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS}) endif() - set(WITH_BLOCKS_RUNTIME "${CMAKE_SOURCE_DIR}/src/BlocksRuntime" CACHE PATH "Path to blocks runtime" FORCE) -else() - # TODO(compnerd) support system installed BlocksRuntime - # find_package(BlocksRuntime REQUIRED) + + add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) + + install(FILES + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block.h + DESTINATION + "${INSTALL_BLOCK_HEADERS_DIR}") + if(INSTALL_PRIVATE_HEADERS) + install(FILES + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block_private.h + DESTINATION + "${INSTALL_BLOCK_HEADERS_DIR}") + endif() endif() check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) @@ -141,6 +185,7 @@ check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) +check_function_exists(arc4random HAVE_ARC4RANDOM) if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) include(FindPkgConfig) @@ -163,10 +208,8 @@ check_include_files("libproc_internal.h" HAVE_LIBPROC_INTERNAL_H) check_include_files("mach/mach.h" HAVE_MACH) if(HAVE_MACH) set(__DARWIN_NON_CANCELABLE 1) - set(USE_MACH_SEM 1) else() set(__DARWIN_NON_CANCELABLE 0) - set(USE_MACH_SEM 0) endif() check_include_files("malloc/malloc.h" HAVE_MALLOC_MALLOC_H) check_include_files("memory.h" HAVE_MEMORY_H) @@ -182,19 +225,27 @@ check_include_files("strings.h" HAVE_STRINGS_H) check_include_files("sys/guarded.h" HAVE_SYS_GUARDED_H) check_include_files("sys/stat.h" HAVE_SYS_STAT_H) check_include_files("sys/types.h" HAVE_SYS_TYPES_H) -check_include_files("unistd.h" HAVE_UNISTD_H) check_include_files("objc/objc-internal.h" HAVE_OBJC) -check_library_exists(pthread sem_init "" USE_POSIX_SEM) +if(HAVE_MACH) + set(USE_MACH_SEM 1) +else() + set(USE_MACH_SEM 0) +endif() if(CMAKE_SYSTEM_NAME STREQUAL Windows) - add_definitions(-DTARGET_OS_WIN32) 
add_definitions(-DUSE_WIN32_SEM) endif() +check_library_exists(pthread sem_init "" USE_POSIX_SEM) +# NOTE: android has not always provided a libpthread, but uses the pthreads API +if(CMAKE_SYSTEM_NAME STREQUAL Android) + set(USE_POSIX_SEM 1) +endif() check_symbol_exists(CLOCK_UPTIME "time.h" HAVE_DECL_CLOCK_UPTIME) check_symbol_exists(CLOCK_UPTIME_FAST "time.h" HAVE_DECL_CLOCK_UPTIME_FAST) check_symbol_exists(CLOCK_MONOTONIC "time.h" HAVE_DECL_CLOCK_MONOTONIC) check_symbol_exists(CLOCK_REALTIME "time.h" HAVE_DECL_CLOCK_REALTIME) +check_symbol_exists(CLOCK_MONOTONIC_COARSE "time.h" HAVE_DECL_CLOCK_MONOTONIC_COARSE) check_symbol_exists(FD_COPY "sys/select.h" HAVE_DECL_FD_COPY) check_symbol_exists(NOTE_LOWAT "sys/event.h" HAVE_DECL_NOTE_LOWAT) check_symbol_exists(NOTE_NONE "sys/event.h" HAVE_DECL_NOTE_NONE) @@ -208,11 +259,34 @@ check_symbol_exists(VQ_NEARLOWDISK "sys/mount.h" HAVE_DECL_VQ_NEARLOWDISK) check_symbol_exists(VQ_QUOTA "sys/mount.h" HAVE_DECL_VQ_QUOTA) check_symbol_exists(VQ_UPDATE "sys/mount.h" HAVE_DECL_VQ_UPDATE) check_symbol_exists(VQ_VERYLOWDISK "sys/mount.h" HAVE_DECL_VQ_VERYLOWDISK) - +check_symbol_exists(VQ_FREE_SPACE_CHANGE "sys/mount.h" HAVE_DECL_VQ_FREE_SPACE_CHANGE) +check_symbol_exists(strlcpy "string.h" HAVE_STRLCPY) check_symbol_exists(program_invocation_name "errno.h" HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME) +if (HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME) + add_definitions(-D_GNU_SOURCE=1) +endif() +check_symbol_exists(__printflike "bsd/sys/cdefs.h" HAVE_PRINTFLIKE) -find_program(dtrace_EXECUTABLE dtrace) -if(dtrace_EXECUTABLE) +if(CMAKE_SYSTEM_NAME STREQUAL Android) + set(ENABLE_DTRACE_DEFAULT OFF) +endif() + +if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD) + add_definitions(-D_WITH_DPRINTF) +endif() + +if(ENABLE_DTRACE STREQUAL "") + find_program(dtrace_EXECUTABLE dtrace) + if(dtrace_EXECUTABLE) + add_definitions(-DDISPATCH_USE_DTRACE=1) + else() + add_definitions(-DDISPATCH_USE_DTRACE=0) + endif() +elseif(ENABLE_DTRACE) + 
find_program(dtrace_EXECUTABLE dtrace) + if(NOT dtrace_EXECUTABLE) + message(FATAL_ERROR "dtrace not found but explicitly requested") + endif() add_definitions(-DDISPATCH_USE_DTRACE=1) else() add_definitions(-DDISPATCH_USE_DTRACE=0) @@ -224,24 +298,47 @@ if(leaks_EXECUTABLE) endif() if(CMAKE_SYSTEM_NAME STREQUAL Darwin) - add_custom_target(module-map-symlinks - ALL - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/private/darwin/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") + add_custom_command(OUTPUT + "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "${PROJECT_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") +elseif(CMAKE_SYSTEM_NAME STREQUAL Windows) + add_custom_command(OUTPUT + "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "${PROJECT_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") else() - add_custom_target(module-map-symlinks - ALL - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/dispatch/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${CMAKE_SOURCE_DIR}/private/generic/module.modulemap" "${CMAKE_SOURCE_DIR}/private/module.modulemap") -endif() -configure_file("${CMAKE_SOURCE_DIR}/cmake/config.h.in" - 
"${CMAKE_BINARY_DIR}/config/config_ac.h") + add_custom_command(OUTPUT + "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "${PROJECT_SOURCE_DIR}/private/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + COMMAND + ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") +endif() +add_custom_target(module-map-symlinks + DEPENDS + "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "${PROJECT_SOURCE_DIR}/private/module.modulemap") +configure_file("${PROJECT_SOURCE_DIR}/cmake/config.h.in" + "${PROJECT_BINARY_DIR}/config/config_ac.h") add_definitions(-DHAVE_CONFIG_H) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + include(DispatchWindowsSupport) + dispatch_windows_arch_spelling(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_MSVC_ARCH) + dispatch_windows_include_for_arch(${DISPATCH_MSVC_ARCH} DISPATCH_INCLUDES) + include_directories(BEFORE SYSTEM ${DISPATCH_INCLUDES}) + dispatch_windows_lib_for_arch(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_LIBDIR) + link_directories(${DISPATCH_LIBDIR}) +endif() + add_subdirectory(dispatch) add_subdirectory(man) add_subdirectory(os) diff --git a/INSTALL.md b/INSTALL.md index a426bcf30..0f4fcf3e6 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -130,7 +130,7 @@ Typical configuration line for FreeBSD 8.x and 9.x to build libdispatch with clang and blocks support: ``` - cmake -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DWITH_BLOCKS_RUNTIME=/usr/local/lib + cmake -G Ninja -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DBlocksRuntime_INCLUDE_DIR=/usr/local/include -DBlocksRuntime_LIBRARIES=/usr/local/lib/libBlocksRuntime.so ninja ninja test ``` diff --git a/Makefile.am b/Makefile.am deleted file mode 100644 index f1be02951..000000000 --- a/Makefile.am +++ /dev/null @@ -1,29 +0,0 @@ -# -# -# - -ACLOCAL_AMFLAGS = -I m4 - -if BUILD_TESTS - 
MAYBE_TESTS = tests -endif - -SUBDIRS= \ - dispatch \ - man \ - os \ - private \ - src \ - $(MAYBE_TESTS) - -EXTRA_DIST= \ - README.md \ - LICENSE \ - PATCHES \ - autogen.sh \ - config/config.h \ - libdispatch.xcodeproj \ - resolver \ - tools \ - xcodeconfig \ - xcodescripts diff --git a/PATCHES b/PATCHES index 47db8f3a3..b4483135a 100644 --- a/PATCHES +++ b/PATCHES @@ -374,3 +374,63 @@ github commits starting with 29bdc2f from [b7f1beb] APPLIED rdar://35017478 [7ef9cde] APPLIED rdar://35017478 [12c9ca8] APPLIED rdar://35017478 +[6d6dc2e] APPLIED rdar://40252515 +[4a9833d] APPLIED rdar://40252515 +[f88e382] APPLIED rdar://40252515 +[bfa9aa7] APPLIED rdar://40252515 +[44f3640] APPLIED rdar://40252515 +[3b06f54] APPLIED rdar://40252515 +[e245cbe] APPLIED rdar://40252515 +[2a539d6] APPLIED rdar://40252515 +[e52c174] APPLIED rdar://40252515 +[723bd98] APPLIED rdar://40252515 +[7e7a579] APPLIED rdar://40252515 +[244a5fe] APPLIED rdar://40252515 +[8b72f76] APPLIED rdar://40252515 +[f3531a2] APPLIED rdar://40252515 +[5cf8acb] APPLIED rdar://40252515 +[dc01e36] APPLIED rdar://40252515 +[2d6d1fd] APPLIED rdar://40252515 +[fdd671d] APPLIED rdar://40252515 +[698220e] APPLIED rdar://40252515 +[9c792ac] APPLIED rdar://40252515 +[b5ec5d8] APPLIED rdar://40252515 +[9295346] APPLIED rdar://40252515 +[bbf03ca] APPLIED rdar://40252515 +[8d3aa22] APPLIED rdar://40252515 +[f151b33] APPLIED rdar://40252515 +[f6e6917] APPLIED rdar://40252515 +[f83b5a4] APPLIED rdar://40252515 +[c4d6402] APPLIED rdar://40252515 +[1457de8] APPLIED rdar://40252515 +[c025baa] APPLIED rdar://40252515 +[a618b46] APPLIED rdar://40252515 +[e723a8e] APPLIED rdar://44568645 +[4ac77b7] APPLIED rdar://44568645 +[03696d7] APPLIED rdar://44568645 +[44f67b2] APPLIED rdar://44568645 +[b15ee59] APPLIED rdar://44568645 +[d29ed37] APPLIED rdar://44568645 +[65ebc0c] APPLIED rdar://44568645 +[93c64d8] APPLIED rdar://44568645 +[1271df6] APPLIED rdar://44568645 +[84ac6ac] APPLIED rdar://44568645 +[30d3c8c] APPLIED 
rdar://44568645 +[12ff819] APPLIED rdar://44568645 +[82342ee] APPLIED rdar://44568645 +[b13a51e] APPLIED rdar://44568645 +[6bf3065] APPLIED rdar://44568645 +[631821c] APPLIED rdar://44568645 +[e764f34] APPLIED rdar://44568645 +[ff1daf8] APPLIED rdar://44568645 +[b863538] APPLIED rdar://44568645 +[ba3933d] APPLIED rdar://44568645 +[9c48a80] APPLIED rdar://44568645 +[5f49e8b] APPLIED rdar://44568645 +[653a523] APPLIED rdar://44568645 +[ac5f4c4] APPLIED rdar://44568645 +[57139c6] APPLIED rdar://44568645 +[ba74b6a] APPLIED rdar://44568645 +[3975b58] APPLIED rdar://44568645 +[81dc900] APPLIED rdar://44568645 +[6162a1d] APPLIED rdar://44568645 diff --git a/autogen.sh b/autogen.sh deleted file mode 100755 index 3ebda4225..000000000 --- a/autogen.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -autoreconf -fvi diff --git a/cmake/config.h.in b/cmake/config.h.in index 00e2bbcdd..0709c254c 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -2,7 +2,7 @@ /* Define if building pthread work queues from source */ #cmakedefine01 DISPATCH_USE_INTERNAL_WORKQUEUE -/* Enable usage of thread local storage via __thread */ +/* Enable usage of thread local storage via _Thread_local */ #cmakedefine01 DISPATCH_USE_THREAD_LOCAL_STORAGE /* Define to 1 if you have the declaration of `CLOCK_MONOTONIC', and to 0 if @@ -21,6 +21,10 @@ you don't. */ #cmakedefine01 HAVE_DECL_CLOCK_UPTIME_FAST +/* Define to 1 if you have the declaration of `CLOCK_MONOTONIC_COARSE', and to + 0 if you don't. */ +#cmakedefine01 HAVE_CLOCK_MONOTONIC_COARSE + /* Define to 1 if you have the declaration of `FD_COPY', and to 0 if you don't. */ #cmakedefine01 HAVE_DECL_FD_COPY @@ -77,6 +81,10 @@ you don't. */ #cmakedefine01 HAVE_DECL_VQ_VERYLOWDISK +/* Define to 1 if you have the declaration of `VQ_FREE_SPACE_CHANGE', and to 0 if + you don't. */ +#cmakedefine01 HAVE_DECL_VQ_FREE_SPACE_CHANGE + /* Define to 1 if you have the header file. 
*/ #cmakedefine01 HAVE_DLFCN_H @@ -135,10 +143,10 @@ #cmakedefine HAVE_PTHREAD_MACHDEP_H /* Define to 1 if you have the `pthread_main_np' function. */ -#cmakedefine HAVE_PTHREAD_MAIN_NP +#cmakedefine01 HAVE_PTHREAD_MAIN_NP /* Define to 1 if you have the header file. */ -#cmakedefine HAVE_PTHREAD_NP_H +#cmakedefine01 HAVE_PTHREAD_NP_H /* Define to 1 if you have the header file. */ #cmakedefine HAVE_PTHREAD_QOS_H @@ -185,9 +193,6 @@ /* Define to 1 if you have the header file. */ #cmakedefine HAVE_TARGETCONDITIONALS_H -/* Define to 1 if you have the header file. */ -#cmakedefine01 HAVE_UNISTD_H - /* Define to 1 if you have the `_pthread_workqueue_init' function. */ #cmakedefine HAVE__PTHREAD_WORKQUEUE_INIT @@ -209,7 +214,7 @@ #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE -#cmakedefine01 _GNU_SOURCE +#cmakedefine _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS diff --git a/cmake/modules/ClangClCompileRules.cmake b/cmake/modules/ClangClCompileRules.cmake new file mode 100644 index 000000000..0265d5ea7 --- /dev/null +++ b/cmake/modules/ClangClCompileRules.cmake @@ -0,0 +1,5 @@ + +# clang-cl interprets paths starting with /U as macro undefines, so we need to +# put a -- before the input file path to force it to be treated as a path. 
+string(REPLACE "-c " "-c -- " CMAKE_C_COMPILE_OBJECT "${CMAKE_C_COMPILE_OBJECT}") +string(REPLACE "-c " "-c -- " CMAKE_CXX_COMPILE_OBJECT "${CMAKE_CXX_COMPILE_OBJECT}") diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake index dcc074e29..d568c721a 100644 --- a/cmake/modules/DispatchCompilerWarnings.cmake +++ b/cmake/modules/DispatchCompilerWarnings.cmake @@ -8,43 +8,39 @@ else() add_compile_options(-Werror) add_compile_options(-Wall) add_compile_options(-Wextra) - add_compile_options(-Wmissing-prototypes) - add_compile_options(-Wdocumentation) - add_compile_options(-Wunreachable-code) - add_compile_options(-Wshadow) - add_compile_options(-Wconversion) - add_compile_options(-Wconstant-conversion) - add_compile_options(-Wint-conversion) - add_compile_options(-Wbool-conversion) - add_compile_options(-Wenum-conversion) - add_compile_options(-Wassign-enum) - add_compile_options(-Wshorten-64-to-32) - add_compile_options(-Wnewline-eof) - add_compile_options(-Wdeprecated-declarations) - add_compile_options(-Wsign-conversion) - add_compile_options(-Winfinite-recursion) + add_compile_options(-Warray-bounds-pointer-arithmetic) + add_compile_options(-Wassign-enum) add_compile_options(-Watomic-properties) add_compile_options(-Wcomma) add_compile_options(-Wconditional-uninitialized) + add_compile_options(-Wconversion) add_compile_options(-Wcovered-switch-default) add_compile_options(-Wdate-time) add_compile_options(-Wdeprecated) + add_compile_options(-Wdocumentation) add_compile_options(-Wdouble-promotion) add_compile_options(-Wduplicate-enum) add_compile_options(-Wexpansion-to-defined) add_compile_options(-Wfloat-equal) add_compile_options(-Widiomatic-parentheses) + add_compile_options(-Winfinite-recursion) + add_compile_options(-Wmissing-prototypes) + add_compile_options(-Wnewline-eof) add_compile_options(-Wnullable-to-nonnull-conversion) add_compile_options(-Wobjc-interface-ivars) add_compile_options(-Wover-aligned) 
add_compile_options(-Wpacked) add_compile_options(-Wpointer-arith) add_compile_options(-Wselector) + add_compile_options(-Wshadow) + add_compile_options(-Wshorten-64-to-32) + add_compile_options(-Wsign-conversion) add_compile_options(-Wstatic-in-inline) add_compile_options(-Wsuper-class-method-mismatch) - add_compile_options(-Wswitch-enum) + add_compile_options(-Wswitch) add_compile_options(-Wunguarded-availability) + add_compile_options(-Wunreachable-code) add_compile_options(-Wunused) add_compile_options(-Wno-unknown-warning-option) @@ -70,5 +66,14 @@ else() add_compile_options(-Wno-unused-macros) add_compile_options(-Wno-used-but-marked-unused) add_compile_options(-Wno-vla) + + if(CMAKE_SYSTEM_NAME STREQUAL Android) + add_compile_options(-Wno-incompatible-function-pointer-types) + add_compile_options(-Wno-implicit-function-declaration) + add_compile_options(-Wno-conversion) + add_compile_options(-Wno-int-conversion) + add_compile_options(-Wno-shorten-64-to-32) + endif() + add_compile_options(-Wno-error=assign-enum) endmacro() endif() diff --git a/cmake/modules/DispatchUtilities.cmake b/cmake/modules/DispatchUtilities.cmake new file mode 100644 index 000000000..15d8cd771 --- /dev/null +++ b/cmake/modules/DispatchUtilities.cmake @@ -0,0 +1,15 @@ + +function(dispatch_set_linker target) + if(USE_GOLD_LINKER) + set_property(TARGET ${target} + APPEND_STRING + PROPERTY LINK_FLAGS + -fuse-ld=gold) + endif() + if(USE_LLD_LINKER) + set_property(TARGET ${target} + APPEND_STRING + PROPERTY LINK_FLAGS + -fuse-ld=lld) + endif() +endfunction() diff --git a/cmake/modules/DispatchWindowsSupport.cmake b/cmake/modules/DispatchWindowsSupport.cmake new file mode 100644 index 000000000..87675a78a --- /dev/null +++ b/cmake/modules/DispatchWindowsSupport.cmake @@ -0,0 +1,74 @@ + +function(dispatch_windows_arch_spelling arch var) + if(${arch} STREQUAL i686) + set(${var} x86 PARENT_SCOPE) + elseif(${arch} STREQUAL x86_64 OR ${arch} STREQUAL AMD64) + set(${var} x64 PARENT_SCOPE) + 
elseif(${arch} STREQUAL armv7) + set(${var} arm PARENT_SCOPE) + elseif(${arch} STREQUAL aarch64) + set(${var} arm64 PARENT_SCOPE) + else() + message(FATAL_ERROR "do not know MSVC spelling for ARCH: `${arch}`") + endif() +endfunction() + +function(dispatch_verify_windows_environment_variables) + set(VCToolsInstallDir $ENV{VCToolsInstallDir}) + set(UniversalCRTSdkDir $ENV{UniversalCRTSdkDir}) + set(UCRTVersion $ENV{UCRTVersion}) + + if("${VCToolsInstallDir}" STREQUAL "") + message(SEND_ERROR "VCToolsInstallDir environment variable must be set") + endif() + if("${UniversalCRTSdkDir}" STREQUAL "") + message(SEND_ERROR "UniversalCRTSdkDir environment variable must be set") + endif() + if("${UCRTVersion}" STREQUAL "") + message(SEND_ERROR "UCRTVersion environment variable must be set") + endif() +endfunction() + +function(dispatch_windows_include_for_arch arch var) + dispatch_verify_windows_environment_variables() + + set(paths + "$ENV{VCToolsInstallDir}/include" + "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/ucrt" + "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/shared" + "$ENV{UniversalCRTSdkDir}/Include/$ENV{UCRTVersion}/um") + set(${var} ${paths} PARENT_SCOPE) +endfunction() + +function(dispatch_windows_lib_for_arch arch var) + dispatch_verify_windows_environment_variables() + dispatch_windows_arch_spelling(${arch} ARCH) + + set(paths) + if(${ARCH} STREQUAL x86) + list(APPEND paths "$ENV{VCToolsInstallDir}/Lib") + else() + list(APPEND paths "$ENV{VCToolsInstallDir}/Lib/${ARCH}") + endif() + list(APPEND paths + "$ENV{UniversalCRTSdkDir}/Lib/$ENV{UCRTVersion}/ucrt/${ARCH}" + "$ENV{UniversalCRTSdkDir}/Lib/$ENV{UCRTVersion}/um/${ARCH}") + set(${var} ${paths} PARENT_SCOPE) +endfunction() + +function(dispatch_windows_generate_sdk_vfs_overlay flags) + dispatch_verify_windows_environment_variables() + + get_filename_component(VCToolsInstallDir $ENV{VCToolsInstallDir} ABSOLUTE) + get_filename_component(UniversalCRTSdkDir $ENV{UniversalCRTSdkDir} ABSOLUTE) + 
set(UCRTVersion $ENV{UCRTVersion}) + + # TODO(compnerd) use a target to avoid re-creating this file all the time + configure_file("${PROJECT_SOURCE_DIR}/utils/WindowsSDKVFSOverlay.yaml.in" + "${PROJECT_BINARY_DIR}/windows-sdk-vfs-overlay.yaml" + @ONLY) + + set(${flags} + -ivfsoverlay;"${PROJECT_BINARY_DIR}/windows-sdk-vfs-overlay.yaml" + PARENT_SCOPE) +endfunction() diff --git a/cmake/modules/FindBlocksRuntime.cmake b/cmake/modules/FindBlocksRuntime.cmake new file mode 100644 index 000000000..111a5d634 --- /dev/null +++ b/cmake/modules/FindBlocksRuntime.cmake @@ -0,0 +1,48 @@ +#.rst: +# FindBlocksRuntime +# ----------------- +# +# Find libBlocksRuntime library and headers. +# +# The module defines the following variables: +# +# ## +# +# BlocksRuntime_FOUND - true if libBlocksRuntime was found +# BlocksRuntime_INCLUDE_DIR - include search path +# BlocksRuntime_LIBRARIES - libraries to link + +if(BlocksRuntime_INCLUDE_DIR AND BlocksRuntime_LIBRARIES) + set(BlocksRuntime_FOUND TRUE) +else() + find_path(BlocksRuntime_INCLUDE_DIR + NAMES + Blocks.h + HINTS + ${CMAKE_INSTALL_FULL_INCLUDEDIR}) + find_library(BlocksRuntime_LIBRARIES + NAMES + BlocksRuntime libBlocksRuntime + HINTS + ${CMAKE_INSTALL_FULL_LIBDIR}) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(BlocksRuntime + REQUIRED_VARS + BlocksRuntime_LIBRARIES + BlocksRuntime_INCLUDE_DIR) + + mark_as_advanced(BlocksRuntime_LIBRARIES BlocksRuntime_INCLUDE_DIR) +endif() + +if(BlocksRuntime_FOUND) + if(NOT TARGET BlocksRuntime::BlocksRuntime) + add_library(BlocksRuntime::BlocksRuntime UNKNOWN IMPORTED) + set_target_properties(BlocksRuntime::BlocksRuntime + PROPERTIES + IMPORTED_LOCATION + ${BlocksRuntime_LIBRARIES} + INTERFACE_INCLUDE_DIRECTORIES + ${BlocksRuntime_INCLUDE_DIR}) + endif() +endif() diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index 64b7b36e9..bae1f9f57 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ 
-3,8 +3,8 @@ include(CMakeParseArguments)
 
 function(add_swift_library library)
   set(options)
-  set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT)
-  set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS)
+  set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET)
+  set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS;DEPENDS)
 
   cmake_parse_arguments(ASL "${options}" "${single_value_options}"
                         "${multiple_value_options}" ${ARGN})
@@ -12,6 +12,9 @@ function(add_swift_library library)
 
   list(APPEND flags -emit-library)
 
+  if(ASL_TARGET)
+    list(APPEND flags -target;${ASL_TARGET})
+  endif()
   if(ASL_MODULE_NAME)
     list(APPEND flags -module-name;${ASL_MODULE_NAME})
   endif()
@@ -58,6 +61,7 @@ function(add_swift_library library)
     DEPENDS
       ${ASL_SOURCES}
       ${CMAKE_SWIFT_COMPILER}
+      ${ASL_DEPENDS}
     COMMAND
       ${CMAKE_COMMAND} -E make_directory ${module_directory}
     COMMAND
@@ -68,3 +72,36 @@ function(add_swift_library library)
               ${ASL_MODULE_PATH}
               ${module_directory}/${ASL_MODULE_NAME}.swiftdoc)
 endfunction()
+
+# Returns the current architecture name in a variable
+#
+# Usage:
+#   get_swift_host_arch(result_var_name)
+#
+# If the current architecture is supported by Swift, sets ${result_var_name}
+# with the sanitized host architecture name derived from CMAKE_SYSTEM_PROCESSOR.
+function(get_swift_host_arch result_var_name) + if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64") + set("${result_var_name}" "x86_64" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64") + set("${result_var_name}" "aarch64" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "ppc64") + set("${result_var_name}" "powerpc64" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "ppc64le") + set("${result_var_name}" "powerpc64le" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "s390x") + set("${result_var_name}" "s390x" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv6l") + set("${result_var_name}" "armv6" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7l") + set("${result_var_name}" "armv7" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "AMD64") + set("${result_var_name}" "x86_64" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "IA64") + set("${result_var_name}" "itanium" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86") + set("${result_var_name}" "i686" PARENT_SCOPE) + else() + message(FATAL_ERROR "Unrecognized architecture on host system: ${CMAKE_SYSTEM_PROCESSOR}") + endif() +endfunction() diff --git a/config/config.h b/config/config.h index a24187ec3..2fcd922b5 100644 --- a/config/config.h +++ b/config/config.h @@ -257,3 +257,5 @@ /* Define if using Darwin $NOCANCEL */ #define __DARWIN_NON_CANCELABLE 1 + +#define HAVE_STRLCPY 1 diff --git a/configure.ac b/configure.ac deleted file mode 100644 index fad99ec23..000000000 --- a/configure.ac +++ /dev/null @@ -1,526 +0,0 @@ -# -# When this file changes, rerun autogen.sh. 
-# - -AC_PREREQ(2.69) -AC_INIT([libdispatch], [1.3], [https://bugs.swift.org], [libdispatch], [https://github.com/apple/swift-corelibs-libdispatch]) -AC_REVISION([$$]) -AC_CONFIG_AUX_DIR(config) -AC_CONFIG_HEADER([config/config_ac.h]) -AC_CONFIG_MACRO_DIR([m4]) -ac_clean_files=a.out.dSYM -AM_MAINTAINER_MODE - -AC_CANONICAL_BUILD -AC_CANONICAL_HOST -AC_CANONICAL_TARGET - -# -# Command line argument to specify build variant (default to release). -# Impacts default value of CFLAGS et al. so must come before AC_PROG_CC -# -AC_ARG_WITH([build-variant], - [AS_HELP_STRING([--with-build-variant=release|debug|releaseassert|releasedebuginfo], [Specify build variant [default=release]])], - [dispatch_build_variant=${withval}], - [dispatch_build_variant=release] -) -AS_CASE([$dispatch_build_variant], - [debug], [ - default_compiler_flags="-g -O0" - dispatch_enable_asserts=true - dispatch_enable_optimization=false - ], - [release], [ - default_compiler_flags="-O2" - dispatch_enable_asserts=false - dispatch_enable_optimization=true - ], - [releaseassert], [ - default_compiler_flags="-O2" - dispatch_enable_asserts=true - dispatch_enable_optimization=true - ], - [releasedebuginfo], [ - default_compiler_flags="-g -O2" - dispatch_enable_asserts=false - dispatch_enable_optimization=true - ], - [AC_MSG_ERROR("invalid build-variant $dispatch_build_variant")] -) -AM_CONDITIONAL(DISPATCH_ENABLE_ASSERTS, $dispatch_enable_asserts) -AM_CONDITIONAL(DISPATCH_ENABLE_OPTIMIZATION, $dispatch_enable_optimization) - -: ${CFLAGS=$default_compiler_flags} -: ${CXXFLAGS=$default_compiler_flags} -: ${OBJCFLAGS=$default_compiler_flags} -: ${OBJCXXFLAGS=$default_compiler_flags} - -AC_PROG_CC([clang gcc cc]) -AC_PROG_CXX([clang++ g++ c++]) -AC_PROG_OBJC([clang gcc cc]) -AC_PROG_OBJCXX([clang++ g++ c++]) - -# -# Android cross-compilation support -# -AC_ARG_WITH([android-ndk], - [AS_HELP_STRING([--with-android-ndk], - [Android NDK location])], [ - android_ndk=${withval} -]) 
-AC_ARG_WITH([android-ndk-gcc-version], - [AS_HELP_STRING([--with-android-ndk-gcc-version], - [Android NDK GCC version [defaults=4.9]])], - [android_ndk_gcc_version=${withval}], [android_ndk_gcc_version=4.9]) -AC_ARG_WITH([android-api-level], - [AS_HELP_STRING([--with-android-api-level], - [Android API level to link with])], [ - android_api_level=${withval} -]) -AC_ARG_ENABLE([android], - [AS_HELP_STRING([--enable-android], - [Compile for Android])], [ - android=true - - # Override values until there's real support for multiple Android platforms - host=armv7-none-linux-androideabi - host_alias=arm-linux-androideabi - host_cpu=armv7 - host_os=linux-androideabi - host_vendor=unknown - arch=arm - - sysroot=${android_ndk}/platforms/android-${android_api_level}/arch-${arch} - toolchain=${android_ndk}/toolchains/${host_alias}-${android_ndk_gcc_version}/prebuilt/linux-${build_cpu} - - CFLAGS="$CFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin" - CXXFLAGS="$CXXFLAGS -target ${host_alias} --sysroot=${sysroot} -B${toolchain}/${host_alias}/bin" - SWIFTC_FLAGS="-target ${host} -sdk ${sysroot} -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x" - LIBS="$LIBS -L${toolchain}/lib/gcc/${host_alias}/${android_ndk_gcc_version}.x" - LDFLAGS="$LDFLAGS -Wc,'-target','${host_alias}','-B${toolchain}/${host_alias}/bin'" - - # FIXME: empty CFLAGS and CXXFLAGS are assumed for this to work. - # FIXME: there should be a more elegant way to do this - ac_configure_args=`echo $ac_configure_args | sed -e "s/ 'CFLAGS='//" -e "s/ 'CXXFLAGS='//"` - # CFLAGS, CXXFLAGS and LIBS needs to be passed to libkqueue and libpwq - ac_configure_args="$ac_configure_args --enable-bionic-libc 'CFLAGS=$CFLAGS' 'CXXFLAGS=$CXXFLAGS' 'LIBS=$LIBS'" -], [android=false]) -AM_CONDITIONAL(ANDROID, $android) - -# -# On Mac OS X, some required header files come from other source packages; -# allow specifying where those are. 
-# -AC_ARG_WITH([apple-libpthread-source], - [AS_HELP_STRING([--with-apple-libpthread-source], - [Specify path to Apple libpthread source])], [ - apple_libpthread_source_path=${withval} - CPPFLAGS="$CPPFLAGS -isystem $apple_libpthread_source_path" -]) - -AC_ARG_WITH([apple-libplatform-source], - [AS_HELP_STRING([--with-apple-libplatform-source], - [Specify path to Apple libplatform source])], [ - apple_libplatform_source_include_path=${withval}/include - CPPFLAGS="$CPPFLAGS -isystem $apple_libplatform_source_include_path" -]) - -AC_ARG_WITH([apple-xnu-source], - [AS_HELP_STRING([--with-apple-xnu-source], - [Specify path to Apple XNU source])], [ - apple_xnu_source_libsyscall_path=${withval}/libsyscall - apple_xnu_source_libproc_path=${withval}/libsyscall/wrappers/libproc - apple_xnu_source_libkern_path=${withval}/libkern - apple_xnu_source_bsd_path=${withval}/bsd - apple_xnu_source_osfmk_path=${withval}/osfmk - CPPFLAGS="$CPPFLAGS -idirafter $apple_xnu_source_libkern_path -isystem $apple_xnu_source_bsd_path -isystem $apple_xnu_source_libsyscall_path -isystem $apple_xnu_source_libproc_path " -]) - -AC_CACHE_CHECK([for System.framework/PrivateHeaders], dispatch_cv_system_privateheaders, - [AS_IF([test -d /System/Library/Frameworks/System.framework/PrivateHeaders], - [dispatch_cv_system_privateheaders=yes], [dispatch_cv_system_privateheaders=no])] -) -AS_IF([test "x$dispatch_cv_system_privateheaders" != "xno"], - [CPPFLAGS="$CPPFLAGS -isystem /System/Library/Frameworks/System.framework/PrivateHeaders"] -) - -# -# On Mac OS X, libdispatch_init is automatically invoked during libSystem -# process initialization. On other systems, it is tagged as a library -# constructor to be run by automatically by the runtime linker. 
-# -AC_ARG_ENABLE([libdispatch-init-constructor], - [AS_HELP_STRING([--disable-libdispatch-init-constructor], - [Disable libdispatch_init as a constructor])],, - [AS_IF([test -f /usr/lib/system/libdispatch.dylib], - [enable_libdispatch_init_constructor=no])] -) -AS_IF([test "x$enable_libdispatch_init_constructor" != "xno"], - [AC_DEFINE(USE_LIBDISPATCH_INIT_CONSTRUCTOR, 1, - [Define to tag libdispatch_init as a constructor])] -) - -# -# On Mac OS X libdispatch can use the non-portable direct pthread TSD functions -# -AC_ARG_ENABLE([apple-tsd-optimizations], - [AS_HELP_STRING([--enable-apple-tsd-optimizations], - [Use non-portable pthread TSD optimizations for Mac OS X.])] -) -AS_IF([test "x$enable_apple_tsd_optimizations" = "xyes"], - [AC_DEFINE(USE_APPLE_TSD_OPTIMIZATIONS, 1, - [Define to use non-portable pthread TSD optimizations for Mac OS X)])] -) - -# -# Enable building Swift overlay support into libdispatch -# -AC_ARG_WITH([swift-toolchain], - [AS_HELP_STRING([--with-swift-toolchain], [Specify path to Swift toolchain])], - [swift_toolchain_path=${withval} - AC_DEFINE(HAVE_SWIFT, 1, [Define if building for Swift]) - SWIFTC="$swift_toolchain_path/bin/swiftc" - case $target_os in - *android*) - os_string="android" - ;; - linux*) - os_string="linux" - case $target_cpu in - armv7l*) - target_cpu="armv7" - ;; - armv6l*) - target_cpu="armv6" - ;; - *) - esac - ;; - *) - os_string=$target_os - ;; - esac - SWIFT_LIBDIR="$swift_toolchain_path/lib/swift/$os_string/$target_cpu" - have_swift=true], - [have_swift=false] -) -AM_CONDITIONAL(HAVE_SWIFT, $have_swift) -AC_SUBST([SWIFTC]) -AC_SUBST([SWIFTC_FLAGS]) -AC_SUBST([SWIFT_LIBDIR]) -AC_SUBST([OS_STRING], ["$os_string"]) - -# -# Enable use of gold linker when building the Swift overlay -# to avoid a symbol relocation issue. 
-# Ultimately the request to use gold should be passed in as an arg -# -AC_CHECK_PROG(use_gold_linker, ld.gold, true, false) -AM_CONDITIONAL(USE_GOLD_LINKER, $use_gold_linker) - -# -# Enable an extended test suite that includes -# tests that are too unreliable to be enabled by -# default in the Swift CI environment, but are still -# useful for libdispatch developers to be able to run. -# -AC_ARG_ENABLE([extended-test-suite], - [AS_HELP_STRING([--enable-extended-test-suite], - [Include additional test cases that may fail intermittently])] -) -AM_CONDITIONAL(EXTENDED_TEST_SUITE, test "x$enable_extended_test_suite" = "xyes") - -# -# Enable __thread based TSD on platforms where it is efficient -# Allow override based on command line argument to configure -# -AC_ARG_ENABLE([thread-local-storage], - [AS_HELP_STRING([--enable-thread-local-storage], - [Enable usage of thread local storage via __thread])],, - [case $target_os in - linux*) - enable_thread_local_storage=yes - ;; - *) - enable_thread_local_storage=no - esac] -) -AS_IF([test "x$enable_thread_local_storage" = "xyes"], - [AC_DEFINE(DISPATCH_USE_THREAD_LOCAL_STORAGE, 1, - [Enable usage of thread local storage via __thread])] -) - -AC_USE_SYSTEM_EXTENSIONS -AM_INIT_AUTOMAKE([foreign no-dependencies subdir-objects]) -LT_INIT([disable-static]) - -AC_PROG_INSTALL -AC_PATH_PROGS(MIG, mig) -AC_PATH_PROG(DTRACE, dtrace) -AS_IF([test "x$DTRACE" != "x"], [use_dtrace=true],[ - use_dtrace=false - CPPFLAGS="$CPPFLAGS -DDISPATCH_USE_DTRACE=0" -]) -AM_CONDITIONAL(USE_DTRACE, $use_dtrace) -AC_PATH_PROG(LEAKS, leaks) -AS_IF([test "x$LEAKS" != "x"], - [AC_DEFINE(HAVE_LEAKS, 1, [Define if Apple leaks program is present]) - have_leaks=true], - [have_leaks=false] -) -AM_CONDITIONAL(HAVE_LEAKS, $have_leaks) - -DISPATCH_C_ATOMIC_BUILTINS - -case $dispatch_cv_atomic in - yes) ;; - -march*) MARCH_FLAGS="$dispatch_cv_atomic" - AC_SUBST([MARCH_FLAGS]) ;; - *) AC_MSG_ERROR([No gcc builtin atomic operations available]) ;; -esac - -# -# Find 
libraries we will need -# -AC_SEARCH_LIBS(clock_gettime, rt) -AC_SEARCH_LIBS(pthread_create, pthread) - -AC_CHECK_FUNCS([strlcpy getprogname], [], - [PKG_CHECK_MODULES(BSD_OVERLAY, libbsd-overlay,[ - AC_DEFINE(HAVE_STRLCPY, 1, []) - AC_DEFINE(HAVE_GETPROGNAME, 1, []) - ])], [#include ] -) - -# -# Checks for header files. -# -AC_HEADER_STDC -AC_CHECK_HEADERS([TargetConditionals.h pthread_np.h malloc/malloc.h libkern/OSCrossEndian.h libkern/OSAtomic.h sys/guarded.h fcntl.h]) - -# hack for pthread/private headers -AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ - saveCPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS -I." - ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h pthread_machdep.h - ln -fsh "$apple_libpthread_source_path"/private pthread - ln -fsh "$apple_xnu_source_osfmk_path" System - mkdir -p mach && ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h mach -]) -AC_CHECK_HEADERS([pthread_machdep.h pthread/qos.h]) - -# pthread_workqueues. -# We can either use libdispatch's internal_workqueue or pthread_workqueue. -# If not specifically configured, default to internal_workqueues on -# Linux and pthread_workqueue on all other platforms. -# On any platform, if pthread_workqueue is not available, fall back -# to using internal_workqueue. 
-AC_ARG_ENABLE([internal-libpwq], - [AS_HELP_STRING([--enable-internal-libpwq], - [Use libdispatch's own implementation of pthread workqueues.])],, - [case $target_os in - linux*) - enable_internal_libpwq=yes - ;; - *) - enable_internal_libpwq=no - esac] -) -AS_IF([test "x$enable_internal_libpwq" = "xyes"], - [AC_DEFINE(DISPATCH_USE_INTERNAL_WORKQUEUE, 1, [Use libdispatch's own implementation of pthread workqueues]) - have_pthread_workqueues=false, - dispatch_use_internal_workqueue=true], - [AC_CHECK_HEADERS([pthread/workqueue_private.h pthread_workqueue.h], - [AC_DEFINE(HAVE_PTHREAD_WORKQUEUES, 1, [Define if pthread work queues are present]) - have_pthread_workqueues=true, - dispatch_use_internal_workqueue=false], - [have_pthread_workqueues=false, - dispatch_use_internal_workqueue=true] - )] -) -AM_CONDITIONAL(DISPATCH_USE_INTERNAL_WORKQUEUE, $dispatch_use_internal_workqueue) -AM_CONDITIONAL(HAVE_PTHREAD_WORKQUEUES, $have_pthread_workqueues) - -AC_CHECK_HEADERS([libproc_internal.h], [], [], [#include ]) -AC_CHECK_FUNCS([pthread_workqueue_setdispatch_np _pthread_workqueue_init]) -AS_IF([test -n "$apple_libpthread_source_path" -a -n "$apple_xnu_source_osfmk_path"], [ - rm -f pthread_machdep.h pthread System mach/coalition.h - CPPFLAGS="$saveCPPFLAGS" - AC_CONFIG_COMMANDS([src/pthread_machdep.h], - [ln -fsh "$apple_libpthread_source_path"/private/tsd_private.h src/pthread_machdep.h], - [apple_libpthread_source_path="$apple_libpthread_source_path"]) - AC_CONFIG_COMMANDS([src/pthread], - [ln -fsh "$apple_libpthread_source_path"/private src/pthread], - [apple_libpthread_source_path="$apple_libpthread_source_path"]) - AC_CONFIG_COMMANDS([src/System], - [ln -fsh "$apple_xnu_source_osfmk_path" src/System], - [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) - AC_CONFIG_COMMANDS([src/mach/coalition.h], - [ln -fsh "$apple_xnu_source_osfmk_path"/mach/coalition.h src/mach], - [apple_xnu_source_osfmk_path="$apple_xnu_source_osfmk_path"]) -]) -# hack for 
xnu/bsd/sys/event.h EVFILT_SOCK declaration -AS_IF([test -n "$apple_xnu_source_bsd_path"], [ - CPPFLAGS="$CPPFLAGS -DPRIVATE=1" -]) - -# -# Check for CoreFoundation, Foundation and objc -# -AC_CHECK_HEADER([CoreFoundation/CoreFoundation.h], - [have_corefoundation=true], [have_corefoundation=false] -) -AM_CONDITIONAL(HAVE_COREFOUNDATION, $have_corefoundation) - -AC_LANG_PUSH([Objective C]) -AC_CHECK_HEADER([Foundation/Foundation.h], - [have_foundation=true], [have_foundation=false] -) -AM_CONDITIONAL(HAVE_FOUNDATION, $have_foundation) -AC_CHECK_HEADER([objc/NSObject.h], [ - AC_DEFINE(HAVE_OBJC, 1, [Define if you have the Objective-C runtime]) - have_objc=true], [have_objc=false] -) -AM_CONDITIONAL(USE_OBJC, $have_objc) -AC_LANG_POP([Objective C]) - -# -# We use the availability of mach.h to decide whether to compile in all sorts -# of Machisms, including using Mach ports as event sources, etc. -# -AC_CHECK_HEADER([mach/mach.h], [ - AC_DEFINE(HAVE_MACH, 1, [Define if mach is present]) - AC_DEFINE(__DARWIN_NON_CANCELABLE, 1, [Define if using Darwin $NOCANCEL]) - have_mach=true], [have_mach=false] -) -AM_CONDITIONAL(USE_MIG, $have_mach) -AC_CHECK_FUNCS([mach_port_construct]) - -# -# Find functions and declarations we care about. 
-# -AC_CHECK_DECLS([CLOCK_UPTIME, CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_UPTIME_FAST], [], [], - [[#include ]]) -AC_CHECK_DECLS([NOTE_NONE, NOTE_REAP, NOTE_REVOKE, NOTE_SIGNAL, NOTE_LOWAT], [], [], - [[#include ]]) -AC_CHECK_DECLS([FD_COPY], [], [], [[#include ]]) -AC_CHECK_DECLS([SIGEMT], [], [], [[#include ]]) -AC_CHECK_DECLS([VQ_UPDATE, VQ_VERYLOWDISK, VQ_QUOTA, VQ_NEARLOWDISK, VQ_DESIRED_DISK], [], [], [[#include ]]) -AC_CHECK_DECLS([program_invocation_short_name], [], [], [[#include ]]) -AC_CHECK_FUNCS([pthread_key_init_np pthread_main_np mach_absolute_time mach_approximate_time malloc_create_zone sysconf]) - -AC_CHECK_DECLS([POSIX_SPAWN_START_SUSPENDED], - [have_posix_spawn_start_suspended=true], [have_posix_spawn_start_suspended=false], - [[#include ]] -) -AM_CONDITIONAL(HAVE_POSIX_SPAWN_START_SUSPENDED, $have_posix_spawn_start_suspended) - -AC_CHECK_FUNC([sem_init], - [have_sem_init=true], [have_sem_init=false] -) - -AC_CHECK_HEADER([linux/futex.h], [ - AC_DEFINE(HAVE_FUTEX, 1, [Define if linux/futex.h is present]) - have_futex=true], [have_futex=false] -) - -# -# We support both Mach semaphores and POSIX semaphores; if the former are -# available, prefer them. 
-# -AC_MSG_CHECKING([what semaphore type to use]); -AS_IF([test "x$have_mach" = "xtrue"], - [AC_DEFINE(USE_MACH_SEM, 1, [Define to use Mach semaphores]) - AC_MSG_RESULT([Mach semaphores])], - [test "x$have_sem_init" = "xtrue"], - [AC_DEFINE(USE_POSIX_SEM, 1, [Define to use POSIX semaphores]) - AC_MSG_RESULT([POSIX semaphores])], - [AC_MSG_ERROR([no supported semaphore type])] -) - -AC_CHECK_HEADERS([sys/cdefs.h], [], [], - [#ifdef HAVE_SYS_CDEFS_H - #include - #endif]) - -DISPATCH_C_BLOCKS - -AC_CACHE_CHECK([for -fvisibility=hidden], [dispatch_cv_cc_visibility_hidden], [ - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -fvisibility=hidden" - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - extern __attribute__ ((visibility ("default"))) int foo; int foo;], [foo = 0;])], - [dispatch_cv_cc_visibility_hidden="yes"], [dispatch_cv_cc_visibility_hidden="no"]) - CFLAGS="$saveCFLAGS" -]) -AS_IF([test "x$dispatch_cv_cc_visibility_hidden" != "xno"], [ - VISIBILITY_FLAGS="-fvisibility=hidden" -]) -AC_SUBST([VISIBILITY_FLAGS]) - -AC_CACHE_CHECK([for -momit-leaf-frame-pointer], [dispatch_cv_cc_omit_leaf_fp], [ - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -momit-leaf-frame-pointer" - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - extern int foo(void); int foo(void) {return 1;}], [foo();])], - [dispatch_cv_cc_omit_leaf_fp="yes"], [dispatch_cv_cc_omit_leaf_fp="no"]) - CFLAGS="$saveCFLAGS" -]) -AS_IF([test "x$dispatch_cv_cc_omit_leaf_fp" != "xno"], [ - OMIT_LEAF_FP_FLAGS="-momit-leaf-frame-pointer" -]) -AC_SUBST([OMIT_LEAF_FP_FLAGS]) - -AS_IF([test "x$have_mach" = "xtrue"], [ - AC_CACHE_CHECK([for darwin linker], [dispatch_cv_ld_darwin], [ - saveLDFLAGS="$LDFLAGS" - LDFLAGS="$LDFLAGS -dynamiclib -compatibility_version 1.2.3 -current_version 4.5.6 -dead_strip" - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - extern int foo; int foo;], [foo = 0;])], - [dispatch_cv_ld_darwin="yes"], [dispatch_cv_ld_darwin="no"]) - LDFLAGS="$saveLDFLAGS" - ]) -]) -AM_CONDITIONAL(HAVE_DARWIN_LD, [test "x$dispatch_cv_ld_darwin" == "xyes"]) - -# -# 
symlink platform-specific module.modulemap files -# -AS_CASE([$target_os], - [darwin*], [ dispatch_module_map_os=darwin ], - [ dispatch_module_map_os=generic ] -) -AC_CONFIG_COMMANDS([modulemaps], [ - ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/dispatch/module.modulemap - ln -fs $dispatch_module_map_os/module.modulemap $ac_top_srcdir/private/module.modulemap - ], - [dispatch_module_map_os="$dispatch_module_map_os"] -) - -# -# Temporary: some versions of clang do not mark __builtin_trap() as -# __attribute__((__noreturn__)). Detect and add if required. -# -AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM([void __attribute__((__noreturn__)) temp(void) { __builtin_trap(); }], [])], - [AC_DEFINE(HAVE_NORETURN_BUILTIN_TRAP, 1, [Define if __builtin_trap marked noreturn])] -) - -# -# Add option to avoid building tests -# -AC_ARG_ENABLE([build-tests], - [AS_HELP_STRING([--disable-build-tests], [Disable tests compilation])]) -AM_CONDITIONAL(BUILD_TESTS, [test "x$enable_build_tests" != "xno"]) - -# -# Generate Makefiles. 
-# -AC_CONFIG_FILES([Makefile dispatch/Makefile man/Makefile os/Makefile private/Makefile src/Makefile tests/Makefile]) - -# -# Generate testsuite links -# -AC_CONFIG_LINKS([tests/dispatch:$ac_top_srcdir/private tests/leaks-wrapper:tests/leaks-wrapper.sh]) - -AC_OUTPUT diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt index b50b1ba15..8b8be8cfb 100644 --- a/dispatch/CMakeLists.txt +++ b/dispatch/CMakeLists.txt @@ -14,12 +14,12 @@ install(FILES source.h time.h DESTINATION - ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch) + "${INSTALL_DISPATCH_HEADERS_DIR}") if(ENABLE_SWIFT) get_filename_component(MODULE_MAP module.modulemap REALPATH) install(FILES ${MODULE_MAP} DESTINATION - ${CMAKE_INSTALL_FULL_INCLUDEDIR}/dispatch) + "${INSTALL_DISPATCH_HEADERS_DIR}") endif() diff --git a/dispatch/Makefile.am b/dispatch/Makefile.am deleted file mode 100644 index 89fd3daf0..000000000 --- a/dispatch/Makefile.am +++ /dev/null @@ -1,28 +0,0 @@ -# -# -# - -if HAVE_SWIFT -dispatchdir=${prefix}/lib/swift/dispatch -else -dispatchdir=$(includedir)/dispatch -endif - -dispatch_HEADERS= \ - base.h \ - block.h \ - data.h \ - dispatch.h \ - group.h \ - introspection.h \ - io.h \ - object.h \ - once.h \ - queue.h \ - semaphore.h \ - source.h \ - time.h - -if HAVE_SWIFT -dispatch_HEADERS+=module.modulemap -endif diff --git a/dispatch/base.h b/dispatch/base.h index c2bea82ea..e6c71b0e0 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -65,6 +65,29 @@ #define DISPATCH_ALWAYS_INLINE __attribute__((__always_inline__)) #define DISPATCH_UNAVAILABLE __attribute__((__unavailable__)) #define DISPATCH_UNAVAILABLE_MSG(msg) __attribute__((__unavailable__(msg))) +#elif defined(_MSC_VER) +#define DISPATCH_NORETURN __declspec(noreturn) +#define DISPATCH_NOTHROW __declspec(nothrow) +#define DISPATCH_NONNULL1 +#define DISPATCH_NONNULL2 +#define DISPATCH_NONNULL3 +#define DISPATCH_NONNULL4 +#define DISPATCH_NONNULL5 +#define DISPATCH_NONNULL6 +#define DISPATCH_NONNULL7 +#define DISPATCH_NONNULL_ALL 
+#define DISPATCH_SENTINEL +#define DISPATCH_PURE +#define DISPATCH_CONST +#if (_MSC_VER >= 1700) +#define DISPATCH_WARN_RESULT _Check_return_ +#else +#define DISPATCH_WARN_RESULT +#endif +#define DISPATCH_MALLOC +#define DISPATCH_ALWAYS_INLINE __forceinline +#define DISPATCH_UNAVAILABLE +#define DISPATCH_UNAVAILABLE_MSG(msg) #else /*! @parseOnly */ #define DISPATCH_NORETURN @@ -112,6 +135,14 @@ #define DISPATCH_LINUX_UNAVAILABLE() #endif +#ifdef __FreeBSD__ +#define DISPATCH_FREEBSD_UNAVAILABLE() \ + DISPATCH_UNAVAILABLE_MSG( \ + "This interface is unavailable on FreeBSD systems") +#else +#define DISPATCH_FREEBSD_UNAVAILABLE() +#endif + #ifndef DISPATCH_ALIAS_V2 #if TARGET_OS_MAC #define DISPATCH_ALIAS_V2(sym) __asm__("_" #sym "$V2") @@ -120,20 +151,20 @@ #endif #endif -#if TARGET_OS_WIN32 -#ifdef __cplusplus -#ifdef __DISPATCH_BUILDING_DISPATCH__ -#define DISPATCH_EXPORT extern "C" extern __declspec(dllexport) +#if defined(_WIN32) +#if defined(__DISPATCH_BUILDING_DISPATCH__) +#if defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" __declspec(dllexport) #else #define DISPATCH_EXPORT extern __declspec(dllexport) -#endif // __DISPATCH_BUILDING_DISPATCH__ -#else // __cplusplus -#ifdef __DISPATCH_BUILDING_DISPATCH__ -#define DISPATCH_EXPORT extern "C" extern __declspec(dllimport) +#endif +#else +#if defined(__cplusplus) +#define DISPATCH_EXPORT extern "C" __declspec(dllimport) #else #define DISPATCH_EXPORT extern __declspec(dllimport) -#endif // __DISPATCH_BUILDING_DISPATCH__ -#endif // __cplusplus +#endif +#endif #elif __GNUC__ #define DISPATCH_EXPORT extern __attribute__((visibility("default"))) #else @@ -172,6 +203,12 @@ #define DISPATCH_NOESCAPE #endif +#if __has_attribute(cold) +#define DISPATCH_COLD __attribute__((__cold__)) +#else +#define DISPATCH_COLD +#endif + #if __has_feature(assume_nonnull) #define DISPATCH_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") #define DISPATCH_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") @@ -200,13 
+237,35 @@ #endif #endif -#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) +#if __has_attribute(enum_extensibility) +#define __DISPATCH_ENUM_ATTR __attribute__((__enum_extensibility__(open))) +#define __DISPATCH_ENUM_ATTR_CLOSED __attribute__((__enum_extensibility__(closed))) +#else +#define __DISPATCH_ENUM_ATTR +#define __DISPATCH_ENUM_ATTR_CLOSED +#endif // __has_attribute(enum_extensibility) + +#if __has_attribute(flag_enum) +#define __DISPATCH_OPTIONS_ATTR __attribute__((__flag_enum__)) +#else +#define __DISPATCH_OPTIONS_ATTR +#endif // __has_attribute(flag_enum) + + +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) || \ + __has_extension(cxx_fixed_enum) || defined(_WIN32) #define DISPATCH_ENUM(name, type, ...) \ - typedef enum : type { __VA_ARGS__ } name##_t + typedef enum : type { __VA_ARGS__ } __DISPATCH_ENUM_ATTR name##_t +#define DISPATCH_OPTIONS(name, type, ...) \ + typedef enum : type { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR name##_t #else #define DISPATCH_ENUM(name, type, ...) \ - enum { __VA_ARGS__ }; typedef type name##_t -#endif + enum { __VA_ARGS__ } __DISPATCH_ENUM_ATTR; typedef type name##_t +#define DISPATCH_OPTIONS(name, type, ...) \ + enum { __VA_ARGS__ } __DISPATCH_OPTIONS_ATTR __DISPATCH_ENUM_ATTR; typedef type name##_t +#endif // __has_feature(objc_fixed_enum) ... + + #if __has_feature(enumerator_attributes) #define DISPATCH_ENUM_API_AVAILABLE(...) API_AVAILABLE(__VA_ARGS__) @@ -219,12 +278,11 @@ #define DISPATCH_ENUM_API_DEPRECATED_WITH_REPLACEMENT(...) 
#endif -#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ - SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#ifdef __swift__ #define DISPATCH_SWIFT3_OVERLAY 1 -#else +#else // __swift__ #define DISPATCH_SWIFT3_OVERLAY 0 -#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#endif // __swift__ #if __has_feature(attribute_availability_swift) #define DISPATCH_SWIFT_UNAVAILABLE(_msg) \ diff --git a/dispatch/block.h b/dispatch/block.h index d60cb2c18..4d6f5b548 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -100,7 +100,7 @@ __BEGIN_DECLS * for synchronous execution or when the dispatch block object is invoked * directly. */ -DISPATCH_ENUM(dispatch_block_flags, unsigned long, +DISPATCH_OPTIONS(dispatch_block_flags, unsigned long, DISPATCH_BLOCK_BARRIER DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1, DISPATCH_BLOCK_DETACHED diff --git a/dispatch/data.h b/dispatch/data.h index 33a0c9d51..825066918 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -62,15 +62,10 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; #define DISPATCH_DATA_DESTRUCTOR_DEFAULT NULL #ifdef __BLOCKS__ -#if !TARGET_OS_WIN32 /*! 
@parseOnly */ #define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ DISPATCH_EXPORT const dispatch_block_t _dispatch_data_destructor_##name #else -#define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ - DISPATCH_EXPORT dispatch_block_t _dispatch_data_destructor_##name -#endif -#else #define DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(name) \ DISPATCH_EXPORT const dispatch_function_t \ _dispatch_data_destructor_##name diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 79a4c6078..cbc39ede6 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -26,8 +26,10 @@ #include #include #include -#elif defined(__linux__) -#include +#elif defined(_WIN32) +#include +#elif defined(__unix__) +#include #endif #include @@ -35,12 +37,12 @@ #include #include #include -#if !defined(HAVE_UNISTD_H) || HAVE_UNISTD_H +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif #include -#if defined(__linux__) && defined(__has_feature) +#if (defined(__linux__) || defined(__FreeBSD__)) && defined(__has_feature) #if __has_feature(modules) #if !defined(__arm__) #include // for off_t (to match Glibc.modulemap) @@ -48,7 +50,7 @@ #endif #endif -#define DISPATCH_API_VERSION 20180109 +#define DISPATCH_API_VERSION 20181008 #ifndef __DISPATCH_BUILDING_DISPATCH__ #ifndef __DISPATCH_INDIRECT__ @@ -67,6 +69,7 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ diff --git a/dispatch/io.h b/dispatch/io.h index a9e6892e5..db9733d82 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -50,7 +50,11 @@ __BEGIN_DECLS * @typedef dispatch_fd_t * Native file descriptor type for the platform. */ +#if defined(_WIN32) +typedef intptr_t dispatch_fd_t; +#else typedef int dispatch_fd_t; +#endif /*! 
* @functiongroup Dispatch I/O Convenience API diff --git a/dispatch/object.h b/dispatch/object.h index 653c122e0..024a3c2a8 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -26,6 +26,10 @@ #include // for HeaderDoc #endif +#if __has_include() +#include +#endif + DISPATCH_ASSUME_NONNULL_BEGIN /*! @@ -95,6 +99,7 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; struct dispatch_semaphore_s *_dsema; @@ -122,19 +127,13 @@ typedef union { #ifndef DISPATCH_DATA_DECL #define DISPATCH_DATA_DECL(name) OS_OBJECT_DECL_SWIFT(name) #endif // DISPATCH_DATA_DECL -#elif !TARGET_OS_WIN32 +#else #define DISPATCH_SOURCE_DECL(name) \ DISPATCH_DECL(name); #define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) #define DISPATCH_SOURCE_TYPE_DECL(name) \ DISPATCH_EXPORT const struct dispatch_source_type_s \ _dispatch_source_type_##name -#else -#define DISPATCH_SOURCE_DECL(name) \ - DISPATCH_DECL(name); -#define DISPATCH_SOURCE_TYPE_DECL(name) \ - DISPATCH_EXPORT struct dispatch_source_type_s _dispatch_source_type_##name -#define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) #endif #ifdef __BLOCKS__ @@ -184,6 +183,16 @@ typedef void (^dispatch_block_t)(void); __BEGIN_DECLS +/*! + * @typedef dispatch_qos_class_t + * Alias for qos_class_t type. + */ +#if __has_include() +typedef qos_class_t dispatch_qos_class_t; +#else +typedef unsigned int dispatch_qos_class_t; +#endif + /*! * @function dispatch_retain * @@ -380,6 +389,49 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_resume(dispatch_object_t object); +/*! + * @function dispatch_set_qos_class_floor + * + * @abstract + * Sets the QOS class floor on a dispatch queue, source or workloop. + * + * @discussion + * The QOS class of workitems submitted to this object asynchronously will be + * elevated to at least the specified QOS class floor. 
The QOS of the workitem + * will be used if higher than the floor even when the workitem has been created + * without "ENFORCE" semantics. + * + * Setting the QOS class floor is equivalent to the QOS effects of configuring + * a queue whose target queue has a QoS class set to the same value. + * + * @param object + * A dispatch queue, workloop, or source to configure. + * The object must be inactive. + * + * Passing another object type or an object that has been activated is undefined + * and will cause the process to be terminated. + * + * @param qos_class + * A QOS class value: + * - QOS_CLASS_USER_INTERACTIVE + * - QOS_CLASS_USER_INITIATED + * - QOS_CLASS_DEFAULT + * - QOS_CLASS_UTILITY + * - QOS_CLASS_BACKGROUND + * Passing any other value is undefined. + * + * @param relative_priority + * A relative priority within the QOS class. This value is a negative + * offset from the maximum supported scheduler priority for the given class. + * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY + * is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_set_qos_class_floor(dispatch_object_t object, + dispatch_qos_class_t qos_class, int relative_priority); + #ifdef __BLOCKS__ /*! * @function dispatch_wait @@ -538,13 +590,13 @@ dispatch_testcancel(void *object); * The message to log above and beyond the introspection. 
*/ API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW DISPATCH_COLD __attribute__((__format__(printf,2,3))) void dispatch_debug(dispatch_object_t object, const char *message, ...); API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) -DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW DISPATCH_COLD __attribute__((__format__(printf,2,0))) void dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); diff --git a/dispatch/once.h b/dispatch/once.h index 37a49506d..fbce4b111 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -38,7 +38,7 @@ __BEGIN_DECLS * Note: static and global variables default to zero. */ DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") -typedef long dispatch_once_t; +typedef intptr_t dispatch_once_t; #if defined(__x86_64__) || defined(__i386__) || defined(__s390x__) #define DISPATCH_ONCE_INLINE_FASTPATH 1 diff --git a/dispatch/queue.h b/dispatch/queue.h index 7c4a0f49d..ddace0659 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -26,10 +26,6 @@ #include // for HeaderDoc #endif -#if __has_include() -#include -#endif - DISPATCH_ASSUME_NONNULL_BEGIN /*! @@ -336,6 +332,102 @@ void dispatch_sync_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); +/*! + * @function dispatch_async_and_wait + * + * @abstract + * Submits a block for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a workitem to a dispatch queue like dispatch_async(), however + * dispatch_async_and_wait() will not return until the workitem has finished. + * + * Like functions of the dispatch_sync family, dispatch_async_and_wait() is + * subject to dead-lock (See dispatch_sync() for details). 
 + * + * However, dispatch_async_and_wait() differs from functions of the + * dispatch_sync family in two fundamental ways: how it respects queue + * attributes and how it chooses the execution context invoking the workitem. + * + * Differences with dispatch_sync() + * + * Work items submitted to a queue with dispatch_async_and_wait() observe all + * queue attributes of that queue when invoked (including autorelease frequency + * or QOS class). + * + * When the runtime has brought up a thread to invoke the asynchronous workitems + * already submitted to the specified queue, that servicing thread will also be + * used to execute synchronous work submitted to the queue with + * dispatch_async_and_wait(). + * + * However, if the runtime has not brought up a thread to service the specified + * queue (because it has no workitems enqueued, or only synchronous workitems), + * then dispatch_async_and_wait() will invoke the workitem on the calling thread, + * similar to the behaviour of functions in the dispatch_sync family. + * + * As an exception, if the queue the work is submitted to doesn't target + * a global concurrent queue (for example because it targets the main queue), + * then the workitem will never be invoked by the thread calling + * dispatch_async_and_wait(). + * + * In other words, dispatch_async_and_wait() is similar to submitting + * a dispatch_block_create()d workitem to a queue and then waiting on it, as + * shown in the code example below. However, dispatch_async_and_wait() is + * significantly more efficient when a new thread is not required to execute + * the workitem (as it will use the stack of the submitting thread instead of + * requiring heap allocations). + * + * + * dispatch_block_t b = dispatch_block_create(0, block); + * dispatch_async(queue, b); + * dispatch_block_wait(b, DISPATCH_TIME_FOREVER); + * Block_release(b); + * + * + * @param queue + * The target dispatch queue to which the block is submitted. 
+ * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to be invoked on the target dispatch queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_async_and_wait(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); +#endif + +/*! + * @function dispatch_async_and_wait_f + * + * @abstract + * Submits a function for synchronous execution on a dispatch queue. + * + * @discussion + * See dispatch_async_and_wait() for details. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_async_and_wait_f(). + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_async_and_wait_f(dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); + #if defined(__APPLE__) && \ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && \ @@ -549,16 +641,6 @@ dispatch_get_main_queue(void) typedef long dispatch_queue_priority_t; -/*! - * @typedef dispatch_qos_class_t - * Alias for qos_class_t type. - */ -#if __has_include() -typedef qos_class_t dispatch_qos_class_t; -#else -typedef unsigned int dispatch_qos_class_t; -#endif - /*! 
* @function dispatch_get_global_queue * @@ -1214,7 +1296,8 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, * Submits a block to a dispatch queue like dispatch_async(), but marks that * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues). * - * See dispatch_async() for details. + * See dispatch_async() for details and "Dispatch Barrier API" for a description + * of the barrier semantics. * * @param queue * The target dispatch queue to which the block is submitted. @@ -1245,7 +1328,8 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); * that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT * queues). * - * See dispatch_async_f() for details. + * See dispatch_async_f() for details and "Dispatch Barrier API" for a + * description of the barrier semantics. * * @param queue * The target dispatch queue to which the function is submitted. @@ -1278,7 +1362,8 @@ dispatch_barrier_async_f(dispatch_queue_t queue, * Submits a block to a dispatch queue like dispatch_sync(), but marks that * block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT queues). * - * See dispatch_sync() for details. + * See dispatch_sync() for details and "Dispatch Barrier API" for a description + * of the barrier semantics. * * @param queue * The target dispatch queue to which the block is submitted. @@ -1327,6 +1412,67 @@ void dispatch_barrier_sync_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); +/*! + * @function dispatch_barrier_async_and_wait + * + * @abstract + * Submits a block for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks + * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT + * queues). + * + * See "Dispatch Barrier API" for a description of the barrier semantics. + * + * @param queue + * The target dispatch queue to which the block is submitted. 
+ * The result of passing NULL in this parameter is undefined. + * + * @param work + * The application-defined block to invoke on the target queue. + * The result of passing NULL in this parameter is undefined. + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_barrier_async_and_wait(dispatch_queue_t queue, + DISPATCH_NOESCAPE dispatch_block_t block); +#endif + +/*! + * @function dispatch_barrier_async_and_wait_f + * + * @abstract + * Submits a function for synchronous execution on a dispatch queue. + * + * @discussion + * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but + * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT + * queues). + * + * See "Dispatch Barrier API" for a description of the barrier semantics. + * + * @param queue + * The target dispatch queue to which the function is submitted. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target queue. The first + * parameter passed to this function is the context provided to + * dispatch_barrier_async_and_wait_f(). + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); + /*! * @functiongroup Dispatch queue-specific contexts * This API allows different subsystems to associate context to a shared queue @@ -1511,9 +1657,9 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue); * Verifies that the current block is not executing on a given dispatch queue. 
* * @discussion - * This function is the equivalent of dispatch_queue_assert() with the test for + * This function is the equivalent of dispatch_assert_queue() with the test for * equality inverted. That means that it will terminate the application when - * dispatch_queue_assert() would return, and vice-versa. See discussion there. + * dispatch_assert_queue() would return, and vice-versa. See discussion there. * * The variant dispatch_assert_queue_not_debug() is compiled out when the * preprocessor macro NDEBUG is defined. (See also assert(3)). diff --git a/dispatch/source.h b/dispatch/source.h index 61a33bb6c..40453fa3e 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -31,7 +31,7 @@ #include #endif -#if !TARGET_OS_WIN32 +#if !defined(_WIN32) #include #endif @@ -105,7 +105,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); * The mask is unused (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_DATA_REPLACE (&_dispatch_source_type_data_replace) -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_SOURCE_TYPE_DECL(data_replace); /*! @@ -123,7 +123,8 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); * @const DISPATCH_SOURCE_TYPE_MACH_RECV * @discussion A dispatch source that monitors a Mach port for pending messages. * The handle is a Mach port with a receive right (mach_port_t). - * The mask is unused (pass zero for now). + * The mask is a mask of desired events from dispatch_source_mach_recv_flags_t, + * but no flags are currently defined (pass zero for now). */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() @@ -218,6 +219,12 @@ DISPATCH_SOURCE_TYPE_DECL(write); typedef unsigned long dispatch_source_mach_send_flags_t; +/*! + * @typedef dispatch_source_mach_recv_flags_t + * Type of dispatch_source_mach_recv flags + */ +typedef unsigned long dispatch_source_mach_recv_flags_t; + /*! 
* @typedef dispatch_source_memorypressure_flags_t * Type of dispatch_source_memorypressure flags @@ -582,7 +589,7 @@ dispatch_source_get_handle(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: n/a * DISPATCH_SOURCE_TYPE_DATA_REPLACE: n/a * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t - * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MACH_RECV: dispatch_source_mach_recv_flags_t * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: n/a @@ -619,7 +626,7 @@ dispatch_source_get_mask(dispatch_source_t source); * DISPATCH_SOURCE_TYPE_DATA_OR: application defined data * DISPATCH_SOURCE_TYPE_DATA_REPLACE: application defined data * DISPATCH_SOURCE_TYPE_MACH_SEND: dispatch_source_mach_send_flags_t - * DISPATCH_SOURCE_TYPE_MACH_RECV: n/a + * DISPATCH_SOURCE_TYPE_MACH_RECV: dispatch_source_mach_recv_flags_t * DISPATCH_SOURCE_TYPE_MEMORYPRESSURE dispatch_source_memorypressure_flags_t * DISPATCH_SOURCE_TYPE_PROC: dispatch_source_proc_flags_t * DISPATCH_SOURCE_TYPE_READ: estimated bytes available to read diff --git a/dispatch/workloop.h b/dispatch/workloop.h new file mode 100644 index 000000000..2c6cf18c5 --- /dev/null +++ b/dispatch/workloop.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2017-2019 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_WORKLOOP__ +#define __DISPATCH_WORKLOOP__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN + +__BEGIN_DECLS + +/*! + * @typedef dispatch_workloop_t + * + * @abstract + * Dispatch workloops invoke workitems submitted to them in priority order. + * + * @discussion + * A dispatch workloop is a flavor of dispatch_queue_t that is a priority + * ordered queue (using the QOS class of the submitted workitems as the + * ordering). + * + * Between each workitem invocation, the workloop will evaluate whether higher + * priority workitems have since been submitted, either directly to the + * workloop or to any queues that target the workloop, and execute these first. + * + * Serial queues targeting a workloop maintain FIFO execution of their + * workitems. However, the workloop may reorder workitems submitted to + * independent serial queues targeting it with respect to each other, + * based on their priorities, while preserving FIFO execution with respect to + * each serial queue. + * + * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed + * to all APIs accepting a dispatch queue, except for functions from the + * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop + * objects. Functions from the dispatch_sync() family on queues targeting + * a workloop are still permitted but discouraged for performance reasons. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_workloop_s *dispatch_workloop_t; +#else +DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); +#endif + +/*! + * @function dispatch_workloop_create + * + * @abstract + * Creates a new dispatch workloop to which workitems may be submitted. + * + * @param label + * A string label to attach to the workloop. 
+ * + * @result + * The newly created dispatch workloop. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_workloop_t +dispatch_workloop_create(const char *_Nullable label); + +/*! + * @function dispatch_workloop_create_inactive + * + * @abstract + * Creates a new inactive dispatch workloop that can be setup and then + * activated. + * + * @discussion + * Creating an inactive workloop allows for it to receive further configuration + * before it is activated, and workitems can be submitted to it. + * + * Submitting workitems to an inactive workloop is undefined and will cause the + * process to be terminated. + * + * @param label + * A string label to attach to the workloop. + * + * @result + * The newly created dispatch workloop. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +dispatch_workloop_t +dispatch_workloop_create_inactive(const char *_Nullable label); + +/*! + * @function dispatch_workloop_set_autorelease_frequency + * + * @abstract + * Sets the autorelease frequency of the workloop. + * + * @discussion + * See dispatch_queue_attr_make_with_autorelease_frequency(). + * The default policy for a workloop is + * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + * + * @param frequency + * The requested autorelease frequency. 
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, + dispatch_autorelease_frequency_t frequency); + +__END_DECLS + +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index e6e9be8ee..bd5042c4a 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -40,6 +40,28 @@ name = libdispatch_kernel; productName = libdispatch_kernel; }; + 6E43553E215B5D9D00C13177 /* libdispatch_introspection */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 6E435541215B5D9D00C13177 /* Build configuration list for PBXAggregateTarget "libdispatch_introspection" */; + buildPhases = ( + ); + dependencies = ( + 6EE5083B21701B9100833569 /* PBXTargetDependency */, + ); + name = libdispatch_introspection; + productName = libdispatch_introspection; + }; + 6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 6EA833C32162D6380045EFDC /* Build configuration list for PBXAggregateTarget "libdispatch_introspection_Sim" */; + buildPhases = ( + ); + dependencies = ( + 6EE5083D21701B9600833569 /* PBXTargetDependency */, + ); + name = libdispatch_introspection_Sim; + productName = libdispatch_introspection_Sim; + }; 92CBD7201BED924F006E0892 /* libdispatch_tests_legacy */ = { isa = PBXAggregateTarget; buildConfigurationList = 92CBD7231BED924F006E0892 /* Build configuration list for PBXAggregateTarget "libdispatch_tests_legacy" */; @@ -94,7 +116,6 @@ 6E040C751C499CE600411A2E /* firehose_buffer_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6E21F2E81BBB23FA0000C6A5 /* firehose_server_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 
6E21F2E41BBB23F00000C6A5 /* firehose_server_internal.h */; }; 6E21F2E91BBB240E0000C6A5 /* firehose_server.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E21F2E51BBB23F00000C6A5 /* firehose_server.c */; }; - 6E29394D1FB9527F00FDAC90 /* libdispatch.plist in Copy Ariadne Plist */ = {isa = PBXBuildFile; fileRef = 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */; }; 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; 6E4BACBD1D48A41500B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC21D48A42000B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; @@ -128,6 +149,13 @@ 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + 6E9C6CA820F9848C00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + 6E9C6CA920F9848D00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + 6E9C6CAA20F9848D00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + 6E9C6CAB20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* 
yield.c */; }; + 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; @@ -147,6 +175,8 @@ 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6ED64B401BBD898300C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B411BBD898400C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; 6ED64B431BBD898600C35F4D /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; @@ -198,6 +228,8 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; + B609581F221DFA4B00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; B68358901FA77F5B00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; @@ -262,6 +294,99 @@ E43A72841AF85BCB00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72851AF85BCC00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; E43A72871AF85BCD00BAA921 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43B88322241F19000215272 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88332241F19000215272 /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* 
base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88342241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B994F0F3E85C30006BC96 /* object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88352241F19000215272 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; + E43B88362241F19000215272 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88372241F19000215272 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88382241F19000215272 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88392241F19000215272 /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; + E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; + E43B883B2241F19000215272 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883C2241F19000215272 /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883D2241F19000215272 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E43B883E2241F19000215272 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B883F2241F19000215272 /* io.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C310D30CC7004407EA /* io.h */; settings = {ATTRIBUTES 
= (Public, ); }; }; + E43B88402241F19000215272 /* voucher_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E7418066276009FFDB6 /* voucher_internal.h */; }; + E43B88412241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88422241F19000215272 /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E43B88432241F19000215272 /* data.h in Headers */ = {isa = PBXBuildFile; fileRef = 5AAB45C510D30D0C004407EA /* data.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88442241F19000215272 /* firehose_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF0B26A1BA8C4AE007FA4F6 /* firehose_internal.h */; }; + E43B88452241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = 96032E4C0F5CC8D100241C5F /* time.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88462241F19000215272 /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED930E8361E600161930 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88472241F19000215272 /* block.h in Headers */ = {isa = PBXBuildFile; fileRef = E4D76A9218E325D200B1F98B /* block.h */; settings = {ATTRIBUTES = (Public, ); }; }; + E43B88482241F19000215272 /* data_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C913AC0E143BD34800B78976 /* data_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88492241F19000215272 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884A2241F19000215272 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884B2241F19000215272 /* source_private.h in Headers */ = {isa = PBXBuildFile; fileRef = FCEF047F0F5661960067401F /* 
source_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884C2241F19000215272 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E43B884D2241F19000215272 /* benchmark.h in Headers */ = {isa = PBXBuildFile; fileRef = 961B99350F3E83980006BC96 /* benchmark.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B884E2241F19000215272 /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8F0E8361E600161930 /* internal.h */; settings = {ATTRIBUTES = (); }; }; + E43B884F2241F19000215272 /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88502241F19000215272 /* object_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 965ECC200F3EAB71004DDD89 /* object_internal.h */; }; + E43B88512241F19000215272 /* queue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D950F3EA2170041FF5D /* queue_internal.h */; }; + E43B88522241F19000215272 /* source_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = FC0B34780FA2851C0080FFA0 /* source_internal.h */; }; + E43B88532241F19000215272 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; + E43B88542241F19000215272 /* data_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E4C1ED6E1263E714000D3C8B /* data_internal.h */; }; + E43B88552241F19000215272 /* voucher_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E44A8E711805C473009FFDB6 /* voucher_private.h */; }; + E43B88562241F19000215272 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; + E43B88572241F19000215272 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; + E43B88582241F19000215272 /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B 
/* event_config.h */; }; + E43B88592241F19000215272 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D820F3EA1020041FF5D /* atomic.h */; }; + E43B885A2241F19000215272 /* shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 96929D830F3EA1020041FF5D /* shims.h */; }; + E43B885B2241F19000215272 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E43B885C2241F19000215272 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; + E43B885E2241F19000215272 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; + E43B885F2241F19000215272 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B88602241F19000215272 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; + E43B88612241F19000215272 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; + E43B88622241F19000215272 /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; + E43B88632241F19000215272 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; + E43B88642241F19000215272 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; + E43B88652241F19000215272 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; + E43B88662241F19000215272 /* event_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5ACCB91D3C4D0B007DA2B4 /* event_internal.h */; }; + 
E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; + E43B88682241F19000215272 /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; + E43B88692241F19000215272 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; + E43B886A2241F19000215272 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886B2241F19000215272 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; + E43B886C2241F19000215272 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E43B886D2241F19000215272 /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886E2241F19000215272 /* mach_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4ECBAA415253C25002C313C /* mach_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + E43B886F2241F19000215272 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; + E43B88702241F19000215272 /* introspection_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44F9DA816543F79001DCD38 /* introspection_internal.h */; }; + E43B88722241F19000215272 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; + E43B88732241F19000215272 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; + E43B88742241F19000215272 /* firehose.defs in Sources 
*/ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; + E43B88752241F19000215272 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; + E43B88762241F19000215272 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; + E43B88772241F19000215272 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; + E43B88782241F19000215272 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; + E43B88792241F19000215272 /* object.m in Sources */ = {isa = PBXBuildFile; fileRef = E4FC3263145F46C9002FBDDB /* object.m */; }; + E43B887A2241F19000215272 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; + E43B887B2241F19000215272 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; + E43B887C2241F19000215272 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; + E43B887D2241F19000215272 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + E43B887E2241F19000215272 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; + E43B887F2241F19000215272 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; + E43B88802241F19000215272 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; + E43B88812241F19000215272 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; + E43B88822241F19000215272 /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; + E43B88832241F19000215272 /* event.c in 
Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; + E43B88842241F19000215272 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; + E43B88852241F19000215272 /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; + E43B88862241F19000215272 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; + E43B88872241F19000215272 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; + E43B88882241F19000215272 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; + E43B88892241F19000215272 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; + E43B888A2241F19000215272 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; + E43B888B2241F19000215272 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E43B888C2241F19000215272 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; + E43B888D2241F19000215272 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; + E43B888E2241F19000215272 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; + E43B888F2241F19000215272 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; 
E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; @@ -303,6 +428,7 @@ E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630252176162D300E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; E4630253176162D400E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; + E4834144225D27F600954FC6 /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; E48AF55A16E70FD9004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48AF55B16E72D44004105FF /* io_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E48AF55916E70FD9004105FF /* io_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E48EC97C1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; @@ -479,6 +605,20 @@ remoteGlobalIDString = 6E040C621C499B1B00411A2E; remoteInfo = libfirehose_kernel; }; + 6EE5083A21701B9100833569 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4B51595164B2DA300E003AF; + remoteInfo = "libdispatch introspection"; + }; + 6EE5083C21701B9600833569 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E4B51595164B2DA300E003AF; + remoteInfo = "libdispatch introspection"; + }; 6EF0B27D1BA8C5BF007FA4F6 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 
08FB7793FE84155DC02AAC07 /* Project object */; @@ -556,41 +696,37 @@ remoteGlobalIDString = D2AAC045055464E500DB518D; remoteInfo = libdispatch; }; - E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = { + E43B882A2241F19000215272 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; remoteGlobalIDString = E4EC121612514715000DDBD1; remoteInfo = "libdispatch mp resolved"; }; - E49BB6F71E7074C100868613 /* PBXContainerItemProxy */ = { + E43B882C2241F19000215272 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; remoteGlobalIDString = E49BB6CE1E70748100868613; remoteInfo = "libdispatch armv81 resolved"; }; - E4B515DA164B317700E003AF /* PBXContainerItemProxy */ = { + E47D6ECC125FEBA10070D91C /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; proxyType = 1; - remoteGlobalIDString = E4B51595164B2DA300E003AF; - remoteInfo = "libdispatch introspection"; + remoteGlobalIDString = E4EC121612514715000DDBD1; + remoteInfo = "libdispatch mp resolved"; + }; + E49BB6F71E7074C100868613 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = E49BB6CE1E70748100868613; + remoteInfo = "libdispatch armv81 resolved"; }; /* End PBXContainerItemProxy section */ /* Begin PBXCopyFilesBuildPhase section */ - 6E2939471FB9522D00FDAC90 /* Copy Ariadne Plist */ = { - isa = PBXCopyFilesBuildPhase; - buildActionMask = 8; - dstPath = /AppleInternal/Library/Ariadne/Plists; - dstSubfolderSpec = 0; - files = ( - 6E29394D1FB9527F00FDAC90 /* libdispatch.plist in Copy Ariadne Plist */, - ); - name = "Copy Ariadne Plist"; - runOnlyForDeploymentPostprocessing = 1; - }; 6EA283D61CAB933E0041B2E0 /* Copy Trace Definitions */ = { isa = 
PBXCopyFilesBuildPhase; buildActionMask = 8; @@ -677,6 +813,7 @@ 6E9955CE1C3B218E0071D40C /* venture.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = venture.c; sourceTree = ""; }; 6E9956061C3B21AA0071D40C /* venture_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_internal.h; sourceTree = ""; }; 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = ""; }; + 6E9C6CA220F9848000EA81C0 /* yield.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = yield.c; path = shims/yield.c; sourceTree = ""; }; 6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = ""; }; 6EA2CB841C005DEF0076794A /* dispatch_source.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_source.c; sourceTree = ""; }; 6EA7937D1D456D1300929B1B /* event_epoll.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_epoll.c; sourceTree = ""; }; @@ -688,6 +825,8 @@ 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = ""; }; 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = ""; }; 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; + 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; 
path = dispatch_channel.c; sourceTree = ""; }; + 6EC8DC261E3E84610044B652 /* channel_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_private.h; sourceTree = ""; }; 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_kevent_cancel_races.c; sourceTree = ""; }; 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_private.h; sourceTree = ""; }; 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_state_machine.c; sourceTree = ""; }; @@ -730,6 +869,7 @@ 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; + B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; B683588A1FA77F4900AA0D58 /* time_private.h */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.c.h; path = time_private.h; sourceTree = ""; }; @@ -763,6 +903,7 @@ E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.aliases; sourceTree = ""; }; E43570B8126E93380097AB9F /* provider.d */ = {isa = PBXFileReference; explicitFileType = sourcecode.dtrace; fileEncoding = 4; path = provider.d; sourceTree = ""; }; E43A724F1AF85BBC00BAA921 /* block.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = block.cpp; sourceTree = ""; }; + E43B889A2241F19000215272 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; E43D93F11097917E004F6A62 /* libdispatch.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libdispatch.xcconfig; sourceTree = ""; }; E44757D917F4572600B82CA1 /* inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_internal.h; sourceTree = ""; }; E448727914C6215D00BB45C2 /* libdispatch.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.order; sourceTree = ""; }; @@ -799,6 +940,7 @@ E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; E4EC122D12514715000DDBD1 /* libdispatch_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_mp.a; sourceTree = BUILT_PRODUCTS_DIR; }; E4ECBAA415253C25002C313C /* mach_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_private.h; sourceTree = ""; }; + E4FB8E90218CD7F8004B7A25 /* install-plists.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = 
"install-plists.sh"; sourceTree = ""; }; E4FC3263145F46C9002FBDDB /* object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = object.m; sourceTree = ""; }; F7DC045A2060BBBE00C90737 /* target.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = target.h; sourceTree = ""; }; FC0B34780FA2851C0080FFA0 /* source_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_internal.h; sourceTree = ""; }; @@ -888,6 +1030,7 @@ C9C5F80D143C1771006DC718 /* transform.c */, 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, + 6E9C6CA220F9848000EA81C0 /* yield.c */, 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */, FC7BED950E8361E600161930 /* protocol.defs */, @@ -911,6 +1054,7 @@ C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, + E43B889A2241F19000215272 /* libdispatch.dylib */, ); name = Products; sourceTree = ""; @@ -991,6 +1135,7 @@ 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */, 6E326AB11C224830002A6505 /* dispatch_cascade.c */, 6E67D8D91C16C94B00FC98AC /* dispatch_cf_main.c */, + 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */, 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, @@ -1139,6 +1284,7 @@ E49F251D125D630A0057C971 /* install-manpages.sh */, E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */, E421E5FB1716B8730090DC9B /* install-dtrace.sh */, + E4FB8E90218CD7F8004B7A25 /* install-plists.sh */, E49F251E125D631D0057C971 /* mig-headers.sh */, E482F1CD12DBAB590030614D /* postprocess-headers.sh */, C01866BF1C5976C90040FC07 /* run-on-install.sh */, @@ -1202,6 +1348,7 @@ 721F5C5C0F15520500FF03A6 /* semaphore.h */, FC7BED8D0E8361E600161930 /* 
source.h */, 96032E4C0F5CC8D100241C5F /* time.h */, + B6095819221DFA2A00F39D1F /* workloop.h */, E421E5F81716ADA10090DC9B /* introspection.h */, ); name = "Dispatch Public Headers"; @@ -1216,6 +1363,7 @@ E48AF55916E70FD9004105FF /* io_private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, + 6EC8DC261E3E84610044B652 /* channel_private.h */, FCEF047F0F5661960067401F /* source_private.h */, E4ECBAA415253C25002C313C /* mach_private.h */, B683588A1FA77F4900AA0D58 /* time_private.h */, @@ -1286,6 +1434,7 @@ 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, + 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, @@ -1310,6 +1459,7 @@ F7DC045B2060BBBE00C90737 /* target.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, FC7BED9E0E8361E600161930 /* internal.h in Headers */, + B609581E221DFA2A00F39D1F /* workloop.h in Headers */, 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, @@ -1347,10 +1497,82 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E43B88312241F19000215272 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + E43B88322241F19000215272 /* dispatch.h in Headers */, + E43B88332241F19000215272 /* base.h in Headers */, + E43B88342241F19000215272 /* object.h in Headers */, + E43B88352241F19000215272 /* inline_internal.h in Headers */, + E43B88362241F19000215272 /* channel_private.h in Headers */, + E43B88372241F19000215272 /* queue.h in Headers */, + E43B88382241F19000215272 /* source.h in Headers */, + E43B88392241F19000215272 /* 
venture_private.h in Headers */, + E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */, + E43B883B2241F19000215272 /* semaphore.h in Headers */, + E43B883C2241F19000215272 /* group.h in Headers */, + E43B883D2241F19000215272 /* priority.h in Headers */, + E43B883E2241F19000215272 /* once.h in Headers */, + E43B883F2241F19000215272 /* io.h in Headers */, + E43B88402241F19000215272 /* voucher_internal.h in Headers */, + E43B88412241F19000215272 /* module.modulemap in Headers */, + E43B88422241F19000215272 /* atomic_sfb.h in Headers */, + E43B88432241F19000215272 /* data.h in Headers */, + E43B88442241F19000215272 /* firehose_internal.h in Headers */, + E43B88452241F19000215272 /* time.h in Headers */, + E43B88462241F19000215272 /* private.h in Headers */, + E43B88472241F19000215272 /* block.h in Headers */, + E43B88482241F19000215272 /* data_private.h in Headers */, + E43B88492241F19000215272 /* queue_private.h in Headers */, + E43B884A2241F19000215272 /* module.modulemap in Headers */, + E43B884B2241F19000215272 /* source_private.h in Headers */, + E43B884C2241F19000215272 /* target.h in Headers */, + E43B884D2241F19000215272 /* benchmark.h in Headers */, + E43B884E2241F19000215272 /* internal.h in Headers */, + E4834144225D27F600954FC6 /* workloop.h in Headers */, + E43B884F2241F19000215272 /* workloop_private.h in Headers */, + E43B88502241F19000215272 /* object_internal.h in Headers */, + E43B88512241F19000215272 /* queue_internal.h in Headers */, + E43B88522241F19000215272 /* source_internal.h in Headers */, + E43B88532241F19000215272 /* semaphore_internal.h in Headers */, + E43B88542241F19000215272 /* data_internal.h in Headers */, + E43B88552241F19000215272 /* voucher_private.h in Headers */, + E43B88562241F19000215272 /* io_internal.h in Headers */, + E43B88572241F19000215272 /* tsd.h in Headers */, + E43B88582241F19000215272 /* event_config.h in Headers */, + E43B88592241F19000215272 /* atomic.h in Headers */, + E43B885A2241F19000215272 /* 
shims.h in Headers */, + E43B885B2241F19000215272 /* time.h in Headers */, + E43B885C2241F19000215272 /* mach_internal.h in Headers */, + E43B885D2241F19000215272 /* firehose_buffer_internal.h in Headers */, + E43B885E2241F19000215272 /* yield.h in Headers */, + E43B885F2241F19000215272 /* layout_private.h in Headers */, + E43B88602241F19000215272 /* perfmon.h in Headers */, + E43B88612241F19000215272 /* config.h in Headers */, + E43B88622241F19000215272 /* venture_internal.h in Headers */, + E43B88632241F19000215272 /* lock.h in Headers */, + E43B88642241F19000215272 /* trace.h in Headers */, + E43B88652241F19000215272 /* getprogname.h in Headers */, + E43B88662241F19000215272 /* event_internal.h in Headers */, + E43B88672241F19000215272 /* firehose_inline_internal.h in Headers */, + E43B88682241F19000215272 /* hw_config.h in Headers */, + E43B88692241F19000215272 /* object_private.h in Headers */, + E43B886A2241F19000215272 /* time_private.h in Headers */, + E43B886B2241F19000215272 /* workqueue_internal.h in Headers */, + E43B886C2241F19000215272 /* object.h in Headers */, + E43B886D2241F19000215272 /* io_private.h in Headers */, + E43B886E2241F19000215272 /* mach_private.h in Headers */, + E43B886F2241F19000215272 /* allocator_internal.h in Headers */, + E43B88702241F19000215272 /* introspection_internal.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; E49F24AA125D57FA0057C971 /* Headers */ = { isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */, E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, @@ -1385,6 +1607,7 @@ 6ED64B501BBD8A1400C35F4D /* firehose_internal.h in Headers */, E49F24BF125D57FA0057C971 /* io_internal.h in Headers */, E44A8E731805C473009FFDB6 /* voucher_private.h in Headers */, + B609581F221DFA4B00F39D1F /* workloop.h in Headers */, 
E49F24C1125D57FA0057C971 /* tsd.h in Headers */, E49F24C2125D57FA0057C971 /* atomic.h in Headers */, E49F24C3125D57FA0057C971 /* shims.h in Headers */, @@ -1456,7 +1679,7 @@ /* Begin PBXLegacyTarget section */ 92F3FECA1BEC69E500025962 /* darwintests */ = { isa = PBXLegacyTarget; - buildArgumentsString = "$(ACTION)"; + buildArgumentsString = "-j -k $(ACTION)"; buildConfigurationList = 92F3FECB1BEC69E500025962 /* Build configuration list for PBXLegacyTarget "darwintests" */; buildPhases = ( ); @@ -1547,7 +1770,7 @@ E482F1C512DBAA110030614D /* Postprocess Headers */, 4CED8B9D0EEDF8B600AF99AB /* Install Manpages */, 6E2464DD1F5E67900031ADD9 /* Validate symbol ordering */, - 6E2939471FB9522D00FDAC90 /* Copy Ariadne Plist */, + E4FB8E8F218CD68A004B7A25 /* Install Plists */, ); buildRules = ( ); @@ -1555,7 +1778,6 @@ 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */, E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, E49BB6F81E7074C100868613 /* PBXTargetDependency */, - E4B515DB164B317700E003AF /* PBXTargetDependency */, C01866C21C597AEA0040FC07 /* PBXTargetDependency */, C00B0E141C5AEED6000330B3 /* PBXTargetDependency */, ); @@ -1564,6 +1786,25 @@ productReference = D2AAC046055464E500DB518D /* libdispatch.dylib */; productType = "com.apple.product-type.library.dynamic"; }; + E43B88262241F19000215272 /* libdispatch_driverkit */ = { + isa = PBXNativeTarget; + buildConfigurationList = E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */; + buildPhases = ( + E43B88312241F19000215272 /* Headers */, + E43B88712241F19000215272 /* Sources */, + E43B88922241F19000215272 /* Install Headers */, + ); + buildRules = ( + ); + dependencies = ( + E43B88292241F19000215272 /* PBXTargetDependency */, + E43B882B2241F19000215272 /* PBXTargetDependency */, + ); + name = libdispatch_driverkit; + productName = libdispatch; + productReference = E43B889A2241F19000215272 /* libdispatch.dylib */; + productType = 
"com.apple.product-type.library.dynamic"; + }; E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */ = { isa = PBXNativeTarget; buildConfigurationList = E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */; @@ -1642,7 +1883,8 @@ isa = PBXProject; attributes = { BuildIndependentTargetsInParallel = YES; - LastUpgradeCheck = 0900; + DefaultBuildSystemTypeForWorkspace = Latest; + LastUpgradeCheck = 1100; TargetAttributes = { 3F3C9326128E637B0042B1F7 = { ProvisioningStyle = Manual; @@ -1729,8 +1971,11 @@ E4EC121612514715000DDBD1 /* libdispatch mp resolved */, E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */, E4B51595164B2DA300E003AF /* libdispatch introspection */, + E43B88262241F19000215272 /* libdispatch_driverkit */, C01866A41C5973210040FC07 /* libdispatch mp static */, C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */, + 6E43553E215B5D9D00C13177 /* libdispatch_introspection */, + 6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, @@ -1821,7 +2066,7 @@ ); runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\""; + shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\"\n"; showEnvVarsInLog = 0; }; C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { @@ -1889,7 +2134,33 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; + E43B88922241F19000215272 /* Install Headers */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/install-headers.sh", + "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/object_private.h", + "$(SRCROOT)/os/venture_private.h", + "$(SRCROOT)/os/voucher_private.h", + "$(SRCROOT)/os/voucher_activity_private.h", + ); + name = "Install Headers"; + outputPaths = ( + "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", + "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = "/bin/bash -e"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E482F1C512DBAA110030614D /* Postprocess Headers */ = { @@ -1905,7 +2176,7 @@ ); runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E49BB6CF1E70748100868613 /* Mig Headers */ = { @@ -2058,6 +2329,27 @@ shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; showEnvVarsInLog = 0; }; + E4FB8E8F218CD68A004B7A25 /* Install Plists */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "$(SRCROOT)/xcodescripts/install-plists.sh", + "$(SRCROOT)/src/libdispatch.plist", + ); + name = "Install Plists"; + outputFileListPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = "/bin/bash -e"; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ @@ -2084,6 +2376,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */, C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, @@ -2117,6 +2410,7 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */, C01866A61C5973210040FC07 /* protocol.defs in Sources */, C01866AB1C5973210040FC07 /* firehose.defs in Sources */, C01866AE1C5973210040FC07 /* firehose_reply.defs in Sources */, @@ -2165,6 +2459,7 @@ FC7BED990E8361E600161930 /* queue.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, + 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */, 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, 6EA962971D48622600759D53 /* event.c in Sources */, 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */, @@ -2182,6 +2477,43 @@ ); runOnlyForDeploymentPostprocessing = 0; }; + E43B88712241F19000215272 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + E43B88722241F19000215272 /* provider.d in Sources */, + E43B88732241F19000215272 /* protocol.defs in Sources */, + E43B88742241F19000215272 /* firehose.defs in Sources */, + E43B88752241F19000215272 /* firehose_reply.defs in Sources */, + E43B88762241F19000215272 /* resolver.c in Sources */, + E43B88772241F19000215272 /* init.c in Sources */, + E43B88782241F19000215272 /* object.c in Sources */, + E43B88792241F19000215272 /* object.m in Sources */, + E43B887A2241F19000215272 /* block.cpp in Sources */, + E43B887B2241F19000215272 /* lock.c in Sources */, + E43B887C2241F19000215272 /* semaphore.c in Sources */, + 
E43B887D2241F19000215272 /* once.c in Sources */, + E43B887E2241F19000215272 /* queue.c in Sources */, + E43B887F2241F19000215272 /* apply.c in Sources */, + E43B88802241F19000215272 /* source.c in Sources */, + E43B88812241F19000215272 /* yield.c in Sources */, + E43B88822241F19000215272 /* mach.c in Sources */, + E43B88832241F19000215272 /* event.c in Sources */, + E43B88842241F19000215272 /* event_kevent.c in Sources */, + E43B88852241F19000215272 /* event_epoll.c in Sources */, + E43B88862241F19000215272 /* voucher.c in Sources */, + E43B88872241F19000215272 /* firehose_buffer.c in Sources */, + E43B88882241F19000215272 /* io.c in Sources */, + E43B88892241F19000215272 /* data.c in Sources */, + E43B888A2241F19000215272 /* data.m in Sources */, + E43B888B2241F19000215272 /* transform.c in Sources */, + E43B888C2241F19000215272 /* time.c in Sources */, + E43B888D2241F19000215272 /* allocator.c in Sources */, + E43B888E2241F19000215272 /* benchmark.c in Sources */, + E43B888F2241F19000215272 /* venture.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; E49BB6D01E70748100868613 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2201,6 +2533,7 @@ E49BB6D81E70748100868613 /* mach.c in Sources */, E49BB6DA1E70748100868613 /* queue.c in Sources */, E49BB6DF1E70748100868613 /* apply.c in Sources */, + 6E9C6CAA20F9848D00EA81C0 /* yield.c in Sources */, E49BB6E31E70748100868613 /* source.c in Sources */, E49BB6E81E70748100868613 /* event.c in Sources */, E49BB6D61E70748100868613 /* event_kevent.c in Sources */, @@ -2237,6 +2570,7 @@ E49F24CB125D57FA0057C971 /* queue.c in Sources */, E49F24CE125D57FA0057C971 /* apply.c in Sources */, E49F24D1125D57FA0057C971 /* source.c in Sources */, + 6E9C6CA820F9848C00EA81C0 /* yield.c in Sources */, 6E4BACC21D48A42000B562AE /* mach.c in Sources */, 6EA962981D48622700759D53 /* event.c in Sources */, 6EA962A01D48625100759D53 /* event_kevent.c in Sources */, @@ -2266,6 +2600,7 @@ 
E4B515C0164B2DA300E003AF /* init.c in Sources */, E4B515C5164B2DA300E003AF /* object.c in Sources */, E4B515CC164B2DA300E003AF /* object.m in Sources */, + 6E9C6CAB20F9848E00EA81C0 /* yield.c in Sources */, E43A72871AF85BCD00BAA921 /* block.cpp in Sources */, 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */, E4B515C2164B2DA300E003AF /* semaphore.c in Sources */, @@ -2310,6 +2645,7 @@ E4EC121A12514715000DDBD1 /* queue.c in Sources */, E4EC121D12514715000DDBD1 /* apply.c in Sources */, E4EC122012514715000DDBD1 /* source.c in Sources */, + 6E9C6CA920F9848D00EA81C0 /* yield.c in Sources */, 6E4BACC31D48A42100B562AE /* mach.c in Sources */, 6EA962991D48622800759D53 /* event.c in Sources */, 6EA962A11D48625100759D53 /* event_kevent.c in Sources */, @@ -2335,6 +2671,16 @@ target = 6E040C621C499B1B00411A2E /* libfirehose_kernel */; targetProxy = 6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */; }; + 6EE5083B21701B9100833569 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4B51595164B2DA300E003AF /* libdispatch introspection */; + targetProxy = 6EE5083A21701B9100833569 /* PBXContainerItemProxy */; + }; + 6EE5083D21701B9600833569 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4B51595164B2DA300E003AF /* libdispatch introspection */; + targetProxy = 6EE5083C21701B9600833569 /* PBXContainerItemProxy */; + }; 6EF0B27E1BA8C5BF007FA4F6 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 6EB4E4081BA8BCAD00D7B9D2 /* libfirehose_server */; @@ -2380,6 +2726,16 @@ target = D2AAC045055464E500DB518D /* libdispatch */; targetProxy = E4128E4913B94BCE00ABB2CB /* PBXContainerItemProxy */; }; + E43B88292241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; + targetProxy = E43B882A2241F19000215272 /* PBXContainerItemProxy */; + }; + E43B882B2241F19000215272 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 
E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */; + targetProxy = E43B882C2241F19000215272 /* PBXContainerItemProxy */; + }; E47D6ECD125FEBA10070D91C /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = E4EC121612514715000DDBD1 /* libdispatch mp resolved */; @@ -2390,11 +2746,6 @@ target = E49BB6CE1E70748100868613 /* libdispatch armv81 resolved */; targetProxy = E49BB6F71E7074C100868613 /* PBXContainerItemProxy */; }; - E4B515DB164B317700E003AF /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = E4B51595164B2DA300E003AF /* libdispatch introspection */; - targetProxy = E4B515DA164B317700E003AF /* PBXContainerItemProxy */; - }; /* End PBXTargetDependency section */ /* Begin XCBuildConfiguration section */ @@ -2466,6 +2817,34 @@ }; name = Debug; }; + 6E435542215B5D9D00C13177 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6E435543215B5D9D00C13177 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 6EA833C42162D6380045EFDC /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 6EA833C52162D6380045EFDC /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; 6EB4E40B1BA8BCAD00D7B9D2 /* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */; @@ -2562,6 +2941,30 @@ }; name = Debug; }; + E43B88982241F19000215272 /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + DRIVERKIT = 1; + DRIVERKITROOT = /System/DriverKit; + SDKROOT = driverkit.internal; + SUPPORTED_PLATFORMS = macosx; + }; + name = Release; + }; + 
E43B88992241F19000215272 /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD)"; + DRIVERKIT = 1; + DRIVERKITROOT = /System/DriverKit; + SDKROOT = driverkit.internal; + SUPPORTED_PLATFORMS = macosx; + }; + name = Debug; + }; E49BB6F01E70748100868613 /* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */; @@ -2700,6 +3103,24 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 6E435541215B5D9D00C13177 /* Build configuration list for PBXAggregateTarget "libdispatch_introspection" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6E435542215B5D9D00C13177 /* Release */, + 6E435543215B5D9D00C13177 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 6EA833C32162D6380045EFDC /* Build configuration list for PBXAggregateTarget "libdispatch_introspection_Sim" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 6EA833C42162D6380045EFDC /* Release */, + 6EA833C52162D6380045EFDC /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; 6EB4E40A1BA8BCAD00D7B9D2 /* Build configuration list for PBXNativeTarget "libfirehose_server" */ = { isa = XCConfigurationList; buildConfigurations = ( @@ -2763,6 +3184,15 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + E43B88972241F19000215272 /* Build configuration list for PBXNativeTarget "libdispatch_driverkit" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + E43B88982241F19000215272 /* Release */, + E43B88992241F19000215272 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; E49BB6EF1E70748100868613 /* Build configuration list for PBXNativeTarget "libdispatch armv81 resolved" */ = { isa = 
XCConfigurationList; buildConfigurations = ( diff --git a/m4/atomic.m4 b/m4/atomic.m4 deleted file mode 100644 index ba85004db..000000000 --- a/m4/atomic.m4 +++ /dev/null @@ -1,21 +0,0 @@ -AC_DEFUN([DISPATCH_C_ATOMIC_BUILTINS], [ -# -# This is a bit subtle: on i386 systems without at least -march=i486 defined, -# certain built-in atomics fall back to depending on undefined symbols if -# their return values are used. -# -AC_CACHE_CHECK([for gcc atomic builtins],[dispatch_cv_atomic], -[AC_LINK_IFELSE([AC_LANG_PROGRAM([],[[ -int i, x =0; -i = __sync_add_and_fetch(&x,1); -return x;]])],[dispatch_cv_atomic=yes], - [saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -march=i486" - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[[ - int i, x =0; - i = __sync_add_and_fetch(&x,1); - return x;]])],[CFLAGS="$saveCFLAGS" -dispatch_cv_atomic="-march=i486" -])])]) - -]) diff --git a/m4/blocks.m4 b/m4/blocks.m4 deleted file mode 100644 index 38a8610fc..000000000 --- a/m4/blocks.m4 +++ /dev/null @@ -1,132 +0,0 @@ -AC_DEFUN([DISPATCH_C_BLOCKS], [ -# -# Allow configure to be passed a path to the directory where it should look -# for the Blocks runtime library, if any. -# -AC_ARG_WITH([blocks-runtime], - [AS_HELP_STRING([--with-blocks-runtime], - [Specify path to the blocks runtime])], - [blocks_runtime=${withval} - LIBS="$LIBS -L$blocks_runtime"] -) - -# -# Configure argument to enable/disable using an embedded blocks runtime -# -AC_ARG_ENABLE([embedded_blocks_runtime], - [AS_HELP_STRING([--enable-embedded-blocks-runtime], - [Embed blocks runtime in libdispatch [default=yes on Linux, default=no on all other platforms]])],, - [case $target_os in - linux*) - enable_embedded_blocks_runtime=yes - ;; - *) - enable_embedded_blocks_runtime=no - esac] -) - -# -# Detect compiler support for Blocks; perhaps someday -fblocks won't be -# required, in which case we'll need to change this. 
-# -AC_CACHE_CHECK([for C Blocks support], [dispatch_cv_cblocks], [ - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -fblocks" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[(void)^{int i; i = 0; }();])], [ - CFLAGS="$saveCFLAGS" - dispatch_cv_cblocks="-fblocks" - ], [ - CFLAGS="$saveCFLAGS" - dispatch_cv_cblocks="no" - ]) -]) - -AS_IF([test "x$dispatch_cv_cblocks" != "xno"], [ - CBLOCKS_FLAGS="$dispatch_cv_cblocks" - - AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ - # - # It may be necessary to directly link the Blocks runtime on some - # systems, so give it a try if we can't link a C program that uses - # Blocks. We will want to remove this at somepoint, as really -fblocks - # should force that linkage already. - # - saveCFLAGS="$CFLAGS" - CFLAGS="$CFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) - ]) - CFLAGS="$saveCFLAGS" - have_cblocks=true -], [ - CBLOCKS_FLAGS="" - have_cblocks=false -]) -AM_CONDITIONAL(HAVE_CBLOCKS, $have_cblocks) -AC_SUBST([CBLOCKS_FLAGS]) -AM_CONDITIONAL([BUILD_OWN_BLOCKS_RUNTIME], [test "x$enable_embedded_blocks_runtime" = "xyes"]) - -# -# Because a different C++ compiler may be specified than C compiler, we have -# to do it again for C++. 
-# -AC_LANG_PUSH([C++]) -AC_CACHE_CHECK([for C++ Blocks support], [dispatch_cv_cxxblocks], [ - saveCXXFLAGS="$CXXFLAGS" - CXXFLAGS="$CXXFLAGS -fblocks" - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],[(void)^{int i; i = 0; }();])], [ - CXXFLAGS="$saveCXXFLAGS" - dispatch_cv_cxxblocks="-fblocks" - ], [ - CXXFLAGS="$saveCXXFLAGS" - dispatch_cv_cxxblocks="no" - ]) -]) - -AS_IF([test "x$dispatch_cv_cxxblocks" != "xno"], [ - CXXBLOCKS_FLAGS="$dispatch_cv_cxxblocks" - - AS_IF([test "x$enable_embedded_blocks_runtime" != "xyes"], [ - saveCXXFLAGS="$CXXFLAGS" - CXXFLAGS="$CXXFLAGS -fblocks -O0" - AC_MSG_CHECKING([whether additional libraries are required for the Blocks runtime]) - AC_TRY_LINK([], [ - ^{ int j; j=0; }(); - ], [ - AC_MSG_RESULT([no]); - ], [ - saveLIBS="$LIBS" - LIBS="$LIBS -lBlocksRuntime" - AC_TRY_LINK([], [ - ^{ int k; k=0; }(); - ], [ - AC_MSG_RESULT([-lBlocksRuntime]) - ], [ - AC_MSG_ERROR([can't find Blocks runtime]) - ]) - ]) - ]) - CXXFLAGS="$saveCXXFLAGS" - have_cxxblocks=true -], [ - CXXBLOCKS_FLAGS="" - have_cxxblocks=false -]) -AC_LANG_POP([C++]) -AM_CONDITIONAL(HAVE_CXXBLOCKS, $have_cxxblocks) -AC_SUBST([CXXBLOCKS_FLAGS]) -]) diff --git a/m4/pkg.m4 b/m4/pkg.m4 deleted file mode 100644 index a0b9cd45d..000000000 --- a/m4/pkg.m4 +++ /dev/null @@ -1,155 +0,0 @@ -# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- -# -# Copyright © 2004 Scott James Remnant . -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# PKG_PROG_PKG_CONFIG([MIN-VERSION]) -# ---------------------------------- -AC_DEFUN([PKG_PROG_PKG_CONFIG], -[m4_pattern_forbid([^_?PKG_[A-Z_]+$]) -m4_pattern_allow([^PKG_CONFIG(_PATH)?$]) -AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl -if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then - AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) -fi -if test -n "$PKG_CONFIG"; then - _pkg_min_version=m4_default([$1], [0.9.0]) - AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) - if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - PKG_CONFIG="" - fi - -fi[]dnl -])# PKG_PROG_PKG_CONFIG - -# PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) -# -# Check to see whether a particular set of modules exists. Similar -# to PKG_CHECK_MODULES(), but does not set variables or print errors. 
-# -# -# Similar to PKG_CHECK_MODULES, make sure that the first instance of -# this or PKG_CHECK_MODULES is called, or make sure to call -# PKG_CHECK_EXISTS manually -# -------------------------------------------------------------- -AC_DEFUN([PKG_CHECK_EXISTS], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl -if test -n "$PKG_CONFIG" && \ - AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then - m4_ifval([$2], [$2], [:]) -m4_ifvaln([$3], [else - $3])dnl -fi]) - - -# _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) -# --------------------------------------------- -m4_define([_PKG_CONFIG], -[if test -n "$$1"; then - pkg_cv_[]$1="$$1" - elif test -n "$PKG_CONFIG"; then - PKG_CHECK_EXISTS([$3], - [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`], - [pkg_failed=yes]) - else - pkg_failed=untried -fi[]dnl -])# _PKG_CONFIG - -# _PKG_SHORT_ERRORS_SUPPORTED -# ----------------------------- -AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then - _pkg_short_errors_supported=yes -else - _pkg_short_errors_supported=no -fi[]dnl -])# _PKG_SHORT_ERRORS_SUPPORTED - - -# PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], -# [ACTION-IF-NOT-FOUND]) -# -# -# Note that if there is a possibility the first call to -# PKG_CHECK_MODULES might not happen, you should be sure to include an -# explicit call to PKG_PROG_PKG_CONFIG in your configure.ac -# -# -# -------------------------------------------------------------- -AC_DEFUN([PKG_CHECK_MODULES], -[AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl -AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl -AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl - -pkg_failed=no -AC_MSG_CHECKING([for $1]) - -_PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) -_PKG_CONFIG([$1][_LIBS], [libs], [$2]) - -m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS -and $1[]_LIBS to avoid the need to call pkg-config. 
-See the pkg-config man page for more details.]) - -if test $pkg_failed = yes; then - _PKG_SHORT_ERRORS_SUPPORTED - if test $_pkg_short_errors_supported = yes; then - $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1` - else - $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1` - fi - # Put the nasty error message in config.log where it belongs - echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD - - ifelse([$4], , [AC_MSG_ERROR(dnl -[Package requirements ($2) were not met: - -$$1_PKG_ERRORS - -Consider adjusting the PKG_CONFIG_PATH environment variable if you -installed software in a non-standard prefix. - -_PKG_TEXT -])], - [AC_MSG_RESULT([no]) - $4]) -elif test $pkg_failed = untried; then - ifelse([$4], , [AC_MSG_FAILURE(dnl -[The pkg-config script could not be found or is too old. Make sure it -is in your PATH or set the PKG_CONFIG environment variable to the full -path to pkg-config. - -_PKG_TEXT - -To get pkg-config, see .])], - [$4]) -else - $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS - $1[]_LIBS=$pkg_cv_[]$1[]_LIBS - AC_MSG_RESULT([yes]) - ifelse([$3], , :, [$3]) -fi[]dnl -])# PKG_CHECK_MODULES diff --git a/man/Makefile.am b/man/Makefile.am deleted file mode 100644 index 3ca6946ce..000000000 --- a/man/Makefile.am +++ /dev/null @@ -1,152 +0,0 @@ -# -# -# - -if !HAVE_SWIFT -dist_man3_MANS= \ - dispatch.3 \ - dispatch_after.3 \ - dispatch_api.3 \ - dispatch_apply.3 \ - dispatch_async.3 \ - dispatch_data_create.3 \ - dispatch_group_create.3 \ - dispatch_io_create.3 \ - dispatch_io_read.3 \ - dispatch_object.3 \ - dispatch_once.3 \ - dispatch_queue_create.3 \ - dispatch_read.3 \ - dispatch_semaphore_create.3 \ - dispatch_source_create.3 \ - dispatch_time.3 - -EXTRA_DIST= \ - dispatch_benchmark.3 - -# -# Install man page hardlinks. Is there a better way to do this in automake? 
-# - -LN=ln - -install-data-hook: - cd $(DESTDIR)$(mandir)/man3 && \ - $(LN) -f dispatch_after.3 dispatch_after_f.3 && \ - $(LN) -f dispatch_apply.3 dispatch_apply_f.3 && \ - $(LN) -f dispatch_async.3 dispatch_sync.3 && \ - $(LN) -f dispatch_async.3 dispatch_async_f.3 && \ - $(LN) -f dispatch_async.3 dispatch_sync_f.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_enter.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_leave.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_wait.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_notify.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_notify_f.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_async.3 && \ - $(LN) -f dispatch_group_create.3 dispatch_group_async_f.3 && \ - $(LN) -f dispatch_object.3 dispatch_retain.3 && \ - $(LN) -f dispatch_object.3 dispatch_release.3 && \ - $(LN) -f dispatch_object.3 dispatch_suspend.3 && \ - $(LN) -f dispatch_object.3 dispatch_resume.3 && \ - $(LN) -f dispatch_object.3 dispatch_get_context.3 && \ - $(LN) -f dispatch_object.3 dispatch_set_context.3 && \ - $(LN) -f dispatch_object.3 dispatch_set_finalizer_f.3 && \ - $(LN) -f dispatch_once.3 dispatch_once_f.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_queue_get_label.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_get_current_queue.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_get_global_queue.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_get_main_queue.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_main.3 && \ - $(LN) -f dispatch_queue_create.3 dispatch_set_target_queue.3 && \ - $(LN) -f dispatch_semaphore_create.3 dispatch_semaphore_signal.3 && \ - $(LN) -f dispatch_semaphore_create.3 dispatch_semaphore_wait.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_event_handler_f.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_registration_handler.3 && \ - $(LN) -f 
dispatch_source_create.3 dispatch_source_set_registration_handler_f.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_cancel_handler.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_cancel_handler_f.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_cancel.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_testcancel.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_get_handle.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_get_mask.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_get_data.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_merge_data.3 && \ - $(LN) -f dispatch_source_create.3 dispatch_source_set_timer.3 && \ - $(LN) -f dispatch_time.3 dispatch_walltime.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_create_concat.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_create_subrange.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_create_map.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_apply.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_copy_region.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_get_size.3 && \ - $(LN) -f dispatch_data_create.3 dispatch_data_empty.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_create_with_path.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_set_high_water.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_set_low_water.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_set_interval.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_close.3 && \ - $(LN) -f dispatch_io_create.3 dispatch_io_barrier.3 && \ - $(LN) -f dispatch_io_read.3 dispatch_io_write.3 && \ - $(LN) -f dispatch_read.3 dispatch_write.3 - -uninstall-hook: - cd $(DESTDIR)$(mandir)/man3 && \ - rm -f dispatch_after_f.3 \ - dispatch_apply_f.3 \ - dispatch_sync.3 \ - dispatch_async_f.3 \ - dispatch_sync_f.3 \ - dispatch_group_enter.3 \ - dispatch_group_leave.3 \ - dispatch_group_wait.3 \ - dispatch_group_notify.3 \ - 
dispatch_group_notify_f.3 \ - dispatch_group_async.3 \ - dispatch_group_async_f.3 \ - dispatch_retain.3 \ - dispatch_release.3 \ - dispatch_suspend.3 \ - dispatch_resume.3 \ - dispatch_get_context.3 \ - dispatch_set_context.3 \ - dispatch_set_finalizer_f.3 \ - dispatch_once_f.3 \ - dispatch_queue_get_label.3 \ - dispatch_get_current_queue.3 \ - dispatch_get_global_queue.3 \ - dispatch_get_main_queue.3 \ - dispatch_main.3 \ - dispatch_set_target_queue.3 \ - dispatch_semaphore_signal.3 \ - dispatch_semaphore_wait.3 \ - dispatch_source_set_event_handler.3 \ - dispatch_source_set_event_handler_f.3 \ - dispatch_source_set_registration_handler.3 \ - dispatch_source_set_registration_handler_f.3 \ - dispatch_source_set_cancel_handler.3 \ - dispatch_source_set_cancel_handler_f.3 \ - dispatch_source_cancel.3 \ - dispatch_source_testcancel.3 \ - dispatch_source_get_handle.3 \ - dispatch_source_get_mask.3 \ - dispatch_source_get_data.3 \ - dispatch_source_merge_data.3 \ - dispatch_source_set_timer.3 \ - dispatch_walltime.3 \ - dispatch_data_create_concat.3 \ - dispatch_data_create_subrange.3 \ - dispatch_data_create_map.3 \ - dispatch_data_apply.3 \ - dispatch_data_copy_region.3 \ - dispatch_data_get_size.3 \ - dispatch_data_empty.3 \ - dispatch_io_create_with_path.3 \ - dispatch_io_set_high_water.3 \ - dispatch_io_set_low_water.3 \ - dispatch_io_set_interval.3 \ - dispatch_io_close.3 \ - dispatch_io_barrier.3 \ - dispatch_io_write.3 \ - dispatch_write.3 -endif diff --git a/os/CMakeLists.txt b/os/CMakeLists.txt index 6e2b41518..2c4d32e66 100644 --- a/os/CMakeLists.txt +++ b/os/CMakeLists.txt @@ -4,7 +4,7 @@ install(FILES object.h - linux_base.h + generic_unix_base.h DESTINATION - "${CMAKE_INSTALL_FULL_INCLUDEDIR}/os") + "${INSTALL_OS_HEADERS_DIR}") diff --git a/os/Makefile.am b/os/Makefile.am deleted file mode 100644 index d009a3753..000000000 --- a/os/Makefile.am +++ /dev/null @@ -1,18 +0,0 @@ -# -# -# - -if HAVE_SWIFT -osdir=${prefix}/lib/swift/os -else 
-osdir=$(includedir)/os -endif - -os_HEADERS= \ - object.h \ - linux_base.h - -noinst_HEADERS= \ - object_private.h \ - voucher_activity_private.h \ - voucher_private.h diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index 221ecb38e..bab44824b 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -58,7 +58,8 @@ OS_OBJECT_DECL_CLASS(firehose_client); * This is the first event delivered, and no event is delivered until * the handler of that event returns * - * The `page` argument really is really a firehose_client_connected_info_t. + * The `page` argument is really a firehose_client_connected_info_t. The + * `fc_pos` argument is not meaningful. * * @const FIREHOSE_EVENT_CLIENT_DIED * The specified client is gone and will not flush new buffers @@ -68,21 +69,23 @@ OS_OBJECT_DECL_CLASS(firehose_client); * FIREHOSE_EVENT_CLIENT_CORRUPTED event has been generated. * * @const FIREHOSE_EVENT_IO_BUFFER_RECEIVED - * A new buffer needs to be pushed, `page` is set to that buffer. + * A new buffer needs to be pushed; `page` is set to that buffer, and `fc_pos` + * to its chunk position header. * * This event can be sent concurrently wrt FIREHOSE_EVENT_MEM_BUFFER_RECEIVED * events. * * @const FIREHOSE_EVENT_MEM_BUFFER_RECEIVED - * A new buffer needs to be pushed, `page` is set to that buffer. + * A new buffer needs to be pushed; `page` is set to that buffer, and `fc_pos` + * to its chunk position header. * * This event can be sent concurrently wrt FIREHOSE_EVENT_IO_BUFFER_RECEIVED * events. * * @const FIREHOSE_EVENT_CLIENT_CORRUPTED * This event is received when a client is found being corrupted. - * `page` is set to the buffer header page. When this event is received, - * logs have likely been lost for this client. + * `page` is set to the buffer header page, and `fc_pos` is not meaningful. When + * this event is received, logs have likely been lost for this client. 
* * This buffer isn't really a proper firehose buffer page, but its content may * be useful for debugging purposes. @@ -90,7 +93,8 @@ OS_OBJECT_DECL_CLASS(firehose_client); * @const FIREHOSE_EVENT_CLIENT_FINALIZE * This event is received when a firehose client structure is about to be * destroyed. Only firehose_client_get_context() can ever be called with - * the passed firehose client. The `page` argument is NULL for this event. + * the passed firehose client. The `page` argument is NULL for this event, and + * the `fc_pos` argument is not meaningful. * * The event is sent from the context that is dropping the last refcount * of the client. @@ -200,6 +204,19 @@ OS_NOTHROW OS_NONNULL1 void * firehose_client_get_context(firehose_client_t client); +/*! + * @function firehose_client_set_strings_cached + * + * @abstract + * Marks a given client as having strings cached already. + * + * @param client + * The specified client. + */ +OS_NOTHROW OS_NONNULL1 +void +firehose_client_set_strings_cached(firehose_client_t client); + /*! * @function firehose_client_set_context * @@ -289,7 +306,8 @@ firehose_client_metadata_stream_peek(firehose_client_t client, * Type of the handler block for firehose_server_init() */ typedef void (^firehose_handler_t)(firehose_client_t client, - firehose_event_t event, firehose_chunk_t page); + firehose_event_t event, firehose_chunk_t page, + firehose_chunk_pos_u fc_pos); /*! * @function firehose_server_init @@ -381,6 +399,7 @@ OS_ENUM(firehose_server_queue, unsigned long, FIREHOSE_SERVER_QUEUE_UNKNOWN, FIREHOSE_SERVER_QUEUE_IO, FIREHOSE_SERVER_QUEUE_MEMORY, + FIREHOSE_SERVER_QUEUE_IO_WL, ); /*! @@ -443,7 +462,8 @@ OS_ENUM(firehose_snapshot_event, unsigned long, * Type of the handler block for firehose_snapshot */ typedef void (^firehose_snapshot_handler_t)(firehose_client_t client, - firehose_snapshot_event_t event, firehose_chunk_t page); + firehose_snapshot_event_t event, firehose_chunk_t page, + firehose_chunk_pos_u fc_pos); /*! 
* @function firehose_snapshot diff --git a/os/linux_base.h b/os/generic_unix_base.h similarity index 89% rename from os/linux_base.h rename to os/generic_unix_base.h index 58b497148..aaf6f8504 100644 --- a/os/linux_base.h +++ b/os/generic_unix_base.h @@ -5,17 +5,22 @@ * * Licensed under Apache License v2.0 with Runtime Library Exception * - * See http://swift.org/LICENSE.txt for license information - * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * See https://swift.org/LICENSE.txt for license information + * See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors * */ -#ifndef __OS_LINUX_BASE__ -#define __OS_LINUX_BASE__ +#ifndef __OS_GENERIC_UNIX_BASE__ +#define __OS_GENERIC_UNIX_BASE__ #if __has_include() #include #endif + +#if defined(__FreeBSD__) +#include +#include +#endif #include #if __has_include() @@ -120,4 +125,4 @@ enum { __VA_ARGS__ }; typedef _type _name##_t #endif #define OS_NOTHROW -#endif /* __OS_LINUX_BASE__ */ +#endif /* __OS_GENERIC_UNIX_BASE__ */ diff --git a/os/generic_win_base.h b/os/generic_win_base.h new file mode 100644 index 000000000..afc5f4265 --- /dev/null +++ b/os/generic_win_base.h @@ -0,0 +1,132 @@ +/* + * This source file is part of the Swift.org open source project + * + * Copyright (c) 2015 Apple Inc. 
and the Swift project authors + * + * Licensed under Apache License v2.0 with Runtime Library Exception + * + * See https://swift.org/LICENSE.txt for license information + * See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * + */ + +#ifndef __OS_GENERIC_WIN_BASE__ +#define __OS_GENERIC_WIN_BASE__ + +// Unices provide `roundup` via sys/param.h +#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +// Unices provide `MAX` via sys/param.h +#define MAX(a,b) (((a)>(b))?(a):(b)) +// Unices provide `MIN` via sys/param.h +#define MIN(a,b) (((a)<(b))?(a):(b)) +// Unices provide `howmany` via sys/param.h +#define howmany(x, y) (((x) + ((y) - 1)) / (y)) + +typedef int mode_t; +typedef void pthread_attr_t; + +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif + +#ifndef API_AVAILABLE +#define API_AVAILABLE(...) +#endif +#ifndef API_DEPRECATED +#define API_DEPRECATED(...) +#endif +#ifndef API_UNAVAILABLE +#define API_UNAVAILABLE(...) +#endif +#ifndef API_DEPRECATED_WITH_REPLACEMENT +#define API_DEPRECATED_WITH_REPLACEMENT(...) 
+#endif + +#if !defined(__has_attribute) +#define __has_attribute(attibute) 0 +#endif + +#if !defined(__has_builtin) +#define __has_builtin(builtin) 0 +#endif + +#if !defined(__has_feature) +#define __has_feature(feature) 0 +#endif + +#if __has_builtin(__builtin_expect) +#define OS_EXPECT(expression, value) __builtin_expect((expression), (value)) +#else +#define OS_EXPECT(expression, value) (expression) +#endif + +#if __has_attribute(__unused__) +#define OS_UNUSED __attribute__((__unused__)) +#else +#define OS_UNUSED +#endif + +#ifndef os_likely +#define os_likely(expression) OS_EXPECT(!!(expression), 1) +#endif +#ifndef os_unlikely +#define os_unlikely(expression) OS_EXPECT(!!(expression), 0) +#endif + +#if __has_feature(assume_nonnull) +#define OS_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_ASSUME_NONNULL_BEGIN +#define OS_ASSUME_NONNULL_END +#endif + +#if __has_builtin(__builtin_assume) +#define OS_COMPILER_CAN_ASSUME(expr) __builtin_assume(expr) +#else +#define OS_COMPILER_CAN_ASSUME(expr) ((void)(expr)) +#endif + +#if __has_feature(attribute_availability_swift) +// equivalent to __SWIFT_UNAVAILABLE from Availability.h +#define OS_SWIFT_UNAVAILABLE(msg) \ + __attribute__((__availability__(swift, unavailable, message = msg))) +#else +#define OS_SWIFT_UNAVAILABLE(msg) +#endif + +#define __OS_STRINGIFY(s) #s +#define OS_STRINGIFY(s) __OS_STRINGIFY(s) + +#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) +#define OS_ENUM(name, type, ...) typedef enum : type { __VA_ARGS__ } name##_t +#else +#define OS_ENUM(name, type, ...) 
\ + enum { __VA_ARGS__ }; \ + typedef type name##_t +#endif + +#ifdef OS_EXPORT +#undef OS_EXPORT +#endif +#define OS_EXPORT __declspec(dllexport) + +#ifdef OS_WARN_RESULT_NEEDS_RELEASE +#undef OS_WARN_RESULT_NEEDS_RELEASE +#endif + +#ifdef OS_WARN_RESULT +#undef OS_WARN_RESULT +#endif +#define OS_WARN_RESULT + +#ifdef OS_NOTHROW +#undef OS_NOTHROW +#endif +#define OS_NOTHROW + +#endif diff --git a/os/object.h b/os/object.h index 100721fc0..2979de891 100644 --- a/os/object.h +++ b/os/object.h @@ -26,8 +26,10 @@ #include #include #include -#elif defined(__linux__) -#include +#elif defined(_WIN32) +#include +#elif defined(__unix__) +#include #endif /*! @@ -89,12 +91,11 @@ #endif #ifndef OS_OBJECT_SWIFT3 -#if defined(SWIFT_SDK_OVERLAY_DISPATCH_EPOCH) && \ - SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#ifdef __swift__ #define OS_OBJECT_SWIFT3 1 -#else +#else // __swift__ #define OS_OBJECT_SWIFT3 0 -#endif // SWIFT_SDK_OVERLAY_DISPATCH_EPOCH >= 2 +#endif // __swift__ #endif // OS_OBJECT_SWIFT3 #if OS_OBJECT_USE_OBJC diff --git a/os/object_private.h b/os/object_private.h index 215c3d146..003369ecc 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -86,7 +86,7 @@ #endif #define OS_OBJECT_OBJC_CLASS_DECL(name) \ extern void *OS_OBJECT_CLASS_SYMBOL(name) \ - asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + __asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) #else #define OS_OBJECT_HAVE_OBJC1 0 #define OS_OBJECT_HAVE_OBJC2 0 @@ -97,10 +97,26 @@ #define OS_OBJECT_CLASS(name) OS_##name +#if OS_OBJECT_USE_OBJC +#define OS_OBJECT_USES_XREF_DISPOSE() \ + - (oneway void)release { \ + _os_object_release(self); \ + } +#endif + +#if __has_attribute(objc_nonlazy_class) +#define OS_OBJECT_NONLAZY_CLASS __attribute__((objc_nonlazy_class)) +#define OS_OBJECT_NONLAZY_CLASS_LOAD +#else +#define OS_OBJECT_NONLAZY_CLASS +#define OS_OBJECT_NONLAZY_CLASS_LOAD + (void)load { } +#endif + #if OS_OBJECT_USE_OBJC && OS_OBJECT_SWIFT3 @interface OS_OBJECT_CLASS(object) 
(OSObjectPrivate) +// Note: objects who want _xref_dispose to be called need +// to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; -- (void)_dispose; @end OS_OBJECT_DECL_PROTOCOL(object, ); typedef OS_OBJECT_CLASS(object) *_os_object_t; @@ -116,8 +132,9 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT @interface OS_OBJECT_CLASS(object) : NSObject +// Note: objects who want _xref_dispose to be called need +// to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; -- (void)_dispose; @end typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ @@ -172,6 +189,12 @@ OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release(_os_object_t object); +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW +OS_SWIFT_UNAVAILABLE("Unavailable in Swift") +void +_os_object_release_without_xref_dispose(_os_object_t object); + API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") @@ -184,13 +207,13 @@ OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void _os_object_release_internal(_os_object_t object); -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t _os_object_retain_internal_n(_os_object_t object, uint16_t n); -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") void diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index ed7a53153..706ae75f1 100644 --- a/os/voucher_activity_private.h +++ 
b/os/voucher_activity_private.h @@ -26,7 +26,7 @@ #include #include #endif -#ifndef __linux__ +#if __APPLE__ #include #include #endif @@ -154,7 +154,7 @@ voucher_get_activity_id_and_creator(voucher_t voucher, uint64_t *creator_pid, * @result * A new voucher with an activity identifier. */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.12.4), ios(10.3), tvos(10.2), watchos(3.2)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, @@ -162,7 +162,7 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, const void *pubdata, size_t publen); API_DEPRECATED_WITH_REPLACEMENT("voucher_activity_create_with_data", - macos(10.12,10.12), ios(10.0,10.0), tvos(10.0,10.0), watchos(3.0,3.0)) + macos(10.12,10.12.4), ios(10.0,10.3), tvos(10.0,10.2), watchos(3.0,3.2)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, @@ -183,7 +183,7 @@ voucher_activity_create_with_location(firehose_tracepoint_id_t *trace_id, * The bottom-most 8 bits of the flags will be used to generate the ID. * See firehose_activity_flags_t. */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) OS_VOUCHER_EXPORT OS_NOTHROW firehose_activity_id_t voucher_activity_id_allocate(firehose_activity_flags_t flags); @@ -264,7 +264,7 @@ voucher_activity_trace(firehose_stream_t stream, * Length of data to read from the iovec after the public data for the private * data. 
*/ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.12.4), ios(10.3), tvos(10.2), watchos(3.2)) OS_VOUCHER_EXPORT OS_NOTHROW OS_NONNULL4 firehose_tracepoint_id_t voucher_activity_trace_v(firehose_stream_t stream, @@ -343,6 +343,17 @@ OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL_ALL void * voucher_activity_get_logging_preferences(size_t *length); +/*! + * @function voucher_activity_should_send_strings + * + * @abstract + * Returns whether the client should send the strings or not. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +bool +voucher_activity_should_send_strings(void); + /*! * @function voucher_get_activity_id_4dyld * diff --git a/os/voucher_private.h b/os/voucher_private.h index 164aa3c5b..ad4e31274 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -21,7 +21,7 @@ #ifndef __OS_VOUCHER_PRIVATE__ #define __OS_VOUCHER_PRIVATE__ -#ifndef __linux__ +#if __APPLE__ #include #include #endif @@ -364,7 +364,7 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * Deprecated, do not use, will abort process if called. */ API_DEPRECATED("removed SPI", \ - macos(10.11,10.12), ios(9.0,10.0), watchos(2.0,3.0), tvos(9.0,10.0)) + macos(10.11,10.13), ios(9.0,11.0), watchos(2.0,4.0), tvos(9.0,11.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t @@ -445,11 +445,11 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg); * representation. */ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW DISPATCH_COLD size_t voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, size_t bufsiz, size_t offset, char * _Nullable prefix, - size_t max_hex_data); + size_t max_hex_data) ; /*! 
* @group Voucher Persona SPI @@ -462,26 +462,24 @@ struct proc_persona_info; * @function voucher_get_current_persona * * @abstract - * Retrieve the persona identifier of the 'originator' process for the current - * voucher. + * Returns the persona identifier for the current thread. * * @discussion - * Retrieve the persona identifier of the ’originator’ process possibly stored - * in the PERSONA_TOKEN attribute of the currently adopted voucher. + * Retrieve the persona identifier from the currently adopted voucher. * * If the thread has not adopted a voucher, or the current voucher does not - * contain a PERSONA_TOKEN attribute, this function returns the persona - * identifier of the current process. + * contain persona information, this function returns the persona identifier + * of the current process. * * If the process is not running under a persona, then this returns * PERSONA_ID_NONE. * * @result - * The persona identifier of the 'originator' process for the current voucher, + * The persona identifier for the current voucher, * or the persona identifier of the current process * or PERSONA_ID_NONE */ -API_AVAILABLE(ios(9.2)) +API_AVAILABLE(macos(10.14), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -504,7 +502,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(ios(9.2)) +API_AVAILABLE(macos(10.14), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -528,12 +526,108 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(ios(9.2)) +API_AVAILABLE(macos(10.14), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( struct proc_persona_info 
*persona_info); +/*! + * @function voucher_copy_with_persona_mach_voucher + * + * @abstract + * Creates a copy of the currently adopted voucher and replaces its + * persona information with the one passed in the specified mach voucher + * + * @discussion + * If the specified mach voucher is not one returned from + * mach_voucher_persona_for_originator() (called on behalf + * of the current process), this function will fail + * + * @param persona_mach_voucher + * mach voucher containing the new persona information + * + * @result + * On success, a copy of the current voucher with the new + * persona information + * On failure, VOUCHER_INVALID + */ +API_AVAILABLE(macos(10.14), ios(12)) +OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW +voucher_t _Nullable +voucher_copy_with_persona_mach_voucher( + mach_voucher_t persona_mach_voucher); + +/*! + * @function mach_voucher_persona_self + * + * @abstract + * Creates a mach voucher containing the persona information of the + * current process that can be sent as a mach port descriptor in a message + * + * @discussion + * The returned mach voucher has been pre-processed so that it can be sent + * in a message + * + * @param persona_mach_voucher + * If successful, a reference to the newly created mach voucher + * + * @result + * KERN_SUCCESS: a mach voucher ready to be sent in a message is + * successfully created + * KERN_RESOURCE_SHORTAGE: mach voucher creation failed due to + * lack of free space + */ +API_AVAILABLE(macos(10.14), ios(12)) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 +kern_return_t +mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher); + +/*! 
+ * @function mach_voucher_persona_for_originator + * + * @abstract + * Creates a mach voucher on behalf of the originator process by copying + * the persona information from the specified mach voucher and then + * updating the persona identifier to the specified value + * + * @discussion + * Should be called by a privileged process on behalf of the originator process. + * The newly created mach voucher should be returned to the originator in a + * message. The originator's thread can adopt the new persona by passing + * this mach voucher to voucher_copy_with_persona_mach_voucher(). + * + * @param persona_id + * The new persona identifier to be set in the mach voucher + * + * @param originator_persona_mach_voucher + * A mach voucher received from the originator, where it was created using + * mach_voucher_persona_self() + * + * @param originator_unique_pid + * Unique pid of the originator process + * + * @param persona_mach_voucher + * If successful, a reference to the newly created mach voucher + * + * @result + * KERN_SUCCESS: a mach voucher ready to be returned to the + * originator was successfully created + * KERN_NO_ACCESS: process does not have privilege to carry + * out this operation + * KERN_INVALID_ARGUMENT: specified persona identifier is invalid + * KERN_INVALID_CAPABILITY: originator_unique_pid does not + * match the specified voucher originator's unique pid + * KERN_RESOURCE_SHORTAGE: mach voucher creation failed due to + * lack of free space + */ +API_AVAILABLE(macos(10.14), ios(12)) +OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL4 +kern_return_t +mach_voucher_persona_for_originator(uid_t persona_id, + mach_voucher_t originator_persona_mach_voucher, + uint64_t originator_unique_pid, mach_voucher_t *persona_mach_voucher); + #endif // __has_include() __END_DECLS diff --git a/private/CMakeLists.txt b/private/CMakeLists.txt index 18788d727..a2ee9bdd9 100644 --- a/private/CMakeLists.txt +++ b/private/CMakeLists.txt @@ -3,3 +3,17 @@ # io_private.h 
layout_private.h mach_private.h private.h queue_private.h # source_private.h are included in the source tarball +if (INSTALL_PRIVATE_HEADERS) + install(FILES + benchmark.h + data_private.h + introspection_private.h + io_private.h + layout_private.h + mach_private.h + private.h + queue_private.h + source_private.h + DESTINATION + "${INSTALL_DISPATCH_HEADERS_DIR}") +endif() diff --git a/private/Makefile.am b/private/Makefile.am deleted file mode 100644 index 98840d570..000000000 --- a/private/Makefile.am +++ /dev/null @@ -1,15 +0,0 @@ -# -# -# - -noinst_HEADERS= \ - benchmark.h \ - data_private.h \ - introspection_private.h \ - io_private.h \ - layout_private.h \ - mach_private.h \ - private.h \ - queue_private.h \ - source_private.h - diff --git a/private/channel_private.h b/private/channel_private.h new file mode 100644 index 000000000..9c2ecf626 --- /dev/null +++ b/private/channel_private.h @@ -0,0 +1,567 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. 
+ */ + +#ifndef __DISPATCH_CHANNEL_PRIVATE__ +#define __DISPATCH_CHANNEL_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN + +__BEGIN_DECLS + +#if DISPATCH_CHANNEL_SPI + +/*! + * @typedef dispatch_channel_t + * + * @abstract + */ +DISPATCH_DECL(dispatch_channel); + +typedef struct dispatch_channel_invoke_ctxt_s *dispatch_channel_invoke_ctxt_t; + +/*! @typedef dispatch_channel_callbacks_t + * + * @abstract + * Vtable used by dispatch channels (see dispatch_channel_create). + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +typedef struct dispatch_channel_callbacks_s { +#define DISPATCH_CHANNEL_CALLBACKS_VERSION 1ul + /*! @field dcc_version + * + * @abstract + * Version of the callbacks, used for binary compatibility. + * This must be set to DISPATCH_CHANNEL_CALLBACKS_VERSION + */ + unsigned long dcc_version; + + /*! @field dcc_probe + * + * @abstract + * This callback is called when GCD is considering whether it should wakeup + * the channel. + * + * @discussion + * This function may be called from ANY context. It may be called + * concurrently from several threads, it may be called concurrently with + * a call to other channel callbacks. + * + * Reasons for this function to be called include: + * - the channel became non empty, + * - the channel is receiving a Quality of Service override to resolve + * a priority inversion, + * - dispatch_activate() or dispatch_resume() was called, + * - dispatch_channel_wakeup() was called. + * + * The implementation of this callback should be idempotent, and as cheap + * as possible, avoiding taking locks if possible. A typical implementation + * will perform a single atomic state look to determine what answer to + * return. Possible races or false positives can later be debounced in + * dcc_invoke which is synchronized.
+ * + * Calling dispatch_channel_wakeup() from the context of this call is + * incorrect and will result in undefined behavior. Instead, it should be + * called in response to external events, in order to cause the channel to + * re-evaluate the `dcc_probe` hook. + * + * param channel + * The channel that is being probed. + * + * param context + * The context associated with the channel. + * + * returns + * - true if the dispatch channel can be woken up according to the other + * runtime rules + * + * - false if the dispatch channel would not be able to make progress if + * woken up. A subsequent explicit call to dispatch_channel_wakeup() will + * be required when this condition has changed. + */ + bool + (*_Nonnull dcc_probe)(dispatch_channel_t channel, void *_Nullable context); + + /*! @field dcc_invoke + * + * @abstract + * This callback is called when a dispatch channel is being drained. + * + * @discussion + * This callback is where the state machine for the channel can + * be implemented using dispatch_channel_foreach_work_item_peek() + * and dispatch_channel_drain(). + * + * Note that if this function returns true, it must have called + * dispatch_channel_drain() exactly once. It is valid not to call + * peek nor drain if false is returned. + * + * param channel + * The channel that has been invoked. + * + * param invoke_context + * An opaque data structure that must be passed back to + * dispatch_channel_foreach_work_item_peek() and dispatch_channel_drain(). + * + * param context + * The context associated with the channel. + * + * returns + * - true if the channel can drain further + * - false if an explicit call to dispatch_channel_wakeup() is required + * for the channel to be able to drain items again. A subsequent explicit + * call to dispatch_channel_wakeup() will be required when this condition + * has changed. 
+ */ + bool + (*_Nonnull dcc_invoke)(dispatch_channel_t channel, + dispatch_channel_invoke_ctxt_t invoke_context, + void *_Nullable context); + + /*! @field dcc_acknowledge_cancel + * + * @abstract + * This optional callback is called when the channel has been cancelled + * until that cancellation is acknowledged. + * + * @discussion + * If this callback isn't set, the channel cancelation is implicit and can + * be tested with dispatch_channel_testcancel(). + * + * When this callback is set, it will be called as soon as cancelation has + * been noticed. When it is called, it is called from a context serialized + * with `dcc_invoke`, or from `dcc_invoke` itself. + * + * Returning `false` causes the dispatch channel to stop its invocation + * early. A subsequent explicit call to dispatch_channel_wakeup() will be + * required when the cancellation can be acknowledged. + * + * param channel + * The channel that has been invoked. + * + * param context + * The context associated with the channel. + * + * returns + * Whether the cancellation was acknowledged. + */ + bool + (*_Nullable dcc_acknowledge_cancel)(dispatch_channel_t channel, + void *_Nullable context); +} const *dispatch_channel_callbacks_t; + +/*! @function dispatch_channel_create + * + * @abstract + * Create a dispatch channel. + * + * @discussion + * A dispatch channel is similar to a dispatch serial queue, however it will + * accept arbitrary items into the queue, as well as regular dispatch blocks + * to execute. + * + * Unlike serial queues, this object cannot be targeted by other dispatch + * objects. + * + * Dispatch channels are created in an inactive state. After creating the + * channel and setting any desired property, a call must be made to + * dispatch_activate() in order to use the object. + * + * Calling dispatch_set_target_queue() on a channel after it has been activated + * is not allowed (see dispatch_activate() and dispatch_set_target_queue()). 
+ * + * @param label + * A string label to attach to the channel. + * This parameter is optional and may be NULL. + * + * @param context + * A context to associate with the channel. It can be retrieved with + * dispatch_get_context() at any time, but should not be mutated. + * + * @param target + * The target queue for the newly created channel. The target queue is retained. + * If this parameter is DISPATCH_TARGET_QUEUE_DEFAULT, sets the channel's target + * queue to the default target queue for the given channel type. + * + * @param callbacks + * Hooks for the created channel. + * + * @returns + * The newly created channel. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW DISPATCH_NONNULL4 +dispatch_channel_t +dispatch_channel_create(const char *_Nullable label, + dispatch_queue_t _Nullable target, + void *_Nullable context, dispatch_channel_callbacks_t callbacks); + +/*! @function dispatch_channel_wakeup + * + * @abstract + * Re-evaluate whether a dispatch channel needs to be woken up. + * + * @discussion + * Calling this function causes the GCD runtime to reevaluate whether + * the specified dispatch channel needs to be woken up. If a previous call to + * `dcc_probe`, `dcc_acknowledge_cancel` or `dcc_invoke` returned false then + * a channel may remain asleep until wakeup is called. + * + * It is valid to call this function from the context of any of the `invoke` + * callbacks, but not from the `dcc_probe` callback. + * + * This function will have no effect if: + * - the dispatch channel is suspended, + * - the `dcc_probe` callback subsequently returns false, + * - the dispatch channel has no work items queued, nor a pending cancellation + * to acknowledge. + * + * @param channel + * The channel for which wakeup should be evaluated.
+ * + * @param qos_class + * The QoS override that should be applied to this channel because of this + * event. The override will persist until the channel has been drained of + * pending items. + * + * It is expected that most wakeups will not require an additional QoS + * override. In this case, passing QOS_CLASS_UNSPECIFIED indicates that no + * additional override should be applied as a result of this wakeup. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_channel_wakeup(dispatch_channel_t channel, qos_class_t qos_class); + +/*! @typedef dispatch_channel_enumerator_handler_t + * + * Type of the callbacks used by dispatch_channel_foreach_work_item_peek_f(). + */ +typedef bool (*dispatch_channel_enumerator_handler_t)(void *_Nullable context, void *item); + +/*! @function dispatch_channel_foreach_work_item_peek_f + * + * @abstract + * Peek at opaque work items currently enqueued on the channel. + * + * @discussion + * This function will enumerate items enqueued on the channel, in order, until + * the first non-opaque work item is found. No work should be performed on + * behalf of the items enumerated. + * + * This function allows the caller to preflight items that will be processed + * when draining the channel (fex. counting items in order to pre-allocate + * storage, or batch items into groups). + * + * This function can only be called from the context of the `dcc_invoke` + * callback associated with this channel, and before any call to + * dispatch_channel_drain(). + * + * @param invoke_context + * The opaque invoke context passed to the channel `dcc_invoke` callback. + * + * @param context + * An application-defined context that will be passed to the handler. + * + * @param handler + * The handler that will be passed `context` and the opaque work item + * currently enumerated. 
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_channel_foreach_work_item_peek_f( + dispatch_channel_invoke_ctxt_t invoke_context, + void *_Nullable context, dispatch_channel_enumerator_handler_t handler); + +#ifdef __BLOCKS__ +/*! @typedef dispatch_channel_enumerator_block_t + * + * Type of the callbacks used by dispatch_channel_foreach_work_item_peek(). + */ +typedef bool (^dispatch_channel_enumerator_block_t)(void *item); + +/*! @function dispatch_channel_foreach_work_item_peek + * + * @abstract + * Peek at the opaque work items currently enqueued on the channel. + * + * @discussion + * See dispatch_channel_foreach_work_item_peek_f() + * + * @param invoke_context + * The opaque invoke context passed to the channel `dcc_invoke` callback. + * + * @param block + * The block that will be passed the opaque work item currently enumerated. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_channel_foreach_work_item_peek( + dispatch_channel_invoke_ctxt_t invoke_context, + dispatch_channel_enumerator_block_t block DISPATCH_NOESCAPE); +#endif + +/*! @typedef dispatch_channel_drain_handler_t + * + * @abstract + * Type of the callbacks used by dispatch_channel_drain_f(). + * + * @param context + * The application defined context passed to dispatch_channel_drain_f(). + * + * @param item + * The opaque work item to consume. + * + * @param rejected_item + * An out-parameter for an opaque work item to put back at the head of the + * queue. On return from this handler, if rejected_item is set then the handler + * must also return false (and, thus, interrupts the drain operation). + * + * @returns + * - true if the drain may enumerate the next item + * - false to cause dispatch_channel_drain_f() to return. + * in which case a rejected item can optionally be returned.
+ */ +typedef bool (*dispatch_channel_drain_handler_t)(void *_Nullable context, + void *_Nonnull item, void *_Nonnull *_Nullable rejected_item); + +/*! @function dispatch_channel_drain_f + * + * @abstract + * Drain the opaque work items enqueued on the channel. + * + * @discussion + * This function needs to be called by any `dcc_invoke` that returns true. + * + * Calling drain will cause every opaque work item that can be consumed to be + * passed to the handler. While the handler is called, the runtime environment + * matches the QOS and context captured at dispatch_channel_enqueue() time for + * this opaque work item. + * + * Note, this function can (through factors internal to the GCD runtime) + * decide not to consume all items that are currently enqueued on the channel. + * Therefore it is possible for dispatch_channel_drain_f() to enumerate fewer + * items than dispatch_channel_foreach_work_item_peek_f() did when called + * immediately beforehand. + * + * It is also possible for dispatch_channel_drain_f() to observe *more* items + * than previously seen with peek, if enqueues are happening concurrently. + * + * Note that work items enqueued with dispatch_channel_async() act as + * "separators". If the opaque work item O1 is enqueued before a regular + * asynchronous work item A, and a new opaque work item O2 is then enqueued, + * then neither dispatch_channel_foreach_work_item_peek_f() nor + * dispatch_channel_drain_f() will ever return O1 and O2 as part of the same + * drain streak. + * + * @param invoke_context + * The opaque invoke context passed to the channel `dcc_invoke` callback. + * + * @param handler + * The handler that will be passed the context and opaque work item to invoke.
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t invoke_context, + void *_Nullable context, dispatch_channel_drain_handler_t handler); + +#ifdef __BLOCKS__ +/*! @typedef dispatch_channel_drain_block_t + * + * @abstract + * Type of the callbacks used by dispatch_channel_drain(). + * + * @discussion + * See dispatch_channel_drain_handler_t. + */ +typedef bool (^dispatch_channel_drain_block_t)(void *_Nonnull item, + void *_Nonnull *_Nullable rejected_item); + +/*! @function dispatch_channel_drain + * + * @abstract + * Drain the opaque work items enqueued on the channel. + * + * @discussion + * See dispatch_channel_drain_f() + * + * @param invoke_context + * The opaque invoke context passed to the channel `dcc_invoke` callback. + * + * @param block + * The block that will be passed the opaque work item to invoke. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NONNULL_ALL +void +dispatch_channel_drain(dispatch_channel_invoke_ctxt_t invoke_context, + dispatch_channel_drain_block_t block DISPATCH_NOESCAPE); +#endif + +/*! + * @function dispatch_channel_cancel + * + * @abstract + * Asynchronously cancel the dispatch channel. + * + * @discussion + * Cancellation will cause the channel to repeatedly call the + * `dcc_acknowledge_cancel` handler until it returns true. This allows the + * associated state machine to handle cancellation asynchronously (and, if + * needed, in multiple phases). + * + * The precise semantics of cancellation are up to the dispatch channel + * associated state machine, and not all dispatch channels need to use + * cancellation.
+ * + * However, if the `dcc_acknowledge_cancel` callback is implemented, then an + * explicit call to dispatch_channel_cancel() is mandatory before the last + * reference to the dispatch channel is released. + * + * @param channel + * The dispatch channel to be canceled. + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_channel_cancel(dispatch_channel_t channel); + +/*! + * @function dispatch_channel_testcancel + * + * @abstract + * Tests whether the given dispatch channel has been canceled. + * + * @param channel + * The dispatch channel to be tested. + * The result of passing NULL in this parameter is undefined. + * + * @result + * Non-zero if canceled and zero if not canceled. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_NOTHROW +long +dispatch_channel_testcancel(dispatch_channel_t channel); + +/*! + * @function dispatch_channel_async + * + * @abstract + * Submits a block for asynchronous execution on a dispatch channel. + * + * @discussion + * See dispatch_async(). + * + * @param channel + * The target dispatch channel to which the block is submitted. + * The system will hold a reference on the target channel until the block + * has finished. + * The result of passing NULL in this parameter is undefined. + * + * @param block + * The block to submit to the target dispatch channel. This function performs + * Block_copy() and Block_release() on behalf of callers. + * The result of passing NULL in this parameter is undefined. 
+ */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_channel_async(dispatch_channel_t queue, dispatch_block_t block); +#endif + +/*! + * @function dispatch_channel_async_f + * + * @abstract + * Submits a function for asynchronous execution on a dispatch channel. + * + * @discussion + * See dispatch_async() for details. + * + * @param queue + * The target dispatch channel to which the function is submitted. + * The system will hold a reference on the target channel until the function + * has returned. + * The result of passing NULL in this parameter is undefined. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the target channel. The first + * parameter passed to this function is the context provided to + * dispatch_channel_async_f(). + * The result of passing NULL in this parameter is undefined. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_channel_async_f(dispatch_queue_t queue, + void *_Nullable context, dispatch_function_t work); + +/*! + * @function dispatch_channel_enqueue + * + * @abstract + * Enqueues an opaque work item for asynchronous dequeue on a dispatch channel. + * + * @discussion + * See dispatch_channel_async() for details. + * + * @param channel + * The target dispatch channel to which the work item is submitted. + * The system will hold a reference on the target channel until the work item + * is consumed. + * The result of passing NULL in this parameter is undefined. + * + * @param item + * The application-defined work item to enqueue on the target channel. + * The result of passing NULL in this parameter is undefined. 
+ */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_channel_enqueue(dispatch_channel_t channel, void *item); + +#endif // DISPATCH_CHANNEL_SPI + +__END_DECLS + +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/private/data_private.h b/private/data_private.h index 364a8ffe0..5c5431ab9 100644 --- a/private/data_private.h +++ b/private/data_private.h @@ -182,15 +182,9 @@ dispatch_data_make_memory_entry(dispatch_data_t data); */ typedef const struct dispatch_data_format_type_s *dispatch_data_format_type_t; -#if !TARGET_OS_WIN32 #define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ DISPATCH_EXPORT const struct dispatch_data_format_type_s \ _dispatch_data_format_type_##name -#else -#define DISPATCH_DATA_FORMAT_TYPE_DECL(name) \ - DISPATCH_EXPORT struct dispatch_data_format_type_s \ - _dispatch_data_format_type_##name -#endif /*! * @const DISPATCH_DATA_FORMAT_TYPE_NONE @@ -294,7 +288,6 @@ DISPATCH_DATA_FORMAT_TYPE_DECL(utf_any); * A newly created dispatch data object, dispatch_data_empty if no has been * produced, or NULL if an error occurred. */ - API_AVAILABLE(macos(10.8), ios(6.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -303,6 +296,28 @@ dispatch_data_create_with_transform(dispatch_data_t data, dispatch_data_format_type_t input_type, dispatch_data_format_type_t output_type); +/*! + * @function dispatch_data_get_flattened_bytes_4libxpc + * + * Similar to dispatch_data_create_map() but attaches it to the passed in + * dispatch data. + * + * The returned mapping, if not NULL, has the size returned by + * dispatch_data_get_size() for the specified object, and its lifetime is tied + * to the one of the dispatch data itself. + * + * @discussion + * This interface is reserved for XPC usage and is not considered stable ABI. 
+ * + * + * @result + * A newly created linear mapping for this data object, may return NULL if + * making the dispatch data contiguous failed to allocate memory. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +const void *_Nullable +dispatch_data_get_flattened_bytes_4libxpc(dispatch_data_t data); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/private/layout_private.h b/private/layout_private.h index d85e94a53..0101fc035 100644 --- a/private/layout_private.h +++ b/private/layout_private.h @@ -28,7 +28,6 @@ __BEGIN_DECLS -#if !TARGET_OS_WIN32 API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT const struct dispatch_queue_offsets_s { // always add new fields at the end @@ -51,7 +50,6 @@ DISPATCH_EXPORT const struct dispatch_queue_offsets_s { const uint16_t dqo_priority; const uint16_t dqo_priority_size; } dispatch_queue_offsets; -#endif #if DISPATCH_LAYOUT_SPI /*! diff --git a/private/mach_private.h b/private/mach_private.h index e56f6d5c7..1474c163a 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -34,8 +34,6 @@ __BEGIN_DECLS -#if DISPATCH_MACH_SPI - #define DISPATCH_MACH_SPI_VERSION 20161026 #include @@ -129,6 +127,10 @@ DISPATCH_DECL(dispatch_mach); * an asynchronous reply to a message previously sent to the channel. Used * only if the channel is disconnected while waiting for a reply to a message * sent with dispatch_mach_send_with_result_and_async_reply_4libxpc(). + * + * @const DISPATCH_MACH_NO_SENDERS + * Sent when a no-senders notification requested with dispatch_mach_request_no_senders() + * has been received. See dispatch_mach_request_no_senders().
*/ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_CONNECTED = 1, @@ -143,6 +145,7 @@ DISPATCH_ENUM(dispatch_mach_reason, unsigned long, DISPATCH_MACH_NEEDS_DEFERRED_SEND, DISPATCH_MACH_SIGTERM_RECEIVED, DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED, + DISPATCH_MACH_NO_SENDERS, DISPATCH_MACH_REASON_LAST, /* unused */ ); @@ -351,6 +354,78 @@ dispatch_mach_create_f(const char *_Nullable label, dispatch_queue_t _Nullable queue, void *_Nullable context, dispatch_mach_handler_function_t handler); +/*! + * @function dispatch_mach_request_no_senders + * + * Configure the mach channel to receive no more senders notifications. + * + * @discussion + * This function must be called before dispatch_mach_connect() has been called. + * + * When a checkin message is passed to dispatch_mach_connect() or + * dispatch_mach_reconnect(), the notification is armed after the checkin + * message has been sent successfully. + * + * If no checkin message is passed, then the mach channel is assumed to be + * a "server" peer connection and the no more senders request is armed + * immediately. + * + * @param channel + * The mach channel to request no senders notifications on. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_mach_request_no_senders(dispatch_mach_t channel); + +/*! + * @typedef dispatch_mach_flags_t + * + * Flags that can be passed to the dispatch_mach_set_flags function. + * + * @const DMF_USE_STRICT_REPLY + * Instruct the dispatch mach channel to use strict reply port semantics. When + * using strict reply port semantics, the kernel will enforce that the port + * used as the reply port has precisely 1 extant send-once right, its receive + * right exists in the same space as the sender, and any voucher context, + * e.g., the persona in the bank attribute, used when sending the message is + * also used when replying. 
+ * + * @const DMF_REQUEST_NO_SENDERS + * Configure the mach channel to receive no more senders notifications. + * When a checkin message is passed to dispatch_mach_connect() or + * dispatch_mach_reconnect(), the notification is armed after the checkin + * message has been sent successfully. If no checkin message is passed, then + * the mach channel is assumed to be a "server" peer connection and the no + * more senders request is armed immediately. + */ +DISPATCH_OPTIONS(dispatch_mach_flags, uint64_t, + DMF_NONE = 0x0, + DMF_USE_STRICT_REPLY = 0x1, + DMF_REQUEST_NO_SENDERS = 0x2, +); + +/*! + * @function dispatch_mach_set_flags + * + * Configure optional properties on the mach channel. + * + * @discussion + * This function must be called before dispatch_mach_connect() has been called. + * + * @param channel + * The mach channel to configure. + * + * @param flags + * Flags to configure the dispatch mach channel. + * + * @see dispatch_mach_flags_t + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +void +dispatch_mach_set_flags(dispatch_mach_t channel, dispatch_mach_flags_t flags); + /*! * @function dispatch_mach_connect * Connect a mach channel to the specified receive and send rights. 
@@ -882,6 +957,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW mach_port_t dispatch_mach_get_checkin_port(dispatch_mach_t channel); +#if DISPATCH_MACH_SPI + // SPI for libxpc /* * Type for the callback for receipt of asynchronous replies to @@ -890,7 +967,7 @@ dispatch_mach_get_checkin_port(dispatch_mach_t channel); typedef void (*_Nonnull dispatch_mach_async_reply_callback_t)(void *context, dispatch_mach_reason_t reason, dispatch_mach_msg_t message); -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) typedef const struct dispatch_mach_xpc_hooks_s { #define DISPATCH_MACH_XPC_MIN_HOOKS_VERSION 3 #define DISPATCH_MACH_XPC_HOOKS_VERSION 3 @@ -914,6 +991,8 @@ typedef const struct dispatch_mach_xpc_hooks_s { /* Fields available in version 2. */ +#define DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF ((dispatch_queue_t)NULL) + /* * Gets the queue to which a reply to a message sent using * dispatch_mach_send_with_result_and_async_reply_4libxpc() should be @@ -945,7 +1024,7 @@ typedef const struct dispatch_mach_xpc_hooks_s { dispatch_mach_async_reply_callback_t dmxh_async_reply_handler; /* Fields available in version 3. */ - /** + /* * Called once when the Mach channel has been activated. If this function * returns true, a DISPATCH_MACH_SIGTERM_RECEIVED notification will be * delivered to the channel's event handler when a SIGTERM is received. @@ -971,7 +1050,7 @@ typedef const struct dispatch_mach_xpc_hooks_s { * @param hooks * A pointer to the channel hooks structure. This must remain valid once set. 
*/ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks); @@ -1011,7 +1090,7 @@ dispatch_mach_hooks_install_4libxpc(dispatch_mach_xpc_hooks_t hooks); * @result * The newly created dispatch mach channel. */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NONNULL4 DISPATCH_NOTHROW dispatch_mach_t @@ -1092,7 +1171,7 @@ dispatch_mach_create_4libxpc(const char *_Nullable label, * Out parameter to return the error from the immediate send attempt. * If a deferred send is required, returns 0. Must not be NULL. */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL5 DISPATCH_NONNULL6 DISPATCH_NOTHROW void @@ -1101,10 +1180,57 @@ dispatch_mach_send_with_result_and_async_reply_4libxpc(dispatch_mach_t channel, dispatch_mach_send_flags_t send_flags, dispatch_mach_reason_t *send_result, mach_error_t *send_error); -DISPATCH_ASSUME_NONNULL_END - #endif // DISPATCH_MACH_SPI +/*! + * @function dispatch_mach_handoff_reply_f + * + * @abstract + * Inform the runtime that a given sync IPC is being handed off to a new queue + * hierarchy. + * + * @discussion + * This function can only be called from the context of an IPC handler, or from + * a work item created by dispatch_mach_handoff_reply_f. Calling + * dispatch_mach_handoff_reply_f from a different context is undefined and will + * cause the process to be terminated. + * + * dispatch_mach_handoff_reply_f will only take effect when the work item that + * issued it returns. 
+ * + * @param queue + * The queue the IPC reply will be handed off to. This queue must be an + * immutable queue hierarchy (with all nodes created with + * dispatch_queue_create_with_target() for example). + * + * @param port + * The send once right that will be replied to. + */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NOTHROW +void +dispatch_mach_handoff_reply_f(dispatch_queue_t queue, mach_port_t port, + void *_Nullable ctxt, dispatch_function_t func); + +/*! + * @function dispatch_mach_handoff_reply + * + * @abstract + * Inform the runtime that a given sync IPC is being handed off to a new queue + * hierarchy. + * + * @see dispatch_mach_handoff_reply_f + */ +#ifdef __BLOCKS__ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +void +dispatch_mach_handoff_reply(dispatch_queue_t queue, mach_port_t port, + dispatch_block_t block); +#endif /* __BLOCKS__ */ + +DISPATCH_ASSUME_NONNULL_END + __END_DECLS #endif diff --git a/private/private.h b/private/private.h index df4aba51e..df93d9a9f 100644 --- a/private/private.h +++ b/private/private.h @@ -32,8 +32,10 @@ #include #include #include -#elif defined(__linux__) -#include +#elif defined(_WIN32) +#include +#elif defined(__unix__) +#include #endif #if TARGET_OS_MAC @@ -41,10 +43,12 @@ #include #include #endif -#if HAVE_UNISTD_H +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif +#if !defined(_WIN32) #include +#endif #if TARGET_OS_MAC #include #endif @@ -58,11 +62,12 @@ #include #include +#if DISPATCH_CHANNEL_SPI +#include +#endif #include #include -#if DISPATCH_MACH_SPI #include -#endif // DISPATCH_MACH_SPI #include #include #include @@ -72,7 +77,7 @@ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ // Check that public and private dispatch headers match -#if DISPATCH_API_VERSION != 20180109 // 
Keep in sync with +#if DISPATCH_API_VERSION != 20181008 // Keep in sync with #error "Dispatch header mismatch between /usr/include and /usr/local/include" #endif @@ -173,7 +178,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); #if TARGET_OS_MAC #define DISPATCH_COCOA_COMPAT 1 -#elif defined(__linux__) +#elif defined(__linux__) || defined(__FreeBSD__) #define DISPATCH_COCOA_COMPAT 1 #else #define DISPATCH_COCOA_COMPAT 0 @@ -185,7 +190,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); #if TARGET_OS_MAC typedef mach_port_t dispatch_runloop_handle_t; -#elif defined(__linux__) +#elif defined(__linux__) || defined(__FreeBSD__) typedef int dispatch_runloop_handle_t; #else #error "runloop support not implemented on this platform" @@ -221,7 +226,7 @@ DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW mach_port_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13.2), ios(11.2), tvos(11.2), watchos(4.2)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t source); @@ -253,7 +258,7 @@ void (*_Nullable _dispatch_end_NSAutoReleasePool)(void *); #endif /* DISPATCH_COCOA_COMPAT */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW void _dispatch_poll_for_events_4launchd(void); diff --git a/private/queue_private.h b/private/queue_private.h index 4b915fbf2..2aa4e3ee3 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -47,7 +47,6 @@ enum { DISPATCH_QUEUE_OVERCOMMIT = 0x2ull, }; - /*! * @function dispatch_set_qos_class * @@ -95,54 +94,6 @@ void dispatch_set_qos_class(dispatch_object_t object, dispatch_qos_class_t qos_class, int relative_priority); -/*! 
- * @function dispatch_set_qos_class_floor - * - * @abstract - * Sets the QOS class floor on a dispatch queue, source, workloop or mach - * channel. - * - * @discussion - * The QOS class of workitems submitted to this object asynchronously will be - * elevated to at least the specified QOS class floor. - * Unlike dispatch_set_qos_class(), the QOS of the workitem will be used if - * higher than the floor even when the workitem has been created without - * "ENFORCE" semantics. - * - * Setting the QOS class floor is equivalent to the QOS effects of configuring - * a target queue whose QOS class has been set with dispatch_set_qos_class(). - * - * Calling this function will supersede any prior calls to - * dispatch_set_qos_class() or dispatch_set_qos_class_floor(). - * - * @param object - * A dispatch queue, workloop, source or mach channel to configure. - * The object must be inactive. - * - * Passing another object type or an object that has been activated is undefined - * and will cause the process to be terminated. - * - * @param qos_class - * A QOS class value: - * - QOS_CLASS_USER_INTERACTIVE - * - QOS_CLASS_USER_INITIATED - * - QOS_CLASS_DEFAULT - * - QOS_CLASS_UTILITY - * - QOS_CLASS_BACKGROUND - * Passing any other value is undefined. - * - * @param relative_priority - * A relative priority within the QOS class. This value is a negative - * offset from the maximum supported scheduler priority for the given class. - * Passing a value greater than zero or less than QOS_MIN_RELATIVE_PRIORITY - * is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NOTHROW -void -dispatch_set_qos_class_floor(dispatch_object_t object, - dispatch_qos_class_t qos_class, int relative_priority); - /*! * @function dispatch_set_qos_class_fallback * @@ -275,7 +226,7 @@ dispatch_queue_attr_make_with_overcommit(dispatch_queue_attr_t _Nullable attr, * @param label * The new label for the queue. 
*/ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_queue_set_label_nocopy(dispatch_queue_t queue, diff --git a/private/source_private.h b/private/source_private.h index 56e9213e1..bd5e47ebc 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -120,7 +120,7 @@ DISPATCH_SOURCE_TYPE_DECL(sock); * @discussion A dispatch source that monitors events on a network channel. */ #define DISPATCH_SOURCE_TYPE_NW_CHANNEL (&_dispatch_source_type_nw_channel) -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_LINUX_UNAVAILABLE() +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_SOURCE_TYPE_DECL(nw_channel); __END_DECLS @@ -190,9 +190,15 @@ enum { * * @constant DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE * Received network channel flow advisory. + * @constant DISPATCH_NW_CHANNEL_CHANNEL_EVENT + * Received network channel event. + * @constant DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE + * Received network channel interface advisory. */ enum { DISPATCH_NW_CHANNEL_FLOW_ADV_UPDATE = 0x00000001, + DISPATCH_NW_CHANNEL_CHANNEL_EVENT = 0x00000002, + DISPATCH_NW_CHANNEL_INTF_ADV_UPDATE = 0x00000004, }; /*! @@ -236,6 +242,9 @@ enum { * * @constant DISPATCH_VFS_DESIREDDISK * Filesystem has exceeded the DESIREDDISK level + * + * @constant DISPATCH_VFS_FREE_SPACE_CHANGE + * Filesystem free space changed. */ enum { DISPATCH_VFS_NOTRESP = 0x0001, @@ -251,6 +260,7 @@ enum { DISPATCH_VFS_QUOTA = 0x1000, DISPATCH_VFS_NEARLOWDISK = 0x2000, DISPATCH_VFS_DESIREDDISK = 0x4000, + DISPATCH_VFS_FREE_SPACE_CHANGE = 0x8000, }; /*! @@ -322,6 +332,18 @@ enum { DISPATCH_MACH_SEND_POSSIBLE = 0x8, }; +/*! 
+ * @enum dispatch_source_mach_recv_flags_t + * + * @constant DISPATCH_MACH_RECV_SYNC_PEEK + * The receive source will participate in synchronous IPC priority inversion + * avoidance when possible. + */ +enum { + DISPATCH_MACH_RECV_SYNC_PEEK DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) = + 0x00008000, +}; + /*! * @enum dispatch_source_proc_flags_t * @@ -341,7 +363,7 @@ enum { enum { DISPATCH_PROC_REAP DISPATCH_ENUM_API_DEPRECATED("unsupported flag", macos(10.6,10.9), ios(4.0,7.0)) = 0x10000000, - DISPATCH_PROC_EXIT_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(2.0)) = 0x04000000, + DISPATCH_PROC_EXIT_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(2.0)) = 0x04000000, }; /*! @@ -405,7 +427,7 @@ enum { DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0x20, - DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0xf0000000, + DISPATCH_MEMORYPRESSURE_MSL_STATUS DISPATCH_ENUM_API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) = 0xf0000000, }; /*! @@ -473,7 +495,7 @@ __BEGIN_DECLS * The result of passing NULL in this parameter is undefined. */ #ifdef __BLOCKS__ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source, @@ -500,7 +522,7 @@ dispatch_source_set_mandatory_cancel_handler(dispatch_source_t source, * context of the dispatch source at the time the handler call is made. * The result of passing NULL in this parameter is undefined. 
*/ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_source_set_mandatory_cancel_handler_f(dispatch_source_t source, @@ -631,7 +653,7 @@ typedef struct dispatch_source_extended_data_s { * the value of the size argument. If this is less than the value of the size * argument, the remaining space in data will have been populated with zeroes. */ -API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) +API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW size_t diff --git a/private/workloop_private.h b/private/workloop_private.h index 01273217e..c06b498db 100644 --- a/private/workloop_private.h +++ b/private/workloop_private.h @@ -35,7 +35,6 @@ /******************************************************************************\ * * THIS FILE IS AN IN-PROGRESS INTERFACE THAT IS SUBJECT TO CHANGE - * PLEASE REACH-OUT TO gcd@group.apple.com BEFORE ADOPTING ANY INTERFACE * \******************************************************************************/ @@ -43,108 +42,7 @@ DISPATCH_ASSUME_NONNULL_BEGIN __BEGIN_DECLS -/*! - * @typedef dispatch_workloop_t - * - * @abstract - * Dispatch workloops invoke workitems submitted to them in priority order. - * - * @discussion - * A dispatch workloop is a flavor of dispatch_queue_t that is a priority - * ordered queue (using the QOS class of the submitted workitems as the - * ordering). - * - * Between each workitem invocation, the workloop will evaluate whether higher - * priority workitems have since been submitted and execute these first. - * - * Serial queues targeting a workloop maintain FIFO execution of their - * workitems. 
However, the workloop may reorder workitems submitted to - * independent serial queues targeting it with respect to each other, - * based on their priorities. - * - * A dispatch workloop is a "subclass" of dispatch_queue_t which can be passed - * to all APIs accepting a dispatch queue, except for functions from the - * dispatch_sync() family. dispatch_async_and_wait() must be used for workloop - * objects. Functions from the dispatch_sync() family on queues targeting - * a workloop are still permitted but discouraged for performance reasons. - */ -#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) -typedef struct dispatch_workloop_s *dispatch_workloop_t; -#else -DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); -#endif - -/*! - * @function dispatch_workloop_create - * - * @abstract - * Creates a new dispatch workloop to which workitems may be submitted. - * - * @param label - * A string label to attach to the workloop. - * - * @result - * The newly created dispatch workloop. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create(const char *_Nullable label); - -/*! - * @function dispatch_workloop_create_inactive - * - * @abstract - * Creates a new inactive dispatch workloop that can be setup and then - * activated. - * - * @discussion - * Creating an inactive workloop allows for it to receive further configuration - * before it is activated, and workitems can be submitted to it. - * - * Submitting workitems to an inactive workloop is undefined and will cause the - * process to be terminated. - * - * @param label - * A string label to attach to the workloop. - * - * @result - * The newly created dispatch workloop. 
- */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW -dispatch_workloop_t -dispatch_workloop_create_inactive(const char *_Nullable label); - -/*! - * @function dispatch_workloop_set_autorelease_frequency - * - * @abstract - * Sets the autorelease frequency of the workloop. - * - * @discussion - * See dispatch_queue_attr_make_with_autorelease_frequency(). - * The default policy for a workloop is - * DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM. - * - * @param workloop - * The dispatch workloop to modify. - * - * This workloop must be inactive, passing an activated object is undefined - * and will cause the process to be terminated. - * - * @param frequency - * The requested autorelease frequency. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, - dispatch_autorelease_frequency_t frequency); - -DISPATCH_ENUM(dispatch_workloop_param_flags, uint64_t, +DISPATCH_OPTIONS(dispatch_workloop_param_flags, uint64_t, DISPATCH_WORKLOOP_NONE DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x0, DISPATCH_WORKLOOP_FIXED_PRIORITY DISPATCH_ENUM_API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) = 0x1, ); @@ -281,158 +179,6 @@ DISPATCH_EXPORT DISPATCH_NOTHROW bool _dispatch_workloop_should_yield_4NW(void); -/*! - * @function dispatch_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a workitem to a dispatch queue like dispatch_async(), however - * dispatch_async_and_wait() will not return until the workitem has finished. - * - * Like functions of the dispatch_sync family, dispatch_async_and_wait() is - * subject to dead-lock (See dispatch_sync() for details). 
- * - * However, dispatch_async_and_wait() differs from functions of the - * dispatch_sync family in two fundamental ways: how it respects queue - * attributes and how it chooses the execution context invoking the workitem. - * - * Differences with dispatch_sync() - * - * Work items submitted to a queue with dispatch_async_and_wait() observe all - * queue attributes of that queue when invoked (inluding autorelease frequency - * or QOS class). - * - * When the runtime has brought up a thread to invoke the asynchronous workitems - * already submitted to the specified queue, that servicing thread will also be - * used to execute synchronous work submitted to the queue with - * dispatch_async_and_wait(). - * - * However, if the runtime has not brought up a thread to service the specified - * queue (because it has no workitems enqueued, or only synchronous workitems), - * then dispatch_async_and_wait() will invoke the workitem on the calling thread, - * similar to the behaviour of functions in the dispatch_sync family. - * - * As an exception, if the queue the work is submitted to doesn't target - * a global concurrent queue (for example because it targets the main queue), - * then the workitem will never be invoked by the thread calling - * dispatch_async_and_wait(). - * - * In other words, dispatch_async_and_wait() is similar to submitting - * a dispatch_block_create()d workitem to a queue and then waiting on it, as - * shown in the code example below. However, dispatch_async_and_wait() is - * significantly more efficient when a new thread is not required to execute - * the workitem (as it will use the stack of the submitting thread instead of - * requiring heap allocations). - * - * - * dispatch_block_t b = dispatch_block_create(0, block); - * dispatch_async(queue, b); - * dispatch_block_wait(b, DISPATCH_TIME_FOREVER); - * Block_release(b); - * - * - * @param queue - * The target dispatch queue to which the block is submitted. 
- * The result of passing NULL in this parameter is undefined. - * - * @param block - * The block to be invoked on the target dispatch queue. - * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! - * @function dispatch_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * See dispatch_async_and_wait() for details. - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); - -/*! - * @function dispatch_barrier_async_and_wait - * - * @abstract - * Submits a block for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a block to a dispatch queue like dispatch_async_and_wait(), but marks - * that block as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the block is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param work - * The application-defined block to invoke on the target queue. 
- * The result of passing NULL in this parameter is undefined. - */ -#ifdef __BLOCKS__ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait(dispatch_queue_t queue, - DISPATCH_NOESCAPE dispatch_block_t block); -#endif - -/*! - * @function dispatch_barrier_async_and_wait_f - * - * @abstract - * Submits a function for synchronous execution on a dispatch queue. - * - * @discussion - * Submits a function to a dispatch queue like dispatch_async_and_wait_f(), but - * marks that function as a barrier (relevant only on DISPATCH_QUEUE_CONCURRENT - * queues). - * - * @param queue - * The target dispatch queue to which the function is submitted. - * The result of passing NULL in this parameter is undefined. - * - * @param context - * The application-defined context parameter to pass to the function. - * - * @param work - * The application-defined function to invoke on the target queue. The first - * parameter passed to this function is the context provided to - * dispatch_barrier_async_and_wait_f(). - * The result of passing NULL in this parameter is undefined. - */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) -DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW -void -dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, - void *_Nullable context, dispatch_function_t work); __END_DECLS diff --git a/src/BlocksRuntime/Block.h b/src/BlocksRuntime/Block.h index 15c724226..d0898ff49 100644 --- a/src/BlocksRuntime/Block.h +++ b/src/BlocksRuntime/Block.h @@ -3,8 +3,8 @@ // Copyright (c) 2014 - 2015 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // diff --git a/src/BlocksRuntime/Block_private.h b/src/BlocksRuntime/Block_private.h index deeb19a0a..b2e1512ac 100644 --- a/src/BlocksRuntime/Block_private.h +++ b/src/BlocksRuntime/Block_private.h @@ -3,8 +3,8 @@ // Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // @@ -38,8 +38,8 @@ enum { #define BLOCK_DESCRIPTOR_1 1 struct Block_descriptor_1 { - uintptr_t reserved; - uintptr_t size; + unsigned long int reserved; + unsigned long int size; }; #define BLOCK_DESCRIPTOR_2 1 diff --git a/src/BlocksRuntime/data.c b/src/BlocksRuntime/data.c index dd36051d9..03de71b41 100644 --- a/src/BlocksRuntime/data.c +++ b/src/BlocksRuntime/data.c @@ -3,8 +3,8 @@ // Copyright (c) 2014 - 2015 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // /******************** diff --git a/src/BlocksRuntime/runtime.c b/src/BlocksRuntime/runtime.c index 8c98e8d1e..bfec1a0bf 100644 --- a/src/BlocksRuntime/runtime.c +++ b/src/BlocksRuntime/runtime.c @@ -3,8 +3,8 @@ // Copyright (c) 2014 - 2015 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // #include "Block_private.h" @@ -28,25 +28,22 @@ #define os_assert(_x) assert(_x) #endif -#if TARGET_OS_WIN32 -#define _CRT_SECURE_NO_WARNINGS 1 -#include -static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) -{ - // fixme barrier is overkill -- see objc-os.h - long original = InterlockedCompareExchange(dst, newl, oldl); - return (original == oldl); -} +#if !defined(__has_builtin) +#define __has_builtin(builtin) 0 +#endif -static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) -{ - // fixme barrier is overkill -- see objc-os.h - int original = InterlockedCompareExchange(dst, newi, oldi); - return (original == oldi); -} +#if __has_builtin(__sync_bool_compare_and_swap) +#define OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) \ + __sync_bool_compare_and_swap(_Ptr, _Old, _New) #else -#define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) -#define 
OSAtomicCompareAndSwapInt(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) +#define _CRT_SECURE_NO_WARNINGS 1 +#include +static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, + int volatile *dst) { + // fixme barrier is overkill -- see objc-os.h + int original = InterlockedCompareExchange((LONG volatile *)dst, newi, oldi); + return (original == oldi); +} #endif /*********************** @@ -141,13 +138,13 @@ static bool latching_decr_int_now_zero(volatile int32_t *where) { /*********************** GC support stub routines ************************/ -#if !TARGET_OS_WIN32 +#if !defined(_MSC_VER) || defined(__clang__) #pragma mark GC Support Routines #endif -static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) { +static void *_Block_alloc_default(size_t size, const bool initialCountIsOne, const bool isObject) { (void)initialCountIsOne; (void)isObject; return malloc(size); @@ -175,7 +172,7 @@ static void _Block_release_object_default(const void *ptr) { } static void _Block_assign_weak_default(const void *ptr, void *dest) { -#if !TARGET_OS_WIN32 +#if !defined(_WIN32) *(long *)dest = (long)ptr; #else *(void **)dest = (void *)ptr; @@ -207,7 +204,7 @@ static void _Block_destructInstance_default(const void *aBlock) { GC support callout functions - initially set to stub routines ***************************************************************************/ -static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default; +static void *(*_Block_allocator)(size_t, const bool isOne, const bool isObject) = _Block_alloc_default; static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free; static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default; static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default; @@ -226,7 +223,7 @@ GC support SPI functions - 
called from ObjC runtime and CoreFoundation // Public SPI // Called from objc-auto to turn on GC. // version 3, 4 arg, but changed 1st arg -void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), +void _Block_use_GC( void *(*alloc)(size_t, const bool isOne, const bool isObject), void (*setHasRefcount)(const void *, const bool), void (*gc_assign)(void *, void **), void (*gc_assign_weak)(const void *, void *), @@ -249,7 +246,7 @@ void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const } // transitional -void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject), +void _Block_use_GC5( void *(*alloc)(size_t, const bool isOne, const bool isObject), void (*setHasRefcount)(const void *, const bool), void (*gc_assign)(void *, void **), void (*gc_assign_weak)(const void *, void *)) { @@ -339,7 +336,7 @@ static void _Block_call_dispose_helper(struct Block_layout *aBlock) Internal Support routines for copying ********************************************************************************/ -#if !TARGET_OS_WIN32 +#if !defined(_MSC_VER) || defined(__clang__) #pragma mark Copy/Release support #endif @@ -500,7 +497,7 @@ static void _Block_byref_release(const void *arg) { * ***********************************************************/ -#if !TARGET_OS_WIN32 +#if !defined(_MSC_VER) || defined(__clang__) #pragma mark SPI/API #endif @@ -632,7 +629,7 @@ const char * _Block_extended_layout(void *aBlock) else return desc3->layout; } -#if !TARGET_OS_WIN32 +#if !defined(_MSC_VER) || defined(__clang__) #pragma mark Compiler SPI entry points #endif diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 4d4bb2e55..80bbd54b1 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -19,6 +19,7 @@ add_library(dispatch time.c transform.c voucher.c + shims.c protocol.defs provider.d allocator_internal.h @@ -40,20 +41,27 @@ add_library(dispatch event/event_epoll.c event/event_internal.h 
event/event_kevent.c + event/event_windows.c firehose/firehose_internal.h shims/android_stubs.h shims/atomic.h shims/atomic_sfb.h shims/getprogname.h shims/hw_config.h - shims/linux_stubs.c - shims/linux_stubs.h shims/lock.c shims/lock.h shims/perfmon.h shims/time.h shims/tsd.h shims/yield.h) +if(WIN32) + target_sources(dispatch + PRIVATE + shims/generic_sys_queue.h + shims/generic_win_stubs.c + shims/generic_win_stubs.h + shims/getprogname.c) +endif() if(DISPATCH_USE_INTERNAL_WORKQUEUE) target_sources(dispatch PRIVATE @@ -71,7 +79,7 @@ if(HAVE_OBJC) endif() if(ENABLE_SWIFT) set(swift_optimization_flags) - if(CMAKE_BUILD_TYPE MATCHES Release) + if(NOT CMAKE_BUILD_TYPE MATCHES Debug) set(swift_optimization_flags -O) endif() add_swift_library(swiftDispatch @@ -93,20 +101,28 @@ if(ENABLE_SWIFT) swift/Source.swift swift/Time.swift swift/Wrapper.swift + TARGET + ${CMAKE_C_COMPILER_TARGET} CFLAGS -fblocks - -fmodule-map-file=${CMAKE_SOURCE_DIR}/dispatch/module.modulemap + -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap SWIFT_FLAGS - -I ${CMAKE_SOURCE_DIR} - ${swift_optimization_flags}) - add_dependencies(swiftDispatch - module-map-symlinks) + -I ${PROJECT_SOURCE_DIR} + -I/usr/include + ${swift_optimization_flags} + DEPENDS + ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap) target_sources(dispatch PRIVATE swift/DispatchStubs.cc ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) + if(CMAKE_BUILD_TYPE MATCHES Debug) + target_link_libraries(dispatch + PRIVATE + swiftSwiftOnoneSupport) + endif() endif() -if(dtrace_EXECUTABLE) +if(ENABLE_DTRACE) dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d OUTPUT_SOURCES dispatch_dtrace_provider_headers) @@ -116,19 +132,20 @@ if(dtrace_EXECUTABLE) endif() target_include_directories(dispatch PRIVATE - ${CMAKE_BINARY_DIR} - ${CMAKE_SOURCE_DIR} + ${PROJECT_BINARY_DIR} + ${PROJECT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR} - ${CMAKE_SOURCE_DIR}/private) -if(WITH_BLOCKS_RUNTIME) - 
target_include_directories(dispatch - SYSTEM BEFORE PRIVATE - "${WITH_BLOCKS_RUNTIME}") + ${PROJECT_SOURCE_DIR}/private) +target_include_directories(dispatch + SYSTEM BEFORE PRIVATE + "${BlocksRuntime_INCLUDE_DIR}") +if(WIN32) + target_compile_definitions(dispatch + PRIVATE + _CRT_NONSTDC_NO_WARNINGS) endif() -if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") - target_compile_options(dispatch PRIVATE /EHsc-) -else() +if(NOT "${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE -fno-exceptions) endif() if(DISPATCH_ENABLE_ASSERTS) @@ -140,6 +157,10 @@ if(CMAKE_SYSTEM_NAME STREQUAL Windows) target_compile_definitions(dispatch PRIVATE -D_CRT_SECURE_NO_WARNINGS) +elseif(CMAKE_SYSTEM_NAME STREQUAL Android) + target_compile_options(dispatch + PRIVATE + -U_GNU_SOURCE) endif() if(BSD_OVERLAY_FOUND) target_compile_options(dispatch @@ -170,9 +191,19 @@ endif() if(BSD_OVERLAY_FOUND) target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) endif() -target_link_libraries(dispatch PRIVATE Threads::Threads) -if(WITH_BLOCKS_RUNTIME) - target_link_libraries(dispatch PRIVATE BlocksRuntime) +if(LibRT_FOUND) + target_link_libraries(dispatch PRIVATE RT::rt) +endif() +target_link_libraries(dispatch + PRIVATE + Threads::Threads + BlocksRuntime::BlocksRuntime) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_link_libraries(dispatch + PRIVATE + WS2_32 + WinMM + synchronization) endif() if(CMAKE_SYSTEM_NAME STREQUAL Darwin) set_property(TARGET dispatch @@ -181,33 +212,30 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) "-Xlinker -compatibility_version -Xlinker 1" "-Xlinker -current_version -Xlinker ${VERSION}" "-Xlinker -dead_strip" - "-Xlinker -alias_list -Xlinker ${CMAKE_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") -endif() -if(USE_GOLD_LINKER) - set_property(TARGET dispatch - APPEND_STRING - PROPERTY LINK_FLAGS - -fuse-ld=gold) + "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") endif() +dispatch_set_linker(dispatch) # Temporary 
staging; the various swift projects that depend on libdispatch # all expect libdispatch.so to be in src/.libs/libdispatch.so # So for now, make a copy so we don't have to do a coordinated commit across # all the swift projects to change this assumption. add_custom_command(TARGET dispatch POST_BUILD - COMMAND cmake -E make_directory .libs - COMMAND cmake -E copy $ .libs + COMMAND ${CMAKE_COMMAND} -E make_directory .libs + COMMAND ${CMAKE_COMMAND} -E copy $ .libs COMMENT "Copying libdispatch to .libs") +get_swift_host_arch(SWIFT_HOST_ARCH) + install(TARGETS dispatch DESTINATION - "${CMAKE_INSTALL_LIBDIR}") + "${INSTALL_TARGET_DIR}") if(ENABLE_SWIFT) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc DESTINATION - "${CMAKE_INSTALL_LIBDIR}/swift/${SWIFT_OS}/${CMAKE_SYSTEM_PROCESSOR}") + "${INSTALL_TARGET_DIR}/${SWIFT_HOST_ARCH}") endif() diff --git a/src/Makefile.am b/src/Makefile.am deleted file mode 100644 index 58dcead4b..000000000 --- a/src/Makefile.am +++ /dev/null @@ -1,200 +0,0 @@ -# -# -# - -if HAVE_SWIFT -swiftlibdir=${prefix}/lib/swift/${OS_STRING} -swiftlib_LTLIBRARIES=libdispatch.la -else -lib_LTLIBRARIES=libdispatch.la -endif - -if DISPATCH_USE_INTERNAL_WORKQUEUE -INTERNAL_WORKQUEUE_SOURCES= \ - event/workqueue.c \ - event/workqueue_internal.h -endif - -libdispatch_la_SOURCES= \ - allocator.c \ - apply.c \ - benchmark.c \ - data.c \ - init.c \ - introspection.c \ - io.c \ - mach.c \ - object.c \ - once.c \ - queue.c \ - semaphore.c \ - source.c \ - time.c \ - transform.c \ - voucher.c \ - protocol.defs \ - provider.d \ - allocator_internal.h \ - data_internal.h \ - inline_internal.h \ - internal.h \ - introspection_internal.h \ - io_internal.h \ - mach_internal.h \ - object_internal.h \ - queue_internal.h \ - semaphore_internal.h \ - shims.h \ - source_internal.h \ - trace.h \ - voucher_internal.h \ - event/event.c \ - event/event_config.h \ - event/event_epoll.c \ - 
event/event_internal.h \ - event/event_kevent.c \ - firehose/firehose_internal.h \ - shims/android_stubs.h \ - shims/atomic.h \ - shims/atomic_sfb.h \ - shims/getprogname.h \ - shims/hw_config.h \ - shims/linux_stubs.c \ - shims/linux_stubs.h \ - shims/lock.c \ - shims/lock.h \ - shims/perfmon.h \ - shims/time.h \ - shims/tsd.h \ - shims/yield.h \ - $(INTERNAL_WORKQUEUE_SOURCES) - -EXTRA_libdispatch_la_SOURCES= -EXTRA_libdispatch_la_DEPENDENCIES= - -AM_CPPFLAGS=-I$(top_builddir) -I$(top_srcdir) -I$(top_srcdir)/private - -DISPATCH_CFLAGS=-Wall $(VISIBILITY_FLAGS) $(OMIT_LEAF_FP_FLAGS) \ - $(MARCH_FLAGS) $(BSD_OVERLAY_CFLAGS) -if DISPATCH_ENABLE_ASSERTS -DISPATCH_CFLAGS+=-DDISPATCH_DEBUG=1 -endif -AM_CFLAGS= $(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) -AM_OBJCFLAGS=$(DISPATCH_CFLAGS) $(CBLOCKS_FLAGS) -AM_CXXFLAGS=$(PTHREAD_WORKQUEUE_CFLAGS) $(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) -AM_OBJCXXFLAGS=$(DISPATCH_CFLAGS) $(CXXBLOCKS_FLAGS) - -if HAVE_PTHREAD_WORKQUEUES - PTHREAD_WORKQUEUE_LIBS=-lpthread_workqueue - PTHREAD_WORKQUEUE_CFLAGS= -endif - -if BUILD_OWN_BLOCKS_RUNTIME -libdispatch_la_SOURCES+= BlocksRuntime/data.c BlocksRuntime/runtime.c -CBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime -CXXBLOCKS_FLAGS+= -I$(top_srcdir)/src/BlocksRuntime -if USE_OBJC -BLOCKS_RUNTIME_LIBS=-ldl -endif -endif - -libdispatch_la_LDFLAGS=-avoid-version -libdispatch_la_LIBADD=$(PTHREAD_WORKQUEUE_LIBS) $(BSD_OVERLAY_LIBS) $(BLOCKS_RUNTIME_LIBS) - -if HAVE_DARWIN_LD -libdispatch_la_LDFLAGS+=-Wl,-compatibility_version,1 \ - -Wl,-current_version,$(VERSION) -Wl,-dead_strip \ - -Wl,-alias_list,$(top_srcdir)/xcodeconfig/libdispatch.aliases -endif - -if USE_GOLD_LINKER -libdispatch_la_LDFLAGS+=-Xcompiler -fuse-ld=gold -endif - -if USE_OBJC -libdispatch_la_SOURCES+=block.cpp data.m object.m -libdispatch_la_OBJCFLAGS=$(AM_OBJCFLAGS) -Wno-switch -fobjc-gc -libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions -libdispatch_la_LDFLAGS+=-Wl,-upward-lobjc 
-Wl,-upward-lauto \ - -Wl,-order_file,$(top_srcdir)/xcodeconfig/libdispatch.order -else -libdispatch_la_SOURCES+=block.cpp -libdispatch_la_CXXFLAGS=$(AM_CXXFLAGS) -std=gnu++11 -fno-exceptions -endif - -if USE_MIG -MIG_SOURCES= \ - protocolUser.c \ - protocol.h \ - protocolServer.c \ - protocolServer.h - -%User.c %.h %Server.c %Server.h: $(abs_srcdir)/%.defs - $(MIG) -user $*User.c -header $*.h \ - -server $*Server.c -sheader $*Server.h $< -endif - -if USE_DTRACE -DTRACE_SOURCES=provider.h - -%.h: $(abs_srcdir)/%.d - $(DTRACE) -h -s $< -o $@ -endif - -if HAVE_SWIFT -SWIFT_SRC_FILES=\ - swift/Block.swift \ - swift/Data.swift \ - swift/Dispatch.swift \ - swift/IO.swift \ - swift/Private.swift \ - swift/Queue.swift \ - swift/Source.swift \ - swift/Time.swift \ - swift/Wrapper.swift - -SWIFT_ABS_SRC_FILES = $(SWIFT_SRC_FILES:%=$(abs_srcdir)/%) -SWIFT_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.o -SWIFT_LIBTOOL_OBJ_FILES = $(abs_builddir)/swift/swift_overlay.lo - -SWIFTC_FLAGS+= -Xcc -fmodule-map-file=$(abs_top_srcdir)/dispatch/module.modulemap -I$(abs_top_srcdir) -Xcc -fblocks -if DISPATCH_ENABLE_OPTIMIZATION -SWIFTC_FLAGS+=-O -endif - -# this saves the object file, then tricks libtool into generating a .lo file and -# then moves the object file back in the places libtool expects them to be for -# the PIC and non-PIC case. 
-$(abs_builddir)/swift/swift_overlay.lo: $(abs_builddir)/swift/swift_overlay.o - mv $(abs_builddir)/swift/swift_overlay.o $(abs_builddir)/swift/.libs/swift_overlay.o.save - $(LIBTOOL) --mode=compile --tag=CC true -o $< -c /dev/null - cp $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/.libs/swift_overlay.o - mv $(abs_builddir)/swift/.libs/swift_overlay.o.save $(abs_builddir)/swift/swift_overlay.o - -$(abs_builddir)/swift/swift_overlay.o: $(SWIFT_ABS_SRC_FILES) $(SWIFTC) - @rm -f $@ - $(SWIFTC) -whole-module-optimization -emit-library -c $(SWIFT_ABS_SRC_FILES) \ - $(SWIFTC_FLAGS) -module-name Dispatch -module-link-name dispatch \ - -o $@ -emit-module-path $(abs_builddir)/swift/Dispatch.swiftmodule - -libdispatch_la_SOURCES+=swift/DispatchStubs.cc -EXTRA_libdispatch_la_SOURCES+=$(SWIFT_SRC_FILES) - -EXTRA_libdispatch_la_DEPENDENCIES+=$(SWIFT_OBJ_FILES) $(SWIFT_LIBTOOL_OBJ_FILES) $(abs_builddir)/swift/Dispatch.swiftmodule -libdispatch_la_LIBADD+=$(SWIFT_LIBTOOL_OBJ_FILES) - -SWIFT_GEN_FILES= \ - $(abs_builddir)/swift/Dispatch.swiftmodule \ - $(abs_builddir)/swift/Dispatch.swiftdoc \ - $(SWIFT_OBJ_FILES) - -swiftmoddir=${prefix}/lib/swift/${OS_STRING}/${target_cpu} -swiftmod_HEADERS=\ - $(abs_builddir)/swift/Dispatch.swiftmodule \ - $(abs_builddir)/swift/Dispatch.swiftdoc -endif - -BUILT_SOURCES=$(MIG_SOURCES) $(DTRACE_SOURCES) -nodist_libdispatch_la_SOURCES=$(BUILT_SOURCES) -CLEANFILES=$(BUILT_SOURCES) $(SWIFT_GEN_FILES) -DISTCLEANFILES=pthread_machdep.h pthread System mach objc diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 5f8c2f068..ead653595 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -278,7 +278,7 @@ struct dispatch_magazine_s { }; #if DISPATCH_DEBUG -#define DISPATCH_ALLOCATOR_SCRIBBLE ((uintptr_t)0xAFAFAFAFAFAFAFAF) +#define DISPATCH_ALLOCATOR_SCRIBBLE ((int)0xAFAFAFAF) #endif diff --git a/src/apply.c b/src/apply.c index 5f93e6693..9c7d60ffd 100644 --- a/src/apply.c +++ b/src/apply.c @@ 
-326,7 +326,7 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, .dc_ctxt = ctxt, .dc_data = dq, }; - dispatch_apply_t da = (typeof(da))_dispatch_continuation_alloc(); + dispatch_apply_t da = (__typeof__(da))_dispatch_continuation_alloc(); da->da_index = 0; da->da_todo = iterations; da->da_iterations = iterations; diff --git a/src/block.cpp b/src/block.cpp index 9e9c2246e..3d7432529 100644 --- a/src/block.cpp +++ b/src/block.cpp @@ -69,7 +69,10 @@ struct dispatch_block_private_data_s { if (dbpd_voucher && dbpd_voucher != DISPATCH_NO_VOUCHER) { voucher_retain(dbpd_voucher); } - if (o.dbpd_block) dbpd_block = _dispatch_Block_copy(o.dbpd_block); + if (o.dbpd_block) { + dbpd_block = reinterpret_cast( + _dispatch_Block_copy(o.dbpd_block)); + } _dispatch_block_private_data_debug("copy from %p, block: %p from %p", &o, dbpd_block, o.dbpd_block); if (!o.dbpd_magic) return; // No group in initial copy of stack object @@ -88,7 +91,7 @@ struct dispatch_block_private_data_s { if (dbpd_magic != DISPATCH_BLOCK_PRIVATE_DATA_MAGIC) return; if (dbpd_group) { if (!dbpd_performed) dispatch_group_leave(dbpd_group); - _os_object_release(dbpd_group->_as_os_obj); + _os_object_release_without_xref_dispose(dbpd_group->_as_os_obj); } if (dbpd_queue) { _os_object_release_internal_n(dbpd_queue->_as_os_obj, 2); @@ -105,22 +108,18 @@ _dispatch_block_create(dispatch_block_flags_t flags, voucher_t voucher, pthread_priority_t pri, dispatch_block_t block) { struct dispatch_block_private_data_s dbpds(flags, voucher, pri, block); - return _dispatch_Block_copy(^{ + return reinterpret_cast(_dispatch_Block_copy(^{ // Capture stack object: invokes copy constructor (17094902) (void)dbpds; _dispatch_block_invoke_direct(&dbpds); - }); + })); } extern "C" { // The compiler hides the name of the function it generates, and changes it if // we try to reference it directly, but the linker still sees it. 
extern void DISPATCH_BLOCK_SPECIAL_INVOKE(void *) -#ifdef __linux__ - asm("___dispatch_block_create_block_invoke"); -#else - asm("____dispatch_block_create_block_invoke"); -#endif + __asm__(OS_STRINGIFY(__USER_LABEL_PREFIX__) "___dispatch_block_create_block_invoke"); void (*const _dispatch_block_special_invoke)(void*) = DISPATCH_BLOCK_SPECIAL_INVOKE; } diff --git a/src/data.c b/src/data.c index 700a694ea..0a3cb1aa9 100644 --- a/src/data.c +++ b/src/data.c @@ -51,7 +51,7 @@ * * Such objects are created when used as an NSData and -bytes is called and * where the dispatch data object is an unflattened composite object. - * The underlying implementation is _dispatch_data_get_flattened_bytes + * The underlying implementation is dispatch_data_get_flattened_bytes_4libxpc. * * TRIVIAL SUBRANGES (num_records == 1, buf == nil, destructor == nil) * @@ -517,7 +517,7 @@ dispatch_data_create_map(dispatch_data_t dd, const void **buffer_ptr, } const void * -_dispatch_data_get_flattened_bytes(dispatch_data_t dd) +dispatch_data_get_flattened_bytes_4libxpc(dispatch_data_t dd) { const void *buffer; size_t offset = 0; diff --git a/src/data.m b/src/data.m index 789c3eb0c..e0185a0cf 100644 --- a/src/data.m +++ b/src/data.m @@ -131,8 +131,7 @@ - (NSUInteger)length { } - (const void *)bytes { - struct dispatch_data_s *dd = (void*)self; - return _dispatch_data_get_flattened_bytes(dd); + return dispatch_data_get_flattened_bytes_4libxpc(self); } - (BOOL)_isCompact { @@ -151,11 +150,9 @@ - (void)_activate { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(data_empty) - -// Force non-lazy class realization rdar://10640168 -+ (void)load { -} +OS_OBJECT_NONLAZY_CLASS_LOAD - (id)retain { return (id)self; diff --git a/src/data_internal.h b/src/data_internal.h index c5bc09f75..1589a793a 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -107,8 +107,8 @@ void _dispatch_data_dispose(dispatch_data_t data, bool *allow_free); void _dispatch_data_set_target_queue(struct 
dispatch_data_s *dd, dispatch_queue_t tq); #endif +DISPATCH_COLD size_t _dispatch_data_debug(dispatch_data_t data, char* buf, size_t bufsiz); -const void* _dispatch_data_get_flattened_bytes(struct dispatch_data_s *dd); #if !defined(__cplusplus) extern const dispatch_block_t _dispatch_data_destructor_inline; diff --git a/src/event/event.c b/src/event/event.c index b1bc05343..fc6ee814d 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -39,7 +39,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, return DISPATCH_UNOTE_NULL; } - if (dst->dst_mask && !mask) { + if (dst->dst_mask && !dst->dst_allow_empty_mask && !mask) { return DISPATCH_UNOTE_NULL; } @@ -53,7 +53,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, du->du_can_be_wlh = dst->dst_per_trigger_qos; du->du_ident = (uint32_t)handle; du->du_filter = dst->dst_filter; - du->du_fflags = (typeof(du->du_fflags))mask; + du->du_fflags = (__typeof__(du->du_fflags))mask; if (dst->dst_flags & EV_UDATA_SPECIFIC) { du->du_is_direct = true; } diff --git a/src/event/event_config.h b/src/event/event_config.h index ca0e368f7..f221d0922 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -25,10 +25,16 @@ # include # define DISPATCH_EVENT_BACKEND_EPOLL 1 # define DISPATCH_EVENT_BACKEND_KEVENT 0 +# define DISPATCH_EVENT_BACKEND_WINDOWS 0 #elif __has_include() # include # define DISPATCH_EVENT_BACKEND_EPOLL 0 # define DISPATCH_EVENT_BACKEND_KEVENT 1 +# define DISPATCH_EVENT_BACKEND_WINDOWS 0 +#elif defined(_WIN32) +# define DISPATCH_EVENT_BACKEND_EPOLL 0 +# define DISPATCH_EVENT_BACKEND_KEVENT 0 +# define DISPATCH_EVENT_BACKEND_WINDOWS 1 #else # error unsupported event loop #endif @@ -60,7 +66,7 @@ #if DISPATCH_TIMER_ASSERTIONS #define DISPATCH_TIMER_ASSERT(a, op, b, text) ({ \ - typeof(a) _a = (a); \ + __typeof__(a) _a = (a); \ if (unlikely(!(_a op (b)))) { \ DISPATCH_CLIENT_CRASH(_a, "Timer: " text); \ } \ @@ -82,16 +88,14 @@ # if defined(EV_SET_QOS) # define DISPATCH_USE_KEVENT_QOS 1 -# ifndef 
KEVENT_FLAG_IMMEDIATE -# define KEVENT_FLAG_IMMEDIATE 0x001 -# endif -# ifndef KEVENT_FLAG_ERROR_EVENTS -# define KEVENT_FLAG_ERROR_EVENTS 0x002 -# endif # else # define DISPATCH_USE_KEVENT_QOS 0 # endif +# ifndef KEVENT_FLAG_ERROR_EVENTS +# define KEVENT_FLAG_ERROR_EVENTS 0x002 +# endif + # ifdef NOTE_LEEWAY # define DISPATCH_HAVE_TIMER_COALESCING 1 # else @@ -112,6 +116,14 @@ # define NOTE_FUNLOCK 0x00000100 # endif +// FreeBSD's kevent does not support those +# ifndef NOTE_ABSOLUTE +# define NOTE_ABSOLUTE 0 +# endif +# ifndef NOTE_EXITSTATUS +# define NOTE_EXITSTATUS 0 +# endif + # if HAVE_DECL_NOTE_REAP # if defined(NOTE_REAP) && defined(__APPLE__) # undef NOTE_REAP @@ -131,9 +143,15 @@ # undef HAVE_DECL_VQ_DESIRED_DISK # endif // VQ_DESIRED_DISK +# ifndef VQ_FREE_SPACE_CHANGE +# undef HAVE_DECL_VQ_FREE_SPACE_CHANGE +# endif // VQ_FREE_SPACE_CHANGE + # if !defined(EVFILT_NW_CHANNEL) && defined(__APPLE__) -# define EVFILT_NW_CHANNEL (-16) -# define NOTE_FLOW_ADV_UPDATE 0x1 +# define EVFILT_NW_CHANNEL (-16) +# define NOTE_FLOW_ADV_UPDATE 0x1 +# define NOTE_CHANNEL_EVENT 0x2 +# define NOTE_IF_ADV_UPD 0x4 # endif #else // DISPATCH_EVENT_BACKEND_KEVENT # define EV_ADD 0x0001 @@ -152,10 +170,16 @@ # define DISPATCH_HAVE_TIMER_QOS 0 # define DISPATCH_HAVE_TIMER_COALESCING 0 -# define KEVENT_FLAG_IMMEDIATE 0x001 # define DISPATCH_HAVE_DIRECT_KNOTES 0 #endif // !DISPATCH_EVENT_BACKEND_KEVENT +// These flags are used by dispatch generic code and +// translated back by the various backends to similar semantics +// hence must be defined even on non Darwin platforms +#ifndef KEVENT_FLAG_IMMEDIATE +# define KEVENT_FLAG_IMMEDIATE 0x001 +#endif + #ifdef EV_UDATA_SPECIFIC # define DISPATCH_EV_DIRECT (EV_UDATA_SPECIFIC|EV_DISPATCH) #else @@ -210,6 +234,10 @@ typedef unsigned int mach_msg_priority_t; # define MACH_SEND_SYNC_OVERRIDE 0x00100000 # endif // MACH_SEND_SYNC_OVERRIDE +# ifndef MACH_MSG_STRICT_REPLY +# define MACH_MSG_STRICT_REPLY 0x00000200 +# endif + # ifndef 
MACH_RCV_SYNC_WAIT # define MACH_RCV_SYNC_WAIT 0x00004000 # endif // MACH_RCV_SYNC_WAIT diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index a99eb5dc8..a5c71c710 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -377,13 +377,14 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, dispatch_epoll_timeout_t timer = &_dispatch_epoll_timeout[clock]; struct epoll_event ev = { .events = EPOLLONESHOT | EPOLLIN, - .data = { .u32 = timer->det_ident }, + }; int op; if (target >= INT64_MAX && !timer->det_registered) { return; } + ev.data.u32 = timer->det_ident; if (unlikely(timer->det_fd < 0)) { clockid_t clockid; @@ -408,7 +409,7 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, if (target < INT64_MAX) { struct itimerspec its = { .it_value = { - .tv_sec = target / NSEC_PER_SEC, + .tv_sec = (time_t)(target / NSEC_PER_SEC), .tv_nsec = target % NSEC_PER_SEC, } }; dispatch_assume_zero(timerfd_settime(timer->det_fd, TFD_TIMER_ABSTIME, diff --git a/src/event/event_internal.h b/src/event/event_internal.h index ff561ced5..d59b303c4 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -99,7 +99,7 @@ typedef struct dispatch_wlh_s *dispatch_wlh_t; // opaque handle #define DISPATCH_WLH_ANON ((dispatch_wlh_t)(void*)(~0x3ul)) #define DISPATCH_WLH_MANAGER ((dispatch_wlh_t)(void*)(~0x7ul)) -DISPATCH_ENUM(dispatch_unote_timer_flags, uint8_t, +DISPATCH_OPTIONS(dispatch_unote_timer_flags, uint8_t, /* DISPATCH_TIMER_STRICT 0x1 */ /* DISPATCH_TIMER_BACKGROUND = 0x2, */ DISPATCH_TIMER_CLOCK_UPTIME = DISPATCH_CLOCK_UPTIME << 2, @@ -298,6 +298,9 @@ struct dispatch_xpc_term_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); }; typedef struct dispatch_xpc_term_refs_s *dispatch_xpc_term_refs_t; +void _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, + uint64_t _Atomic *addr); +void _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port); #endif // HAVE_MACH typedef union dispatch_unote_u { @@ -341,6 
+344,7 @@ typedef struct dispatch_source_type_s { dispatch_unote_action_t dst_action; uint8_t dst_per_trigger_qos : 1; uint8_t dst_strict : 1; + uint8_t dst_allow_empty_mask : 1; uint8_t dst_timer_flags; uint16_t dst_flags; #if DISPATCH_EVENT_BACKEND_KEVENT @@ -654,7 +658,9 @@ void _dispatch_unote_resume_direct(dispatch_unote_t du); void _dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt); +#if !DISPATCH_EVENT_BACKEND_WINDOWS void _dispatch_event_loop_atfork_child(void); +#endif #define DISPATCH_EVENT_LOOP_CONSUME_2 DISPATCH_WAKEUP_CONSUME_2 #define DISPATCH_EVENT_LOOP_OVERRIDE 0x80000000 void _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 1e7cdb5bd..ed5ffe0da 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -32,6 +32,8 @@ #define DISPATCH_KEVENT_MUXED_MARKER 1ul #define DISPATCH_MACH_AUDIT_TOKEN_PID (5) +#define dispatch_kevent_udata_t __typeof__(((dispatch_kevent_t)NULL)->udata) + typedef struct dispatch_muxnote_s { LIST_ENTRY(dispatch_muxnote_s) dmn_list; LIST_HEAD(, dispatch_unote_linkage_s) dmn_unotes_head; @@ -44,9 +46,15 @@ DISPATCH_STATIC_GLOBAL(bool _dispatch_timers_force_max_leeway); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_kq_poll_pred); DISPATCH_STATIC_GLOBAL(struct dispatch_muxnote_bucket_s _dispatch_sources[DSL_HASH_SIZE]); +#if defined(__APPLE__) #define DISPATCH_NOTE_CLOCK_WALL NOTE_NSECONDS | NOTE_MACH_CONTINUOUS_TIME #define DISPATCH_NOTE_CLOCK_MONOTONIC NOTE_MACHTIME | NOTE_MACH_CONTINUOUS_TIME -#define DISPATCH_NOTE_CLOCK_UPTIME NOTE_MACHTIME +#define DISPATCH_NOTE_CLOCK_UPTIME NOTE_MACHTIME +#else +#define DISPATCH_NOTE_CLOCK_WALL 0 +#define DISPATCH_NOTE_CLOCK_MONOTONIC 0 +#define DISPATCH_NOTE_CLOCK_UPTIME 0 +#endif static const uint32_t _dispatch_timer_index_to_fflags[] = { #define DISPATCH_TIMER_FFLAGS_INIT(kind, qos, note) \ @@ -67,6 +75,9 @@ static const uint32_t _dispatch_timer_index_to_fflags[] = { }; 
static inline void _dispatch_kevent_timer_drain(dispatch_kevent_t ke); +#if DISPATCH_USE_KEVENT_WORKLOOP +static void _dispatch_kevent_workloop_poke_drain(dispatch_kevent_t ke); +#endif #pragma mark - #pragma mark kevent debug @@ -98,6 +109,9 @@ _evfiltstr(short filt) #ifdef EVFILT_MEMORYSTATUS _evfilt2(EVFILT_MEMORYSTATUS); #endif +#if DISPATCH_USE_KEVENT_WORKLOOP + _evfilt2(EVFILT_WORKLOOP); +#endif // DISPATCH_USE_KEVENT_WORKLOOP #endif // DISPATCH_EVENT_BACKEND_KEVENT _evfilt2(DISPATCH_EVFILT_TIMER); @@ -180,17 +194,20 @@ dispatch_kevent_debug(const char *verb, const dispatch_kevent_s *kev, _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx, " "qos = 0x%x, ext[0] = 0x%llx, ext[1] = 0x%llx, ext[2] = 0x%llx, " - "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, kev->ident, - _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, - kev->qos, kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], + "ext[3] = 0x%llx }: %s #%u", verb, kev, i_n, + (unsigned long long)kev->ident, _evfiltstr(kev->filter), + _evflagstr(kev->flags, flagstr, sizeof(flagstr)), kev->flags, kev->fflags, + (unsigned long long)kev->data, (unsigned long long)kev->udata, kev->qos, + kev->ext[0], kev->ext[1], kev->ext[2], kev->ext[3], function, line); #else _dispatch_debug("%s kevent[%p] %s= { ident = 0x%llx, filter = %s, " "flags = %s (0x%x), fflags = 0x%x, data = 0x%llx, udata = 0x%llx}: " "%s #%u", verb, kev, i_n, - kev->ident, _evfiltstr(kev->filter), _evflagstr(kev->flags, flagstr, - sizeof(flagstr)), kev->flags, kev->fflags, kev->data, kev->udata, + (unsigned long long)kev->ident, _evfiltstr(kev->filter), + _evflagstr(kev->flags, flagstr, sizeof(flagstr)), kev->flags, + kev->fflags, (unsigned long long)kev->data, + (unsigned long long)kev->udata, function, line); #endif } @@ -306,6 +323,17 @@ _dispatch_kevent_mach_msg_size(dispatch_kevent_t ke) 
return (mach_msg_size_t)ke->ext[1]; } +static inline bool +_dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke) +{ +#define MACH_ERROR_RCV_SUB 0x4 + mach_error_t kr = (mach_error_t) ke->fflags; + return (err_get_system(kr) == err_mach_ipc) && + (err_get_sub(kr) == MACH_ERROR_RCV_SUB); +#undef MACH_ERROR_RCV_SUB +} + +static inline bool _dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke); static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke); static inline void _dispatch_mach_host_calendar_change_register(void); @@ -328,11 +356,18 @@ _dispatch_kevent_get_muxnote(dispatch_kevent_t ke) return (dispatch_muxnote_t)dmn_addr; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_kevent_unote_is_muxed(dispatch_kevent_t ke) +{ + return ((uintptr_t)ke->udata) & DISPATCH_KEVENT_MUXED_MARKER; +} + DISPATCH_ALWAYS_INLINE static dispatch_unote_t _dispatch_kevent_get_unote(dispatch_kevent_t ke) { - dispatch_assert((ke->udata & DISPATCH_KEVENT_MUXED_MARKER) == 0); + dispatch_assert(_dispatch_kevent_unote_is_muxed(ke) == false); return (dispatch_unote_t){ ._du = (dispatch_unote_class_t)ke->udata }; } @@ -352,7 +387,7 @@ _dispatch_kevent_print_error(dispatch_kevent_t ke) } // for EV_DELETE if the update was deferred we may have reclaimed // the udata already, and it is unsafe to dereference it now. 
- } else if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) { + } else if (_dispatch_kevent_unote_is_muxed(ke)) { ke->flags |= _dispatch_kevent_get_muxnote(ke)->dmn_kev.flags; } else if (ke->udata) { du = (dispatch_unote_class_t)(uintptr_t)ke->udata; @@ -512,6 +547,11 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) _dispatch_kevent_mgr_debug("received", ke); return; } +#if DISPATCH_USE_KEVENT_WORKLOOP + if (ke->filter == EVFILT_WORKLOOP) { + return _dispatch_kevent_workloop_poke_drain(ke); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP _dispatch_kevent_debug("received", ke); if (unlikely(ke->flags & EV_ERROR)) { if (ke->filter == EVFILT_PROC && ke->data == ESRCH) { @@ -531,12 +571,13 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) } #if HAVE_MACH - if (ke->filter == EVFILT_MACHPORT && _dispatch_kevent_mach_msg_size(ke)) { + if (ke->filter == EVFILT_MACHPORT && (_dispatch_kevent_mach_msg_size(ke) || + _dispatch_kevent_has_machmsg_rcv_error(ke))) { return _dispatch_kevent_mach_msg_drain(ke); } #endif - if (ke->udata & DISPATCH_KEVENT_MUXED_MARKER) { + if (_dispatch_kevent_unote_is_muxed(ke)) { return _dispatch_kevent_merge_muxed(ke); } return _dispatch_kevent_merge(_dispatch_kevent_get_unote(ke), ke); @@ -553,7 +594,7 @@ _dispatch_kq_create(intptr_t *fd_ptr) .ident = 1, .filter = EVFILT_USER, .flags = EV_ADD|EV_CLEAR, - .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + .udata = (dispatch_kevent_udata_t)DISPATCH_WLH_MANAGER, }; int kqfd; @@ -621,7 +662,7 @@ _dispatch_kq_init(void *context) .filter = EVFILT_USER, .flags = EV_ADD|EV_CLEAR, .qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, - .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + .udata = (dispatch_kevent_udata_t)DISPATCH_WLH_MANAGER, }; retry: r = kevent_qos(kqfd, &ke, 1, NULL, 0, NULL, NULL, @@ -679,7 +720,7 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, for (r = 0; r < n; r++) { ke[r].flags |= EV_RECEIPT; } - out_n = n; + n_out = n; } #endif @@ -694,10 +735,20 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, 
dispatch_kevent_t ke, int n, } r = kevent_qos(kqfd, ke, n, ke_out, n_out, buf, avail, flags); #else + (void)buf; + (void)avail; const struct timespec timeout_immediately = {}, *timeout = NULL; if (flags & KEVENT_FLAG_IMMEDIATE) timeout = &timeout_immediately; r = kevent(kqfd, ke, n, ke_out, n_out, timeout); #endif +#if DISPATCH_USE_KEVENT_WORKLOOP + } else { + flags |= KEVENT_FLAG_WORKLOOP; + if (!(flags & KEVENT_FLAG_ERROR_EVENTS)) { + flags |= KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; + } + r = kevent_id((uintptr_t)wlh, ke, n, ke_out, n_out, buf, avail, flags); +#endif // DISPATCH_USE_KEVENT_WORKLOOP } if (unlikely(r == -1)) { int err = errno; @@ -709,6 +760,14 @@ _dispatch_kq_poll(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, goto retry; case EBADF: DISPATCH_CLIENT_CRASH(err, "Do not close random Unix descriptors"); +#if DISPATCH_USE_KEVENT_WORKLOOP + case ENOENT: + if ((flags & KEVENT_FLAG_ERROR_EVENTS) && + (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST)) { + return 0; + } + /* FALLTHROUGH */ +#endif // DISPATCH_USE_KEVENT_WORKLOOP default: DISPATCH_CLIENT_CRASH(err, "Unexpected error from kevent"); } @@ -730,8 +789,9 @@ _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, #if DISPATCH_USE_KEVENT_QOS size_t size; if (poll_for_events) { - size = DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + - DISPATCH_MACH_TRAILER_SIZE; + dispatch_assert(DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE + + DISPATCH_MACH_TRAILER_SIZE <= 32 << 10); + size = 32 << 10; // match WQ_KEVENT_DATA_SIZE buf = alloca(size); avail = &size; } @@ -757,6 +817,14 @@ _dispatch_kq_drain(dispatch_wlh_t wlh, dispatch_kevent_t ke, int n, } } } else { +#if DISPATCH_USE_KEVENT_WORKLOOP + if (ke_out[0].flags & EV_ERROR) { + // When kevent returns errors it doesn't process the kqueue + // and doesn't rearm the return-to-kernel notification + // We need to assume we have to go back. 
+ _dispatch_set_return_to_kernel(); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP for (i = 0, r = 0; i < n; i++) { _dispatch_kevent_drain(&ke_out[i]); } @@ -797,11 +865,11 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, .ident = du->du_ident, .filter = dst->dst_filter, .flags = flags, - .udata = (uintptr_t)du, + .udata = (dispatch_kevent_udata_t)du, .fflags = du->du_fflags | dst->dst_fflags, - .data = (typeof(dk->data))dst->dst_data, + .data = (__typeof__(dk->data))dst->dst_data, #if DISPATCH_USE_KEVENT_QOS - .qos = (typeof(dk->qos))_dispatch_priority_to_pp_prefer_fallback( + .qos = (__typeof__(dk->qos))_dispatch_priority_to_pp_prefer_fallback( du->du_priority), #endif }; @@ -810,7 +878,7 @@ _dispatch_kq_unote_set_kevent(dispatch_unote_t _du, dispatch_kevent_t dk, DISPATCH_ALWAYS_INLINE static inline int _dispatch_kq_deferred_find_slot(dispatch_deferred_items_t ddi, - int16_t filter, uint64_t ident, uint64_t udata) + int16_t filter, uint64_t ident, dispatch_kevent_udata_t udata) { dispatch_kevent_t events = ddi->ddi_eventlist; int i; @@ -886,6 +954,49 @@ _dispatch_kq_immediate_update(dispatch_wlh_t wlh, dispatch_kevent_t ke) return _dispatch_kq_update_one(wlh, ke); } +#if HAVE_MACH +void +_dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, + uint64_t _Atomic *addr) +{ +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF + dispatch_kevent_s ke = { + .ident = port, + .filter = EVFILT_WORKLOOP, + .flags = EV_ADD | EV_DISABLE, + .fflags = NOTE_WL_SYNC_IPC | NOTE_WL_IGNORE_ESTALE, + .udata = (uintptr_t)wlh, + .ext[EV_EXTIDX_WL_ADDR] = (uintptr_t)addr, + .ext[EV_EXTIDX_WL_MASK] = ~(uintptr_t)0, + .ext[EV_EXTIDX_WL_VALUE] = (uintptr_t)wlh, + }; + int rc = _dispatch_kq_immediate_update(wlh, &ke); + if (unlikely(rc && rc != ENOENT)) { + DISPATCH_INTERNAL_CRASH(rc, "Unexpected error from kevent"); + } +#else + (void)wlh; (void)port; (void)addr; +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF +} + +void +_dispatch_sync_ipc_handoff_end(dispatch_wlh_t 
wlh, mach_port_t port) +{ +#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF + dispatch_kevent_s ke = { + .ident = port, + .filter = EVFILT_WORKLOOP, + .flags = EV_ADD | EV_DELETE | EV_ENABLE, + .fflags = NOTE_WL_SYNC_IPC, + .udata = (uintptr_t)wlh, + }; + _dispatch_kq_deferred_update(wlh, &ke); +#else + (void)wlh; (void)port; +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF +} +#endif + DISPATCH_NOINLINE static bool _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, @@ -905,7 +1016,7 @@ _dispatch_kq_unote_update(dispatch_wlh_t wlh, dispatch_unote_t _du, if (ddi && ddi->ddi_wlh == wlh) { int slot = _dispatch_kq_deferred_find_slot(ddi, - du->du_filter, du->du_ident, (uintptr_t)du); + du->du_filter, du->du_ident, (dispatch_kevent_udata_t)du); if (slot < ddi->ddi_nevents) { // when deleting and an enable is pending, // we must merge EV_ENABLE to do an immediate deletion @@ -995,6 +1106,7 @@ _dispatch_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, return dmn; } +#if HAVE_MACH DISPATCH_ALWAYS_INLINE static inline dispatch_muxnote_t _dispatch_mach_muxnote_find(mach_port_t name, int16_t filter) @@ -1003,6 +1115,7 @@ _dispatch_mach_muxnote_find(mach_port_t name, int16_t filter) dmb = _dispatch_muxnote_bucket(name, filter); return _dispatch_muxnote_find(dmb, name, filter); } +#endif bool _dispatch_unote_register_muxed(dispatch_unote_t du) @@ -1030,7 +1143,8 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) #if DISPATCH_USE_KEVENT_QOS dmn->dmn_kev.qos = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG; #endif - dmn->dmn_kev.udata = (uintptr_t)dmn | DISPATCH_KEVENT_MUXED_MARKER; + dmn->dmn_kev.udata = (dispatch_kevent_udata_t)((uintptr_t)dmn | + DISPATCH_KEVENT_MUXED_MARKER); if (unlikely(dux_type(du._du)->dst_update_mux)) { installed = dux_type(du._du)->dst_update_mux(dmn); } else { @@ -1048,10 +1162,12 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) if (installed) { dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); LIST_INSERT_HEAD(&dmn->dmn_unotes_head, 
dul, du_link); +#if HAVE_MACH if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { os_atomic_store2o(du._dmsr, dmsr_notification_armed, DISPATCH_MACH_NOTIFICATION_ARMED(dmn), relaxed); } +#endif dul->du_muxnote = dmn; _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); _dispatch_du_debug("installed", du._du); @@ -1080,9 +1196,11 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) dispatch_muxnote_t dmn = dul->du_muxnote; bool update = false, dispose = false; +#if HAVE_MACH if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); } +#endif _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); LIST_REMOVE(dul, du_link); _LIST_TRASH_ENTRY(dul, du_link); @@ -1140,6 +1258,12 @@ _dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags) dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); uint16_t action = EV_DELETE; if (likely(du_wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == du_wlh)) { +#if DISPATCH_USE_KEVENT_WORKLOOP + // Workloops are special: event delivery and servicing a workloop + // cannot race because the kernel can reason about these. + // Unregistering from a workloop is always safe and should always + // succeed immediately. +#endif action |= EV_ENABLE; flags |= DUU_DELETE_ACK | DUU_MUST_SUCCEED; } @@ -1214,6 +1338,561 @@ _dispatch_event_loop_atfork_child(void) #endif } +#if DISPATCH_USE_KEVENT_WORKLOOP +#if DISPATCH_WLH_DEBUG +/* + * Debug information for current thread & workloop: + * + * fflags: + * - NOTE_WL_THREAD_REQUEST is set if there is a thread request knote + * - NOTE_WL_SYNC_WAIT is set if there is at least one waiter + * + * ext[0]: 64bit thread ID of the owner if any + * ext[1]: 64bit thread ID of the servicer if any + * ext[2]: number of workloops owned by the caller thread + * + * If this interface is supported by the kernel, the returned error is EBUSY, + * if not it is EINVAL. 
+ */ +static bool +_dispatch_kevent_workloop_get_info(dispatch_wlh_t wlh, dispatch_kevent_t ke) +{ + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | + KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; + *ke = (dispatch_kevent_s){ + .filter = EVFILT_WORKLOOP, + .flags = EV_ADD | EV_ENABLE, + }; + if (_dispatch_kq_poll(wlh, ke, 1, ke, 1, NULL, NULL, kev_flags)) { + dispatch_assert(ke->flags & EV_ERROR); + return ke->data == EBUSY; + } + *ke = (dispatch_kevent_s){ + .flags = EV_ERROR, + .data = ENOENT, + }; + return true; +} +#endif + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_dispatch_kevent_workloop_priority(dispatch_queue_t dq, int which, + dispatch_qos_t qos) +{ + dispatch_priority_t rq_pri = dq->do_targetq->dq_priority; + if (qos < _dispatch_priority_qos(rq_pri)) { + qos = _dispatch_priority_qos(rq_pri); + } + if (qos == DISPATCH_QOS_UNSPECIFIED) { +#if 0 // we need to understand why this is happening first... + if (which != DISPATCH_WORKLOOP_ASYNC_FROM_SYNC) { + DISPATCH_INTERNAL_CRASH(which, "Should have had a QoS"); + } +#else + (void)which; +#endif + // + // When an enqueue happens right when a barrier ends, + // the barrier that ends may notice the next item before the enqueuer + // has had the time to set the max QoS on the queue. + // + // It is inconvenient to drop this thread request, and this case is rare + // enough that we instead ask for MAINTENANCE to avoid the kernel + // failing with ERANGE. 
+ // + qos = DISPATCH_QOS_MAINTENANCE; + } + pthread_priority_t pp = _dispatch_qos_to_pp(qos); + return pp | (rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +_dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, + dispatch_wlh_t wlh, uint64_t dq_state) +{ + dispatch_queue_t dq = (dispatch_queue_t)wlh; + dispatch_qos_t qos = _dq_state_max_qos(dq_state); + pthread_priority_t pp = 0; + uint32_t fflags = 0; + uint64_t mask = 0; + uint16_t action = 0; + + switch (which) { + case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: + fflags |= NOTE_WL_END_OWNERSHIP; + /* FALLTHROUGH */ + case DISPATCH_WORKLOOP_ASYNC: + case DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC: + case DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE: + dispatch_assert(_dq_state_is_base_wlh(dq_state)); + dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); + action = EV_ADD | EV_ENABLE; + mask |= DISPATCH_QUEUE_ROLE_MASK; + mask |= DISPATCH_QUEUE_ENQUEUED; + mask |= DISPATCH_QUEUE_MAX_QOS_MASK; + if (which == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC) { + dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); + dispatch_assert(_dq_state_drain_locked(dq_state)); + mask |= DISPATCH_QUEUE_SYNC_TRANSFER; + fflags |= NOTE_WL_DISCOVER_OWNER; + } else { + fflags |= NOTE_WL_IGNORE_ESTALE; + } + fflags |= NOTE_WL_UPDATE_QOS; + pp = _dispatch_kevent_workloop_priority(dq, which, qos); + break; + + case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC: + fflags |= NOTE_WL_END_OWNERSHIP; + /* FALLTHROUGH */ + case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER: + fflags |= NOTE_WL_IGNORE_ESTALE; + /* FALLTHROUGH */ + case DISPATCH_WORKLOOP_ASYNC_LEAVE: + dispatch_assert(!_dq_state_is_enqueued_on_target(dq_state)); + action = EV_ADD | EV_DELETE | EV_ENABLE; + mask |= DISPATCH_QUEUE_ENQUEUED; + break; + + case DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP: + // 0 is never a valid queue state, so the knote attach will fail due to + // the debounce. 
However, NOTE_WL_END_OWNERSHIP is always observed even + // when ESTALE is returned, which is the side effect we're after here. + fflags |= NOTE_WL_END_OWNERSHIP; + fflags |= NOTE_WL_IGNORE_ESTALE; + action = EV_ADD | EV_ENABLE; + mask = ~0ull; + dq_state = 0; + pp = _dispatch_kevent_workloop_priority(dq, which, qos); + break; + + case DISPATCH_WORKLOOP_RETARGET: + action = EV_ADD | EV_DELETE | EV_ENABLE; + fflags |= NOTE_WL_END_OWNERSHIP; + break; + + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid transition"); + } + + *ke = (dispatch_kevent_s){ + .ident = (uintptr_t)wlh, + .filter = EVFILT_WORKLOOP, + .flags = action, + .fflags = fflags | NOTE_WL_THREAD_REQUEST, + .qos = (__typeof__(ke->qos))pp, + .udata = (uintptr_t)wlh, + + .ext[EV_EXTIDX_WL_ADDR] = (uintptr_t)&dq->dq_state, + .ext[EV_EXTIDX_WL_MASK] = mask, + .ext[EV_EXTIDX_WL_VALUE] = dq_state, + }; + _dispatch_kevent_wlh_debug(_dispatch_workloop_actions[which], ke); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_kq_fill_ddi_workloop_event(dispatch_deferred_items_t ddi, + int which, dispatch_wlh_t wlh, uint64_t dq_state) +{ + int slot = _dispatch_kq_deferred_find_slot(ddi, EVFILT_WORKLOOP, + (uint64_t)wlh, (uint64_t)wlh); + if (slot == ddi->ddi_nevents) { + dispatch_assert(slot < DISPATCH_DEFERRED_ITEMS_EVENT_COUNT); + ddi->ddi_nevents++; + } + _dispatch_kq_fill_workloop_event(&ddi->ddi_eventlist[slot], + which, wlh, dq_state); +} + +DISPATCH_ALWAYS_INLINE_NDEBUG +static void +_dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, + dispatch_wlh_t wlh, uint64_t dq_state, dispatch_tid tid) +{ + dispatch_queue_t dq = (dispatch_queue_t)wlh; + pthread_priority_t pp = 0; + uint32_t fflags = 0; + uint64_t mask = 0; + uint16_t action = 0; + + switch (which) { + case DISPATCH_WORKLOOP_SYNC_WAIT: + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAIT; + pp = _dispatch_get_priority(); + if (_dispatch_qos_from_pp(pp) == 0) { + pp = _dispatch_qos_to_pp(DISPATCH_QOS_DEFAULT); + } + 
if (_dq_state_received_sync_wait(dq_state)) { + fflags |= NOTE_WL_DISCOVER_OWNER; + mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + } + break; + + case DISPATCH_WORKLOOP_SYNC_FAKE: + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAKE; + break; + + case DISPATCH_WORKLOOP_SYNC_WAKE: + dispatch_assert(_dq_state_drain_locked_by(dq_state, tid)); + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_DISCOVER_OWNER; + break; + + case DISPATCH_WORKLOOP_SYNC_END: + action = EV_DELETE | EV_ENABLE; + fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP; + break; + + default: + DISPATCH_INTERNAL_CRASH(which, "Invalid transition"); + } + + *ke = (dispatch_kevent_s){ + .ident = tid, + .filter = EVFILT_WORKLOOP, + .flags = action, + .fflags = fflags, + .udata = (uintptr_t)wlh, + .qos = (__typeof__(ke->qos))pp, + + .ext[EV_EXTIDX_WL_MASK] = mask, + .ext[EV_EXTIDX_WL_VALUE] = dq_state, + }; + if (fflags & NOTE_WL_DISCOVER_OWNER) { + ke->ext[EV_EXTIDX_WL_ADDR] = (uintptr_t)&dq->dq_state; + } + _dispatch_kevent_wlh_debug(_dispatch_workloop_actions[which], ke); +} + +#define DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT 1 +#define DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE 2 +#define DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR 4 + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_kevent_workloop_drain_error(dispatch_kevent_t ke, long flags) +{ + int err = (int)ke->data; + + _dispatch_kevent_wlh_debug("received error", ke); + dispatch_assert(ke->flags & EV_ERROR); + // + // Clear the error so that we can use the same struct to redrive as is + // but leave a breadcrumb about the error in xflags for debugging + // + ke->flags &= ~EV_ERROR; + ke->xflags = (uint32_t)err; + ke->data = 0; + + switch (err) { + case EINTR: + if ((flags & DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR) && + (ke->fflags & NOTE_WL_SYNC_WAIT)) { + return EINTR; + } + break; + case ENOENT: + if ((flags & DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT) && + (ke->flags & EV_DELETE) && (ke->fflags & 
NOTE_WL_SYNC_WAKE) && + (ke->fflags & NOTE_WL_END_OWNERSHIP)) { + // + // When breaking out a waiter because of a retarget, that waiter may + // not have made his wait syscall yet, and we can't really prepost + // an EV_DELETE, so we have to redrive on ENOENT in this case + // + return ENOENT; + } + break; + case ESTALE: + if ((flags & DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE) && + !(ke->fflags & NOTE_WL_IGNORE_ESTALE) && + ke->ext[EV_EXTIDX_WL_ADDR] && ke->ext[EV_EXTIDX_WL_MASK]) { + return ESTALE; + } + break; + case ERANGE: + DISPATCH_INTERNAL_CRASH((uintptr_t)ke->qos, "Broken priority"); + case EOWNERDEAD: + DISPATCH_CLIENT_CRASH((uintptr_t)ke->ext[EV_EXTIDX_WL_VALUE], + "Invalid workloop owner, possible memory corruption"); + default: + break; + } + DISPATCH_INTERNAL_CRASH(err, "Unexpected error from kevent"); +} + +DISPATCH_ALWAYS_INLINE +static void +_dispatch_kevent_workloop_stash(dispatch_wlh_t wlh, dispatch_kevent_t ke, + dispatch_deferred_items_t ddi) +{ + dispatch_queue_t dq = (dispatch_queue_t)wlh; + dispatch_assert(!ddi->ddi_stashed_dou._dq); + ddi->ddi_wlh_needs_delete = true; + _dispatch_retain(dq); + ddi->ddi_stashed_rq = upcast(dq->do_targetq)._dgq; + ddi->ddi_stashed_dou._dq = dq; + ddi->ddi_stashed_qos = _dispatch_qos_from_pp((pthread_priority_t)ke->qos); +} + +DISPATCH_ALWAYS_INLINE +static inline int +_dispatch_event_loop_get_action_for_state(uint64_t dq_state) +{ + dispatch_assert(_dq_state_is_base_wlh(dq_state)); + + if (!_dq_state_is_enqueued_on_target(dq_state)) { + return DISPATCH_WORKLOOP_ASYNC_LEAVE; + } + if (!_dq_state_drain_locked(dq_state)) { + return DISPATCH_WORKLOOP_ASYNC; + } + if (!_dq_state_in_sync_transfer(dq_state)) { + return DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC; + } + return DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_workloop_poke_drain(dispatch_kevent_t ke) +{ + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_wlh_t wlh = (dispatch_wlh_t)ke->udata; + 
+#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF + if (ke->fflags & NOTE_WL_SYNC_IPC) { + dispatch_assert((ke->flags & EV_ERROR) && ke->data == ENOENT); + return _dispatch_kevent_wlh_debug("ignoring", ke); + } +#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF + + dispatch_assert(ke->fflags & NOTE_WL_THREAD_REQUEST); + if (ke->flags & EV_ERROR) { + uint64_t dq_state = ke->ext[EV_EXTIDX_WL_VALUE]; + + _dispatch_kevent_workloop_drain_error(ke, + DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE); + + if (!_dq_state_is_base_wlh(dq_state)) { + dispatch_assert((ke->flags & EV_DELETE) == 0); + // + // A late async request bounced because the queue is no longer + // a workloop. There is a DISPATCH_WORKLOOP_RETARGET transition that + // will take care of deleting the thread request + // + return _dispatch_kevent_wlh_debug("ignoring", ke); + } + + // + // We're draining a failed _dispatch_event_loop_leave_deferred() + // so repeat its logic. + // + int action = _dispatch_event_loop_get_action_for_state(dq_state); + if (action == DISPATCH_WORKLOOP_ASYNC) { + _dispatch_kevent_wlh_debug("retry drain", ke); + return _dispatch_kevent_workloop_stash(wlh, ke, ddi); + } else { + _dispatch_kq_fill_workloop_event(ke, action, wlh, dq_state); + return _dispatch_kq_deferred_update(wlh, ke); + } + } else if (ddi->ddi_wlh_needs_delete) { + // + // we knew about this thread request because we learned about it + // in _dispatch_kevent_workloop_poke_self() while merging another event. + // It has already been accounted for, so just swallow it. + // + return _dispatch_kevent_wlh_debug("ignoring", ke); + } else { + // + // This is a new thread request, it is carrying a +1 reference. 
+ // + _dispatch_kevent_wlh_debug("got drain", ke); + return _dispatch_kevent_workloop_stash(wlh, ke, ddi); + } +} + +static void +_dispatch_kevent_workloop_poke(dispatch_wlh_t wlh, uint64_t dq_state, + uint32_t flags) +{ + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_kevent_s ke; + int action; + + dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); + dispatch_assert(!_dq_state_is_enqueued_on_manager(dq_state)); + action = _dispatch_event_loop_get_action_for_state(dq_state); +override: + _dispatch_kq_fill_workloop_event(&ke, action, wlh, dq_state); + + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, + DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE); + dispatch_assert(action == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC); + dq_state = ke.ext[EV_EXTIDX_WL_VALUE]; + // + // There are 4 things that can cause an ESTALE for DISCOVER_SYNC: + // - the queue role changed, we don't want to redrive + // - the queue is no longer enqueued, we don't want to redrive + // - the max QoS changed, whoever changed it is doing the same + // transition, so we don't need to redrive + // - the DISPATCH_QUEUE_IN_SYNC_TRANFER bit got set + // + // The interesting case is the last one, and will only happen in the + // following chain of events: + // 1. uncontended dispatch_sync() + // 2. contended dispatch_sync() + // 3. contended dispatch_async() + // + // And this code is running because of (3). It is possible that (1) + // hands off to (2) while this call is being made, causing the + // DISPATCH_QUEUE_IN_TRANSFER_SYNC to be set, and we don't need to tell + // the kernel about the owner anymore. However, the async in that case + // will have set a QoS on the queue (since dispatch_sync()s don't but + // dispatch_async()s always do), and we need to redrive to tell it + // to the kernel. 
+ // + if (_dq_state_is_base_wlh(dq_state) && + _dq_state_is_enqueued_on_target(dq_state) && + _dq_state_in_sync_transfer(dq_state)) { + action = DISPATCH_WORKLOOP_ASYNC; + goto override; + } + } + + if (!(flags & DISPATCH_EVENT_LOOP_OVERRIDE)) { + // Consume the reference that kept the workloop valid + // for the duration of the syscall. + return _dispatch_release_tailcall((dispatch_queue_t)wlh); + } + if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { + return _dispatch_release_2_tailcall((dispatch_queue_t)wlh); + } +} + +DISPATCH_NOINLINE +static void +_dispatch_kevent_workloop_override_self(dispatch_deferred_items_t ddi, + uint64_t dq_state, uint32_t flags) +{ + dispatch_wlh_t wlh = ddi->ddi_wlh; + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_kevent_s ke; + // + // The workloop received work from itself that caused an override + // after the drain lock has been taken, just comply and move on. + // + dispatch_assert(ddi->ddi_wlh_needs_delete); + ddi->ddi_wlh_needs_update = false; + + _dispatch_kq_fill_workloop_event(&ke, DISPATCH_WORKLOOP_ASYNC, + wlh, dq_state); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } + if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { + return _dispatch_release_2_no_dispose((dispatch_queue_t)wlh); + } +} + +static void +_dispatch_kevent_workloop_poke_self(dispatch_deferred_items_t ddi, + uint64_t dq_state, uint32_t flags) +{ + dispatch_queue_t dq = (dispatch_queue_t)ddi->ddi_wlh; + + if (ddi->ddi_wlh_servicing) { + dispatch_assert(ddi->ddi_wlh_needs_delete); + if (flags & DISPATCH_EVENT_LOOP_OVERRIDE) { + return _dispatch_kevent_workloop_override_self(ddi, dq_state,flags); + } + // + // dx_invoke() wants to re-enqueue itself e.g. because the thread pool + // needs narrowing, or the queue is suspended, or any other reason that + // interrupts the drain. 
+ // + // This is called with a +2 on the queue, a +1 goes to the thread + // request, the other we dispose of. + // + dispatch_assert(!_dq_state_drain_locked(dq_state)); + dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); + dispatch_assert(flags & DISPATCH_EVENT_LOOP_CONSUME_2); + _dispatch_release_no_dispose(dq); + return _dispatch_event_loop_leave_deferred(ddi, dq_state); + } + + // + // This codepath is only used during the initial phase of merging + // incoming kernel events in _dispatch_workloop_worker_thread, before + // trying to take the drain lock in order to drain the workloop. + // + // Once we have taken the drain lock, wakeups will not reach this codepath + // because ddi->ddi_wlh_servicing will be set. + // + + if (ddi->ddi_wlh_needs_delete) { + // + // We know there is a thread request already (stolen or real). + // However, an event is causing the workloop to be overridden. + // The kernel already has applied the override, so we can + // safely swallow this event, which carries no refcount. + // + dispatch_assert(flags & DISPATCH_EVENT_LOOP_OVERRIDE); + dispatch_assert(ddi->ddi_stashed_dou._dq); + if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { + return _dispatch_release_2_no_dispose(dq); + } + return; + } + + if (flags & DISPATCH_EVENT_LOOP_OVERRIDE) { + // + // An event delivery is causing an override, but didn't know + // about a thread request yet. However, since we're receving an override + // it means this initial thread request either exists in the kernel + // or is about to be made. + // + // If it is about to be made, it is possible that it will bounce with + // ESTALE, and will not be retried. It means we can't be sure there + // really is or even will be a knote in the kernel for it. + // + // We still want to take over the +1 this thread request carries whether + // it made it (or will make it) to the kernel, and turn it into a +2 + // below. 
+ // + // Overrides we receive in this way are coalesced and acknowleged + // only when we have to do a kevent() call for other reasons. The kernel + // will continue to apply the overrides in question until we acknowledge + // them, so there's no rush. + // + if (flags & DISPATCH_EVENT_LOOP_CONSUME_2) { + _dispatch_release_no_dispose(dq); + } else { + _dispatch_retain(dq); + } + } else { + // + // Merging events causes a thread request to be issued, this means + // the queue is empty in userland and the kernel event is the first + // thing enqueued. Consume the caller's +2. + // + dispatch_assert(flags & DISPATCH_EVENT_LOOP_CONSUME_2); + } + dispatch_assert(!ddi->ddi_stashed_dou._dq); + ddi->ddi_wlh_needs_delete = true; + ddi->ddi_wlh_needs_update = true; + ddi->ddi_stashed_rq = upcast(dq->do_targetq)._dgq; + ddi->ddi_stashed_dou._dq = dq; + ddi->ddi_stashed_qos = _dq_state_max_qos(dq_state); +} +#endif // DISPATCH_USE_KEVENT_WORKLOOP DISPATCH_NOINLINE void @@ -1224,11 +1903,27 @@ _dispatch_event_loop_poke(dispatch_wlh_t wlh, uint64_t dq_state, uint32_t flags) .ident = 1, .filter = EVFILT_USER, .fflags = NOTE_TRIGGER, - .udata = (uintptr_t)DISPATCH_WLH_MANAGER, + .udata = (dispatch_kevent_udata_t)DISPATCH_WLH_MANAGER, }; return _dispatch_kq_deferred_update(DISPATCH_WLH_ANON, &ke); } else if (wlh && wlh != DISPATCH_WLH_ANON) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_queue_t dq = (dispatch_queue_t)wlh; + dispatch_assert(_dq_state_is_base_wlh(dq_state)); + if (unlikely(_dq_state_is_enqueued_on_manager(dq_state))) { + dispatch_assert(!(flags & DISPATCH_EVENT_LOOP_OVERRIDE)); + dispatch_assert(flags & DISPATCH_EVENT_LOOP_CONSUME_2); + _dispatch_trace_item_push(&_dispatch_mgr_q, dq); + return dx_push(_dispatch_mgr_q._as_dq, dq, 0); + } + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi && ddi->ddi_wlh == wlh) { + return _dispatch_kevent_workloop_poke_self(ddi, dq_state, flags); + } + return _dispatch_kevent_workloop_poke(wlh, dq_state, 
flags); +#else (void)dq_state; (void)flags; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } DISPATCH_INTERNAL_CRASH(wlh, "Unsupported wlh configuration"); } @@ -1242,10 +1937,32 @@ _dispatch_event_loop_drain(uint32_t flags) int n; again: +#if DISPATCH_USE_KEVENT_WORKLOOP + if (ddi->ddi_wlh_needs_update) { + // see _dispatch_event_loop_drain() comments about the lazy handling + // of DISPATCH_EVENT_LOOP_OVERRIDE + dispatch_queue_t dq = (dispatch_queue_t)wlh; + uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + dispatch_assert(ddi->ddi_wlh_needs_delete); + ddi->ddi_wlh_needs_update = false; + _dispatch_kq_fill_ddi_workloop_event(ddi, + DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE, wlh, dq_state); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP n = ddi->ddi_nevents; ddi->ddi_nevents = 0; _dispatch_kq_drain(wlh, ddi->ddi_eventlist, n, flags); +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(wlh); + if (dwl) { + dispatch_timer_heap_t dth = dwl->dwl_timer_heap; + if (dth && dth[0].dth_dirty_bits) { + _dispatch_event_loop_drain_timers(dth, DISPATCH_TIMER_WLH_COUNT); + } + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP if ((flags & KEVENT_FLAG_IMMEDIATE) && !(flags & KEVENT_FLAG_ERROR_EVENTS) && @@ -1277,38 +1994,238 @@ _dispatch_event_loop_merge(dispatch_kevent_t events, int nevents) _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS); } +#if DISPATCH_USE_KEVENT_WORKLOOP + } else if (dx_metatype((dispatch_queue_t)wlh) == _DISPATCH_WORKLOOP_TYPE) { + dispatch_timer_heap_t dth = ((dispatch_workloop_t)wlh)->dwl_timer_heap; + if (dth && dth[0].dth_dirty_bits) { + _dispatch_event_loop_drain_timers(dth, DISPATCH_TIMER_WLH_COUNT); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP } } void _dispatch_event_loop_leave_immediate(uint64_t dq_state) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_wlh_t wlh = ddi->ddi_wlh; + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | 
KEVENT_FLAG_ERROR_EVENTS | + KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; + dispatch_kevent_s ke; + dispatch_assert(!_dq_state_is_base_wlh(dq_state)); + + // + // A workloop is being retargeted, we need to synchronously destroy + // the thread request as delivering it later would confuse the workloop + // thread into trying to drain this queue as a bottom one. + // + // Doing it synchronously prevents races where the queue is retargeted + // again, and becomes a workloop again + // + dispatch_assert(ddi->ddi_wlh_needs_delete); + ddi->ddi_wlh_needs_delete = false; + ddi->ddi_wlh_needs_update = false; + _dispatch_kq_fill_workloop_event(&ke, + DISPATCH_WORKLOOP_RETARGET, wlh, dq_state); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } +#else (void)dq_state; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } void _dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, uint64_t dq_state) { +#if DISPATCH_USE_KEVENT_WORKLOOP + int action = _dispatch_event_loop_get_action_for_state(dq_state); + dispatch_assert(ddi->ddi_wlh_needs_delete); + ddi->ddi_wlh_needs_delete = false; + ddi->ddi_wlh_needs_update = false; + _dispatch_kq_fill_ddi_workloop_event(ddi, action, ddi->ddi_wlh, dq_state); +#else (void)ddi; (void)dq_state; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } void _dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_wlh_t wlh = dsc->dc_data; + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_kevent_s ke; + + _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_END, + wlh, 0, dsc->dsc_waiter); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, dsc->dsc_waiter_needs_cancel ? 
+ 0 : DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT); + // + // Our deletion attempt is opportunistic as in most cases we will find + // the matching knote and break the waiter out. + // + // However, if the waiter hasn't had a chance to make the syscall + // to wait yet, we get ENOENT. In this case, pre-post the WAKE, + // and transfer the responsibility to delete the knote to the waiter. + // + dsc->dsc_waiter_needs_cancel = true; + _dispatch_kq_fill_workloop_sync_event(&ke, + DISPATCH_WORKLOOP_SYNC_FAKE, wlh, 0, dsc->dsc_waiter); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } + } +#else (void)dsc; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } void _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + dispatch_wlh_t waiter_wlh = dsc->dc_data; + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_kevent_s ke[3]; + int action, n = 0; + + dispatch_assert(_dq_state_drain_locked_by(new_state, dsc->dsc_waiter)); + + if (wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == wlh) { + dispatch_assert(ddi->ddi_wlh_needs_delete); + ddi->ddi_wlh_needs_delete = false; + ddi->ddi_wlh_needs_update = false; + + if (wlh == waiter_wlh) { // async -> sync handoff + dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); + dispatch_assert(!_dq_state_in_sync_transfer(old_state)); + dispatch_assert(_dq_state_in_sync_transfer(new_state)); + + if (_dq_state_is_enqueued_on_target(new_state)) { + action = DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; + } else { + action = DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER; + } + _dispatch_kq_fill_ddi_workloop_event(ddi, action, wlh, new_state); + + int slot = _dispatch_kq_deferred_find_slot(ddi, EVFILT_WORKLOOP, + (uint64_t)wlh, dsc->dsc_waiter); + if (slot == 
ddi->ddi_nevents) { + dispatch_assert(slot < DISPATCH_DEFERRED_ITEMS_EVENT_COUNT); + ddi->ddi_nevents++; + } + _dispatch_kq_fill_workloop_sync_event(&ddi->ddi_eventlist[slot], + DISPATCH_WORKLOOP_SYNC_WAKE, wlh, new_state, dsc->dsc_waiter); + return; + } + } + + if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { + dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); + dispatch_assert(_dq_state_in_sync_transfer(new_state)); + // During the handoff, the waiter noticed there was no work *after* + // that last work item, so we want to kill the thread request while + // there's an owner around to avoid races betwen knote_process() and + // knote_drop() in the kernel. + _dispatch_kq_fill_workloop_event(&ke[n++], + DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, wlh, new_state); + } + if (_dq_state_in_sync_transfer(new_state)) { + // Even when waiter_wlh != wlh we can pretend we got woken up + // which is a knote we will be able to delete later with a SYNC_END. + // This allows rectifying incorrect ownership sooner, and also happens + // on resume if the first item is a sync waiter. + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_WAKE, wlh, new_state, dsc->dsc_waiter); + } + if (_dq_state_in_sync_transfer(old_state)) { + dispatch_tid tid = _dispatch_tid_self(); + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); + } + // + // Past this call it is not safe to look at `wlh` anymore as the callers + // sometimes borrow the refcount of the waiter which we will wake up. 
+ // + if (_dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke[0], 0); + __builtin_unreachable(); + } + + if (unlikely(waiter_wlh != DISPATCH_WLH_ANON && waiter_wlh != wlh)) { + _dispatch_bug_deprecated("Changing target queue hierarchy " + "with a dispatch_sync in flight"); + _dispatch_event_loop_cancel_waiter(dsc); + } +#else (void)dsc; (void)wlh; (void)old_state; (void)new_state; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } void _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_wlh_t wlh = dsc->dc_data; + dispatch_kevent_s ke[2]; + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + uint64_t dq_state; + int i, n = 0; + + dq_state = os_atomic_load2o((dispatch_queue_t)wlh, dq_state, relaxed); + if (dsc->dsc_wlh_was_first && !_dq_state_drain_locked(dq_state) && + _dq_state_is_enqueued_on_target(dq_state)) { + // + // + // + // When an enqueuer is racing with the servicer draining the item that + // is being enqueued and going away, it is possible for the enqueuer to + // mark an empty queue as enqueued and make a thread request for it. + // + // If then a thread is selected to deliver this event, but doesn't make + // it to userland to take the drain lock, any sync waiter will + // nevertheless have to wait for that servicer to consume the thread + // request, trying to delete it will be no good. This is why + // _dispatch_push_sync_waiter() for workloops will not try to "save + // itself" if the enqueued bit is set. + // + // However, we don't know whether this thread request exists, it may + // have bounced, or still be in the process of being added by a much + // lower priority thread, so we need to drive it once to avoid priority + // inversions. 
+ // + _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC, + wlh, dq_state); + } + +again: + _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_WAIT, + wlh, dq_state, dsc->dsc_waiter); + n = _dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags); + for (i = 0; i < n; i++) { + long flags = 0; + if (ke[i].fflags & NOTE_WL_SYNC_WAIT) { + flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR | + DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE; + } + _dispatch_kevent_workloop_drain_error(&ke[i], flags); + } + if (n) { + dispatch_assert(n == 1 && (ke[0].fflags & NOTE_WL_SYNC_WAIT)); + _dispatch_kevent_wlh_debug("restarting", &ke[0]); + dq_state = ke[0].ext[EV_EXTIDX_WL_VALUE]; + n = 0; + goto again; + } +#endif if (dsc->dsc_waiter_needs_cancel) { _dispatch_event_loop_cancel_waiter(dsc); dsc->dsc_waiter_needs_cancel = false; @@ -1322,14 +2239,94 @@ void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) { +#if DISPATCH_USE_KEVENT_WORKLOOP + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_kevent_s ke[2]; + bool needs_forceful_end_ownership = false; + int n = 0; + + dispatch_assert(_dq_state_is_base_wlh(new_state)); + if (_dq_state_is_enqueued_on_target(new_state)) { + _dispatch_kq_fill_workloop_event(&ke[n++], + DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, wlh, new_state); + } else if (_dq_state_is_enqueued_on_target(old_state)) { + // + // Because the thread request knote may not + // have made it, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC may silently + // turn into a no-op. + // + // However, the kernel may know about our ownership anyway, so we need + // to make sure it is forcefully ended. 
+ // + needs_forceful_end_ownership = true; + dispatch_assert(_dq_state_is_suspended(new_state)); + _dispatch_kq_fill_workloop_event(&ke[n++], + DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, wlh, new_state); + } else if (_dq_state_received_sync_wait(old_state)) { + // + // This case happens when the current workloop got waited on by some + // thread calling _dispatch_event_loop_wait_for_ownership. + // + // When the workloop became IDLE, it didn't find the sync waiter + // continuation, didn't have a thread request to cancel either, and so + // we need the kernel to forget about the current thread ownership + // of the workloop. + // + // To forget this ownership, we create a fake WAKE knote that can not + // coalesce with any meaningful one, just so that we can EV_DELETE it + // with the NOTE_WL_END_OWNERSHIP. + // + // This is a gross hack, but this will really only ever happen for + // cases where a sync waiter started to wait on a workloop, but his part + // of the graph got mutated and retargeted onto a different workloop. + // In doing so, that sync waiter has snitched to the kernel about + // ownership, and the workloop he's bogusly waiting on will go through + // this codepath. + // + needs_forceful_end_ownership = true; + } + + if (_dq_state_in_sync_transfer(old_state)) { + dispatch_tid tid = _dispatch_tid_self(); + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); + } else if (needs_forceful_end_ownership) { + kev_flags |= KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; + _dispatch_kq_fill_workloop_event(&ke[n++], + DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, wlh, new_state); + } + + if (_dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke[0], 0); + __builtin_unreachable(); + } + + _dispatch_event_loop_assert_not_owned(wlh); + + int extra_refs = (flags & DISPATCH_EVENT_LOOP_CONSUME_2) ? 
2 : 0; + if (_dq_state_is_enqueued_on_target(old_state)) extra_refs++; + if (_dq_state_is_enqueued_on_target(new_state)) extra_refs--; + dispatch_assert(extra_refs >= 0); + if (extra_refs > 0) _dispatch_release_n((dispatch_queue_t)wlh, extra_refs); +#else (void)wlh; (void)old_state; (void)new_state; (void)flags; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } #if DISPATCH_WLH_DEBUG void _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) { +#if DISPATCH_USE_KEVENT_WORKLOOP + if (wlh != DISPATCH_WLH_ANON) { + dispatch_kevent_s ke; + if (_dispatch_kevent_workloop_get_info(wlh, &ke)) { + dispatch_assert(ke.ext[0] != _pthread_threadid_self_np_direct()); + } + } +#else (void)wlh; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } #endif // DISPATCH_WLH_DEBUG @@ -1356,14 +2353,15 @@ _dispatch_event_loop_timer_program(dispatch_timer_heap_t dth, uint32_t tidx, .flags = action | EV_ONESHOT, .fflags = _dispatch_timer_index_to_fflags[tidx], .data = (int64_t)target, - .udata = (uintptr_t)dth, + .udata = (dispatch_kevent_udata_t)dth, #if DISPATCH_HAVE_TIMER_COALESCING .ext[1] = leeway, #endif #if DISPATCH_USE_KEVENT_QOS - .qos = (typeof(ke.qos))pp, + .qos = (__typeof__(ke.qos))pp, #endif }; + (void)leeway; // if !DISPATCH_HAVE_TIMER_COALESCING _dispatch_kq_deferred_update(wlh, &ke); } @@ -1486,6 +2484,9 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { #endif #if HAVE_DECL_VQ_DESIRED_DISK |VQ_DESIRED_DISK +#endif +#if HAVE_DECL_VQ_FREE_SPACE_CHANGE + |VQ_FREE_SPACE_CHANGE #endif , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, @@ -1528,7 +2529,7 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { .dst_kind = "nw_channel", .dst_filter = EVFILT_NW_CHANNEL, .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR|EV_VANISHED, - .dst_mask = NOTE_FLOW_ADV_UPDATE, + .dst_mask = NOTE_FLOW_ADV_UPDATE|NOTE_CHANNEL_EVENT|NOTE_IF_ADV_UPD, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), .dst_strict = false, @@ -1731,6 
+2732,18 @@ _dispatch_mach_msg_get_audit_trailer(mach_msg_header_t *hdr) return audit_tlr; } +bool +_dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr) +{ + mach_msg_audit_trailer_t *tlr; + tlr = _dispatch_mach_msg_get_audit_trailer(hdr); + if (!tlr) { + DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); + } + + return tlr->msgh_audit.val[DISPATCH_MACH_AUDIT_TOKEN_PID] == 0; +} + DISPATCH_NOINLINE static void _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, @@ -1739,18 +2752,12 @@ _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, pthread_priority_t ovr_pp DISPATCH_UNUSED) { mig_reply_error_t reply; - mach_msg_audit_trailer_t *tlr = NULL; dispatch_assert(sizeof(mig_reply_error_t) == sizeof(union __ReplyUnion___dispatch_libdispatch_internal_protocol_subsystem)); dispatch_assert(sizeof(mig_reply_error_t) < DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE); - tlr = _dispatch_mach_msg_get_audit_trailer(hdr); - if (!tlr) { - DISPATCH_INTERNAL_CRASH(0, "message received without expected trailer"); - } if (hdr->msgh_id <= MACH_NOTIFY_LAST && - dispatch_assume_zero(tlr->msgh_audit.val[ - DISPATCH_MACH_AUDIT_TOKEN_PID])) { + !dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) { mach_msg_destroy(hdr); goto out; } @@ -1869,7 +2876,7 @@ _dispatch_mach_notify_update(dispatch_muxnote_t dmn, uint32_t new_flags, mach_port_mscount_t notify_sync) { mach_port_t previous, port = (mach_port_t)dmn->dmn_kev.ident; - typeof(dmn->dmn_kev.data) prev = dmn->dmn_kev.data; + __typeof__(dmn->dmn_kev.data) prev = dmn->dmn_kev.data; kern_return_t kr, krr = 0; // Update notification registration state. 
@@ -2051,6 +3058,7 @@ _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) dispatch_muxnote_t dmn = _dispatch_unote_get_linkage(dmsr)->du_muxnote; dispatch_unote_linkage_t dul; if (dmn) { +#if HAVE_MACH os_atomic_store(&DISPATCH_MACH_NOTIFICATION_ARMED(dmn), 1, relaxed); LIST_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); @@ -2058,6 +3066,7 @@ _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) } _dispatch_debug("machport[0x%08x]: send-possible notification armed", (mach_port_name_t)dmn->dmn_kev.ident); +#endif } } @@ -2159,20 +3168,27 @@ static void _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) { mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); + mach_msg_size_t siz = _dispatch_kevent_mach_msg_size(ke); dispatch_unote_t du = _dispatch_kevent_get_unote(ke); pthread_priority_t msg_pp = (pthread_priority_t)(ke->ext[2] >> 32); pthread_priority_t ovr_pp = (pthread_priority_t)ke->qos; uint32_t flags = ke->flags; - mach_msg_size_t siz; mach_msg_return_t kr = (mach_msg_return_t)ke->fflags; - if (unlikely(!hdr)) { - DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); - } - if (likely(!kr)) { - return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); - } - if (kr != MACH_RCV_TOO_LARGE) { + if (unlikely(kr == MACH_RCV_TOO_LARGE)) { + if (unlikely(!siz)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message size"); + } + } else if (unlikely(kr == MACH_RCV_INVALID_DATA)) { + dispatch_assert(siz == 0); + DISPATCH_CLIENT_CRASH(kr, "Unable to copyout msg, possible port leak"); + } else { + if (unlikely(!hdr)) { + DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); + } + if (likely(!kr)) { + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + } goto out; } @@ -2183,9 +3199,14 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) DISPATCH_INTERNAL_CRASH(ke->ext[1], "EVFILT_MACHPORT with overlarge 
message"); } + + mach_msg_options_t extra_options = 0; + if (du._du->du_fflags & MACH_MSG_STRICT_REPLY) { + extra_options |= MACH_MSG_STRICT_REPLY; + } const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | - MACH_RCV_TIMEOUT) & ~MACH_RCV_LARGE); - siz = _dispatch_kevent_mach_msg_size(ke) + DISPATCH_MACH_TRAILER_SIZE; + MACH_RCV_TIMEOUT | extra_options) & ~MACH_RCV_LARGE); + siz += DISPATCH_MACH_TRAILER_SIZE; hdr = malloc(siz); // mach_msg will return TOO_LARGE if hdr/siz is NULL/0 kr = mach_msg(hdr, options, 0, dispatch_assume(hdr) ? siz : 0, (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); @@ -2215,6 +3236,11 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, .dst_fflags = 0, + .dst_mask = 0 +#ifdef MACH_RCV_SYNC_PEEK + | MACH_RCV_SYNC_PEEK +#endif + , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), .dst_strict = false, @@ -2224,6 +3250,7 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { .dst_merge_msg = NULL, // never receives messages directly .dst_per_trigger_qos = true, + .dst_allow_empty_mask = true, }; static void @@ -2237,7 +3264,7 @@ const dispatch_source_type_s _dispatch_mach_type_notification = { .dst_kind = "mach_notification", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_unote_class_s), .dst_strict = false, @@ -2254,7 +3281,7 @@ _dispatch_mach_recv_direct_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp) { if (flags & EV_VANISHED) { - DISPATCH_CLIENT_CRASH(du._du->du_ident, + DISPATCH_CLIENT_CRASH(0, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } return 
_dispatch_source_merge_evt(du, flags, data, pp); @@ -2279,18 +3306,22 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { DISPATCH_NORETURN static void -_dispatch_mach_reply_merge_evt(dispatch_unote_t du, - uint32_t flags DISPATCH_UNUSED, uintptr_t data DISPATCH_UNUSED, +_dispatch_mach_reply_merge_evt(dispatch_unote_t du DISPATCH_UNUSED, + uint32_t flags, uintptr_t data DISPATCH_UNUSED, pthread_priority_t pp DISPATCH_UNUSED) { - DISPATCH_INTERNAL_CRASH(du._du->du_ident, "Unexpected event"); + if (flags & EV_VANISHED) { + DISPATCH_CLIENT_CRASH(0, + "Unexpected EV_VANISHED (do not destroy random mach ports)"); + } + DISPATCH_INTERNAL_CRASH(flags, "Unexpected event"); } const dispatch_source_type_s _dispatch_mach_type_reply = { .dst_kind = "mach reply", .dst_filter = EVFILT_MACHPORT, .dst_flags = EV_UDATA_SPECIFIC|EV_DISPATCH|EV_ONESHOT|EV_VANISHED, - .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, + .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_reply_refs_s), .dst_strict = false, diff --git a/src/event/event_windows.c b/src/event/event_windows.c new file mode 100644 index 000000000..2fe968071 --- /dev/null +++ b/src/event/event_windows.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#if DISPATCH_EVENT_BACKEND_WINDOWS + +#pragma mark dispatch_unote_t + +bool +_dispatch_unote_register(dispatch_unote_t du DISPATCH_UNUSED, + dispatch_wlh_t wlh DISPATCH_UNUSED, + dispatch_priority_t pri DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); + return false; +} + +void +_dispatch_unote_resume(dispatch_unote_t du DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +bool +_dispatch_unote_unregister(dispatch_unote_t du DISPATCH_UNUSED, + uint32_t flags DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); + return false; +} + +#pragma mark timers + +void +_dispatch_event_loop_timer_arm(uint32_t tidx DISPATCH_UNUSED, + dispatch_timer_delay_s range DISPATCH_UNUSED, + dispatch_clock_now_cache_t nows DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +void +_dispatch_event_loop_timer_delete(uint32_t tidx DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +#pragma mark dispatch_loop + +void +_dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED, + uint64_t dq_state DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +DISPATCH_NOINLINE +void +_dispatch_event_loop_drain(uint32_t flags DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +void +_dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, + dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state) +{ + (void)dsc; (void)wlh; (void)old_state; (void)new_state; +} + +void +_dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) +{ + if (dsc->dsc_release_storage) { + _dispatch_queue_release_storage(dsc->dc_data); + } +} + +void +_dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, + uint64_t new_state, uint32_t flags) +{ + (void)wlh; (void)old_state; (void)new_state; (void)flags; +} + +#if DISPATCH_WLH_DEBUG +void +_dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) +{ + (void)wlh; +} +#endif + +void +_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +{ + (void)wlh; (void)dq_state; +} 
+ +#endif // DISPATCH_EVENT_BACKEND_WINDOWS diff --git a/src/event/workqueue.c b/src/event/workqueue.c index eabeb461e..326c3d936 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -66,7 +66,9 @@ typedef struct dispatch_workq_monitor_s { int num_registered_tids; } dispatch_workq_monitor_s, *dispatch_workq_monitor_t; +#if HAVE_DISPATCH_WORKQ_MONITORING static dispatch_workq_monitor_s _dispatch_workq_monitors[DISPATCH_QOS_NBUCKETS]; +#endif #pragma mark Implementation of the monitoring subsystem. @@ -93,6 +95,9 @@ _dispatch_workq_worker_register(dispatch_queue_global_t root_q) int worker_id = mon->num_registered_tids++; mon->registered_tids[worker_id] = tid; _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); +#else + (void)root_q; + (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } @@ -117,6 +122,9 @@ _dispatch_workq_worker_unregister(dispatch_queue_global_t root_q) } } _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); +#else + (void)root_q; + (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } diff --git a/src/firehose/firehose.defs b/src/firehose/firehose.defs index c968588e1..0f62d3adb 100644 --- a/src/firehose/firehose.defs +++ b/src/firehose/firehose.defs @@ -23,39 +23,45 @@ #include "firehose_types.defs" -subsystem firehose 11600; -serverprefix firehose_server_; -userprefix firehose_send_; +subsystem firehose 11600; +serverprefix firehose_server_; +userprefix firehose_send_; UseSpecialReplyPort 1; +ConsumeOnSendError Timeout; simpleroutine register( - server_port : mach_port_t; - mem_port : mach_port_move_send_t; - mem_size : mach_vm_size_t; - comm_mem_recvp : mach_port_move_receive_t; - comm_io_recvp : mach_port_move_receive_t; - comm_sendp : mach_port_make_send_t; - extra_info_port : mach_port_move_send_t; - extra_info_size : mach_vm_size_t; - ServerAuditToken atoken : audit_token_t + server_port : mach_port_t; + mem_port : mach_port_move_send_t; + mem_size : mach_vm_size_t; + comm_mem_recvp : mach_port_move_receive_t; + 
comm_io_recvp : mach_port_move_receive_t; + comm_sendp : mach_port_make_send_t; + extra_info_port : mach_port_move_send_t; + extra_info_size : mach_vm_size_t; + ServerAuditToken atoken : audit_token_t ); routine push_and_wait( -RequestPort comm_port : mach_port_t; +RequestPort comm_port : mach_port_t; SReplyPort reply_port : mach_port_make_send_once_t; -out push_reply : firehose_push_reply_t; -out quarantinedOut : boolean_t +out push_reply : firehose_push_reply_t; +out quarantinedOut : boolean_t ); simpleroutine push_async( -RequestPort comm_port : mach_port_t; -in qos_class : qos_class_t; -WaitTime timeout : natural_t +RequestPort comm_port : mach_port_t; +in qos_class : qos_class_t; +WaitTime timeout : natural_t ); routine get_logging_prefs( -RequestPort server_port : mach_port_t; -out mem_port : mach_port_t; -out mem_size : mach_vm_size_t +RequestPort server_port : mach_port_t; +out mem_port : mach_port_t; +out mem_size : mach_vm_size_t +); + +routine should_send_strings( +RequestPort server_port : mach_port_t; +out strings_needed : boolean_t ); diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index 1ba036fb0..db0db5248 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -48,7 +48,7 @@ #endif #define _dispatch_wait_until(c) ({ \ - typeof(c) _c; \ + __typeof__(c) _c; \ for (;;) { \ if (likely(_c = (c))) break; \ dispatch_hardware_pause(); \ @@ -602,6 +602,42 @@ firehose_buffer_get_logging_prefs(firehose_buffer_t fb, size_t *length) return (void *)addr; } +bool +firehose_buffer_should_send_strings(firehose_buffer_t fb) +{ + mach_port_t sendp = fb->fb_header.fbh_sendp[FIREHOSE_BUFFER_PUSHPORT_MEM]; + kern_return_t kr; + boolean_t result = false; + + if (unlikely(sendp == MACH_PORT_DEAD)) { + return false; + } + + if (likely(sendp)) { + kr = firehose_send_should_send_strings(sendp, &result); + if (likely(kr == KERN_SUCCESS)) { + return result; + } + if (kr != MACH_SEND_INVALID_DEST) { + 
DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + + sendp = firehose_client_reconnect(fb, sendp, FIREHOSE_BUFFER_PUSHPORT_MEM); + if (likely(MACH_PORT_VALID(sendp))) { + kr = firehose_send_should_send_strings(sendp, &result); + if (likely(kr == KERN_SUCCESS)) { + return result; + } + if (kr != MACH_SEND_INVALID_DEST) { + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + } + return false; +} + OS_NOT_TAIL_CALLED OS_NOINLINE static void firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, @@ -697,14 +733,6 @@ firehose_client_push_notify_async(mach_port_t server_port OS_UNUSED, return KERN_SUCCESS; } -kern_return_t -firehose_client_get_logging_prefs_reply(mach_port_t req_port OS_UNUSED, - mach_port_t mem_port OS_UNUSED, mach_vm_size_t size OS_UNUSED) -{ - DISPATCH_INTERNAL_CRASH(0, "firehose_client_get_logging_prefs_reply " - "should never be sent to the buffer receive port"); -} - #endif // !KERNEL #pragma mark - #pragma mark Buffer handling @@ -835,7 +863,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, bool installed = false; firehose_chunk_t fc = firehose_buffer_ref_to_chunk(fb, ref); - if (fc->fc_pos.fcp_atomic_pos) { + if (os_atomic_load(&fc->fc_pos.fcp_atomic_pos, relaxed)) { // Needed for process death handling (recycle-reuse): // No atomic fences required, we merely want to make sure the // observers will see memory effects in program (asm) order. 
diff --git a/src/firehose/firehose_buffer_internal.h b/src/firehose/firehose_buffer_internal.h index 1f1b730a9..0b91c4953 100644 --- a/src/firehose/firehose_buffer_internal.h +++ b/src/firehose/firehose_buffer_internal.h @@ -243,6 +243,9 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, void * firehose_buffer_get_logging_prefs(firehose_buffer_t fb, size_t *size); +bool +firehose_buffer_should_send_strings(firehose_buffer_t fb); + void firehose_buffer_update_limits(firehose_buffer_t fb); diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index 64d865439..bf64cbc11 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -23,7 +23,7 @@ #ifndef _os_atomic_basetypeof #define _os_atomic_basetypeof(p) \ - typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) + __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #endif #define firehose_atomic_maxv2o(p, f, v, o, m) \ @@ -134,6 +134,7 @@ firehose_mig_server(dispatch_mig_callback_t demux, size_t maxmsgsz, } if (unlikely(rc != KERN_SUCCESS && rc != MIG_NO_REPLY)) { // destroy the request - but not the reply port + // (MIG moved it into the msg_reply). hdr->msgh_remote_port = 0; mach_msg_destroy(hdr); } @@ -210,7 +211,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); } -/** +/*! 
* @function firehose_buffer_tracepoint_reserve * * @abstract @@ -354,7 +355,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, // firehose_buffer_stream_chunk_install()) __firehose_critical_region_enter(); #if KERNEL - new_state.fss_allocator = (uint32_t)cpu_number(); + new_state.fss_allocator = 1; #else new_state.fss_allocator = _dispatch_lock_value_for_self(); #endif @@ -388,7 +389,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, return firehose_buffer_tracepoint_reserve_slow(fb, &ask, privptr); } -/** +/*! * @function firehose_buffer_tracepoint_flush * * @abstract diff --git a/src/firehose/firehose_reply.defs b/src/firehose/firehose_reply.defs index 1320ea2fe..b5737c030 100644 --- a/src/firehose/firehose_reply.defs +++ b/src/firehose/firehose_reply.defs @@ -27,26 +27,24 @@ subsystem firehoseReply 11700; serverprefix firehose_client_; userprefix firehose_send_; +ConsumeOnSendError Timeout; skip; // firehose_register simpleroutine push_reply( -RequestPort req_port : mach_port_move_send_once_t; -in ReturnCode : kern_return_t; -in push_reply : firehose_push_reply_t; -in quarantined : boolean_t +RequestPort req_port : mach_port_move_send_once_t; +in ReturnCode : kern_return_t; +in push_reply : firehose_push_reply_t; +in quarantined : boolean_t ); simpleroutine push_notify_async( -RequestPort comm_port : mach_port_t; -in push_reply : firehose_push_reply_t; -in quarantined : boolean_t; -WaitTime timeout : natural_t +RequestPort comm_port : mach_port_t; +in push_reply : firehose_push_reply_t; +in quarantined : boolean_t; +WaitTime timeout : natural_t ); -simpleroutine get_logging_prefs_reply( -RequestPort req_port : mach_port_move_send_once_t; -// no ReturnCode for complex messages -in mem_port : mach_port_t; -in mem_size : mach_vm_size_t -); +skip; // get_logging_prefs_reply + +skip; // should_send_strings diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index e870757e0..1422ba7a9 
100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -41,6 +41,7 @@ static struct firehose_server_s { dispatch_mach_t fs_mach_channel; dispatch_queue_t fs_snapshot_gate_queue; dispatch_queue_t fs_io_drain_queue; + dispatch_workloop_t fs_io_wl; dispatch_queue_t fs_mem_drain_queue; firehose_handler_t fs_handler; @@ -139,9 +140,9 @@ OS_ALWAYS_INLINE static inline bool firehose_client_wakeup(firehose_client_t fc, bool for_io) { - uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); - uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); - uintptr_t old_state, new_state; + uint16_t canceled_bit = FC_STATE_CANCELED(for_io); + uint16_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uint16_t old_state, new_state; os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { if (old_state & canceled_bit) { @@ -161,10 +162,10 @@ OS_ALWAYS_INLINE static inline void firehose_client_start_cancel(firehose_client_t fc, bool for_io) { - uintptr_t canceling_bit = FC_STATE_CANCELING(for_io); - uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); - uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); - uintptr_t old_state, new_state; + uint16_t canceling_bit = FC_STATE_CANCELING(for_io); + uint16_t canceled_bit = FC_STATE_CANCELED(for_io); + uint16_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uint16_t old_state, new_state; os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { if (old_state & (canceled_bit | canceling_bit)) { @@ -180,10 +181,10 @@ OS_ALWAYS_INLINE static inline bool firehose_client_dequeue(firehose_client_t fc, bool for_io) { - uintptr_t canceling_bit = FC_STATE_CANCELING(for_io); - uintptr_t canceled_bit = FC_STATE_CANCELED(for_io); - uintptr_t enqueued_bit = FC_STATE_ENQUEUED(for_io); - uintptr_t old_state, new_state; + uint16_t canceling_bit = FC_STATE_CANCELING(for_io); + uint16_t canceled_bit = FC_STATE_CANCELED(for_io); + uint16_t enqueued_bit = FC_STATE_ENQUEUED(for_io); + uint16_t old_state, new_state; 
os_atomic_rmw_loop(&fc->fc_state, old_state, new_state, relaxed, { new_state = old_state & ~(canceling_bit | enqueued_bit); @@ -291,6 +292,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags firehose_buffer_t fb = fc->fc_buffer; firehose_chunk_t fbc; firehose_event_t evt; + firehose_snapshot_event_t sevt; uint16_t volatile *fbh_ring; uint16_t flushed, count = 0; firehose_chunk_ref_t ref; @@ -300,6 +302,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (for_io) { evt = FIREHOSE_EVENT_IO_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER; _Static_assert(FIREHOSE_EVENT_IO_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, ""); fbh_ring = fb->fb_header.fbh_io_ring; @@ -308,6 +311,7 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags if (fc->fc_needs_io_snapshot) snapshot = server_config.fs_snapshot; } else { evt = FIREHOSE_EVENT_MEM_BUFFER_RECEIVED; + sevt = FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER; _Static_assert(FIREHOSE_EVENT_MEM_BUFFER_RECEIVED == FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, ""); fbh_ring = fb->fb_header.fbh_mem_ring; @@ -346,15 +350,16 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags } fbc = firehose_buffer_ref_to_chunk(fb, ref); - if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + firehose_chunk_pos_u fc_pos = fbc->fc_pos; + if (fc_pos.fcp_stream == firehose_stream_metadata) { // serialize with firehose_client_metadata_stream_peek os_unfair_lock_lock(&fc->fc_lock); } - server_config.fs_handler(fc, evt, fbc); + server_config.fs_handler(fc, evt, fbc, fc_pos); if (unlikely(snapshot)) { - snapshot->handler(fc, evt, fbc); + snapshot->handler(fc, sevt, fbc, fc_pos); } - if (fbc->fc_pos.fcp_stream == firehose_stream_metadata) { + if (fc_pos.fcp_stream == firehose_stream_metadata) { os_unfair_lock_unlock(&fc->fc_lock); } // clients not using notifications (single threaded) always drain fully @@ -475,9 +480,10 
@@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } if (fc->fc_memory_corrupted) { server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CORRUPTED, - &fb->fb_chunks[0]); + &fb->fb_chunks[0], (firehose_chunk_pos_u){ .fcp_pos = 0 }); } - server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL); + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_DIED, NULL, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); fs_clients_lock(); TAILQ_REMOVE(&server_config.fs_clients, fc, fc_entry); @@ -489,7 +495,7 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; - _os_object_release(&fc->fc_as_os_object); + _os_object_release_without_xref_dispose(&fc->fc_as_os_object); } OS_NOINLINE @@ -545,7 +551,8 @@ firehose_client_handle_death(void *ctxt) while (bitmap) { firehose_chunk_ref_t ref = firehose_bitmap_first_set(bitmap); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; + firehose_chunk_pos_u fc_pos = fbc->fc_pos; + uint16_t fbc_length = fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); if (fbc->fc_start + fbc_length <= fbc->fc_data) { @@ -558,13 +565,15 @@ firehose_client_handle_death(void *ctxt) // so also just ditch it continue; } - if (!fbc->fc_pos.fcp_flag_io) { + if (!fc_pos.fcp_flag_io) { mem_bitmap |= 1ULL << ref; continue; } - server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc); + server_config.fs_handler(fc, FIREHOSE_EVENT_IO_BUFFER_RECEIVED, fbc, + fc_pos); if (fc->fc_needs_io_snapshot) { - snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc); + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_IO_BUFFER, fbc, + fc_pos); } } @@ -578,11 +587,14 @@ firehose_client_handle_death(void *ctxt) while (mem_bitmap_copy) { firehose_chunk_ref_t ref = firehose_bitmap_first_set(mem_bitmap_copy); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); + 
firehose_chunk_pos_u fc_pos = fbc->fc_pos; mem_bitmap_copy &= ~(1ULL << ref); - server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, fbc); + server_config.fs_handler(fc, FIREHOSE_EVENT_MEM_BUFFER_RECEIVED, + fbc, fc_pos); if (fc->fc_needs_mem_snapshot) { - snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, fbc); + snapshot->handler(fc, FIREHOSE_SNAPSHOT_EVENT_MEM_BUFFER, + fbc, fc_pos); } } @@ -606,15 +618,15 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, break; } - msg_hdr = dispatch_mach_msg_get_msg(dmsg, NULL); - if (msg_hdr->msgh_id == MACH_NOTIFY_NO_SENDERS) { - _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", - firehose_client_get_unique_pid(fc, NULL)); - for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { - dispatch_mach_cancel(fc->fc_mach_channel[i]); - } + mach_msg_destroy(dispatch_mach_msg_get_msg(dmsg, NULL)); + break; + + case DISPATCH_MACH_NO_SENDERS: + _dispatch_debug("FIREHOSE NO_SENDERS (unique_pid: 0x%llx)", + firehose_client_get_unique_pid(fc, NULL)); + for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { + dispatch_mach_cancel(fc->fc_mach_channel[i]); } - mach_msg_destroy(msg_hdr); break; case DISPATCH_MACH_DISCONNECTED: @@ -650,6 +662,8 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, firehose_client_cancel(fc); } break; + default: + break; } } @@ -677,7 +691,8 @@ firehose_client_resume(firehose_client_t fc, TAILQ_INSERT_TAIL(&server_config.fs_clients, fc, fc_entry); fs_clients_unlock(); - server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci); + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_CONNECTED, (void *)fcci, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); if (!fc->fc_pid) { dispatch_activate(fc->fc_kernel_source); } else { @@ -814,14 +829,8 @@ _firehose_client_dispose(firehose_client_t fc) vm_deallocate(mach_task_self(), (vm_address_t)fc->fc_buffer, sizeof(*fc->fc_buffer)); fc->fc_buffer = NULL; - 
server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL); -} - -void -_firehose_client_xref_dispose(firehose_client_t fc) -{ - _dispatch_debug("Cleaning up client info for unique_pid 0x%llx", - firehose_client_get_unique_pid(fc, NULL)); + server_config.fs_handler(fc, FIREHOSE_EVENT_CLIENT_FINALIZE, NULL, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); } uint64_t @@ -860,6 +869,12 @@ firehose_client_get_context(firehose_client_t fc) return os_atomic_load2o(fc, fc_ctxt, relaxed); } +void +firehose_client_set_strings_cached(firehose_client_t fc) +{ + fc->fc_strings_cached = true; +} + void * firehose_client_set_context(firehose_client_t fc, void *ctxt) { @@ -893,7 +908,8 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) { struct firehose_server_s *fs = &server_config; dispatch_queue_attr_t attr = DISPATCH_QUEUE_SERIAL_WITH_AUTORELEASE_POOL; - dispatch_queue_attr_t attr_inactive, attr_utility_inactive; + dispatch_queue_attr_t attr_inactive = + dispatch_queue_attr_make_initially_inactive(attr); dispatch_mach_t dm; dispatch_source_t ds; @@ -903,14 +919,12 @@ firehose_server_init(mach_port_t comm_port, firehose_handler_t handler) fs->fs_snapshot_gate_queue = dispatch_queue_create_with_target( "com.apple.firehose.snapshot-gate", attr, NULL); - attr_inactive = dispatch_queue_attr_make_initially_inactive(attr); - attr_utility_inactive = dispatch_queue_attr_make_with_qos_class( - attr_inactive, QOS_CLASS_UTILITY, 0); + fs->fs_io_wl = dispatch_workloop_create_inactive("com.apple.firehose.io-wl"); + dispatch_set_qos_class_fallback(fs->fs_io_wl, QOS_CLASS_UTILITY); + dispatch_activate(fs->fs_io_wl); fs->fs_io_drain_queue = dispatch_queue_create_with_target( - "com.apple.firehose.drain-io", attr_utility_inactive, NULL); - dispatch_set_qos_class_fallback(fs->fs_io_drain_queue, QOS_CLASS_UTILITY); - dispatch_activate(fs->fs_io_drain_queue); + "com.apple.firehose.drain-io", attr, (dispatch_queue_t)fs->fs_io_wl); fs->fs_mem_drain_queue = 
dispatch_queue_create_with_target( "com.apple.firehose.drain-mem", attr_inactive, NULL); @@ -1040,6 +1054,9 @@ firehose_server_copy_queue(firehose_server_queue_t which) case FIREHOSE_SERVER_QUEUE_MEMORY: dq = server_config.fs_mem_drain_queue; break; + case FIREHOSE_SERVER_QUEUE_IO_WL: + dq = (dispatch_queue_t)server_config.fs_io_wl; + break; default: DISPATCH_INTERNAL_CRASH(which, "Invalid firehose server queue type"); } @@ -1169,7 +1186,8 @@ firehose_client_snapshot_finish(firehose_client_t fc, while (bitmap) { firehose_chunk_ref_t ref = firehose_bitmap_first_set(bitmap); firehose_chunk_t fbc = firehose_buffer_ref_to_chunk(fb, ref); - uint16_t fbc_length = fbc->fc_pos.fcp_next_entry_offs; + firehose_chunk_pos_u fc_pos = fbc->fc_pos; + uint16_t fbc_length = fc_pos.fcp_next_entry_offs; bitmap &= ~(1ULL << ref); if (fbc->fc_start + fbc_length <= fbc->fc_data) { @@ -1182,10 +1200,10 @@ firehose_client_snapshot_finish(firehose_client_t fc, // so also just ditch it continue; } - if (fbc->fc_pos.fcp_flag_io != for_io) { + if (fc_pos.fcp_flag_io != for_io) { continue; } - snapshot->handler(fc, evt, fbc); + snapshot->handler(fc, evt, fbc, fc_pos); } } @@ -1229,7 +1247,8 @@ firehose_snapshot_finish(void *ctxt) { firehose_snapshot_t fs = ctxt; - fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL); + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_COMPLETE, NULL, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); server_config.fs_snapshot = NULL; dispatch_release(fs->fs_group); @@ -1253,14 +1272,16 @@ firehose_snapshot_gate(void *ctxt) dispatch_group_async(fs->fs_group, server_config.fs_mem_drain_queue, ^{ // start the fs_mem_snapshot, this is what triggers the snapshot // logic from _drain() or handle_death() - fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL); + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_MEM_START, NULL, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); firehose_snapshot_tickle_clients(fs, false); dispatch_group_async(fs->fs_group, server_config.fs_io_drain_queue, 
^{ // start the fs_io_snapshot, this is what triggers the snapshot // logic from _drain() or handle_death() // 29868879: must always happen after the memory snapshot started - fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL); + fs->handler(NULL, FIREHOSE_SNAPSHOT_EVENT_IO_START, NULL, + (firehose_chunk_pos_u){ .fcp_pos = 0 }); firehose_snapshot_tickle_clients(fs, true); #if !TARGET_OS_SIMULATOR @@ -1306,23 +1327,14 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, .fcci_version = FIREHOSE_CLIENT_CONNECTED_INFO_VERSION, }; - if (mem_size != sizeof(union firehose_buffer_u)) { - return KERN_INVALID_VALUE; + fc = dispatch_mach_mig_demux_get_context(); + if (fc != NULL) { + return KERN_FAILURE; } - /* - * Request a MACH_NOTIFY_NO_SENDERS notification for the mem_recvp. That - * should indicate the client going away. - */ - mach_port_t previous = MACH_PORT_NULL; - kr = mach_port_request_notification(mach_task_self(), comm_mem_recvp, - MACH_NOTIFY_NO_SENDERS, 0, comm_mem_recvp, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); - DISPATCH_VERIFY_MIG(kr); - if (dispatch_assume_zero(kr)) { - return KERN_FAILURE; + if (mem_size != sizeof(union firehose_buffer_u)) { + return KERN_INVALID_VALUE; } - dispatch_assert(previous == MACH_PORT_NULL); /* Map the memory handle into the server address space */ kr = mach_vm_map(mach_task_self(), &base_addr, mem_size, 0, @@ -1354,6 +1366,12 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, fc = firehose_client_create((firehose_buffer_t)base_addr, (firehose_token_t)&atoken, comm_mem_recvp, comm_io_recvp, comm_sendp); + /* + * Request a no senders notification for the memory channel. + * That should indicate the client going away. 
+ */ + dispatch_mach_request_no_senders( + fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM]); firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { @@ -1369,6 +1387,11 @@ firehose_server_push_async(mach_port_t server_port, qos_class_t qos DISPATCH_UNUSED) { firehose_client_t fc = dispatch_mach_mig_demux_get_context(); + + if (fc == NULL) { + return KERN_FAILURE; + } + bool for_io = (server_port == fc->fc_recvp[FIREHOSE_BUFFER_PUSHPORT_IO]); _dispatch_debug("FIREHOSE_PUSH_ASYNC (unique_pid %llx)", @@ -1385,6 +1408,11 @@ firehose_server_push_and_wait(mach_port_t server_port, boolean_t *quarantinedOut OS_UNUSED) { firehose_client_t fc = dispatch_mach_mig_demux_get_context(); + + if (fc == NULL) { + return KERN_FAILURE; + } + bool for_io = (server_port == fc->fc_recvp[FIREHOSE_BUFFER_PUSHPORT_IO]); _dispatch_debug("FIREHOSE_PUSH (unique_pid %llx)", @@ -1417,3 +1445,15 @@ firehose_server_get_logging_prefs(mach_port_t server_port OS_UNUSED, *prefs_size = (mach_vm_size_t)server_config.fs_prefs_cache_size; return KERN_SUCCESS; } + +kern_return_t +firehose_server_should_send_strings(mach_port_t server_port OS_UNUSED, + boolean_t *needs_strings) +{ + firehose_client_t fc = dispatch_mach_mig_demux_get_context(); + if (fc) { + *needs_strings = !fc->fc_strings_cached; + return KERN_SUCCESS; + } + return KERN_FAILURE; +} diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index 106b7a135..daba772b5 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -44,15 +44,15 @@ struct firehose_client_s { uint64_t volatile fc_io_sent_flushed_pos; uint64_t volatile fc_io_flushed_pos; -#define FC_STATE_ENQUEUED(for_io) (0x0001u << (for_io)) +#define FC_STATE_ENQUEUED(for_io) (uint16_t)(0x0001u << (for_io)) #define FC_STATE_MEM_ENQUEUED 0x0001 #define FC_STATE_IO_ENQUEUED 0x0002 -#define FC_STATE_CANCELING(for_io) (0x0010u << (for_io)) +#define FC_STATE_CANCELING(for_io) (uint16_t)(0x0010u << 
(for_io)) #define FC_STATE_MEM_CANCELING 0x0010 #define FC_STATE_IO_CANCELING 0x0020 -#define FC_STATE_CANCELED(for_io) (0x0100u << (for_io)) +#define FC_STATE_CANCELED(for_io) (uint16_t)(0x0100u << (for_io)) #define FC_STATE_MEM_CANCELED 0x0100 #define FC_STATE_IO_CANCELED 0x0200 #define FC_STATE_CANCELED_MASK 0x0300 @@ -73,14 +73,13 @@ struct firehose_client_s { os_atomic(uint8_t) fc_mach_channel_refcnt; // These bits are mutated from different locking domains, and so cannot be // safely consolidated into a bit-field. + bool volatile fc_strings_cached; bool volatile fc_memory_corrupted; bool volatile fc_needs_io_snapshot; bool volatile fc_needs_mem_snapshot; bool volatile fc_quarantined; -}; +} DISPATCH_ATOMIC64_ALIGN; -void -_firehose_client_xref_dispose(struct firehose_client_s *fc); void _firehose_client_dispose(struct firehose_client_s *fc); diff --git a/src/firehose/firehose_server_object.m b/src/firehose/firehose_server_object.m index 6965ca0f5..c5243c149 100644 --- a/src/firehose/firehose_server_object.m +++ b/src/firehose/firehose_server_object.m @@ -24,20 +24,15 @@ #error the firehose server requires the objc-runtime, no ARC #endif +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(firehose_client) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -+ (void)load { } -- (void)_xref_dispose -{ - _firehose_client_xref_dispose((struct firehose_client_s *)self); - [super _xref_dispose]; -} - -- (void)_dispose +- (void)dealloc { _firehose_client_dispose((struct firehose_client_s *)self); - [super _dispose]; + [super dealloc]; } - (NSString *)debugDescription diff --git a/src/firehose/firehose_types.defs b/src/firehose/firehose_types.defs index 9462fd808..56f60957b 100644 --- a/src/firehose/firehose_types.defs +++ b/src/firehose/firehose_types.defs @@ -21,6 +21,7 @@ #include #include +import ; import ; import ; diff --git a/src/init.c b/src/init.c index c7d869961..abaf55d26 100644 --- a/src/init.c +++ b/src/init.c @@ -32,13 +32,12 @@ #pragma mark - 
#pragma mark dispatch_init - #if USE_LIBDISPATCH_INIT_CONSTRUCTOR DISPATCH_NOTHROW __attribute__((constructor)) void _libdispatch_init(void); -DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_NOTHROW void _libdispatch_init(void) { @@ -46,6 +45,7 @@ _libdispatch_init(void) } #endif +#if !defined(_WIN32) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void) @@ -97,6 +97,7 @@ _dispatch_sigmask(void) r |= pthread_sigmask(SIG_BLOCK, &mask, NULL); return dispatch_assume_zero(r); } +#endif #pragma mark - #pragma mark dispatch_globals @@ -111,8 +112,12 @@ void (*_dispatch_end_NSAutoReleasePool)(void *); #endif #if DISPATCH_USE_THREAD_LOCAL_STORAGE -__thread struct dispatch_tsd __dispatch_tsd; +_Thread_local struct dispatch_tsd __dispatch_tsd; +#if defined(_WIN32) +DWORD __dispatch_tsd_key; +#else pthread_key_t __dispatch_tsd_key; +#endif #elif !DISPATCH_USE_DIRECT_TSD pthread_key_t dispatch_queue_key; pthread_key_t dispatch_frame_key; @@ -437,7 +442,7 @@ _dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa) dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT); idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT; - dqai.dqai_relpri = -(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT); + dqai.dqai_relpri = -(int)(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT); idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT; dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT; @@ -628,8 +633,7 @@ DISPATCH_VTABLE_INSTANCE(disk, DISPATCH_NOINLINE static void -_dispatch_queue_no_activate(dispatch_queue_class_t dqu, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_queue_no_activate(dispatch_queue_class_t dqu) { DISPATCH_INTERNAL_CRASH(dx_type(dqu._dq), "dq_activate called"); } @@ -751,6 +755,17 @@ DISPATCH_VTABLE_INSTANCE(source, .dq_push = _dispatch_lane_push, ); +DISPATCH_VTABLE_INSTANCE(channel, + .do_type = DISPATCH_CHANNEL_TYPE, + .do_dispose = _dispatch_channel_dispose, + .do_debug = _dispatch_channel_debug, + .do_invoke = _dispatch_channel_invoke, + + .dq_activate = _dispatch_lane_activate, + 
.dq_wakeup = _dispatch_channel_wakeup, + .dq_push = _dispatch_lane_push, +); + #if HAVE_MACH DISPATCH_VTABLE_INSTANCE(mach, .do_type = DISPATCH_MACH_CHANNEL_TYPE, @@ -1059,26 +1074,57 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) log_to_file = true; } else if (strcmp(e, "stderr") == 0) { log_to_file = true; +#if defined(_WIN32) + dispatch_logfile = _fileno(stderr); +#else dispatch_logfile = STDERR_FILENO; +#endif } } if (!dispatch_log_disabled) { if (log_to_file && dispatch_logfile == -1) { +#if defined(_WIN32) + char path[MAX_PATH + 1] = {0}; + DWORD dwLength = GetTempPathA(MAX_PATH, path); + dispatch_assert(dwLength <= MAX_PATH + 1); + snprintf(&path[dwLength], MAX_PATH - dwLength, "libdispatch.%d.log", + GetCurrentProcessId()); + dispatch_logfile = _open(path, O_WRONLY | O_APPEND | O_CREAT, 0666); +#else char path[PATH_MAX]; snprintf(path, sizeof(path), "/var/tmp/libdispatch.%d.log", getpid()); dispatch_logfile = open(path, O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666); +#endif } if (dispatch_logfile != -1) { struct timeval tv; +#if defined(_WIN32) + DWORD dwTime = GetTickCount(); + tv.tv_sec = dwTime / 1000; + tv.tv_usec = 1000 * (dwTime % 1000); +#else gettimeofday(&tv, NULL); +#endif #if DISPATCH_DEBUG dispatch_log_basetime = _dispatch_uptime(); #endif +#if defined(_WIN32) + FILE *pLogFile = _fdopen(dispatch_logfile, "w"); + + char szProgramName[MAX_PATH + 1] = {0}; + GetModuleFileNameA(NULL, szProgramName, MAX_PATH); + + fprintf(pLogFile, "=== log file opened for %s[%lu] at " + "%ld.%06u ===\n", szProgramName, GetCurrentProcessId(), + tv.tv_sec, (int)tv.tv_usec); + fclose(pLogFile); +#else dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), tv.tv_sec, (int)tv.tv_usec); +#endif } } } @@ -1090,7 +1136,12 @@ _dispatch_log_file(char *buf, size_t len) buf[len++] = '\n'; retry: +#if defined(_WIN32) + dispatch_assert(len <= UINT_MAX); + r = _write(dispatch_logfile, buf, (unsigned 
int)len); +#else r = write(dispatch_logfile, buf, len); +#endif if (unlikely(r == -1) && errno == EINTR) { goto retry; } @@ -1106,7 +1157,7 @@ _dispatch_logv_file(const char *msg, va_list ap) #if DISPATCH_DEBUG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t", - _dispatch_uptime() - dispatch_log_basetime); + (unsigned long long)_dispatch_uptime() - dispatch_log_basetime); #endif r = vsnprintf(&buf[offset], bufsiz - offset, msg, ap); if (r < 0) return; @@ -1134,6 +1185,36 @@ _dispatch_vsyslog(const char *msg, va_list ap) free(str); } } +#elif defined(_WIN32) +static inline void +_dispatch_syslog(const char *msg) +{ + OutputDebugStringA(msg); +} + +static inline void +_dispatch_vsyslog(const char *msg, va_list ap) +{ + va_list argp; + + va_copy(argp, ap); + + int length = _vscprintf(msg, ap); + if (length == -1) + return; + + char *buffer = malloc((size_t)length + 1); + if (buffer == NULL) + return; + + _vsnprintf(buffer, (size_t)length + 1, msg, argp); + + va_end(argp); + + _dispatch_syslog(buffer); + + free(buffer); +} #else // DISPATCH_USE_SIMPLE_ASL static inline void _dispatch_syslog(const char *msg) @@ -1200,7 +1281,7 @@ _dispatch_debugv(dispatch_object_t dou, const char *msg, va_list ap) int r; #if DISPATCH_DEBUG && !DISPATCH_USE_OS_DEBUG_LOG offset += dsnprintf(&buf[offset], bufsiz - offset, "%llu\t\t%p\t", - _dispatch_uptime() - dispatch_log_basetime, + (unsigned long long)_dispatch_uptime() - dispatch_log_basetime, (void *)_dispatch_thread_self()); #endif if (dou._do) { @@ -1263,7 +1344,7 @@ void _dispatch_temporary_resource_shortage(void) { sleep(1); - asm(""); // prevent tailcall + __asm__ __volatile__(""); // prevent tailcall } void * @@ -1276,7 +1357,7 @@ _dispatch_calloc(size_t num_items, size_t size) return buf; } -/** +/* * If the source string is mutable, allocates memory and copies the contents. * Otherwise returns the source string. 
*/ diff --git a/src/inline_internal.h b/src/inline_internal.h index 69805aff1..f91e2fe7d 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -191,6 +191,16 @@ _dispatch_object_is_sync_waiter(dispatch_object_t dou) return (dou._dc->dc_flags & DC_FLAG_SYNC_WAITER); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_is_channel_item(dispatch_object_t dou) +{ + if (_dispatch_object_has_vtable(dou)) { + return false; + } + return (dou._dc->dc_flags & DC_FLAG_CHANNEL_ITEM); +} + DISPATCH_ALWAYS_INLINE static inline bool _dispatch_object_is_sync_waiter_non_barrier(dispatch_object_t dou) @@ -693,7 +703,7 @@ _dispatch_queue_autorelease_frequency(dispatch_queue_class_t dqu) dispatch_queue_flags_t qaf = _dispatch_queue_atomic_flags(dqu); - qaf &= _DQF_AUTORELEASE_MASK; + qaf &= (dispatch_queue_flags_t)_DQF_AUTORELEASE_MASK; return (dispatch_invoke_flags_t)qaf * factor; } @@ -901,7 +911,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_suspended(uint64_t dq_state) { - return dq_state >= DISPATCH_QUEUE_NEEDS_ACTIVATION; + return dq_state & DISPATCH_QUEUE_SUSPEND_BITS_MASK; } #define DISPATCH_QUEUE_IS_SUSPENDED(x) \ _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) @@ -910,14 +920,24 @@ DISPATCH_ALWAYS_INLINE static inline bool _dq_state_is_inactive(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_INACTIVE; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_INACTIVE; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_is_activated(uint64_t dq_state) +{ + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATED; } DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_needs_activation(uint64_t dq_state) +_dq_state_is_activating(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_NEEDS_ACTIVATION; + return (dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK) == + DISPATCH_QUEUE_ACTIVATING; } DISPATCH_ALWAYS_INLINE @@ -1118,6 +1138,19 @@ static inline dispatch_priority_t 
_dispatch_set_basepri(dispatch_priority_t dbp) #if DISPATCH_PURE_C +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) +{ + uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + if (likely(_dq_state_is_inactive(dq_state))) return; +#ifndef __LP64__ + dq_state >>= 32; +#endif + DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + "dispatch queue/source property setter called after activation"); +} + // Note to later developers: ensure that any initialization changes are // made for statically allocated queues (i.e. _dispatch_main_q). static inline dispatch_queue_class_t @@ -1131,14 +1164,13 @@ _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, DISPATCH_QUEUE_INACTIVE)) == 0); if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) { - dq_state |= DISPATCH_QUEUE_INACTIVE + DISPATCH_QUEUE_NEEDS_ACTIVATION; dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { dq->do_ref_cnt++; // released when DSF_DELETED is set } } - dq_state |= (initial_state_bits & DISPATCH_QUEUE_ROLE_MASK); + dq_state |= initial_state_bits; dq->do_next = DISPATCH_OBJECT_LISTLESS; dqf |= DQF_WIDTH(width); os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); @@ -1558,11 +1590,25 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) #define os_mpsc_looks_empty(Q) \ (os_atomic_load(_os_mpsc_tail Q, relaxed) == NULL) -#define os_mpsc_get_head(Q) \ - _dispatch_wait_until(os_atomic_load(_os_mpsc_head Q, dependency)) +#define os_mpsc_get_head(Q) ({ \ + __typeof__(_os_mpsc_head Q) __n = _os_mpsc_head Q; \ + os_mpsc_node_type(Q) _node; \ + _node = os_atomic_load(__n, dependency); \ + if (unlikely(_node == NULL)) { \ + _node = _dispatch_wait_for_enqueuer((void **)__n); \ + } \ + _node; \ + }) -#define os_mpsc_get_next(_n, _o_next) \ - _dispatch_wait_until(os_atomic_load2o(_n, _o_next, dependency)) +#define os_mpsc_get_next(_n, _o_next) 
({ \ + __typeof__(_n) __n = (_n); \ + _os_atomic_basetypeof(&__n->_o_next) _node; \ + _node = os_atomic_load(&__n->_o_next, dependency); \ + if (unlikely(_node == NULL)) { \ + _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next); \ + } \ + _node; \ + }) #define os_mpsc_pop_head(Q, head, _o_next) ({ \ os_mpsc_node_type(Q) _head = (head), _n; \ @@ -1604,7 +1650,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) }) #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ - typeof(head) _head = (head), _tail = (tail), _n = NULL; \ + __typeof__(head) _head = (head), _tail = (tail), _n = NULL; \ if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next); \ _n; \ }) @@ -1727,6 +1773,13 @@ static inline void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_class_t dq, uint64_t dq_state) { +#if DISPATCH_USE_KEVENT_WORKLOOP + if (likely(_dq_state_is_base_wlh(dq_state))) { + _dispatch_trace_runtime_event(worker_request, dq._dq, 1); + return _dispatch_event_loop_poke((dispatch_wlh_t)dq._dq, dq_state, + DISPATCH_EVENT_LOOP_CONSUME_2); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP _dispatch_trace_item_push(tq, dq); return dx_push(tq, dq, _dq_state_max_qos(dq_state)); } @@ -2479,6 +2532,25 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq); } +// used to forward the do_invoke of a continuation with a vtable to its real +// implementation. +// +// Unlike _dispatch_continuation_pop_forwarded, +// this doesn't free the continuation +#define _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, dq, ...) 
\ + ({ \ + dispatch_continuation_t _dc = (dc); \ + uintptr_t _dc_flags = (dc_flags); \ + _dispatch_continuation_voucher_adopt(_dc, _dc_flags); \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_pop(dq, dc); \ + } \ + __VA_ARGS__; \ + if (!(_dc_flags & DC_FLAG_NO_INTROSPECTION)) { \ + _dispatch_trace_item_complete(_dc); \ + } \ + }) + // used to forward the do_invoke of a continuation with a vtable to its real // implementation. #define _dispatch_continuation_pop_forwarded(dc, dc_flags, dq, ...) \ diff --git a/src/internal.h b/src/internal.h index df742a20b..41b2e10e6 100644 --- a/src/internal.h +++ b/src/internal.h @@ -61,6 +61,9 @@ #if !defined(DISPATCH_LAYOUT_SPI) && TARGET_OS_MAC #define DISPATCH_LAYOUT_SPI 1 #endif +#if !defined(DISPATCH_CHANNEL_SPI) +#define DISPATCH_CHANNEL_SPI 1 +#endif #if __has_include() #include @@ -138,6 +141,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; #ifdef __OBJC__ id _objc_dq; // unsafe cast for the sake of object.m @@ -153,6 +157,7 @@ typedef union { struct dispatch_queue_global_s *_dgq; struct dispatch_queue_pthread_root_s *_dpq; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; dispatch_lane_class_t _dlu; #ifdef __OBJC__ @@ -168,6 +173,7 @@ typedef union { struct dispatch_queue_attr_s *_dqa; struct dispatch_group_s *_dg; struct dispatch_source_s *_ds; + struct dispatch_channel_s *_dch; struct dispatch_mach_s *_dm; struct dispatch_mach_msg_s *_dmsg; struct dispatch_semaphore_s *_dsema; @@ -206,23 +212,23 @@ upcast(dispatch_object_t dou) #include #include #include -#if !TARGET_OS_WIN32 #include -#endif +#include /* private.h must be included last to avoid picking up installed headers. 
*/ +#if !defined(_WIN32) #include +#endif #include "os/object_private.h" #include "queue_private.h" +#include "channel_private.h" #include "workloop_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" #include "os/voucher_private.h" #include "os/voucher_activity_private.h" -#if !TARGET_OS_WIN32 #include "io_private.h" -#endif #include "layout_private.h" #include "benchmark.h" #include "private.h" @@ -255,7 +261,7 @@ upcast(dispatch_object_t dou) #include #endif #endif /* HAVE_MACH */ -#if __has_include() +#if __has_include() && __has_include() #define HAVE_OS_FAULT_WITH_PAYLOAD 1 #include #include @@ -268,9 +274,11 @@ upcast(dispatch_object_t dou) #include -#if !TARGET_OS_WIN32 -#include +#if defined(_WIN32) +#include +#else #include +#include #ifdef __ANDROID__ #include #else @@ -306,11 +314,18 @@ upcast(dispatch_object_t dou) #include #include #include +#if defined(_WIN32) +#define _CRT_RAND_S +#endif #include #include -#if HAVE_UNISTD_H +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif +#if defined(_WIN32) +#include +#include +#endif /* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ @@ -318,7 +333,7 @@ __BEGIN_DECLS /* SPI for Libsystem-internal use */ DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void); -#if !TARGET_OS_WIN32 +#if !defined(_WIN32) DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void); DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_parent(void); DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); @@ -354,14 +369,14 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_DEBUG_QOS DISPATCH_DEBUG #endif -#if __GNUC__ +#if defined(__GNUC__) || defined(__clang__) #define DISPATCH_NOINLINE __attribute__((__noinline__)) #define DISPATCH_USED __attribute__((__used__)) #define DISPATCH_UNUSED __attribute__((__unused__)) #define DISPATCH_WEAK __attribute__((__weak__)) #define DISPATCH_OVERLOADABLE __attribute__((__overloadable__)) -#define DISPATCH_PACKED __attribute__((__packed__)) #if DISPATCH_DEBUG +#define DISPATCH_PACKED __attribute__((__packed__)) #define DISPATCH_ALWAYS_INLINE_NDEBUG #else #define DISPATCH_ALWAYS_INLINE_NDEBUG __attribute__((__always_inline__)) @@ -435,11 +450,11 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #if DISPATCH_DEBUG // sys/queue.h debugging -#if defined(__linux__) -#define QUEUE_MACRO_DEBUG 1 -#else #undef TRASHIT #define TRASHIT(x) do {(x) = (void *)-1;} while (0) +#else // DISPATCH_DEBUG +#ifndef TRASHIT +#define TRASHIT(x) #endif #endif // DISPATCH_DEBUG #define _LIST_TRASH_ENTRY(elm, field) do { \ @@ -459,32 +474,32 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_MODE_NO_FAULTS (1U << 1) extern uint8_t _dispatch_mode; -DISPATCH_EXPORT DISPATCH_NOINLINE +DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug(size_t line, long val); #if HAVE_MACH -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); #endif // HAVE_MACH struct dispatch_unote_class_s; -DISPATCH_NOINLINE 
+DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_kevent_client(const char *msg, const char *filter, const char *operation, int err, uint64_t ident, uint64_t udata, struct dispatch_unote_class_s *du); -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_kevent_vanished(struct dispatch_unote_class_s *du); -DISPATCH_NOINLINE +DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_deprecated(const char *msg); -DISPATCH_NOINLINE DISPATCH_NORETURN +DISPATCH_NOINLINE DISPATCH_NORETURN DISPATCH_COLD void _dispatch_abort(size_t line, long val); #if !defined(DISPATCH_USE_OS_DEBUG_LOG) && DISPATCH_DEBUG -#if __has_include() +#if __has_include() && !TARGET_OS_DRIVERKIT #define DISPATCH_USE_OS_DEBUG_LOG 1 #include #endif @@ -497,7 +512,7 @@ void _dispatch_abort(size_t line, long val); #endif #endif // DISPATCH_USE_SIMPLE_ASL -#if !DISPATCH_USE_SIMPLE_ASL && !DISPATCH_USE_OS_DEBUG_LOG && !TARGET_OS_WIN32 +#if !DISPATCH_USE_SIMPLE_ASL && !DISPATCH_USE_OS_DEBUG_LOG && !defined(_WIN32) #include #endif @@ -514,7 +529,8 @@ void _dispatch_abort(size_t line, long val); #if DISPATCH_USE_OS_DEBUG_LOG #define _dispatch_log(msg, ...) os_debug_log("libdispatch", msg, ## __VA_ARGS__) #else -DISPATCH_EXPORT DISPATCH_NOINLINE __attribute__((__format__(__printf__,1,2))) +DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD +__attribute__((__format__(__printf__,1,2))) void _dispatch_log(const char *msg, ...); #endif // DISPATCH_USE_OS_DEBUG_LOG @@ -571,7 +587,7 @@ _dispatch_assume(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(!e) if (unlikely(!e)) _dispatch_bug(line, e); } #define dispatch_assume(e) \ - ({ typeof(e) _e = (e); _dispatch_assume((long)_e, __LINE__); _e; }) + ({ __typeof__(e) _e = (e); _dispatch_assume((long)_e, __LINE__); _e; }) /* * A lot of API return zero upon success and not-zero on fail. 
Let's capture @@ -584,7 +600,7 @@ _dispatch_assume_zero(long e, size_t line) DISPATCH_STATIC_ASSERT_IF(e) if (unlikely(e)) _dispatch_bug(line, e); } #define dispatch_assume_zero(e) \ - ({ typeof(e) _e = (e); _dispatch_assume_zero((long)_e, __LINE__); _e; }) + ({ __typeof__(e) _e = (e); _dispatch_assume_zero((long)_e, __LINE__); _e; }) /* Make sure the debug statments don't get too stale */ #define _dispatch_debug(x, args...) do { \ @@ -610,7 +626,7 @@ _dispatch_object_debug(dispatch_object_t object, const char *message, ...); void *_dispatch_Block_copy(void *block); #if __GNUC__ -#define _dispatch_Block_copy(x) ((typeof(x))_dispatch_Block_copy(x)) +#define _dispatch_Block_copy(x) ((__typeof__(x))_dispatch_Block_copy(x)) #endif void _dispatch_call_block_and_release(void *block); #endif /* __BLOCKS__ */ @@ -622,7 +638,9 @@ void *_dispatch_calloc(size_t num_items, size_t size); const char *_dispatch_strdup_if_mutable(const char *str); void _dispatch_vtable_init(void); char *_dispatch_get_build(void); +#if !defined(_WIN32) int _dispatch_sigmask(void); +#endif uint64_t _dispatch_timeout(dispatch_time_t when); uint64_t _dispatch_time_nanoseconds_since_epoch(dispatch_time_t when); @@ -675,9 +693,18 @@ _dispatch_fork_becomes_unsafe(void) #endif #endif // !defined(HAVE_PTHREAD_WORKQUEUE_KEVENT) +#ifndef HAVE_PTHREAD_WORKQUEUE_WORKLOOP +#if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(WORKQ_FEATURE_WORKLOOP) && \ + defined(KEVENT_FLAG_WORKLOOP) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 1 +#else +#define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 0 +#endif +#endif // !defined(HAVE_PTHREAD_WORKQUEUE_WORKLOOP) #ifndef DISPATCH_USE_WORKQUEUE_NARROWING -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) #define DISPATCH_USE_WORKQUEUE_NARROWING 1 #else #define DISPATCH_USE_WORKQUEUE_NARROWING 0 @@ -722,6 +749,29 @@ _dispatch_fork_becomes_unsafe(void) 
#endif #endif // !defined(DISPATCH_USE_MGR_THREAD) +#ifndef DISPATCH_USE_KEVENT_WORKLOOP +#if HAVE_PTHREAD_WORKQUEUE_WORKLOOP +#define DISPATCH_USE_KEVENT_WORKLOOP 1 +#else +#define DISPATCH_USE_KEVENT_WORKLOOP 0 +#endif +#endif // !defined(DISPATCH_USE_KEVENT_WORKLOOP) + +#ifndef DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 1 +#else +#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 0 +#endif +#endif // !defined DISPATCH_USE_WL_SYNC_IPC_HANDOFF + +#ifndef DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) +#define DISPATCH_USE_KEVENT_SETUP 1 +#else +#define DISPATCH_USE_KEVENT_SETUP 0 +#endif +#endif // !defined(DISPATCH_USE_KEVENT_SETUP) #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS @@ -748,7 +798,7 @@ extern bool _dispatch_memory_warn; #endif #if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) && \ !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE) #define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1 #endif @@ -939,7 +989,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #ifndef VOUCHER_USE_PERSONA #if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) && \ - TARGET_OS_IOS && !TARGET_OS_SIMULATOR + !TARGET_OS_SIMULATOR #define VOUCHER_USE_PERSONA 1 #else #define VOUCHER_USE_PERSONA 0 @@ -958,9 +1008,19 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // +#ifdef _WIN32 +#define _dispatch_set_crash_log_cause_and_message(ac, msg) do { \ + (void)(ac); \ + _dispatch_set_crash_log_message_dynamic((msg)); \ + } while (0) +#define _dispatch_set_crash_log_message(msg) \ + _dispatch_set_crash_log_message_dynamic((msg)) +#define _dispatch_set_crash_log_message_dynamic(msg) _RPTF0(_CRT_ASSERT, (msg)) 
+#else // _WIN32 #define _dispatch_set_crash_log_cause_and_message(ac, msg) ((void)(ac)) #define _dispatch_set_crash_log_message(msg) #define _dispatch_set_crash_log_message_dynamic(msg) +#endif // _WIN32 #if HAVE_MACH // MIG_REPLY_MISMATCH means either: @@ -1003,6 +1063,18 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, _dispatch_hardware_crash(); \ } while (0) +#if defined(_WIN32) +#define _dispatch_client_assert_fail(fmt, ...) do { \ + char *_msg = NULL; \ + int _length = _scprintf("%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ + dispatch_assert(_length != -1); \ + _msg = (char *)malloc((unsigned)_length + 1); \ + dispatch_assert(_msg); \ + snprintf(_msg, (unsigned)_length + 1, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ + _dispatch_assert_crash(_msg); \ + free(_msg); \ + } while (0) +#else #define _dispatch_client_assert_fail(fmt, ...) do { \ char *_msg = NULL; \ asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ @@ -1010,10 +1082,11 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, _dispatch_assert_crash(_msg); \ free(_msg); \ } while (0) +#endif #define DISPATCH_NO_VOUCHER ((voucher_t)(void*)~0ul) #define DISPATCH_NO_PRIORITY ((pthread_priority_t)~0ul) -DISPATCH_ENUM(dispatch_thread_set_self, unsigned long, +DISPATCH_OPTIONS(dispatch_thread_set_self, unsigned long, DISPATCH_PRIORITY_ENFORCE = 0x1, DISPATCH_VOUCHER_REPLACE = 0x2, DISPATCH_VOUCHER_CONSUME = 0x4, @@ -1025,6 +1098,7 @@ static inline voucher_t _dispatch_adopt_priority_and_set_voucher( dispatch_thread_set_self_t flags); #if HAVE_MACH mach_port_t _dispatch_get_mach_host_port(void); +bool _dispatch_mach_msg_sender_is_kernel(mach_msg_header_t *hdr); #endif #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -1044,6 +1118,11 @@ extern bool _dispatch_kevent_workqueue_enabled; #define _dispatch_kevent_workqueue_enabled (0) #endif // DISPATCH_USE_KEVENT_WORKQUEUE +#if DISPATCH_USE_KEVENT_WORKLOOP +#if !DISPATCH_USE_KEVENT_WORKQUEUE || 
!DISPATCH_USE_KEVENT_QOS +#error Invalid build configuration +#endif +#endif /* #includes dependent on internal.h */ #include "object_internal.h" @@ -1054,9 +1133,7 @@ extern bool _dispatch_kevent_workqueue_enabled; #include "mach_internal.h" #include "voucher_internal.h" #include "data_internal.h" -#if !TARGET_OS_WIN32 #include "io_internal.h" -#endif #include "inline_internal.h" #include "firehose/firehose_internal.h" diff --git a/src/introspection.c b/src/introspection.c index 95f6eb143..27a955be9 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -129,7 +129,7 @@ _dispatch_introspection_thread_add(void) _dispatch_unfair_lock_unlock(&_dispatch_introspection.threads_lock); } -static void +static DISPATCH_TSD_DTOR_CC void _dispatch_introspection_thread_remove(void *ctxt) { dispatch_introspection_thread_t dit = ctxt; @@ -238,6 +238,9 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, flags = (uintptr_t)dc->dc_data; dq = dq->do_targetq; break; + case DISPATCH_CONTINUATION_TYPE(MACH_IPC_HANDOFF): + flags = (uintptr_t)dc->dc_data; + break; default: DISPATCH_INTERNAL_CRASH(dc->do_vtable, "Unknown dc vtable type"); } @@ -246,6 +249,10 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, waiter = pthread_from_mach_thread_np(dsc->dsc_waiter); ctxt = dsc->dsc_ctxt; func = dsc->dsc_func; + } else if (_dispatch_object_is_channel_item(dc)) { + dispatch_channel_callbacks_t callbacks = upcast(dq)._dch->dch_callbacks; + ctxt = dc->dc_ctxt; + func = (dispatch_function_t)callbacks->dcc_invoke; } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; @@ -386,7 +393,8 @@ dispatch_introspection_queue_item_get_info(dispatch_queue_t dq, } if (metatype == _DISPATCH_CONTINUATION_TYPE) { _dispatch_introspection_continuation_get_info(dq, dc, &diqi); - } else if (metatype == _DISPATCH_LANE_TYPE) { + } else if (metatype == _DISPATCH_LANE_TYPE || + type == DISPATCH_CHANNEL_TYPE) { 
diqi.type = dispatch_introspection_queue_item_type_queue; diqi.queue = _dispatch_introspection_lane_get_info(dou._dl); } else if (metatype == _DISPATCH_WORKLOOP_TYPE) { @@ -493,7 +501,10 @@ _dispatch_introspection_queue_fake_sync_push_pop(dispatch_queue_t dq, _dispatch_trace_item_push(dq, &dsc); _dispatch_trace_item_pop(dq, &dsc); +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wreturn-stack-address" return (struct dispatch_object_s *)(uintptr_t)&dsc; +#pragma clang diagnostic pop } #pragma mark - @@ -522,7 +533,7 @@ dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { unlikely(_dispatch_introspection_hooks.h) #define DISPATCH_INTROSPECTION_HOOK_CALLOUT(h, ...) ({ \ - typeof(_dispatch_introspection_hooks.h) _h; \ + __typeof__(_dispatch_introspection_hooks.h) _h; \ _h = _dispatch_introspection_hooks.h; \ if (unlikely((void*)(_h) != DISPATCH_INTROSPECTION_NO_HOOK)) { \ _h(__VA_ARGS__); \ @@ -530,7 +541,7 @@ dispatch_introspection_hooks_s _dispatch_introspection_hook_callouts_enabled = { #define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK(h) \ DISPATCH_EXPORT void _dispatch_introspection_hook_##h(void) \ - asm("_dispatch_introspection_hook_" #h); \ + __asm__("_dispatch_introspection_hook_" #h); \ void _dispatch_introspection_hook_##h(void) {} #define DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(h, ...)\ diff --git a/src/io.c b/src/io.c index dea62a685..0624fffd4 100644 --- a/src/io.c +++ b/src/io.c @@ -20,11 +20,25 @@ #include "internal.h" +#if defined(__FreeBSD__) +#include +#define F_RDADVISE F_RDAHEAD +#endif + #ifndef DISPATCH_IO_DEBUG #define DISPATCH_IO_DEBUG DISPATCH_DEBUG #endif #ifndef PAGE_SIZE +#if defined(_WIN32) +static DWORD +getpagesize(void) +{ + SYSTEM_INFO siInfo; + GetSystemInfo(&siInfo); + return siInfo.dwPageSize; +} +#endif #define PAGE_SIZE ((size_t)getpagesize()) #endif @@ -114,7 +128,7 @@ enum { }; #define _dispatch_io_Block_copy(x) \ - ((typeof(x))_dispatch_Block_copy((dispatch_block_t)(x))) + 
((__typeof__(x))_dispatch_Block_copy((dispatch_block_t)(x))) #pragma mark - #pragma mark dispatch_io_debug @@ -141,7 +155,7 @@ enum { _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) -#define _dispatch_channel_debug(msg, channel, ...) \ +#define _dispatch_io_channel_debug(msg, channel, ...) \ _dispatch_io_log("channel[%p]: " msg, channel, ##__VA_ARGS__) #define _dispatch_fd_entry_debug(msg, fd_entry, ...) \ _dispatch_io_log("fd_entry[%p]: " msg, fd_entry, ##__VA_ARGS__) @@ -197,7 +211,7 @@ DISPATCH_GLOBAL_INIT(struct dispatch_io_defaults_s dispatch_io_defaults, { }); #define _dispatch_iocntl_set_default(p, v) do { \ - dispatch_io_defaults.p = (typeof(dispatch_io_defaults.p))(v); \ + dispatch_io_defaults.p = (__typeof__(dispatch_io_defaults.p))(v); \ } while (0) void @@ -247,7 +261,7 @@ _dispatch_io_init(dispatch_io_t channel, dispatch_fd_entry_t fd_entry, _dispatch_retain(queue); dispatch_async(!err ? 
fd_entry->close_queue : channel->queue, ^{ dispatch_async(queue, ^{ - _dispatch_channel_debug("cleanup handler invoke: err %d", + _dispatch_io_channel_debug("cleanup handler invoke: err %d", channel, err); cleanup_handler(err); }); @@ -341,7 +355,7 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = fd; - _dispatch_channel_debug("create", channel); + _dispatch_io_channel_debug("create", channel); channel->fd_actual = fd; dispatch_suspend(channel->queue); _dispatch_retain(queue); @@ -353,12 +367,23 @@ dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, err = _dispatch_io_validate_type(channel, fd_entry->stat.mode); } if (!err && type == DISPATCH_IO_RANDOM) { +#if defined(_WIN32) + LARGE_INTEGER liPosition; + LARGE_INTEGER liDistance = {}; + if (!SetFilePointerEx((HANDLE)fd_entry->fd, liDistance, &liPosition, FILE_CURRENT)) { + err = (int)GetLastError(); + } else { + err = 0; + channel->f_ptr = liPosition.QuadPart; + } +#else off_t f_ptr; _dispatch_io_syscall_switch_noerr(err, f_ptr = lseek(fd_entry->fd, 0, SEEK_CUR), case 0: channel->f_ptr = f_ptr; break; default: (void)dispatch_assume_zero(err); break; ); +#endif } channel->err = err; _dispatch_fd_entry_retain(fd_entry); @@ -397,7 +422,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, } dispatch_io_t channel = _dispatch_io_create(type); channel->fd = -1; - _dispatch_channel_debug("create with path %s", channel, path); + _dispatch_io_channel_debug("create with path %s", channel, path); channel->fd_actual = -1; path_data->channel = channel; path_data->oflag = oflag; @@ -410,11 +435,15 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int err = 0; struct stat st; _dispatch_io_syscall_switch_noerr(err, +#if defined(_WIN32) + stat(path_data->path, &st), +#else (path_data->oflag & O_NOFOLLOW) == O_NOFOLLOW -#ifndef __linux__ +#if __APPLE__ || (path_data->oflag & O_SYMLINK) == 
O_SYMLINK #endif ? lstat(path_data->path, &st) : stat(path_data->path, &st), +#endif case 0: err = _dispatch_io_validate_type(channel, st.st_mode); break; @@ -483,7 +512,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, return DISPATCH_BAD_INPUT; } dispatch_io_t channel = _dispatch_io_create(type); - _dispatch_channel_debug("create with channel %p", channel, in_channel); + _dispatch_io_channel_debug("create with channel %p", channel, in_channel); dispatch_suspend(channel->queue); _dispatch_retain(queue); _dispatch_retain(channel); @@ -515,12 +544,23 @@ dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t in_channel, in_channel->fd_entry->stat.mode); } if (!err && type == DISPATCH_IO_RANDOM && in_channel->fd != -1) { +#if defined(_WIN32) + LARGE_INTEGER liPosition; + LARGE_INTEGER liDistance = {}; + if (!SetFilePointerEx((HANDLE)in_channel->fd_entry->fd, liDistance, &liPosition, FILE_CURRENT)) { + err = (int)GetLastError(); + } else { + err = 0; + channel->f_ptr = liPosition.QuadPart; + } +#else off_t f_ptr; _dispatch_io_syscall_switch_noerr(err, f_ptr = lseek(in_channel->fd_entry->fd, 0, SEEK_CUR), case 0: channel->f_ptr = f_ptr; break; default: (void)dispatch_assume_zero(err); break; ); +#endif } channel->err = err; if (err) { @@ -590,7 +630,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set high water: %zu", channel, high_water); + _dispatch_io_channel_debug("set high water: %zu", channel, high_water); if (channel->params.low > high_water) { channel->params.low = high_water; } @@ -604,7 +644,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water) { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set low water: %zu", channel, low_water); + _dispatch_io_channel_debug("set low water: %zu", channel, low_water); if (channel->params.high < low_water) { 
channel->params.high = low_water ? low_water : 1; } @@ -619,7 +659,8 @@ dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, { _dispatch_retain(channel); dispatch_async(channel->queue, ^{ - _dispatch_channel_debug("set interval: %llu", channel, interval); + _dispatch_io_channel_debug("set interval: %llu", channel, + (unsigned long long)interval); channel->params.interval = interval < INT64_MAX ? interval : INT64_MAX; channel->params.interval_flags = flags; _dispatch_release(channel); @@ -663,7 +704,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel) static void _dispatch_io_stop(dispatch_io_t channel) { - _dispatch_channel_debug("stop", channel); + _dispatch_io_channel_debug("stop", channel); (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ @@ -671,7 +712,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_object_debug(channel, "%s", __func__); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { - _dispatch_channel_debug("stop cleanup", channel); + _dispatch_io_channel_debug("stop cleanup", channel); _dispatch_fd_entry_cleanup_operations(fd_entry, channel); if (!(channel->atomic_flags & DIO_CLOSED)) { if (fd_entry->path_data) { @@ -685,7 +726,7 @@ _dispatch_io_stop(dispatch_io_t channel) _dispatch_retain(channel); dispatch_async(_dispatch_io_fds_lockq, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("stop cleanup after close", + _dispatch_io_channel_debug("stop cleanup after close", channel); dispatch_fd_entry_t fdi; uintptr_t hash = DIO_HASH(channel->fd); @@ -721,7 +762,7 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ _dispatch_object_debug(channel, "%s", __func__); - _dispatch_channel_debug("close", channel); + _dispatch_io_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { 
(void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, relaxed); @@ -1007,7 +1048,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } else if (direction == DOP_DIR_WRITE && !err) { d = NULL; } - _dispatch_channel_debug("IO handler invoke: err %d", channel, + _dispatch_io_channel_debug("IO handler invoke: err %d", channel, err); handler(true, d, err); _dispatch_release(channel); @@ -1019,7 +1060,7 @@ _dispatch_operation_create(dispatch_op_direction_t direction, } dispatch_operation_t op = _dispatch_object_alloc(DISPATCH_VTABLE(operation), sizeof(struct dispatch_operation_s)); - _dispatch_channel_debug("operation create: %p", channel, op); + _dispatch_io_channel_debug("operation create: %p", channel, op); op->do_next = DISPATCH_OBJECT_LISTLESS; op->do_xref_cnt = -1; // operation object is not exposed externally op->op_q = dispatch_queue_create_with_target("com.apple.libdispatch-io.opq", @@ -1063,7 +1104,11 @@ _dispatch_operation_dispose(dispatch_operation_t op, } // For write operations, op->buf is owned by op->buf_data if (op->buf && op->direction == DOP_DIR_READ) { +#if defined(_WIN32) + _aligned_free(op->buf); +#else free(op->buf); +#endif } if (op->buf_data) { _dispatch_io_data_release(op->buf_data); @@ -1214,13 +1259,15 @@ _dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) ); } #else +#if !defined(_WIN32) static inline void _dispatch_fd_entry_guard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } +#endif static inline void _dispatch_fd_entry_unguard(dispatch_fd_entry_t fd_entry) { (void)fd_entry; } #endif // DISPATCH_USE_GUARDED_FD -static inline int +static inline dispatch_fd_t _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, int oflag, mode_t mode) { #if DISPATCH_USE_GUARDED_FD @@ -1237,11 +1284,28 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, #else (void)fd_entry; #endif +#if defined(_WIN32) + (void)mode; + DWORD dwDesiredAccess = 0; + if (oflag & _O_RDWR) + 
dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + else if (oflag & _O_RDONLY) + dwDesiredAccess = GENERIC_READ; + else if (oflag & _O_WRONLY) + dwDesiredAccess = GENERIC_WRITE; + DWORD dwCreationDisposition = OPEN_EXISTING; + if (oflag & _O_CREAT) + dwCreationDisposition = OPEN_ALWAYS; + if (oflag & _O_TRUNC) + dwCreationDisposition = CREATE_ALWAYS; + return (dispatch_fd_t)CreateFile(path, dwDesiredAccess, 0, NULL, dwCreationDisposition, 0, NULL); +#else return open(path, oflag, mode); +#endif } static inline int -_dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { +_dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, dispatch_fd_t fd) { #if DISPATCH_USE_GUARDED_FD if (fd_entry->guard_flags) { guardid_t guard = (uintptr_t)fd_entry; @@ -1251,7 +1315,11 @@ _dispatch_fd_entry_guarded_close(dispatch_fd_entry_t fd_entry, int fd) { (void)fd_entry; #endif { +#if defined(_WIN32) + return CloseHandle((HANDLE)fd); +#else return close(fd); +#endif } } @@ -1324,6 +1392,24 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) "com.apple.libdispatch-io.barrierq", NULL); fd_entry->barrier_group = dispatch_group_create(); dispatch_async(fd_entry->barrier_queue, ^{ +#if defined(_WIN32) + DWORD dwType = GetFileType((HANDLE)fd); + if (dwType == FILE_TYPE_PIPE) { + unsigned long value = 1; + int result = ioctlsocket((SOCKET)fd, (long)FIONBIO, &value); + (void)dispatch_assume_zero(result); + _dispatch_stream_init(fd_entry, + _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); + } else { + dispatch_suspend(fd_entry->barrier_queue); + dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, + _dispatch_io_devs_lockq_init); + dispatch_async(_dispatch_io_devs_lockq, ^{ + _dispatch_disk_init(fd_entry, 0); + dispatch_resume(fd_entry->barrier_queue); + }); + } +#else _dispatch_fd_entry_debug("stat", fd_entry); int err, orig_flags, orig_nosigpipe = -1; struct stat st; @@ -1365,7 +1451,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, 
uintptr_t hash) break; ); } - dev_t dev = major(st.st_dev); + dev_t dev = (dev_t)major(st.st_dev); // We have to get the disk on the global dev queue. The // barrier queue cannot continue until that is complete dispatch_suspend(fd_entry->barrier_queue); @@ -1391,6 +1477,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) } fd_entry->orig_flags = orig_flags; fd_entry->orig_nosigpipe = orig_nosigpipe; +#endif }); // This is the first item run when the close queue is resumed, indicating // that all channels associated with this entry have been closed and that @@ -1421,6 +1508,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) dispatch_release(fd_entry->barrier_queue); _dispatch_fd_entry_debug("barrier group release", fd_entry); dispatch_release(fd_entry->barrier_group); +#if !defined(_WIN32) if (fd_entry->orig_flags != -1) { _dispatch_io_syscall( fcntl(fd, F_SETFL, fd_entry->orig_flags) @@ -1432,6 +1520,7 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) fcntl(fd, F_SETNOSIGPIPE, fd_entry->orig_nosigpipe) ); } +#endif #endif _dispatch_fd_entry_unguard(fd_entry); if (fd_entry->convenience_channel) { @@ -1452,7 +1541,11 @@ _dispatch_fd_entry_create_with_path(dispatch_io_path_data_t path_data, path_data->channel->queue); _dispatch_fd_entry_debug("create: path %s", fd_entry, path_data->path); if (S_ISREG(mode)) { - _dispatch_disk_init(fd_entry, major(dev)); +#if defined(_WIN32) + _dispatch_disk_init(fd_entry, 0); +#else + _dispatch_disk_init(fd_entry, (dev_t)major(dev)); +#endif } else { _dispatch_stream_init(fd_entry, _dispatch_get_default_queue(false)); @@ -1507,7 +1600,7 @@ _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) if (fd_entry->err) { return fd_entry->err; } - int fd = -1; + dispatch_fd_t fd = -1; int oflag = fd_entry->disk ? 
fd_entry->path_data->oflag & ~O_NONBLOCK : fd_entry->path_data->oflag | O_NONBLOCK; open: @@ -1815,7 +1908,7 @@ _dispatch_stream_cleanup_operations(dispatch_stream_t stream, { // On stream queue dispatch_operation_t op, tmp; - typeof(*stream->operations) *operations; + __typeof__(*stream->operations) *operations; operations = &stream->operations[DISPATCH_IO_RANDOM]; TAILQ_FOREACH_SAFE(op, operations, operation_list, tmp) { if (!channel || op->channel == channel) { @@ -2129,8 +2222,12 @@ static void _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) { _dispatch_op_debug("advise", op); +#if defined(_WIN32) + (void)op; + (void)chunk_size; +#else if (_dispatch_io_get_error(op, NULL, true)) return; -#ifdef __linux__ +#if defined(__linux__) || defined(__FreeBSD__) // linux does not support fcntl (F_RDAVISE) // define necessary datastructure and use readahead struct radvisory { @@ -2157,7 +2254,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; -#ifdef __linux__ +#if defined(__linux__) _dispatch_io_syscall_switch(err, readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count), case EINVAL: break; // fd does refer to a non-supported filetype @@ -2172,6 +2269,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) default: (void)dispatch_assume_zero(err); break; ); #endif +#endif } static int @@ -2205,7 +2303,17 @@ _dispatch_operation_perform(dispatch_operation_t op) } else { op->buf_siz = max_buf_siz; } +#if defined(_WIN32) + static bool bQueried = false; + static SYSTEM_INFO siInfo; + if (!bQueried) { + GetNativeSystemInfo(&siInfo); + bQueried = true; + } + op->buf = _aligned_malloc(op->buf_siz, siInfo.dwPageSize); +#else op->buf = valloc(op->buf_siz); +#endif _dispatch_op_debug("buffer allocated", op); } else if (op->direction == DOP_DIR_WRITE) { // Always write the first data piece, if that is smaller than a @@ 
-2243,20 +2351,51 @@ _dispatch_operation_perform(dispatch_operation_t op) } void *buf = op->buf + op->buf_len; size_t len = op->buf_siz - op->buf_len; +#if defined(_WIN32) + assert(len <= UINT_MAX && "overflow for read/write"); + LONGLONG off = (LONGLONG)((size_t)op->offset + op->total); +#else off_t off = (off_t)((size_t)op->offset + op->total); +#endif +#if defined(_WIN32) + long processed = -1; +#else ssize_t processed = -1; +#endif syscall: if (op->direction == DOP_DIR_READ) { if (op->params.type == DISPATCH_IO_STREAM) { +#if defined(_WIN32) + ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); +#else processed = read(op->fd_entry->fd, buf, len); +#endif } else if (op->params.type == DISPATCH_IO_RANDOM) { +#if defined(_WIN32) + OVERLAPPED ovlOverlapped = {}; + ovlOverlapped.Offset = off & 0xffffffff; + ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; + ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); +#else processed = pread(op->fd_entry->fd, buf, len, off); +#endif } } else if (op->direction == DOP_DIR_WRITE) { if (op->params.type == DISPATCH_IO_STREAM) { +#if defined(_WIN32) + WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); +#else processed = write(op->fd_entry->fd, buf, len); +#endif } else if (op->params.type == DISPATCH_IO_RANDOM) { +#if defined(_WIN32) + OVERLAPPED ovlOverlapped = {}; + ovlOverlapped.Offset = off & 0xffffffff; + ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; + WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); +#else processed = pwrite(op->fd_entry->fd, buf, len, off); +#endif } } // Encountered an error on the file descriptor @@ -2415,6 +2554,7 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, #pragma mark - #pragma mark dispatch_io_debug +DISPATCH_COLD static size_t _dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) { @@ -2446,6 +2586,7 @@ 
_dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz) return offset; } +DISPATCH_COLD static size_t _dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, size_t bufsiz) diff --git a/src/io_internal.h b/src/io_internal.h index 15a96eb84..c076cfc69 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -145,7 +145,11 @@ struct dispatch_operation_s { dispatch_queue_t op_q; dispatch_op_direction_t direction; // READ OR WRITE dispatch_io_param_s params; +#if defined(_WIN32) + LONGLONG offset; +#else off_t offset; +#endif size_t length; int err; dispatch_io_handler_t handler; @@ -172,13 +176,19 @@ struct dispatch_io_s { dispatch_fd_entry_t fd_entry; unsigned int atomic_flags; dispatch_fd_t fd, fd_actual; +#if defined(_WIN32) + LONGLONG f_ptr; +#else off_t f_ptr; +#endif int err; // contains creation errors only }; void _dispatch_io_set_target_queue(dispatch_io_t channel, dispatch_queue_t dq); +DISPATCH_COLD size_t _dispatch_io_debug(dispatch_io_t channel, char* buf, size_t bufsiz); void _dispatch_io_dispose(dispatch_io_t channel, bool *allow_free); +DISPATCH_COLD size_t _dispatch_operation_debug(dispatch_operation_t op, char* buf, size_t bufsiz); void _dispatch_operation_dispose(dispatch_operation_t operation, diff --git a/src/libdispatch.plist b/src/libdispatch.plist deleted file mode 100644 index e05149258..000000000 --- a/src/libdispatch.plist +++ /dev/null @@ -1,99 +0,0 @@ - - - - - - Name - libdispatch - Children - - - Name - Significant Problems (should fix) - Children - - - Name - Non Leaf Retarget - Type - Impulse - KTraceCode - 0x2e020004 - - - Name - Retarget after Activation - Type - Impulse - KTraceCode - 0x2e020008 - - - Name - Mutation after Activation - Type - Impulse - KTraceCode - 0x2e02000c - - - Name - Source Firing Without a Handler - Type - Impulse - KTraceCode - 0x2e020020 - - - - - Name - Performance Problems - Children - - - Name - Delayed Source Registration - Type - Impulse - KTraceCode - 0x2e020010 - - - Name - 
Mutable target queue during traversal - Type - Impulse - KTraceCode - 0x2e020014 - - - Name - Timer with Strict + Background - Type - Impulse - KTraceCode - 0x2e020018 - - - Name - Suspended timer firing - Type - Impulse - KTraceCode - 0x2e02001c - - - Name - Source registration falling back to QOS_CLASS_DEFAULT - Type - Impulse - KTraceCode - 0x2e020024 - - - - - - - diff --git a/src/mach.c b/src/mach.c index 54da00be4..9d64c0df9 100644 --- a/src/mach.c +++ b/src/mach.c @@ -31,7 +31,7 @@ #define DM_CHECKIN_CANCELED ((dispatch_mach_msg_t)~0ul) -DISPATCH_ENUM(dispatch_mach_send_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_mach_send_invoke_flags, uint32_t, DM_SEND_INVOKE_NONE = 0x0, DM_SEND_INVOKE_MAKE_DIRTY = 0x1, DM_SEND_INVOKE_NEEDS_BARRIER = 0x2, @@ -64,6 +64,7 @@ static void _dispatch_mach_handle_or_push_received_msg(dispatch_mach_t dm, static void _dispatch_mach_push_async_reply_msg(dispatch_mach_t dm, dispatch_mach_msg_t dmsg, dispatch_queue_t drq); static dispatch_queue_t _dispatch_mach_msg_context_async_reply_queue( + dispatch_mach_t dm, void *ctxt); static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( dispatch_mach_msg_t dmsg, dispatch_mach_t dm); @@ -101,10 +102,17 @@ _dispatch_mach_hooks_install_default(void) #pragma mark - #pragma mark dispatch_mach_t +DISPATCH_OPTIONS(dispatch_mach_create_flags, unsigned, + DMCF_NONE = 0x00000000, + DMCF_HANDLER_IS_BLOCK = 0x00000001, + DMCF_IS_XPC = 0x00000002, + DMCF_USE_STRICT_REPLY = 0x00000004, +); + static dispatch_mach_t _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, - dispatch_mach_handler_function_t handler, bool handler_is_block, - bool is_xpc) + dispatch_mach_handler_function_t handler, + dispatch_mach_create_flags_t dmcf) { dispatch_mach_recv_refs_t dmrr; dispatch_mach_send_refs_t dmsr; @@ -113,14 +121,18 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, dm = _dispatch_queue_alloc(mach, DQF_MUTABLE, 1, DISPATCH_QUEUE_INACTIVE | 
DISPATCH_QUEUE_ROLE_INNER)._dm; dm->dq_label = label; - dm->dm_is_xpc = is_xpc; + dm->dm_is_xpc = (bool)(dmcf & DMCF_IS_XPC); + dm->dm_strict_reply = (bool)(dmcf & DMCF_USE_STRICT_REPLY); dmrr = dux_create(&_dispatch_mach_type_recv, 0, 0)._dmrr; dispatch_assert(dmrr->du_is_direct); dmrr->du_owner_wref = _dispatch_ptr2wref(dm); dmrr->dmrr_handler_func = handler; dmrr->dmrr_handler_ctxt = context; - dmrr->dmrr_handler_is_block = handler_is_block; + dmrr->dmrr_handler_is_block = (bool)(dmcf & DMCF_HANDLER_IS_BLOCK); + if (dm->dm_strict_reply) { + dmrr->du_fflags |= MACH_MSG_STRICT_REPLY; + } dm->dm_recv_refs = dmrr; dmsr = dux_create(&_dispatch_mach_type_send, 0, @@ -144,22 +156,22 @@ dispatch_mach_create(const char *label, dispatch_queue_t q, { dispatch_block_t bb = _dispatch_Block_copy((void*)handler); return _dispatch_mach_create(label, q, bb, - (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), true, - false); + (dispatch_mach_handler_function_t)_dispatch_Block_invoke(bb), + DMCF_HANDLER_IS_BLOCK); } dispatch_mach_t dispatch_mach_create_f(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { - return _dispatch_mach_create(label, q, context, handler, false, false); + return _dispatch_mach_create(label, q, context, handler, DMCF_NONE); } dispatch_mach_t dispatch_mach_create_4libxpc(const char *label, dispatch_queue_t q, void *context, dispatch_mach_handler_function_t handler) { - return _dispatch_mach_create(label, q, context, handler, false, true); + return _dispatch_mach_create(label, q, context, handler, DMCF_IS_XPC | DMCF_USE_STRICT_REPLY); } void @@ -177,6 +189,46 @@ _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free) _dispatch_lane_class_dispose(dm, allow_free); } +void +dispatch_mach_request_no_senders(dispatch_mach_t dm) +{ + dm->dm_arm_no_senders = true; + _dispatch_queue_setter_assert_inactive(dm); +} + +void +dispatch_mach_set_flags(dispatch_mach_t dm, dispatch_mach_flags_t flags) +{ + 
dm->dm_strict_reply = !!(flags & DMF_USE_STRICT_REPLY); + dm->dm_arm_no_senders = !!(flags & DMF_REQUEST_NO_SENDERS); + + _dispatch_queue_setter_assert_inactive(dm); +} + +static void +_dispatch_mach_arm_no_senders(dispatch_mach_t dm, bool allow_previous) +{ + mach_port_t recvp = (mach_port_t)dm->dm_recv_refs->du_ident; + mach_port_t previous = MACH_PORT_NULL; + kern_return_t kr; + + if (MACH_PORT_VALID(recvp)) { + kr = mach_port_request_notification(mach_task_self(), recvp, + MACH_NOTIFY_NO_SENDERS, 0, recvp, + MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + if (unlikely(previous)) { + if (!allow_previous) { + DISPATCH_CLIENT_CRASH(previous, "Mach port notification collision"); + } + kr = mach_port_deallocate(mach_task_self(), previous); + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } +} + void dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, mach_port_t send, dispatch_mach_msg_t checkin) @@ -197,6 +249,10 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t receive, dmsr->dmsr_checkin = checkin; } + if (dm->dm_arm_no_senders && !dmsr->dmsr_checkin) { + _dispatch_mach_arm_no_senders(dm, false); + } + uint32_t disconnect_cnt = os_atomic_and_orig2o(dmsr, dmsr_disconnect_cnt, ~DISPATCH_MACH_NEVER_CONNECTED, relaxed); if (unlikely(!(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED))) { @@ -290,7 +346,7 @@ _dispatch_mach_reply_unregister(dispatch_mach_t dm, dispatch_queue_t drq = NULL; if (disconnected) { if (dm->dm_is_xpc && dmr->dmr_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmr->dmr_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmr->dmr_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(NULL, dmr, drq ? 
DISPATCH_MACH_ASYNC_WAITER_DISCONNECTED @@ -367,9 +423,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, dispatch_queue_t drq = NULL; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); + } + if (dm->dm_strict_reply) { + dmr->du_fflags |= MACH_MSG_STRICT_REPLY; } - if (unlikely(!drq && _dispatch_unote_wlh(dm->dm_recv_refs))) { + if (unlikely((!drq || drq == dm->_as_dq) && + _dispatch_unote_wlh(dm->dm_recv_refs))) { wlh = _dispatch_unote_wlh(dm->dm_recv_refs); pri = dm->dq_priority; } else if (dx_hastypeflag(drq, QUEUE_ROOT)) { @@ -599,6 +659,18 @@ _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, return dmsg; } +DISPATCH_NOINLINE +static void +_dispatch_mach_no_senders_invoke(dispatch_mach_t dm) +{ + if (!(_dispatch_queue_atomic_flags(dm) & DSF_CANCELED)) { + dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; + _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, + DISPATCH_MACH_NO_SENDERS, NULL, 0, dmrr->dmrr_handler_func); + } + _dispatch_perfmon_workitem_inc(); +} + void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, mach_msg_header_t *hdr, mach_msg_size_t siz, @@ -622,6 +694,19 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { free(hdr); } + } else if (hdr->msgh_id == MACH_NOTIFY_NO_SENDERS && dm->dm_arm_no_senders){ + if (dispatch_assume(_dispatch_mach_msg_sender_is_kernel(hdr))) { + dispatch_continuation_t dc = _dispatch_continuation_alloc(); + (void)_dispatch_continuation_init_f(dc, dm, dm, + (dispatch_function_t)_dispatch_mach_no_senders_invoke, + DISPATCH_BLOCK_HAS_PRIORITY | DISPATCH_BLOCK_NO_VOUCHER, + DC_FLAG_CONSUME); + _dispatch_continuation_async(dm, dc, 0, dc->dc_flags); + } + mach_msg_destroy(hdr); + if (flags & DISPATCH_EV_MSG_NEEDS_FREE) { + free(hdr); + } } else { // Once the mach channel 
disarming is visible, cancellation will switch // to immediately destroy messages. If we're preempted here, then the @@ -636,8 +721,13 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, _dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp); } - if (unlikely(_dispatch_unote_needs_delete(du))) { + // Note: it is ok to do a relaxed load of the dq_state_bits as we only care + // about bits that are in the top bits of the 64bit dq_state. + // This avoids expensive CAS on 32bit acrhictures. + if (unlikely(_dispatch_unote_needs_delete(du) || + _dq_state_is_activating((uint64_t)dm->dq_state_bits << 32))) { return dx_wakeup(dm, 0, DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } return _dispatch_release_2_tailcall(dm); @@ -664,7 +754,7 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, if (dmsg) { dispatch_queue_t drq = NULL; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } if (drq) { _dispatch_mach_push_async_reply_msg(dm, dmsg, drq); @@ -691,11 +781,7 @@ DISPATCH_ALWAYS_INLINE static void _dispatch_mach_stack_probe(void *addr, size_t size) { -#if TARGET_OS_MAC && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) && \ - (defined(__x86_64__) || defined(__arm64__)) - // there should be a __has_feature() macro test - // for this, for now we approximate it, for when the compiler - // is generating calls to ____chkstk_darwin on our behalf +#if __has_feature(stack_check) (void)addr; (void)size; #else for (mach_vm_address_t p = mach_vm_trunc_page(addr + vm_page_size); @@ -730,6 +816,9 @@ _dispatch_mach_msg_reply_recv(dispatch_mach_t dm, notify = send; options |= MACH_RCV_SYNC_WAIT; } + if (dm->dm_strict_reply) { + options |= MACH_MSG_STRICT_REPLY; + } retry: _dispatch_debug_machport(reply_port); @@ -944,7 +1033,7 @@ 
_dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, unsigned long reason = (msg_opts & DISPATCH_MACH_REGISTER_FOR_REPLY) ? 0 : DISPATCH_MACH_MESSAGE_NOT_SENT; if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? &dwr->dwr_refs : NULL, @@ -992,6 +1081,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { goto out; } + if (dm->dm_arm_no_senders) { + _dispatch_mach_arm_no_senders(dm, true); + } dsrr->dmsr_checkin = NULL; } } @@ -1010,7 +1102,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, msg->msgh_remote_port); dispatch_assert(_dispatch_unote_registered(dsrr)); } - if (dsrr->dmsr_notification_armed) { + if (os_atomic_load(&dsrr->dmsr_notification_armed, relaxed)) { goto out; } opts |= MACH_SEND_NOTIFY; @@ -1034,6 +1126,9 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, _dispatch_priority_compute_propagated( _dispatch_qos_to_pp(qos), 0); } + if (reply_port && dm->dm_strict_reply) { + opts |= MACH_MSG_STRICT_REPLY; + } } _dispatch_debug_machport(msg->msgh_remote_port); if (reply_port) _dispatch_debug_machport(reply_port); @@ -1101,7 +1196,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (unlikely(kr)) { // Send failed, so reply was never registered if (dm->dm_is_xpc && dmsg->do_ctxt) { - drq = _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt); + drq = _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt); } dmsgr = _dispatch_mach_msg_create_reply_disconnected(dmsg, dwr ? 
&dwr->dwr_refs : NULL, @@ -1682,7 +1777,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm, if (dm->dm_is_xpc && (options & DISPATCH_MACH_WAIT_FOR_REPLY) == 0 && _dispatch_mach_msg_get_reply_port(dmsg)) { dispatch_assert( - _dispatch_mach_msg_context_async_reply_queue(dmsg->do_ctxt)); + _dispatch_mach_msg_context_async_reply_queue(dm, dmsg->do_ctxt)); } #else (void)dm; @@ -2087,6 +2182,202 @@ _dispatch_mach_connect_invoke(dispatch_mach_t dm) _dispatch_perfmon_workitem_inc(); } +typedef struct dispatch_ipc_handoff_s { + struct dispatch_continuation_s dih_dc; + uint64_t _Atomic dih_wlh; + int32_t dih_refcnt; +} dispatch_ipc_handoff_s, *dispatch_ipc_handoff_t; + +typedef struct _dispatch_ipc_handoff_context_s { + dispatch_thread_context_s dihc_dtc; + dispatch_queue_t dihc_dq; + dispatch_qos_t dihc_qos; +} _dispatch_ipc_handoff_context_s, *_dispatch_ipc_handoff_ctxt_t; + +static char const * const +_dispatch_mach_msg_context_key = "mach_msg"; + +static _dispatch_ipc_handoff_ctxt_t +_dispatch_mach_handoff_context(mach_port_t port) +{ + dispatch_thread_context_t dtc; + _dispatch_ipc_handoff_ctxt_t dihc = NULL; + dispatch_ipc_handoff_t dih; + + dtc = _dispatch_thread_context_find(_dispatch_mach_msg_context_key); + if (dtc && dtc->dtc_dmsg) { + /* + * We need one refcount per async() done, + * and one for the whole chain. 
+ */ + dihc = (_dispatch_ipc_handoff_ctxt_t)dtc; + if (dx_type(dtc->dtc_dmsg) == DISPATCH_MACH_MSG_TYPE) { + dtc->dtc_dih = _dispatch_calloc(1, sizeof(dispatch_ipc_handoff_s)); + dih = dtc->dtc_dih; + os_atomic_store(&dih->dih_refcnt, 1, relaxed); + } else { + dih = dtc->dtc_dih; + os_atomic_inc(&dih->dih_refcnt, relaxed); + } + if (dih->dih_dc.dc_other) { + DISPATCH_CLIENT_CRASH(0, "Calling dispatch_mach_handoff_reply " + "multiple times from the same context"); + } + } else { + DISPATCH_CLIENT_CRASH(0, "Trying to handoff IPC from non IPC context"); + } + + dih->dih_dc.dc_other = (void *)(uintptr_t)port; + return dihc; +} + +static void +_dispatch_ipc_handoff_release(dispatch_ipc_handoff_t dih) +{ + if (os_atomic_dec_orig(&dih->dih_refcnt, relaxed) == 0) { + free(dih); + } +} + +static void +_dispatch_mach_handoff_set_wlh(dispatch_ipc_handoff_t dih, dispatch_queue_t dq) +{ + while (likely(dq->do_targetq)) { + if (unlikely(_dispatch_queue_is_mutable(dq))) { + _dispatch_queue_sidelock_lock(upcast(dq)._dl); + _dispatch_queue_atomic_flags_clear(dq, DQF_MUTABLE); + _dispatch_queue_sidelock_unlock(upcast(dq)._dl); + } + if (_dq_state_is_base_wlh(dq->dq_state)) { + os_atomic_store(&dih->dih_wlh, (uint64_t)dq, relaxed); + return; + } + dq = dq->do_targetq; + } + + /* unsupported hierarchy */ + os_atomic_store(&dih->dih_wlh, 0, relaxed); +} + +void +dispatch_mach_handoff_reply_f(dispatch_queue_t dq, + mach_port_t port, void *ctxt, dispatch_function_t func) +{ + _dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port); + dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih; + dispatch_continuation_t dc = &dih->dih_dc; + uintptr_t dc_flags = DC_FLAG_CONSUME; + + _dispatch_mach_handoff_set_wlh(dih, dq); + _dispatch_retain(dq); + dihc->dihc_dq = dq; + dihc->dihc_qos = _dispatch_continuation_init_f(dc, dq, ctxt, func, + 0, dc_flags); + dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF); +} + +void +dispatch_mach_handoff_reply(dispatch_queue_t dq, + mach_port_t port, 
dispatch_block_t block) +{ + _dispatch_ipc_handoff_ctxt_t dihc = _dispatch_mach_handoff_context(port); + dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih; + dispatch_continuation_t dc = &dih->dih_dc; + uintptr_t dc_flags = DC_FLAG_CONSUME; + + _dispatch_mach_handoff_set_wlh(dih, dq); + _dispatch_retain(dq); + dihc->dihc_dq = dq; + dihc->dihc_qos = _dispatch_continuation_init(dc, dq, block, 0, dc_flags); + dc->dc_data = (void *)dc->dc_flags; + dc->do_vtable = DC_VTABLE(MACH_IPC_HANDOFF); +} + +static void +_dispatch_mach_ipc_handoff_async(_dispatch_ipc_handoff_ctxt_t dihc) +{ + dispatch_ipc_handoff_t dih = dihc->dihc_dtc.dtc_dih; + dispatch_continuation_t dc = &dih->dih_dc; + mach_port_t port = (mach_port_t)(uintptr_t)dc->dc_other; + uint64_t wlh = os_atomic_load(&dih->dih_wlh, relaxed); + + _dispatch_continuation_async(dihc->dihc_dq, dc, dihc->dihc_qos, + (uintptr_t)dc->dc_data); + + if (wlh) { + _dispatch_sync_ipc_handoff_begin((dispatch_wlh_t)wlh, port, + &dih->dih_wlh); + os_atomic_cmpxchg(&dih->dih_wlh, wlh, ~wlh, relaxed); + } + + _dispatch_ipc_handoff_release(dih); + _dispatch_release_tailcall(dihc->dihc_dq); +} + +void +_dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic DISPATCH_UNUSED, + dispatch_invoke_flags_t flags) +{ + dispatch_ipc_handoff_t dih = (dispatch_ipc_handoff_t)dc; + _dispatch_ipc_handoff_context_s dihc = { .dihc_dtc = { + .dtc_key = _dispatch_mach_msg_context_key, + .dtc_dih = dih, + } }; + + dispatch_queue_t cq = _dispatch_queue_get_current(); + uintptr_t dc_flags = (uintptr_t)dc->dc_data; + mach_port_t port = (mach_port_t)(uintptr_t)dc->dc_other; + uint64_t wlh = os_atomic_xchg(&dih->dih_wlh, 0, relaxed); + + if (wlh == 0) { + /* not supported */ + } else if (wlh & 1) { + /* _dispatch_mach_ipc_handoff_async finished its work */ + wlh = ~wlh; + } else { + /* + * Because this code may race with _dispatch_mach_ipc_handoff_async, + * Make sure that we have the push. 
+ * + * Then mark the handoff as done, as the client callout below + * may consume the send once, and _dispatch_mach_ipc_handoff_async + * may be about an invalid port now. + */ + _dispatch_sync_ipc_handoff_begin((dispatch_wlh_t)wlh, port, + &dih->dih_wlh); + } + + dc->do_next = DISPATCH_OBJECT_LISTLESS; + dc->dc_other = NULL; + + _dispatch_thread_context_push(&dihc.dihc_dtc); + + // DC_FLAG_CONSUME has been set, as we want the block and vouchers + // to be consumed, however the continuation is not from the continuation + // cache and its lifetime is managed explicitly by the handoff mechanism. + DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DC_FLAG_CONSUME); + _dispatch_continuation_pop_forwarded_no_free(dc, dc_flags, cq, { + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); + }); + }); + + _dispatch_thread_context_pop(&dihc.dihc_dtc); + + if (dihc.dihc_dq) { + /* a new handoff was started */ + _dispatch_mach_ipc_handoff_async(&dihc); + } else { + /* this was the last handoff in the chain, consume the last ref */ + _dispatch_ipc_handoff_release(dih); + } + + if (wlh) { + _dispatch_sync_ipc_handoff_end((dispatch_wlh_t)wlh, port); + } +} + DISPATCH_ALWAYS_INLINE static void _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, @@ -2097,7 +2388,12 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, unsigned long reason = _dispatch_mach_msg_get_reason(dmsg, &err); dispatch_thread_set_self_t adopt_flags = DISPATCH_PRIORITY_ENFORCE| DISPATCH_VOUCHER_CONSUME|DISPATCH_VOUCHER_REPLACE; + _dispatch_ipc_handoff_context_s dihc = { .dihc_dtc = { + .dtc_key = _dispatch_mach_msg_context_key, + .dtc_dmsg = dmsg, + } }; + _dispatch_thread_context_push(&dihc.dihc_dtc); _dispatch_trace_item_pop(dm, dmsg); dmrr = dm->dm_recv_refs; @@ -2126,6 +2422,10 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, // This makes XPC unhappy because some of these messages are // port-destroyed notifications that can cause 
it to try to // reconnect on a channel that is almost fully canceled + mach_msg_header_t *hdr = _dispatch_mach_msg_get_msg(dmsg); + _dispatch_debug("machport[0x%08x]: drop msg id 0x%x, reply on 0x%08x", + hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); + mach_msg_destroy(hdr); } else { _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, reason, dmsg, err, dmrr->dmrr_handler_func); @@ -2135,6 +2435,11 @@ _dispatch_mach_msg_invoke_with_mach(dispatch_mach_msg_t dmsg, }); _dispatch_trace_item_complete(dmsg); dispatch_release(dmsg); + _dispatch_thread_context_pop(&dihc.dihc_dtc); + + if (dihc.dihc_dq) { + _dispatch_mach_ipc_handoff_async(&dihc); + } } DISPATCH_NOINLINE @@ -2171,11 +2476,14 @@ _dispatch_mach_barrier_invoke(dispatch_continuation_t dc, } dmrr = dm->dm_recv_refs; DISPATCH_COMPILER_CAN_ASSUME(dc_flags & DC_FLAG_CONSUME); + if (unlikely(!dm->dm_connect_handler_called)) { + dispatch_invoke_with_autoreleasepool(flags, { + // do not coalesce with the block below due to continuation reuse + _dispatch_mach_connect_invoke(dm); + }); + } _dispatch_continuation_pop_forwarded(dc, dc_flags, dm, { dispatch_invoke_with_autoreleasepool(flags, { - if (unlikely(!dm->dm_connect_handler_called)) { - _dispatch_mach_connect_invoke(dm); - } _dispatch_client_callout(dc->dc_ctxt, dc->dc_func); _dispatch_client_callout4(dmrr->dmrr_handler_ctxt, DISPATCH_MACH_BARRIER_COMPLETED, NULL, 0, @@ -2203,7 +2511,7 @@ dispatch_mach_send_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME; dispatch_qos_t qos; _dispatch_continuation_init_f(dc, dm, context, func, 0, dc_flags); @@ -2218,7 +2526,7 @@ void dispatch_mach_send_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + 
uintptr_t dc_flags = DC_FLAG_CONSUME; dispatch_qos_t qos; _dispatch_continuation_init(dc, dm, barrier, 0, dc_flags); @@ -2234,7 +2542,7 @@ dispatch_mach_receive_barrier_f(dispatch_mach_t dm, void *context, dispatch_function_t func) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME; dispatch_qos_t qos; qos = _dispatch_continuation_init_f(dc, dm, context, func, 0, dc_flags); @@ -2247,7 +2555,7 @@ void dispatch_mach_receive_barrier(dispatch_mach_t dm, dispatch_block_t barrier) { dispatch_continuation_t dc = _dispatch_continuation_alloc(); - uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_MACH_BARRIER; + uintptr_t dc_flags = DC_FLAG_CONSUME; dispatch_qos_t qos; qos = _dispatch_continuation_init(dc, dm, barrier, 0, dc_flags); @@ -2300,9 +2608,18 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dispatch_assert(!dm->ds_is_installed); dm->ds_is_installed = true; - if (!cancelled && dmrr->du_ident) { - (void)_dispatch_unote_register(dmrr, wlh, pri); - dispatch_assert(dmrr->du_is_direct); + uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, + dmsr_disconnect_cnt, relaxed); + if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { + DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + } + + if (!dm->dq_priority) { + // _dispatch_mach_reply_kevent_register assumes this has been done + // which is unlike regular sources or queues, the FALLBACK flag + // is used so that the priority of the channel doesn't act as + // a QoS floor for incoming messages (26761457) + dm->dq_priority = pri; } if (!cancelled && dm->dm_is_xpc && @@ -2314,32 +2631,28 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dm->dm_xpc_term_refs = _dxtr; _dispatch_unote_register(dm->dm_xpc_term_refs, wlh, pri); } - if (!dm->dq_priority) { - // _dispatch_mach_reply_kevent_register assumes this has been done - // which is unlike regular 
sources or queues, the FALLBACK flag - // is used so that the priority of the channel doesn't act as - // a QoS floor for incoming messages (26761457) - dm->dq_priority = pri; - } - uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, - dmsr_disconnect_cnt, relaxed); - if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { - DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); + if (!cancelled && dmrr->du_ident) { + dispatch_assert(dmrr->du_is_direct); + // rdar://45419440 this absolutely needs to be done last + // as this can cause an event to be delivered + // and to finish the activation concurrently + (void)_dispatch_unote_register(dmrr, wlh, pri); } } void -_dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume) +_dispatch_mach_activate(dispatch_mach_t dm) { dispatch_priority_t pri; dispatch_wlh_t wlh; // call "super" - _dispatch_lane_activate(dm, allow_resume); + _dispatch_lane_activate(dm); if (!dm->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(dm, &wlh); + // rdar://45419440 this needs to be last if (pri) _dispatch_mach_install(dm, wlh, pri); } } @@ -2434,7 +2747,8 @@ _dispatch_mach_invoke2(dispatch_mach_t dm, } if (dmsr->dmsr_tail) { - if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? _dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; // The channel has pending messages to send. @@ -2521,7 +2835,8 @@ _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, goto done; } - if (!dmsr->dmsr_notification_armed || dmsr->dmsr_disconnect_cnt) { + if (!os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) || + dmsr->dmsr_disconnect_cnt) { bool requires_mgr = dmsr->dmsr_disconnect_cnt ? 
_dispatch_unote_registered(dmsr) : dm->dm_needs_mgr; if (unlikely(requires_mgr)) { @@ -2693,9 +3008,15 @@ _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, size_t bufsiz) DISPATCH_ALWAYS_INLINE static dispatch_queue_t -_dispatch_mach_msg_context_async_reply_queue(void *msg_context) +_dispatch_mach_msg_context_async_reply_queue(dispatch_mach_t dm, + void *msg_context) { - return _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + dispatch_queue_t dq; + dq = _dispatch_mach_xpc_hooks->dmxh_msg_context_reply_queue(msg_context); + if (dq == DMXH_MSG_CONTEXT_REPLY_QUEUE_SELF) { + dq = dm->_as_dq; + } + return dq; } static dispatch_continuation_t @@ -2734,6 +3055,37 @@ _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, #pragma mark - #pragma mark dispatch_mig_server +static inline kern_return_t +_dispatch_mig_return_code(mig_reply_error_t *msg) +{ + if (msg->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { + return KERN_SUCCESS; + } + return msg->RetCode; +} + +static inline void +_dispatch_mig_consume_unsent_message(mach_msg_header_t *hdr) +{ + mach_port_t port = hdr->msgh_local_port; + if (MACH_PORT_VALID(port)) { + kern_return_t kr = KERN_SUCCESS; + switch (MACH_MSGH_BITS_LOCAL(hdr->msgh_bits)) { + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + kr = mach_port_deallocate(mach_task_self(), port); + break; + case MACH_MSG_TYPE_MOVE_RECEIVE: + kr = mach_port_mod_refs(mach_task_self(), port, + MACH_PORT_RIGHT_RECEIVE, -1); + break; + } + DISPATCH_VERIFY_MIG(kr); + dispatch_assume_zero(kr); + } + mach_msg_destroy(hdr); +} + mach_msg_return_t dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) @@ -2743,7 +3095,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; - mach_msg_return_t kr = 0; + mach_msg_return_t kr 
= 0, skr; uint64_t assertion_token = 0; uint32_t cnt = 1000; // do not stall out serial queues boolean_t demux_success; @@ -2782,15 +3134,13 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, switch (kr) { case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; case MACH_RCV_TIMED_OUT: // Don't return an error if a message was sent this time or // a message was successfully received previously // rdar://problems/7363620&7791738 - if(bufReply->Head.msgh_remote_port || received) { + if (bufReply->Head.msgh_remote_port || received) { kr = MACH_MSG_SUCCESS; } break; @@ -2800,7 +3150,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, case MACH_RCV_TOO_LARGE: // receive messages that are too large and log their id and size // rdar://problem/8422992 - tmp_options &= ~MACH_RCV_LARGE; + tmp_options &= ~(MACH_RCV_LARGE | MACH_SEND_MSG); size_t large_size = bufReply->Head.msgh_size + MAX_TRAILER_SIZE; void *large_buf = malloc(large_size); if (large_buf) { @@ -2815,9 +3165,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, "requested size %zd: id = 0x%x, size = %d", maxmsgsz, bufReply->Head.msgh_id, bufReply->Head.msgh_size); - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + mach_msg_destroy(&bufReply->Head); } if (large_buf) { free(large_buf); @@ -2864,21 +3212,21 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, demux_success = callback(&bufRequest->Head, &bufReply->Head); if (!demux_success) { + skr = MIG_BAD_ID; + } else { + skr = _dispatch_mig_return_code(bufReply); + } + switch (skr) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). 
bufRequest->Head.msgh_remote_port = 0; mach_msg_destroy(&bufRequest->Head); - } else if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode - // is present - if (unlikely(bufReply->RetCode)) { - if (bufReply->RetCode == MIG_NO_REPLY) { - continue; - } - - // destroy the request - but not the reply port - bufRequest->Head.msgh_remote_port = 0; - mach_msg_destroy(&bufRequest->Head); - } } if (bufReply->Head.msgh_remote_port) { @@ -2955,15 +3303,18 @@ dispatch_mach_mig_demux(void *context, desc->stub_routine(hdr, &bufReply->Head); - // if MACH_MSGH_BITS_COMPLEX is _not_ set, then bufReply->RetCode is present - if (unlikely(!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) && - bufReply->RetCode)) { + switch (_dispatch_mig_return_code(bufReply)) { + case KERN_SUCCESS: + break; + case MIG_NO_REPLY: + bufReply->Head.msgh_remote_port = MACH_PORT_NULL; + break; + default: // destroy the request - but not the reply port + // (MIG moved it into the bufReply). hdr->msgh_remote_port = 0; - if (bufReply->RetCode != MIG_NO_REPLY && - (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - mach_msg_destroy(hdr); - } + mach_msg_destroy(hdr); + break; } if (bufReply->Head.msgh_remote_port) { @@ -2979,9 +3330,7 @@ dispatch_mach_mig_demux(void *context, break; case MACH_SEND_INVALID_DEST: case MACH_SEND_TIMED_OUT: - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_destroy(&bufReply->Head); - } + _dispatch_mig_consume_unsent_message(&bufReply->Head); break; default: DISPATCH_VERIFY_MIG(kr); @@ -3009,6 +3358,7 @@ dispatch_mach_mig_demux_get_context(void) #pragma mark - #pragma mark dispatch_mach_debug +DISPATCH_COLD static size_t _dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) { @@ -3022,7 +3372,7 @@ _dispatch_mach_debug_attr(dispatch_mach_t dm, char *buf, size_t bufsiz) target && target->dq_label ? 
target->dq_label : "", target, (mach_port_t)dmrr->du_ident, dmsr->dmsr_send, (mach_port_t)dmsr->du_ident, - dmsr->dmsr_notification_armed ? " (armed)" : "", + os_atomic_load(&dmsr->dmsr_notification_armed, relaxed) ? " (armed)" : "", dmsr->dmsr_checkin_port, dmsr->dmsr_checkin ? " (pending)" : "", dmsr->dmsr_state, dmsr->dmsr_disconnect_cnt, (bool)(dm->dq_atomic_flags & DSF_CANCELED)); diff --git a/src/mach_internal.h b/src/mach_internal.h index b1e959c89..9f1840eac 100644 --- a/src/mach_internal.h +++ b/src/mach_internal.h @@ -94,14 +94,17 @@ _dispatch_mach_xref_dispose(struct dispatch_mach_s *dm) extern dispatch_mach_xpc_hooks_t _dispatch_mach_xpc_hooks; extern const struct dispatch_mach_xpc_hooks_s _dispatch_mach_xpc_hooks_default; +void _dispatch_mach_ipc_handoff_invoke(dispatch_continuation_t dc, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_dispose(dispatch_mach_t dm, bool *allow_free); -void _dispatch_mach_activate(dispatch_mach_t dm, bool *allow_resume); +void _dispatch_mach_activate(dispatch_mach_t dm); void _dispatch_mach_invoke(dispatch_mach_t dm, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_mach_wakeup(dispatch_mach_t dm, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); +DISPATCH_COLD size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); void _dispatch_mach_notification_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); @@ -117,6 +120,7 @@ void _dispatch_xpc_sigterm_merge_evt(dispatch_unote_t du, uint32_t flags, void _dispatch_mach_msg_dispose(dispatch_mach_msg_t dmsg, bool *allow_free); void _dispatch_mach_msg_invoke(dispatch_mach_msg_t dmsg, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +DISPATCH_COLD size_t _dispatch_mach_msg_debug(dispatch_mach_msg_t dmsg, char* buf, 
size_t bufsiz); diff --git a/src/object.c b/src/object.c index 261e1996d..4eb49fda8 100644 --- a/src/object.c +++ b/src/object.c @@ -86,18 +86,37 @@ _os_object_retain_with_resurrect(_os_object_t obj) return obj; } -DISPATCH_NOINLINE -void -_os_object_release(_os_object_t obj) +DISPATCH_ALWAYS_INLINE +static inline bool +_os_object_release_inline(_os_object_t obj) { int xref_cnt = _os_object_xrefcnt_dec(obj); if (likely(xref_cnt >= 0)) { - return; + return false; } if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Over-release of an object"); } - return _os_object_xref_dispose(obj); + return true; +} + + +DISPATCH_NOINLINE +void +_os_object_release(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_xref_dispose(obj); + } +} + +DISPATCH_NOINLINE +void +_os_object_release_without_xref_dispose(_os_object_t obj) +{ + if (_os_object_release_inline(obj)) { + return _os_object_release_internal(obj); + } } bool @@ -183,32 +202,37 @@ void dispatch_release(dispatch_object_t dou) { DISPATCH_OBJECT_TFB(_dispatch_objc_release, dou); - _os_object_release(dou._os_obj); + if (_os_object_release_inline(dou._os_obj)) { + // bypass -_xref_dispose to avoid the dynamic dispatch + _os_object_xrefcnt_dispose_barrier(dou._os_obj); + _dispatch_xref_dispose(dou); + } } -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou) { if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { _dispatch_queue_xref_dispose(dou._dq); - } - switch (dx_type(dou._do)) { - case DISPATCH_SOURCE_KEVENT_TYPE: - _dispatch_source_xref_dispose(dou._ds); - break; + switch (dx_type(dou._do)) { + case DISPATCH_SOURCE_KEVENT_TYPE: + _dispatch_source_xref_dispose(dou._ds); + break; + case DISPATCH_CHANNEL_TYPE: + _dispatch_channel_xref_dispose(dou._dch); + break; #if HAVE_MACH - case DISPATCH_MACH_CHANNEL_TYPE: - _dispatch_mach_xref_dispose(dou._dm); - break; + case DISPATCH_MACH_CHANNEL_TYPE: + _dispatch_mach_xref_dispose(dou._dm); + break; #endif - case 
DISPATCH_QUEUE_RUNLOOP_TYPE: - _dispatch_runloop_queue_xref_dispose(dou._dl); - break; + case DISPATCH_QUEUE_RUNLOOP_TYPE: + _dispatch_runloop_queue_xref_dispose(dou._dl); + break; + } } return _dispatch_release_tailcall(dou._os_obj); } -#endif void _dispatch_dispose(dispatch_object_t dou) @@ -303,7 +327,7 @@ dispatch_activate(dispatch_object_t dou) return _dispatch_workloop_activate(dou._dwl); } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - return _dispatch_lane_resume(dou._dl, true); + return _dispatch_lane_resume(dou._dl, DISPATCH_ACTIVATE); } } @@ -329,7 +353,7 @@ dispatch_resume(dispatch_object_t dou) return; } if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { - _dispatch_lane_resume(dou._dl, false); + _dispatch_lane_resume(dou._dl, DISPATCH_RESUME); } } diff --git a/src/object.m b/src/object.m index 925fccc43..936795871 100644 --- a/src/object.m +++ b/src/object.m @@ -124,7 +124,7 @@ { struct _os_object_s *o = (struct _os_object_s *)obj; _os_object_refcnt_dispose_barrier(o); - [obj _dispose]; + _os_object_dealloc(obj); } #undef os_retain @@ -170,7 +170,7 @@ -(id)retain { } -(oneway void)release { - return _os_object_release(self); + return _os_object_release_without_xref_dispose(self); } -(NSUInteger)retainCount { @@ -194,10 +194,6 @@ - (void)_xref_dispose { return _os_object_release_internal(self); } -- (void)_dispose { - return _os_object_dealloc(self); -} - @end #pragma mark - @@ -281,16 +277,9 @@ - (void)_dispose { #pragma mark - #pragma mark _dispatch_object -// Force non-lazy class realization rdar://10640168 -#define DISPATCH_OBJC_LOAD() + (void)load {} - @implementation DISPATCH_CLASS(object) DISPATCH_UNAVAILABLE_INIT() -- (void)_dispose { - return _dispatch_dispose(self); // calls _os_object_dealloc() -} - - (NSString *)debugDescription { Class nsstring = objc_lookUpClass("NSString"); if (!nsstring) return nil; @@ -306,16 +295,20 @@ - (NSString *)debugDescription { return [nsstring stringWithFormat:format, object_getClassName(self), 
buf]; } -- (void)dealloc DISPATCH_NORETURN { - DISPATCH_INTERNAL_CRASH(0, "Calling dealloc on a dispatch object"); - [super dealloc]; // make clang happy +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wobjc-missing-super-calls" +- (void)dealloc { + return _dispatch_dispose(self); } +#pragma clang diagnostic pop @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (NSString *)description { Class nsstring = objc_lookUpClass("NSString"); @@ -333,9 +326,25 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS +@implementation DISPATCH_CLASS(channel) +OS_OBJECT_NONLAZY_CLASS_LOAD +DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); + _dispatch_channel_xref_dispose(self); + [super _xref_dispose]; +} + +@end + +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(source) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -345,9 +354,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(mach) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -357,9 +368,11 @@ - (void)_xref_dispose { @end +OS_OBJECT_NONLAZY_CLASS @implementation DISPATCH_CLASS(queue_runloop) -DISPATCH_OBJC_LOAD() +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() +DISPATCH_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose { _dispatch_queue_xref_dispose((struct dispatch_queue_s *)self); @@ -370,8 +383,9 @@ - (void)_xref_dispose { @end #define DISPATCH_CLASS_IMPL(name) \ + OS_OBJECT_NONLAZY_CLASS \ 
@implementation DISPATCH_CLASS(name) \ - DISPATCH_OBJC_LOAD() \ + OS_OBJECT_NONLAZY_CLASS_LOAD \ DISPATCH_UNAVAILABLE_INIT() \ @end @@ -395,9 +409,10 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() -(id)retain { return (id)_voucher_retain_inline((struct voucher_s *)self); @@ -407,12 +422,9 @@ -(oneway void)release { return _voucher_release_inline((struct voucher_s *)self); } -- (void)_xref_dispose { - return _voucher_xref_dispose(self); // calls _os_object_release_internal() -} - -- (void)_dispose { - return _voucher_dispose(self); // calls _os_object_dealloc() +- (void)dealloc { + _voucher_dispose(self); + [super dealloc]; } - (NSString *)debugDescription { @@ -428,13 +440,10 @@ - (NSString *)debugDescription { @end #if VOUCHER_ENABLE_RECIPE_OBJECTS +OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher_recipe) +OS_OBJECT_NONLAZY_CLASS_LOAD DISPATCH_UNAVAILABLE_INIT() -DISPATCH_OBJC_LOAD() - -- (void)_dispose { - -} - (NSString *)debugDescription { return nil; // TODO: voucher_recipe debugDescription @@ -443,7 +452,6 @@ - (NSString *)debugDescription { @end #endif - #pragma mark - #pragma mark dispatch_last_resort_autorelease_pool diff --git a/src/object_internal.h b/src/object_internal.h index b1f75602a..d2126b760 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -50,7 +50,7 @@ #if USE_OBJC #define DISPATCH_OBJC_CLASS_DECL(name) \ extern void *DISPATCH_CLASS_SYMBOL(name) \ - asm(DISPATCH_CLASS_RAW_SYMBOL_NAME(name)) + __asm__(DISPATCH_CLASS_RAW_SYMBOL_NAME(name)) #endif // define a new proper class @@ -65,7 +65,7 @@ }; \ OS_OBJECT_EXTRA_VTABLE_DECL(name, name) \ extern const struct name##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ - asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + __asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) #if 
OS_OBJECT_SWIFT3 #define OS_OBJECT_INTERNAL_CLASS_DECL(name, super, ...) \ @@ -105,7 +105,7 @@ struct name##_s; \ OS_OBJECT_EXTRA_VTABLE_DECL(name, ctype) \ extern const struct ctype##_vtable_s OS_OBJECT_CLASS_SYMBOL(name) \ - asm(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) + __asm__(OS_OBJC_CLASS_RAW_SYMBOL_NAME(OS_OBJECT_CLASS(name))) #if OS_OBJECT_SWIFT3 // define a new internal subclass used in a class cluster @@ -199,7 +199,7 @@ #define DISPATCH_QUEUE_VTABLE_HEADER(x); \ DISPATCH_OBJECT_VTABLE_HEADER(x); \ - void (*const dq_activate)(dispatch_queue_class_t, bool *allow_resume); \ + void (*const dq_activate)(dispatch_queue_class_t); \ void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ dispatch_wakeup_flags_t); \ void (*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ @@ -240,7 +240,7 @@ #define DISPATCH_OBJECT_LISTLESS ((void *)0x89abcdef) #endif -DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_wakeup_flags, uint32_t, // The caller of dx_wakeup owns two internal refcounts on the object being // woken up. Two are needed for WLH wakeups where two threads need // the object to remain valid in a non-coordinated way @@ -262,6 +262,9 @@ DISPATCH_ENUM(dispatch_wakeup_flags, uint32_t, // This wakeup may cause the source to leave its DSF_NEEDS_EVENT state DISPATCH_WAKEUP_EVENT = 0x00000010, + + // This wakeup is allowed to clear the ACTIVATING state of the object + DISPATCH_WAKEUP_CLEAR_ACTIVATING = 0x00000020, ); typedef struct dispatch_invoke_context_s { @@ -288,7 +291,7 @@ typedef struct dispatch_invoke_context_s { #define dispatch_with_disabled_narrowing(dic, ...) 
__VA_ARGS__ #endif -DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_NONE = 0x00000000, // Invoke modes @@ -359,7 +362,7 @@ DISPATCH_ENUM(dispatch_invoke_flags, uint32_t, #define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u ); -enum { +DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_META_TYPE_MASK = 0x000000ff, // mask for object meta-types _DISPATCH_TYPE_CLUSTER_MASK = 0x000000f0, // mask for the cluster type _DISPATCH_SUB_TYPE_MASK = 0x0000ff00, // mask for object sub-types @@ -419,8 +422,9 @@ enum { _DISPATCH_QUEUE_BASE_TYPEFLAG, DISPATCH_SOURCE_KEVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(1, SOURCE), - DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), -}; + DISPATCH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(2, SOURCE), + DISPATCH_MACH_CHANNEL_TYPE = DISPATCH_OBJECT_SUBTYPE(3, SOURCE), +); typedef struct _os_object_vtable_s { _OS_OBJECT_CLASS_HEADER(); @@ -467,6 +471,9 @@ typedef struct _os_object_s { return [super init]; \ } +#define DISPATCH_OBJECT_USES_XREF_DISPOSE() \ + OS_OBJECT_USES_XREF_DISPOSE() + _OS_OBJECT_DECL_PROTOCOL(dispatch_object, object); DISPATCH_CLASS_DECL_BARE(object, OBJECT); @@ -474,14 +481,13 @@ struct dispatch_object_s { _DISPATCH_OBJECT_HEADER(object); }; +DISPATCH_COLD size_t _dispatch_object_debug_attr(dispatch_object_t dou, char* buf, size_t bufsiz); void *_dispatch_object_alloc(const void *vtable, size_t size); void _dispatch_object_finalize(dispatch_object_t dou); void _dispatch_object_dealloc(dispatch_object_t dou); -#if !USE_OBJC void _dispatch_xref_dispose(dispatch_object_t dou); -#endif void _dispatch_dispose(dispatch_object_t dou); #if DISPATCH_COCOA_COMPAT #if USE_OBJC @@ -547,6 +553,7 @@ void _dispatch_objc_set_target_queue(dispatch_object_t dou, void _dispatch_objc_suspend(dispatch_object_t dou); void _dispatch_objc_resume(dispatch_object_t dou); void _dispatch_objc_activate(dispatch_object_t dou); +DISPATCH_COLD size_t 
_dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); #if __OBJC2__ @@ -581,7 +588,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); * reached -1. */ #define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ - typeof(o) _o = (o); \ + __typeof__(o) _o = (o); \ int _ref_cnt = _o->f; \ if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ diff --git a/src/protocol.defs b/src/protocol.defs index 7a9cf1898..6129f3f1a 100644 --- a/src/protocol.defs +++ b/src/protocol.defs @@ -20,6 +20,7 @@ #include #include +import ; // '64' is used to align with Mach notifications and so that we don't fight // with the notify symbols in Libsystem @@ -28,6 +29,8 @@ subsystem libdispatch_internal_protocol 64; serverprefix _dispatch_; userprefix _dispatch_send_; +ConsumeOnSendError Timeout; + skip; /* was MACH_NOTIFY_FIRST: 64 */ /* MACH_NOTIFY_PORT_DELETED: 65 */ diff --git a/src/queue.c b/src/queue.c index 59048c33f..63f565890 100644 --- a/src/queue.c +++ b/src/queue.c @@ -123,7 +123,7 @@ void _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, mach_voucher_t kv) { - _pthread_set_flags_t pflags = 0; + _pthread_set_flags_t pflags = (_pthread_set_flags_t)0; if (pp && _dispatch_set_qos_class_enabled) { pthread_priority_t old_pri = _dispatch_get_priority(); if (pp != old_pri) { @@ -227,10 +227,14 @@ const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(OVERRIDE_OWNING, .do_invoke = _dispatch_queue_override_invoke), #endif +#if HAVE_MACH + DC_VTABLE_ENTRY(MACH_IPC_HANDOFF, + .do_invoke = _dispatch_mach_ipc_handoff_invoke), +#endif }; DISPATCH_NOINLINE -static void +static void DISPATCH_TSD_DTOR_CC _dispatch_cache_cleanup(void *value) { dispatch_continuation_t dc, next_dc = value; @@ -1098,6 +1102,20 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, { dispatch_wlh_t waiter_wlh = dsc->dc_data; +#if 
DISPATCH_USE_KEVENT_WORKLOOP + // + // We need to interact with a workloop if any of the following 3 cases: + // 1. the current owner of the lock has a SYNC_WAIT knote to destroy + // 2. the next owner of the lock is a workloop, we need to make sure it has + // a SYNC_WAIT knote to destroy when it will later release the lock + // 3. the waiter is waiting on a workloop (which may be different from `wlh` + // if the hierarchy was mutated after the next owner started waiting) + // + // However, note that even when (2) is true, the next owner may be waiting + // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next + // owner is really woken up when the thread event is signaled. + // +#endif if (_dq_state_in_sync_transfer(old_state) || _dq_state_in_sync_transfer(new_state) || (waiter_wlh != DISPATCH_WLH_ANON)) { @@ -1124,7 +1142,7 @@ _dispatch_async_waiter_update(dispatch_sync_context_t dsc, if (dsc->dsc_autorelease == 0) { dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dqu); - dqf &= _DQF_AUTORELEASE_MASK; + dqf &= (dispatch_queue_flags_t)_DQF_AUTORELEASE_MASK; dsc->dsc_autorelease = (uint8_t)(dqf / DQF_AUTORELEASE_ALWAYS); } } @@ -1347,6 +1365,32 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, _dispatch_trace_runtime_event(sync_async_handoff, dq, 0); } +#if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_is_base_wlh(old_state)) { + // - Only non-"du_is_direct" sources & mach channels can be enqueued + // on the manager. + // + // - Only dispatch_source_cancel_and_wait() and + // dispatch_source_set_*_handler() use the barrier complete codepath, + // none of which are used by mach channels. + // + // Hence no source-ish object can both be a workloop and need to use the + // manager at the same time. 
+ dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); + if (_dq_state_is_enqueued_on_target(old_state) || + _dq_state_is_enqueued_on_target(new_state) || + _dq_state_received_sync_wait(old_state) || + _dq_state_in_sync_transfer(old_state)) { + return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, + old_state, new_state, flags); + } + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq); + if (flags & DISPATCH_WAKEUP_CONSUME_2) { + return _dispatch_release_2_tailcall(dq); + } + return; + } +#endif if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. @@ -2332,8 +2376,27 @@ dispatch_queue_set_label_nocopy(dispatch_queue_t dq, const char *label) static inline bool _dispatch_base_lane_is_wlh(dispatch_lane_t dq, dispatch_queue_t tq) { +#if DISPATCH_USE_KEVENT_WORKLOOP + if (unlikely(!_dispatch_kevent_workqueue_enabled)) { + return false; + } + if (dx_type(dq) == DISPATCH_QUEUE_NETWORK_EVENT_TYPE) { + return true; + } + if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) { + // Sources don't support sync waiters, so the ones that never change QoS + // don't benefit from any of the workloop features which have overhead, + // so just use the workqueue kqueue for these. 
+ if (likely(!upcast(dq)._ds->ds_refs->du_can_be_wlh)) { + return false; + } + dispatch_assert(upcast(dq)._ds->ds_refs->du_is_direct); + } + return dq->dq_width == 1 && _dispatch_is_in_root_queues_array(tq); +#else (void)dq; (void)tq; return false; +#endif // DISPATCH_USE_KEVENT_WORKLOOP } static void @@ -2400,10 +2463,8 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } if (unlikely(_dispatch_queue_is_thread_bound(tq))) { - // thread-bound hierarchies are weird, we need to install - // from the context of the thread this hierarchy is bound to - if (wlh_out) *wlh_out = NULL; - return 0; + if (wlh_out) *wlh_out = DISPATCH_WLH_ANON; + return tq->dq_priority; } if (unlikely(DISPATCH_QUEUE_IS_SUSPENDED(tq))) { // this queue may not be activated yet, so the queue graph may not @@ -2467,19 +2528,6 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, return DISPATCH_PRIORITY_FLAG_MANAGER; } -DISPATCH_ALWAYS_INLINE -static void -_dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) -{ - uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) return; -#ifndef __LP64__ - dq_state >>= 32; -#endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, - "dispatch queue/source property setter called after activation"); -} - DISPATCH_ALWAYS_INLINE static void _dispatch_workloop_attributes_alloc_if_needed(dispatch_workloop_t dwl) @@ -2774,9 +2822,17 @@ void _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) { dispatch_lane_t dq = dqu._dl; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); - uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); + if (unlikely(dq->dq_items_tail)) { + DISPATCH_CLIENT_CRASH(dq->dq_items_tail, + "Release of a queue while items are enqueued"); + } + dq->dq_items_head = (void *)0x200; + dq->dq_items_tail = (void *)0x200; + uint64_t orig_dq_state, dq_state; + dq_state = 
orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed); + + uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); if (dx_hastypeflag(dq, QUEUE_ROOT)) { initial_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE; } @@ -2785,23 +2841,15 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) dq_state &= ~DISPATCH_QUEUE_ROLE_MASK; if (unlikely(dq_state != initial_state)) { if (_dq_state_drain_locked(dq_state)) { - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a locked queue"); } #ifndef __LP64__ - dq_state >>= 32; + orig_dq_state >>= 32; #endif - DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, + DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a queue with corrupt state"); } - - if (unlikely(dq->dq_items_tail)) { - DISPATCH_CLIENT_CRASH(dq->dq_items_tail, - "Release of a queue while items are enqueued"); - } - dq->dq_items_head = (void *)0x200; - dq->dq_items_tail = (void *)0x200; - _dispatch_queue_dispose(dqu, allow_free); } @@ -2820,7 +2868,7 @@ _dispatch_queue_xref_dispose(dispatch_queue_t dq) if (unlikely(_dq_state_is_suspended(dq_state))) { long state = (long)dq_state; if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); - if (unlikely(_dq_state_is_inactive(dq_state))) { + if (unlikely(dq_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { // Arguments for and against this assert are within 6705399 DISPATCH_CLIENT_CRASH(state, "Release of an inactive object"); } @@ -2921,26 +2969,23 @@ _dispatch_lane_resume_slow(dispatch_lane_t dq) retry: _dispatch_queue_sidelock_unlock(dq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } DISPATCH_NOINLINE static void _dispatch_lane_resume_activate(dispatch_lane_t dq) { - bool allow_resume = true; - // Step 2: run the activation finalizer if (dx_vtable(dq)->dq_activate) { - dx_vtable(dq)->dq_activate(dq, &allow_resume); - } - // Step 3: consume the suspend count - if 
(allow_resume) { - return _dispatch_lane_resume(dq, false); + dx_vtable(dq)->dq_activate(dq); } + + _dispatch_lane_resume(dq, DISPATCH_ACTIVATION_DONE); } +DISPATCH_NOINLINE void -_dispatch_lane_resume(dispatch_lane_t dq, bool activate) +_dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) { // covers all suspend and inactive bits, including side suspend bit const uint64_t suspend_bits = DISPATCH_QUEUE_SUSPEND_BITS_MASK; @@ -2955,57 +3000,86 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) bool is_source = (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE); uint64_t old_state, new_state; + // // Activation is a bit tricky as it needs to finalize before the wakeup. // - // If after doing its updates to the suspend count and/or inactive bit, - // the last suspension related bit that would remain is the - // NEEDS_ACTIVATION one, then this function: + // The inactive bits have 4 states: + // - 11: INACTIVE + // - 10: ACTIVATED, but not activating yet + // - 01: ACTIVATING right now + // - 00: fully active // - // 1. moves the state to { sc:1 i:0 na:0 } (converts the needs-activate into - // a suspend count) - // 2. runs the activation finalizer - // 3. consumes the suspend count set in (1), and finishes the resume flow + // ACTIVATED is only used when the queue is otherwise also suspended. + // In that case the last resume will take over the activation. // - // Concurrently, some property setters such as setting dispatch source - // handlers or _dispatch_lane_set_target_queue try to do in-place changes - // before activation. These protect their action by taking a suspend count. - // Step (1) above cannot happen if such a setter has locked the object. - if (activate) { + // The ACTIVATING state is tricky because it may be cleared by sources + // firing, to avoid priority inversions problems such as rdar://45419440 + // where as soon as the kevent is installed, the source may fire + // before its activating state was cleared. 
+ // + if (op == DISPATCH_ACTIVATE) { // relaxed atomic because this doesn't publish anything, this is only // about picking the thread that gets to finalize the activation os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if ((old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; - } else if (_dq_state_is_inactive(old_state)) { - // { sc:>0 i:1 na:1 } -> { i:0 na:1 } - // simple activation because sc is not 0 - // resume will deal with na:1 later - new_state = old_state - DISPATCH_QUEUE_INACTIVE; - } else { - // object already active, this is a no-op, just exit + if (!_dq_state_is_inactive(old_state)) { + // object already active or activated os_atomic_rmw_loop_give_up(return); } + if (unlikely(_dq_state_suspend_cnt(old_state))) { + // { sc != 0, i = INACTIVE } -> i = ACTIVATED + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATED; + } else { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state = old_state - DISPATCH_QUEUE_INACTIVE + + DISPATCH_QUEUE_ACTIVATING; + } }); + } else if (op == DISPATCH_ACTIVATION_DONE) { + // release barrier needed to publish the effect of dq_activate() + os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) { + os_atomic_rmw_loop_give_up({ + // object activation was already concurrently done + // due to a concurrent DISPATCH_WAKEUP_CLEAR_ACTIVATING + // wakeup call. + // + // We still need to consume the internal refcounts because + // the wakeup doesn't take care of these. + return _dispatch_release_2_tailcall(dq); + }); + } + + new_state = old_state - DISPATCH_QUEUE_ACTIVATING; + if (!_dq_state_is_runnable(new_state)) { + // Out of width or still suspended. 
+ // For the former, force _dispatch_lane_non_barrier_complete + // to reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else if (_dq_state_drain_locked(new_state)) { + // still locked by someone else, make drain_try_unlock() fail + // and reconsider whether it has work to do + new_state |= DISPATCH_QUEUE_DIRTY; + } else { + // clear overrides and force a wakeup + new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; + new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; + } + }); + if (unlikely(new_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK)) { + DISPATCH_CLIENT_CRASH(dq, "Corrupt activation state"); + } } else { // release barrier needed to publish the effect of // - dispatch_set_target_queue() // - dispatch_set_*_handler() - // - dq_activate() os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { - if ((old_state & suspend_bits) == DISPATCH_QUEUE_SUSPEND_INTERVAL - + DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // { sc:1 i:0 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_NEEDS_ACTIVATION; - } else if (is_source && (old_state & suspend_bits) == - DISPATCH_QUEUE_NEEDS_ACTIVATION + DISPATCH_QUEUE_INACTIVE) { - // { sc:0 i:1 na:1 } -> { sc:1 i:0 na:0 } - new_state = old_state - DISPATCH_QUEUE_INACTIVE - - DISPATCH_QUEUE_NEEDS_ACTIVATION - + DISPATCH_QUEUE_SUSPEND_INTERVAL; + new_state = old_state; + if (is_source && (old_state & suspend_bits) == + DISPATCH_QUEUE_INACTIVE) { + // { sc = 0, i = INACTIVE } -> i = ACTIVATING + new_state -= DISPATCH_QUEUE_INACTIVE; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (unlikely(os_sub_overflow(old_state, DISPATCH_QUEUE_SUSPEND_INTERVAL, &new_state))) { // underflow means over-resume or a suspend count transfer @@ -3019,6 +3093,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) // // below this, new_state = old_state - DISPATCH_QUEUE_SUSPEND_INTERVAL // + } else if (_dq_state_is_activated(new_state)) { + // { sc = 1, i = ACTIVATED } -> i = ACTIVATING + new_state -= 
DISPATCH_QUEUE_ACTIVATED; + new_state += DISPATCH_QUEUE_ACTIVATING; } else if (!_dq_state_is_runnable(new_state)) { // Out of width or still suspended. // For the former, force _dispatch_lane_non_barrier_complete @@ -3046,20 +3124,10 @@ _dispatch_lane_resume(dispatch_lane_t dq, bool activate) }); } - if ((old_state ^ new_state) & DISPATCH_QUEUE_NEEDS_ACTIVATION) { - // we cleared the NEEDS_ACTIVATION bit and we have a valid suspend count + if (_dq_state_is_activating(new_state)) { return _dispatch_lane_resume_activate(dq); } - if (activate) { - // if we're still in an activate codepath here we should have - // { sc:>0 na:1 }, if not we've got a corrupt state - if (unlikely(!_dq_state_is_suspended(new_state))) { - DISPATCH_CLIENT_CRASH(dq, "Invalid suspension state"); - } - return; - } - if (_dq_state_is_suspended(new_state)) { return; } @@ -3196,6 +3264,11 @@ _dispatch_lane_legacy_set_target_queue(void *ctxt) // see _dispatch_queue_wakeup() _dispatch_queue_sidelock_lock(dq); #endif + if (unlikely(!_dispatch_queue_is_mutable(dq))) { + /* serialize with _dispatch_mach_handoff_set_wlh */ + DISPATCH_CLIENT_CRASH(0, "Cannot change the target of this object " + "after it has been activated"); + } dq->do_targetq = tq; #if HAVE_PTHREAD_WORKQUEUE_QOS // see _dispatch_queue_wakeup() @@ -3217,7 +3290,7 @@ _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq) if (_dispatch_lane_try_inactive_suspend(dq)) { _dispatch_object_set_target_queue_inline(dq, tq); - return _dispatch_lane_resume(dq, false); + return _dispatch_lane_resume(dq, DISPATCH_RESUME); } #if !DISPATCH_ALLOW_NON_LEAF_RETARGET @@ -3284,8 +3357,10 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) } if (_dq_state_is_inactive(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", inactive"); - } else if (_dq_state_needs_activation(dq_state)) { - offset += dsnprintf(&buf[offset], bufsiz - offset, ", needs-activation"); + } else if 
(_dq_state_is_activated(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activated"); + } else if (_dq_state_is_activating(dq_state)) { + offset += dsnprintf(&buf[offset], bufsiz - offset, ", activating"); } if (_dq_state_is_enqueued(dq_state)) { offset += dsnprintf(&buf[offset], bufsiz - offset, ", enqueued"); @@ -3338,7 +3413,7 @@ static struct { uint64_t volatile time_total; uint64_t volatile count_total; uint64_t volatile thread_total; -} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS]; +} _dispatch_stats[DISPATCH_PERF_MON_BUCKETS] DISPATCH_ATOMIC64_ALIGN; DISPATCH_USED static size_t _dispatch_stat_buckets = DISPATCH_PERF_MON_BUCKETS; void @@ -3526,15 +3601,15 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, for (;;) { dispatch_assert(dic->dic_barrier_waiter == NULL); dc = next_dc; - if (unlikely(_dispatch_needs_to_return_to_kernel())) { - _dispatch_return_to_kernel(); - } if (unlikely(!dc)) { if (!dq->dq_items_tail) { break; } dc = _dispatch_queue_get_head(dq); } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } if (unlikely(serial_drain != (dq->dq_width == 1))) { break; } @@ -3706,8 +3781,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, } void -_dispatch_lane_activate(dispatch_lane_class_t dq, - DISPATCH_UNUSED bool *allow_resume) +_dispatch_lane_activate(dispatch_lane_class_t dq) { dispatch_queue_t tq = dq._dl->do_targetq; dispatch_priority_t pri = dq._dl->dq_priority; @@ -3949,6 +4023,19 @@ dispatch_workloop_set_cpupercent(dispatch_workloop_t dwl, uint8_t percent, dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT; } +#if DISPATCH_IOHID_SPI +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + dwl->dwl_attr->dwla_observers = *observer_hooks; + 
dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS; +} +#endif + static void _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, pthread_attr_t *attr) @@ -3975,7 +4062,7 @@ static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue .do_ctxt = NULL, .dq_label = "com.apple.root.workloop-custom", .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = DISPATCH_PRIORITY_FLAG_MANAGER | + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | DISPATCH_PRIORITY_SATURATED_OVERRIDE, .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, .dgq_thread_pool_size = 1, @@ -4076,10 +4163,13 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) void _dispatch_workloop_activate(dispatch_workloop_t dwl) { - uint64_t dq_state = os_atomic_and_orig2o(dwl, dq_state, - ~DISPATCH_QUEUE_INACTIVE, relaxed); + // This transitions either: + // - from INACTIVE to ACTIVATING + // - or from ACTIVE to ACTIVE + uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state, + ~DISPATCH_QUEUE_ACTIVATED, relaxed); - if (likely(dq_state & DISPATCH_QUEUE_INACTIVE)) { + if (likely(_dq_state_is_inactive(old_state))) { if (dwl->dwl_attr) { // Activation of a workloop with attributes forces us to create // the workloop up front and register the attributes with the @@ -4091,10 +4181,8 @@ _dispatch_workloop_activate(dispatch_workloop_t dwl) _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); } dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_NEEDS_ACTIVATION, - relaxed); - _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); - return; + os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); + return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); } } @@ -4136,9 +4224,15 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags, uint64_t *owned) { + 
dispatch_workloop_attr_t dwl_attr = dwl->dwl_attr; dispatch_thread_frame_s dtf; struct dispatch_object_s *dc = NULL, *next_dc; + if (dwl_attr && + (dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS)) { + _dispatch_set_pthread_root_queue_observer_hooks( + &dwl_attr->dwla_observers); + } _dispatch_thread_frame_push(&dtf, dwl); for (;;) { @@ -4175,10 +4269,12 @@ _dispatch_workloop_invoke2(dispatch_workloop_t dwl, *owned = (*owned & DISPATCH_QUEUE_ENQUEUED) + DISPATCH_QUEUE_IN_BARRIER + DISPATCH_QUEUE_WIDTH_INTERVAL; _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return NULL; out_with_barrier_waiter: _dispatch_thread_frame_pop(&dtf); + _dispatch_set_pthread_root_queue_observer_hooks(NULL); return dwl->do_targetq; } @@ -4308,6 +4404,29 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, _dispatch_trace_runtime_event(sync_async_handoff, dwl, 0); } +#if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_is_base_wlh(old_state)) { + // - Only non-"du_is_direct" sources & mach channels can be enqueued + // on the manager. + // + // - Only dispatch_source_cancel_and_wait() and + // dispatch_source_set_*_handler() use the barrier complete codepath, + // none of which are used by mach channels. + // + // Hence no source-ish object can both be a workloop and need to use the + // manager at the same time. + dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); + if (_dq_state_is_enqueued_on_target(old_state) || + _dq_state_is_enqueued_on_target(new_state) || + _dq_state_received_sync_wait(old_state) || + _dq_state_in_sync_transfer(old_state)) { + return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl, + old_state, new_state, flags); + } + _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dwl); + goto done; + } +#endif if (_dq_state_received_override(old_state)) { // Ensure that the root queue sees that this thread was overridden. 
@@ -4331,6 +4450,9 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, #endif } +#if DISPATCH_USE_KEVENT_WORKLOOP +done: +#endif if (flags & DISPATCH_WAKEUP_CONSUME_2) { return _dispatch_release_2_tailcall(dwl); } @@ -4590,27 +4712,28 @@ _dispatch_queue_wakeup_with_override_slow(dispatch_queue_t dq, uint64_t dq_state, dispatch_wakeup_flags_t flags) { dispatch_qos_t oqos, qos = _dq_state_max_qos(dq_state); - dispatch_queue_t tq; + dispatch_queue_t tq = dq->do_targetq; + mach_port_t owner; bool locked; if (_dq_state_is_base_anon(dq_state)) { - mach_port_t owner = _dq_state_drain_owner(dq_state); - if (owner) { + if (!_dispatch_is_in_root_queues_array(tq)) { + // Do not try to override pthread root + // queues, it isn't supported and can cause things to run + // on the wrong hierarchy if we enqueue a stealer by accident + goto out; + } else if ((owner = _dq_state_drain_owner(dq_state))) { (void)_dispatch_wqthread_override_start_check_owner(owner, qos, - &dq->dq_state_lock); + &dq->dq_state_lock); goto out; } - } - - tq = dq->do_targetq; - if (likely(!_dispatch_queue_is_mutable(dq))) { - locked = false; - } else if (_dispatch_is_in_root_queues_array(tq)) { // avoid locking when we recognize the target queue as a global root // queue it is gross, but is a very common case. The locking isn't // needed because these target queues cannot go away. 
locked = false; + } else if (likely(!_dispatch_queue_is_mutable(dq))) { + locked = false; } else if (_dispatch_queue_sidelock_trylock(upcast(dq)._dl, qos)) { // to traverse the tq chain safely we must // lock it to ensure it cannot change @@ -4700,6 +4823,13 @@ _dispatch_queue_wakeup_with_override(dispatch_queue_class_t dq, { dispatch_assert(_dq_state_should_override(dq_state)); +#if DISPATCH_USE_KEVENT_WORKLOOP + if (likely(_dq_state_is_base_wlh(dq_state))) { + _dispatch_trace_runtime_event(worker_request, dq._dq, 1); + return _dispatch_event_loop_poke((dispatch_wlh_t)dq._dq, dq_state, + flags | DISPATCH_EVENT_LOOP_OVERRIDE); + } +#endif // DISPATCH_USE_KEVENT_WORKLOOP return _dispatch_queue_wakeup_with_override_slow(dq._dq, dq_state, flags); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -4742,7 +4872,19 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); - if (likely(!_dq_state_is_suspended(old_state) && + if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) { + // When an event is being delivered to a source because its + // unote was being registered before the ACTIVATING state + // had a chance to be cleared, we don't want to fail the wakeup + // which could lead to a priority inversion. + // + // Instead, these wakeups are allowed to finish the pending + // activation. 
+ if (_dq_state_is_activating(old_state)) { + new_state &= ~DISPATCH_QUEUE_ACTIVATING; + } + } + if (likely(!_dq_state_is_suspended(new_state) && !_dq_state_is_enqueued(old_state) && (!_dq_state_drain_locked(old_state) || (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR && @@ -4970,6 +5112,379 @@ _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, _dispatch_lane_push(dq, dou, qos); } +#pragma mark - +#pragma mark dispatch_channel_t + +void +_dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free) +{ + dch->dch_callbacks = NULL; + _dispatch_lane_class_dispose(dch, allow_free); +} + +void +_dispatch_channel_xref_dispose(dispatch_channel_t dch) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch->_as_dq); + if (callbacks->dcc_acknowledge_cancel && !(dqf & DSF_CANCELED)) { + DISPATCH_CLIENT_CRASH(dch, "Release of a channel that has not been " + "cancelled, but has a cancel acknowledgement callback"); + } + dx_wakeup(dch, 0, DISPATCH_WAKEUP_MAKE_DIRTY); +} + +typedef struct dispatch_channel_invoke_ctxt_s { + dispatch_channel_t dcic_dch; + dispatch_thread_frame_s dcic_dtf; + dispatch_invoke_context_t dcic_dic; + dispatch_invoke_flags_t dcic_flags; + dispatch_queue_wakeup_target_t dcic_tq; + struct dispatch_object_s *dcic_next_dc; + bool dcic_called_drain; +} dispatch_channel_invoke_ctxt_s; + +static bool +_dispatch_channel_invoke_cancel_check(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t ctxt, + dispatch_channel_callbacks_t callbacks) +{ + bool rc = true; + if (!dch->dm_cancel_handler_called) { + if (_dispatch_queue_atomic_flags(dch) & DSF_CANCELED) { + dispatch_invoke_with_autoreleasepool(ctxt->dcic_flags, { + rc = callbacks->dcc_acknowledge_cancel(dch, dch->do_ctxt); + }); + if (rc) { + dch->dm_cancel_handler_called = true; + _dispatch_release_no_dispose(dch); + } else { + ctxt->dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + } + } + return rc; +} + 
+static bool +_dispatch_channel_invoke_checks(dispatch_channel_t dch, + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_callbacks_t callbacks) +{ + if (!_dispatch_channel_invoke_cancel_check(dch, dcic, callbacks)) { + return false; + } + if (unlikely(_dispatch_needs_to_return_to_kernel())) { + _dispatch_return_to_kernel(); + } + if (likely(dcic->dcic_flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { + dispatch_workloop_t dwl = (dispatch_workloop_t)_dispatch_get_wlh(); + if (unlikely(_dispatch_queue_max_qos(dwl) > dwl->dwl_drained_qos)) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + } + if (unlikely(_dispatch_queue_drain_should_narrow(dcic->dcic_dic))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + uint64_t dq_state = os_atomic_load(&dch->dq_state, relaxed); + if (unlikely(_dq_state_is_suspended(dq_state))) { + dcic->dcic_tq = dch->do_targetq; + return false; + } + return true; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_queue_wakeup_target_t +_dispatch_channel_invoke2(dispatch_channel_t dch, dispatch_invoke_context_t dic, + dispatch_invoke_flags_t flags, uint64_t *owned DISPATCH_UNUSED) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_channel_invoke_ctxt_s dcic = { + .dcic_dch = dch, + .dcic_dic = dic, + .dcic_flags = flags & + ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN, + .dcic_tq = DISPATCH_QUEUE_WAKEUP_NONE, + }; + + _dispatch_thread_frame_push(&dcic.dcic_dtf, dch); + + if (!_dispatch_channel_invoke_cancel_check(dch, &dcic, callbacks)) { + goto out; + } + + do { + struct dispatch_object_s *dc = dcic.dcic_next_dc; + + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + + if (unlikely(_dispatch_object_is_sync_waiter(dc))) { + DISPATCH_CLIENT_CRASH(0, "sync waiter found on channel"); + } + + if (_dispatch_object_is_channel_item(dc)) { + dcic.dcic_next_dc = dc; + dcic.dcic_called_drain = false; + 
dispatch_invoke_with_autoreleasepool(dcic.dcic_flags, { + if (callbacks->dcc_invoke(dch, &dcic, dch->do_ctxt)) { + if (unlikely(!dcic.dcic_called_drain)) { + DISPATCH_CLIENT_CRASH(0, "Channel didn't call " + "dispatch_channel_drain"); + } + } else { + dcic.dcic_tq = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } + }); + } else { + dcic.dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + _dispatch_continuation_pop_inline(dc, dic, flags, dch); + if (!_dispatch_channel_invoke_checks(dch, &dcic, callbacks)) { + break; + } + } + } while (dcic.dcic_tq == DISPATCH_QUEUE_WAKEUP_NONE); + +out: + _dispatch_thread_frame_pop(&dcic.dcic_dtf); + return dcic.dcic_tq; +} + +void +_dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags) +{ + _dispatch_queue_class_invoke(dch, dic, flags, + DISPATCH_INVOKE_DISALLOW_SYNC_WAITERS, _dispatch_channel_invoke2); +} + +void +dispatch_channel_foreach_work_item_peek_f( + dispatch_channel_invoke_ctxt_t dcic, + void *ctxt, dispatch_channel_enumerator_handler_t f) +{ + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called peek after drain"); + } + + dispatch_channel_t dch = dcic->dcic_dch; + struct dispatch_object_s *dc = dcic->dcic_next_dc; + + for (;;) { + dispatch_continuation_t dci = (dispatch_continuation_t)dc; + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + if (!f(ctxt, dci->dc_ctxt)) { + break; + } + if (dc == dch->dq_items_tail) { + break; + } + dc = os_mpsc_get_next(dc, do_next); + } +} + +void +dispatch_channel_drain_f(dispatch_channel_invoke_ctxt_t dcic, + void *_Nullable ctxt, dispatch_channel_drain_handler_t f) +{ + dispatch_channel_t dch = dcic->dcic_dch; + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + struct dispatch_object_s *dc; + uintptr_t dcf = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + void *unpop_item = NULL; + bool stop_invoke = false; + + if (dcic->dcic_called_drain) { + DISPATCH_CLIENT_CRASH(0, "Called drain twice in the same 
invoke"); + } + dcic->dcic_called_drain = true; + + do { + dc = dcic->dcic_next_dc; + if (unlikely(!dc)) { + if (!dch->dq_items_tail) { + break; + } + dc = _dispatch_queue_get_head(dch); + } + if (!_dispatch_object_is_channel_item(dc)) { + break; + } + + dcic->dcic_next_dc = _dispatch_queue_pop_head(dch, dc); + + _dispatch_continuation_pop_forwarded(upcast(dc)._dc, dcf, dch, { + dispatch_invoke_with_autoreleasepool(dcic->dcic_flags, { + stop_invoke = !f(ctxt, upcast(dc)._dc->dc_ctxt, &unpop_item); + }); + }); + if (unlikely(stop_invoke)) { + break; + } + } while (_dispatch_channel_invoke_checks(dch, dcic, callbacks)); + + if (unlikely(unpop_item)) { + dispatch_continuation_t dci = _dispatch_continuation_alloc(); + _dispatch_continuation_init_f(dci, dch, unpop_item, NULL, 0, dcf); + os_mpsc_undo_pop_head(os_mpsc(dch, dq_items), upcast(dci)._do, + dcic->dcic_next_dc, do_next); + dcic->dcic_next_dc = upcast(dci)._do; + } +} + +#ifdef __BLOCKS__ +void +dispatch_channel_foreach_work_item_peek( + dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_enumerator_block_t block) +{ + dispatch_channel_enumerator_handler_t f; + f = (dispatch_channel_enumerator_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_foreach_work_item_peek_f(dcic, block, f); +} + +void +dispatch_channel_drain(dispatch_channel_invoke_ctxt_t dcic, + dispatch_channel_drain_block_t block) +{ + dispatch_channel_drain_handler_t f; + f = (dispatch_channel_drain_handler_t)_dispatch_Block_invoke(block); + dispatch_channel_drain_f(dcic, block, f); +} +#endif // __BLOCKS__ + +DISPATCH_NOINLINE +void +_dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags) +{ + dispatch_channel_callbacks_t callbacks = dch->dch_callbacks; + dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE; + dispatch_queue_t dq = dch->_as_dq; + + if (unlikely(!callbacks->dcc_probe(dch, dch->do_ctxt))) { + target = DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT; + } else if 
(_dispatch_queue_class_probe(dch)) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } else if (_dispatch_queue_atomic_flags(dq) & DSF_CANCELED) { + if (!dch->dm_cancel_handler_called) { + target = DISPATCH_QUEUE_WAKEUP_TARGET; + } + } + + return _dispatch_queue_wakeup(dch, qos, flags, target); +} + +size_t +_dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz) +{ + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(dch); + size_t offset = 0; + + offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", + _dispatch_object_class_name(dch), dch); + offset += _dispatch_object_debug_attr(dch, &buf[offset], bufsiz - offset); + offset += _dispatch_queue_debug_attr(dch->_as_dq, &buf[offset], bufsiz - offset); + offset += dsnprintf(buf, bufsiz, "%s%s%s", + (dqf & DSF_CANCELED) ? "cancelled, " : "", + (dqf & DSF_NEEDS_EVENT) ? "needs-event, " : "", + (dqf & DSF_DELETED) ? "deleted, " : ""); + + return offset; +} + +dispatch_channel_t +dispatch_channel_create(const char *label, dispatch_queue_t tq, + void *ctxt, dispatch_channel_callbacks_t callbacks) +{ + dispatch_channel_t dch; + dispatch_queue_flags_t dqf = DSF_STRICT; + + if (callbacks->dcc_version < 1) { + DISPATCH_CLIENT_CRASH(callbacks->dcc_version, + "Unsupported callbacks version"); + } + + if (label) { + const char *tmp = _dispatch_strdup_if_mutable(label); + if (tmp != label) { + dqf |= DQF_LABEL_NEEDS_FREE; + label = tmp; + } + } + + if (unlikely(!tq)) { + tq = _dispatch_get_default_queue(true); + } else { + _dispatch_retain((dispatch_queue_t _Nonnull)tq); + } + + dch = _dispatch_queue_alloc(channel, dqf, 1, + DISPATCH_QUEUE_INACTIVE | DISPATCH_QUEUE_ROLE_INNER)._dch; + dch->dq_label = label; + dch->do_targetq = tq; + dch->dch_callbacks = callbacks; + dch->do_ctxt = ctxt; + if (!callbacks->dcc_acknowledge_cancel) { + dch->dm_cancel_handler_called = true; + dch->do_ref_cnt--; + } + return dch; +} + +DISPATCH_NOINLINE +static void +_dispatch_channel_enqueue_slow(dispatch_channel_t dch, 
void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_from_heap(); + dispatch_qos_t qos; + + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +DISPATCH_NOINLINE +void +dispatch_channel_enqueue(dispatch_channel_t dch, void *ctxt) +{ + uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_CHANNEL_ITEM; + dispatch_continuation_t dc = _dispatch_continuation_alloc_cacheonly(); + dispatch_qos_t qos; + + if (unlikely(!dc)) { + return _dispatch_channel_enqueue_slow(dch, ctxt); + } + qos = _dispatch_continuation_init_f(dc, dch, ctxt, NULL, 0, dc_flags); + _dispatch_continuation_async(dch, dc, qos, dc->dc_flags); +} + +#ifndef __APPLE__ +#if __BLOCKS__ +void typeof(dispatch_channel_async) dispatch_channel_async + __attribute__((__alias__("dispatch_async"))); +#endif + +void typeof(dispatch_channel_async_f) dispatch_channel_async_f + __attribute__((__alias__("dispatch_async_f"))); +#endif + +void +dispatch_channel_wakeup(dispatch_channel_t dch, qos_class_t qos_class) +{ + dispatch_qos_t oqos = _dispatch_qos_from_qos_class(qos_class); + dx_wakeup(dch, oqos, DISPATCH_WAKEUP_MAKE_DIRTY); +} + #pragma mark - #pragma mark dispatch_mgr_queue @@ -4979,7 +5494,11 @@ struct _dispatch_mgr_sched_s { volatile qos_class_t qos; int default_prio; int policy; +#if defined(_WIN32) + HANDLE hThread; +#else pthread_t tid; +#endif }; DISPATCH_STATIC_GLOBAL(struct _dispatch_mgr_sched_s _dispatch_mgr_sched); @@ -4991,21 +5510,22 @@ DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mgr_sched_pred); static int _dispatch_mgr_sched_qos2prio(qos_class_t qos) { + if (qos == QOS_CLASS_MAINTENANCE) return 4; switch (qos) { - case QOS_CLASS_MAINTENANCE: return 4; case QOS_CLASS_BACKGROUND: return 4; case QOS_CLASS_UTILITY: return 20; case QOS_CLASS_DEFAULT: return 31; case QOS_CLASS_USER_INITIATED: return 37; case QOS_CLASS_USER_INTERACTIVE: 
return 47; + default: return 0; } - return 0; } #endif // HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) { +#if !defined(_WIN32) struct sched_param param; #if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; @@ -5028,12 +5548,17 @@ _dispatch_mgr_sched_init(void *ctxt DISPATCH_UNUSED) } #endif _dispatch_mgr_sched.default_prio = param.sched_priority; +#else // defined(_WIN32) + _dispatch_mgr_sched.policy = 0; + _dispatch_mgr_sched.default_prio = THREAD_PRIORITY_NORMAL; +#endif // defined(_WIN32) _dispatch_mgr_sched.prio = _dispatch_mgr_sched.default_prio; } #endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES || DISPATCH_USE_KEVENT_WORKQUEUE #if DISPATCH_USE_PTHREAD_ROOT_QUEUES #if DISPATCH_USE_MGR_THREAD +#if !defined(_WIN32) DISPATCH_NOINLINE static pthread_t * _dispatch_mgr_root_queue_init(void) @@ -5062,10 +5587,20 @@ _dispatch_mgr_root_queue_init(void) } return &_dispatch_mgr_sched.tid; } +#else // defined(_WIN32) +DISPATCH_NOINLINE +static PHANDLE +_dispatch_mgr_root_queue_init(void) +{ + dispatch_once_f(&_dispatch_mgr_sched_pred, NULL, _dispatch_mgr_sched_init); + return &_dispatch_mgr_sched.hThread; +} +#endif // defined(_WIN32) static inline void _dispatch_mgr_priority_apply(void) { +#if !defined(_WIN32) struct sched_param param; do { param.sched_priority = _dispatch_mgr_sched.prio; @@ -5075,12 +5610,23 @@ _dispatch_mgr_priority_apply(void) ¶m)); } } while (_dispatch_mgr_sched.prio > param.sched_priority); +#else // defined(_WIN32) + int nPriority = _dispatch_mgr_sched.prio; + do { + if (nPriority > _dispatch_mgr_sched.default_prio) { + // TODO(compnerd) set thread scheduling policy + dispatch_assume_zero(SetThreadPriority(_dispatch_mgr_sched.hThread, nPriority)); + nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread); + } + } while (_dispatch_mgr_sched.prio > nPriority); +#endif // defined(_WIN32) } DISPATCH_NOINLINE static void 
_dispatch_mgr_priority_init(void) { +#if !defined(_WIN32) dispatch_pthread_root_queue_context_t pqc = _dispatch_mgr_root_queue.do_ctxt; pthread_attr_t *attr = &pqc->dpq_thread_attr; struct sched_param param; @@ -5099,9 +5645,16 @@ _dispatch_mgr_priority_init(void) if (unlikely(_dispatch_mgr_sched.prio > param.sched_priority)) { return _dispatch_mgr_priority_apply(); } +#else // defined(_WIN32) + int nPriority = GetThreadPriority(_dispatch_mgr_sched.hThread); + if (slowpath(_dispatch_mgr_sched.prio > nPriority)) { + return _dispatch_mgr_priority_apply(); + } +#endif // defined(_WIN32) } #endif // DISPATCH_USE_MGR_THREAD +#if !defined(_WIN32) DISPATCH_NOINLINE static void _dispatch_mgr_priority_raise(const pthread_attr_t *attr) @@ -5151,6 +5704,7 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) } #endif } +#endif // !defined(_WIN32) #endif // DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_ALWAYS_INLINE @@ -5205,12 +5759,15 @@ _dispatch_mgr_queue_drain(void) _dispatch_perfmon_end(perfmon_thread_manager); } +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" #if DISPATCH_USE_KEVENT_WORKQUEUE if (!_dispatch_kevent_workqueue_enabled) #endif { _dispatch_force_cache_cleanup(); } +#pragma clang diagnostic pop } void @@ -5461,22 +6018,51 @@ DISPATCH_NOINLINE static void _dispatch_kevent_worker_thread(dispatch_kevent_t *events, int *nevents) { - if (!events || !nevents) { + if (!dispatch_assume(events && nevents)) { + return; + } + if (*nevents == 0 || *events == NULL) { // events for worker thread request have already been delivered earlier + // or got cancelled before point of no return concurrently return; } - if (!dispatch_assume(*nevents && *events)) return; _dispatch_adopt_wlh_anon(); _dispatch_wlh_worker_thread(DISPATCH_WLH_ANON, *events, nevents); _dispatch_reset_wlh(); } +#if DISPATCH_USE_KEVENT_WORKLOOP +DISPATCH_NOINLINE +static void +_dispatch_workloop_worker_thread(uint64_t *workloop_id, + dispatch_kevent_t *events, int 
*nevents) +{ + if (!dispatch_assume(workloop_id && events && nevents)) { + return; + } + if (!dispatch_assume(*workloop_id != 0)) { + return _dispatch_kevent_worker_thread(events, nevents); + } + if (*nevents == 0 || *events == NULL) { + // events for worker thread request have already been delivered earlier + // or got cancelled before point of no return concurrently + return; + } + dispatch_wlh_t wlh = (dispatch_wlh_t)*workloop_id; + _dispatch_adopt_wlh(wlh); + _dispatch_wlh_worker_thread(wlh, *events, nevents); + _dispatch_preserve_wlh_storage_reference(wlh); +} +#endif // DISPATCH_USE_KEVENT_WORKLOOP #endif // DISPATCH_USE_KEVENT_WORKQUEUE #pragma mark - #pragma mark dispatch_root_queue #if DISPATCH_USE_PTHREAD_POOL static void *_dispatch_worker_thread(void *context); +#if defined(_WIN32) +static unsigned WINAPI _dispatch_worker_thread_thunk(LPVOID lpParameter); +#endif #endif // DISPATCH_USE_PTHREAD_POOL #if DISPATCH_DEBUG && DISPATCH_ROOT_QUEUE_DEBUG @@ -5561,6 +6147,7 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) } while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count, t_count - remaining, &t_count, acquire)); +#if !defined(_WIN32) pthread_attr_t *attr = &pqc->dpq_thread_attr; pthread_t tid, *pthr = &tid; #if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES @@ -5577,6 +6164,32 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) _dispatch_temporary_resource_shortage(); } } while (--remaining); +#else // defined(_WIN32) +#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES + if (unlikely(dq == &_dispatch_mgr_root_queue)) { + _dispatch_mgr_root_queue_init(); + } +#endif + do { + _dispatch_retain(dq); // released in _dispatch_worker_thread +#if DISPATCH_DEBUG + unsigned dwStackSize = 0; +#else + unsigned dwStackSize = 64 * 1024; +#endif + uintptr_t hThread = 0; + while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, 
STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { + if (errno != EAGAIN) { + (void)dispatch_assume(hThread); + } + _dispatch_temporary_resource_shortage(); + } + if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { + (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); + } + CloseHandle((HANDLE)hThread); + } while (--remaining); +#endif // defined(_WIN32) #else (void)floor; #endif // DISPATCH_USE_PTHREAD_POOL @@ -5754,18 +6367,55 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi _dispatch_init_basepri_wlh(rq->dq_priority); ddi->ddi_wlh_servicing = true; - if (unlikely(_dispatch_needs_to_return_to_kernel())) { - _dispatch_return_to_kernel(); - } retry: dispatch_assert(ddi->ddi_wlh_needs_delete); _dispatch_trace_item_pop(rq, dq); if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { dx_invoke(dq, &dic, flags); +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // dx_invoke() will always return `dq` unlocked or locked by another + // thread, and either have consumed the +2 or transferred it to the + // other thread. + // +#endif if (!ddi->ddi_wlh_needs_delete) { +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The fate of the workloop thread request has already been dealt + // with, which can happen for 4 reasons, for which we just want + // to go park and skip trying to unregister the thread request: + // - the workloop target has been changed + // - the workloop has been re-enqueued because of narrowing + // - the workloop has been re-enqueued on the manager queue + // - the workloop ownership has been handed off to a sync owner + // +#endif goto park; } +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The workloop has been drained to completion or suspended. + // dx_invoke() has cleared the enqueued bit before it returned. + // + // Since a dispatch_set_target_queue() could occur between the unlock + // and our reload of `dq_state` (rdar://32671286) we need to re-assess + // the workloop-ness of the queue. 
If it's not a workloop anymore, + // _dispatch_event_loop_leave_immediate() will have handled the kevent + // deletion already. + // + // Then, we check one last time that the queue is still not enqueued, + // in which case we attempt to quiesce it. + // + // If we find it enqueued again, it means someone else has been + // enqueuing concurrently and has made a thread request that coalesced + // with ours, but since dx_invoke() cleared the enqueued bit, + // the other thread didn't realize that and added a +1 ref count. + // Take over that +1, and add our own to make the +2 this loop expects, + // and drain again. + // +#endif // DISPATCH_USE_KEVENT_WORKLOOP dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 goto park; @@ -5776,6 +6426,18 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi goto retry; } } else { +#if DISPATCH_USE_KEVENT_WORKLOOP + // + // The workloop enters this function with a +2 refcount, however we + // couldn't acquire the lock due to suspension or discovering that + // the workloop was locked by a sync owner. + // + // We need to give up, and _dispatch_event_loop_leave_deferred() + // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to + // tell the kernel to stop driving this thread request. We leave + // a +1 with the thread request, and consume the extra +1 we have. 
+ // +#endif if (_dq_state_is_suspended(dq_state)) { dispatch_assert(!_dq_state_is_enqueued(dq_state)); _dispatch_release_2_no_dispose(dq); @@ -5916,16 +6578,18 @@ _dispatch_root_queue_init_pthread_pool(dispatch_queue_global_t dq, qos_class_t cls = _dispatch_qos_to_qos_class(_dispatch_priority_qos(pri) ?: _dispatch_priority_fallback_qos(pri)); if (cls) { +#if !defined(_WIN32) pthread_attr_t *attr = &pqc->dpq_thread_attr; int r = pthread_attr_init(attr); dispatch_assume_zero(r); r = pthread_attr_setdetachstate(attr, PTHREAD_CREATE_DETACHED); dispatch_assume_zero(r); +#endif // !defined(_WIN32) #if HAVE_PTHREAD_WORKQUEUE_QOS r = pthread_attr_set_qos_class_np(attr, cls, 0); dispatch_assume_zero(r); - } #endif // HAVE_PTHREAD_WORKQUEUE_QOS + } _dispatch_sema4_t *sema = &pqc->dpq_thread_mediator.dsema_sema; pqc->dpq_thread_mediator.do_vtable = DISPATCH_VTABLE(semaphore); _dispatch_sema4_init(sema, _DSEMA4_POLICY_LIFO); @@ -5953,8 +6617,10 @@ _dispatch_worker_thread(void *context) pqc->dpq_thread_configure(); } +#if !defined(_WIN32) // workaround tweaks the kernel workqueue does for us _dispatch_sigmask(); +#endif _dispatch_introspection_thread_add(); const int64_t timeout = 5ull * NSEC_PER_SEC; @@ -6000,6 +6666,14 @@ _dispatch_worker_thread(void *context) _dispatch_release(dq); // retained in _dispatch_root_queue_poke_slow return NULL; } +#if defined(_WIN32) +static unsigned WINAPI +_dispatch_worker_thread_thunk(LPVOID lpParameter) +{ + _dispatch_worker_thread(lpParameter); + return 0; +} +#endif // defined(_WIN32) #endif // DISPATCH_USE_PTHREAD_POOL DISPATCH_NOINLINE @@ -6092,6 +6766,7 @@ _dispatch_pthread_root_queue_create(const char *label, unsigned long flags, _dispatch_root_queue_init_pthread_pool(dpq->_as_dgq, pool_size, DISPATCH_PRIORITY_FLAG_OVERCOMMIT); +#if !defined(_WIN32) if (attr) { memcpy(&pqc->dpq_thread_attr, attr, sizeof(pthread_attr_t)); _dispatch_mgr_priority_raise(&pqc->dpq_thread_attr); @@ -6100,6 +6775,9 @@ _dispatch_pthread_root_queue_create(const 
char *label, unsigned long flags, } (void)dispatch_assume_zero(pthread_attr_setdetachstate( &pqc->dpq_thread_attr, PTHREAD_CREATE_DETACHED)); +#else // defined(_WIN32) + dispatch_assert(attr == NULL); +#endif // defined(_WIN32) if (configure) { pqc->dpq_thread_configure = _dispatch_Block_copy(configure); } @@ -6169,7 +6847,9 @@ _dispatch_pthread_root_queue_dispose(dispatch_queue_global_t dq, _dispatch_object_debug(dq, "%s", __func__); _dispatch_trace_queue_dispose(dq); +#if !defined(_WIN32) pthread_attr_destroy(&pqc->dpq_thread_attr); +#endif _dispatch_semaphore_dispose(&pqc->dpq_thread_mediator, NULL); if (pqc->dpq_thread_configure) { Block_release(pqc->dpq_thread_configure); @@ -6557,6 +7237,7 @@ _dispatch_runloop_queue_drain_one(dispatch_lane_t dq) dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) { + pthread_priority_t pp = _dispatch_get_priority(); dispatch_lane_t dq; if (unlikely(flags)) { @@ -6568,6 +7249,9 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) DISPATCH_QUEUE_ROLE_BASE_ANON); dq->do_targetq = _dispatch_get_default_queue(true); dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract + if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + dq->dq_priority = _dispatch_priority_from_pp_strip_flags(pp); + } _dispatch_runloop_queue_handle_init(dq); _dispatch_queue_set_bound_thread(dq); _dispatch_object_debug(dq, "%s", __func__); @@ -6694,16 +7378,18 @@ _dispatch_main_queue_wakeup(dispatch_queue_main_t dq, dispatch_qos_t qos, return _dispatch_lane_wakeup(dq, qos, flags); } +#if !defined(_WIN32) DISPATCH_NOINLINE DISPATCH_NORETURN static void _dispatch_sigsuspend(void) { static const sigset_t mask; - + pthread_sigmask(SIG_SETMASK, &mask, NULL); for (;;) { sigsuspend(&mask); } } +#endif // !defined(_WIN32) DISPATCH_NORETURN static void @@ -6711,7 +7397,9 @@ _dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) { // never returns, so burn bridges behind us _dispatch_clear_stack(0); +#if !defined(_WIN32) _dispatch_sigsuspend(); +#endif } void @@ -6736,7 +7424,11 @@ dispatch_main(void) pthread_setspecific(dispatch_main_key, &dispatch_main_key); _dispatch_sigmask(); #endif +#if !defined(_WIN32) pthread_exit(NULL); +#else + _endthreadex(0); +#endif // defined(_WIN32) DISPATCH_INTERNAL_CRASH(errno, "pthread_exit() returned"); #if HAVE_PTHREAD_MAIN_NP } @@ -6775,9 +7467,18 @@ _dispatch_queue_cleanup2(void) // See dispatch_main for call to _dispatch_sig_thread on linux. 
#ifndef __linux__ if (_dispatch_program_is_probably_callback_driven) { - _dispatch_barrier_async_detached_f(_dispatch_get_default_queue(true), - NULL, _dispatch_sig_thread); - sleep(1); // workaround 6778970 + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_t tid; + int r = pthread_create(&tid, &attr, (void*)_dispatch_sig_thread, NULL); + if (unlikely(r)) { + DISPATCH_CLIENT_CRASH(r, "Unable to create signal thread"); + } + pthread_attr_destroy(&attr); + // this used to be here as a workaround for 6778970 + // but removing it had bincompat fallouts :'( + sleep(1); } #endif @@ -6788,7 +7489,7 @@ _dispatch_queue_cleanup2(void) #endif } -static void +static void DISPATCH_TSD_DTOR_CC _dispatch_queue_cleanup(void *ctxt) { if (ctxt == &_dispatch_main_q) { @@ -6799,7 +7500,7 @@ _dispatch_queue_cleanup(void *ctxt) "Premature thread exit while a dispatch queue is running"); } -static void +static void DISPATCH_TSD_DTOR_CC _dispatch_wlh_cleanup(void *ctxt) { // POSIX defines that destructors are only called if 'ctxt' is non-null @@ -6809,7 +7510,7 @@ _dispatch_wlh_cleanup(void *ctxt) } DISPATCH_NORETURN -static void +static void DISPATCH_TSD_DTOR_CC _dispatch_deferred_items_cleanup(void *ctxt) { // POSIX defines that destructors are only called if 'ctxt' is non-null @@ -6818,7 +7519,7 @@ _dispatch_deferred_items_cleanup(void *ctxt) } DISPATCH_NORETURN -static void +static DISPATCH_TSD_DTOR_CC void _dispatch_frame_cleanup(void *ctxt) { // POSIX defines that destructors are only called if 'ctxt' is non-null @@ -6827,7 +7528,7 @@ _dispatch_frame_cleanup(void *ctxt) } DISPATCH_NORETURN -static void +static void DISPATCH_TSD_DTOR_CC _dispatch_context_cleanup(void *ctxt) { // POSIX defines that destructors are only called if 'ctxt' is non-null @@ -6856,19 +7557,63 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) "QoS Maintenance support required"); } +#if DISPATCH_USE_KEVENT_SETUP + struct 
pthread_workqueue_config cfg = { + .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, + .flags = 0, + .workq_cb = 0, + .kevent_cb = 0, + .workloop_cb = 0, + .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, +#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2 + .queue_label_offs = dispatch_queue_offsets.dqo_label, +#endif + }; +#endif + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunreachable-code" if (unlikely(!_dispatch_kevent_workqueue_enabled)) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init(_dispatch_worker_thread2, offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP + } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else + r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, + (pthread_workqueue_function_kevent_t) + _dispatch_kevent_worker_thread, + (pthread_workqueue_function_workloop_t) + _dispatch_workloop_worker_thread, + offsetof(struct dispatch_queue_s, dq_serialnum), 0); +#endif // DISPATCH_USE_KEVENT_SETUP +#endif // DISPATCH_USE_KEVENT_WORKLOOP #if DISPATCH_USE_KEVENT_WORKQUEUE } else if (wq_supported & WORKQ_FEATURE_KEVENT) { +#if DISPATCH_USE_KEVENT_SETUP + cfg.workq_cb = _dispatch_worker_thread2; + cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; + r = pthread_workqueue_setup(&cfg, sizeof(cfg)); +#else r = _pthread_workqueue_init_with_kevent(_dispatch_worker_thread2, (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); 
+#endif // DISPATCH_USE_KEVENT_SETUP #endif } else { DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); } +#pragma clang diagnostic pop if (r != 0) { DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, @@ -6967,8 +7712,12 @@ libdispatch_init(void) } #if DISPATCH_USE_THREAD_LOCAL_STORAGE +#if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include +#endif +#if !defined(_WIN32) #include +#endif #ifndef __ANDROID__ #ifdef SYS_gettid @@ -6976,7 +7725,21 @@ DISPATCH_ALWAYS_INLINE static inline pid_t gettid(void) { - return (pid_t) syscall(SYS_gettid); + return (pid_t)syscall(SYS_gettid); +} +#elif defined(__FreeBSD__) +DISPATCH_ALWAYS_INLINE +static inline pid_t +gettid(void) +{ + return (pid_t)pthread_getthreadid_np(); +} +#elif defined(_WIN32) +DISPATCH_ALWAYS_INLINE +static inline DWORD +gettid(void) +{ + return GetCurrentThreadId(); } #else #error "SYS_gettid unavailable on this system" @@ -6999,9 +7762,58 @@ _dispatch_install_thread_detach_callback(dispatch_function_t cb) } #endif -void +#if defined(_WIN32) +static bool +_dispatch_process_is_exiting(void) +{ + // The goal here is to detect if the current thread is executing cleanup + // code (e.g. FLS destructors) as a result of calling ExitProcess(). Windows + // doesn't provide an official method of getting this information, so we + // take advantage of how ExitProcess() works internally. The first thing + // that it does (according to MSDN) is terminate every other thread in the + // process. Logically, it should not be possible to create more threads + // after this point, and Windows indeed enforces this. Try to create a + // lightweight suspended thread, and if access is denied, assume that this + // is because the process is exiting. + // + // We aren't worried about any race conditions here during process exit. 
+ // Cleanup code is only run on the thread that already called ExitProcess(), + // and every other thread will have been forcibly terminated by the time + // that happens. Additionally, while CreateThread() could conceivably fail + // due to resource exhaustion, the process would already be in a bad state + // if that happens. This is only intended to prevent unwanted cleanup code + // from running, so the worst case is that a thread doesn't clean up after + // itself when the process is about to die anyway. + const size_t stack_size = 1; // As small as possible + HANDLE thread = CreateThread(NULL, stack_size, NULL, NULL, + CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, NULL); + if (thread) { + // Although Microsoft recommends against using TerminateThread, it's + // safe to use it here because we know that the thread is suspended and + // it has not executed any code due to a NULL lpStartAddress. There was + // a bug in Windows Server 2003 and Windows XP where the initial stack + // would not be freed, but libdispatch does not support them anyway. + TerminateThread(thread, 0); + CloseHandle(thread); + return false; + } + return GetLastError() == ERROR_ACCESS_DENIED; +} +#endif // defined(_WIN32) + + +void DISPATCH_TSD_DTOR_CC _libdispatch_tsd_cleanup(void *ctx) { +#if defined(_WIN32) + // On Windows, exiting a process will still call FLS destructors for the + // thread that called ExitProcess(). pthreads-based platforms don't call key + // destructors on exit, so be consistent. 
+ if (_dispatch_process_is_exiting()) { + return; + } +#endif // defined(_WIN32) + struct dispatch_tsd *tsd = (struct dispatch_tsd*) ctx; _tsd_call_cleanup(dispatch_priority_key, NULL); @@ -7035,7 +7847,11 @@ DISPATCH_NOINLINE void libdispatch_tsd_init(void) { +#if !defined(_WIN32) pthread_setspecific(__dispatch_tsd_key, &__dispatch_tsd); +#else + FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); +#endif // defined(_WIN32) __dispatch_tsd.tid = gettid(); } #endif diff --git a/src/queue_internal.h b/src/queue_internal.h index a627f0e9c..713677301 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -35,7 +35,7 @@ #pragma mark - #pragma mark dispatch_queue_flags, dq_state -DISPATCH_ENUM(dispatch_queue_flags, uint32_t, +DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, DQF_NONE = 0x00000000, DQF_AUTORELEASE_ALWAYS = 0x00010000, DQF_AUTORELEASE_NEVER = 0x00020000, @@ -121,20 +121,16 @@ DISPATCH_ENUM(dispatch_queue_flags, uint32_t, */ #define DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT 0x0200000000000000ull /* - * i: inactive bit (bit 56) + * i: inactive state (bit 56-55) * This bit means that the object is inactive (see dispatch_activate) */ -#define DISPATCH_QUEUE_INACTIVE 0x0100000000000000ull +#define DISPATCH_QUEUE_INACTIVE 0x0180000000000000ull +#define DISPATCH_QUEUE_ACTIVATED 0x0100000000000000ull +#define DISPATCH_QUEUE_ACTIVATING 0x0080000000000000ull /* - * na: needs activation (bit 55) - * This bit is set if the object is created inactive. It tells - * dispatch_queue_wakeup to perform various tasks at first wakeup. - * - * This bit is cleared as part of the first wakeup. Having that bit prevents - * the object from being woken up (because _dq_state_should_wakeup will say - * no), except in the dispatch_activate/dispatch_resume codepath. 
+ * This mask covers the inactive bits state */ -#define DISPATCH_QUEUE_NEEDS_ACTIVATION 0x0080000000000000ull +#define DISPATCH_QUEUE_INACTIVE_BITS_MASK 0x0180000000000000ull /* * This mask covers the suspend count (sc), side suspend count bit (ssc), * inactive (i) and needs activation (na) bits @@ -461,11 +457,12 @@ typedef struct dispatch_queue_specific_head_s { TAILQ_HEAD(, dispatch_queue_specific_s) dqsh_entries; } *dispatch_queue_specific_head_t; -#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x1u -#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x2u -#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x4u -#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x8u -#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x10u +#define DISPATCH_WORKLOOP_ATTR_HAS_SCHED 0x0001u +#define DISPATCH_WORKLOOP_ATTR_HAS_POLICY 0x0002u +#define DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT 0x0004u +#define DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS 0x0008u +#define DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY 0x0010u +#define DISPATCH_WORKLOOP_ATTR_HAS_OBSERVERS 0x0020u typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; typedef struct dispatch_workloop_attr_s { uint32_t dwla_flags; @@ -476,6 +473,7 @@ typedef struct dispatch_workloop_attr_s { uint8_t percent; uint32_t refillms; } dwla_cpupercent; + dispatch_pthread_root_queue_observer_hooks_s dwla_observers; } dispatch_workloop_attr_s; /* @@ -501,6 +499,7 @@ typedef struct dispatch_workloop_attr_s { * '--> dispatch_lane_class_t * +--> struct dispatch_lane_s * | +--> struct dispatch_source_s + * | +--> struct dispatch_channel_s * | '--> struct dispatch_mach_s * +--> struct dispatch_queue_static_s * '--> struct dispatch_queue_global_s @@ -598,6 +597,7 @@ typedef struct dispatch_workloop_attr_s { struct dispatch_source_refs_s *ds_refs; \ struct dispatch_timer_source_refs_s *ds_timer_refs; \ struct dispatch_mach_recv_refs_s *dm_recv_refs; \ + struct dispatch_channel_callbacks_s const *dch_callbacks; \ }; \ int volatile dq_sref_cnt @@ -647,9 +647,41 @@ struct 
dispatch_queue_global_s { DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane); } DISPATCH_CACHELINE_ALIGN; + +typedef struct dispatch_pthread_root_queue_observer_hooks_s { + void (*queue_will_execute)(dispatch_queue_t queue); + void (*queue_did_execute)(dispatch_queue_t queue); +} dispatch_pthread_root_queue_observer_hooks_s; +typedef dispatch_pthread_root_queue_observer_hooks_s + *dispatch_pthread_root_queue_observer_hooks_t; + +#ifdef __APPLE__ +#define DISPATCH_IOHID_SPI 1 + +DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW DISPATCH_NONNULL4 +dispatch_queue_global_t +_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID( + const char *label, unsigned long flags, const pthread_attr_t *attr, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks, + dispatch_block_t configure); + +DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( + dispatch_queue_t queue); + +DISPATCH_EXPORT DISPATCH_NOTHROW +void +_dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t workloop, + dispatch_pthread_root_queue_observer_hooks_t observer_hooks); +#endif // __APPLE__ + #if DISPATCH_USE_PTHREAD_POOL typedef struct dispatch_pthread_root_queue_context_s { +#if !defined(_WIN32) pthread_attr_t dpq_thread_attr; +#endif dispatch_block_t dpq_thread_configure; struct dispatch_semaphore_s dpq_thread_mediator; dispatch_pthread_root_queue_observer_hooks_s dpq_observer_hooks; @@ -719,6 +751,8 @@ typedef struct dispatch_thread_context_s { dispatch_io_t dtc_io_in_barrier; union firehose_buffer_u *dtc_fb; void *dtc_mig_demux_ctx; + dispatch_mach_msg_t dtc_dmsg; + struct dispatch_ipc_handoff_s *dtc_dih; }; } dispatch_thread_context_s; @@ -747,12 +781,18 @@ void _dispatch_queue_invoke_finish(dispatch_queue_t dq, dispatch_priority_t _dispatch_queue_compute_priority_and_wlh( dispatch_queue_class_t dq, dispatch_wlh_t *wlh_out); 
+DISPATCH_ENUM(dispatch_resume_op, int, + DISPATCH_RESUME, + DISPATCH_ACTIVATE, + DISPATCH_ACTIVATION_DONE, +); +void _dispatch_lane_resume(dispatch_lane_class_t dq, dispatch_resume_op_t how); + void _dispatch_lane_set_target_queue(dispatch_lane_t dq, dispatch_queue_t tq); void _dispatch_lane_class_dispose(dispatch_queue_class_t dq, bool *allow_free); void _dispatch_lane_dispose(dispatch_lane_class_t dq, bool *allow_free); void _dispatch_lane_suspend(dispatch_lane_class_t dq); -void _dispatch_lane_resume(dispatch_lane_class_t dq, bool activate); -void _dispatch_lane_activate(dispatch_lane_class_t dq, bool *allow_resume); +void _dispatch_lane_activate(dispatch_lane_class_t dq); void _dispatch_lane_invoke(dispatch_lane_class_t dq, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_lane_push(dispatch_lane_class_t dq, dispatch_object_t dou, @@ -814,8 +854,10 @@ void _dispatch_barrier_trysync_or_async_f(dispatch_lane_class_t dq, void *ctxt, dispatch_function_t func, uint32_t flags); void _dispatch_queue_atfork_child(void); +DISPATCH_COLD size_t _dispatch_queue_debug(dispatch_queue_class_t dq, char *buf, size_t bufsiz); +DISPATCH_COLD size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char *buf, size_t bufsiz); @@ -1008,8 +1050,8 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); // continuation is an internal implementation detail that should not be // introspected #define DC_FLAG_NO_INTROSPECTION 0x200ul -// never set on continuations, used by mach.c only -#define DC_FLAG_MACH_BARRIER 0x1000000ul +// The item is a channel item, not a continuation +#define DC_FLAG_CHANNEL_ITEM 0x400ul typedef struct dispatch_continuation_s { DISPATCH_CONTINUATION_HEADER(continuation); @@ -1082,6 +1124,9 @@ enum { DC_WORKLOOP_STEALING_TYPE, DC_OVERRIDE_STEALING_TYPE, DC_OVERRIDE_OWNING_TYPE, +#endif +#if HAVE_MACH + DC_MACH_IPC_HANDOFF_TYPE, #endif _DC_MAX_TYPE, }; @@ -1201,29 +1246,4 @@ dispatch_qos_t 
_dispatch_continuation_init_slow(dispatch_continuation_t dc, #endif /* __BLOCKS__ */ -typedef struct dispatch_pthread_root_queue_observer_hooks_s { - void (*queue_will_execute)(dispatch_queue_t queue); - void (*queue_did_execute)(dispatch_queue_t queue); -} dispatch_pthread_root_queue_observer_hooks_s; -typedef dispatch_pthread_root_queue_observer_hooks_s - *dispatch_pthread_root_queue_observer_hooks_t; - -#ifdef __APPLE__ -#define DISPATCH_IOHID_SPI 1 - -DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT -DISPATCH_NOTHROW DISPATCH_NONNULL4 -dispatch_queue_t -_dispatch_pthread_root_queue_create_with_observer_hooks_4IOHID( - const char *label, unsigned long flags, const pthread_attr_t *attr, - dispatch_pthread_root_queue_observer_hooks_t observer_hooks, - dispatch_block_t configure); - -DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW -bool -_dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( - dispatch_queue_t queue); - -#endif // __APPLE__ - #endif diff --git a/src/semaphore.c b/src/semaphore.c index 30cde9278..8f1a25681 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -72,7 +72,7 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dsema), dsema); offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset); #if USE_MACH_SEM - offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ", + offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%x, ", dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, @@ -158,7 +158,7 @@ _dispatch_group_create_with_count(uint32_t n) dg->do_targetq = _dispatch_get_default_queue(false); if (n) { os_atomic_store2o(dg, dg_bits, - -n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); + (uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // } return dg; diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 
b38dd25da..850792df5 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -97,10 +97,12 @@ _dg_state_gen(uint64_t dg_state) dispatch_group_t _dispatch_group_create_and_enter(void); void _dispatch_group_dispose(dispatch_object_t dou, bool *allow_free); +DISPATCH_COLD size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz); void _dispatch_semaphore_dispose(dispatch_object_t dou, bool *allow_free); +DISPATCH_COLD size_t _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz); diff --git a/src/shims.c b/src/shims.c new file mode 100644 index 000000000..3914d9c62 --- /dev/null +++ b/src/shims.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2013-2016 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" +#include "shims.h" + +#if !HAVE_STRLCPY +size_t strlcpy(char *dst, const char *src, size_t size) +{ + size_t res = strlen(dst) + strlen(src) + 1; + if (size > 0) { + size_t n = size - 1; + strncpy(dst, src, n); + dst[n] = 0; + } + return res; +} +#endif diff --git a/src/shims.h b/src/shims.h index fd8b3542d..ea5e09812 100644 --- a/src/shims.h +++ b/src/shims.h @@ -27,15 +27,20 @@ #ifndef __DISPATCH_OS_SHIMS__ #define __DISPATCH_OS_SHIMS__ +#if !defined(_WIN32) #include +#else // defined(_WIN32) +#include "shims/generic_win_stubs.h" +#include "shims/generic_sys_queue.h" +#endif // defined(_WIN32) -#ifdef __linux__ -#include "shims/linux_stubs.h" -#endif #ifdef __ANDROID__ #include "shims/android_stubs.h" -#endif +#endif // __ANDROID__ +#if !HAVE_MACH +#include "shims/mach.h" +#endif #include "shims/target.h" #if DISPATCH_USE_INTERNAL_WORKQUEUE @@ -65,20 +70,19 @@ #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif -#if TARGET_OS_WIN32 -#define bzero(ptr,len) memset((ptr), 0, (len)) -#define snprintf _snprintf +#if HAVE_STRLCPY +#include +#else // that is, if !HAVE_STRLCPY -inline size_t strlcpy(char *dst, const char *src, size_t size) { - int res = strlen(dst) + strlen(src) + 1; - if (size > 0) { - size_t n = size - 1; - strncpy(dst, src, n); - dst[n] = 0; - } - return res; -} -#endif // TARGET_OS_WIN32 +size_t strlcpy(char *dst, const char *src, size_t size); + +#endif // HAVE_STRLCPY + +#ifndef TAILQ_FOREACH_SAFE +#define TAILQ_FOREACH_SAFE(var, head, field, temp) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((temp) = TAILQ_NEXT((var), field), 1); (var) = (temp)) +#endif #if PTHREAD_WORKQUEUE_SPI_VERSION < 20140716 static inline int @@ -150,7 +154,7 @@ _pthread_workqueue_should_narrow(pthread_priority_t priority) #if HAVE_PTHREAD_QOS_H && __has_include() && \ defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) + 
DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) #define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 #define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL #else @@ -247,7 +251,7 @@ void __builtin_trap(void); #if __has_feature(c_static_assert) #define __dispatch_is_array(x) \ - _Static_assert(!__builtin_types_compatible_p(typeof((x)[0]) *, typeof(x)), \ + _Static_assert(!__builtin_types_compatible_p(__typeof__((x)[0]) *, __typeof__(x)), \ #x " isn't an array") #define countof(x) \ ({ __dispatch_is_array(x); sizeof(x) / sizeof((x)[0]); }) diff --git a/src/shims/android_stubs.h b/src/shims/android_stubs.h index c8032a390..d10c2d602 100644 --- a/src/shims/android_stubs.h +++ b/src/shims/android_stubs.h @@ -5,8 +5,8 @@ * * Licensed under Apache License v2.0 with Runtime Library Exception * - * See http://swift.org/LICENSE.txt for license information - * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors + * See https://swift.org/LICENSE.txt for license information + * See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors * */ diff --git a/src/shims/atomic.h b/src/shims/atomic.h index cb9b95f48..0bb27d3de 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -31,6 +31,10 @@ #error libdispatch requires C11 with #endif +// FreeBSD only defines _Bool in C mode. In C++ mode _Bool is not being defined. 
+#if defined(__cplusplus) && (defined(__FreeBSD__) || defined(_WIN32)) +#define _Bool bool +#endif #include #define memory_order_ordered memory_order_seq_cst @@ -39,11 +43,11 @@ #define os_atomic(type) type _Atomic #define _os_atomic_c11_atomic(p) \ - ((typeof(*(p)) _Atomic *)(p)) + ((__typeof__(*(p)) _Atomic *)(p)) // This removes the _Atomic and volatile qualifiers on the type of *p #define _os_atomic_basetypeof(p) \ - typeof(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) + __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #define os_atomic_load(p, m) \ atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) @@ -67,7 +71,7 @@ #define _os_atomic_c11_op(p, v, m, o, op) \ ({ _os_atomic_basetypeof(p) _v = (v), _r = \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), _v, \ - memory_order_##m); (typeof(_r))(_r op _v); }) + memory_order_##m); (__typeof__(_r))(_r op _v); }) #define _os_atomic_c11_op_orig(p, v, m, o, op) \ atomic_fetch_##o##_explicit(_os_atomic_c11_atomic(p), v, \ memory_order_##m) @@ -152,12 +156,12 @@ #define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ bool _result = false; \ - typeof(p) _p = (p); \ + __typeof__(p) _p = (p); \ ov = os_atomic_load(_p, relaxed); \ do { \ __VA_ARGS__; \ _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (os_unlikely(!_result)); \ + } while (unlikely(!_result)); \ _result; \ }) #define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) 
\ diff --git a/src/shims/atomic_sfb.h b/src/shims/atomic_sfb.h index 67172dbc6..a87def054 100644 --- a/src/shims/atomic_sfb.h +++ b/src/shims/atomic_sfb.h @@ -50,7 +50,7 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) "mov %[_all_ones], %[_bit]" "\n\t" "3: \n\t" : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) - : [_all_ones] "i" ((typeof(bit))UINT_MAX) : "memory", "cc"); + : [_all_ones] "i" ((__typeof__(bit))UINT_MAX) : "memory", "cc"); } else { __asm__ ( "1: \n\t" @@ -68,8 +68,8 @@ os_atomic_set_first_bit(volatile unsigned long *p, unsigned int max) "mov %[_all_ones], %[_bit]" "\n\t" "3: \n\t" : [_p] "=m" (*p), [_val] "=&r" (val), [_bit] "=&r" (bit) - : [_all_ones] "i" ((typeof(bit))UINT_MAX), - [_max] "g" ((typeof(bit))max) : "memory", "cc"); + : [_all_ones] "i" ((__typeof__(bit))UINT_MAX), + [_max] "g" ((__typeof__(bit))max) : "memory", "cc"); } return (unsigned int)bit; } diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h new file mode 100644 index 000000000..1d9a18d0f --- /dev/null +++ b/src/shims/generic_sys_queue.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * NOTE: This header files defines a trimmed down version of the BSD sys/queue.h + * macros for use on platforms which do not come with a sys/queue.h file. 
+ */ + +#ifndef __DISPATCH_SHIMS_SYS_QUEUE__ +#define __DISPATCH_SHIMS_SYS_QUEUE__ + +#ifndef TRASHIT +#define TRASHIT(elem) (elem) = NULL; +#endif + +#define TAILQ_HEAD(list_name, elem_type) \ + struct list_name { \ + struct elem_type *tq_first; \ + struct elem_type *tq_last; \ + } + +#define TAILQ_ENTRY(elem_type) \ + struct { \ + struct elem_type *te_next; \ + struct elem_type *te_prev; \ + } + +#define TAILQ_INIT(list) do { \ + (list)->tq_first = NULL; \ + (list)->tq_last = NULL; \ + } while (0) + +#define TAILQ_EMPTY(list) ((list)->tq_first == NULL) + +#define TAILQ_FIRST(list) ((list)->tq_first) + +#define TAILQ_LAST(list) ((list)->tq_last) + +#define TAILQ_NEXT(elem, field) ((elem)->field.te_next) + +#define TAILQ_PREV(elem, list, field) ((elem)->field.te_prev) + +#define TAILQ_FOREACH(var, list, field) \ + for ((var) = TAILQ_FIRST(list); \ + (var) != NULL; \ + (var) = TAILQ_NEXT(var, field)) + +#define TAILQ_REMOVE(list, elem, field) do { \ + if (TAILQ_NEXT(elem, field) != NULL) { \ + TAILQ_NEXT(elem, field)->field.te_prev = (elem)->field.te_prev; \ + } else { \ + (list)->tq_last = (elem)->field.te_prev; \ + } \ + if (TAILQ_PREV(elem, list, field) != NULL) { \ + TAILQ_PREV(elem, list, field)->field.te_next = (elem)->field.te_next; \ + } else { \ + (list)->tq_first = (elem)->field.te_next; \ + } \ + TRASHIT((elem)->field.te_next); \ + TRASHIT((elem)->field.te_prev); \ + } while(0) + +#define TAILQ_INSERT_TAIL(list, elem, field) do { \ + if (TAILQ_EMPTY(list)) { \ + (list)->tq_first = (list)->tq_last = (elem); \ + (elem)->field.te_prev = (elem)->field.te_next = NULL; \ + } else { \ + (elem)->field.te_next = NULL; \ + (elem)->field.te_prev = (list)->tq_last; \ + TAILQ_LAST(list)->field.te_next = (elem); \ + (list)->tq_last = (elem); \ + } \ + } while(0) + +#endif // __DISPATCH_SHIMS_SYS_QUEUE__ diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c new file mode 100644 index 000000000..67b6f5134 --- /dev/null +++ 
b/src/shims/generic_win_stubs.c @@ -0,0 +1,24 @@ +#include "internal.h" + +/* + * This file contains stubbed out functions we are using during + * the initial Windows port. When the port is complete, this file + * should be empty (and thus removed). + */ + +void +_dispatch_runloop_queue_dispose(dispatch_queue_t dq DISPATCH_UNUSED, + bool *allow_free DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +void +_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq DISPATCH_UNUSED) +{ + WIN_PORT_ERROR(); +} + +/* + * Stubbed out static data + */ diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h new file mode 100644 index 000000000..c983cdcee --- /dev/null +++ b/src/shims/generic_win_stubs.h @@ -0,0 +1,37 @@ + +#ifndef __DISPATCH__STUBS__INTERNAL +#define __DISPATCH__STUBS__INTERNAL + +#include + +#include +#include + +#include +#include + +/* + * Stub out defines for missing types + */ + +typedef __typeof__(_Generic((__SIZE_TYPE__)0, \ + unsigned long long int : (long long int)0, \ + unsigned long int : (long int)0, \ + unsigned int : (int)0, \ + unsigned short : (short)0, \ + unsigned char : (signed char)0)) ssize_t; + +#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR) +#define S_ISFIFO(mode) ((mode) & _S_IFIFO) +#define S_ISREG(mode) ((mode) & _S_IFREG) +#define S_ISSOCK(mode) 0 + +#define O_NONBLOCK 04000 + +#define bzero(ptr,len) memset((ptr), 0, (len)) + +// Report when an unported code path executes. +#define WIN_PORT_ERROR() \ + _RPTF1(_CRT_ASSERT, "WIN_PORT_ERROR in %s", __FUNCTION__) + +#endif diff --git a/src/shims/getprogname.c b/src/shims/getprogname.c new file mode 100644 index 000000000..996840aed --- /dev/null +++ b/src/shims/getprogname.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2009-2010 Mark Heily + * All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "getprogname.h" + +#if !HAVE_GETPROGNAME + +#if defined(_WIN32) +#define WIN32_LEAN_AND_MEAN +#ifndef _WIN32_WINNT +#define _WIN32_WINNT 0x0600 +#endif /* _WIN32_WINNT */ +#include +#include + +static INIT_ONCE getprogname_init_once = INIT_ONCE_STATIC_INIT; +static TCHAR progname[_MAX_FNAME]; + +static BOOL CALLBACK +getprogname_init_once_handler(PINIT_ONCE InitOnce, PVOID Parameter, + PVOID *lpContext) +{ + TCHAR path[MAX_PATH]; + DWORD length = GetModuleFileName(NULL, path, sizeof(path)); + + if (length < 0) { + progname[0] = '\0'; + return TRUE; + } else { + const char *filename; + + path[MAX_PATH - 1] = '\0'; + filename = strrchr(path, '\\'); + if (filename != NULL) { + filename++; + } else { + filename = path; + } + strcpy_s(progname, sizeof(progname), filename); + return TRUE; + } +} + +const char * +getprogname(void) +{ + (void)InitOnceExecuteOnce(&getprogname_init_once, + getprogname_init_once_handler, + NULL, + NULL); + return progname; +} +#endif /* _WIN32 */ +#endif /* HAVE_GETPROGNAME */ diff --git a/src/shims/getprogname.h b/src/shims/getprogname.h index 7eb19787e..a768eedd1 100644 --- a/src/shims/getprogname.h +++ b/src/shims/getprogname.h @@ -19,6 +19,8 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +#include + #ifndef __DISPATCH_SHIMS_GETPROGNAME__ #define __DISPATCH_SHIMS_GETPROGNAME__ @@ -28,6 +30,10 @@ extern const char *__progname; #endif /* __ANDROID */ +#if defined(_WIN32) +const char *getprogname(void); +#else + static inline char * getprogname(void) { @@ -39,6 +45,7 @@ 
getprogname(void) # error getprogname(3) is not available on this platform # endif } +#endif /* _WIN32 */ #endif /* HAVE_GETPROGNAME */ #endif /* __DISPATCH_SHIMS_GETPROGNAME__ */ diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 6de0394b6..89b7f8f61 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -50,8 +50,6 @@ #define DISPATCH_CACHELINE_ALIGN \ __attribute__((__aligned__(DISPATCH_CACHELINE_SIZE))) -#if !TARGET_OS_WIN32 - typedef enum { _dispatch_hw_config_logical_cpus, _dispatch_hw_config_physical_cpus, @@ -122,6 +120,64 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) return (uint32_t)sysconf(_SC_NPROCESSORS_ONLN); } } +#elif defined(_WIN32) + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiInfo = NULL; + PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiCurrent = NULL; + DWORD dwProcessorLogicalCount = 0; + DWORD dwProcessorPackageCount = 0; + DWORD dwProcessorCoreCount = 0; + DWORD dwSize = 0; + + while (true) { + DWORD dwResult; + + if (GetLogicalProcessorInformation(slpiInfo, &dwSize)) + break; + + dwResult = GetLastError(); + + if (slpiInfo) + free(slpiInfo); + + if (dwResult == ERROR_INSUFFICIENT_BUFFER) { + slpiInfo = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(dwSize); + dispatch_assert(slpiInfo); + } else { + slpiInfo = NULL; + dwSize = 0; + break; + } + } + + for (slpiCurrent = slpiInfo; + dwSize >= sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); + slpiCurrent++, dwSize -= sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION)) { + switch (slpiCurrent->Relationship) { + case RelationProcessorCore: + ++dwProcessorCoreCount; + dwProcessorLogicalCount += __popcnt64(slpiCurrent->ProcessorMask); + break; + case RelationProcessorPackage: + ++dwProcessorPackageCount; + break; + case RelationNumaNode: + case RelationCache: + case RelationGroup: + case RelationAll: + break; + } + } + + free(slpiInfo); + + switch (c) { + case _dispatch_hw_config_logical_cpus: + return dwProcessorLogicalCount; + case _dispatch_hw_config_physical_cpus: + return 
dwProcessorPackageCount; + case _dispatch_hw_config_active_cpus: + return dwProcessorCoreCount; + } #else const char *name = NULL; int r; @@ -167,31 +223,4 @@ _dispatch_hw_config_init(void) #endif // DISPATCH_HAVE_HW_CONFIG_COMMPAGE -#else // TARGET_OS_WIN32 - -static inline long -_dispatch_count_bits(unsigned long value) -{ - long bits = 0; - while (value) { - bits += (value & 1); - value = value >> 1; - } - return bits; -} - -static inline uint32_t -_dispatch_get_ncpus(void) -{ - uint32_t val; - DWORD_PTR procmask, sysmask; - if (GetProcessAffinityMask(GetCurrentProcess(), &procmask, &sysmask)) { - val = _dispatch_count_bits(procmask); - } else { - val = 1; - } - return val; -} -#endif // TARGET_OS_WIN32 - #endif /* __DISPATCH_SHIMS_HW_CONFIG__ */ diff --git a/src/shims/linux_stubs.c b/src/shims/linux_stubs.c deleted file mode 100644 index 4923eb0ca..000000000 --- a/src/shims/linux_stubs.c +++ /dev/null @@ -1,57 +0,0 @@ -/* - * This source file is part of the Swift.org open source project - * - * Copyright (c) 2015 Apple Inc. and the Swift project authors - * - * Licensed under Apache License v2.0 with Runtime Library Exception - * - * See http://swift.org/LICENSE.txt for license information - * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors - * - */ - -/* - * This file contains stubbed out functions we are using during - * the initial linux port. When the port is complete, this file - * should be empty (and thus removed). 
- */ - -#include -#ifdef __ANDROID__ -#include -#else -#include -#endif /* __ANDROID__ */ - -#if __has_include() -#include -#else -#include -#endif - -#include "pthread.h" -#include "os/linux_base.h" -#include "internal.h" - - -#undef LINUX_PORT_ERROR -#define LINUX_PORT_ERROR() do { printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",__FILE__,__LINE__,__FUNCTION__); abort(); } while (0) - - -/* - * Stubbed out static data - */ - -pthread_key_t dispatch_voucher_key; -pthread_key_t dispatch_pthread_root_queue_observer_hooks_key; - -unsigned short dispatch_timer__program_semaphore; -unsigned short dispatch_timer__wake_semaphore; -unsigned short dispatch_timer__fire_semaphore; -unsigned short dispatch_timer__configure_semaphore; -unsigned short dispatch_queue__pop_semaphore; -unsigned short dispatch_callout__entry_semaphore; -unsigned short dispatch_callout__return_semaphore; -unsigned short dispatch_queue__push_semaphore; -void (*_dispatch_block_special_invoke)(void*); -struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; diff --git a/src/shims/linux_stubs.h b/src/shims/linux_stubs.h deleted file mode 100644 index ec684170d..000000000 --- a/src/shims/linux_stubs.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * This source file is part of the Swift.org open source project - * - * Copyright (c) 2015 Apple Inc. and the Swift project authors - * - * Licensed under Apache License v2.0 with Runtime Library Exception - * - * See http://swift.org/LICENSE.txt for license information - * See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors - * - */ - -// forward declarations for functions we are stubbing out -// in the intial linux port. 
- -#ifndef __DISPATCH__STUBS__INTERNAL -#define __DISPATCH__STUBS__INTERNAL - -#ifndef TAILQ_FOREACH_SAFE -#define TAILQ_FOREACH_SAFE(var, head, field, temp) \ - for ((var) = TAILQ_FIRST((head)); \ - (var) && ((temp) = TAILQ_NEXT((var), field), 1); (var) = (temp)) -#endif - -#if DISPATCH_DEBUG -#ifndef TRASHIT -#define TRASHIT(x) do { (x) = (void *)-1; } while (0) -#endif -#endif - -/* - * Stub out defines for some mach types and related macros - */ - -typedef uint32_t mach_port_t; - -#define MACH_PORT_NULL (0) -#define MACH_PORT_DEAD (-1) - -typedef uint32_t mach_error_t; - -typedef uint32_t mach_msg_return_t; - -typedef uint32_t mach_msg_bits_t; - -typedef void *dispatch_mach_msg_t; - -typedef uint64_t firehose_activity_id_t; - -typedef void *mach_msg_header_t; - -// Print a warning when an unported code path executes. -#define LINUX_PORT_ERROR() do { \ - printf("LINUX_PORT_ERROR_CALLED %s:%d: %s\n",\ - __FILE__,__LINE__,__FUNCTION__); } while (0) - -/* - * Stub out defines for other missing types - */ - -// SIZE_T_MAX should not be hardcoded like this here. -#ifndef SIZE_T_MAX -#define SIZE_T_MAX (~(size_t)0) -#endif - -#endif diff --git a/src/shims/lock.c b/src/shims/lock.c index 3f430238d..f0e493796 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -20,16 +20,6 @@ #include "internal.h" -#define _dlock_syscall_switch(err, syscall, ...) \ - for (;;) { \ - int err; \ - switch ((err = ((syscall) < 0 ? 
errno : 0))) { \ - case EINTR: continue; \ - __VA_ARGS__ \ - } \ - break; \ - } - #if TARGET_OS_MAC dispatch_static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION); @@ -51,6 +41,21 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, #endif // HAVE_UL_UNFAIR_LOCK #endif +#if defined(_WIN32) +#if !HAVE_UL_UNFAIR_LOCK +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, + uint32_t timeout) +{ + (void)value; + (void)flags; + (void)timeout; + SwitchToThread(); +} +#endif +#endif + #pragma mark - semaphores #if USE_MACH_SEM @@ -144,8 +149,8 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) do { uint64_t nsec = _dispatch_timeout(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + _timeout.tv_sec = (__typeof__(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (__typeof__(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); kr = semaphore_timedwait(*sema, _timeout); } while (unlikely(kr == KERN_ABORTED)); @@ -200,8 +205,8 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) do { uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout); - _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC); - _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); + _timeout.tv_sec = (__typeof__(_timeout.tv_sec))(nsec / NSEC_PER_SEC); + _timeout.tv_nsec = (__typeof__(_timeout.tv_nsec))(nsec % NSEC_PER_SEC); ret = sem_timedwait(sema, &_timeout); } while (unlikely(ret == -1 && errno == EINTR)); @@ -230,9 +235,7 @@ _push_timer_resolution(DWORD ms) // aim for the best resolution we can accomplish dispatch_once(&once, ^{ TIMECAPS tc; - MMRESULT res; - res = timeGetDevCaps(&tc, sizeof(tc)); - if (res == MMSYSERR_NOERROR) { + if (timeGetDevCaps(&tc, sizeof(tc)) == MMSYSERR_NOERROR) { best_resolution = 
min(max(tc.wPeriodMin, best_resolution), tc.wPeriodMax); } @@ -302,7 +305,7 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(dsema->dsema_handle, msec); + wait_result = WaitForSingleObject(sema, msec); _pop_timer_resolution(resolution); return wait_result == WAIT_TIMEOUT; } @@ -311,60 +314,79 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) #endif #pragma mark - ulock wrappers +#if HAVE_UL_COMPARE_AND_WAIT || HAVE_UL_UNFAIR_LOCK + +// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT, EINTR +static int +_dlock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) +{ + for (;;) { + int rc = __ulock_wait(flags | ULF_NO_ERRNO, uaddr, val, timeout); + if (rc > 0) { + return ENOTEMPTY; + } + switch (-rc) { + case 0: + return 0; + case EINTR: + /* + * if we have a timeout, we need to return for the caller to + * recompute the new deadline, else just go back to wait. + */ + if (timeout == 0) { + continue; + } + /* FALLTHROUGH */ + case ETIMEDOUT: + case EFAULT: + return -rc; + default: + DISPATCH_INTERNAL_CRASH(-rc, "ulock_wait() failed"); + } + } +} + +static void +_dlock_wake(uint32_t *uaddr, uint32_t flags) +{ + int rc = __ulock_wake(flags | ULF_NO_ERRNO, uaddr, 0); + if (rc == 0 || rc == -ENOENT) return; + DISPATCH_INTERNAL_CRASH(-rc, "ulock_wake() failed"); +} + +#endif // HAVE_UL_COMPARE_AND_WAIT || HAVE_UL_UNFAIR_LOCK #if HAVE_UL_COMPARE_AND_WAIT static int _dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) { - int rc; - _dlock_syscall_switch(err, - rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout), - case 0: return rc > 0 ? 
ENOTEMPTY : 0; - case ETIMEDOUT: case EFAULT: return err; - default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); - ); + return _dlock_wait(uaddr, val, timeout, flags | UL_COMPARE_AND_WAIT); } static void _dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags) { - _dlock_syscall_switch(err, - __ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0), - case 0: case ENOENT: break; - default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); - ); + return _dlock_wake(uaddr, flags | UL_COMPARE_AND_WAIT); } -#endif +#endif // HAVE_UL_COMPARE_AND_WAIT #if HAVE_UL_UNFAIR_LOCK -// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT static int _dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, dispatch_lock_options_t flags) { - int rc; - _dlock_syscall_switch(err, - rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout), - case 0: return rc > 0 ? ENOTEMPTY : 0; - case ETIMEDOUT: case EFAULT: return err; - case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr, - "corruption of lock owner"); - default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed"); - ); + return _dlock_wait(uaddr, val, timeout, flags | UL_UNFAIR_LOCK); } static void _dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags) { - _dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0), - case 0: case ENOENT: break; - default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed"); - ); + return _dlock_wake(uaddr, flags | UL_UNFAIR_LOCK); } -#endif +#endif // HAVE_UL_UNFAIR_LOCK #pragma mark - futex wrappers #if HAVE_FUTEX #include @@ -455,12 +477,14 @@ _dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, #elif HAVE_FUTEX if (nsecs != DISPATCH_TIME_FOREVER) { struct timespec ts = { - .tv_sec = (typeof(ts.tv_sec))(nsec / NSEC_PER_SEC), - .tv_nsec = (typeof(ts.tv_nsec))(nsec % NSEC_PER_SEC), + .tv_sec = (__typeof__(ts.tv_sec))(nsec / NSEC_PER_SEC), + .tv_nsec = (__typeof__(ts.tv_nsec))(nsec % NSEC_PER_SEC), }; return _dispatch_futex_wait(address, value, &ts, 
FUTEX_PRIVATE_FLAG); } return _dispatch_futex_wait(address, value, NULL, FUTEX_PRIVATE_FLAG); +#elif defined(_WIN32) + WaitOnAddress(address, (PVOID)(uintptr_t)value, sizeof(value), INFINITE); #else #error _dispatch_wait_on_address unimplemented for this platform #endif @@ -473,6 +497,8 @@ _dispatch_wake_by_address(uint32_t volatile *address) _dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL); #elif HAVE_FUTEX _dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG); +#elif defined(_WIN32) + WakeByAddressAll((uint32_t *)address); #else (void)address; #endif @@ -504,7 +530,7 @@ _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte) } #if HAVE_UL_COMPARE_AND_WAIT int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0); - dispatch_assert(rc == 0 || rc == EFAULT); + dispatch_assert(rc == 0 || rc == EFAULT || rc == EINTR); #elif HAVE_FUTEX _dispatch_futex_wait(&dte->dte_value, UINT32_MAX, NULL, FUTEX_PRIVATE_FLAG); @@ -543,9 +569,7 @@ _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, } rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags); if (rc == ENOTEMPTY) { - next = value_self | DLOCK_WAITERS_BIT; - } else { - next = value_self; + next |= DLOCK_WAITERS_BIT; } } } @@ -562,11 +586,11 @@ void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul, dispatch_lock_options_t flags) { - dispatch_lock cur, value_self = _dispatch_lock_value_for_self(); + dispatch_lock cur, self = _dispatch_lock_value_for_self(); uint32_t timeout = 1; while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock, - DLOCK_OWNER_NULL, value_self, &cur, acquire))) { + DLOCK_OWNER_NULL, self, &cur, acquire))) { if (unlikely(_dispatch_lock_is_locked_by(cur, self))) { DISPATCH_CLIENT_CRASH(0, "trying to lock recursively"); } diff --git a/src/shims/lock.h b/src/shims/lock.h index cc75852d3..f50e5913d 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -29,7 +29,7 @@ #pragma mark - platform macros -DISPATCH_ENUM(dispatch_lock_options, uint32_t, 
+DISPATCH_OPTIONS(dispatch_lock_options, uint32_t, DLOCK_LOCK_NONE = 0x00000000, DLOCK_LOCK_DATA_CONTENTION = 0x00010000, ); @@ -79,6 +79,27 @@ _dispatch_lock_owner(dispatch_lock lock_value) return lock_value & DLOCK_OWNER_MASK; } +#elif defined(_WIN32) + +#include + +typedef DWORD dispatch_tid; +typedef uint32_t dispatch_lock; + +#define DLOCK_OWNER_NULL ((dispatch_tid)0) +#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc) +#define DLOCK_WAITERS_BIT ((dispatch_lock)0x00000001) +#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002) + +#define _dispatch_tid_self() ((dispatch_tid)(_dispatch_get_tsd_base()->tid << 2)) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_tid +_dispatch_lock_owner(dispatch_lock lock_value) +{ + return lock_value & DLOCK_OWNER_MASK; +} + #else # error define _dispatch_lock encoding scheme for your platform here #endif @@ -231,7 +252,7 @@ int _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value, void _dispatch_wake_by_address(uint32_t volatile *address); #pragma mark - thread event -/** +/*! * @typedef dispatch_thread_event_t * * @abstract diff --git a/src/shims/mach.h b/src/shims/mach.h new file mode 100644 index 000000000..759f5f3a9 --- /dev/null +++ b/src/shims/mach.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SHIMS_MACH__ +#define __DISPATCH_SHIMS_MACH__ + +/* + * Stub out defines for some mach types and related macros + */ + +typedef uint32_t mach_port_t; + +#define MACH_PORT_NULL (0) +#define MACH_PORT_DEAD (-1) + +typedef uint32_t mach_error_t; + +typedef uint32_t mach_msg_return_t; + +typedef uint32_t mach_msg_bits_t; + +typedef void *dispatch_mach_msg_t; + +typedef uint64_t firehose_activity_id_t; + +typedef void *mach_msg_header_t; + +#endif diff --git a/src/shims/target.h b/src/shims/target.h index 8e996aa73..a59dd3c3b 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -49,7 +49,7 @@ # endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 -# if __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 +# if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 # error "iOS hosts older than iOS 9.0 aren't supported anymore" # endif #endif diff --git a/src/shims/time.h b/src/shims/time.h index 348e14920..063d52397 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -31,7 +31,7 @@ #error "Please #include instead of this file directly." #endif -#if TARGET_OS_WIN32 +#if defined(_WIN32) static inline unsigned int sleep(unsigned int seconds) { @@ -107,7 +107,7 @@ _dispatch_get_nanoseconds(void) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts)); return _dispatch_timespec_to_nano(ts); -#elif TARGET_OS_WIN32 +#elif defined(_WIN32) // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). FILETIME ft; ULARGE_INTEGER li; @@ -122,16 +122,29 @@ _dispatch_get_nanoseconds(void) #endif } +/* On the use of clock sources in the CLOCK_MONOTONIC family + * + * The code below requires monotonic clock sources that only tick + * while the machine is running. 
+ * + * Per POSIX, the CLOCK_MONOTONIC family is supposed to tick during + * machine sleep; this is not the case on Linux, and that behavior + * became part of the Linux ABI. + * + * Using the CLOCK_MONOTONIC family on POSIX-compliant platforms + * will lead to bugs, hence its use is restricted to Linux. + */ + static inline uint64_t _dispatch_uptime(void) { #if HAVE_MACH_ABSOLUTE_TIME return mach_absolute_time(); -#elif defined(__linux__) +#elif HAVE_DECL_CLOCK_MONOTONIC && defined(__linux__) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); return _dispatch_timespec_to_nano(ts); -#elif HAVE_DECL_CLOCK_UPTIME +#elif HAVE_DECL_CLOCK_UPTIME && !defined(__linux__) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); return _dispatch_timespec_to_nano(ts); @@ -156,9 +169,12 @@ _dispatch_monotonic_time(void) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC, &ts)); return _dispatch_timespec_to_nano(ts); -#elif TARGET_OS_WIN32 - LARGE_INTEGER now; - return QueryPerformanceCounter(&now) ? 
now.QuadPart : 0; +#elif defined(_WIN32) + ULONGLONG ullTime; + if (!QueryUnbiasedInterruptTime(&ullTime)) + return 0; + + return ullTime * 100ull; #else #error platform needs to implement _dispatch_monotonic_time() #endif @@ -170,11 +186,11 @@ _dispatch_approximate_time(void) { #if HAVE_MACH_APPROXIMATE_TIME return mach_approximate_time(); -#elif defined(__linux__) +#elif HAVE_DECL_CLOCK_MONOTONIC_COARSE && defined(__linux__) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_MONOTONIC_COARSE, &ts)); return _dispatch_timespec_to_nano(ts); -#elif HAVE_DECL_CLOCK_UPTIME_FAST +#elif HAVE_DECL_CLOCK_UPTIME_FAST && !defined(__linux__) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_UPTIME_FAST, &ts)); return _dispatch_timespec_to_nano(ts); diff --git a/src/shims/tsd.h b/src/shims/tsd.h index eaed362c4..f44d7c863 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -58,6 +58,12 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif // _os_tsd_get_base #endif +#if defined(_WIN32) +#define DISPATCH_TSD_DTOR_CC __stdcall +#else +#define DISPATCH_TSD_DTOR_CC +#endif + #if DISPATCH_USE_DIRECT_TSD #ifndef __TSD_THREAD_QOS_CLASS #define __TSD_THREAD_QOS_CLASS 4 @@ -69,6 +75,7 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #define __TSD_MACH_SPECIAL_REPLY 8 #endif + static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; @@ -99,15 +106,36 @@ _dispatch_thread_key_create(const unsigned long *k, void (*d)(void *)) } #elif DISPATCH_USE_THREAD_LOCAL_STORAGE +#if defined(_WIN32) + DISPATCH_TSD_INLINE static inline void -_dispatch_thread_key_create(pthread_key_t *k, void (*d)(void *)) +_dispatch_thread_key_create(DWORD *k, void (DISPATCH_TSD_DTOR_CC *d)(void *)) +{ + dispatch_assert_zero((*k = FlsAlloc(d))); +} + +extern DWORD __dispatch_tsd_key; + +#else + +DISPATCH_TSD_INLINE +static inline void +_dispatch_thread_key_create(pthread_key_t *k, 
void (DISPATCH_TSD_DTOR_CC *d)(void *)) { dispatch_assert_zero(pthread_key_create(k, d)); } +extern pthread_key_t __dispatch_tsd_key; + +#endif + struct dispatch_tsd { +#if defined(_WIN32) + DWORD tid; +#else pid_t tid; +#endif void *dispatch_queue_key; void *dispatch_frame_key; void *dispatch_cache_key; @@ -126,8 +154,8 @@ struct dispatch_tsd { void *dispatch_deferred_items_key; }; -extern __thread struct dispatch_tsd __dispatch_tsd; -extern pthread_key_t __dispatch_tsd_key; +extern _Thread_local struct dispatch_tsd __dispatch_tsd; + extern void libdispatch_tsd_init(void); extern void _libdispatch_tsd_cleanup(void *ctx); @@ -285,7 +313,7 @@ _dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, } #endif -#if TARGET_OS_WIN32 +#if defined(_WIN32) #define _dispatch_thread_self() ((uintptr_t)GetCurrentThreadId()) #else #if DISPATCH_USE_DIRECT_TSD @@ -296,7 +324,7 @@ _dispatch_thread_setspecific_packed_pair(pthread_key_t k1, pthread_key_t k2, #endif #endif -#if TARGET_OS_WIN32 +#if defined(_WIN32) #define _dispatch_thread_port() ((mach_port_t)0) #elif !DISPATCH_USE_THREAD_LOCAL_STORAGE #if DISPATCH_USE_DIRECT_TSD diff --git a/src/shims/yield.c b/src/shims/yield.c new file mode 100644 index 000000000..43f0017ee --- /dev/null +++ b/src/shims/yield.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + +DISPATCH_NOINLINE +static void * +__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr) +{ + int spins = 0; + void *value; + while ((value = os_atomic_load(ptr, relaxed)) == NULL) { + _dispatch_preemption_yield(++spins); + } + return value; +} + +void * +_dispatch_wait_for_enqueuer(void **ptr) +{ +#if !DISPATCH_HW_CONFIG_UP +#if defined(__arm__) || defined(__arm64__) + int spins = DISPATCH_WAIT_SPINS_WFE; + void *value; + while (unlikely(spins-- > 0)) { + if (likely(value = __builtin_arm_ldrex(ptr))) { + __builtin_arm_clrex(); + return value; + } + __builtin_arm_wfe(); + } +#else + int spins = DISPATCH_WAIT_SPINS; + void *value; + while (unlikely(spins-- > 0)) { + if (likely(value = os_atomic_load(ptr, relaxed))) { + return value; + } + dispatch_hardware_pause(); + } +#endif +#endif // DISPATCH_HW_CONFIG_UP + return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr); +} diff --git a/src/shims/yield.h b/src/shims/yield.h index 121b48e61..4a1a0effe 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -50,7 +50,7 @@ // exchange at the tail and setting the head/prev pointer. 
#if DISPATCH_HW_CONFIG_UP #define _dispatch_wait_until(c) ({ \ - typeof(c) _c; \ + __typeof__(c) _c; \ int _spins = 0; \ for (;;) { \ if (likely(_c = (c))) break; \ @@ -59,11 +59,14 @@ } \ _c; }) #else +#ifndef DISPATCH_WAIT_SPINS_WFE +#define DISPATCH_WAIT_SPINS_WFE 10 +#endif #ifndef DISPATCH_WAIT_SPINS // #define DISPATCH_WAIT_SPINS 1024 #endif #define _dispatch_wait_until(c) ({ \ - typeof(c) _c; \ + __typeof__(c) _c; \ int _spins = -(DISPATCH_WAIT_SPINS); \ for (;;) { \ if (likely(_c = (c))) break; \ @@ -76,6 +79,9 @@ _c; }) #endif +DISPATCH_NOT_TAIL_CALLED DISPATCH_EXPORT +void *_dispatch_wait_for_enqueuer(void **ptr); + #pragma mark - #pragma mark _dispatch_contention_wait_until @@ -92,6 +98,11 @@ #define _dispatch_contention_spins() \ ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ (DISPATCH_CONTENTION_SPINS_MIN)) / 2) +#elif defined(_WIN32) +#define _dispatch_contention_spins() ({ \ + unsigned int _value; \ + rand_s(&_value); \ + (_value & DISPATCH_CONTENTION_SPINS_MAX) | DISPATCH_CONTENTION_SPINS_MIN; }) #else // Use randomness to prevent threads from resonating at the same // frequency and permanently contending. 
@@ -158,7 +169,24 @@ SWITCH_OPTION_WAIT, (((u)-1)/1000)+1) #endif #else +#if defined(_WIN32) +DISPATCH_INLINE void +_dispatch_contention_usleep(uint64_t useconds) { + static BOOL bQPFExecuted = FALSE; + static LARGE_INTEGER liFreq; + LARGE_INTEGER liStart, liNow; + + if (!bQPFExecuted) + bQPFExecuted = QueryPerformanceFrequency(&liFreq); + + QueryPerformanceCounter(&liStart); + do { + QueryPerformanceCounter(&liNow); + } while ((liNow.QuadPart - liStart.QuadPart) / (float)liFreq.QuadPart * 1000 * 1000 < useconds); +} +#else #define _dispatch_contention_usleep(u) usleep((u)) +#endif #endif // HAVE_MACH #endif // __DISPATCH_SHIMS_YIELD__ diff --git a/src/source.c b/src/source.c index b1b893178..96c363cdf 100644 --- a/src/source.c +++ b/src/source.c @@ -228,7 +228,7 @@ dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) DISPATCH_ALWAYS_INLINE static inline dispatch_continuation_t -_dispatch_source_handler_alloc(dispatch_source_t ds, void *func, long kind, +_dispatch_source_handler_alloc(dispatch_source_t ds, void *func, uintptr_t kind, bool is_block) { // sources don't propagate priority by default @@ -290,7 +290,7 @@ _dispatch_source_handler_free(dispatch_source_refs_t dr, long kind) DISPATCH_ALWAYS_INLINE static inline void -_dispatch_source_handler_replace(dispatch_source_t ds, long kind, +_dispatch_source_handler_replace(dispatch_source_t ds, uintptr_t kind, dispatch_continuation_t dc) { if (!dc->dc_func) { @@ -311,7 +311,7 @@ _dispatch_source_set_handler_slow(void *context) dispatch_assert(dx_type(ds) == DISPATCH_SOURCE_KEVENT_TYPE); dispatch_continuation_t dc = context; - long kind = (long)dc->dc_data; + uintptr_t kind = (uintptr_t)dc->dc_data; dc->dc_data = NULL; _dispatch_source_handler_replace(ds, kind, dc); } @@ -319,7 +319,7 @@ _dispatch_source_set_handler_slow(void *context) DISPATCH_NOINLINE static void _dispatch_source_set_handler(dispatch_source_t ds, void *func, - long kind, bool is_block) + uintptr_t kind, bool is_block) { 
dispatch_continuation_t dc; @@ -327,7 +327,7 @@ _dispatch_source_set_handler(dispatch_source_t ds, void *func, if (_dispatch_lane_try_inactive_suspend(ds)) { _dispatch_source_handler_replace(ds, kind, dc); - return _dispatch_lane_resume(ds, false); + return _dispatch_lane_resume(ds, DISPATCH_RESUME); } dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); @@ -637,7 +637,7 @@ _dispatch_source_install(dispatch_source_t ds, dispatch_wlh_t wlh, } void -_dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) +_dispatch_source_activate(dispatch_source_t ds) { dispatch_continuation_t dc; dispatch_source_refs_t dr = ds->ds_refs; @@ -667,11 +667,26 @@ _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume) } // call "super" - _dispatch_lane_activate(ds, allow_resume); + _dispatch_lane_activate(ds); if ((dr->du_is_direct || dr->du_is_timer) && !ds->ds_is_installed) { pri = _dispatch_queue_compute_priority_and_wlh(ds, &wlh); if (pri) { +#if DISPATCH_USE_KEVENT_WORKLOOP + dispatch_workloop_t dwl = _dispatch_wlh_to_workloop(wlh); + if (dwl && dr->du_filter == DISPATCH_EVFILT_TIMER_WITH_CLOCK && + dr->du_ident < DISPATCH_TIMER_WLH_COUNT) { + if (!dwl->dwl_timer_heap) { + uint32_t count = DISPATCH_TIMER_WLH_COUNT; + dwl->dwl_timer_heap = _dispatch_calloc(count, + sizeof(struct dispatch_timer_heap_s)); + } + dr->du_is_direct = true; + _dispatch_wlh_retain(wlh); + _dispatch_unote_state_set(dr, wlh, 0); + } +#endif + // rdar://45419440 this needs to be last _dispatch_source_install(ds, wlh, pri); } } @@ -794,10 +809,8 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, avoid_starvation = dq->do_targetq || !(dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } - if (avoid_starvation && - os_atomic_load2o(dr, ds_pending_data, relaxed)) { - retq = ds->do_targetq; - } + + ds->ds_latched = true; } else { // there is no point trying to be eager, the next thing to do is // to deliver the event @@ -849,21 +862,61 @@ 
_dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // from the source handler return ds->do_targetq; } - if (avoid_starvation && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { - // keep the old behavior to force re-enqueue to our target queue - // for the rearm. + if (dr->du_is_direct && _dispatch_unote_wlh(dr) == DISPATCH_WLH_ANON) { // - // if the handler didn't run, or this is a pending delete - // or our target queue is a global queue, then starvation is - // not a concern and we can rearm right away. - return ds->do_targetq; - } - _dispatch_unote_resume(dr); - if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { - // try to redrive the drain from under the lock for sources - // targeting an overcommit root queue to avoid parking - // when the next event has already fired - _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + // for legacy, direct event delivery, + // _dispatch_source_install above could cause a worker thread to + // deliver an event, and disarm the knote before we're through. + // + // This can lead to a double fire of the event handler for the same + // event with the following ordering: + // + //------------------------------------------------------------------ + // Thread1 Thread2 + // + // _dispatch_source_invoke() + // _dispatch_source_install() + // _dispatch_kevent_worker_thread() + // _dispatch_source_merge_evt() + // + // _dispatch_unote_resume() + // _dispatch_kevent_worker_thread() + // < re-enqueue due DIRTY > + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // _dispatch_unote_resume() + // _dispatch_source_merge_evt() + // + // _dispatch_source_invoke() + // ..._latch_and_call() + // + //------------------------------------------------------------------ + // + // To avoid this situation, we should never resume a direct source + // for which we haven't fired an event. 
+ // + // Note: this isn't a concern for kqworkloops as event delivery is + // serial with draining it by design. + // + if (ds->ds_latched) { + ds->ds_latched = false; + _dispatch_unote_resume(dr); + } + if (avoid_starvation) { + // To avoid starvation of a source firing immediately when we + // rearm it, force a round-trip through the end of the target + // queue no matter what. + return ds->do_targetq; + } + } else { + _dispatch_unote_resume(dr); + if (!avoid_starvation && _dispatch_wlh_should_poll_unote(dr)) { + // try to redrive the drain from under the lock for sources + // targeting an overcommit root queue to avoid parking + // when the next event has already fired + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } } } @@ -1118,6 +1171,7 @@ _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, du._dr); _dispatch_object_debug(ds, "%s", __func__); dx_wakeup(ds, _dispatch_qos_from_pp(pp), DISPATCH_WAKEUP_EVENT | + DISPATCH_WAKEUP_CLEAR_ACTIVATING | DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -1144,7 +1198,7 @@ _dispatch_source_timer_telemetry(dispatch_source_t ds, dispatch_clock_t clock, if (_dispatch_trace_timer_configure_enabled() || _dispatch_source_timer_telemetry_enabled()) { _dispatch_source_timer_telemetry_slow(ds, clock, values); - asm(""); // prevent tailcall + __asm__ __volatile__ (""); // prevent tailcall } } @@ -1374,6 +1428,7 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, #pragma mark - #pragma mark dispatch_source_debug +DISPATCH_COLD static size_t _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { @@ -1392,6 +1447,7 @@ _dispatch_source_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) (dqf & DSF_DELETED) ? 
"deleted, " : ""); } +DISPATCH_COLD static size_t _dispatch_timer_debug_attr(dispatch_source_t ds, char* buf, size_t bufsiz) { diff --git a/src/source_internal.h b/src/source_internal.h index 52ec8fd1c..d953629eb 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -35,15 +35,20 @@ _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(dispatch_source, dispatch_object) DISPATCH_CLASS_DECL_BARE(source, QUEUE); +DISPATCH_CLASS_DECL(channel, QUEUE); + #define DISPATCH_SOURCE_CLASS_HEADER(x) \ DISPATCH_LANE_CLASS_HEADER(x); \ uint16_t \ /* set under the drain lock */ \ ds_is_installed:1, \ + ds_latched:1, \ dm_connect_handler_called:1, \ dm_cancel_handler_called:1, \ dm_is_xpc:1, \ - __ds_flags_pad : 12; \ + dm_arm_no_senders:1, \ + dm_strict_reply:1, \ + __ds_flags_pad : 9; \ uint16_t __dq_flags_separation[0]; \ uint16_t \ /* set under the send queue lock */ \ @@ -57,15 +62,31 @@ struct dispatch_source_s { dispatch_assert_valid_lane_type(dispatch_source_s); dispatch_static_assert(sizeof(struct dispatch_source_s) <= 128); +struct dispatch_channel_s { + DISPATCH_SOURCE_CLASS_HEADER(channel); +} DISPATCH_ATOMIC64_ALIGN; +dispatch_assert_valid_lane_type(dispatch_channel_s); +dispatch_static_assert(sizeof(struct dispatch_channel_s) <= 128); + void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); -void _dispatch_source_activate(dispatch_source_t ds, bool *allow_resume); +void _dispatch_source_activate(dispatch_source_t ds); void _dispatch_source_invoke(dispatch_source_t ds, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); void _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); -size_t _dispatch_source_debug(dispatch_source_t ds, char* buf, size_t bufsiz); +DISPATCH_COLD +size_t _dispatch_source_debug(dispatch_source_t ds, char *buf, 
size_t bufsiz); + +void _dispatch_channel_xref_dispose(dispatch_channel_t dch); +void _dispatch_channel_dispose(dispatch_channel_t dch, bool *allow_free); +void _dispatch_channel_invoke(dispatch_channel_t dch, + dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +void _dispatch_channel_wakeup(dispatch_channel_t dch, dispatch_qos_t qos, + dispatch_wakeup_flags_t flags); +DISPATCH_COLD +size_t _dispatch_channel_debug(dispatch_channel_t dch, char *buf, size_t bufsiz); #endif /* __DISPATCH_SOURCE_INTERNAL__ */ diff --git a/src/swift/Block.swift b/src/swift/Block.swift index d4cae3c60..e90396bb1 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -5,12 +5,13 @@ // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// import CDispatch +import _SwiftDispatchOverlayShims public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { public let rawValue: UInt @@ -18,35 +19,35 @@ public struct DispatchWorkItemFlags : OptionSet, RawRepresentable { public static let barrier = DispatchWorkItemFlags(rawValue: 0x1) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let detached = DispatchWorkItemFlags(rawValue: 0x2) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let assignCurrentContext = DispatchWorkItemFlags(rawValue: 0x4) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let noQoS = DispatchWorkItemFlags(rawValue: 0x8) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, 
*) public static let inheritQoS = DispatchWorkItemFlags(rawValue: 0x10) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let enforceQoS = DispatchWorkItemFlags(rawValue: 0x20) } -@available(OSX 10.10, iOS 8.0, *) +@available(macOS 10.10, iOS 8.0, *) public class DispatchWorkItem { internal var _block: _DispatchBlock public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { - _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(flags.rawValue), + _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(UInt32(flags.rawValue)), qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority), block) } // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { - _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(flags.rawValue), noescapeBlock) + _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(UInt32(flags.rawValue)), noescapeBlock) } public func perform() { @@ -98,6 +99,3 @@ public class DispatchWorkItem { /// on the referential identity of a block. Particularly, dispatch_block_create. internal typealias _DispatchBlock = @convention(block) () -> Void internal typealias dispatch_block_t = @convention(block) () -> Void - -@_silgen_name("_swift_dispatch_block_create_noescape") -internal func _swift_dispatch_block_create_noescape(_ flags: dispatch_block_flags_t, _ block: () -> ()) -> _DispatchBlock diff --git a/src/swift/Data.swift b/src/swift/Data.swift index 5ad48aa79..3b81e68b3 100644 --- a/src/swift/Data.swift +++ b/src/swift/Data.swift @@ -5,17 +5,18 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// import CDispatch +import _SwiftDispatchOverlayShims public struct DispatchData : RandomAccessCollection { public typealias Iterator = DispatchDataIterator public typealias Index = Int - public typealias Indices = DefaultRandomAccessIndices + public typealias Indices = DefaultIndices public static let empty: DispatchData = DispatchData(data: _swift_dispatch_data_empty()) @@ -37,8 +38,8 @@ public struct DispatchData : RandomAccessCollection { fileprivate var _deallocator: (DispatchQueue?, @convention(block) () -> Void) { switch self { - case .free: return (nil, _dispatch_data_destructor_free()) - case .unmap: return (nil, _dispatch_data_destructor_munmap()) + case .free: return (nil, _swift_dispatch_data_destructor_free()) + case .unmap: return (nil, _swift_dispatch_data_destructor_munmap()) case .custom(let q, let b): return (q, b) } } @@ -53,7 +54,7 @@ public struct DispatchData : RandomAccessCollection { public init(bytes buffer: UnsafeBufferPointer) { let d = buffer.baseAddress == nil ? _swift_dispatch_data_empty() : dispatch_data_create(buffer.baseAddress!, buffer.count, nil, - _dispatch_data_destructor_default()) + _swift_dispatch_data_destructor_default()) self.init(data: d) } @@ -64,7 +65,7 @@ public struct DispatchData : RandomAccessCollection { public init(bytes buffer: UnsafeRawBufferPointer) { let d = buffer.baseAddress == nil ? 
_swift_dispatch_data_empty() : dispatch_data_create(buffer.baseAddress!, buffer.count, nil, - _dispatch_data_destructor_default()) + _swift_dispatch_data_destructor_default()) self.init(data: d) } @@ -116,8 +117,22 @@ public struct DispatchData : RandomAccessCollection { return try body(contentPtr) } + @available(swift 4.2) + public func enumerateBytes( + _ block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) + { + enumerateBytesCommon(block) + } + + @available(swift, obsoleted: 4.2, renamed: "enumerateBytes(_:)") public func enumerateBytes( block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) + { + enumerateBytesCommon(block) + } + + private func enumerateBytesCommon( + _ block: (_ buffer: UnsafeBufferPointer, _ byteIndex: Int, _ stop: inout Bool) -> Void) { // we know that capturing block in the closure being created/passed to dispatch_data_apply // does not cause block to escape because dispatch_data_apply does not allow its @@ -140,7 +155,7 @@ public struct DispatchData : RandomAccessCollection { /// - parameter count: The number of bytes to copy. @available(swift, deprecated: 4, message: "Use append(_: UnsafeRawBufferPointer) instead") public mutating func append(_ bytes: UnsafePointer, count: Int) { - let data = dispatch_data_create(bytes, count, nil, _dispatch_data_destructor_default()) + let data = dispatch_data_create(bytes, count, nil, _swift_dispatch_data_destructor_default()) self.append(DispatchData(data: data)) } @@ -151,7 +166,7 @@ public struct DispatchData : RandomAccessCollection { public mutating func append(_ bytes: UnsafeRawBufferPointer) { // Nil base address does nothing. 
guard bytes.baseAddress != nil else { return } - let data = dispatch_data_create(bytes.baseAddress!, bytes.count, nil, _dispatch_data_destructor_default()) + let data = dispatch_data_create(bytes.baseAddress!, bytes.count, nil, _swift_dispatch_data_destructor_default()) self.append(DispatchData(data: data)) } @@ -167,13 +182,10 @@ public struct DispatchData : RandomAccessCollection { /// /// - parameter buffer: The buffer of bytes to append. The size is calculated from `SourceType` and `buffer.count`. public mutating func append(_ buffer : UnsafeBufferPointer) { - let count = buffer.count * MemoryLayout.stride; - buffer.baseAddress?.withMemoryRebound(to: UInt8.self, capacity: count) { - self.append($0, count: count) - } + self.append(UnsafeRawBufferPointer(buffer)) } - private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: CountableRange) { + private func _copyBytesHelper(to pointer: UnsafeMutableRawPointer, from range: Range) { var copiedCount = 0 if range.isEmpty { return } let rangeSize = range.count @@ -214,8 +226,8 @@ public struct DispatchData : RandomAccessCollection { /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. /// - parameter range: The range in the `Data` to copy. /// - warning: This method does not verify that the contents at pointer have enough space to hold the required number of bytes. - @available(swift, deprecated: 4, message: "Use copyBytes(to: UnsafeMutableRawBufferPointer, from: CountableRange) instead") - public func copyBytes(to pointer: UnsafeMutablePointer, from range: CountableRange) { + @available(swift, deprecated: 4, message: "Use copyBytes(to: UnsafeMutableRawBufferPointer, from: Range) instead") + public func copyBytes(to pointer: UnsafeMutablePointer, from range: Range) { _copyBytesHelper(to: pointer, from: range) } @@ -224,7 +236,7 @@ public struct DispatchData : RandomAccessCollection { /// - parameter pointer: A pointer to the buffer you wish to copy the bytes into. 
The buffer must be large /// enough to hold `count` bytes. /// - parameter range: The range in the `Data` to copy. - public func copyBytes(to pointer: UnsafeMutableRawBufferPointer, from range: CountableRange) { + public func copyBytes(to pointer: UnsafeMutableRawBufferPointer, from range: Range) { assert(range.count <= pointer.count, "Buffer too small to copy \(range.count) bytes") guard pointer.baseAddress != nil else { return } _copyBytesHelper(to: pointer.baseAddress!, from: range) @@ -237,11 +249,11 @@ public struct DispatchData : RandomAccessCollection { /// - parameter buffer: A buffer to copy the data into. /// - parameter range: A range in the data to copy into the buffer. If the range is empty, this function will return 0 without copying anything. If the range is nil, as much data as will fit into `buffer` is copied. /// - returns: Number of bytes copied into the destination buffer. - public func copyBytes(to buffer: UnsafeMutableBufferPointer, from range: CountableRange? = nil) -> Int { + public func copyBytes(to buffer: UnsafeMutableBufferPointer, from range: Range? = nil) -> Int { let cnt = count guard cnt > 0 else { return 0 } - let copyRange : CountableRange + let copyRange : Range if let r = range { guard !r.isEmpty else { return 0 } precondition(r.startIndex >= 0) @@ -274,14 +286,14 @@ public struct DispatchData : RandomAccessCollection { return ptr!.load(fromByteOffset: index - offset, as: UInt8.self) } - public subscript(bounds: Range) -> RandomAccessSlice { - return RandomAccessSlice(base: self, bounds: bounds) + public subscript(bounds: Range) -> Slice { + return Slice(base: self, bounds: bounds) } /// Return a new copy of the data in a specified range. /// /// - parameter range: The range to copy. 
- public func subdata(in range: CountableRange) -> DispatchData { + public func subdata(in range: Range) -> DispatchData { let subrange = CDispatch.dispatch_data_create_subrange( __wrapped.__wrapped, range.startIndex, range.endIndex - range.startIndex) return DispatchData(data: subrange) @@ -334,7 +346,7 @@ public struct DispatchDataIterator : IteratorProtocol, Sequence { /// Advance to the next element and return it, or `nil` if no next /// element exists. - public mutating func next() -> DispatchData._Element? { + public mutating func next() -> DispatchData.Element? { if _position == _count { return nil } let element = _ptr.load(fromByteOffset: _position, as: UInt8.self) _position = _position + 1 @@ -346,15 +358,3 @@ public struct DispatchDataIterator : IteratorProtocol, Sequence { internal var _count: Int internal var _position: DispatchData.Index } - -@_silgen_name("_swift_dispatch_data_empty") -internal func _swift_dispatch_data_empty() -> dispatch_data_t - -@_silgen_name("_swift_dispatch_data_destructor_free") -internal func _dispatch_data_destructor_free() -> _DispatchBlock - -@_silgen_name("_swift_dispatch_data_destructor_munmap") -internal func _dispatch_data_destructor_munmap() -> _DispatchBlock - -@_silgen_name("_swift_dispatch_data_destructor_default") -internal func _dispatch_data_destructor_default() -> _DispatchBlock diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift index ec73acbb7..0fd138d6a 100644 --- a/src/swift/Dispatch.swift +++ b/src/swift/Dispatch.swift @@ -5,8 +5,8 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// @@ -16,14 +16,14 @@ import CDispatch /// dispatch_assert -@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +@available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) public enum DispatchPredicate { case onQueue(DispatchQueue) case onQueueAsBarrier(DispatchQueue) case notOnQueue(DispatchQueue) } -@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +@available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { switch condition { case .onQueue(let q): @@ -37,7 +37,7 @@ public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { } @_transparent -@available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) +@available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate) { // precondition is able to determine release-vs-debug asserts where the overlay // cannot, so formulating this into a call that we can call with precondition() @@ -50,44 +50,44 @@ public struct DispatchQoS : Equatable { public let qosClass: QoSClass public let relativePriority: Int - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let background = DispatchQoS(qosClass: .background, relativePriority: 0) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let utility = DispatchQoS(qosClass: .utility, relativePriority: 0) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 
10.10, iOS 8.0, *) public static let `default` = DispatchQoS(qosClass: .default, relativePriority: 0) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let userInitiated = DispatchQoS(qosClass: .userInitiated, relativePriority: 0) - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public static let userInteractive = DispatchQoS(qosClass: .userInteractive, relativePriority: 0) public static let unspecified = DispatchQoS(qosClass: .unspecified, relativePriority: 0) public enum QoSClass { - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) case background - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) case utility - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) case `default` - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) case userInitiated - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) case userInteractive case unspecified // _OSQoSClass is internal on Linux, so this initialiser has to // remain as an internal init. 
- @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) internal init?(rawValue: _OSQoSClass) { switch rawValue { case .QOS_CLASS_BACKGROUND: self = .background @@ -100,7 +100,7 @@ public struct DispatchQoS : Equatable { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) internal var rawValue: _OSQoSClass { switch self { case .background: return .QOS_CLASS_BACKGROUND @@ -133,9 +133,9 @@ public enum DispatchTimeoutResult { /// dispatch_group -public extension DispatchGroup { +extension DispatchGroup { public func notify(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], queue: DispatchQueue, execute work: @escaping @convention(block) () -> ()) { - if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) dispatch_group_notify(self.__wrapped, queue.__wrapped, item._block) } else { @@ -143,7 +143,7 @@ public extension DispatchGroup { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public func notify(queue: DispatchQueue, work: DispatchWorkItem) { dispatch_group_notify(self.__wrapped, queue.__wrapped, work._block) } @@ -163,10 +163,10 @@ public extension DispatchGroup { /// dispatch_semaphore -public extension DispatchSemaphore { +extension DispatchSemaphore { @discardableResult public func signal() -> Int { - return dispatch_semaphore_signal(self.__wrapped) + return Int(dispatch_semaphore_signal(self.__wrapped)) } public func wait() { diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index 9c667d570..aef5505bd 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -5,8 +5,8 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// @@ -53,176 +53,10 @@ static void _dispatch_overlay_constructor() { #endif /* USE_OBJC */ - -// Replicate the SWIFT_CC(swift) calling convention macro from -// swift/include/swift/Runtime/Config.h because it is -// quite awkward to include Config.h and its recursive includes -// in dispatch. This define must be manually kept in synch -#define SWIFT_CC(CC) SWIFT_CC_##CC -#if SWIFT_USE_SWIFTCALL -#define SWIFT_CC_swift __attribute__((swiftcall)) -#else -#define SWIFT_CC_swift -#endif - -extern "C" dispatch_queue_attr_t _swift_dispatch_queue_concurrent(void); -extern "C" void _swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)); -extern "C" dispatch_queue_t _swift_dispatch_get_main_queue(void); -extern "C" dispatch_data_t _swift_dispatch_data_empty(void); -extern "C" dispatch_block_t _swift_dispatch_data_destructor_default(void); -extern "C" dispatch_block_t _swift_dispatch_data_destructor_free(void); -extern "C" dispatch_block_t _swift_dispatch_data_destructor_munmap(void); -extern "C" dispatch_block_t _swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block); -extern "C" dispatch_block_t _swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block); -extern "C" void _swift_dispatch_block_cancel(dispatch_block_t block); -extern "C" long _swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); -extern "C" void 
_swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block); -extern "C" long _swift_dispatch_block_testcancel(dispatch_block_t block); -extern "C" void _swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block); -extern "C" void _swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block); -extern "C" void _swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block); -extern "C" void _swift_dispatch_release(dispatch_object_t obj); -extern "C" void _swift_dispatch_retain(dispatch_object_t obj); #if !USE_OBJC extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); #endif - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_queue_attr_t -_swift_dispatch_queue_concurrent(void) { - return DISPATCH_QUEUE_CONCURRENT; -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_apply_current(size_t iterations, __attribute__((__noescape__)) void (^block)(size_t)) { - dispatch_apply(iterations, (dispatch_queue_t _Nonnull)0, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_queue_t -_swift_dispatch_get_main_queue(void) { - return dispatch_get_main_queue(); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_data_t -_swift_dispatch_data_empty(void) { - return dispatch_data_empty; -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_block_t -_swift_dispatch_data_destructor_default(void) { - return DISPATCH_DATA_DESTRUCTOR_DEFAULT; -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_block_t -_swift_dispatch_data_destructor_free(void) { - return _dispatch_data_destructor_free; -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_block_t -_swift_dispatch_data_destructor_munmap(void) { - return _dispatch_data_destructor_munmap; -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE 
-extern "C" dispatch_block_t -_swift_dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos, int relative_priority, dispatch_block_t block) { - return dispatch_block_create_with_qos_class(flags, qos, relative_priority, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" dispatch_block_t -_swift_dispatch_block_create_noescape(dispatch_block_flags_t flags, dispatch_block_t block) { - return dispatch_block_create(flags, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_block_cancel(dispatch_block_t block) { - dispatch_block_cancel(block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" long -_swift_dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout) { - return dispatch_block_wait(block, timeout); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block) { - dispatch_block_notify(block, queue, notification_block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" long -_swift_dispatch_block_testcancel(dispatch_block_t block) { - return dispatch_block_testcancel(block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_async(dispatch_queue_t queue, dispatch_block_t block) { - dispatch_async(queue, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, dispatch_block_t block) { - dispatch_group_async(group, queue, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_sync(dispatch_queue_t queue, dispatch_block_t block) { - dispatch_sync(queue, block); -} - -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_release(dispatch_object_t obj) { - dispatch_release(obj); -} 
- -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE -extern "C" void -_swift_dispatch_retain(dispatch_object_t obj) { - dispatch_retain(obj); -} - -#define SOURCE(t) \ - extern "C" dispatch_source_type_t _swift_dispatch_source_type_##t(void); \ - SWIFT_CC(swift) \ - DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" dispatch_source_type_t \ - _swift_dispatch_source_type_##t(void) { \ - return DISPATCH_SOURCE_TYPE_##t; \ - } - -SOURCE(DATA_ADD) -SOURCE(DATA_OR) -SOURCE(DATA_REPLACE) -#if HAVE_MACH -SOURCE(MACH_SEND) -SOURCE(MACH_RECV) -SOURCE(MEMORYPRESSURE) -#endif -#ifndef __linux__ -SOURCE(PROC) -#endif -SOURCE(READ) -SOURCE(SIGNAL) -SOURCE(TIMER) -#ifndef __linux__ -SOURCE(VNODE) -#endif -SOURCE(WRITE) - #if !USE_OBJC // For CF functions with 'Get' semantics, the compiler currently assumes that @@ -235,7 +69,7 @@ SOURCE(WRITE) // platforms. extern "C" void swift_retain(void *); -SWIFT_CC(swift) DISPATCH_RUNTIME_STDLIB_INTERFACE +DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { if (obj) { swift_retain(obj); diff --git a/src/swift/IO.swift b/src/swift/IO.swift index d26f64160..7b0bb81a9 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -5,14 +5,14 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// import CDispatch -public extension DispatchIO { +extension DispatchIO { public enum StreamType : UInt { case stream = 0 @@ -35,14 +35,14 @@ public extension DispatchIO { } public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { - dispatch_read(fromFileDescriptor, maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in + dispatch_read(dispatch_fd_t(fromFileDescriptor), maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in handler(DispatchData(borrowedData: data), error) } } public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) { - dispatch_write(toFileDescriptor, data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in - handler(data.flatMap { DispatchData(borrowedData: $0) }, error) + dispatch_write(dispatch_fd_t(toFileDescriptor), data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data.map { DispatchData(borrowedData: $0) }, error) } } @@ -90,21 +90,21 @@ public extension DispatchIO { public func read(offset: off_t, length: Int, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { dispatch_io_read(self.__wrapped, offset, length, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - 
ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) + ioHandler(done, data.map { DispatchData(borrowedData: $0) }, error) } } public func write(offset: off_t, data: DispatchData, queue: DispatchQueue, ioHandler: @escaping (_ done: Bool, _ data: DispatchData?, _ error: Int32) -> Void) { dispatch_io_write(self.__wrapped, offset, data.__wrapped.__wrapped, queue.__wrapped) { (done: Bool, data: dispatch_data_t?, error: Int32) in - ioHandler(done, data.flatMap { DispatchData(borrowedData: $0) }, error) + ioHandler(done, data.map { DispatchData(borrowedData: $0) }, error) } } public func setInterval(interval: DispatchTimeInterval, flags: IntervalFlags = []) { - dispatch_io_set_interval(self.__wrapped, UInt64(interval.rawValue), flags.rawValue) + dispatch_io_set_interval(self.__wrapped, UInt64(interval.rawValue), dispatch_io_interval_flags_t(flags.rawValue)) } public func close(flags: CloseFlags = []) { - dispatch_io_close(self.__wrapped, flags.rawValue) + dispatch_io_close(self.__wrapped, dispatch_io_close_flags_t(flags.rawValue)) } } diff --git a/src/swift/Private.swift b/src/swift/Private.swift index df6a7b336..89b1bb2f4 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -5,8 +5,8 @@ // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index bff1bc323..377e27fdd 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -5,14 +5,15 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // dispatch/queue.h import CDispatch +import _SwiftDispatchOverlayShims public final class DispatchSpecificKey { public init() {} @@ -23,14 +24,14 @@ internal class _DispatchSpecificValue { internal init(value: T) { self.value = value } } -public extension DispatchQueue { +extension DispatchQueue { public struct Attributes : OptionSet { public let rawValue: UInt64 public init(rawValue: UInt64) { self.rawValue = rawValue } public static let concurrent = Attributes(rawValue: 1<<1) - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) public static let initiallyInactive = Attributes(rawValue: 1<<2) fileprivate func _attr() -> dispatch_queue_attr_t? 
{ @@ -39,7 +40,7 @@ public extension DispatchQueue { if self.contains(.concurrent) { attr = _swift_dispatch_queue_concurrent() } - if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if #available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { if self.contains(.initiallyInactive) { attr = CDispatch.dispatch_queue_attr_make_initially_inactive(attr) } @@ -49,20 +50,28 @@ public extension DispatchQueue { } public enum GlobalQueuePriority { - @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") - @available(*, deprecated: 8.0, message: "Use qos attributes instead") + @available(macOS, deprecated: 10.10, message: "Use qos attributes instead") + @available(iOS, deprecated: 8.0, message: "Use qos attributes instead") + @available(tvOS, deprecated, message: "Use qos attributes instead") + @available(watchOS, deprecated, message: "Use qos attributes instead") case high - @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") - @available(*, deprecated: 8.0, message: "Use qos attributes instead") + @available(macOS, deprecated: 10.10, message: "Use qos attributes instead") + @available(iOS, deprecated: 8.0, message: "Use qos attributes instead") + @available(tvOS, deprecated, message: "Use qos attributes instead") + @available(watchOS, deprecated, message: "Use qos attributes instead") case `default` - @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") - @available(*, deprecated: 8.0, message: "Use qos attributes instead") + @available(macOS, deprecated: 10.10, message: "Use qos attributes instead") + @available(iOS, deprecated: 8.0, message: "Use qos attributes instead") + @available(tvOS, deprecated, message: "Use qos attributes instead") + @available(watchOS, deprecated, message: "Use qos attributes instead") case low - @available(OSX, deprecated: 10.10, message: "Use qos attributes instead") - @available(*, deprecated: 8.0, message: "Use qos attributes instead") + @available(macOS, 
deprecated: 10.10, message: "Use qos attributes instead") + @available(iOS, deprecated: 8.0, message: "Use qos attributes instead") + @available(tvOS, deprecated, message: "Use qos attributes instead") + @available(watchOS, deprecated, message: "Use qos attributes instead") case background internal var _translatedValue: Int { @@ -78,14 +87,14 @@ public extension DispatchQueue { public enum AutoreleaseFrequency { case inherit - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) case workItem - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) case never internal func _attr(attr: dispatch_queue_attr_t?) -> dispatch_queue_attr_t? { - if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if #available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { switch self { case .inherit: // DISPATCH_AUTORELEASE_FREQUENCY_INHERIT @@ -111,13 +120,15 @@ public extension DispatchQueue { return DispatchQueue(queue: _swift_dispatch_get_main_queue()) } - @available(OSX, deprecated: 10.10, message: "") - @available(*, deprecated: 8.0, message: "") + @available(macOS, deprecated: 10.10, message: "") + @available(iOS, deprecated: 8.0, message: "") + @available(tvOS, deprecated, message: "") + @available(watchOS, deprecated, message: "") public class func global(priority: GlobalQueuePriority) -> DispatchQueue { - return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(priority._translatedValue, 0)) + return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(Int(priority._translatedValue), 0)) } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public class func global(qos: DispatchQoS.QoSClass = .default) -> DispatchQueue { return DispatchQueue(queue: CDispatch.dispatch_get_global_queue(Int(qos.rawValue.rawValue), 0)) } @@ -144,11 +155,11 @@ public extension DispatchQueue { if autoreleaseFrequency 
!= .inherit { attr = autoreleaseFrequency._attr(attr: attr) } - if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified { + if #available(macOS 10.10, iOS 8.0, *), qos != .unspecified { attr = CDispatch.dispatch_queue_attr_make_with_qos_class(attr, qos.qosClass.rawValue.rawValue, Int32(qos.relativePriority)) } - if #available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { + if #available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) { self.init(__label: label, attr: attr, queue: target) } else { self.init(__label: label, attr: attr) @@ -160,21 +171,85 @@ public extension DispatchQueue { return String(validatingUTF8: dispatch_queue_get_label(self.__wrapped))! } - @available(OSX 10.10, iOS 8.0, *) + /// + /// Submits a block for synchronous execution on this queue. + /// + /// Submits a work item to a dispatch queue like `async(execute:)`, however + /// `sync(execute:)` will not return until the work item has finished. + /// + /// Work items submitted to a queue with `sync(execute:)` do not observe certain + /// queue attributes of that queue when invoked (such as autorelease frequency + /// and QoS class). + /// + /// Calls to `sync(execute:)` targeting the current queue will result + /// in deadlock. Use of `sync(execute:)` is also subject to the same + /// multi-party deadlock problems that may result from the use of a mutex. + /// Use of `async(execute:)` is preferred. + /// + /// As an optimization, `sync(execute:)` invokes the work item on the thread which + /// submitted it, except when the queue is the main queue or + /// a queue targetting it. + /// + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `async(execute:)` + /// + @available(macOS 10.10, iOS 8.0, *) public func sync(execute workItem: DispatchWorkItem) { CDispatch.dispatch_sync(self.__wrapped, workItem._block) } - @available(OSX 10.10, iOS 8.0, *) + /// + /// Submits a work item for asynchronous execution on a dispatch queue. 
+ /// + /// `async(execute:)` is the fundamental mechanism for submitting + /// work items to a dispatch queue. + /// + /// Calls to `async(execute:)` always return immediately after the work item has + /// been submitted, and never wait for the work item to be invoked. + /// + /// The target queue determines whether the work item will be invoked serially or + /// concurrently with respect to other work items submitted to that same queue. + /// Serial queues are processed concurrently with respect to each other. + /// + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `sync(execute:)` + /// + @available(macOS 10.10, iOS 8.0, *) public func async(execute workItem: DispatchWorkItem) { CDispatch.dispatch_async(self.__wrapped, workItem._block) } - @available(OSX 10.10, iOS 8.0, *) + /// + /// Submits a work item to a dispatch queue and associates it with the given + /// dispatch group. The dispatch group may be used to wait for the completion + /// of the work items it references. + /// + /// - parameter group: the dispatch group to associate with the submitted block. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `sync(execute:)` + /// + @available(macOS 10.10, iOS 8.0, *) public func async(group: DispatchGroup, execute workItem: DispatchWorkItem) { CDispatch.dispatch_group_async(group.__wrapped, self.__wrapped, workItem._block) } + /// + /// Submits a work item to a dispatch queue and optionally associates it with a + /// dispatch group. The dispatch group may be used to wait for the completion + /// of the work items it references. + /// + /// - parameter group: the dispatch group to associate with the submitted + /// work item. If this is `nil`, the work item is not associated with a group. + /// - parameter flags: flags that control the execution environment of the + /// - parameter qos: the QoS at which the work item should be executed. + /// Defaults to `DispatchQoS.unspecified`. 
+ /// - parameter flags: flags that control the execution environment of the + /// work item. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `sync(execute:)` + /// - SeeAlso: `DispatchQoS` + /// - SeeAlso: `DispatchWorkItemFlags` + /// public func async( group: DispatchGroup? = nil, qos: DispatchQoS = .unspecified, @@ -193,7 +268,7 @@ public extension DispatchQueue { } var block: @convention(block) () -> Void = work - if #available(OSX 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { + if #available(macOS 10.10, iOS 8.0, *), (qos != .unspecified || !flags.isEmpty) { let workItem = DispatchWorkItem(qos: qos, flags: flags, block: work) block = workItem._block } @@ -232,7 +307,7 @@ public extension DispatchQueue { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) private func _syncHelper( fn: (DispatchWorkItem) -> (), flags: DispatchWorkItemFlags, @@ -256,27 +331,66 @@ public extension DispatchQueue { } } + /// + /// Submits a block for synchronous execution on this queue. + /// + /// Submits a work item to a dispatch queue like `sync(execute:)`, and returns + /// the value, of type `T`, returned by that work item. + /// + /// - parameter execute: The work item to be invoked on the queue. + /// - returns the value returned by the work item. + /// - SeeAlso: `sync(execute:)` + /// public func sync(execute work: () throws -> T) rethrows -> T { return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) } + /// + /// Submits a block for synchronous execution on this queue. + /// + /// Submits a work item to a dispatch queue like `sync(execute:)`, and returns + /// the value, of type `T`, returned by that work item. + /// + /// - parameter flags: flags that control the execution environment of the + /// - parameter execute: The work item to be invoked on the queue. + /// - returns the value returned by the work item. 
+ /// - SeeAlso: `sync(execute:)` + /// - SeeAlso: `DispatchWorkItemFlags` + /// public func sync(flags: DispatchWorkItemFlags, execute work: () throws -> T) rethrows -> T { if flags == .barrier { return try self._syncHelper(fn: _syncBarrier, execute: work, rescue: { throw $0 }) - } else if #available(OSX 10.10, iOS 8.0, *), !flags.isEmpty { + } else if #available(macOS 10.10, iOS 8.0, *), !flags.isEmpty { return try self._syncHelper(fn: sync, flags: flags, execute: work, rescue: { throw $0 }) } else { return try self._syncHelper(fn: sync, execute: work, rescue: { throw $0 }) } } + /// + /// Submits a work item to a dispatch queue for asynchronous execution after + /// a specified time. + /// + /// - parameter: deadline the time after which the work item should be executed, + /// given as a `DispatchTime`. + /// - parameter qos: the QoS at which the work item should be executed. + /// Defaults to `DispatchQoS.unspecified`. + /// - parameter flags: flags that control the execution environment of the + /// work item. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `async(execute:)` + /// - SeeAlso: `asyncAfter(deadline:execute:)` + /// - SeeAlso: `DispatchQoS` + /// - SeeAlso: `DispatchWorkItemFlags` + /// - SeeAlso: `DispatchTime` + /// public func asyncAfter( deadline: DispatchTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @escaping @convention(block) () -> Void) { - if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, item._block) } else { @@ -284,13 +398,30 @@ public extension DispatchQueue { } } + /// + /// Submits a work item to a dispatch queue for asynchronous execution after + /// a specified time. 
+ /// + /// - parameter: deadline the time after which the work item should be executed, + /// given as a `DispatchWallTime`. + /// - parameter qos: the QoS at which the work item should be executed. + /// Defaults to `DispatchQoS.unspecified`. + /// - parameter flags: flags that control the execution environment of the + /// work item. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `async(execute:)` + /// - SeeAlso: `asyncAfter(wallDeadline:execute:)` + /// - SeeAlso: `DispatchQoS` + /// - SeeAlso: `DispatchWorkItemFlags` + /// - SeeAlso: `DispatchWallTime` + /// public func asyncAfter( wallDeadline: DispatchWallTime, qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], execute work: @escaping @convention(block) () -> Void) { - if #available(OSX 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: work) CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, item._block) } else { @@ -298,17 +429,37 @@ public extension DispatchQueue { } } - @available(OSX 10.10, iOS 8.0, *) + /// + /// Submits a work item to a dispatch queue for asynchronous execution after + /// a specified time. + /// + /// - parameter: deadline the time after which the work item should be executed, + /// given as a `DispatchTime`. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `asyncAfter(deadline:qos:flags:execute:)` + /// - SeeAlso: `DispatchTime` + /// + @available(macOS 10.10, iOS 8.0, *) public func asyncAfter(deadline: DispatchTime, execute: DispatchWorkItem) { CDispatch.dispatch_after(deadline.rawValue, self.__wrapped, execute._block) } - @available(OSX 10.10, iOS 8.0, *) + /// + /// Submits a work item to a dispatch queue for asynchronous execution after + /// a specified time. 
+ /// + /// - parameter: deadline the time after which the work item should be executed, + /// given as a `DispatchWallTime`. + /// - parameter execute: The work item to be invoked on the queue. + /// - SeeAlso: `asyncAfter(wallDeadline:qos:flags:execute:)` + /// - SeeAlso: `DispatchTime` + /// + @available(macOS 10.10, iOS 8.0, *) public func asyncAfter(wallDeadline: DispatchWallTime, execute: DispatchWorkItem) { CDispatch.dispatch_after(wallDeadline.rawValue, self.__wrapped, execute._block) } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public var qos: DispatchQoS { var relPri: Int32 = 0 let cls = DispatchQoS.QoSClass(rawValue: _OSQoSClass(qosClass: dispatch_queue_get_qos_class(self.__wrapped, &relPri))!)! @@ -328,15 +479,12 @@ public extension DispatchQueue { public func setSpecific(key: DispatchSpecificKey, value: T?) { let k = Unmanaged.passUnretained(key).toOpaque() - let v = value.flatMap { _DispatchSpecificValue(value: $0) } - let p = v.flatMap { Unmanaged.passRetained($0).toOpaque() } + let v = value.map { _DispatchSpecificValue(value: $0) } + let p = v.map { Unmanaged.passRetained($0).toOpaque() } dispatch_queue_set_specific(self.__wrapped, k, p, _destructDispatchSpecificValue) } #if os(Android) - @_silgen_name("_dispatch_install_thread_detach_callback") - private static func _dispatch_install_thread_detach_callback(_ cb: @escaping @convention(c) () -> Void) - public static func setThreadDetachCallback(_ cb: @escaping @convention(c) () -> Void) { _dispatch_install_thread_detach_callback(cb) } @@ -348,12 +496,3 @@ private func _destructDispatchSpecificValue(ptr: UnsafeMutableRawPointer?) 
{ Unmanaged.fromOpaque(p).release() } } - -@_silgen_name("_swift_dispatch_queue_concurrent") -internal func _swift_dispatch_queue_concurrent() -> dispatch_queue_attr_t - -@_silgen_name("_swift_dispatch_get_main_queue") -internal func _swift_dispatch_get_main_queue() -> dispatch_queue_t - -@_silgen_name("_swift_dispatch_apply_current") -internal func _swift_dispatch_apply_current(_ iterations: Int, _ block: @convention(block) (Int) -> Void) diff --git a/src/swift/Source.swift b/src/swift/Source.swift index 421a6e9bb..fa0b3624e 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -5,17 +5,18 @@ // Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// import CDispatch +import _SwiftDispatchOverlayShims -public extension DispatchSourceProtocol { +extension DispatchSourceProtocol { public func setEventHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_event_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -23,13 +24,13 @@ public extension DispatchSourceProtocol { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public func setEventHandler(handler: DispatchWorkItem) { CDispatch.dispatch_source_set_event_handler((self as! 
DispatchSource).__wrapped, handler._block) } public func setCancelHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -37,13 +38,13 @@ public extension DispatchSourceProtocol { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public func setCancelHandler(handler: DispatchWorkItem) { CDispatch.dispatch_source_set_cancel_handler((self as! DispatchSource).__wrapped, handler._block) } public func setRegistrationHandler(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], handler: DispatchSourceHandler?) { - if #available(OSX 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { + if #available(macOS 10.10, iOS 8.0, *), let h = handler, qos != .unspecified || !flags.isEmpty { let item = DispatchWorkItem(qos: qos, flags: flags, block: h) CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, item._block) } else { @@ -51,12 +52,12 @@ public extension DispatchSourceProtocol { } } - @available(OSX 10.10, iOS 8.0, *) + @available(macOS 10.10, iOS 8.0, *) public func setRegistrationHandler(handler: DispatchWorkItem) { CDispatch.dispatch_source_set_registration_handler((self as! DispatchSource).__wrapped, handler._block) } - @available(OSX 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) + @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) public func activate() { (self as! 
DispatchSource).activate() } @@ -90,7 +91,7 @@ public extension DispatchSourceProtocol { } } -public extension DispatchSource { +extension DispatchSource { #if HAVE_MACH public struct MachSendEvent : OptionSet, RawRepresentable { public let rawValue: UInt @@ -151,77 +152,77 @@ public extension DispatchSource { #if HAVE_MACH public class func makeMachSendSource(port: mach_port_t, eventMask: MachSendEvent, queue: DispatchQueue? = nil) -> DispatchSourceMachSend { - let source = dispatch_source_create(_swift_dispatch_source_type_mach_send(), UInt(port), eventMask.rawValue, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_MACH_SEND(), UInt(port), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceMachSend } #endif #if HAVE_MACH public class func makeMachReceiveSource(port: mach_port_t, queue: DispatchQueue? = nil) -> DispatchSourceMachReceive { - let source = dispatch_source_create(_swift_dispatch_source_type_mach_recv(), UInt(port), 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_MACH_RECV(), UInt(port), 0, queue?.__wrapped) return DispatchSource(source) as DispatchSourceMachReceive } #endif #if HAVE_MACH public class func makeMemoryPressureSource(eventMask: MemoryPressureEvent, queue: DispatchQueue? = nil) -> DispatchSourceMemoryPressure { - let source = dispatch_source_create(_swift_dispatch_source_type_memorypressure(), 0, eventMask.rawValue, queue.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_MEMORYPRESSURE(), 0, eventMask.rawValue, queue.__wrapped) return DispatchSourceMemoryPressure(source) } #endif #if !os(Linux) && !os(Android) public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? 
= nil) -> DispatchSourceProcess { - let source = dispatch_source_create(_swift_dispatch_source_type_proc(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_PROC(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceProcess } #endif public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { - let source = dispatch_source_create(_swift_dispatch_source_type_read(), UInt(fileDescriptor), 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_READ(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceRead } public class func makeSignalSource(signal: Int32, queue: DispatchQueue? = nil) -> DispatchSourceSignal { - let source = dispatch_source_create(_swift_dispatch_source_type_signal(), UInt(signal), 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_SIGNAL(), UInt(signal), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceSignal } public class func makeTimerSource(flags: TimerFlags = [], queue: DispatchQueue? = nil) -> DispatchSourceTimer { - let source = dispatch_source_create(_swift_dispatch_source_type_timer(), 0, flags.rawValue, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_TIMER(), 0, UInt(flags.rawValue), queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceTimer } public class func makeUserDataAddSource(queue: DispatchQueue? 
= nil) -> DispatchSourceUserDataAdd { - let source = dispatch_source_create(_swift_dispatch_source_type_data_add(), 0, 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_DATA_ADD(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataAdd } public class func makeUserDataOrSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataOr { - let source = dispatch_source_create(_swift_dispatch_source_type_data_or(), 0, 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_DATA_OR(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataOr } public class func makeUserDataReplaceSource(queue: DispatchQueue? = nil) -> DispatchSourceUserDataReplace { - let source = dispatch_source_create(_swift_dispatch_source_type_data_replace(), 0, 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_DATA_REPLACE(), 0, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceUserDataReplace } #if !os(Linux) && !os(Android) public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { - let source = dispatch_source_create(_swift_dispatch_source_type_vnode(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_VNODE(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceFileSystemObject } #endif public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? 
= nil) -> DispatchSourceWrite { - let source = dispatch_source_create(_swift_dispatch_source_type_write(), UInt(fileDescriptor), 0, queue?.__wrapped) + let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), UInt(fileDescriptor), 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceWrite } } #if HAVE_MACH -public extension DispatchSourceMachSend { +extension DispatchSourceMachSend { public var handle: mach_port_t { return mach_port_t(dispatch_source_get_handle(self as! DispatchSource)) } @@ -239,7 +240,7 @@ public extension DispatchSourceMachSend { #endif #if HAVE_MACH -public extension DispatchSourceMachReceive { +extension DispatchSourceMachReceive { public var handle: mach_port_t { return mach_port_t(dispatch_source_get_handle(self as! DispatchSource)) } @@ -247,7 +248,7 @@ public extension DispatchSourceMachReceive { #endif #if HAVE_MACH -public extension DispatchSourceMemoryPressure { +extension DispatchSourceMemoryPressure { public var data: DispatchSource.MemoryPressureEvent { let data = dispatch_source_get_data(self as! DispatchSource) return DispatchSource.MemoryPressureEvent(rawValue: data) @@ -261,24 +262,22 @@ public extension DispatchSourceMemoryPressure { #endif #if !os(Linux) && !os(Android) -public extension DispatchSourceProcess { +extension DispatchSourceProcess { public var handle: pid_t { return pid_t(dispatch_source_get_handle(self as! DispatchSource)) } public var data: DispatchSource.ProcessEvent { - let data = dispatch_source_get_data(self as! DispatchSource) - return DispatchSource.ProcessEvent(rawValue: data) + return DispatchSource.ProcessEvent(rawValue: (self as! DispatchSource).data) } public var mask: DispatchSource.ProcessEvent { - let mask = dispatch_source_get_mask(self as! DispatchSource) - return DispatchSource.ProcessEvent(rawValue: mask) + return DispatchSource.ProcessEvent(rawValue: (self as! 
DispatchSource).mask) } } #endif -public extension DispatchSourceTimer { +extension DispatchSourceTimer { /// /// Sets the deadline and leeway for a timer event that fires once. /// @@ -619,46 +618,46 @@ public extension DispatchSourceTimer { } #if !os(Linux) && !os(Android) -public extension DispatchSourceFileSystemObject { +extension DispatchSourceFileSystemObject { public var handle: Int32 { return Int32(dispatch_source_get_handle((self as! DispatchSource).__wrapped)) } public var data: DispatchSource.FileSystemEvent { let data = dispatch_source_get_data((self as! DispatchSource).__wrapped) - return DispatchSource.FileSystemEvent(rawValue: data) + return DispatchSource.FileSystemEvent(rawValue: UInt(data)) } public var mask: DispatchSource.FileSystemEvent { let data = dispatch_source_get_mask((self as! DispatchSource).__wrapped) - return DispatchSource.FileSystemEvent(rawValue: data) + return DispatchSource.FileSystemEvent(rawValue: UInt(data)) } } #endif -public extension DispatchSourceUserDataAdd { +extension DispatchSourceUserDataAdd { /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_ADD` /// and submits its event handler block to its target queue. /// /// - parameter data: the value to add to the current pending data. A value of zero /// has no effect and will not result in the submission of the event handler block. public func add(data: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) + dispatch_source_merge_data((self as! DispatchSource).__wrapped, UInt(data)) } } -public extension DispatchSourceUserDataOr { +extension DispatchSourceUserDataOr { /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_OR` and /// submits its event handler block to its target queue. /// /// - parameter data: The value to OR into the current pending data. A value of zero /// has no effect and will not result in the submission of the event handler block. 
public func or(data: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) + dispatch_source_merge_data((self as! DispatchSource).__wrapped, UInt(data)) } } -public extension DispatchSourceUserDataReplace { +extension DispatchSourceUserDataReplace { /// Merges data into a dispatch source of type `DISPATCH_SOURCE_TYPE_DATA_REPLACE` /// and submits its event handler block to its target queue. /// @@ -666,48 +665,6 @@ public extension DispatchSourceUserDataReplace { /// A value of zero will be stored but will not result in the submission of the event /// handler block. public func replace(data: UInt) { - dispatch_source_merge_data((self as! DispatchSource).__wrapped, data) + dispatch_source_merge_data((self as! DispatchSource).__wrapped, UInt(data)) } } - -@_silgen_name("_swift_dispatch_source_type_DATA_ADD") -internal func _swift_dispatch_source_type_data_add() -> dispatch_source_type_t - -@_silgen_name("_swift_dispatch_source_type_DATA_OR") -internal func _swift_dispatch_source_type_data_or() -> dispatch_source_type_t - -@_silgen_name("_swift_dispatch_source_type_DATA_REPLACE") -internal func _swift_dispatch_source_type_data_replace() -> dispatch_source_type_t - -#if HAVE_MACH -@_silgen_name("_swift_dispatch_source_type_MACH_SEND") -internal func _swift_dispatch_source_type_mach_send() -> dispatch_source_type_t - -@_silgen_name("_swift_dispatch_source_type_MACH_RECV") -internal func _swift_dispatch_source_type_mach_recv() -> dispatch_source_type_t - -@_silgen_name("_swift_dispatch_source_type_MEMORYPRESSURE") -internal func _swift_dispatch_source_type_memorypressure() -> dispatch_source_type_t -#endif - -#if !os(Linux) && !os(Android) -@_silgen_name("_swift_dispatch_source_type_PROC") -internal func _swift_dispatch_source_type_proc() -> dispatch_source_type_t -#endif - -@_silgen_name("_swift_dispatch_source_type_READ") -internal func _swift_dispatch_source_type_read() -> dispatch_source_type_t - 
-@_silgen_name("_swift_dispatch_source_type_SIGNAL") -internal func _swift_dispatch_source_type_signal() -> dispatch_source_type_t - -@_silgen_name("_swift_dispatch_source_type_TIMER") -internal func _swift_dispatch_source_type_timer() -> dispatch_source_type_t - -#if !os(Linux) && !os(Android) -@_silgen_name("_swift_dispatch_source_type_VNODE") -internal func _swift_dispatch_source_type_vnode() -> dispatch_source_type_t -#endif - -@_silgen_name("_swift_dispatch_source_type_WRITE") -internal func _swift_dispatch_source_type_write() -> dispatch_source_type_t diff --git a/src/swift/Time.swift b/src/swift/Time.swift index 538cd42fb..b30e1f1b8 100644 --- a/src/swift/Time.swift +++ b/src/swift/Time.swift @@ -5,8 +5,8 @@ // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// @@ -170,7 +170,6 @@ public enum DispatchTimeInterval { case milliseconds(Int) case microseconds(Int) case nanoseconds(Int) - @_downgrade_exhaustivity_check case never internal var rawValue: Int64 { diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index 5a551dfba..649043d95 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -5,12 +5,13 @@ // Copyright (c) 2014 - 2016 Apple Inc. 
and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // -// See http://swift.org/LICENSE.txt for license information -// See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors +// See https://swift.org/LICENSE.txt for license information +// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// import CDispatch +import _SwiftDispatchOverlayShims // This file contains declarations that are provided by the // importer via Dispatch.apinote when the platform has Objective-C support @@ -75,7 +76,7 @@ public class DispatchSemaphore : DispatchObject { } public init(value: Int) { - __wrapped = dispatch_semaphore_create(value) + __wrapped = dispatch_semaphore_create(Int(value)) } deinit { @@ -92,17 +93,17 @@ public class DispatchIO : DispatchObject { internal init(__type: UInt, fd: Int32, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { - __wrapped = dispatch_io_create(__type, fd, queue.__wrapped, handler) + __wrapped = dispatch_io_create(dispatch_io_type_t(__type), dispatch_fd_t(fd), queue.__wrapped, handler) } internal init(__type: UInt, path: UnsafePointer, oflag: Int32, mode: mode_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { - __wrapped = dispatch_io_create_with_path(__type, path, oflag, mode, queue.__wrapped, handler) + __wrapped = dispatch_io_create_with_path(dispatch_io_type_t(__type), path, oflag, mode, queue.__wrapped, handler) } internal init(__type: UInt, io: DispatchIO, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { - __wrapped = dispatch_io_create_with_io(__type, io.__wrapped, queue.__wrapped, handler) + __wrapped = dispatch_io_create_with_io(dispatch_io_type_t(__type), io.__wrapped, queue.__wrapped, handler) } deinit { @@ -114,7 +115,7 @@ public class DispatchIO : DispatchObject { } public var fileDescriptor: Int32 { - 
return dispatch_io_get_descriptor(__wrapped) + return Int32(dispatch_io_get_descriptor(__wrapped)) } public func setLimit(highWater: Int) { @@ -335,9 +336,3 @@ internal enum _OSQoSClass : UInt32 { } } } - -@_silgen_name("_swift_dispatch_release") -internal func _swift_dispatch_release(_ obj: dispatch_object_t) -> Void - -@_silgen_name("_swift_dispatch_retain") -internal func _swift_dispatch_retain(_ obj: dispatch_object_t) -> Void diff --git a/src/transform.c b/src/transform.c index 93d600355..39147fa7a 100644 --- a/src/transform.c +++ b/src/transform.c @@ -26,10 +26,25 @@ #include #define OSLittleEndian __LITTLE_ENDIAN #define OSBigEndian __BIG_ENDIAN +#elif defined(__FreeBSD__) +#include +#define OSLittleEndian _LITTLE_ENDIAN +#define OSBigEndian _BIG_ENDIAN +#elif defined(_WIN32) +#define OSLittleEndian 1234 +#define OSBigEndian 4321 +#endif + +#if defined(__linux__) || defined(__FreeBSD__) #define OSSwapLittleToHostInt16 le16toh #define OSSwapBigToHostInt16 be16toh #define OSSwapHostToLittleInt16 htole16 #define OSSwapHostToBigInt16 htobe16 +#elif defined(_WIN32) +#define OSSwapLittleToHostInt16 +#define OSSwapBigToHostInt16 ntohs +#define OSSwapHostToLittleInt16 +#define OSSwapHostToBigInt16 htons #endif #if defined(__LITTLE_ENDIAN__) @@ -669,7 +684,7 @@ _dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned ch dest_size = howmany(total, 5); // // os_mul_overflow(dest_size, 8, &dest_size) - if (dest_size > SIZE_T_MAX / 8) { + if (dest_size > SIZE_MAX / 8) { return NULL; } dest_size *= 8; @@ -904,7 +919,7 @@ _dispatch_transform_to_base64(dispatch_data_t data) dest_size = howmany(total, 3); // // os_mul_overflow(dest_size, 4, &dest_size) - if (dest_size > SIZE_T_MAX / 4) { + if (dest_size > SIZE_MAX / 4) { return NULL; } dest_size *= 4; diff --git a/src/voucher.c b/src/voucher.c index 39a8cbacf..1fbc24717 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -20,14 +20,6 @@ #include "internal.h" -#if !defined(VOUCHER_EXPORT_PERSONA_SPI) -#if 
TARGET_OS_IPHONE -#define VOUCHER_EXPORT_PERSONA_SPI 1 -#else -#define VOUCHER_EXPORT_PERSONA_SPI 0 -#endif -#endif - #ifndef PERSONA_ID_NONE #define PERSONA_ID_NONE ((uid_t)-1) #endif @@ -157,7 +149,7 @@ voucher_release(voucher_t voucher) return _voucher_release(voucher); } -void +void DISPATCH_TSD_DTOR_CC _voucher_thread_cleanup(void *voucher) { // when a thread exits and has a voucher left, the kernel @@ -804,12 +796,9 @@ _voucher_dispose(voucher_t voucher) voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; #endif +#if !USE_OBJC return _os_object_dealloc((_os_object_t)voucher); -} - -static void -_voucher_activity_debug_channel_barrier_nop(void *ctxt DISPATCH_UNUSED) -{ +#endif // !USE_OBJC } void @@ -836,9 +825,6 @@ _voucher_activity_debug_channel_init(void) DISPATCH_TARGET_QUEUE_DEFAULT, NULL, handler); dm->dm_recv_refs->du_can_be_wlh = false; // 29906118 dispatch_mach_connect(dm, dbgp, MACH_PORT_NULL, NULL); - // will force the DISPATCH_MACH_CONNECTED event - dispatch_mach_send_barrier_f(dm, NULL, - _voucher_activity_debug_channel_barrier_nop); _voucher_activity_debug_channel = dm; } } @@ -857,7 +843,142 @@ _voucher_atfork_child(void) _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE } -#if VOUCHER_EXPORT_PERSONA_SPI +voucher_t +voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) +{ +#if !VOUCHER_USE_PERSONA + (void)persona_mach_voucher; + return voucher_copy(); +#else // !VOUCHER_USE_PERSONA + if (!persona_mach_voucher) return voucher_copy(); + kern_return_t kr; + mach_voucher_t okv = MACH_VOUCHER_NULL, kv; + voucher_t ov = _voucher_get(); + if (ov) { + okv = ov->v_ipc_kvoucher ? 
ov->v_ipc_kvoucher : ov->v_kvoucher; + } + const mach_voucher_attr_recipe_data_t bank_redeem_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_ALL, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = okv, + }, + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_REDEEM, + .previous_voucher = persona_mach_voucher, + }, + }; + kr = _voucher_create_mach_voucher(bank_redeem_recipe, + sizeof(bank_redeem_recipe), &kv); + if (dispatch_assume_zero(kr)) { + if (kr == KERN_INVALID_CAPABILITY) { + // bank attribute redeem failed + return VOUCHER_INVALID; + } + kv = MACH_VOUCHER_NULL; + } + if (kv == okv) { + if (kv) _voucher_dealloc_mach_voucher(kv); + return _voucher_retain(ov); + } + voucher_t v = _voucher_find_and_retain(kv); + if (v && (!ov || ov->v_ipc_kvoucher)) { + _dispatch_voucher_debug("kvoucher[0x%08x] find with persona " + "from voucher[%p]", v, kv, ov); + _voucher_dealloc_mach_voucher(kv); + return v; + } + voucher_t kvbase = v; + voucher_fields_t ignore_fields = VOUCHER_FIELD_KVOUCHER; + v = _voucher_clone(ov, ignore_fields); + v->v_kvoucher = kv; + if (!ov || ov->v_ipc_kvoucher) { + v->v_ipc_kvoucher = kv; + _voucher_insert(v); + } else if (kvbase) { + v->v_kvbase = kvbase; + _voucher_dealloc_mach_voucher(kv); // borrow base reference + } + if (!kvbase) { + _dispatch_voucher_debug("kvoucher[0x%08x] create with persona " + "from voucher[%p]", v, kv, ov); + } + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); + return v; +#endif // VOUCHER_USE_PERSONA +} + +kern_return_t +mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher) +{ + mach_voucher_t bkv = MACH_VOUCHER_NULL; + kern_return_t kr = KERN_NOT_SUPPORTED; +#if VOUCHER_USE_PERSONA + const mach_voucher_attr_recipe_data_t bank_send_recipe[] = { + [0] = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_BANK_CREATE, + }, + [1] = { + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_SEND_PREPROCESS, + }, + }; + kr = 
_voucher_create_mach_voucher(bank_send_recipe, + sizeof(bank_send_recipe), &bkv); + if (dispatch_assume_zero(kr)) { + bkv = MACH_VOUCHER_NULL; + } +#endif // VOUCHER_USE_PERSONA + *persona_mach_voucher = bkv; + return kr; +} + +kern_return_t +mach_voucher_persona_for_originator(uid_t persona_id, + mach_voucher_t originator_persona_mach_voucher, + uint64_t originator_unique_pid, mach_voucher_t *persona_mach_voucher) +{ + mach_voucher_t bkv = MACH_VOUCHER_NULL; + kern_return_t kr = KERN_NOT_SUPPORTED; +#if VOUCHER_USE_PERSONA + struct persona_modify_info modify_info = { + .persona_id = persona_id, + .unique_pid = originator_unique_pid, + }; + size_t bank_modify_recipe_size = _voucher_mach_recipe_size(0) + + _voucher_mach_recipe_size(sizeof(modify_info)); + mach_voucher_attr_recipe_t bank_modify_recipe = + (mach_voucher_attr_recipe_t)alloca(bank_modify_recipe_size); + + bzero((void *)bank_modify_recipe, bank_modify_recipe_size); + + bank_modify_recipe[0] = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_COPY, + .previous_voucher = originator_persona_mach_voucher, + }; + bank_modify_recipe[1] = (mach_voucher_attr_recipe_data_t){ + .key = MACH_VOUCHER_ATTR_KEY_BANK, + .command = MACH_VOUCHER_ATTR_BANK_MODIFY_PERSONA, + .content_size = sizeof(modify_info), + }; + _dispatch_memappend(bank_modify_recipe[1].content, &modify_info); + kr = _voucher_create_mach_voucher(bank_modify_recipe, + bank_modify_recipe_size, &bkv); + if (dispatch_assume_zero(kr)) { + bkv = MACH_VOUCHER_NULL; + } +#else // VOUCHER_USE_PERSONA + (void)persona_id; + (void)originator_persona_mach_voucher; + (void)originator_unique_pid; +#endif // VOUCHER_USE_PERSONA + *persona_mach_voucher = bkv; + return kr; +} + #if VOUCHER_USE_PERSONA static kern_return_t _voucher_get_current_persona_token(struct persona_token *token) @@ -886,7 +1007,35 @@ _voucher_get_current_persona_token(struct persona_token *token) } return kr; } -#endif + +static kern_return_t 
+_voucher_get_current_persona_id(uid_t *persona_id) +{ + kern_return_t kr = KERN_FAILURE; + voucher_t v = _voucher_get(); + + if (v && v->v_kvoucher) { + mach_voucher_t kv = v->v_ipc_kvoucher ?: v->v_kvoucher; + mach_voucher_attr_content_t kvc_in = NULL; + mach_voucher_attr_content_size_t kvc_in_size = 0; + mach_voucher_attr_content_t kvc_out = + (mach_voucher_attr_content_t)persona_id; + mach_voucher_attr_content_size_t kvc_out_size = sizeof(*persona_id); + + kr = mach_voucher_attr_command(kv, MACH_VOUCHER_ATTR_KEY_BANK, + BANK_PERSONA_ID, kvc_in, kvc_in_size, + kvc_out, &kvc_out_size); + if (kr != KERN_NOT_SUPPORTED + // Voucher doesn't have a persona id + && kr != KERN_INVALID_VALUE + // Kernel doesn't understand BANK_PERSONA_ID + && kr != KERN_INVALID_ARGUMENT) { + (void)dispatch_assume_zero(kr); + } + } + return kr; +} +#endif // VOUCHER_USE_PERSONA uid_t voucher_get_current_persona(void) @@ -894,11 +1043,10 @@ voucher_get_current_persona(void) uid_t persona_id = PERSONA_ID_NONE; #if VOUCHER_USE_PERSONA - struct persona_token token; int err; - if (_voucher_get_current_persona_token(&token) == KERN_SUCCESS) { - return token.originator.persona_id; + if (_voucher_get_current_persona_id(&persona_id) == KERN_SUCCESS) { + return persona_id; } // falling back to the process persona if there is no adopted voucher @@ -908,7 +1056,7 @@ voucher_get_current_persona(void) (void)dispatch_assume_zero(err); } } -#endif +#endif // VOUCHER_USE_PERSONA return persona_id; } @@ -921,9 +1069,9 @@ voucher_get_current_persona_originator_info(struct proc_persona_info *persona_in *persona_info = token.originator; return 0; } -#else +#else // VOUCHER_USE_PERSONA (void)persona_info; -#endif +#endif // VOUCHER_USE_PERSONA return -1; } @@ -936,12 +1084,11 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf *persona_info = token.proximate; return 0; } -#else +#else // VOUCHER_USE_PERSONA (void)persona_info; -#endif +#endif // VOUCHER_USE_PERSONA return -1; } 
-#endif #pragma mark - #pragma mark _voucher_init @@ -1145,6 +1292,16 @@ voucher_activity_get_logging_preferences(size_t *length) return firehose_buffer_get_logging_prefs(_firehose_task_buffer, length); } +bool +voucher_activity_should_send_strings(void) +{ + if (unlikely(_voucher_activity_disabled())) { + return false; + } + + return firehose_buffer_should_send_strings(_firehose_task_buffer); +} + void * voucher_activity_get_metadata_buffer(size_t *length) { @@ -1643,7 +1800,7 @@ voucher_decrement_importance_count4CF(voucher_t v) (void)v; } -void +void DISPATCH_TSD_DTOR_CC _voucher_thread_cleanup(void *voucher) { (void)voucher; @@ -1715,7 +1872,30 @@ _voucher_dispose(voucher_t voucher) (void)voucher; } -#if VOUCHER_EXPORT_PERSONA_SPI +voucher_t +voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) +{ + (void)persona_mach_voucher; + return NULL; +} + +kern_return_t +mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher) +{ + (void)persona_mach_voucher; + return KERN_NOT_SUPPORTED; +} + +kern_return_t +mach_voucher_persona_for_originator(uid_t persona_id, + mach_voucher_t originator_persona_mach_voucher, + uint64_t originator_unique_pid, mach_voucher_t *persona_mach_voucher) +{ + (void)persona_id; (void)originator_persona_mach_voucher; + (void)originator_unique_pid; (void)persona_mach_voucher; + return KERN_NOT_SUPPORTED; +} + uid_t voucher_get_current_persona(void) { @@ -1735,7 +1915,6 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf (void)persona_info; return -1; } -#endif // VOUCHER_EXPORT_PERSONA_SPI void _voucher_activity_debug_channel_init(void) diff --git a/src/voucher_internal.h b/src/voucher_internal.h index d1a5e70e8..37d0935ac 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -97,7 +97,7 @@ void _voucher_activity_swap(firehose_activity_id_t old_id, void _voucher_xref_dispose(voucher_t voucher); void _voucher_dispose(voucher_t voucher); size_t _voucher_debug(voucher_t v, 
char* buf, size_t bufsiz); -void _voucher_thread_cleanup(void *voucher); +void DISPATCH_TSD_DTOR_CC _voucher_thread_cleanup(void *voucher); mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher); voucher_t _voucher_create_without_importance(voucher_t voucher); voucher_t _voucher_create_accounting_voucher(voucher_t voucher); @@ -309,7 +309,7 @@ _voucher_release_inline(struct voucher_s *voucher) if (unlikely(xref_cnt < -1)) { _OS_OBJECT_CLIENT_CRASH("Voucher over-release"); } - return _os_object_xref_dispose((_os_object_t)voucher); + return _voucher_xref_dispose((voucher_t)voucher); } #if DISPATCH_PURE_C diff --git a/tools/firehose_trace.lua b/tools/firehose_trace.lua deleted file mode 100755 index 90b969dec..000000000 --- a/tools/firehose_trace.lua +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/local/bin/luatrace -s - -trace_codename = function(codename, callback) - local debugid = trace.debugid(codename) - if debugid ~= 0 then - trace.single(debugid,callback) - else - printf("WARNING: Cannot locate debugid for '%s'\n", codename) - end -end - -initial_timestamp = 0 -get_prefix = function(buf) - if initial_timestamp == 0 then - initial_timestamp = buf.timestamp - end - local secs = trace.convert_timestamp_to_nanoseconds(buf.timestamp - initial_timestamp) / 1000000000 - - local prefix - if trace.debugid_is_start(buf.debugid) then - prefix = "→" - elseif trace.debugid_is_end(buf.debugid) then - prefix = "â†" - else - prefix = "↔" - end - - local proc - proc = buf.command - - return string.format("%s %6.9f %-17s [%05d.%06x] %-35s", - prefix, secs, proc, buf.pid, buf.threadid, buf.debugname) -end - -decode_stream_state = function(state) - local reliable_waiters = "-" - if (state & 0x1) ~= 0 then - reliable_waiters = "R" - end - local unreliable_waiters = "-" - if (state & 0x2) ~= 0 then - unreliable_waiters = "U" - end - local allocator = state & 0x00000000fffffffc - local ref = (state & 0x000000ff00000000) >> 32 - local loss = (state & 0x00003f0000000000) >> 40 - local 
timestamped = "-" - if (state & 0x0000400000000000) ~= 0 then - timestamped = "T" - end - local waiting_for_logd = "-" - if (state & 0x0000800000000000) ~= 0 then - waiting_for_logd = "W" - end - local gen = (state & 0xffff000000000000) >> 48 - return string.format("[stream: alloc=0x%08x ref=%u loss=%u gen=%u %s%s%s%s]", - allocator, ref, loss, gen, reliable_waiters, unreliable_waiters, - timestamped, waiting_for_logd) -end - -trace_codename("DISPATCH_FIREHOSE_TRACE_reserver_gave_up", function(buf) - printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), - decode_stream_state(buf[4])) -end) - -trace_codename("DISPATCH_FIREHOSE_TRACE_reserver_wait", function(buf) - printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), - decode_stream_state(buf[4])) -end) - -trace_codename("DISPATCH_FIREHOSE_TRACE_allocator", function(buf) - printf("%s %s -> %s\n", get_prefix(buf), decode_stream_state(buf[3]), - decode_stream_state(buf[4])) -end) - -trace_codename("DISPATCH_FIREHOSE_TRACE_wait_for_logd", function(buf) - printf("%s %s\n", get_prefix(buf), decode_stream_state(buf[2])) -end) - -trace_codename("DISPATCH_FIREHOSE_TRACE_chunk_install", function(buf) - printf("%s %s -> %s, waited=%u\n", get_prefix(buf), decode_stream_state(buf[3]), - decode_stream_state(buf[4]), buf[2] >> 63) -end) diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig index dd1814db9..763bafe1e 100644 --- a/xcodeconfig/libdispatch-dyld-stub.xcconfig +++ b/xcodeconfig/libdispatch-dyld-stub.xcconfig @@ -21,7 +21,7 @@ PRODUCT_NAME = libdispatch_dyld_stub INSTALL_PATH = /usr/local/lib/dyld_stub BUILD_VARIANTS = normal -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) OTHER_LDFLAGS = VERSIONING_SYSTEM = EXCLUDED_SOURCE_FILE_NAMES = * diff --git 
a/xcodeconfig/libdispatch-introspection.xcconfig b/xcodeconfig/libdispatch-introspection.xcconfig index c7826d5e6..a2f98f9ee 100644 --- a/xcodeconfig/libdispatch-introspection.xcconfig +++ b/xcodeconfig/libdispatch-introspection.xcconfig @@ -21,6 +21,6 @@ BUILD_VARIANTS = normal INSTALL_PATH = /usr/lib/system/introspection -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) DISPATCH_INTROSPECTION=1 +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_INTROSPECTION=1 CONFIGURATION_BUILD_DIR = $(BUILD_DIR)/introspection OTHER_LDFLAGS = $(OTHER_LDFLAGS) -Wl,-interposable_list,$(SRCROOT)/xcodeconfig/libdispatch.interposable diff --git a/xcodeconfig/libdispatch-mp-static.xcconfig b/xcodeconfig/libdispatch-mp-static.xcconfig index af3715f1e..22dc9c275 100644 --- a/xcodeconfig/libdispatch-mp-static.xcconfig +++ b/xcodeconfig/libdispatch-mp-static.xcconfig @@ -23,7 +23,7 @@ SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos PRODUCT_NAME = libdispatch INSTALL_PATH = /usr/local/lib/system BUILD_VARIANTS = normal debug -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) $(STATICLIB_PREPROCESSOR_DEFINITIONS) +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) $(STATICLIB_PREPROCESSOR_DEFINITIONS) OTHER_LDFLAGS = SKIP_INSTALL[sdk=*simulator*] = YES EXCLUDED_SOURCE_FILE_NAMES[sdk=*simulator*] = * diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index d8a5113a2..24cbc6b2b 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -23,5 +23,9 @@ __dispatch_queue_attrs __dispatch_queue_attr_concurrent __dispatch_source_type_memorypressure __dispatch_source_type_memorystatus _dispatch_assert_queue$V2 _dispatch_assert_queue _dispatch_assert_queue_not$V2 _dispatch_assert_queue_not +_dispatch_async _dispatch_channel_async +_dispatch_async_f _dispatch_channel_async_f _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target +_dispatch_source_cancel _dispatch_channel_cancel 
_dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF +_dispatch_source_testcancel _dispatch_channel_testcancel diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean index 35476a711..25a5711a2 100644 --- a/xcodeconfig/libdispatch.clean +++ b/xcodeconfig/libdispatch.clean @@ -1,5 +1,5 @@ # -# Copyright (c) 2013 Apple Inc. All rights reserved. +# Copyright (c) 2018 Apple Inc. All rights reserved. # # @APPLE_APACHE_LICENSE_HEADER_START@ # @@ -18,6 +18,7 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # +__MergedGlobals __dispatch_bug.last_seen __dispatch_bug_deprecated.last_seen __dispatch_bug_kevent_client.last_seen @@ -44,3 +45,5 @@ _dispatch_benchmark_f.pred _dispatch_io_defaults _dispatch_log_disabled _dispatch_logfile + +__dyld_private diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index d8d1a0d6e..b10789292 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -48,6 +48,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_channel +__OS_dispatch_channel_vtable _OBJC_CLASS_$_OS_dispatch_mach __OS_dispatch_mach_vtable _OBJC_CLASS_$_OS_dispatch_mach_msg @@ -80,6 +82,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_channel _OBJC_METACLASS_$_OS_dispatch_mach _OBJC_METACLASS_$_OS_dispatch_mach_msg _OBJC_METACLASS_$_OS_dispatch_io diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index b586837d5..8ea917e20 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -48,6 +48,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_attr __OS_dispatch_queue_attr_vtable _OBJC_CLASS_$_OS_dispatch_source __OS_dispatch_source_vtable +_OBJC_CLASS_$_OS_dispatch_channel +__OS_dispatch_channel_vtable 
_OBJC_CLASS_$_OS_dispatch_mach __OS_dispatch_mach_vtable _OBJC_CLASS_$_OS_dispatch_mach_msg @@ -80,6 +82,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue_runloop _OBJC_METACLASS_$_OS_dispatch_queue_mgr _OBJC_METACLASS_$_OS_dispatch_queue_attr _OBJC_METACLASS_$_OS_dispatch_source +_OBJC_METACLASS_$_OS_dispatch_channel _OBJC_METACLASS_$_OS_dispatch_mach _OBJC_METACLASS_$_OS_dispatch_mach_msg _OBJC_METACLASS_$_OS_dispatch_io diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index 2c825f7f6..d0b3c9f4d 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -25,17 +25,31 @@ SDKROOT = macosx.internal SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator PRODUCT_NAME = libdispatch EXECUTABLE_PREFIX = -INSTALL_PATH = /usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = /usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/os + +SDK_INSTALL_VARIANT = $(SDK_INSTALL_VARIANT_$(DRIVERKIT)) +SDK_INSTALL_VARIANT_1 = driverkit +SDK_INSTALL_VARIANT_ = default +SDK_INSTALL_ROOT = $(SDK_INSTALL_ROOT_$(SDK_INSTALL_VARIANT)) +SDK_INSTALL_ROOT_driverkit = $(DRIVERKITROOT) +SDK_INSTALL_HEADERS_ROOT = $(SDK_INSTALL_HEADERS_ROOT_$(SDK_INSTALL_VARIANT)) +SDK_INSTALL_HEADERS_ROOT_driverkit = $(SDK_INSTALL_ROOT)/$(SDK_RUNTIME_HEADERS_PREFIX) +SDK_RUNTIME_HEADERS_PREFIX = Runtime + +INSTALL_PATH = $(SDK_INSTALL_ROOT)/usr/lib/system +PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/dispatch +PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/dispatch +OS_PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/os +OS_PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/os HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src -LIBRARY_SEARCH_PATHS = 
$(SDKROOT)/usr/lib/system $(SDKROOT)/usr/local/lib +LIBRARY_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/lib/system $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/local/lib +SYSTEM_HEADER_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include +SYSTEM_FRAMEWORK_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks + INSTALLHDRS_SCRIPT_PHASE = YES ALWAYS_SEARCH_USER_PATHS = NO USE_HEADERMAP = NO BUILD_VARIANTS = normal debug profile + ONLY_ACTIVE_ARCH = NO CLANG_LINK_OBJC_RUNTIME = NO GCC_C_LANGUAGE_STANDARD = gnu11 @@ -77,11 +91,67 @@ CLANG_WARN_UNGUARDED_AVAILABILITY = YES GCC_TREAT_WARNINGS_AS_ERRORS = YES GCC_OPTIMIZATION_LEVEL = s GCC_NO_COMMON_BLOCKS = YES -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 $(DISPATCH_PREPROCESSOR_DEFINITIONS) +GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 +GCC_PREPROCESSOR_DEFINITIONS[sdk=driverkit*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 -WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Watomic-properties -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wexpansion-to-defined -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wnullable-to-nonnull-conversion -Wobjc-interface-ivars -Wover-aligned -Wpacked -Wpointer-arith -Wselector -Wstatic-in-inline -Wsuper-class-method-mismatch -Wswitch-enum -Wtautological-compare -Wunused -Wno-unknown-warning-option $(NO_WARNING_CFLAGS) -NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++-compat -Wno-c++98-compat -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral 
-Wno-missing-variable-declarations -Wno-old-style-cast -Wno-padded -Wno-reserved-id-macro -Wno-shift-sign-overflow -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused -Wno-vla -Wno-unguarded-availability-new -OTHER_CFLAGS = -fverbose-asm -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(PLATFORM_CFLAGS) + +WARNING_CFLAGS = + +// warnings we want +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wall +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wextra +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wmost +WARNING_CFLAGS = $(WARNING_CFLAGS) -Warray-bounds-pointer-arithmetic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Watomic-properties +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcomma +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wconditional-uninitialized +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcovered-switch-default +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdate-time +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdeprecated +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdouble-promotion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wduplicate-enum +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wexpansion-to-defined +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wfloat-equal +WARNING_CFLAGS = $(WARNING_CFLAGS) -Widiomatic-parentheses +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wignored-qualifiers +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wnullable-to-nonnull-conversion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wobjc-interface-ivars +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wover-aligned +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpacked +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpointer-arith +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wselector +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wstatic-in-inline +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wsuper-class-method-mismatch +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wswitch +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wtautological-compare +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wunused + +// silenced warnings +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unknown-warning-option +WARNING_CFLAGS = $(WARNING_CFLAGS) 
-Wno-pedantic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-bad-function-cast +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++-compat +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat-pedantic +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-align +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-qual +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-disabled-macro-expansion +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-documentation-unknown-command +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-format-nonliteral +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-missing-variable-declarations +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-old-style-cast +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-padded +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-reserved-id-macro +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-shift-sign-overflow +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-undef +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unreachable-code-aggressive +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unused-macros +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-used-but-marked-unused +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-vla +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unguarded-availability-new +WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-switch-enum // -Wswitch is enough, this forces explicit listing of all cases mandatory + +OTHER_CFLAGS = -fverbose-asm $(PLATFORM_CFLAGS) OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions OTHER_CFLAGS_normal = -momit-leaf-frame-pointer OTHER_CFLAGS_profile = $(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1 @@ -89,15 +159,25 @@ OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_D GENERATE_PROFILING_CODE = NO DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) SIM_SUFFIX[sdk=*simulator*] = _sim -DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread 
-lsystem_malloc -lsystem_c -lsystem_blocks -lunwind +DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks +UNWIND_LDFLAGS = -lunwind +UNWIND_LDFLAGS[sdk=driverkit*] = OBJC_LDFLAGS = -Wl,-upward-lobjc +OBJC_LDFLAGS[sdk=driverkit*] = LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin LIBDARWIN_LDFLAGS[sdk=*simulator*] = +LIBDARWIN_LDFLAGS[sdk=driverkit*] = ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order +ORDER_LDFLAGS[sdk=driverkit*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) -OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(CR_LDFLAGS) $(UNWIND_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) +OTHER_MIGFLAGS = -novouchers -I$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/include -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/local/include + +OBJC_SOURCE_FILE_NAMES = *.m +EXCLUDED_SOURCE_FILE_NAMES = $(EXCLUDED_SOURCE_FILE_NAMES_$(SDK_INSTALL_VARIANT)) +EXCLUDED_SOURCE_FILE_NAMES_driverkit = $(EXCLUDED_SOURCE_FILE_NAMES_default) $(OBJC_SOURCE_FILE_NAMES) + COPY_HEADERS_RUN_UNIFDEF = YES COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__ diff --git a/xcodeconfig/libfirehose.xcconfig b/xcodeconfig/libfirehose.xcconfig index 4c711994c..547b13ad5 100644 --- 
a/xcodeconfig/libfirehose.xcconfig +++ b/xcodeconfig/libfirehose.xcconfig @@ -21,7 +21,7 @@ SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/ -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) FIREHOSE_SERVER=1 DISPATCH_USE_DTRACE=0 OTHER_MIGFLAGS = -novouchers OTHER_LDFLAGS = PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig index c572f80e7..e6d83a3aa 100644 --- a/xcodeconfig/libfirehose_kernel.xcconfig +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -18,16 +18,20 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -#include "libfirehose.xcconfig" - SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/kernel/ -GCC_PREPROCESSOR_DEFINITIONS = $(inherited) KERNEL=1 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 +OTHER_MIGFLAGS = -novouchers +OTHER_LDFLAGS = OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed -// LLVM_LTO = YES +PUBLIC_HEADERS_FOLDER_PATH = /usr/include/os PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/kernel/os HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/usr/local/include/os $(SDKROOT)/usr/local/include/firehose +STRIP_INSTALLED_PRODUCT = NO +COPY_PHASE_STRIP = NO +SEPARATE_STRIP = NO +VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL) COPY_HEADERS_RUN_UNIFDEF = YES COPY_HEADERS_UNIFDEF_FLAGS = -DKERNEL=1 -DOS_FIREHOSE_SPI=1 -DOS_VOUCHER_ACTIVITY_SPI_TYPES=1 -UOS_VOUCHER_ACTIVITY_SPI diff --git a/xcodescripts/check-order.sh b/xcodescripts/check-order.sh index 62133485d..3801df0ee 100644 --- 
a/xcodescripts/check-order.sh +++ b/xcodescripts/check-order.sh @@ -1,15 +1,38 @@ -#! /bin/bash -e +#!/bin/bash -e +# +# Copyright (c) 2018 Apple Inc. All rights reserved. +# +# @APPLE_APACHE_LICENSE_HEADER_START@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# @APPLE_APACHE_LICENSE_HEADER_END@ +# test "$ACTION" = install || exit 0 list_objc_syms () { - nm -arch $1 -nU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep _OBJC | cut -d' ' -f3 + nm -arch $1 -jnU ${DSTROOT}/usr/lib/system/libdispatch.dylib | grep -E '^_OBJC_(CLASS|METACLASS)_\$' } list_mutable_data_syms () { - nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib |grep __DATA|egrep -v '(__const|__crash_info)'|sed 's/^.* //' + nm -arch $1 -m ${DSTROOT}/usr/lib/system/libdispatch.dylib | awk ' + /__DATA.* _OBJC_(CLASS|METACLASS)_\$/{ print $NF; next } + /__const|__crash_info| _OBJC| __OBJC/{ next } + /__DATA/{ print $NF } + ' } list_objc_order () diff --git a/xcodescripts/mig-headers.sh b/xcodescripts/mig-headers.sh index bd477c027..e2aff4c59 100755 --- a/xcodescripts/mig-headers.sh +++ b/xcodescripts/mig-headers.sh @@ -19,6 +19,7 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # + export MIGCC="$(xcrun -find cc)" export MIGCOM="$(xcrun -find migcom)" export PATH="${PLATFORM_DEVELOPER_BIN_DIR}:${DEVELOPER_BIN_DIR}:${PATH}" From df1a20f0926bdda255d019b04b0a771df66448fa Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Wed, 18 Nov 2020 
23:17:31 +0000 Subject: [PATCH 12/18] libdispatch-1271.40.12 Imported from libdispatch-1271.40.12.tar.gz --- CMakeLists.txt | 108 +- PATCHES | 88 ++ cmake/config.h.in | 12 + cmake/modules/DispatchUtilities.cmake | 8 +- cmake/modules/SwiftSupport.cmake | 241 ++++- config/config.h | 12 + dispatch/block.h | 4 +- dispatch/dispatch.h | 5 + dispatch/generic/module.modulemap | 1 + dispatch/group.h | 2 +- dispatch/object.h | 4 +- dispatch/queue.h | 2 +- dispatch/semaphore.h | 6 +- dispatch/source.h | 10 +- dispatch/workloop.h | 27 + libdispatch.xcodeproj/project.pbxproj | 349 ++++--- man/dispatch.3 | 10 +- man/dispatch_after.3 | 19 +- man/dispatch_api.3 | 22 +- man/dispatch_apply.3 | 27 +- man/dispatch_async.3 | 87 +- man/dispatch_data_create.3 | 48 +- man/dispatch_group_create.3 | 38 +- man/dispatch_io_create.3 | 142 +-- man/dispatch_io_read.3 | 52 +- man/dispatch_object.3 | 54 +- man/dispatch_once.3 | 1 - man/dispatch_queue_create.3 | 157 +-- man/dispatch_read.3 | 59 +- man/dispatch_semaphore_create.3 | 38 +- man/dispatch_source_create.3 | 135 ++- man/dispatch_time.3 | 15 +- os/CMakeLists.txt | 1 + os/clock.h | 18 + os/eventlink_private.h | 296 ++++++ os/firehose_buffer_private.h | 2 +- os/object.h | 44 +- os/object_private.h | 16 +- os/voucher_private.h | 4 +- os/workgroup.h | 37 + os/workgroup_base.h | 78 ++ os/workgroup_interval.h | 164 +++ os/workgroup_interval_private.h | 110 ++ os/workgroup_object.h | 371 +++++++ os/workgroup_object_private.h | 119 +++ os/workgroup_parallel.h | 78 ++ os/workgroup_private.h | 17 + private/CMakeLists.txt | 2 + private/mach_private.h | 91 +- private/private.h | 10 +- private/queue_private.h | 18 +- private/source_private.h | 2 +- private/time_private.h | 31 + private/workloop_private.h | 4 +- src/BlocksRuntime/Block.h | 23 +- src/BlocksRuntime/BlocksRuntime.def | 4 + src/BlocksRuntime/data.c | 20 +- src/CMakeLists.txt | 167 ++- src/allocator_internal.h | 4 +- src/benchmark.c | 12 +- src/data_internal.h | 4 +- src/event/event.c | 26 +- 
src/event/event_config.h | 2 +- src/event/event_epoll.c | 59 +- src/event/event_internal.h | 23 +- src/event/event_kevent.c | 212 ++-- src/event/event_windows.c | 860 +++++++++++++++- src/event/workqueue.c | 10 +- src/eventlink.c | 542 ++++++++++ src/eventlink_internal.h | 67 ++ src/firehose/firehose_buffer.c | 98 +- src/firehose/firehose_inline_internal.h | 50 +- src/firehose/firehose_server.c | 10 +- src/firehose/firehose_server_internal.h | 5 +- src/init.c | 76 +- src/inline_internal.h | 73 +- src/internal.h | 69 +- src/introspection.c | 26 +- src/io.c | 235 ++++- src/mach.c | 227 +++-- src/object.m | 68 +- src/object_internal.h | 43 +- src/queue.c | 466 ++++++--- src/queue_internal.h | 24 +- src/semaphore.c | 22 +- src/semaphore_internal.h | 4 +- src/shims.h | 16 +- src/shims/atomic.h | 109 +- src/shims/generic_sys_queue.h | 58 ++ src/shims/generic_win_stubs.c | 86 +- src/shims/generic_win_stubs.h | 36 + src/shims/lock.c | 88 +- src/shims/lock.h | 10 +- src/shims/priority.h | 9 +- src/shims/target.h | 12 +- src/shims/time.h | 39 +- src/shims/tsd.h | 20 +- src/shims/yield.h | 42 +- src/source.c | 30 +- src/source_internal.h | 3 +- src/swift/Block.swift | 14 +- src/swift/Dispatch.swift | 4 +- src/swift/DispatchStubs.cc | 16 +- src/swift/IO.swift | 19 + src/swift/Queue.swift | 8 +- src/swift/Source.swift | 43 +- src/swift/Wrapper.swift | 6 +- src/time.c | 56 +- src/voucher.c | 48 +- src/voucher_internal.h | 4 +- src/workgroup.c | 1076 ++++++++++++++++++++ src/workgroup_internal.h | 200 ++++ xcodeconfig/libdispatch-dyld-stub.xcconfig | 28 - xcodeconfig/libdispatch.dirty | 13 + xcodeconfig/libdispatch.order | 8 + xcodeconfig/libdispatch.xcconfig | 160 --- xcodeconfig/libfirehose_kernel.xcconfig | 2 +- xcodescripts/install-headers.sh | 21 +- xcodescripts/postprocess-headers.sh | 5 + 119 files changed, 7327 insertions(+), 1789 deletions(-) create mode 100644 os/clock.h create mode 100644 os/eventlink_private.h create mode 100644 os/workgroup.h create mode 100644 
os/workgroup_base.h create mode 100644 os/workgroup_interval.h create mode 100644 os/workgroup_interval_private.h create mode 100644 os/workgroup_object.h create mode 100644 os/workgroup_object_private.h create mode 100644 os/workgroup_parallel.h create mode 100644 os/workgroup_private.h create mode 100644 src/BlocksRuntime/BlocksRuntime.def create mode 100644 src/eventlink.c create mode 100644 src/eventlink_internal.h create mode 100644 src/workgroup.c create mode 100644 src/workgroup_internal.h delete mode 100644 xcodeconfig/libdispatch-dyld-stub.xcconfig diff --git a/CMakeLists.txt b/CMakeLists.txt index 89e985951..9f3f221e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,38 +44,30 @@ dispatch_common_warnings() option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) +# NOTE(abdulras) this is the CMake supported way to control whether we generate +# shared or static libraries. This impacts the behaviour of `add_library` in +# what type of library it generates. 
+option(BUILD_SHARED_LIBS "build shared libraries" ON) + option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) if(ENABLE_SWIFT) if(NOT CMAKE_SWIFT_COMPILER) message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift") endif() - get_filename_component(SWIFT_TOOLCHAIN ${CMAKE_SWIFT_COMPILER} DIRECTORY) - get_filename_component(SWIFT_TOOLCHAIN ${SWIFT_TOOLCHAIN} DIRECTORY) - - string(TOLOWER ${CMAKE_SYSTEM_NAME} SWIFT_OS) - get_swift_host_arch(SWIFT_HOST_ARCH) - - set(SWIFT_RUNTIME_LIBDIR ${SWIFT_TOOLCHAIN}/${SWIFT_LIBDIR}/swift/${SWIFT_OS}/${SWIFT_HOST_ARCH}) - - add_library(swiftCore - SHARED IMPORTED GLOBAL) - set_target_properties(swiftCore - PROPERTIES - IMPORTED_LOCATION - ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftCore${CMAKE_SHARED_LIBRARY_SUFFIX}) + string(TOLOWER ${CMAKE_SYSTEM_NAME} swift_os) + get_swift_host_arch(swift_arch) - add_library(swiftSwiftOnoneSupport - SHARED IMPORTED GLOBAL) - set_target_properties(swiftSwiftOnoneSupport - PROPERTIES - IMPORTED_LOCATION - ${SWIFT_RUNTIME_LIBDIR}/${CMAKE_SHARED_LIBRARY_PREFIX}swiftSwiftOnoneSupport${CMAKE_SHARED_LIBRARY_SUFFIX}) + if(BUILD_SHARED_LIBS) + set(swift_dir swift) + else() + set(swift_dir swift_static) + endif() - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/swift/${SWIFT_OS}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/swift/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") - set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/swift/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/swift/os" CACHE PATH "Path where the os/ headers will be installed") + set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/${swift_dir}/${swift_os}" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/dispatch" CACHE PATH "Path where the headers will be 
installed for libdispatch") + set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") + set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/os" CACHE PATH "Path where the os/ headers will be installed") endif() if(NOT ENABLE_SWIFT) @@ -85,9 +77,9 @@ if(NOT ENABLE_SWIFT) set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") endif() -option(ENABLE_DTRACE "enable dtrace support" "") +option(DISPATCH_ENABLE_ASSERTS "enable debug assertions" FALSE) -option(BUILD_SHARED_LIBS "build shared libraries" ON) +option(ENABLE_DTRACE "enable dtrace support" "") option(ENABLE_TESTING "build libdispatch tests" ON) @@ -131,21 +123,32 @@ endif() option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) -find_package(BlocksRuntime QUIET) -if(NOT BlocksRuntime_FOUND) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) set(BlocksRuntime_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/src/BlocksRuntime) + # NOTE(compnerd) use the `BUILD_SHARED_LIBS` variable to determine what type + # of library to build. If it is true, we will generate shared libraries, + # otherwise we will generate static libraries. 
add_library(BlocksRuntime - STATIC - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(BlocksRuntime + PRIVATE + ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/BlocksRuntime.def) + if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(BlocksRuntime + PRIVATE + BlocksRuntime_STATIC) + endif() + endif() set_target_properties(BlocksRuntime PROPERTIES POSITION_INDEPENDENT_CODE TRUE) if(HAVE_OBJC AND CMAKE_DL_LIBS) - set_target_properties(BlocksRuntime - PROPERTIES - INTERFACE_LINK_LIBRARIES ${CMAKE_DL_LIBS}) + target_link_libraries(BlocksRuntime + PUBLIC + ${CMAKE_DL_LIBS}) endif() add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) @@ -160,6 +163,11 @@ if(NOT BlocksRuntime_FOUND) DESTINATION "${INSTALL_BLOCK_HEADERS_DIR}") endif() + install(TARGETS + BlocksRuntime + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) endif() check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) @@ -181,21 +189,17 @@ check_function_exists(mach_absolute_time HAVE_MACH_ABSOLUTE_TIME) check_function_exists(mach_approximate_time HAVE_MACH_APPROXIMATE_TIME) check_function_exists(mach_port_construct HAVE_MACH_PORT_CONSTRUCT) check_function_exists(malloc_create_zone HAVE_MALLOC_CREATE_ZONE) +check_function_exists(posix_fadvise HAVE_POSIX_FADVISE) +check_function_exists(posix_spawnp HAVE_POSIX_SPAWNP) check_function_exists(pthread_key_init_np HAVE_PTHREAD_KEY_INIT_NP) +check_function_exists(pthread_attr_setcpupercent_np HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP) +check_function_exists(pthread_yield_np HAVE_PTHREAD_YIELD_NP) check_function_exists(pthread_main_np HAVE_PTHREAD_MAIN_NP) +check_function_exists(pthread_workqueue_setdispatch_np HAVE_PTHREAD_WORKQUEUE_SETDISPATCH_NP) check_function_exists(strlcpy HAVE_STRLCPY) 
check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) -if(NOT HAVE_STRLCPY AND NOT HAVE_GETPROGNAME) - include(FindPkgConfig) - pkg_check_modules(BSD_OVERLAY libbsd-overlay) - if(BSD_OVERLAY_FOUND) - set(HAVE_STRLCPY 1 CACHE INTERNAL "Have function strlcpy" FORCE) - set(HAVE_GETPROGNAME 1 CACHE INTERNAL "Have function getprogname" FORCE) - endif() -endif() - find_package(Threads REQUIRED) check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) @@ -302,27 +306,19 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") -elseif(CMAKE_SYSTEM_NAME STREQUAL Windows) - add_custom_command(OUTPUT - "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" - "${PROJECT_SOURCE_DIR}/private/module.modulemap" - COMMAND - ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/dispatch/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E copy "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/private/darwin/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") else() add_custom_command(OUTPUT "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" 
"${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/dispatch/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" COMMAND - ${CMAKE_COMMAND} -E create_symlink "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") + ${CMAKE_COMMAND} -E copy_if_different "${PROJECT_SOURCE_DIR}/private/generic/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") endif() -add_custom_target(module-map-symlinks +add_custom_target(module-maps ALL DEPENDS "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") diff --git a/PATCHES b/PATCHES index b4483135a..10277a45c 100644 --- a/PATCHES +++ b/PATCHES @@ -434,3 +434,91 @@ github commits starting with 29bdc2f from [3975b58] APPLIED rdar://44568645 [81dc900] APPLIED rdar://44568645 [6162a1d] APPLIED rdar://44568645 +[c55ff6f] APPLIED rdar://54572081 +[c4a7149] APPLIED rdar://54572081 +[edce1fe] APPLIED rdar://54572081 +[ac525a4] APPLIED rdar://54572081 +[0710b29] APPLIED rdar://54572081 +[e99de71] APPLIED rdar://54572081 +[6d83ad5] APPLIED rdar://54572081 +[3ed78b5] APPLIED rdar://54572081 +[f6376cb] APPLIED rdar://54572081 +[9acbab3] APPLIED rdar://54572081 +[ca08b5f] APPLIED rdar://54572081 +[775f9f2] APPLIED rdar://54572081 +[db37bbc] APPLIED rdar://54572081 +[9852dcb] APPLIED rdar://54572081 +[9ec95bf] APPLIED rdar://54572081 +[bd2367c] APPLIED rdar://54572081 +[a736ea7] APPLIED rdar://54572081 +[3e4ea66] APPLIED rdar://54572081 +[c85c0d8] APPLIED rdar://54572081 +[7187ea2] APPLIED rdar://54572081 +[30eeb14] APPLIED rdar://54572081 +[6a5c6d8] APPLIED rdar://54572081 +[64a12c6] APPLIED rdar://54572081 +[09ec354] APPLIED rdar://54572081 +[5bcd598] APPLIED rdar://54572081 +[7874a92] APPLIED rdar://54572081 +[619775e] APPLIED rdar://54572081 +[e3ae79b] APPLIED rdar://54572081 +[fb368f6] APPLIED rdar://54572081 +[afa6cc3] APPLIED rdar://54572081 
+[e6df818] APPLIED rdar://54572081 +[7144ee3] APPLIED rdar://54572081 +[60ffcc2] APPLIED rdar://54572081 +[618b070] APPLIED rdar://54572081 +[dde5892] APPLIED rdar://54572081 +[81c9bf6] APPLIED rdar://54572081 +[4b85ca6] APPLIED rdar://54572081 +[ff3bf51] APPLIED rdar://54572081 +[bc00e13] APPLIED rdar://54572081 +[d44acc0] APPLIED rdar://54572081 +[4659503] APPLIED rdar://54572081 +[60fdf80] APPLIED rdar://54572081 +[7a74af4] APPLIED rdar://54572081 +[f20349f] APPLIED rdar://54572081 +[ef9364c] APPLIED rdar://54572081 +[9d485ca] APPLIED rdar://54572081 +[cbd70d1] APPLIED rdar://54572081 +[6e1825a] APPLIED rdar://54572081 +[319bd33] APPLIED rdar://54572081 +[6c5b3ba] APPLIED rdar://54572081 +[7e7677b] APPLIED rdar://54572081 +[9002f70] APPLIED rdar://54572081 +[cc04868] APPLIED rdar://54572081 +[dc0dd64] APPLIED rdar://54572081 +[a5f5a92] APPLIED rdar://54572081 +[e5ba042] APPLIED rdar://54572081 +[a3bff44] APPLIED rdar://54572081 +[2e3d5c0] APPLIED rdar://54572081 +[1482ec9] APPLIED rdar://54572081 +[6bf6cb1] APPLIED rdar://54572081 +[aa13cad] APPLIED rdar://54572081 +[b073d89] APPLIED rdar://54572081 +[7784917] APPLIED rdar://54572081 +[717b3f7] APPLIED rdar://54572081 +[37010f0] APPLIED rdar://54572081 +[251dba4] APPLIED rdar://54572081 +[a18aa1f] APPLIED rdar://54572081 +[e8d020e] APPLIED rdar://54572081 +[90a84a1] APPLIED rdar://54572081 +[7721660] APPLIED rdar://54572081 +[c5af10f] APPLIED rdar://54572081 +[f01432d] APPLIED rdar://54572081 +[d0394bf] APPLIED rdar://54572081 +[2b14a98] APPLIED rdar://54572081 +[d32596b] APPLIED rdar://54572081 +[52bc6b2] APPLIED rdar://54572081 +[4169c8d] APPLIED rdar://54572081 +[318f6e5] APPLIED rdar://54572081 +[6a36af8] APPLIED rdar://54572081 +[d11d565] APPLIED rdar://54572081 +[d9740c2] APPLIED rdar://54572081 +[fc917b4] APPLIED rdar://54572081 +[f911a44] APPLIED rdar://54572081 +[6d32c4d] APPLIED rdar://54572081 +[9005cb4] APPLIED rdar://54572081 +[68875cb] APPLIED rdar://54572081 +[fc73866] APPLIED rdar://54572081 
+[3cf1bf3] APPLIED rdar://54572081 diff --git a/cmake/config.h.in b/cmake/config.h.in index 0709c254c..2896a2083 100644 --- a/cmake/config.h.in +++ b/cmake/config.h.in @@ -136,15 +136,27 @@ /* Define if you have the Objective-C runtime */ #cmakedefine HAVE_OBJC +/* Define to 1 if you have the `posix_fadvise' function. */ +#cmakedefine HAVE_POSIX_FADVISE + +/* Define to 1 if you have the `posix_spawnp' function. */ +#cmakedefine HAVE_POSIX_SPAWNP + /* Define to 1 if you have the `pthread_key_init_np' function. */ #cmakedefine HAVE_PTHREAD_KEY_INIT_NP +/* Define to 1 if you have the `pthread_attr_setcpupercent_np' function. */ +#cmakedefine HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP + /* Define to 1 if you have the header file. */ #cmakedefine HAVE_PTHREAD_MACHDEP_H /* Define to 1 if you have the `pthread_main_np' function. */ #cmakedefine01 HAVE_PTHREAD_MAIN_NP +/* Define to 1 if you have the `pthread_yield_np' function. */ +#cmakedefine01 HAVE_PTHREAD_YIELD_NP + /* Define to 1 if you have the header file. 
*/ #cmakedefine01 HAVE_PTHREAD_NP_H diff --git a/cmake/modules/DispatchUtilities.cmake b/cmake/modules/DispatchUtilities.cmake index 15d8cd771..fea3622ec 100644 --- a/cmake/modules/DispatchUtilities.cmake +++ b/cmake/modules/DispatchUtilities.cmake @@ -1,15 +1,19 @@ function(dispatch_set_linker target) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(CMAKE_HOST_EXECUTABLE_SUFFIX .exe) + endif() + if(USE_GOLD_LINKER) set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS - -fuse-ld=gold) + -fuse-ld=gold${CMAKE_HOST_EXECUTABLE_SUFFIX}) endif() if(USE_LLD_LINKER) set_property(TARGET ${target} APPEND_STRING PROPERTY LINK_FLAGS - -fuse-ld=lld) + -fuse-ld=lld${CMAKE_HOST_EXECUTABLE_SUFFIX}) endif() endfunction() diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index bae1f9f57..da7a201e3 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -1,49 +1,81 @@ include(CMakeParseArguments) -function(add_swift_library library) - set(options) +function(add_swift_target target) + set(options LIBRARY;SHARED;STATIC) set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) - set(multiple_value_options SOURCES;SWIFT_FLAGS;CFLAGS;DEPENDS) - - cmake_parse_arguments(ASL "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) + set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;RESOURCES;SOURCES;SWIFT_FLAGS) - set(flags ${CMAKE_SWIFT_FLAGS}) + cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) - list(APPEND flags -emit-library) + set(compile_flags ${CMAKE_SWIFT_FLAGS}) + set(link_flags ${CMAKE_SWIFT_LINK_FLAGS}) - if(ASL_TARGET) - list(APPEND FLAGS -target;${ASL_TARGET}) + if(AST_TARGET) + list(APPEND compile_flags -target;${AST_TARGET}) + list(APPEND link_flags -target;${AST_TARGET}) + endif() + if(AST_MODULE_NAME) + list(APPEND compile_flags -module-name;${AST_MODULE_NAME}) + else() + list(APPEND 
compile_flags -module-name;${target}) endif() - if(ASL_MODULE_NAME) - list(APPEND flags -module-name;${ASL_MODULE_NAME}) + if(AST_MODULE_LINK_NAME) + list(APPEND compile_flags -module-link-name;${AST_MODULE_LINK_NAME}) endif() - if(ASL_MODULE_LINK_NAME) - list(APPEND flags -module-link-name;${ASL_MODULE_LINK_NAME}) + if(AST_MODULE_CACHE_PATH) + list(APPEND compile_flags -module-cache-path;${AST_MODULE_CACHE_PATH}) endif() - if(ASL_MODULE_PATH) - list(APPEND flags -emit-module-path;${ASL_MODULE_PATH}) + if(CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) + list(APPEND compile_flags -g) endif() - if(ASL_MODULE_CACHE_PATH) - list(APPEND flags -module-cache-path;${ASL_MODULE_CACHE_PATH}) + if(AST_SWIFT_FLAGS) + foreach(flag ${AST_SWIFT_FLAGS}) + list(APPEND compile_flags ${flag}) + endforeach() endif() - if(ASL_SWIFT_FLAGS) - foreach(flag ${ASL_SWIFT_FLAGS}) - list(APPEND flags ${flag}) + if(AST_CFLAGS) + foreach(flag ${AST_CFLAGS}) + list(APPEND compile_flags -Xcc;${flag}) endforeach() endif() - if(ASL_CFLAGS) - foreach(flag ${ASL_CFLAGS}) - list(APPEND flags -Xcc;${flag}) + if(AST_LINK_FLAGS) + foreach(flag ${AST_LINK_FLAGS}) + list(APPEND link_flags ${flag}) endforeach() endif() - - # FIXME: We shouldn't /have/ to build things in a single process. 
- # - list(APPEND flags -force-single-frontend-invocation) + if(AST_LIBRARY) + if(AST_STATIC AND AST_SHARED) + message(SEND_ERROR "add_swift_target asked to create library as STATIC and SHARED") + elseif(AST_STATIC OR NOT BUILD_SHARED_LIBS) + set(library_kind STATIC) + elseif(AST_SHARED OR BUILD_SHARED_LIBS) + set(library_kind SHARED) + endif() + else() + if(AST_STATIC OR AST_SHARED) + message(SEND_ERROR "add_swift_target asked to create executable as STATIC or SHARED") + endif() + endif() + if(NOT AST_OUTPUT) + if(AST_LIBRARY) + if(AST_SHARED OR BUILD_SHARED_LIBS) + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) + else() + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_STATIC_LIBRARY_PREFIX}${target}${CMAKE_STATIC_LIBRARY_SUFFIX}) + endif() + else() + set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) + endif() + endif() + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + if(AST_SHARED OR BUILD_SHARED_LIBS) + set(IMPORT_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_IMPORT_LIBRARY_PREFIX}${target}${CMAKE_IMPORT_LIBRARY_SUFFIX}) + endif() + endif() set(sources) - foreach(source ${ASL_SOURCES}) + foreach(source ${AST_SOURCES}) get_filename_component(location ${source} PATH) if(IS_ABSOLUTE ${location}) list(APPEND sources ${source}) @@ -52,25 +84,136 @@ function(add_swift_library library) endif() endforeach() - get_filename_component(module_directory ${ASL_MODULE_PATH} DIRECTORY) - - add_custom_command(OUTPUT - ${ASL_OUTPUT} - ${ASL_MODULE_PATH} - ${module_directory}/${ASL_MODULE_NAME}.swiftdoc - DEPENDS - ${ASL_SOURCES} - ${CMAKE_SWIFT_COMPILER} - ${ASL_DEPENDS} - COMMAND - ${CMAKE_COMMAND} -E make_directory ${module_directory} - COMMAND - ${CMAKE_SWIFT_COMPILER} ${flags} -c ${sources} -o ${ASL_OUTPUT}) - add_custom_target(${library} - DEPENDS - ${ASL_OUTPUT} - ${ASL_MODULE_PATH} - 
${module_directory}/${ASL_MODULE_NAME}.swiftdoc) + set(objs) + set(mods) + set(docs) + set(i 0) + foreach(source ${sources}) + get_filename_component(name ${source} NAME) + + set(obj ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}${CMAKE_C_OUTPUT_EXTENSION}) + set(mod ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftmodule) + set(doc ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftdoc) + + set(all_sources ${sources}) + list(INSERT all_sources ${i} -primary-file) + + add_custom_command(OUTPUT + ${obj} + ${mod} + ${doc} + DEPENDS + ${source} + ${AST_DEPENDS} + COMMAND + ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) + + list(APPEND objs ${obj}) + list(APPEND mods ${mod}) + list(APPEND docs ${doc}) + + math(EXPR i "${i}+1") + endforeach() + + if(AST_LIBRARY) + get_filename_component(module_directory ${AST_MODULE_PATH} DIRECTORY) + + set(module ${AST_MODULE_PATH}) + set(documentation ${module_directory}/${AST_MODULE_NAME}.swiftdoc) + + add_custom_command(OUTPUT + ${module} + ${documentation} + DEPENDS + ${mods} + ${docs} + ${AST_DEPENDS} + COMMAND + ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) + endif() + + if(AST_LIBRARY) + if(CMAKE_SYSTEM_NAME STREQUAL Windows OR CMAKE_SYSTEM_NAME STREQUAL Darwin) + set(emit_library -emit-library) + else() + set(emit_library -emit-library -Xlinker -soname -Xlinker ${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) + endif() + endif() + if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED) + add_custom_command(OUTPUT + ${AST_OUTPUT} + DEPENDS + ${objs} + ${AST_DEPENDS} + COMMAND + ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o ${AST_OUTPUT} ${objs}) + add_custom_target(${target} + ALL + DEPENDS + ${AST_OUTPUT} + ${module} + ${documentation}) + else() + add_library(${target}-static STATIC ${objs}) + 
add_dependencies(${target}-static ${AST_DEPENDS}) + get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) + if(NOT CMAKE_STATIC_LIBRARY_PREFIX STREQUAL "") + string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn}) + endif() + if(NOT CMAKE_STATIC_LIBRARY_SUFFIX STREQUAL "") + string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn}) + endif() + get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) + set_target_properties(${target}-static + PROPERTIES + LINKER_LANGUAGE C + ARCHIVE_OUTPUT_DIRECTORY ${ast_output_dn} + OUTPUT_DIRECTORY ${ast_output_dn} + OUTPUT_NAME ${ast_output_bn}) + add_custom_target(${target} + ALL + DEPENDS + ${target}-static + ${module} + ${documentation}) + endif() + + if(AST_RESOURCES) + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target} + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${target} + COMMAND + ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_RESOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources) + else() + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) + if(CMAKE_SYSTEM_NAME STREQUAL Windows) + if(AST_SHARED OR BUILD_SHARED_LIBS) + add_custom_command(TARGET + ${target} + POST_BUILD + COMMAND + ${CMAKE_COMMAND} -E copy ${IMPORT_LIBRARY} ${CMAKE_CURRENT_BINARY_DIR}) + endif() + endif() + endif() +endfunction() + +function(add_swift_library library) + add_swift_target(${library} LIBRARY ${ARGN}) +endfunction() + +function(add_swift_executable executable) + add_swift_target(${executable} ${ARGN}) endfunction() # Returns the current achitecture name in a variable @@ -93,6 +236,8 @@ function(get_swift_host_arch result_var_name) set("${result_var_name}" "s390x" PARENT_SCOPE) 
elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv6l") set("${result_var_name}" "armv6" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7-a") + set("${result_var_name}" "armv7" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "armv7l") set("${result_var_name}" "armv7" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "AMD64") @@ -101,6 +246,8 @@ function(get_swift_host_arch result_var_name) set("${result_var_name}" "itanium" PARENT_SCOPE) elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86") set("${result_var_name}" "i686" PARENT_SCOPE) + elseif("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "i686") + set("${result_var_name}" "i686" PARENT_SCOPE) else() message(FATAL_ERROR "Unrecognized architecture on host system: ${CMAKE_SYSTEM_PROCESSOR}") endif() diff --git a/config/config.h b/config/config.h index 2fcd922b5..b50565aa9 100644 --- a/config/config.h +++ b/config/config.h @@ -121,15 +121,27 @@ /* Define if you have the Objective-C runtime */ #define HAVE_OBJC 1 +/* Define to 1 if you have the `posix_fadvise' function. */ +#define HAVE_POSIX_FADVISE 0 + +/* Define to 1 if you have the `posix_spawnp' function. */ +#define HAVE_POSIX_SPAWNP 1 + /* Define to 1 if you have the `pthread_key_init_np' function. */ #define HAVE_PTHREAD_KEY_INIT_NP 1 +/* Define to 1 if you have the `pthread_attr_setcpupercent_np' function. */ +#define HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP 1 + /* Define to 1 if you have the header file. */ #define HAVE_PTHREAD_MACHDEP_H 1 /* Define to 1 if you have the `pthread_main_np' function. */ #define HAVE_PTHREAD_MAIN_NP 1 +/* Define to 1 if you have the `pthread_yield_np' function. */ +#define HAVE_PTHREAD_YIELD_NP 1 + /* Define to 1 if you have the header file. 
*/ /* #undef HAVE_PTHREAD_NP_H */ diff --git a/dispatch/block.h b/dispatch/block.h index 4d6f5b548..6aa3c8f2d 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -323,7 +323,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW -long +intptr_t dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); /*! @@ -416,7 +416,7 @@ dispatch_block_cancel(dispatch_block_t block); API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_block_testcancel(dispatch_block_t block); __END_DECLS diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index cbc39ede6..9b517f36c 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -37,10 +37,14 @@ #include #include #include +#include #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) #include #endif #include +#if defined(_WIN32) +#include +#endif #if (defined(__linux__) || defined(__FreeBSD__)) && defined(__has_feature) #if __has_feature(modules) @@ -58,6 +62,7 @@ #endif #include +#include #include #include #include diff --git a/dispatch/generic/module.modulemap b/dispatch/generic/module.modulemap index 8c3e7d016..f7fdaae76 100644 --- a/dispatch/generic/module.modulemap +++ b/dispatch/generic/module.modulemap @@ -2,6 +2,7 @@ module Dispatch { requires blocks export * link "dispatch" + link "BlocksRuntime" } module DispatchIntrospection [system] [extern_c] { diff --git a/dispatch/group.h b/dispatch/group.h index 8d74ada2e..6b30b26c6 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -160,7 +160,7 @@ dispatch_group_async_f(dispatch_group_t group, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); /*! 
diff --git a/dispatch/object.h b/dispatch/object.h index 024a3c2a8..8211fbd49 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -458,7 +458,7 @@ dispatch_set_qos_class_floor(dispatch_object_t object, */ DISPATCH_UNAVAILABLE DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW -long +intptr_t dispatch_wait(void *object, dispatch_time_t timeout); #if __has_extension(c_generic_selections) #define dispatch_wait(object, timeout) \ @@ -556,7 +556,7 @@ dispatch_cancel(void *object); DISPATCH_UNAVAILABLE DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_testcancel(void *object); #if __has_extension(c_generic_selections) #define dispatch_testcancel(object) \ diff --git a/dispatch/queue.h b/dispatch/queue.h index ddace0659..dc5aae79a 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -681,7 +681,7 @@ typedef long dispatch_queue_priority_t; API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t -dispatch_get_global_queue(long identifier, unsigned long flags); +dispatch_get_global_queue(intptr_t identifier, uintptr_t flags); /*! * @typedef dispatch_queue_attr_t diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index f5394b45d..a6f9394f9 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -61,7 +61,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_semaphore_t -dispatch_semaphore_create(long value); +dispatch_semaphore_create(intptr_t value); /*! * @function dispatch_semaphore_wait @@ -85,7 +85,7 @@ dispatch_semaphore_create(long value); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); /*! 
@@ -107,7 +107,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW -long +intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema); __END_DECLS diff --git a/dispatch/source.h b/dispatch/source.h index 40453fa3e..5ce826022 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -389,7 +389,7 @@ DISPATCH_NOTHROW dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, - unsigned long mask, + uintptr_t mask, dispatch_queue_t _Nullable queue); /*! @@ -537,7 +537,7 @@ dispatch_source_cancel(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -long +intptr_t dispatch_source_testcancel(dispatch_source_t source); /*! @@ -601,7 +601,7 @@ dispatch_source_get_handle(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -unsigned long +uintptr_t dispatch_source_get_mask(dispatch_source_t source); /*! @@ -640,7 +640,7 @@ dispatch_source_get_mask(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW -unsigned long +uintptr_t dispatch_source_get_data(dispatch_source_t source); /*! @@ -662,7 +662,7 @@ dispatch_source_get_data(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void -dispatch_source_merge_data(dispatch_source_t source, unsigned long value); +dispatch_source_merge_data(dispatch_source_t source, uintptr_t value); /*! 
* @function dispatch_source_set_timer diff --git a/dispatch/workloop.h b/dispatch/workloop.h index 2c6cf18c5..98c4f8a41 100644 --- a/dispatch/workloop.h +++ b/dispatch/workloop.h @@ -133,6 +133,33 @@ void dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, dispatch_autorelease_frequency_t frequency); +/*! + * @function dispatch_workloop_set_os_workgroup + * + * @abstract + * Associates an os_workgroup_t with the specified dispatch workloop. + * + * The worker thread will be a member of the specified os_workgroup_t while executing + * work items submitted to the workloop. + * + * @param workloop + * The dispatch workloop to modify. + * + * This workloop must be inactive, passing an activated object is undefined + * and will cause the process to be terminated. + * + * @param workgroup + * The workgroup to associate with this workloop. + * + * The workgroup specified is retained and the previously associated workgroup + * (if any) is released. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +void +dispatch_workloop_set_os_workgroup(dispatch_workloop_t workloop, + os_workgroup_t workgroup); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index bd5042c4a..2dbad2992 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -122,14 +122,12 @@ 6E4BACC31D48A42100B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC51D48A42200B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 6E4BACC71D48A42300B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; - 6E4BACC81D48A42400B562AE /* mach.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E4BACBC1D48A41500B562AE /* mach.c */; }; 
6E4BACCA1D48A89500B562AE /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; 6E4BACF51D49A04600B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF61D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF71D49A04700B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACF91D49A04800B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E4BACFB1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; - 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EA7937D1D456D1300929B1B /* event_epoll.c */; }; 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */; }; @@ -155,7 +153,6 @@ 6E9C6CAA20F9848D00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAB20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; - 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6EA283D71CAB93920041B2E0 /* libdispatch.codes 
in Copy Trace Definitions */ = {isa = PBXBuildFile; fileRef = 6EA283D01CAB93270041B2E0 /* libdispatch.codes */; }; 6EA793891D458A5800929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; 6EA7938E1D458A5C00929B1B /* event_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EA793881D458A5800929B1B /* event_config.h */; }; @@ -165,13 +162,11 @@ 6EA962991D48622800759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629B1D48622900759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629D1D48622B00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; - 6EA9629E1D48622C00759D53 /* event.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCBD1D3C6719007DA2B4 /* event.c */; }; 6EA9629F1D48625000759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A01D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A11D48625100759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A31D48625300759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EA962A51D48625400759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; - 6EA962A61D48625500759D53 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; 6EB60D2C1BBB197B0092FA94 /* firehose_inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */; }; 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */ = {isa = 
PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; 6EBEC7E81BBDD324009B1596 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; @@ -206,7 +201,6 @@ 6EF2CAAE1C8899EA001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB01C8899EB001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB21C8899EC001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EF2CAAB1C8899D5001ABE83 /* lock.c */; }; 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EF2CAB51C889D67001ABE83 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; @@ -228,30 +222,55 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9B3713F623D24594001C5C88 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */ = {isa = PBXBuildFile; 
fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 
9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* 
workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BE3E58523CE638D006FE059 /* (null) in Headers */ = {isa = PBXBuildFile; }; + 9BE3E58623CE63A3006FE059 /* (null) in Headers */ = {isa = PBXBuildFile; }; + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */ 
= {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BFD342E23C94F4E00B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; + 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B609581F221DFA4B00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; B68358901FA77F5B00AA0D58 /* time_private.h in Headers */ = {isa = PBXBuildFile; fileRef = B683588A1FA77F4900AA0D58 /* time_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - C00B0DF31C5AEBBE000330B3 /* resolver.c 
in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; - C00B0DF41C5AEBBE000330B3 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED8A0E8361E600161930 /* queue.c */; }; - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; - C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E43A724F1AF85BBC00BAA921 /* block.cpp */; }; - C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */ = {isa = PBXBuildFile; fileRef = 721F5CCE0F15553500FF03A6 /* semaphore.c */; }; - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72406A031AF95DF800DF4E2B /* firehose_reply.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */ = {isa = PBXBuildFile; fileRef = 9676A0E00F3E755D00713ADB /* apply.c */; }; - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */ = {isa = PBXBuildFile; fileRef = 9661E56A0F3E7DDF00749F3E /* object.c */; }; - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - C00B0E001C5AEBBE000330B3 /* source.c in Sources */ = {isa = PBXBuildFile; fileRef = 96A8AA860F41E7A400CD570B /* source.c */; }; - C00B0E011C5AEBBE000330B3 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; 
}; - C00B0E021C5AEBBE000330B3 /* data.c in Sources */ = {isa = PBXBuildFile; fileRef = 5AAB45BF10D30B79004407EA /* data.c */; }; - C00B0E031C5AEBBE000330B3 /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; - C00B0E041C5AEBBE000330B3 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; - C00B0E051C5AEBBE000330B3 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; C01866A61C5973210040FC07 /* protocol.defs in Sources */ = {isa = PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; C01866A71C5973210040FC07 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; C01866A81C5973210040FC07 /* init.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE3B1251659900645D88 /* init.c */; }; @@ -464,7 +483,6 @@ E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - E49BB70A1E70A3B000868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -661,13 +679,6 @@ remoteGlobalIDString = 
FCFA5A9F10D1AE050074F59A; remoteInfo = ddt; }; - C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; - proxyType = 1; - remoteGlobalIDString = C00B0DF01C5AEBBE000330B3; - remoteInfo = "libdispatch dyld stub"; - }; C01866C11C597AEA0040FC07 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -868,7 +879,21 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 9B2A588023A412B400A7BB27 /* eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventlink.c; sourceTree = ""; }; + 9B3713F123D24594001C5C88 /* clock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = ""; }; + 9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_base.h; path = os/workgroup_base.h; sourceTree = SOURCE_ROOT; }; 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; + 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = workgroup_object_private.h; sourceTree = ""; }; + 9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa 
= PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup.h; path = os/workgroup.h; sourceTree = SOURCE_ROOT; }; + 9B815576234AFC9800DB5CA3 /* workgroup.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = workgroup.c; path = src/workgroup.c; sourceTree = SOURCE_ROOT; }; + 9B8ED5782350C79100507521 /* workgroup_object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_object.h; path = os/workgroup_object.h; sourceTree = SOURCE_ROOT; }; + 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_interval.h; path = os/workgroup_interval.h; sourceTree = SOURCE_ROOT; }; + 9BA656DF236BB55000D13FAE /* workgroup_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_private.h; path = os/workgroup_private.h; sourceTree = SOURCE_ROOT; }; + 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_interval_private.h; path = os/workgroup_interval_private.h; sourceTree = SOURCE_ROOT; }; + 9BA7221023E293CB0058472E /* workgroup_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_parallel.h; sourceTree = ""; }; + 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_private.h; sourceTree = ""; }; + 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = eventlink_internal.h; path = src/eventlink_internal.h; sourceTree = SOURCE_ROOT; }; + 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = 
workgroup_internal.h; path = src/workgroup_internal.h; sourceTree = SOURCE_ROOT; }; B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; @@ -880,8 +905,6 @@ B6AE9A561D7F53C100AC007F /* perf_async_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_async_bench.m; sourceTree = ""; }; B6AE9A581D7F53CB00AC007F /* perf_bench.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = perf_bench.m; sourceTree = ""; }; B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_pthread_root_queue.c; sourceTree = ""; }; - C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_dyld_stub.a; sourceTree = BUILT_PRODUCTS_DIR; }; - C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = "libdispatch-dyld-stub.xcconfig"; sourceTree = ""; }; C01866BD1C5973210040FC07 /* libdispatch.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch.a; sourceTree = BUILT_PRODUCTS_DIR; }; C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-mp-static.xcconfig"; sourceTree = ""; }; C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
text.script.sh; path = "run-on-install.sh"; sourceTree = ""; }; @@ -989,6 +1012,7 @@ E44DB71E11D2FF080074F2AD /* Build Support */, 6E9B6AE21BB39793009E324D /* OS Public Headers */, E4EB4A2914C35F1800AA0FA9 /* OS Private Headers */, + 9BCAF77023A8544100E4F685 /* OS Project Headers */, FC7BEDAA0E83625200161930 /* Dispatch Public Headers */, FC7BEDAF0E83626100161930 /* Dispatch Private Headers */, FC7BEDB60E8363DC00161930 /* Dispatch Project Headers */, @@ -1009,6 +1033,8 @@ 08FB7795FE84155DC02AAC07 /* Dispatch Source */ = { isa = PBXGroup; children = ( + 9B815576234AFC9800DB5CA3 /* workgroup.c */, + 9B2A588023A412B400A7BB27 /* eventlink.c */, 2BBF5A62154B64F5002B20F9 /* allocator.c */, 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, @@ -1051,7 +1077,6 @@ E4EC122D12514715000DDBD1 /* libdispatch_mp.a */, E49BB6F21E70748100868613 /* libdispatch_armv81.a */, C01866BD1C5973210040FC07 /* libdispatch.a */, - C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */, 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, E43B889A2241F19000215272 /* libdispatch.dylib */, @@ -1094,7 +1119,13 @@ 6E9B6AE21BB39793009E324D /* OS Public Headers */ = { isa = PBXGroup; children = ( + 9B38A012234C6D0400E6B90F /* workgroup_base.h */, + 9BA7221023E293CB0058472E /* workgroup_parallel.h */, + 9B81556E234AF0D200DB5CA3 /* workgroup.h */, + 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */, + 9B8ED5782350C79100507521 /* workgroup_object.h */, E4EB4A2614C35ECE00AA0FA9 /* object.h */, + 9B3713F123D24594001C5C88 /* clock.h */, ); name = "OS Public Headers"; path = os; @@ -1203,6 +1234,16 @@ name = Products; sourceTree = ""; }; + 9BCAF77023A8544100E4F685 /* OS Project Headers */ = { + isa = PBXGroup; + children = ( + 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */, + 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */, + ); + name = "OS Project Headers"; + path = os; + sourceTree = ""; + }; 
C6A0FF2B0290797F04C91782 /* Documentation */ = { isa = PBXGroup; children = ( @@ -1243,7 +1284,6 @@ E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */, E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */, - C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */, 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, @@ -1304,11 +1344,15 @@ isa = PBXGroup; children = ( E454569214746F1B00106147 /* object_private.h */, + 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */, 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */, 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */, 6E9955571C3AF7710071D40C /* venture_private.h */, + 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */, + 9BA656DF236BB55000D13FAE /* workgroup_private.h */, E44A8E711805C473009FFDB6 /* voucher_private.h */, E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, + 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */, ); name = "OS Private Headers"; path = os; @@ -1430,18 +1474,23 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */, + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */, FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */, E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, E4B3C3FE18C50D000039F49F /* 
voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, @@ -1452,6 +1501,8 @@ 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, E4D76A9318E325D200B1F98B /* block.h in Headers */, + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */, + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, C90144661C73A9F6002638FC /* module.modulemap in Headers */, @@ -1461,9 +1512,11 @@ FC7BED9E0E8361E600161930 /* internal.h in Headers */, B609581E221DFA2A00F39D1F /* workloop.h in Headers */, 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, + 9B3713F623D24594001C5C88 /* clock.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, + 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */, 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */, E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, @@ -1486,6 +1539,7 @@ 6E5ACCBA1D3C4D0B007DA2B4 /* event_internal.h in Headers */, 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, + 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */, E454569314746F1B00106147 /* object_private.h in Headers */, B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */, 
6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */, @@ -1501,6 +1555,7 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */, E43B88322241F19000215272 /* dispatch.h in Headers */, E43B88332241F19000215272 /* base.h in Headers */, E43B88342241F19000215272 /* object.h in Headers */, @@ -1511,10 +1566,15 @@ E43B88392241F19000215272 /* venture_private.h in Headers */, E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */, E43B883B2241F19000215272 /* semaphore.h in Headers */, + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */, + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */, + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */, + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */, E43B883C2241F19000215272 /* group.h in Headers */, E43B883D2241F19000215272 /* priority.h in Headers */, E43B883E2241F19000215272 /* once.h in Headers */, E43B883F2241F19000215272 /* io.h in Headers */, + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */, E43B88402241F19000215272 /* voucher_internal.h in Headers */, E43B88412241F19000215272 /* module.modulemap in Headers */, E43B88422241F19000215272 /* atomic_sfb.h in Headers */, @@ -1526,7 +1586,10 @@ E43B88482241F19000215272 /* data_private.h in Headers */, E43B88492241F19000215272 /* queue_private.h in Headers */, E43B884A2241F19000215272 /* module.modulemap in Headers */, + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */, + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */, E43B884B2241F19000215272 /* source_private.h in Headers */, + 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */, E43B884C2241F19000215272 /* target.h in Headers */, E43B884D2241F19000215272 /* benchmark.h in Headers */, E43B884E2241F19000215272 /* internal.h in Headers */, @@ -1572,11 +1635,16 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( 
+ 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */, 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */, E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */, + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */, + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */, + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */, + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1585,6 +1653,7 @@ E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */, E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, E49F24B1125D57FA0057C971 /* group.h in Headers */, + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */, E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */, @@ -1592,10 +1661,14 @@ E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in Headers */, E49F24B5125D57FA0057C971 /* time.h in Headers */, + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers */, + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */, E4D76A9418E325D200B1F98B /* block.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, + 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */, + 9BE3E58523CE638D006FE059 /* (null) in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, E49F24BA125D57FA0057C971 /* internal.h in Headers */, E49F24BC125D57FA0057C971 /* 
object_internal.h in Headers */, @@ -1640,6 +1713,8 @@ files = ( E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, E44F9DAF16544026001DCD38 /* internal.h in Headers */, + 9BFD342E23C94F4E00B08420 /* eventlink_internal.h in Headers */, + 9BE3E58623CE63A3006FE059 /* (null) in Headers */, E421E5F91716ADA10090DC9B /* introspection.h in Headers */, 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */, E44F9DB216544032001DCD38 /* object_internal.h in Headers */, @@ -1727,28 +1802,11 @@ productReference = 6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */; productType = "com.apple.product-type.library.static"; }; - C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */ = { - isa = PBXNativeTarget; - buildConfigurationList = C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */; - buildPhases = ( - C00B0DF11C5AEBBE000330B3 /* Sources */, - C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */, - ); - buildRules = ( - ); - dependencies = ( - ); - name = "libdispatch dyld stub"; - productName = libdispatch; - productReference = C00B0E0A1C5AEBBE000330B3 /* libdispatch_dyld_stub.a */; - productType = "com.apple.product-type.library.static"; - }; C01866A41C5973210040FC07 /* libdispatch mp static */ = { isa = PBXNativeTarget; buildConfigurationList = C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */; buildPhases = ( C01866A51C5973210040FC07 /* Sources */, - C01866C01C59777B0040FC07 /* Symlink to the loaderd path */, ); buildRules = ( ); @@ -1779,7 +1837,6 @@ E47D6ECD125FEBA10070D91C /* PBXTargetDependency */, E49BB6F81E7074C100868613 /* PBXTargetDependency */, C01866C21C597AEA0040FC07 /* PBXTargetDependency */, - C00B0E141C5AEED6000330B3 /* PBXTargetDependency */, ); name = libdispatch; productName = libdispatch; @@ -1793,6 +1850,7 @@ E43B88312241F19000215272 /* Headers */, E43B88712241F19000215272 /* Sources */, 
E43B88922241F19000215272 /* Install Headers */, + 9BE52551238748C60041C2A0 /* ShellScript */, ); buildRules = ( ); @@ -1882,7 +1940,6 @@ 08FB7793FE84155DC02AAC07 /* Project object */ = { isa = PBXProject; attributes = { - BuildIndependentTargetsInParallel = YES; DefaultBuildSystemTypeForWorkspace = Latest; LastUpgradeCheck = 1100; TargetAttributes = { @@ -1915,9 +1972,6 @@ CreatedOnToolsVersion = 9.3; ProvisioningStyle = Automatic; }; - C00B0DF01C5AEBBE000330B3 = { - ProvisioningStyle = Manual; - }; C01866A41C5973210040FC07 = { ProvisioningStyle = Manual; }; @@ -1973,7 +2027,6 @@ E4B51595164B2DA300E003AF /* libdispatch introspection */, E43B88262241F19000215272 /* libdispatch_driverkit */, C01866A41C5973210040FC07 /* libdispatch mp static */, - C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */, 6E43553E215B5D9D00C13177 /* libdispatch_introspection */, 6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, @@ -2069,38 +2122,25 @@ shellScript = ". \"${SRCROOT}/xcodescripts/check-order.sh\"\n"; showEnvVarsInLog = 0; }; - C00B0E061C5AEBBE000330B3 /* Symlink libdispatch.a -> libdispatch_dyld_target.a */ = { + 9BE52551238748C60041C2A0 /* ShellScript */ = { isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; + buildActionMask = 8; files = ( ); - inputPaths = ( - "$(SRCROOT)/xcodescripts/run-on-install.sh", - ); - name = "Symlink libdispatch.a -> libdispatch_dyld_target.a"; - outputPaths = ( - "${DSTROOT}${INSTALL_PATH}/libdispatch.a", - ); - runOnlyForDeploymentPostprocessing = 0; - shellPath = "/bin/bash -e"; - shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ${PRODUCT_NAME}.a ${SCRIPT_OUTPUT_FILE_0}"; - showEnvVarsInLog = 0; - }; - C01866C01C59777B0040FC07 /* Symlink to the loaderd path */ = { - isa = PBXShellScriptBuildPhase; - buildActionMask = 2147483647; - files = ( + inputFileListPaths = ( ); inputPaths = ( - "$(SRCROOT)/xcodescripts/run-on-install.sh", + "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + ); + outputFileListPaths = ( ); - name = "Symlink to the loaderd path"; outputPaths = ( - "${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a", ); - runOnlyForDeploymentPostprocessing = 0; + runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/${PRODUCT_NAME}.a ${DSTROOT}/usr/local/lib/loaderd/${PRODUCT_NAME}.a"; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4128EB213B9612700ABB2CB /* Postprocess Headers */ = { @@ -2110,13 +2150,16 @@ ); inputPaths = ( "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + "$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h", ); name = "Postprocess Headers"; outputPaths = ( ); runOnlyForDeploymentPostprocessing = 1; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E421E5FC1716B8E10090DC9B /* Install DTrace Header */ = { @@ -2145,18 +2188,23 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/eventlink_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; @@ -2170,6 +2218,10 @@ ); inputPaths = ( "$(SRCROOT)/xcodescripts/postprocess-headers.sh", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_object.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_interval.h", + "$(DSTROOT)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/eventlink_private.h", + "$(DSTROOT)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/workgroup_parallel.h", ); name = "Postprocess Headers"; outputPaths = ( @@ -2217,7 +2269,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; 
E49F24D7125D57FA0057C971 /* Install Manpages */ = { @@ -2244,22 +2296,27 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", + "$(SRCROOT)/os/eventlink_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4EB4A3014C3A14000AA0FA9 /* Install Headers */ = { @@ -2270,22 +2327,28 @@ inputPaths = ( "$(SRCROOT)/xcodescripts/install-headers.sh", "$(SRCROOT)/os/object.h", + "$(SRCROOT)/os/workgroup.h", + "$(SRCROOT)/os/workgroup_base.h", + "$(SRCROOT)/os/workgroup_interval.h", + "$(SRCROOT)/os/workgroup_object.h", + "$(SRCROOT)/os/workgroup_parallel.h", + "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/workgroup_private.h", + "$(SRCROOT)/os/workgroup_interval_private.h", + "$(SRCROOT)/os/eventlink_private.h", + "$(SRCROOT)/os/workgroup_object_private.h", ); name = "Install Headers"; outputPaths = ( - "$(CONFIGURATION_BUILD_DIR)/$(OS_PUBLIC_HEADERS_FOLDER_PATH)/object.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/object_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/venture_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_private.h", - "$(CONFIGURATION_BUILD_DIR)/$(OS_PRIVATE_HEADERS_FOLDER_PATH)/voucher_activity_private.h", ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_0}\""; + shellScript = ". \"${SCRIPT_INPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4EC121712514715000DDBD1 /* Mig Headers */ = { @@ -2310,7 +2373,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = ". \"${SCRIPT_INPUT_FILE_3}\""; + shellScript = ". 
\"${SCRIPT_INPUT_FILE_3}\"\n"; showEnvVarsInLog = 0; }; E4EC122512514715000DDBD1 /* Symlink normal variant */ = { @@ -2326,7 +2389,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = "/bin/bash -e"; - shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\""; + shellScript = "ln -fs \"${PRODUCT_NAME}.a\" \"${SCRIPT_OUTPUT_FILE_0}\"\n"; showEnvVarsInLog = 0; }; E4FB8E8F218CD68A004B7A25 /* Install Plists */ = { @@ -2372,44 +2435,12 @@ ); runOnlyForDeploymentPostprocessing = 0; }; - C00B0DF11C5AEBBE000330B3 /* Sources */ = { - isa = PBXSourcesBuildPhase; - buildActionMask = 2147483647; - files = ( - 6E9C6CAD20F9848F00EA81C0 /* yield.c in Sources */, - C00B0DF21C5AEBBE000330B3 /* protocol.defs in Sources */, - C00B0DF71C5AEBBE000330B3 /* firehose.defs in Sources */, - C00B0DFA1C5AEBBE000330B3 /* firehose_reply.defs in Sources */, - C00B0DF31C5AEBBE000330B3 /* resolver.c in Sources */, - C00B0DF41C5AEBBE000330B3 /* init.c in Sources */, - C00B0DFE1C5AEBBE000330B3 /* object.c in Sources */, - C00B0DF81C5AEBBE000330B3 /* block.cpp in Sources */, - 6EF2CAB31C8899ED001ABE83 /* lock.c in Sources */, - C00B0DF91C5AEBBE000330B3 /* semaphore.c in Sources */, - C00B0DFB1C5AEBBE000330B3 /* once.c in Sources */, - C00B0DF51C5AEBBE000330B3 /* queue.c in Sources */, - C00B0DFD1C5AEBBE000330B3 /* apply.c in Sources */, - C00B0E001C5AEBBE000330B3 /* source.c in Sources */, - 6E4BACC81D48A42400B562AE /* mach.c in Sources */, - 6EA9629E1D48622C00759D53 /* event.c in Sources */, - 6EA962A61D48625500759D53 /* event_kevent.c in Sources */, - 6E4BACFC1D49A04A00B562AE /* event_epoll.c in Sources */, - C00B0DFC1C5AEBBE000330B3 /* voucher.c in Sources */, - C00B0DF61C5AEBBE000330B3 /* firehose_buffer.c in Sources */, - C00B0E031C5AEBBE000330B3 /* io.c in Sources */, - C00B0E021C5AEBBE000330B3 /* data.c in Sources */, - C00B0E041C5AEBBE000330B3 /* transform.c in Sources */, - C00B0E011C5AEBBE000330B3 /* time.c in Sources */, - C00B0E051C5AEBBE000330B3 /* allocator.c in 
Sources */, - C00B0DFF1C5AEBBE000330B3 /* benchmark.c in Sources */, - E49BB70A1E70A3B000868613 /* venture.c in Sources */, - ); - runOnlyForDeploymentPostprocessing = 0; - }; C01866A51C5973210040FC07 /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */, + 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */, 6E9C6CAC20F9848E00EA81C0 /* yield.c in Sources */, C01866A61C5973210040FC07 /* protocol.defs in Sources */, C01866AB1C5973210040FC07 /* firehose.defs in Sources */, @@ -2456,9 +2487,11 @@ 6EF2CAAC1C8899D5001ABE83 /* lock.c in Sources */, 721F5CCF0F15553500FF03A6 /* semaphore.c in Sources */, 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */, + 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */, FC7BED990E8361E600161930 /* queue.c in Sources */, 9676A0E10F3E755D00713ADB /* apply.c in Sources */, 96A8AA870F41E7A400CD570B /* source.c in Sources */, + 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */, 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */, 6E4BACBD1D48A41500B562AE /* mach.c in Sources */, 6EA962971D48622600759D53 /* event.c in Sources */, @@ -2481,6 +2514,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77523A8552600E4F685 /* eventlink.c in Sources */, + 9BE525502387480F0041C2A0 /* workgroup.c in Sources */, E43B88722241F19000215272 /* provider.d in Sources */, E43B88732241F19000215272 /* protocol.defs in Sources */, E43B88742241F19000215272 /* firehose.defs in Sources */, @@ -2518,6 +2553,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */, + 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */, E49BB6D11E70748100868613 /* provider.d in Sources */, E49BB6D21E70748100868613 /* protocol.defs in Sources */, E49BB6D41E70748100868613 /* firehose.defs in Sources */, @@ -2555,6 +2592,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 
9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */, + 9BE3E56F23CE62BB006FE059 /* workgroup.c in Sources */, E43570BA126E93380097AB9F /* provider.d in Sources */, E49F24C8125D57FA0057C971 /* protocol.defs in Sources */, 6ED64B461BBD89AF00C35F4D /* firehose.defs in Sources */, @@ -2592,6 +2631,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77423A8551E00E4F685 /* eventlink.c in Sources */, + 9B8ED5A6235183D100507521 /* workgroup.c in Sources */, E4B515BD164B2DA300E003AF /* provider.d in Sources */, E4B515BE164B2DA300E003AF /* protocol.defs in Sources */, 6ED64B481BBD89B100C35F4D /* firehose.defs in Sources */, @@ -2630,6 +2671,8 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */, + 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */, E417A38512A472C5004D659D /* provider.d in Sources */, E44EBE5612517EBE00645D88 /* protocol.defs in Sources */, 6EBEC7E51BBDD30C009B1596 /* firehose.defs in Sources */, @@ -2706,11 +2749,6 @@ name = ddt; targetProxy = 9BEBA57720127D4400E6FD0D /* PBXContainerItemProxy */; }; - C00B0E141C5AEED6000330B3 /* PBXTargetDependency */ = { - isa = PBXTargetDependency; - target = C00B0DF01C5AEBBE000330B3 /* libdispatch dyld stub */; - targetProxy = C00B0E131C5AEED6000330B3 /* PBXContainerItemProxy */; - }; C01866C21C597AEA0040FC07 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = C01866A41C5973210040FC07 /* libdispatch mp static */; @@ -2901,20 +2939,6 @@ }; name = Debug; }; - C00B0E081C5AEBBE000330B3 /* Release */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; - buildSettings = { - }; - name = Release; - }; - C00B0E091C5AEBBE000330B3 /* Debug */ = { - isa = XCBuildConfiguration; - baseConfigurationReference = C00B0E121C5AEBF7000330B3 /* libdispatch-dyld-stub.xcconfig */; - buildSettings = { - }; - name = Debug; - }; C01866BB1C5973210040FC07 
/* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */; @@ -3157,15 +3181,6 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; - C00B0E071C5AEBBE000330B3 /* Build configuration list for PBXNativeTarget "libdispatch dyld stub" */ = { - isa = XCConfigurationList; - buildConfigurations = ( - C00B0E081C5AEBBE000330B3 /* Release */, - C00B0E091C5AEBBE000330B3 /* Debug */, - ); - defaultConfigurationIsVisible = 0; - defaultConfigurationName = Release; - }; C01866BA1C5973210040FC07 /* Build configuration list for PBXNativeTarget "libdispatch mp static" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/man/dispatch.3 b/man/dispatch.3 index 6e5cfed48..b1c4309d4 100644 --- a/man/dispatch.3 +++ b/man/dispatch.3 @@ -13,14 +13,16 @@ concurrent execution via the core functions described in .Xr dispatch_async 3 and .Xr dispatch_apply 3 . .Pp -Dispatch queues are the basic units of organization of blocks. Several queues -are created by default, and applications may create additional queues for their -own use. See +Dispatch queues are the basic units of organization of blocks. +Several queues are created by default, and applications may create additional +queues for their own use. +See .Xr dispatch_queue_create 3 for more information. .Pp Dispatch groups allow applications to track the progress of blocks submitted to -queues and take action when the blocks complete. See +queues and take action when the blocks complete. +See .Xr dispatch_group_create 3 for more information. .Pp diff --git a/man/dispatch_after.3 b/man/dispatch_after.3 index db34af0e3..7463d1c5f 100644 --- a/man/dispatch_after.3 +++ b/man/dispatch_after.3 @@ -40,6 +40,15 @@ and the time at which the function is called, with the leeway capped to at least .Pp For a more detailed description about submitting blocks to queues, see .Xr dispatch_async 3 . 
+.Sh FUNDAMENTALS +The +.Fn dispatch_after +function is a wrapper around +.Fn dispatch_after_f . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_time 3 .Sh CAVEATS .Fn dispatch_after retains the passed queue. @@ -57,13 +66,3 @@ The result of passing as the .Fa when parameter is undefined. -.Pp -.Sh FUNDAMENTALS -The -.Fn dispatch_after -function is a wrapper around -.Fn dispatch_after_f . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_api.3 b/man/dispatch_api.3 index 912338672..c82425051 100644 --- a/man/dispatch_api.3 +++ b/man/dispatch_api.3 @@ -11,15 +11,17 @@ consider when designing and implementing API in terms of dispatch queues and blocks. .Pp A general recommendation is to allow both a callback block and target dispatch -queue to be specified. This gives the application the greatest flexibility in -handling asynchronous events. +queue to be specified. +This gives the application the greatest flexibility in handling asynchronous +events. .Pp It's also recommended that interfaces take only a single block as the last -parameter. This is both for consistency across projects, as well as the visual -aesthetics of multiline blocks that are declared inline. The dispatch queue to -which the block will be submitted should immediately precede the block argument -(second-to-last argument). For example: -.Pp +parameter. +This is both for consistency across projects, as well as the visual aesthetics +of multiline blocks that are declared inline. +The dispatch queue to which the block will be submitted should immediately +precede the block argument (second-to-last argument). +For example: .Bd -literal -offset indent read_async(file, callback_queue, ^{ printf("received callback.\\n"); @@ -34,10 +36,8 @@ pointer, and a new last parameter is added, which is the function to call. 
The function based callback should pass the context pointer as the first argument, and the subsequent arguments should be identical to the block based variant (albeit offset by one in order). -.Pp -It is also important to use consistent naming. The dispatch API, for example, -uses the suffix "_f" for function based variants. -.Pp +It is also important to use consistent naming. +The dispatch API, for example, uses the suffix "_f" for function based variants. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_apply.3 b/man/dispatch_apply.3 index 57c99a8a7..7f3651dfd 100644 --- a/man/dispatch_apply.3 +++ b/man/dispatch_apply.3 @@ -30,14 +30,14 @@ dispatch_apply(iterations, DISPATCH_APPLY_AUTO, ^(size_t idx) { }); .Ed .Pp -Although any queue can be used, it is strongly recommended to use +Although any queue can be used, it is strongly recommended to use .Vt DISPATCH_APPLY_AUTO -as the -.Vt queue +as the +.Vt queue argument to both .Fn dispatch_apply and -.Fn dispatch_apply_f , +.Fn dispatch_apply_f , as shown in the example above, since this allows the system to automatically use worker threads that match the configuration of the current thread as closely as possible. No assumptions should be made about which global concurrent queue will be used. @@ -75,7 +75,8 @@ for (i = count - (count % STRIDE); i < count; i++) { .Ed .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. In other words, the synchronous function borrows the +on the target queue. 
+In other words, the synchronous function borrows the reference of the calling function (this is valid because the calling function is blocked waiting for the result of the synchronous function, and therefore cannot modify the reference count of the target queue until after the @@ -95,7 +96,7 @@ or .Fn dispatch_async_f will incur more overhead and does not express the desired parallel execution semantics to the system, so may not create an optimal number of worker threads for a parallel workload. -For this reason, prefer to use +For this reason, prefer to use .Fn dispatch_apply or .Fn dispatch_apply_f @@ -105,6 +106,10 @@ The .Fn dispatch_apply function is a wrapper around .Fn dispatch_apply_f . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_queue_create 3 .Sh CAVEATS Unlike .Fn dispatch_async , @@ -112,11 +117,7 @@ a block submitted to .Fn dispatch_apply is expected to be either independent or dependent .Em only -on work already performed in lower-indexed invocations of the block. If -the block's index dependency is non-linear, it is recommended to -use a for-loop around invocations of +on work already performed in lower-indexed invocations of the block. +If the block's index dependency is non-linear, it is recommended to use a +for-loop around invocations of .Fn dispatch_async . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_queue_create 3 diff --git a/man/dispatch_async.3 b/man/dispatch_async.3 index 99c532d40..bac733139 100644 --- a/man/dispatch_async.3 +++ b/man/dispatch_async.3 @@ -31,20 +31,23 @@ and .Fn dispatch_sync functions schedule blocks for concurrent execution within the .Xr dispatch 3 -framework. Blocks are submitted to a queue which dictates the policy for their -execution. See +framework. +Blocks are submitted to a queue which dictates the policy for their execution. +See .Xr dispatch_queue_create 3 for more information about creating dispatch queues. 
.Pp These functions support efficient temporal synchronization, background -concurrency and data-level concurrency. These same functions can also be used -for efficient notification of the completion of asynchronous blocks (a.k.a. -callbacks). +concurrency and data-level concurrency. +These same functions can also be used for efficient notification of the +completion of asynchronous blocks (a.k.a. callbacks). .Sh TEMPORAL SYNCHRONIZATION Synchronization is often required when multiple threads of execution access -shared data concurrently. The simplest form of synchronization is -mutual-exclusion (a lock), whereby different subsystems execute concurrently -until a shared critical section is entered. In the +shared data concurrently. +The simplest form of synchronization is mutual-exclusion (a lock), whereby +different subsystems execute concurrently until a shared critical section is +entered. +In the .Xr pthread 3 family of procedures, temporal synchronization is accomplished like so: .Bd -literal -offset indent @@ -60,7 +63,8 @@ assert(r == 0); The .Fn dispatch_sync function may be used with a serial queue to accomplish the same style of -synchronization. For example: +synchronization. +For example: .Bd -literal -offset indent dispatch_sync(my_queue, ^{ // critical section @@ -74,19 +78,21 @@ left without restoring the queue to a reentrant state. The .Fn dispatch_async function may be used to implement deferred critical sections when the result -of the block is not needed locally. Deferred critical sections have the same -synchronization properties as the above code, but are non-blocking and -therefore more efficient to perform. For example: +of the block is not needed locally. +Deferred critical sections have the same synchronization properties as the above +code, but are non-blocking and therefore more efficient to perform. 
+For example: .Bd -literal dispatch_async(my_queue, ^{ // critical section }); .Ed .Sh BACKGROUND CONCURRENCY -.The +The .Fn dispatch_async function may be used to execute trivial background tasks on a global concurrent -queue. For example: +queue. +For example: .Bd -literal dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT,0), ^{ // background operation @@ -98,8 +104,9 @@ This approach is an efficient replacement for .Sh COMPLETION CALLBACKS Completion callbacks can be accomplished via nested calls to the .Fn dispatch_async -function. It is important to remember to retain the destination queue before the -first call to +function. +It is important to remember to retain the destination queue before the first +call to .Fn dispatch_async , and to release that queue at the end of the completion callback to ensure the destination queue is not deallocated while the completion callback is pending. @@ -130,21 +137,24 @@ async_read(object_t obj, .Sh RECURSIVE LOCKS While .Fn dispatch_sync -can replace a lock, it cannot replace a recursive lock. Unlike locks, queues -support both asynchronous and synchronous operations, and those operations are -ordered by definition. A recursive call to +can replace a lock, it cannot replace a recursive lock. +Unlike locks, queues support both asynchronous and synchronous operations, and +those operations are ordered by definition. +A recursive call to .Fn dispatch_sync causes a simple deadlock as the currently executing block waits for the next block to complete, but the next block will not start until the currently running block completes. .Pp -As the dispatch framework was designed, we studied recursive locks. We found -that the vast majority of recursive locks are deployed retroactively when -ill-defined lock hierarchies are discovered. As a consequence, the adoption of -recursive locks often mutates obvious bugs into obscure ones. 
This study also -revealed an insight: if reentrancy is unavoidable, then reader/writer locks are -preferable to recursive locks. Disciplined use of reader/writer locks enable -reentrancy only when reentrancy is safe (the "read" side of the lock). +As the dispatch framework was designed, we studied recursive locks. +We found that the vast majority of recursive locks are deployed retroactively +when ill-defined lock hierarchies are discovered. +As a consequence, the adoption of recursive locks often mutates obvious bugs +into obscure ones. +This study also revealed an insight: if reentrancy is unavoidable, then +reader/writer locks are preferable to recursive locks. +Disciplined use of reader/writer locks enable reentrancy only when reentrancy is +safe (the "read" side of the lock). .Pp Nevertheless, if it is absolutely necessary, what follows is an imperfect way of implementing recursive locks using the dispatch framework: @@ -168,17 +178,17 @@ calls .Fn dispatch_sync against queue B which runs on thread Y which recursively calls .Fn dispatch_sync -against queue A, which deadlocks both examples. This is bug-for-bug compatible -with nontrivial pthread usage. In fact, nontrivial reentrancy is impossible to -support in recursive locks once the ultimate level of reentrancy is deployed -(IPC or RPC). +against queue A, which deadlocks both examples. +This is bug-for-bug compatible with nontrivial pthread usage. +In fact, nontrivial reentrancy is impossible to support in recursive locks once +the ultimate level of reentrancy is deployed (IPC or RPC). .Sh IMPLIED REFERENCES Synchronous functions within the dispatch framework hold an implied reference -on the target queue. 
In other words, the synchronous function borrows the -reference of the calling function (this is valid because the calling function -is blocked waiting for the result of the synchronous function, and therefore -cannot modify the reference count of the target queue until after the -synchronous function has returned). +on the target queue. +In other words, the synchronous function borrows the reference of the calling +function (this is valid because the calling function is blocked waiting for the +result of the synchronous function, and therefore cannot modify the reference +count of the target queue until after the synchronous function has returned). For example: .Bd -literal queue = dispatch_queue_create("com.example.queue", NULL); @@ -199,9 +209,11 @@ Conceptually, is a convenient wrapper around .Fn dispatch_async with the addition of a semaphore to wait for completion of the block, and a -wrapper around the block to signal its completion. See +wrapper around the block to signal its completion. +See .Xr dispatch_semaphore_create 3 -for more information about dispatch semaphores. The actual implementation of the +for more information about dispatch semaphores. +The actual implementation of the .Fn dispatch_sync function may be optimized and differ from the above description. .Pp @@ -226,7 +238,6 @@ parameter is passed to the .Fa function when it is invoked on the target .Fa queue . -.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_apply 3 , diff --git a/man/dispatch_data_create.3 b/man/dispatch_data_create.3 index b3a216e4f..b1a396e47 100644 --- a/man/dispatch_data_create.3 +++ b/man/dispatch_data_create.3 @@ -55,10 +55,12 @@ .Vt dispatch_data_t dispatch_data_empty ; .Sh DESCRIPTION Dispatch data objects are opaque containers of bytes that represent one or more -regions of memory. They are created either from memory buffers managed by the -application or the system or from other dispatch data objects. 
Dispatch data -objects are immutable and the memory regions they represent are required to -remain unchanged for the lifetime of all data objects that reference them. +regions of memory. +They are created either from memory buffers managed by the application or the +system or from other dispatch data objects. +Dispatch data objects are immutable and the memory regions they represent are +required to remain unchanged for the lifetime of all data objects that reference +them. Dispatch data objects avoid copying the represented memory as much as possible. Multiple data objects can represent the same memory regions or subsections thereof. @@ -76,8 +78,8 @@ block will be submitted to the specified when the object reaches the end of its lifecycle, indicating that the system no longer references the .Fa buffer . -This allows the application to deallocate -the associated storage. The +This allows the application to deallocate the associated storage. +The .Fa queue argument is ignored if one of the following predefined destructors is passed: .Bl -tag -width DISPATCH_DATA_DESTRUCTOR_DEFAULT -compact -offset indent @@ -111,26 +113,29 @@ function creates a new data object by mapping the memory represented by the provided .Fa data object as a single contiguous memory region (moving or copying memory as -necessary). If the +necessary). +If the .Fa buffer_ptr and .Fa size_ptr references are not .Dv NULL , they are filled with the location and extent of the contiguous region, allowing -direct read access to the mapped memory. These values are valid only as long as -the newly created object has not been released. +direct read access to the mapped memory. +These values are valid only as long as the newly created object has not been +released. .Sh ACCESS The .Fn dispatch_data_apply function provides read access to represented memory without requiring it to be -mapped as a single contiguous region. 
It traverses the memory regions -represented by the +mapped as a single contiguous region. +It traverses the memory regions represented by the .Fa data argument in logical order, invokes the specified .Fa applier block for each region and returns a boolean indicating whether traversal -completed successfully. The +completed successfully. +The .Fa applier block is passed the following arguments for each memory region and returns a boolean indicating whether traversal should continue: @@ -170,7 +175,8 @@ specified by the argument among the regions represented by the provided .Fa data object and returns a newly created copy of the data object representing that -region. The variable specified by the +region. +The variable specified by the .Fa offset_ptr argument is filled with the logical position where the returned object starts in the @@ -198,17 +204,19 @@ Data objects passed as arguments to a dispatch data .Sy create or .Sy copy -function can be released when the function returns. The newly created object -holds implicit references to their constituent memory regions as necessary. +function can be released when the function returns. +The newly created object holds implicit references to their constituent memory +regions as necessary. .Pp The functions .Fn dispatch_data_create_map and .Fn dispatch_data_apply return an interior pointer to represented memory that is only valid as long as -an associated object has not been released. When Objective-C Automated -Reference Counting is enabled, care needs to be taken if that object is held in -a variable with automatic storage. It may need to be annotated with the +an associated object has not been released. +When Objective-C Automated Reference Counting is enabled, care needs to be taken +if that object is held in a variable with automatic storage. 
+It may need to be annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong @@ -216,5 +224,5 @@ instance variable instead, to ensure that the object is not released prematurely before memory accesses via the interior pointer have been completed. .Sh SEE ALSO .Xr dispatch 3 , -.Xr dispatch_object 3 , -.Xr dispatch_io_read 3 +.Xr dispatch_io_read 3 , +.Xr dispatch_object 3 diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index d82391e82..fc98fb09b 100644 --- a/man/dispatch_group_create.3 +++ b/man/dispatch_group_create.3 @@ -68,7 +68,8 @@ has elapsed. If the .Fa group becomes empty within the specified amount of time, the function will return zero -indicating success. Otherwise, a non-zero return code will be returned. +indicating success. +Otherwise, a non-zero return code will be returned. When .Va DISPATCH_TIME_FOREVER is passed as the @@ -93,7 +94,8 @@ notification is pending, therefore it is valid to release the .Fa group after setting a notification block. The group will be empty at the time the notification block is submitted to the -target queue. The group may either be released with +target queue. +The group may either be released with .Fn dispatch_release or reused for additional operations. .Pp @@ -141,12 +143,19 @@ functions are wrappers around and .Fn dispatch_group_notify_f respectively. +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_async 3 , +.Xr dispatch_object 3 , +.Xr dispatch_queue_create 3 , +.Xr dispatch_semaphore_create 3 , +.Xr dispatch_time 3 .Sh CAVEATS In order to ensure deterministic behavior, it is recommended to call .Fn dispatch_group_wait -only once all blocks have been submitted to the group. 
+If it is later determined that new blocks should be run, it is recommended not +to reuse an already-running group, but to create a new group. .Pp .Fn dispatch_group_wait returns as soon as there are exactly zero @@ -155,26 +164,21 @@ blocks associated with a group (more precisely, as soon as every .Fn dispatch_group_enter call has been balanced by a .Fn dispatch_group_leave -call). If one thread waits for a group while another thread submits -new blocks to the group, then the count of associated blocks might -momentarily reach zero before all blocks have been submitted. If this happens, +call). +If one thread waits for a group while another thread submits new blocks to the +group, then the count of associated blocks might momentarily reach zero before +all blocks have been submitted. +If this happens, .Fn dispatch_group_wait will return too early: some blocks associated with the group have finished, but some have not yet been submitted or run. .Pp However, as a special case, a block associated with a group may submit new -blocks associated with its own group. In this case, the behavior is -deterministic: a waiting thread will +blocks associated with its own group. +In this case, the behavior is deterministic: a waiting thread will .Em not wake up until the newly submitted blocks have also finished. .Pp All of the foregoing also applies to .Fn dispatch_group_notify as well, with "block to be submitted" substituted for "waiting thread". -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_async 3 , -.Xr dispatch_object 3 , -.Xr dispatch_queue_create 3 , -.Xr dispatch_semaphore_create 3 , -.Xr dispatch_time 3 diff --git a/man/dispatch_io_create.3 b/man/dispatch_io_create.3 index 83e551401..7e2f99879 100644 --- a/man/dispatch_io_create.3 +++ b/man/dispatch_io_create.3 @@ -57,18 +57,22 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. 
+It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to create and configure dispatch I/O -channels. Reading from and writing to these channels is covered in the +channels. +Reading from and writing to these channels is covered in the .Xr dispatch_io_read 3 -page. The dispatch I/O framework also provides the convenience functions +page. +The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 @@ -82,16 +86,17 @@ Dispatch I/O channels can have one of the following types: .Bl -tag -width DISPATCH_IO_STREAM -compact -offset indent .It DISPATCH_IO_STREAM channels that represent a stream of bytes and do not support reads and writes -at arbitrary offsets, such as pipes or sockets. Channels of this type perform -read and write operations sequentially at the current file pointer position and -ignore any offset specified. Depending on the underlying file descriptor, read -operations may be performed simultaneously with write operations. +at arbitrary offsets, such as pipes or sockets. +Channels of this type perform read and write operations sequentially at the +current file pointer position and ignore any offset specified. 
+Depending on the underlying file descriptor, read operations may be performed +simultaneously with write operations. .It DISPATCH_IO_RANDOM -channels that represent random access files on disk. Only supported for -seekable file descriptors and paths. Channels of this type may perform -submitted read and write operations concurrently at the specified offset -(interpreted relative to the position of the file pointer when the channel was -created). +channels that represent random access files on disk. +Only supported for seekable file descriptors and paths. +Channels of this type may perform submitted read and write operations +concurrently at the specified offset (interpreted relative to the position of +the file pointer when the channel was created). .El .Sh CHANNEL OPENING AND CLOSING The @@ -102,13 +107,13 @@ functions create a dispatch I/O channel of provided .Fa type from a file descriptor .Fa fd -or an absolute pathname, respectively. They can be thought of as analogous to -the +or an absolute pathname, respectively. +They can be thought of as analogous to the .Xr fdopen 3 POSIX function and the .Xr fopen 3 -function in the standard C library. For a channel created from a pathname, the -provided +function in the standard C library. +For a channel created from a pathname, the provided .Fa path , .Fa oflag and @@ -122,20 +127,22 @@ The provided block will be submitted to the specified .Fa queue when all I/O operations on the channel have completed and it is closed or -reaches the end of its lifecycle. If an error occurs during channel creation, -the +reaches the end of its lifecycle. +If an error occurs during channel creation, the .Fa cleanup_handler block will be submitted immediately and passed an .Fa error -parameter with the POSIX error encountered. If an invalid +parameter with the POSIX error encountered. 
+If an invalid .Fa type or a non-absolute .Fa path argument is specified, these functions will return NULL and the .Fa cleanup_handler -will not be invoked. After successfully creating a dispatch I/O channel from a -file descriptor, the application must take care not to modify that file -descriptor until the associated +will not be invoked. +After successfully creating a dispatch I/O channel from a file descriptor, the +application must take care not to modify that file descriptor until the +associated .Fa cleanup_handler is invoked, see .Sx "FILEDESCRIPTOR OWNERSHIP" @@ -143,14 +150,15 @@ for details. .Pp The .Fn dispatch_io_close -function closes a dispatch I/O channel to new submissions of I/O operations. If +function closes a dispatch I/O channel to new submissions of I/O operations. +If .Dv DISPATCH_IO_STOP is passed in the .Fa flags parameter, the system will in addition not perform the I/O operations already submitted to the channel that are still pending and will make a best effort to -interrupt any ongoing operations. Handlers for operations so affected will be -passed the +interrupt any ongoing operations. +Handlers for operations so affected will be passed the .Er ECANCELED error code, along with any partial results. .Sh CHANNEL CONFIGURATION @@ -164,8 +172,7 @@ and .Fn dispatch_io_set_low_water functions configure the water mark settings of a .Fa channel . -The system will read -or write at least the number of bytes specified by +The system will read or write at least the number of bytes specified by .Fa low_water before submitting an I/O handler with partial results, and will make a best effort to submit an I/O handler as soon as the number of bytes read or written @@ -176,17 +183,18 @@ The .Fn dispatch_io_set_interval function configures the time .Fa interval -at which I/O handlers are submitted (measured in nanoseconds). If +at which I/O handlers are submitted (measured in nanoseconds). 
+If .Dv DISPATCH_IO_STRICT_INTERVAL is passed in the .Fa flags parameter, the interval will be strictly observed even if there is an insufficient amount of data to deliver; otherwise delivery will be skipped for intervals where the amount of available data is inferior to the channel's -low-water mark. Note that the system may defer enqueueing interval I/O handlers +low-water mark. +Note that the system may defer enqueueing interval I/O handlers by a small unspecified amount of leeway in order to align with other system activity for improved system performance or power consumption. -.Pp .Sh DATA DELIVERY The size of data objects passed to I/O handlers for a channel will never be larger than the high-water mark set on the channel; it will also never be @@ -202,53 +210,57 @@ the channel has an interval with the flag set .El Bear in mind that dispatch I/O channels will typically deliver amounts of data -significantly higher than the low-water mark. The default value for the -low-water mark is unspecified, but must be assumed to allow intermediate -handler invocations. The default value for the high-water mark is -unlimited (i.e.\& +significantly higher than the low-water mark. +The default value for the low-water mark is unspecified, but must be assumed to +allow intermediate handler invocations. +The default value for the high-water mark is unlimited (i.e.\& .Dv SIZE_MAX ) . Channels that require intermediate results of fixed size should have both the -low-water and the high-water mark set to that size. Channels that do not wish -to receive any intermediate results should have the low-water mark set to +low-water and the high-water mark set to that size. +Channels that do not wish to receive any intermediate results should have the +low-water mark set to .Dv SIZE_MAX . 
-.Pp .Sh FILEDESCRIPTOR OWNERSHIP When an application creates a dispatch I/O channel from a file descriptor with the .Fn dispatch_io_create function, the system takes control of that file descriptor until the channel is closed, an error occurs on the file descriptor or all references to the channel -are released. At that time the channel's cleanup handler will be enqueued and -control over the file descriptor relinquished, making it safe for the -application to +are released. +At that time the channel's cleanup handler will be enqueued and control over the +file descriptor relinquished, making it safe for the application to .Xr close 2 -the file descriptor. While a file descriptor is under the control of a dispatch -I/O channel, file descriptor flags such as +the file descriptor. +While a file descriptor is under the control of a dispatch I/O channel, file +descriptor flags such as .Dv O_NONBLOCK -will be modified by the system on behalf of the application. It is an error for -the application to modify a file descriptor directly while it is under the -control of a dispatch I/O channel, but it may create further I/O channels -from that file descriptor or use the +will be modified by the system on behalf of the application. +It is an error for the application to modify a file descriptor directly while it +is under the control of a dispatch I/O channel, but it may create further I/O +channels from that file descriptor or use the .Xr dispatch_read 3 and .Xr dispatch_write 3 -convenience functions with that file descriptor. If multiple I/O channels have +convenience functions with that file descriptor. +If multiple I/O channels have been created from the same file descriptor, all the associated cleanup handlers will be submitted together once the last channel has been closed resp.\& all -references to those channels have been released. 
If convenience functions have -also been used on that file descriptor, submission of their handlers will be -tied to the submission of the channel cleanup handlers as well. -.Pp +references to those channels have been released. +If convenience functions have also been used on that file descriptor, submission +of their handlers will be tied to the submission of the channel cleanup handlers +as well. .Sh BARRIER OPERATIONS The .Fn dispatch_io_barrier -function schedules a barrier operation on an I/O channel. The specified barrier -block will be run once, after all current I/O operations (such as -.Xr read 2 or +function schedules a barrier operation on an I/O channel. +The specified barrier block will be run once, after all current I/O operations +(such as +.Xr read 2 +or .Xr write 2 ) on the underlying -file descriptor have finished. No new I/O operations will start until the -barrier block finishes. +file descriptor have finished. +No new I/O operations will start until the barrier block finishes. .Pp The barrier block may operate on the underlying file descriptor with functions like @@ -266,17 +278,17 @@ There is no synchronization between a barrier block and any .Xr dispatch_io_read 3 or .Xr dispatch_io_write 3 -handler blocks; they may be running at the same time. The barrier block itself -is responsible for any required synchronization. +handler blocks; they may be running at the same time. +The barrier block itself is responsible for any required synchronization. .Sh MEMORY MODEL Dispatch I/O channel objects are retained and released via calls to .Fn dispatch_retain and .Fn dispatch_release . 
.Sh SEE ALSO +.Xr open 2 , .Xr dispatch 3 , .Xr dispatch_io_read 3 , .Xr dispatch_object 3 , .Xr dispatch_read 3 , -.Xr fopen 3 , -.Xr open 2 +.Xr fopen 3 diff --git a/man/dispatch_io_read.3 b/man/dispatch_io_read.3 index 26a11e894..3cff4faf8 100644 --- a/man/dispatch_io_read.3 +++ b/man/dispatch_io_read.3 @@ -26,30 +26,34 @@ .Fc .Sh DESCRIPTION The dispatch I/O framework is an API for asynchronous read and write I/O -operations. It is an application of the ideas and idioms present in the +operations. +It is an application of the ideas and idioms present in the .Xr dispatch 3 -framework to device I/O. Dispatch I/O enables an application to more easily -avoid blocking I/O operations and allows it to more directly express its I/O -requirements than by using the raw POSIX file API. Dispatch I/O will make a -best effort to optimize how and when asynchronous I/O operations are performed -based on the capabilities of the targeted device. +framework to device I/O. +Dispatch I/O enables an application to more easily avoid blocking I/O operations +and allows it to more directly express its I/O requirements than by using the +raw POSIX file API. +Dispatch I/O will make a best effort to optimize how and when asynchronous I/O +operations are performed based on the capabilities of the targeted device. .Pp This page provides details on how to read from and write to dispatch I/O -channels. Creation and configuration of these channels is covered in the +channels. +Creation and configuration of these channels is covered in the .Xr dispatch_io_create 3 -page. The dispatch I/O framework also provides the convenience functions +page. +The dispatch I/O framework also provides the convenience functions .Xr dispatch_read 3 and .Xr dispatch_write 3 for uses that do not require the full functionality provided by I/O channels. 
-.Pp .Sh FUNDAMENTALS The .Fn dispatch_io_read and .Fn dispatch_io_write functions are used to perform asynchronous read and write operations on -dispatch I/O channels. They can be thought of as asynchronous versions of the +dispatch I/O channels. +They can be thought of as asynchronous versions of the .Xr fread 3 and .Xr fwrite 3 @@ -68,7 +72,8 @@ been read since the handler's previous invocation. .Pp The .Va offset -parameter indicates where the read operation should begin. For a channel of +parameter indicates where the read operation should begin. +For a channel of .Dv DISPATCH_IO_RANDOM type it is interpreted relative to the position of the file pointer when the channel was created, for a channel of @@ -79,7 +84,8 @@ pointer position. The .Va length parameter indicates the number of bytes that should be read from the I/O -channel. Pass +channel. +Pass .Dv SIZE_MAX to keep reading until EOF is encountered (for a channel created from a disk-based file this happens when reading past the end of the physical file). @@ -97,14 +103,14 @@ remains to be written as part of this I/O operation. .Pp The .Va offset -parameter indicates where the write operation should begin. It is interpreted -as for read operations above. +parameter indicates where the write operation should begin. +It is interpreted as for read operations above. .Pp The .Va data parameter specifies the location and amount of data to be written, encapsulated -as a dispatch data object. The object is retained by the system until the write -operation is complete. +as a dispatch data object. +The object is retained by the system until the write operation is complete. .Sh I/O HANDLER BLOCKS Dispatch I/O handler blocks submitted to a channel via the .Fn dispatch_io_read @@ -113,9 +119,9 @@ or functions will be executed one or more times depending on system load and the channel's configuration settings (see .Xr dispatch_io_create 3 -for details). 
The handler block need not be reentrant safe, -no new I/O handler instance is submitted until the previously enqueued handler -block has returned. +for details). +The handler block need not be reentrant safe, no new I/O handler instance is +submitted until the previously enqueued handler block has returned. .Pp The dispatch .Va data @@ -129,12 +135,14 @@ for details). Once an I/O handler block is invoked with the .Va done flag set, the associated I/O operation is complete and that handler block will -not be run again. If an unrecoverable error occurs while performing the I/O -operation, the handler block will be submitted with the +not be run again. +If an unrecoverable error occurs while performing the I/O operation, the handler +block will be submitted with the .Va done flag set and the appropriate POSIX error code in the .Va error -parameter. An invocation of a handler block with the +parameter. +An invocation of a handler block with the .Va done flag set, zero .Va error diff --git a/man/dispatch_object.3 b/man/dispatch_object.3 index cddcf32aa..03c29b030 100644 --- a/man/dispatch_object.3 +++ b/man/dispatch_object.3 @@ -53,13 +53,13 @@ and respectively. .Pp The dispatch framework does not guarantee that any given client has the last or -only reference to a given object. Objects may be retained internally by the -system. +only reference to a given object. +Objects may be retained internally by the system. .Ss INTEGRATION WITH OBJECTIVE-C .Bd -filled -offset indent When building with an Objective-C or Objective-C++ compiler, dispatch objects -are declared as Objective-C types. This results in the following differences -compared to building as plain C/C++: +are declared as Objective-C types. +This results in the following differences compared to building as plain C/C++: .Bl -dash .It if Objective-C Automated Reference Counting is enabled, dispatch objects are @@ -72,13 +72,15 @@ functions will produce build errors. 
.Em Note : when ARC is enabled, care needs to be taken with dispatch API returning an interior pointer that is only valid as long as an associated object has not -been released. If that object is held in a variable with automatic storage, it -may need to be annotated with the +been released. +If that object is held in a variable with automatic storage, it may need to be +annotated with the .Li objc_precise_lifetime attribute, or stored in a .Li __strong instance variable instead, to ensure that the object is not prematurely -released. The functions returning interior pointers are +released. +The functions returning interior pointers are .Xr dispatch_data_create_map 3 and .Xr dispatch_data_apply 3 . @@ -116,10 +118,9 @@ preprocessor macro to When building with a plain C/C++ compiler or when integration with Objective-C is disabled, dispatch objects are .Em not -automatically retained and released when captured by a block. Therefore, when a -dispatch object is captured by a block that will be executed asynchronously, -the object must be manually retained and released: -.Pp +automatically retained and released when captured by a block. +Therefore, when a dispatch object is captured by a block that will be executed +asynchronously, the object must be manually retained and released: .Bd -literal -offset indent dispatch_retain(object); dispatch_async(queue, ^{ @@ -129,13 +130,15 @@ dispatch_async(queue, ^{ .Ed .Sh ACTIVATION Dispatch objects such as queues and sources may be created in an inactive -state. Objects in this state must be activated before any blocks -associated with them will be invoked. Calling +state. +Objects in this state must be activated before any blocks associated with them +will be invoked. +Calling .Fn dispatch_activate on an active object has no effect. 
.Pp -Changing attributes such as the target queue or a source handler is no longer permitted -once the object has been activated (see +Changing attributes such as the target queue or a source handler is no longer +permitted once the object has been activated (see .Xr dispatch_set_target_queue 3 , .Xr dispatch_source_set_event_handler 3 ). .Sh SUSPENSION @@ -144,7 +147,8 @@ or resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Other dispatch objects do not support suspension. +respectively. +Other dispatch objects do not support suspension. .Pp The dispatch framework always checks the suspension status before executing a block, but such changes never affect a block during execution (non-preemptive). @@ -155,18 +159,20 @@ a dispatch source is undefined. .Pp .Em Important : suspension applies to all aspects of the dispatch object life cycle, including -the finalizer function and cancellation handler. Suspending an object causes it -to be retained and resuming an object causes it to be released. Therefore it is -important to balance calls to +the finalizer function and cancellation handler. +Suspending an object causes it to be retained and resuming an object causes it +to be released. +Therefore it is important to balance calls to .Fn dispatch_suspend and .Fn dispatch_resume such that the dispatch object is fully resumed when the last reference is -released. The result of releasing all references to a dispatch object while in +released. +The result of releasing all references to a dispatch object while in an inactive or suspended state is undefined. .Sh CONTEXT POINTERS -Dispatch objects support supplemental context pointers. The value of the -context pointer may be retrieved and updated with +Dispatch objects support supplemental context pointers. 
+The value of the context pointer may be retrieved and updated with .Fn dispatch_get_context and .Fn dispatch_set_context @@ -176,8 +182,8 @@ The specifies an optional per-object finalizer function that is invoked asynchronously if the context pointer is not NULL when the last reference to the object is released. -This gives the -application an opportunity to free the context data associated with the object. +This gives the application an opportunity to free the context data associated +with the object. The finalizer will be run on the object's target queue. .Sh SEE ALSO .Xr dispatch 3 , diff --git a/man/dispatch_once.3 b/man/dispatch_once.3 index 2118a23bb..0875bc54c 100644 --- a/man/dispatch_once.3 +++ b/man/dispatch_once.3 @@ -36,7 +36,6 @@ FILE *getlogfile(void) return logfile; } .Ed -.Pp .Sh FUNDAMENTALS The .Fn dispatch_once diff --git a/man/dispatch_queue_create.3 b/man/dispatch_queue_create.3 index 833e564a0..3eeb4d366 100644 --- a/man/dispatch_queue_create.3 +++ b/man/dispatch_queue_create.3 @@ -49,11 +49,13 @@ All blocks submitted to dispatch queues are dequeued in FIFO order. Queues created with the .Dv DISPATCH_QUEUE_SERIAL attribute wait for the previously dequeued block to complete before dequeuing -the next block. A queue with this FIFO completion behavior is usually simply -described as a "serial queue." All memory writes performed by a block dispatched -to a serial queue are guaranteed to be visible to subsequent blocks dispatched -to the same queue. Queues are not bound to any specific thread of execution and -blocks submitted to independent queues may execute concurrently. +the next block. +A queue with this FIFO completion behavior is usually simply described as a +"serial queue". +All memory writes performed by a block dispatched to a serial queue are +guaranteed to be visible to subsequent blocks dispatched to the same queue. 
+Queues are not bound to any specific thread of execution and blocks submitted to +independent queues may execute concurrently. .Pp Queues created with the .Dv DISPATCH_QUEUE_CONCURRENT @@ -62,15 +64,17 @@ submitted with the dispatch barrier API. .Sh CREATION Queues are created with the .Fn dispatch_queue_create -function. Queues, like all dispatch objects, are reference counted and newly -created queues have a reference count of one. +function. +Queues, like all dispatch objects, are reference counted and newly created +queues have a reference count of one. .Pp The optional .Fa label argument is used to describe the purpose of the queue and is useful during -debugging and performance analysis. If a label is provided, it is copied. -By convention, clients should pass a reverse DNS style label. For example: -.Pp +debugging and performance analysis. +If a label is provided, it is copied. +By convention, clients should pass a reverse DNS style label. +For example: .Bd -literal -offset indent my_queue = dispatch_queue_create("com.example.subsystem.taskXYZ", DISPATCH_QUEUE_SERIAL); @@ -98,12 +102,14 @@ Queues may be temporarily suspended and resumed with the functions .Fn dispatch_suspend and .Fn dispatch_resume -respectively. Suspension is checked prior to block execution and is +respectively. +Suspension is checked prior to block execution and is .Em not preemptive. .Sh MAIN QUEUE The dispatch framework provides a default serial queue for the application to -use. This queue is accessed via the +use. +This queue is accessed via the .Fn dispatch_get_main_queue function. .Pp @@ -111,17 +117,20 @@ Programs must call .Fn dispatch_main at the end of .Fn main -in order to process blocks submitted to the main queue. (See the +in order to process blocks submitted to the main queue. +(See the .Sx COMPATIBILITY -section for exceptions.) The +section for exceptions.) +The .Fn dispatch_main function never returns. 
.Sh GLOBAL CONCURRENT QUEUES Unlike the main queue or queues allocated with .Fn dispatch_queue_create , the global concurrent queues schedule blocks as soon as threads become -available (non-FIFO completion order). Four global concurrent queues are -provided, representing the following priority bands: +available (non-FIFO completion order). +Four global concurrent queues are provided, representing the following priority +bands: .Bl -bullet -compact -offset indent .It DISPATCH_QUEUE_PRIORITY_HIGH @@ -136,33 +145,34 @@ DISPATCH_QUEUE_PRIORITY_BACKGROUND The priority of a global concurrent queue controls the scheduling priority of the threads created by the system to invoke the blocks submitted to that queue. Global queues with lower priority will be scheduled for execution after all -global queues with higher priority have been scheduled. Additionally, items on -the background priority global queue will execute on threads with background -state as described in +global queues with higher priority have been scheduled. +Additionally, items on the background priority global queue will execute on +threads with background state as described in .Xr setpriority 2 (i.e.\& disk I/O is throttled and the thread's scheduling priority is set to lowest value). .Pp Use the .Fn dispatch_get_global_queue -function to obtain the global queue of given priority. The +function to obtain the global queue of given priority. +The .Fa flags -argument is reserved for future use and must be zero. Passing any value other -than zero may result in a NULL return value. +argument is reserved for future use and must be zero. +Passing any value other than zero may result in a NULL return value. .Sh TARGET QUEUE The .Fn dispatch_set_target_queue -function updates the target queue of the given dispatch object. The target -queue of an object is responsible for processing the object. +function updates the target queue of the given dispatch object. 
+The target queue of an object is responsible for processing the object. .Pp The new target queue is retained by the given object before the previous target -queue is released. The new target queue setting will take effect between block -executions on the object, but not in the middle of any existing block executions -(non-preemptive). +queue is released. +The new target queue setting will take effect between block executions on the +object, but not in the middle of any existing block executions (non-preemptive). .Pp The default target queue of all dispatch objects created by the application is -the default priority global concurrent queue. To reset an object's target queue -to the default, pass the +the default priority global concurrent queue. +To reset an object's target queue to the default, pass the .Dv DISPATCH_TARGET_QUEUE_DEFAULT constant to .Fn dispatch_set_target_queue . @@ -179,12 +189,14 @@ will not be invoked concurrently with blocks submitted to the target queue or to any other queue with that same target queue. .Pp The target queue of a dispatch source specifies where its event handler and -cancellation handler blocks will be submitted. See +cancellation handler blocks will be submitted. +See .Xr dispatch_source_create 3 for more information about dispatch sources. .Pp The target queue of a dispatch I/O channel specifies the priority of the global -queue where its I/O operations are executed. See +queue where its I/O operations are executed. +See .Xr dispatch_io_create 3 for more information about dispatch I/O channels. .Pp @@ -207,24 +219,27 @@ The following functions are deprecated and will be removed in a future release: .El .Pp .Fn dispatch_get_current_queue -always returns a valid queue. When called from within a block -submitted to a dispatch queue, that queue will be returned. If this function is -called from the main thread before +always returns a valid queue. 
+When called from within a block submitted to a dispatch queue, that queue will +be returned. +If this function is called from the main thread before .Fn dispatch_main is called, then the result of .Fn dispatch_get_main_queue -is returned. In all other cases, the default target queue will be returned. +is returned. +In all other cases, the default target queue will be returned. .Pp The use of .Fn dispatch_get_current_queue -is strongly discouraged except for debugging and logging purposes. Code must not -make any assumptions about the queue returned, unless it is one of the global -queues or a queue the code has itself created. The returned queue may have -arbitrary policies that may surprise code that tries to schedule work with the -queue. The list of policies includes, but is not limited to, queue width (i.e. -serial vs. concurrent), scheduling priority, security credential or filesystem -configuration. This function is deprecated and will be removed in a future -release. +is strongly discouraged except for debugging and logging purposes. +Code must not make any assumptions about the queue returned, unless it is one of +the global queues or a queue the code has itself created. +The returned queue may have arbitrary policies that may surprise code that tries +to schedule work with the queue. +The list of policies includes, but is not limited to, queue width (i.e. serial +vs. concurrent), scheduling priority, security credential or filesystem +configuration. +This function is deprecated and will be removed in a future release. .Pp It is equally unsafe for code to assume that synchronous execution onto a queue is safe from deadlock if that queue is not the one returned by @@ -234,17 +249,21 @@ The result of .Fn dispatch_get_main_queue may or may not equal the result of .Fn dispatch_get_current_queue -when called on the main thread. Comparing the two is not a valid way to test -whether code is executing on the main thread. 
Foundation/AppKit programs should -use [NSThread isMainThread]. POSIX programs may use +when called on the main thread. +Comparing the two is not a valid way to test whether code is executing on the +main thread. +Foundation/AppKit programs should use [NSThread isMainThread]. +POSIX programs may use .Xr pthread_main_np 3 . .Pp .Fn dispatch_get_current_queue may return a queue owned by a different subsystem which has already had all -external references to it released. While such a queue will continue to exist +external references to it released. +While such a queue will continue to exist until all blocks submitted to it have completed, attempting to retain it is -forbidden and will trigger an assertion. If Objective-C Automatic Reference -Counting is enabled, any use of the object returned by +forbidden and will trigger an assertion. +If Objective-C Automatic Reference Counting is enabled, any use of the object +returned by .Fn dispatch_get_current_queue will cause retain calls to be automatically generated, so the use of .Fn dispatch_get_current_queue @@ -258,17 +277,20 @@ However, blocks submitted to the main queue in applications using .Fn dispatch_main are not guaranteed to execute on the main thread. .Pp -The dispatch framework is a pure C level API. As a result, it does not catch -exceptions generated by higher level languages such as Objective-C or C++. +The dispatch framework is a pure C level API. +As a result, it does not catch exceptions generated by higher level languages +such as Objective-C or C++. Applications .Em MUST catch all exceptions before returning from a block submitted to a dispatch queue; otherwise the process will be terminated with an uncaught exception. .Pp The dispatch framework manages the relationship between dispatch queues and -threads of execution. As a result, applications +threads of execution. +As a result, applications .Em MUST NOT -delete or mutate objects that they did not create. 
The following interfaces +delete or mutate objects that they did not create. +The following interfaces .Em MUST NOT be called by blocks submitted to a dispatch queue: .Bl -bullet -offset indent @@ -323,17 +345,19 @@ invocations of blocks submitted to a dispatch queue: While the result of .Fn pthread_self may change between invocations of blocks, the value will not change during the -execution of any single block. Because the underlying thread may change beteween -block invocations on a single queue, using per-thread data as an out-of-band -return value is error prone. In other words, the result of calling +execution of any single block. +Because the underlying thread may change beteween block invocations on a single +queue, using per-thread data as an out-of-band return value is error prone. +In other words, the result of calling .Fn pthread_setspecific and .Fn pthread_getspecific -is well defined within a signle block, but not across multiple blocks. Also, -one cannot make any assumptions about when the destructor passed to +is well defined within a signle block, but not across multiple blocks. +Also, one cannot make any assumptions about when the destructor passed to .Fn pthread_key_create -is called. The destructor may be called between the invocation of blocks on -the same queue, or during the idle state of a process. +is called. +The destructor may be called between the invocation of blocks on the same queue, +or during the idle state of a process. .Pp The following example code correctly handles per-thread return values: .Bd -literal -offset indent @@ -350,20 +374,19 @@ printf("kill(1,0) returned %d and errno %d\n", r, e); Note that in the above example .Va errno is a per-thread variable and must be copied out explicitly as the block may be -invoked on different thread of execution than the caller. Another example of -per-thread data that would need to be copied is the use of +invoked on different thread of execution than the caller. 
+Another example of per-thread data that would need to be copied is the use of .Fn getpwnam instead of .Fn getpwnam_r . .Pp As an optimization, .Fn dispatch_sync -invokes the block on the current thread when possible. In this case, the thread -specific data such as +invokes the block on the current thread when possible. +In this case, the thread specific data such as .Va errno -may persist from the block until back to the caller. Great care should be taken -not to accidentally rely on this side-effect. -.Pp +may persist from the block until back to the caller. +Great care should be taken not to accidentally rely on this side-effect. .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_async 3 , diff --git a/man/dispatch_read.3 b/man/dispatch_read.3 index 38e88dea8..42e915f54 100644 --- a/man/dispatch_read.3 +++ b/man/dispatch_read.3 @@ -27,19 +27,19 @@ The .Fn dispatch_read and .Fn dispatch_write -functions asynchronously read from and write to POSIX file descriptors. They -can be thought of as asynchronous, callback-based versions of the +functions asynchronously read from and write to POSIX file descriptors. +They can be thought of as asynchronous, callback-based versions of the .Fn fread and .Fn fwrite -functions provided by the standard C library. They are convenience functions -based on the +functions provided by the standard C library. +They are convenience functions based on the .Xr dispatch_io_read 3 and .Xr dispatch_io_write 3 -functions, intended for simple one-shot read or write requests. Multiple -request on the same file desciptor are better handled with the full underlying -dispatch I/O channel functions. +functions, intended for simple one-shot read or write requests. +Multiple requests on the same file descriptor are better handled with the full +underlying dispatch I/O channel functions. 
.Sh BEHAVIOR The .Fn dispatch_read @@ -48,20 +48,21 @@ function schedules an asynchronous read operation on the file descriptor Once the file descriptor is readable, the system will read as much data as is currently available, up to the specified .Va length , -starting at the current file pointer position. The given +starting at the current file pointer position. +The given .Va handler block will be submitted to .Va queue -when the operation completes or an error occurs. The block will be passed a -dispatch +when the operation completes or an error occurs. +The block will be passed a dispatch .Va data -object with the result of the read operation. If an error occurred while -reading from the file descriptor, the +object with the result of the read operation. +If an error occurred while reading from the file descriptor, the .Va error parameter to the block will be set to the appropriate POSIX error code and .Va data -will contain any data that could be read successfully. If the file pointer -position is at end-of-file, emtpy +will contain any data that could be read successfully. +If the file pointer position is at end-of-file, empty .Va data and zero .Va error @@ -75,23 +76,31 @@ The system will attempt to write the entire contents of the provided .Va data object to .Va fd -at the current file pointer position. The given +at the current file pointer position. +The given .Va handler block will be submitted to .Va queue -when the operation completes or an error occurs. If the write operation -completed successfully, the +when the operation completes or an error occurs. +If the write operation completed successfully, the .Va error parameter to the block will be set to zero, otherwise it will be set to the appropriate POSIX error code and the .Va data parameter will contain any data that could not be written. 
+.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_data_create 3 , +.Xr dispatch_io_create 3 , +.Xr dispatch_io_read 3 , +.Xr fread 3 .Sh CAVEATS The .Va data object passed to a .Va handler -block is released by the system when the block returns. If +block is released by the system when the block returns. +If .Va data is needed outside of the handler block, it must concatenate, copy, or retain it. @@ -101,7 +110,8 @@ descriptor .Va fd , the system takes control of that file descriptor until the .Va handler -block is executed. During this time the application must not manipulate +block is executed. +During this time the application must not manipulate .Va fd directly, in particular it is only safe to close .Va fd @@ -110,14 +120,9 @@ from the handler block (or after it has returned). If multiple asynchronous read or write operations are submitted to the same file descriptor, they will be performed in order, but their handlers will only be submitted once all operations have completed and control over the file -descriptor has been relinquished. For details on this and on the interaction -with dispatch I/O channels created from the same file descriptor, see +descriptor has been relinquished. +For details on this and on the interaction with dispatch I/O channels created +from the same file descriptor, see .Sx FILEDESCRIPTOR OWNERSHIP in .Xr dispatch_io_create 3 . -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_data_create 3 , -.Xr dispatch_io_create 3 , -.Xr dispatch_io_read 3 , -.Xr fread 3 diff --git a/man/dispatch_semaphore_create.3 b/man/dispatch_semaphore_create.3 index c0aa45171..7f0a5430a 100644 --- a/man/dispatch_semaphore_create.3 +++ b/man/dispatch_semaphore_create.3 @@ -26,8 +26,9 @@ Dispatch semaphores are used to synchronize threads. .Pp The .Fn dispatch_semaphore_wait -function decrements the semaphore. If the resulting value is less than zero, -it waits for a signal from a thread that increments the semaphore by calling +function decrements the semaphore. 
+If the resulting value is less than zero, it waits for a signal from a thread +that increments the semaphore by calling .Fn dispatch_semaphore_signal before returning. The @@ -36,13 +37,15 @@ parameter is creatable with the .Xr dispatch_time 3 or .Xr dispatch_walltime 3 -functions. If the timeout is reached without a signal being received, the semaphore -is re-incremented before the function returns. +functions. +If the timeout is reached without a signal being received, the semaphore is +re-incremented before the function returns. .Pp The .Fn dispatch_semaphore_signal -function increments the counting semaphore. If the previous value was less than zero, -it wakes one of the threads that are waiting in +function increments the counting semaphore. +If the previous value was less than zero, it wakes one of the threads that are +waiting in .Fn dispatch_semaphore_wait before returning. .Sh COMPLETION SYNCHRONIZATION @@ -98,8 +101,8 @@ Otherwise, zero is returned. .Pp The .Fn dispatch_semaphore_wait -function returns zero upon success and non-zero after the timeout expires. If -the timeout is DISPATCH_TIME_FOREVER, then +function returns zero upon success and non-zero after the timeout expires. +If the timeout is DISPATCH_TIME_FOREVER, then .Fn dispatch_semaphore_wait waits forever and always returns zero. .Sh MEMORY MODEL @@ -107,15 +110,18 @@ Dispatch semaphores are retained and released via calls to .Fn dispatch_retain and .Fn dispatch_release . +.Sh SEE ALSO +.Xr dispatch 3 , +.Xr dispatch_object 3 .Sh CAVEATS Unbalanced dispatch semaphores cannot be released. -For a given semaphore, calls to +For a given semaphore, the count at the time +.Fn dispatch_release +is called must be equal to or larger than the +count the semaphore was created with. 
+In other words, at the time of releasing the semaphore, there must have been at +least as many .Fn dispatch_semaphore_signal -and +calls as there were successful .Fn dispatch_semaphore_wait -must be balanced before -.Fn dispatch_release -is called on it. -.Sh SEE ALSO -.Xr dispatch 3 , -.Xr dispatch_object 3 +calls that did not timeout. diff --git a/man/dispatch_source_create.3 b/man/dispatch_source_create.3 index 313b6e723..b54d3da8a 100644 --- a/man/dispatch_source_create.3 +++ b/man/dispatch_source_create.3 @@ -91,18 +91,20 @@ with calls to .Fn dispatch_retain and .Fn dispatch_release -respectively. The +respectively. +The .Fa queue parameter specifies the target queue of the new source object, it will -be retained by the source object. Pass the +be retained by the source object. +Pass the .Dv DISPATCH_TARGET_QUEUE_DEFAULT constant to use the default target queue (the default priority global concurrent queue). .Pp -Newly created sources are created in a suspended state. After the source has -been configured by setting an event handler, cancellation handler, registration -handler, context, -etc., the source must be activated by a call to +Newly created sources are created in a suspended state. +After the source has been configured by setting an event handler, cancellation +handler, registration handler, context, etc., the source must be activated by a +call to .Fn dispatch_resume before any events will be delivered. .Pp @@ -151,8 +153,8 @@ The .Fn dispatch_source_get_handle function returns the underlying handle to the dispatch source (i.e. file descriptor, -mach port, process identifer, etc.). The result of this function may be cast -directly to the underlying type. +mach port, process identifier, etc.). +The result of this function may be cast directly to the underlying type. .Pp The .Fn dispatch_source_get_mask @@ -174,8 +176,10 @@ function is intended for use with the .Vt DISPATCH_SOURCE_TYPE_DATA_OR and .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE 
The result of using this function with any other source type is -undefined. Data merging is performed according to the source type: +source types. +The result of using this function with any other source type is +undefined. +Data merging is performed according to the source type: .Bl -tag -width "XXDISPATCH_SOURCE_TYPE_DATA_REPLACE" -compact -offset indent .It \(bu DISPATCH_SOURCE_TYPE_DATA_ADD .Vt data @@ -189,7 +193,8 @@ atomically replaces the source's data. .El .Pp If the source data value resulting from the merge operation is 0, the source -handler will not be invoked. This can happen if: +handler will not be invoked. +This can happen if: .Bl -bullet -compact -offset indent .It the atomic addition wraps for sources of type @@ -198,14 +203,14 @@ the atomic addition wraps for sources of type 0 is merged for sources of type .Vt DISPATCH_SOURCE_TYPE_DATA_REPLACE . .El -.Pp .Sh SOURCE EVENT HANDLERS In order to receive events from the dispatch source, an event handler should be specified via .Fn dispatch_source_set_event_handler . The event handler block is submitted to the source's target queue when the state -of the underlying system handle changes, or when an event occurs. If a source -is resumed with no event handler block set, events will be quietly ignored. +of the underlying system handle changes, or when an event occurs. +If a source is resumed with no event handler block set, events will be quietly +ignored. If the event handler block is changed while the source is suspended, or from a block running on a serial queue that is the source's target queue, then the next event handler invocation will use the new block. @@ -215,8 +220,9 @@ queues using .Fn dispatch_suspend and .Fn dispatch_resume -on the dispatch source directly. The data describing events which occur while a -source is suspended are coalesced and delivered once the source is resumed. +on the dispatch source directly. 
+The data describing events which occur while a source is suspended are coalesced +and delivered once the source is resumed. .Pp The .Fa handler @@ -235,11 +241,11 @@ To unset the event handler, call and pass NULL as .Fa function . This unsets the event handler regardless of whether the handler -was a function pointer or a block. Registration and cancellation handlers -(see below) may be unset in the same way, but as noted below, a cancellation -handler may be required. +was a function pointer or a block. +Registration and cancellation handlers (see below) may be unset in the same way, +but as noted below, a cancellation handler may be required. .Sh REGISTRATION -When +When .Fn dispatch_resume is called on a suspended or newly created source, there may be a brief delay before the source is ready to receive events from the underlying system handle. @@ -248,29 +254,33 @@ missed. .Pp Once the dispatch source is registered with the underlying system and is ready to process all events its optional registration handler will be submitted to -its target queue. This registration handler may be specified via +its target queue. +This registration handler may be specified via .Fn dispatch_source_set_registration_handler . .Pp The event handler will not be called until the registration handler finishes. If the source is canceled (see below) before it is registered, its registration handler will not be called. -.Pp .Sh CANCELLATION The .Fn dispatch_source_cancel function asynchronously cancels the dispatch source, preventing any further -invocation of its event handler block. Cancellation does not interrupt a -currently executing handler block (non-preemptive). If a source is canceled -before the first time it is resumed, its event handler will never be called. +invocation of its event handler block. +Cancellation does not interrupt a currently executing handler block +(non-preemptive). 
+If a source is canceled before the first time it is resumed, its event handler +will never be called. (In this case, note that the source must be resumed before it can be released.) .Pp The .Fn dispatch_source_testcancel function may be used to determine whether the specified source has been -canceled. A non-zero value will be returned if the source is canceled. +canceled. +A non-zero value will be returned if the source is canceled. .Pp When a dispatch source is canceled its optional cancellation handler will be -submitted to its target queue. The cancellation handler may be specified via +submitted to its target queue. +The cancellation handler may be specified via .Fn dispatch_source_set_cancel_handler . This cancellation handler is invoked only once, and only as a direct consequence of calling @@ -278,12 +288,11 @@ of calling .Pp .Em Important: a cancellation handler is required for file descriptor and mach port based -sources in order to safely close the descriptor or destroy the port. Closing the -descriptor or port before the cancellation handler has run may result in a race -condition: if a new descriptor is allocated with the same value as the recently -closed descriptor while the source's event handler is still running, the event -handler may read/write data to the wrong descriptor. -.Pp +sources in order to safely close the descriptor or destroy the port. +Closing the descriptor or port before the cancellation handler has run may +result in a race condition: if a new descriptor is allocated with the same value +as the recently closed descriptor while the source's event handler is still +running, the event handler may read/write data to the wrong descriptor. .Sh DISPATCH SOURCE TYPES The following section contains a summary of supported dispatch event types and the interpretation of their parameters and returned data. @@ -297,9 +306,11 @@ handler via a call to .Fn dispatch_source_merge_data . 
The data will be merged with the source's pending data via an atomic add or atomic bitwise OR, or direct replacement (based on the source's type), and the -event handler block will be submitted to the source's target queue. The +event handler block will be submitted to the source's target queue. +The .Fa data -is application defined. These sources have no +is application defined. +These sources have no .Fa handle or .Fa mask @@ -322,7 +333,8 @@ The data returned by .Fn dispatch_source_get_data is a bitmask that indicates which of the events in the .Fa mask -were observed. Note that because this source type will request notifications on +were observed. +Note that because this source type will request notifications on the provided port, it should not be mixed with the use of .Fn mach_port_request_notification on the same port. @@ -341,9 +353,11 @@ on the mach port is waiting to be received. .Vt DISPATCH_SOURCE_TYPE_MEMORYPRESSURE .Pp Sources of this type monitor the system memory pressure condition for state -changes. The +changes. +The .Fa handle -is unused and should be zero. The +is unused and should be zero. +The .Fa mask may be one or more of the following: .Bl -tag -width "XXDISPATCH_MEMORYPRESSURE_CRITICAL" -compact -offset indent @@ -412,14 +426,15 @@ is unused and should be zero. .Pp The data returned by .Fn dispatch_source_get_data -is an estimated number of bytes available to be read from the descriptor. This -estimate should be treated as a suggested +is an estimated number of bytes available to be read from the descriptor. +This estimate should be treated as a suggested .Em minimum -read buffer size. There are no guarantees that a complete read of this size -will be performed. +read buffer size. +There are no guarantees that a complete read of this size will be performed. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. 
See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK @@ -427,7 +442,8 @@ flag on a file descriptor. .Pp .Vt DISPATCH_SOURCE_TYPE_SIGNAL .Pp -Sources of this type monitor signals delivered to the current process. The +Sources of this type monitor signals delivered to the current process. +The .Fa handle is the signal number to monitor (int) and the .Fa mask @@ -445,11 +461,13 @@ of execution; therefore the handler block is not limited to the use of signal safe interfaces defined in .Xr sigaction 2 . Furthermore, multiple observers of a given signal are supported; thus allowing -applications and libraries to cooperate safely. However, a dispatch source +applications and libraries to cooperate safely. +However, a dispatch source .Em does not install a signal handler or otherwise alter the behavior of signal delivery. Therefore, applications must ignore or at least catch any signal that terminates -a process by default. For example, near the top of +a process by default. +For example, near the top of .Fn main : .Bd -literal -offset ident signal(SIGTERM, SIG_IGN); @@ -458,7 +476,8 @@ signal(SIGTERM, SIG_IGN); .Vt DISPATCH_SOURCE_TYPE_TIMER .Pp Sources of this type periodically submit the event handler block to the target -queue. The +queue. +The .Fa handle argument is unused and should be zero. .Pp @@ -469,7 +488,8 @@ event handler block. .Pp The timer parameters are configured with the .Fn dispatch_source_set_timer -function. Once this function returns, any pending source data accumulated for +function. +Once this function returns, any pending source data accumulated for the previous timer parameters has been cleared; the next fire of the timer will occur at .Fa start , @@ -478,8 +498,8 @@ and every nanoseconds thereafter until the timer source is canceled. .Pp Any fire of the timer may be delayed by the system in order to improve power -consumption and system performance. 
The upper limit to the allowable delay may -be configured with the +consumption and system performance. +The upper limit to the allowable delay may be configured with the .Fa leeway argument, the lower limit is under the control of the system. .Pp @@ -487,7 +507,8 @@ For the initial timer fire at .Fa start , the upper limit to the allowable delay is set to .Fa leeway -nanoseconds. For the subsequent timer fires at +nanoseconds. +For the subsequent timer fires at .Fa start .Li "+ N *" .Fa interval , @@ -498,14 +519,16 @@ the upper limit is .Li "/ 2 )" . .Pp The lower limit to the allowable delay may vary with process state such as -visibility of application UI. If the specified timer source was created with a +visibility of application UI. +If the specified timer source was created with a .Fa mask of .Vt DISPATCH_TIMER_STRICT , the system will make a best effort to strictly observe the provided .Fa leeway -value even if it is smaller than the current lower limit. Note that a minimal -amount of delay is to be expected even if this flag is specified. +value even if it is smaller than the current lower limit. +Note that a minimal amount of delay is to be expected even if this flag is +specified. .Pp The .Fa start @@ -575,12 +598,12 @@ is the file descriptor (int) to monitor and the is unused and should be zero. .Pp Users of this source type are strongly encouraged to perform non-blocking I/O -and handle any truncated reads or error conditions that may occur. See +and handle any truncated reads or error conditions that may occur. +See .Xr fcntl 2 for additional information about setting the .Vt O_NONBLOCK flag on a file descriptor. 
-.Pp .Sh SEE ALSO .Xr dispatch 3 , .Xr dispatch_object 3 , diff --git a/man/dispatch_time.3 b/man/dispatch_time.3 index 2536e0e9f..635f7d909 100644 --- a/man/dispatch_time.3 +++ b/man/dispatch_time.3 @@ -34,8 +34,9 @@ type is a semi-opaque integer, with only the special values .Vt DISPATCH_WALLTIME_NOW and .Vt DISPATCH_TIME_FOREVER -being externally defined. All other values are represented using an internal -format that is not safe for integer arithmetic or comparison. +being externally defined. +All other values are represented using an internal format that is not safe for +integer arithmetic or comparison. The internal format is subject to change. .Pp The @@ -52,8 +53,8 @@ Otherwise, if .Fa base is .Vt DISPATCH_TIME_NOW , -then the current time of the default host clock is used. On Apple platforms, -the value of the default host clock is obtained from +then the current time of the default host clock is used. +On Apple platforms, the value of the default host clock is obtained from .Vt mach_absolute_time() . .Pp The @@ -61,7 +62,8 @@ The function is useful for creating a milestone relative to a fixed point in time using the wall clock, as specified by the optional .Fa base -parameter. If +parameter. +If .Fa base is NULL, then the current time of the wall clock is used. .Vt dispatch_walltime(NULL, offset) @@ -78,7 +80,8 @@ parameter. .Pp Overflow causes .Vt DISPATCH_TIME_FOREVER -to be returned. When +to be returned. 
+When .Fa base is .Vt DISPATCH_TIME_FOREVER , diff --git a/os/CMakeLists.txt b/os/CMakeLists.txt index 2c4d32e66..282af25f7 100644 --- a/os/CMakeLists.txt +++ b/os/CMakeLists.txt @@ -5,6 +5,7 @@ install(FILES object.h generic_unix_base.h + generic_win_base.h DESTINATION "${INSTALL_OS_HEADERS_DIR}") diff --git a/os/clock.h b/os/clock.h new file mode 100644 index 000000000..665e1d871 --- /dev/null +++ b/os/clock.h @@ -0,0 +1,18 @@ +#ifndef __OS_CLOCK__ +#define __OS_CLOCK__ + +#include +#include + +/* + * @typedef os_clockid_t + * + * @abstract + * Describes the kind of clock that the workgroup timestamp parameters are + * specified in + */ +OS_ENUM(os_clockid, uint32_t, + OS_CLOCK_MACH_ABSOLUTE_TIME = 32, +); + +#endif /* __OS_CLOCK__ */ diff --git a/os/eventlink_private.h b/os/eventlink_private.h new file mode 100644 index 000000000..eb55a745b --- /dev/null +++ b/os/eventlink_private.h @@ -0,0 +1,296 @@ +#ifndef __OS_EVENTLINK__ +#define __OS_EVENTLINK__ + +#include +#include +#include + +__BEGIN_DECLS + +OS_OBJECT_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_eventlink_t + * + * @abstract + * A reference counted os_object representing a directed paired link of "wake" events + * between two designated threads, the link `source` and the link `target`. + * The target thread may optionally inherit properties of the source thread upon + * return from wait (such as membership in a workgroup). + * + * @discussion + * Threads explicitly associate themselves with an eventlink, only one source + * and one target may exist per eventlink. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_eventlink_s *os_eventlink_t; +#else +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_OBJECT_DECL_CLASS(os_eventlink); +#endif + +/*! + * @function os_eventlink_create + * + * @abstract + * Creates an inactive refcounted os_object representing an os_eventlink_t. + * + * This function creates only 1 endpoint of an eventlink object. 
The other + * endpoint of the eventlink needs to be created from this eventlink object + * using one of the other creator functions - + * os_eventlink_create_remote_with_eventlink() or + * os_eventlink_create_with_port() + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create(const char *name); + +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) + +/* TODO: API for the future when we make a variant of eventlink that does + * copyin */ + +/*! + * @typedef os_eventlink_shared_data_t + * + * @abstract + * Pointer to an opaque structure identifying the data that is used to + * synchronize between the two endpoints of an eventlink. + * + * It is the client's responsibility to allocate this structure such that both + * threads on the two endpoints of the eventlink can synchronize with it ie. If + * the eventlink is between 2 threads in 2 processes, os_eventlink_shared_data_t + * should be allocated in shared memory between the two processes. + */ +typedef struct os_eventlink_shared_data_s { + uint64_t local_count; + uint64_t remote_count; +} os_eventlink_shared_data_s, *os_eventlink_shared_data_t; +#define OS_EVENTLINK_SHARED_DATA_INITIALIZER { 0 } + +/*! + * @function os_eventlink_set_shared_data + * + * @abstract + * Associates a shared data structure with the os_eventlink. + * + * As a performance enhancement, clients may choose to provide an opaque shared + * data structure in memory visible to both ends of the eventlink based on the + * usage pattern of the os eventlink. + * + * Passing in NULL for shared data is recommended if the eventlink is to be used + * for the typical RPC ping-pong case whereby one side of the eventlink is + * always blocked waiting on a signal from the other side. In this case, each + * signal causes a single wakeup. + * + * Passing in shared data is recommended when one side of the eventlink is not + * necessarily always waiting for the other's signal in order to work. 
Passing + * in the shared data allows for more efficient signalling - potentially without + * any system calls. + */ +int +os_eventlink_set_shared_data(os_eventlink_t eventlink, + os_eventlink_shared_data_t data); + +#endif + +/*! + * @function os_eventlink_activate + * + * @abstract + * Activates the os_eventlink object for use. No further configuration can be + * done on the eventlink object after it has been activated. This API is not + * real-time safe. + * + * If an error is encountered, errno is set and returned. + */ +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_activate(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_extract_remote_port + * + * @abstract + * Returns a reference to a send right representing the remote endpoint of the + * eventlink. This port is to be passed to os_eventlink_create_with_port() to + * create an eventlink object. + * + * Calling this function multiple times on an eventlink object will result in an + * error. + * + * @param eventlink + * An eventlink returned from a previous call to os_eventlink_create(). This + * eventlink must have been activated. + */ +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_extract_remote_port(os_eventlink_t eventlink, mach_port_t *port_out); + +/*! + * @function os_eventlink_create_with_port + * + * @abstract + * Creates an inactive eventlink from a port returned from a previous call to + * os_eventlink_extract_remote_port. This function does not consume a reference + * on the specified send right. + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create_with_port(const char *name, mach_port_t mach_port); + +/*! + * @function os_eventlink_create_remote_with_eventlink + * + * @abstract + * Creates an inactive refcounted os_object representing an os_eventlink_t + * remote endpoint. Each eventlink has exactly one remote endpoint that can be + * created from it. 
Calling this function on an eventlink object returned from + * os_eventlink_create(), more than once will result in an error. + * + * @param eventlink + * An eventlink returned from a previous call to os_eventlink_create(). This + * eventlink must have been activated. + */ +OS_EXPORT OS_OBJECT_RETURNS_RETAINED +os_eventlink_t _Nullable +os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t eventlink); + +/*! + * @function os_eventlink_associate + * + * @abstract + * Associate a thread with the eventlink endpoint provided. The eventlink + * provided should be activated before this call. This API is not real + * time safe. + * + * If a thread is already associated with the eventlink, errno is set and + * returned. + */ + +OS_ENUM(os_eventlink_associate_options, uint64_t, + OE_ASSOCIATE_CURRENT_THREAD = 0, + OE_ASSOCIATE_ON_WAIT = 0x1, +); + +OS_EXPORT OS_OBJECT_WARN_UNUSED_RESULT +int +os_eventlink_associate(os_eventlink_t eventlink, + os_eventlink_associate_options_t options); + +/*! + * @function os_eventlink_disassociate + * + * @abstract + * Disassociate the current thread with the eventlink endpoint provided. This + * API is not real time safe. + * + * If the current thread is not associated with the eventlink via a previous + * call to os_eventlink_associate, errno is set and returned. + */ +OS_EXPORT +int +os_eventlink_disassociate(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_wait + * + * @abstract + * Wait on the eventlink endpoint for a signal from the other endpoint. If there + * are outstanding signals, this function will consume them and return + * immediately. + * + * Upon receiving a signal, the function returns the number of signals that have + * been consumed by the waiter in the out parameter if specified. + * + * If the eventlink has not been previously associated via a call to + * os_eventlink_associate or if there is a mismatch between the associated + * thread and the current thread, the process will abort. 
This API call is + * real-time safe. + */ +OS_EXPORT +int +os_eventlink_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_wait_until + * + * @abstract + * Wait on the eventlink endpoint for a signal or until the timeout specified is + * hit. If there are outstanding signals, this function will consume them and + * return immediately. + * + * Upon success, the function returns the number of signals that have been + * consumed by the waiter in the out parameter, if provided. If the timeout is + * hit, then 0 signals are said to have been consumed by the waiter. This API + * call is real time safe. + */ +OS_EXPORT +int +os_eventlink_wait_until(os_eventlink_t eventlink, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_signal + * + * @abstract + * Signal the other endpoint of an eventlink. This API call is real time safe. + * + * If an error is encountered, errno will be set and returned. + */ +OS_EXPORT +int +os_eventlink_signal(os_eventlink_t eventlink); + +/*! + * @function os_eventlink_signal_and_wait + * + * @abstract + * Signals on an eventlink endpoint and then proceeds to wait on it until the + * eventlink is signalled. Returns the number of signals consumed by the waiter + * through the out parameter if provided. This API call is real time safe. + */ +OS_EXPORT +int +os_eventlink_signal_and_wait(os_eventlink_t eventlink, uint64_t * _Nullable signals_consumed_out); + +/*! + * @function os_eventlink_signal_and_wait_until + * + * @abstract + * Signals on an eventlink endpoint and then proceeds to wait on it until the + * eventlink is signalled or the timeout is hit. Returns the number of signals + * consumed by the waiter through the out parameter if provided, with 0 + * indicating that a timeout has been hit. This API call is real time safe. 
+ */ +OS_EXPORT +int +os_eventlink_signal_and_wait_until(os_eventlink_t eventlink, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out); + +/* + * @function os_eventlink_cancel + * + * @abstract + * Invalidates an eventlink. The only follow up actions possible on the eventlink + * after it has been invalidated, are to disassociate from the eventlink and + * dispose of it. + * + * If the eventlink had a remote endpoint created, the remote side will get an + * ECANCELED when it tries to wait or signal on it. Existing waiters on the + * eventlink will get the same result as well. The only valid follow up + * actions possible on a remote endpoint are to disassociate from the eventlink + * and dispose of it. + * + * This API is idempotent. It is not required to call this API before dropping + * the last reference count of an eventlink. + */ +OS_EXPORT +void +os_eventlink_cancel(os_eventlink_t eventlink); + +OS_OBJECT_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_EVENTLINK__ */ diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index a633bf408..2674a26dc 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -78,7 +78,7 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t vat, firehose_buffer_t __firehose_buffer_create(size_t *size); -void +bool __firehose_merge_updates(firehose_push_reply_t update); int diff --git a/os/object.h b/os/object.h index 2979de891..e2ce3f467 100644 --- a/os/object.h +++ b/os/object.h @@ -98,6 +98,15 @@ #endif // __swift__ #endif // OS_OBJECT_SWIFT3 +#if __has_feature(assume_nonnull) +#define OS_OBJECT_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_OBJECT_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_OBJECT_ASSUME_NONNULL_BEGIN +#define OS_OBJECT_ASSUME_NONNULL_END +#endif +#define OS_OBJECT_WARN_UNUSED_RESULT __attribute__((__warn_unused_result__)) + #if OS_OBJECT_USE_OBJC #import #if 
__has_attribute(objc_independent_class) @@ -116,9 +125,9 @@ #define OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, proto) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL_IMPL( \ OS_OBJECT_CLASS(name), OS_OBJECT_CLASS(proto)) -#define OS_OBJECT_DECL_IMPL(name, ...) \ +#define OS_OBJECT_DECL_IMPL(name, adhere, ...) \ OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \ - typedef NSObject \ + typedef adhere \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL_BASE(name, ...) \ @interface OS_OBJECT_CLASS(name) : __VA_ARGS__ \ @@ -129,9 +138,9 @@ typedef OS_OBJECT_CLASS(name) \ * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL(name, ...) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, NSObject, ) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) #else @@ -149,6 +158,8 @@ #define OS_OBJECT_BRIDGE #define OS_WARN_RESULT_NEEDS_RELEASE OS_WARN_RESULT #endif + + #if __has_attribute(objc_runtime_visible) && \ ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && \ __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) || \ @@ -163,7 +174,7 @@ /* * To provide backward deployment of ObjC objects in Swift on pre-10.12 * SDKs, OS_object classes can be marked as OS_OBJECT_OBJC_RUNTIME_VISIBLE. - * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, + * When compiling with a deployment target earlier than OS X 10.12 (iOS 10.0, * tvOS 10.0, watchOS 3.0) the Swift compiler will only refer to this type at * runtime (using the ObjC runtime). */ @@ -187,9 +198,9 @@ #define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \ OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) +#endif // OS_OBJECT_SWIFT3 OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_DECL_BASE(object, NSObject); -#endif // OS_OBJECT_SWIFT3 #else /*! 
@parseOnly */ #define OS_OBJECT_RETURNS_RETAINED @@ -215,6 +226,27 @@ OS_OBJECT_DECL_BASE(object, NSObject); typedef struct name##_s *name##_t #endif +#if OS_OBJECT_USE_OBJC +/* Declares a class of the specific name and exposes the interface and typedefs + * name##_t to the pointer to the class */ +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, ## __VA_ARGS__ ) +/* Declares a subclass of the same name, and + * subclass adheres to protocol specified. Typedefs baseclass * to subclass##_t */ +#define OS_OBJECT_SHOW_SUBCLASS(subclass_name, super, proto_name) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ + OS_OBJECT_DECL_BASE(subclass_name, OS_OBJECT_CLASS(super)); \ + typedef OS_OBJECT_CLASS(super) \ + * OS_OBJC_INDEPENDENT_CLASS subclass_name##_t +#else /* Plain C */ +#define OS_OBJECT_DECL_PROTOCOL(name, ...) +#define OS_OBJECT_SHOW_CLASS(name, ...) \ + typedef struct name##_s *name##_t +#define OS_OBJECT_SHOW_SUBCLASS(name, super, ...) 
\ + typedef super##_t name##_t +#endif + #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) __BEGIN_DECLS diff --git a/os/object_private.h b/os/object_private.h index 003369ecc..0d58e8650 100644 --- a/os/object_private.h +++ b/os/object_private.h @@ -30,6 +30,12 @@ #include #include #include +#if __has_include() +#include +#endif +#ifndef __ptrauth_objc_isa_pointer +#define __ptrauth_objc_isa_pointer +#endif #if __GNUC__ #define OS_OBJECT_NOTHROW __attribute__((__nothrow__)) @@ -63,7 +69,7 @@ #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX #define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ - isa; /* must be pointer-sized */ \ + isa; /* must be pointer-sized and use __ptrauth_objc_isa_pointer */ \ int volatile ref_cnt; \ int volatile xref_cnt @@ -100,7 +106,7 @@ #if OS_OBJECT_USE_OBJC #define OS_OBJECT_USES_XREF_DISPOSE() \ - (oneway void)release { \ - _os_object_release(self); \ + _os_object_release((OS_object *) self); \ } #endif @@ -129,9 +135,7 @@ typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, super) #elif OS_OBJECT_USE_OBJC -API_AVAILABLE(macos(10.8), ios(6.0)) -OS_OBJECT_EXPORT -@interface OS_OBJECT_CLASS(object) : NSObject +@interface OS_OBJECT_CLASS(object) (OSObjectPrivate) // Note: objects who want _xref_dispose to be called need // to use OS_OBJECT_USES_XREF_DISPOSE() - (void)_xref_dispose; @@ -158,7 +162,7 @@ API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW OS_SWIFT_UNAVAILABLE("Unavailable in Swift") _os_object_t -_os_object_alloc(const void *cls, size_t size); +_os_object_alloc(const void * _Nullable cls, size_t size); API_AVAILABLE(macos(10.8), ios(6.0)) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW diff --git a/os/voucher_private.h b/os/voucher_private.h index ad4e31274..1211c7ac6 100644 --- a/os/voucher_private.h +++ 
b/os/voucher_private.h @@ -437,8 +437,8 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg); * * @param max_hex_data * The maximum number of bytes of hex data to be formatted for voucher content - * that is not of type MACH_VOUCHER_ATTR_KEY_ATM, MACH_VOUCHER_ATTR_KEY_BANK - * or MACH_VOUCHER_ATTR_KEY_IMPORTANCE. + * that is not of type MACH_VOUCHER_ATTR_KEY_BANK or + * MACH_VOUCHER_ATTR_KEY_IMPORTANCE. * * @result * The offset of the first byte in the buffer following the formatted voucher diff --git a/os/workgroup.h b/os/workgroup.h new file mode 100644 index 000000000..96b870c10 --- /dev/null +++ b/os/workgroup.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP__ +#define __OS_WORKGROUP__ + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#ifndef __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#endif /* __OS_WORKGROUP_INDIRECT__ */ + +#include +#include +#include +#include + +#undef __OS_WORKGROUP_INDIRECT__ +#endif /* __DISPATCH_BUILDING_DISPATCH__ */ + +#endif /* __OS_WORKGROUP__ */ diff --git a/os/workgroup_base.h b/os/workgroup_base.h new file mode 100644 index 000000000..3983f002a --- /dev/null +++ b/os/workgroup_base.h @@ -0,0 +1,78 @@ +#ifndef __OS_WORKGROUP_BASE__ +#define __OS_WORKGROUP_BASE__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#if __has_feature(assume_nonnull) +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN _Pragma("clang assume_nonnull begin") +#define OS_WORKGROUP_ASSUME_NONNULL_END _Pragma("clang assume_nonnull end") +#else +#define OS_WORKGROUP_ASSUME_NONNULL_BEGIN +#define OS_WORKGROUP_ASSUME_NONNULL_END +#endif +#define OS_WORKGROUP_WARN_RESULT __attribute__((__warn_unused_result__)) +#define OS_WORKGROUP_EXPORT OS_EXPORT +#define OS_WORKGROUP_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED + +#define OS_WORKGROUP_DECL(name, swift_name) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_CLASS(name, OS_OBJECT_CLASS(object)) + +#if OS_OBJECT_USE_OBJC +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) \ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_DECL_PROTOCOL(name ## __VA_ARGS__ ) +#else +#define OS_WORKGROUP_SUBCLASS_DECL_PROTO(name, swift_name, ...) +#endif + +#define OS_WORKGROUP_SUBCLASS_DECL(name, super, swift_name, ...) 
\ + OS_SWIFT_NAME(swift_name) \ + OS_OBJECT_SHOW_SUBCLASS(name, super, name, ## __VA_ARGS__) + +#if defined(__LP64__) +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 36 +#else +#define __OS_WORKGROUP_ATTR_SIZE__ 60 +#define __OS_WORKGROUP_INTERVAL_DATA_SIZE__ 56 +#define __OS_WORKGROUP_JOIN_TOKEN_SIZE__ 28 +#endif + +#define _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT 0x2FA863B4 +#define _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT 0x2FA863C4 + +struct OS_REFINED_FOR_SWIFT os_workgroup_attr_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_ATTR_SIZE__]; +}; + +#define _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT 0x52A74C4D +struct OS_REFINED_FOR_SWIFT os_workgroup_interval_data_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_INTERVAL_DATA_SIZE__]; +}; + +struct OS_REFINED_FOR_SWIFT os_workgroup_join_token_opaque_s { + uint32_t sig; + char opaque[__OS_WORKGROUP_JOIN_TOKEN_SIZE__]; +}; + +#endif /* __OS_WORKGROUP_BASE__ */ diff --git a/os/workgroup_interval.h b/os/workgroup_interval.h new file mode 100644 index 000000000..b056f82cf --- /dev/null +++ b/os/workgroup_interval.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_INTERVAL__ +#define __OS_WORKGROUP_INTERVAL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_interval_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking work performed as part of + * a repeating interval-driven workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_interval_s *os_workgroup_interval_t; +#else +OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_interval, Repeatable); +OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_interval, os_workgroup, WorkGroupInterval); +#endif + +/* During the first instance of this API, the only supported interval + * workgroups are for audio workloads. Please refer to the AudioToolbox + * framework for more information. + */ + +/* + * @typedef os_workgroup_interval_data, os_workgroup_interval_data_t + * + * @abstract + * An opaque structure containing additional configuration for the workgroup + * interval. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) +typedef struct os_workgroup_interval_data_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_s *os_workgroup_interval_data_t; +#else +typedef struct os_workgroup_interval_data_opaque_s os_workgroup_interval_data_s; +typedef struct os_workgroup_interval_data_opaque_s *os_workgroup_interval_data_t; +#endif +#define OS_WORKGROUP_INTERVAL_DATA_INITIALIZER \ + { .sig = _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT } + +/*! + * @function os_workgroup_interval_start + * + * @abstract + * Indicates to the system that the member threads of this + * os_workgroup_interval_t have begun working on an instance of the repeatable + * interval workload with the specified timestamps. This function is real time + * safe. 
+ * + * This function will set and return an errno in the following cases: + * + * - The current thread is not a member of the os_workgroup_interval_t + * - The os_workgroup_interval_t has been cancelled + * - The timestamps passed in are malformed + * - os_workgroup_interval_start() was previously called on the + * os_workgroup_interval_t without an intervening os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place. + * + * @param start + * Start timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This is generally a time in the past and + * indicates when the workgroup started working on an interval period + * + * @param deadline + * Deadline timestamp specified in the os_clockid_t with which the + * os_workgroup_interval_t was created. This specifies the deadline which the + * interval period would like to meet. + * + * @param data + * This field is currently unused and should be NULL + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_start(os_workgroup_interval_t wg, uint64_t start, uint64_t + deadline, os_workgroup_interval_data_t _Nullable data); + +/*! + * @function os_workgroup_interval_update + * + * @abstract + * Updates an already started interval workgroup to have the new + * deadline specified. This function is real time safe. 
+ * + * This function will return an error in the following cases: + * - The current thread is not a member of the os_workgroup_interval_t + * - The os_workgroup_interval_t has been cancelled + * - The timestamp passed in is malformed + * - os_workgroup_interval_start() was not previously called on the + * os_workgroup_interval_t or was already matched with an + * os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place + * + * @param deadline + * Timestamp specified in the os_clockid_t with + * which the os_workgroup_interval_t was created. + * + * @param data + * This field is currently unused and should be NULL + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_update(os_workgroup_interval_t wg, uint64_t deadline, + os_workgroup_interval_data_t _Nullable data); + +/*! + * @function os_workgroup_interval_finish + * + * @abstract + * Indicates to the system that the member threads of + * this os_workgroup_interval_t have finished working on the current instance + * of the interval workload. This function is real time safe. + * + * This function will return an error in the following cases: + * - The current thread is not a member of the os_workgroup_interval_t + * - os_workgroup_interval_start() was not previously called on the + * os_workgroup_interval_t or was already matched with an + * os_workgroup_interval_finish() + * - A concurrent workgroup interval configuration operation is taking place. 
+ * + * @param data + * This field is currently unused and should be NULL + * + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_interval_finish(os_workgroup_interval_t wg, + os_workgroup_interval_data_t _Nullable data); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_INTERVAL__ */ diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h new file mode 100644 index 000000000..79d1c4fb9 --- /dev/null +++ b/os/workgroup_interval_private.h @@ -0,0 +1,110 @@ +#ifndef __OS_WORKGROUP_INTERVAL_PRIVATE__ +#define __OS_WORKGROUP_INTERVAL_PRIVATE__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/* + * @typedef os_workgroup_interval_type_t + * + * @abstract + * Describes a specialized os_workgroup_interval type the client would like to + * create. + * + * Clients need the 'com.apple.private.kernel.work-interval' entitlement to + * create all workgroups types listed below except the following: + * + * OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, + * OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT, + * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + * + * Note that only real time threads are allowed to join workgroups of type + * OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT and + * OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO. 
+ */ +OS_ENUM(os_workgroup_interval_type, uint16_t, + OS_WORKGROUP_INTERVAL_TYPE_DEFAULT = 0x1, + OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT, + OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + + OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO, + OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION, + OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER, + OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY, + OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA, +); + +/* + * @function os_workgroup_attr_set_interval_type + * + * @abstract + * Specifies that the os_workgroup_interval_t to be created should be of a + * specialized type. These types should only be specified when creating an + * os_workgroup_interval_t using the os_workgroup_interval_create API - using it + * with any other workgroup creation API will result in an error at creation + * time. + * + * Setting type OS_WORKGROUP_INTERVAL_TYPE_DEFAULT on an os_workgroup_interval_t + * is a no-op. + * + * EINVAL is returned if the attribute passed in hasn't been initialized. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT +int +os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, + os_workgroup_interval_type_t type); + +/* + * @abstract + * Creates an os_workgroup_interval_t with the specified name and attributes. + * This object tracks a repeatable workload characterized by a start time, end + * time and targeted deadline. Example use cases include audio and graphics + * rendering workloads. + * + * A newly created os_workgroup_interval_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_interval_t + * implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param clockid + * The clockid in which timestamps passed to the os_workgroup_interval_start() + * and os_workgroup_interval_update() functions are specified. + * + * @param attrs + * The requested set of os_workgroup_t attributes. 
NULL is to be specified for + * the default set of attributes. By default, an interval workgroup + * is nonpropagating with asynchronous work and differentiated from other threads + * in the process (see os_workgroup_attr_flags_t). + * + * The OS_WORKGROUP_ATTR_UNDIFFERENTIATED attribute is invalid to specify for + * interval workgroups. If it isn't or if invalid attributes are specified, this + * function returns NULL and sets errno. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create(const char * _Nullable name, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + +/* This SPI is for use by Audio Toolbox only. This function returns a reference + * which is the responsibility of the caller to manage. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t +os_workgroup_interval_copy_current_4AudioToolbox(void); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS +#endif /* __OS_WORKGROUP_INTERVAL_PRIVATE__ */ diff --git a/os/workgroup_object.h b/os/workgroup_object.h new file mode 100644 index 000000000..5c8bd4f1a --- /dev/null +++ b/os/workgroup_object.h @@ -0,0 +1,371 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_OBJECT__ +#define __OS_WORKGROUP_OBJECT__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." +#include // For header doc +#endif + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_t + * + * @abstract + * A reference counted os object representing a workload that needs to + * be distinctly recognized and tracked by the system. The workgroup + * tracks a collection of threads all working cooperatively. An os_workgroup + * object - when not an instance of a specific os_workgroup_t subclass - + * represents a generic workload and makes no assumptions about the kind of + * work done. + * + * @discussion + * Threads can explicitly join an os_workgroup_t to mark themselves as + * participants in the workload. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_s *os_workgroup_t; +#else +OS_WORKGROUP_DECL(os_workgroup, WorkGroup); +#endif + + +/* Attribute creation and specification */ + +/*! + * @typedef os_workgroup_attr_t + * + * @abstract + * Pointer to an opaque structure for describing attributes that can be + * configured on a workgroup at creation. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_attr_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_s *os_workgroup_attr_t; +#else +typedef struct os_workgroup_attr_opaque_s os_workgroup_attr_s; +typedef struct os_workgroup_attr_opaque_s *os_workgroup_attr_t; +#endif + +/* os_workgroup_t attributes need to be initialized before use. This initializer + * allows you to create a workgroup with the system default attributes. 
*/ +#define OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT \ + { .sig = _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT } + + + +/* The main use of the workgroup API is through instantiations of the concrete + * subclasses - please refer to os/workgroup_interval.h and + * os/workgroup_parallel.h for more information on creating workgroups. + * + * The functions below operate on all subclasses of os_workgroup_t. + */ + +/*! + * @function os_workgroup_copy_port + * + * @abstract + * Returns a reference to a send right representing this workgroup that is to be + * sent to other processes. This port is to be passed to + * os_workgroup_create_with_port() to create a workgroup object. + * + * It is the client's responsibility to release the send right reference. + * + * If an error is encountered, errno is set and returned. + */ +API_AVAILABLE(macos(10.16)) +SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out); + +/*! + * @function os_workgroup_create_with_port + * + * @abstract + * Create an os_workgroup_t object from a send right returned by a previous + * call to os_workgroup_copy_port, potentially in a different process. + * + * A newly created os_workgroup_t has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param mach_port + * The send right to create the workgroup from. No reference is consumed + * on the specified send right. + */ +API_AVAILABLE(macos(10.16)) +SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0)) +OS_SWIFT_NAME(WorkGroup.init(__name:port:)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_port(const char *_Nullable name, mach_port_t mach_port); + +/*! 
+ * @function os_workgroup_create_with_workgroup + * + * @abstract + * Create a new os_workgroup object from an existing os_workgroup. + * + * The newly created os_workgroup has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param wg + * The existing workgroup to create a new workgroup object from. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workgroup(const char * _Nullable name, os_workgroup_t wg); + +/*! + * @typedef os_workgroup_join_token, os_workgroup_join_token_t + * + * @abstract + * An opaque join token which the client needs to pass to os_workgroup_join + * and os_workgroup_leave + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_join_token_s os_workgroup_join_token_s; +typedef struct os_workgroup_join_token_s *os_workgroup_join_token_t; +#else +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_join_token_opaque_s os_workgroup_join_token_s; +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_join_token_opaque_s *os_workgroup_join_token_t; +#endif + + +/*! + * @function os_workgroup_join + * + * @abstract + * Joins the current thread to the specified workgroup and populates the join + * token that has been passed in. This API is real-time safe. + * + * @param wg + * The workgroup that the current thread would like to join + * + * @param token_out + * Pointer to a client allocated struct which the function will populate + * with the join token. This token must be passed in by the thread when it calls + * os_workgroup_leave().
+ * + * Errors will be returned in the following cases: + * + * EALREADY The thread is already part of a workgroup that the specified + * workgroup does not nest with + * EINVAL The workgroup has been cancelled + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token_out); + +/*! + * @function os_workgroup_leave + * + * @abstract + * This removes the current thread from a workgroup it has previously + * joined. Threads must leave all workgroups in the reverse order that they + * have joined them. Failing to do so before exiting will result in undefined + * behavior. + * + * If the join token is malformed, the process will be aborted. + * + * This API is real time safe. + * + * @param wg + * The workgroup that the current thread would like to leave. + * + * @param token + * This is the join token populated by the most recent call to + * os_workgroup_join(). + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void +os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token); + +/* Working Arena index of a thread in a workgroup */ +typedef uint32_t os_workgroup_index; +/* Destructor for Working Arena */ +typedef void (*os_workgroup_working_arena_destructor_t)(void * _Nullable); + +/*! + * @function os_workgroup_set_working_arena + * + * @abstract + * Associates a client defined working arena with the workgroup. The arena + * is local to the workgroup object in the process. This is intended for + * distributing a manually managed memory allocation between member threads + * of the workgroup. + * + * This function can be called multiple times and the client specified + * destructor will be called on the previously assigned arena, if any. 
This + * function can only be called when no threads have currently joined the + * workgroup and all workloops associated with the workgroup are idle. + * + * @param wg + * The workgroup to associate the working arena with + * + * @param arena + * The client managed arena to associate with the workgroup. This value can + * be NULL. + * + * @param max_workers + * The maximum number of threads that will ever query the workgroup for the + * arena and request an index into it. If the arena is not used to partition + * work amongst member threads, then this field can be 0. + * + * @param destructor + * A destructor to call on the previously assigned working arena, if any + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable arena, + uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor); + +/*! + * @function os_workgroup_get_working_arena + * + * @abstract + * Returns the working arena associated with the workgroup and the current + * thread's index in the workgroup. This function can only be called by a member + * of the workgroup. Multiple calls to this API by a member thread will return + * the same arena and index until the thread leaves the workgroup. + * + * For workloops with an associated workgroup, every work item on the workloop + * will receive the same index in the arena. + * + * This method returns NULL if no arena is set on the workgroup. The index + * returned by this function is zero-based and is namespaced per workgroup + * object in the process. The indices provided are strictly monotonic and never + * reused until a future call to os_workgroup_set_working_arena. + * + * @param wg + * The workgroup to get the working arena from. + * + * @param index_out + * A pointer to a os_workgroup_index which will be populated by the caller's + * index in the workgroup. 
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void * _Nullable +os_workgroup_get_working_arena(os_workgroup_t wg, + os_workgroup_index * _Nullable index_out); + +/*! + * @function os_workgroup_cancel + * + * @abstract + * This API invalidates a workgroup and indicates to the system that the + * workload is no longer relevant to the caller. + * + * No new work should be initiated for a cancelled workgroup and + * work that is already underway should periodically check for + * cancellation with os_workgroup_testcancel and initiate cleanup if needed. + * + * Threads currently in the workgroup continue to be tracked together but no + * new threads may join this workgroup - the only possible operation allowed is + * to leave the workgroup. Other actions may have undefined behavior or + * otherwise fail. + * + * This API is idempotent. Cancellation is local to the workgroup object + * it is called on and does not affect other workgroups. + * + * @param wg + * The workgroup that the thread would like to cancel + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +void +os_workgroup_cancel(os_workgroup_t wg); + +/*! + * @function os_workgroup_testcancel + * + * @abstract + * Returns true if the workgroup object has been cancelled. See also + * os_workgroup_cancel + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +bool +os_workgroup_testcancel(os_workgroup_t wg); + +/*! + * @typedef os_workgroup_max_parallel_threads_attr_t + * + * @abstract + * A pointer to a structure describing the set of properties of a workgroup to + * override with the explicitly specified values in the structure. + * + * See also os_workgroup_max_parallel_threads.
+ */ +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_max_parallel_threads_attr_s os_workgroup_mpt_attr_s; +OS_REFINED_FOR_SWIFT +typedef struct os_workgroup_max_parallel_threads_attr_s *os_workgroup_mpt_attr_t; + +/*! + * @function os_workgroup_max_parallel_threads + * + * @abstract + * Returns the system's recommendation for maximum number of threads the client + * should make for a multi-threaded workload in a given workgroup. + * + * This API takes into consideration the current hardware the code is running on + * and the attributes of the workgroup. It does not take into consideration the + * current load of the system and therefore always provides the most optimal + * recommendation for the workload. + * + * @param wg + * The workgroup in which the multi-threaded workload will be performed in. The + * threads performing the multi-threaded workload are expected to join this + * workgroup. + * + * @param attr + * This value is currently unused and should be NULL. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT +int +os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t + _Nullable attr); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_OBJECT__ */ diff --git a/os/workgroup_object_private.h b/os/workgroup_object_private.h new file mode 100644 index 000000000..c0c263d65 --- /dev/null +++ b/os/workgroup_object_private.h @@ -0,0 +1,119 @@ +#ifndef __OS_WORKGROUP_OBJECT_PRIVATE__ +#define __OS_WORKGROUP_OBJECT_PRIVATE__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include instead of this file directly." 
+#include // For header doc +#endif + +#include + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/* Attribute creation and specification */ + +/* This is for clients who want to build their own workgroup attribute from + * scratch instead of configuring their attributes on top of the default set of + * attributes */ +#define OS_WORKGROUP_ATTR_INITIALIZER_EMPTY { .sig = _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT } + +/*! + * @enum os_workgroup_attr_flags_t + * + * @abstract A bitfield of flags describing options for workgroup configuration + */ +OS_ENUM(os_workgroup_attr_flags, uint32_t, + /*! + * @const OS_WORKGROUP_ATTR_NONPROPAGATING + * + * Asynchronous work initiated by threads which are members of a + * workgroup with OS_WORKGROUP_ATTR_NONPROPAGATING attribute, will not + * automatically be tracked as part of the workgroup. This applies to work + * initiated by calls such as dispatch_async() that may propagate other + * execution context properties. + * + * os_workgroups which are propagating by default can opt out this behavior + * by specifying the OS_WORKGROUP_ATTR_NONPROPAGATING flag. + */ + OS_WORKGROUP_ATTR_NONPROPAGATING = (1 << 1), + + /*! + * @const OS_WORKGROUP_ATTR_UNDIFFERENTIATED + * + * Member threads of a workgroup with the attribute flag + * OS_WORKGROUP_ATTR_UNDIFFERENTIATED are tracked and measured together with + * other threads in their process by the system for scheduling and + * performance control. + * + * os_workgroups which are tracked separately from other threads in + * the process by default, can opt out of it by specifying the + * OS_WORKGROUP_ATTR_UNDIFFERENTIATED flag. + */ + OS_WORKGROUP_ATTR_UNDIFFERENTIATED = (1 << 2) +); + +/*! + * @function os_workgroup_attr_set_flags + * + * @abstract + * Sets the user specified flags in the workgroup attribute. If invalid + * attributes are specified, this function will set and return an error. 
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_attr_set_flags(os_workgroup_attr_t wga, + os_workgroup_attr_flags_t flags); + + +/*! + * @function os_workgroup_create + * + * @abstract + * Creates an os_workgroup_t with the specified name and attributes. + * A newly created os_workgroup_t has no initial member threads - in particular + * the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. A workgroup with default attributes is + * propagating with asynchronous work and differentiated from other threads in + * the process (see os_workgroup_attr_flags_t). + * + * The attribute flag OS_WORKGROUP_ATTR_NONPROPAGATING MUST currently be + * specified. If it isn't or if invalid attributes are specified, this function + * will return NULL and set an errno. 
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create(const char * _Nullable name, + os_workgroup_attr_t _Nullable wga); + +/* To be deprecated once coreaudio adopts */ +#define OS_WORKGROUP_ATTR_INITIALIZER OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT + +typedef uint32_t os_workgroup_index; + +/* Deprecated in favor of os_workgroup_join */ +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token_out, + os_workgroup_index *_Nullable id_out); + +/* Deprecated in favor of os_workgroup_leave */ +OS_WORKGROUP_EXPORT +void +os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_OBJECT_PRIVATE__ */ diff --git a/os/workgroup_parallel.h b/os/workgroup_parallel.h new file mode 100644 index 000000000..2aca7f861 --- /dev/null +++ b/os/workgroup_parallel.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __OS_WORKGROUP_PARALLEL__ +#define __OS_WORKGROUP_PARALLEL__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#error "Please #include <os/workgroup.h> instead of this file directly."
+#include // For header doc +#endif + +#include + +__BEGIN_DECLS + +OS_WORKGROUP_ASSUME_NONNULL_BEGIN + +/*! + * @typedef os_workgroup_parallel_t + * + * @abstract + * A subclass of an os_workgroup_t for tracking parallel work. + */ +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct os_workgroup_s *os_workgroup_parallel_t; +#else +OS_WORKGROUP_SUBCLASS_DECL_PROTO(os_workgroup_parallel, Parallelizable); +OS_WORKGROUP_SUBCLASS_DECL(os_workgroup_parallel, os_workgroup, WorkGroupParallel); +#endif + +/*! + * @function os_workgroup_parallel_create + * + * @abstract + * Creates an os_workgroup_t which tracks a parallel workload. + * A newly created os_workgroup_interval_t has no initial member threads - + * in particular the creating thread does not join the os_workgroup_parallel_t + * implicitly. + * + * See also os_workgroup_max_parallel_threads(). + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param attr + * The requested set of workgroup attributes. NULL is to be specified for the + * default set of attributes. 
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +OS_SWIFT_NAME(WorkGroupParallel.init(__name:attr:)) +os_workgroup_parallel_t _Nullable +os_workgroup_parallel_create(const char * _Nullable name, + os_workgroup_attr_t _Nullable attr); + +OS_WORKGROUP_ASSUME_NONNULL_END + +__END_DECLS + +#endif /* __OS_WORKGROUP_PARALLEL__ */ diff --git a/os/workgroup_private.h b/os/workgroup_private.h new file mode 100644 index 000000000..961908d87 --- /dev/null +++ b/os/workgroup_private.h @@ -0,0 +1,17 @@ +#ifndef __OS_WORKGROUP_PRIVATE__ +#define __OS_WORKGROUP_PRIVATE__ + +#ifndef __DISPATCH_BUILDING_DISPATCH__ + +#ifndef __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ +#endif /* __OS_WORKGROUP_INDIRECT__ */ + +#include +#include +#include + +#undef __OS_WORKGROUP_INDIRECT__ +#endif /* __DISPATCH_BUILDING_DISPATCH__ */ + +#endif /* __OS_WORKGROUP_PRIVATE__ */ diff --git a/private/CMakeLists.txt b/private/CMakeLists.txt index a2ee9bdd9..f77a92d41 100644 --- a/private/CMakeLists.txt +++ b/private/CMakeLists.txt @@ -14,6 +14,8 @@ if (INSTALL_PRIVATE_HEADERS) private.h queue_private.h source_private.h + time_private.h + workloop_private.h DESTINATION "${INSTALL_DISPATCH_HEADERS_DIR}") endif() diff --git a/private/mach_private.h b/private/mach_private.h index 1474c163a..bed88c0bd 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -34,9 +34,10 @@ __BEGIN_DECLS -#define DISPATCH_MACH_SPI_VERSION 20161026 +#define DISPATCH_MACH_SPI_VERSION 20200229 #include +#include DISPATCH_ASSUME_NONNULL_BEGIN @@ -162,7 +163,7 @@ DISPATCH_ENUM(dispatch_mach_send_flags, unsigned long, * Trailer type of mach message received by dispatch mach channels */ -typedef mach_msg_context_trailer_t dispatch_mach_trailer_t; +typedef mach_msg_mac_trailer_t dispatch_mach_trailer_t; /*! 
* @constant DISPATCH_MACH_RECEIVE_MAX_INLINE_MESSAGE_SIZE @@ -370,14 +371,61 @@ dispatch_mach_create_f(const char *_Nullable label, * a "server" peer connection and the no more senders request is armed * immediately. * + * Note that the notification will not be issued if no send right was ever + * made for this connection receive right. + * * @param channel * The mach channel to request no senders notifications on. */ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0), bridgeos(4.0)) +API_DEPRECATED("Use dispatch_mach_notify_no_senders instead", macos(10.14, 10.16), + ios(12.0, 14.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW void dispatch_mach_request_no_senders(dispatch_mach_t channel); +/*! + * @function dispatch_mach_notify_no_senders + * + * Configure the mach channel to receive no more senders notifications. + * + * @discussion + * This function must be called before dispatch_mach_connect() has been called. + * + * When a checkin message is passed to dispatch_mach_connect() or + * dispatch_mach_reconnect(), the notification is armed after the checkin + * message has been sent successfully. + * + * If no checkin message is passed, then the mach channel is assumed to be + * a "server" peer connection and the no more senders request is armed + * immediately. + * + * Requesting a no-senders notification for a listener mach channel is likely a + * client error since listener connections will likely have short-lived send + * rights (only until a peer connection is established). + * + * @param channel + * The mach channel to request no senders notifications on. + * + * @param made_sendrights + * A boolean representing whether the send right for this connection has been + * made before dispatch_mach_connect() is called. 
+ * + * There are 2 cases of consideration: + * + * a) The client is initiating the peer connection by creating a receive right + * with an inserted send right and shipping the receive right over to the server + * in a checkin message. In this case, the server must specify true for + * made_sendrights when arming for no-senders notification. + * + * b) The server is initiating the connection by creating a mach channel with a + * receive right and using MACH_MSG_TYPE_MAKE_SEND to create a send right in the + * checkin reply for the peer connection. In this case, the server should specify + * false for made_sendrights while arming for no-senders notification. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(5.0)) +void +dispatch_mach_notify_no_senders(dispatch_mach_t channel, bool made_sendrights); + /*! * @typedef dispatch_mach_flags_t * @@ -402,7 +450,6 @@ dispatch_mach_request_no_senders(dispatch_mach_t channel); DISPATCH_OPTIONS(dispatch_mach_flags, uint64_t, DMF_NONE = 0x0, DMF_USE_STRICT_REPLY = 0x1, - DMF_REQUEST_NO_SENDERS = 0x2, ); /*! @@ -1229,6 +1276,42 @@ dispatch_mach_handoff_reply(dispatch_queue_t queue, mach_port_t port, dispatch_block_t block); #endif /* __BLOCKS__ */ +#if DISPATCH_MACH_SPI + +/*! + * @function dispatch_mach_msg_get_filter_policy_id + * Returns the message filter policy id from the message trailer. + * This id is added by the kernel during message send and is specific + * to the sender and port on which the message is received. + * + * @discussion + * This function should only be called from the context of an IPC handler. + * + * @param msg + * The dispatch mach message object to query. It should have a trailer of type dispatch_mach_trailer_t. + * + * @param filter_policy_id + * Return the filter policy id read from the message.
+ * + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +void +dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_policy_id); + + +/*! + * @function dispatch_mach_can_handoff_4libxpc + * + * Returns whether the code is running in a context where a handoff is possible. + */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0), bridgeos(5.0)) +DISPATCH_EXPORT DISPATCH_NOTHROW +bool +dispatch_mach_can_handoff_4libxpc(void); + +#endif // DISPATCH_MACH_SPI + DISPATCH_ASSUME_NONNULL_END __END_DECLS diff --git a/private/private.h b/private/private.h index df93d9a9f..b40a36c0f 100644 --- a/private/private.h +++ b/private/private.h @@ -178,7 +178,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); #if TARGET_OS_MAC #define DISPATCH_COCOA_COMPAT 1 -#elif defined(__linux__) || defined(__FreeBSD__) +#elif defined(__linux__) || defined(__FreeBSD__) || defined(_WIN32) #define DISPATCH_COCOA_COMPAT 1 #else #define DISPATCH_COCOA_COMPAT 0 @@ -192,16 +192,16 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit); typedef mach_port_t dispatch_runloop_handle_t; #elif defined(__linux__) || defined(__FreeBSD__) typedef int dispatch_runloop_handle_t; +#elif defined(_WIN32) +typedef void *dispatch_runloop_handle_t; #else #error "runloop support not implemented on this platform" #endif -#if TARGET_OS_MAC API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void); -#endif API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NOTHROW @@ -220,12 +220,12 @@ dispatch_queue_serial_t _dispatch_runloop_root_queue_create_4CF(const char *_Nullable label, unsigned long flags); -#if TARGET_OS_MAC API_AVAILABLE(macos(10.9), ios(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW -mach_port_t 
+dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t queue); +#if TARGET_OS_MAC API_AVAILABLE(macos(10.13.2), ios(11.2), tvos(11.2), watchos(4.2)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool diff --git a/private/queue_private.h b/private/queue_private.h index 2aa4e3ee3..86075d8e7 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -149,6 +149,17 @@ dispatch_set_qos_class_fallback(dispatch_object_t object, #define DISPATCH_QUEUE_FLAGS_MASK (DISPATCH_QUEUE_OVERCOMMIT) +#if __APPLE__ +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR +#else // __APPLE__ +// On FreeBSD pthread_attr_t is a typedef to a pointer type +#if defined(__FreeBSD__) +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR _Nullable +#else // defined(__FreeBSD__) +# define DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR +#endif // defined(__FreeBSD__) +#endif // __APPLE__ + /*! * @function dispatch_queue_attr_make_with_overcommit * @@ -323,12 +334,13 @@ dispatch_queue_set_width(dispatch_queue_t dq, long width); * @result * The newly created dispatch pthread root queue. */ -API_AVAILABLE(macos(10.9), ios(6.0)) DISPATCH_LINUX_UNAVAILABLE() +API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_scheduler_priority", + macos(10.9, 10.16), ios(6.0, 14.0)) DISPATCH_LINUX_UNAVAILABLE() DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_global_t dispatch_pthread_root_queue_create(const char *_Nullable label, - unsigned long flags, const pthread_attr_t *_Nullable attr, + unsigned long flags, const pthread_attr_t DISPATCH_QUEUE_NULLABLE_PTHREAD_ATTR_PTR *_Nullable attr, dispatch_block_t _Nullable configure); /*! @@ -437,7 +449,7 @@ dispatch_async_enforce_qos_class_f(dispatch_queue_t queue, * "detached" before the thread exits or the application will crash. 
*/ DISPATCH_EXPORT -void _dispatch_install_thread_detach_callback(dispatch_function_t cb); +void _dispatch_install_thread_detach_callback(void (*cb)(void)); #endif __END_DECLS diff --git a/private/source_private.h b/private/source_private.h index bd5e47ebc..d6b7266a1 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -654,7 +654,7 @@ typedef struct dispatch_source_extended_data_s { * argument, the remaining space in data will have been populated with zeroes. */ API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) -DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW size_t dispatch_source_get_extended_data(dispatch_source_t source, diff --git a/private/time_private.h b/private/time_private.h index ae341e6d6..7270f8bce 100644 --- a/private/time_private.h +++ b/private/time_private.h @@ -83,5 +83,36 @@ enum { #endif // __APPLE__ +/*! + * @function dispatch_time_to_nsecs + * + * @abstract + * Returns the clock and nanoseconds of a given dispatch_time_t. + * + * @discussion + * This interface allows to decode dispatch_time_t which allows to compare them + * provided they are for the same "clock_id". + * + * @param time + * The dispatch_time_t value to parse. + * + * @param clock + * A pointer to the clockid for this time. + * + * @param nsecs + * A pointer to the decoded number of nanoseconds for the passed in time + * relative to the epoch for this clock ID. + * + * @result + * Returns true if the dispatch_time_t value was valid. + * Returns false if the dispatch_time_t value was invalid, + * or DISPATCH_TIME_FOREVER. 
+ */ +API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +bool +dispatch_time_to_nsecs(dispatch_time_t time, + dispatch_clockid_t *clock, uint64_t *nsecs); + #endif diff --git a/private/workloop_private.h b/private/workloop_private.h index c06b498db..89e857a57 100644 --- a/private/workloop_private.h +++ b/private/workloop_private.h @@ -73,7 +73,7 @@ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_workloop_set_qos_class_floor(dispatch_workloop_t workloop, - qos_class_t qos, int relpri, dispatch_workloop_param_flags_t flags); + dispatch_qos_class_t qos, int relpri, dispatch_workloop_param_flags_t flags); /*! * @function dispatch_workloop_set_scheduler_priority @@ -172,7 +172,7 @@ API_DEPRECATED_WITH_REPLACEMENT("dispatch_workloop_set_qos_class_floor", DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW void dispatch_workloop_set_qos_class(dispatch_workloop_t workloop, - qos_class_t qos, dispatch_workloop_param_flags_t flags); + dispatch_qos_class_t qos, dispatch_workloop_param_flags_t flags); API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NOTHROW diff --git a/src/BlocksRuntime/Block.h b/src/BlocksRuntime/Block.h index d0898ff49..32f27f4f9 100644 --- a/src/BlocksRuntime/Block.h +++ b/src/BlocksRuntime/Block.h @@ -11,11 +11,25 @@ #ifndef _Block_H_ #define _Block_H_ +#if defined(_WIN32) +# if defined(BlocksRuntime_STATIC) +# define BLOCK_ABI +# else +# if defined(BlocksRuntime_EXPORTS) +# define BLOCK_ABI __declspec(dllexport) +# else +# define BLOCK_ABI __declspec(dllimport) +# endif +# endif +#else +# define BLOCK_ABI __attribute__((__visibility__("default"))) +#endif + #if !defined(BLOCK_EXPORT) # if defined(__cplusplus) -# define BLOCK_EXPORT extern "C" __attribute__((visibility("default"))) +# define BLOCK_EXPORT extern "C" BLOCK_ABI # else -# define BLOCK_EXPORT extern 
__attribute__((visibility("default"))) +# define BLOCK_EXPORT extern BLOCK_ABI # endif #endif @@ -38,8 +52,13 @@ BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int); BLOCK_EXPORT void _Block_object_dispose(const void *, const int); // Used by the compiler. Do not use these variables yourself. +#if defined(_WIN32) +extern void * _NSConcreteGlobalBlock[32]; +extern void * _NSConcreteStackBlock[32]; +#else BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; BLOCK_EXPORT void * _NSConcreteStackBlock[32]; +#endif #if __cplusplus } diff --git a/src/BlocksRuntime/BlocksRuntime.def b/src/BlocksRuntime/BlocksRuntime.def new file mode 100644 index 000000000..a3b1aabeb --- /dev/null +++ b/src/BlocksRuntime/BlocksRuntime.def @@ -0,0 +1,4 @@ +LIBRARY BlocksRuntime +EXPORTS + _NSConcreteGlobalBlock CONSTANT + _NSConcreteStackBlock CONSTANT diff --git a/src/BlocksRuntime/data.c b/src/BlocksRuntime/data.c index 03de71b41..fe4745b04 100644 --- a/src/BlocksRuntime/data.c +++ b/src/BlocksRuntime/data.c @@ -14,11 +14,17 @@ We allocate space and export a symbol to be used as the Class for the on-stack a We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. 
**********************/ -#define BLOCK_EXPORT __attribute__((visibility("default"))) +#include "Block.h" -BLOCK_EXPORT void * _NSConcreteStackBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteMallocBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteAutoBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] = { 0 }; -BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] = { 0 }; +#if defined(_WIN32) +void * _NSConcreteStackBlock[32] = { 0 }; +void * _NSConcreteGlobalBlock[32] = { 0 }; +#else +BLOCK_ABI void * _NSConcreteStackBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteGlobalBlock[32] = { 0 }; +#endif + +BLOCK_ABI void * _NSConcreteMallocBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteAutoBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteFinalizingBlock[32] = { 0 }; +BLOCK_ABI void * _NSConcreteWeakBlockVariable[32] = { 0 }; diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 80bbd54b1..f71b68f45 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,3 +1,4 @@ +include(CheckCCompilerFlag) include(SwiftSupport) include(DTrace) @@ -53,7 +54,13 @@ add_library(dispatch shims/perfmon.h shims/time.h shims/tsd.h + shims/yield.c shims/yield.h) + +set_target_properties(dispatch + PROPERTIES + POSITION_INDEPENDENT_CODE YES) + if(WIN32) target_sources(dispatch PRIVATE @@ -72,25 +79,79 @@ target_sources(dispatch PRIVATE block.cpp) if(HAVE_OBJC) + # TODO(compnerd) split DispatchStubs.cc into a separate component for the ObjC + # registration and a separate component for the swift compiler's emission of a + # call to the ObjC autorelease elision entry point. 
target_sources(dispatch PRIVATE data.m - object.m) + object.m + swift/DispatchStubs.cc) endif() if(ENABLE_SWIFT) set(swift_optimization_flags) if(NOT CMAKE_BUILD_TYPE MATCHES Debug) set(swift_optimization_flags -O) endif() + + # NOTE(compnerd) Today regardless of whether or not ObjC interop is enabled, + # swift will use an autoreleased return value convention for certain CF + # functions (including some that are used/related to dispatch). This means + # that the swift compiler in callers to such functions will call the function, + # and then pass the result of the function to + # objc_retainAutoreleasedReturnValue. In a context where we have ObjC interop + # disabled, we do not have access to the objc runtime so an implementation of + # objc_retainAutoreleasedReturnValue is not available. To work around this, we + # provide a shim for objc_retainAutoreleasedReturnValue in DispatchStubs.cc + # that just calls retain on the object. Once we fix the swift compiler to + # switch to a different model for handling these arguments with objc-interop + # disabled these shims can be eliminated. 
+ add_library(DispatchStubs + STATIC + swift/DispatchStubs.cc) + target_include_directories(DispatchStubs + PRIVATE + ${PROJECT_SOURCE_DIR}) + set_target_properties(DispatchStubs + PROPERTIES + POSITION_INDEPENDENT_CODE YES) + + if(USE_LLD_LINKER) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(use_ld_flag -use-ld=lld.exe) + else() + set(use_ld_flag -use-ld=lld) + endif() + elseif(USE_GOLD_LINKER) + if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) + set(use_ld_flag -use-ld=gold.exe) + else() + set(use_ld_flag -use-ld=gold) + endif() + endif() + add_swift_library(swiftDispatch + CFLAGS + -fblocks + -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap + DEPENDS + module-maps + DispatchStubs + LINK_FLAGS + ${use_ld_flag} + -lDispatchStubs + -L $ + -lBlocksRuntime + -L $ + -ldispatch + $<$,$>:-lmsvcrtd> + $<$,$>>:-lmsvcrt> MODULE_NAME Dispatch MODULE_LINK_NAME - dispatch + swiftDispatch MODULE_PATH ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - OUTPUT - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o SOURCES swift/Block.swift swift/Data.swift @@ -101,26 +162,16 @@ if(ENABLE_SWIFT) swift/Source.swift swift/Time.swift swift/Wrapper.swift - TARGET - ${CMAKE_C_COMPILER_TARGET} - CFLAGS - -fblocks - -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap SWIFT_FLAGS -I ${PROJECT_SOURCE_DIR} - -I/usr/include ${swift_optimization_flags} - DEPENDS - ${PROJECT_SOURCE_DIR}/dispatch/module.modulemap) - target_sources(dispatch - PRIVATE - swift/DispatchStubs.cc - ${CMAKE_CURRENT_BINARY_DIR}/swiftDispatch.o) - if(CMAKE_BUILD_TYPE MATCHES Debug) - target_link_libraries(dispatch - PRIVATE - swiftSwiftOnoneSupport) - endif() + $<$:-Xcc> + $<$:-D_MT> + # TODO(compnerd) handle /MT builds + $<$:-Xcc> + $<$:-D_DLL> + TARGET + ${CMAKE_SWIFT_COMPILER_TARGET}) endif() if(ENABLE_DTRACE) dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d @@ -145,7 +196,9 @@ if(WIN32) PRIVATE _CRT_NONSTDC_NO_WARNINGS) endif() -if(NOT "${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") 
+if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") + target_compile_options(dispatch PRIVATE /EHs-c-) +else() target_compile_options(dispatch PRIVATE -fno-exceptions) endif() if(DISPATCH_ENABLE_ASSERTS) @@ -162,11 +215,6 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL Android) PRIVATE -U_GNU_SOURCE) endif() -if(BSD_OVERLAY_FOUND) - target_compile_options(dispatch - PRIVATE - ${BSD_OVERLAY_CFLAGS}) -endif() if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE @@ -182,14 +230,11 @@ if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") PRIVATE -Xclang -fblocks) else() - # FIXME(compnerd) add check for -momit-leaf-frame-pointer? - target_compile_options(dispatch - PRIVATE - -fblocks - -momit-leaf-frame-pointer) -endif() -if(BSD_OVERLAY_FOUND) - target_link_libraries(dispatch PRIVATE ${BSD_OVERLAY_LDFLAGS}) + check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -fblocks) + if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) + endif() endif() if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) @@ -201,6 +246,7 @@ target_link_libraries(dispatch if(CMAKE_SYSTEM_NAME STREQUAL Windows) target_link_libraries(dispatch PRIVATE + ShLwApi WS2_32 WinMM synchronization) @@ -216,26 +262,49 @@ if(CMAKE_SYSTEM_NAME STREQUAL Darwin) endif() dispatch_set_linker(dispatch) -# Temporary staging; the various swift projects that depend on libdispatch -# all expect libdispatch.so to be in src/.libs/libdispatch.so -# So for now, make a copy so we don't have to do a coordinated commit across -# all the swift projects to change this assumption. 
-add_custom_command(TARGET dispatch POST_BUILD - COMMAND ${CMAKE_COMMAND} -E make_directory .libs - COMMAND ${CMAKE_COMMAND} -E copy $ .libs - COMMENT "Copying libdispatch to .libs") - -get_swift_host_arch(SWIFT_HOST_ARCH) - install(TARGETS dispatch - DESTINATION - "${INSTALL_TARGET_DIR}") + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) + if(ENABLE_SWIFT) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc DESTINATION - "${INSTALL_TARGET_DIR}/${SWIFT_HOST_ARCH}") + ${INSTALL_TARGET_DIR}/${swift_arch}) + + if(BUILD_SHARED_LIBS) + set(library_kind SHARED) + else() + set(library_kind STATIC) + endif() + + set(swiftDispatch_OUTPUT_FILE + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_${library_kind}_LIBRARY_PREFIX}swiftDispatch${CMAKE_${library_kind}_LIBRARY_SUFFIX}) + + if(CMAKE_SYSTEM_NAME STREQUAL Windows AND BUILD_SHARED_LIBS) + install(FILES + ${swiftDispatch_OUTPUT_FILE} + DESTINATION + bin) + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} + DESTINATION + ${INSTALL_TARGET_DIR}) + else() + install(FILES + ${swiftDispatch_OUTPUT_FILE} + DESTINATION + ${INSTALL_TARGET_DIR}) + endif() + + if(NOT BUILD_SHARED_LIBS) + install(FILES + $ + DESTINATION + ${INSTALL_TARGET_DIR}) + endif() endif() diff --git a/src/allocator_internal.h b/src/allocator_internal.h index ead653595..2b5a6061b 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -97,7 +97,7 @@ // Use the largest type your platform is comfortable doing atomic ops with. 
// TODO: rdar://11477843 typedef unsigned long bitmap_t; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 #define BYTES_PER_BITMAP 8 #else #define BYTES_PER_BITMAP 4 @@ -147,7 +147,7 @@ typedef unsigned long bitmap_t; #define PADDING_TO_CONTINUATION_SIZE(x) (ROUND_UP_TO_CONTINUATION_SIZE(x) - (x)) -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 #define SIZEOF_HEADER 16 #else #define SIZEOF_HEADER 8 diff --git a/src/benchmark.c b/src/benchmark.c index b47504386..259a67ca5 100644 --- a/src/benchmark.c +++ b/src/benchmark.c @@ -41,7 +41,7 @@ _dispatch_benchmark_init(void *context) register size_t cnt = bdata->count; size_t i = 0; uint64_t start, delta; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 && !defined(_WIN32) __uint128_t lcost; #else long double lcost; @@ -60,14 +60,14 @@ _dispatch_benchmark_init(void *context) } while (i < cnt); delta = _dispatch_uptime() - start; - lcost = delta; + lcost = (typeof(lcost)) delta; #if HAVE_MACH_ABSOLUTE_TIME lcost *= bdata->tbi.numer; lcost /= bdata->tbi.denom; #endif lcost /= cnt; - bdata->loop_cost = lcost > UINT64_MAX ? UINT64_MAX : (uint64_t)lcost; + bdata->loop_cost = (uint64_t) lcost > UINT64_MAX ? UINT64_MAX : (uint64_t)lcost; } #ifdef __BLOCKS__ @@ -93,7 +93,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, }; static dispatch_once_t pred; uint64_t ns, start, delta; -#if defined(__LP64__) +#if DISPATCH_SIZEOF_PTR == 8 && !defined(_WIN32) __uint128_t conversion, big_denom; #else long double conversion, big_denom; @@ -113,7 +113,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, } while (i < count); delta = _dispatch_uptime() - start; - conversion = delta; + conversion = (typeof(conversion)) delta; #if HAVE_MACH_ABSOLUTE_TIME conversion *= bdata.tbi.numer; big_denom = bdata.tbi.denom; @@ -122,7 +122,7 @@ dispatch_benchmark_f(size_t count, register void *ctxt, #endif big_denom *= count; conversion /= big_denom; - ns = conversion > UINT64_MAX ? 
UINT64_MAX : (uint64_t)conversion; + ns = (uint64_t) conversion > UINT64_MAX ? UINT64_MAX : (uint64_t)conversion; return ns - bdata.loop_cost; } diff --git a/src/data_internal.h b/src/data_internal.h index 1589a793a..9ed12e13b 100644 --- a/src/data_internal.h +++ b/src/data_internal.h @@ -57,10 +57,10 @@ DISPATCH_CLASS_DECL(data, OBJECT); struct dispatch_data_s { #if DISPATCH_DATA_IS_BRIDGED_TO_NSDATA - const void *do_vtable; + const void *__ptrauth_objc_isa_pointer do_vtable; dispatch_queue_t do_targetq; void *ctxt; - void *finalizer; + dispatch_function_t DISPATCH_FUNCTION_POINTER finalizer; #else DISPATCH_OBJECT_HEADER(data); #endif // DISPATCH_DATA_IS_BRIDGED_TO_NSDATA diff --git a/src/event/event.c b/src/event/event.c index fc6ee814d..b908419d2 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -30,7 +30,7 @@ static void _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt); DISPATCH_NOINLINE static dispatch_unote_t _dispatch_unote_create(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { dispatch_unote_linkage_t dul; dispatch_unote_class_t du; @@ -51,7 +51,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, } du->du_type = dst; du->du_can_be_wlh = dst->dst_per_trigger_qos; - du->du_ident = (uint32_t)handle; + du->du_ident = (dispatch_unote_ident_t)handle; du->du_filter = dst->dst_filter; du->du_fflags = (__typeof__(du->du_fflags))mask; if (dst->dst_flags & EV_UDATA_SPECIFIC) { @@ -63,7 +63,7 @@ _dispatch_unote_create(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { if (!handle) { return DISPATCH_UNOTE_NULL; @@ -74,7 +74,7 @@ _dispatch_unote_create_with_handle(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t 
handle, uintptr_t mask) { #if !TARGET_OS_MAC // if (handle > INT_MAX) { @@ -87,7 +87,7 @@ _dispatch_unote_create_with_fd(dispatch_source_type_t dst, DISPATCH_NOINLINE dispatch_unote_t _dispatch_unote_create_without_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { if (handle) { return DISPATCH_UNOTE_NULL; @@ -206,7 +206,7 @@ _dispatch_unote_unregister(dispatch_unote_t du, uint32_t flags) static dispatch_unote_t _dispatch_source_data_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask) + uintptr_t mask) { if (handle || mask) { return DISPATCH_UNOTE_NULL; @@ -227,7 +227,6 @@ const dispatch_source_type_s _dispatch_source_type_data_add = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -239,7 +238,6 @@ const dispatch_source_type_s _dispatch_source_type_data_or = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -251,7 +249,6 @@ const dispatch_source_type_s _dispatch_source_type_data_replace = { .dst_flags = EV_UDATA_SPECIFIC|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_data_create, .dst_merge_evt = NULL, @@ -271,7 +268,6 @@ const dispatch_source_type_s _dispatch_source_type_read = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -289,7 +285,6 @@ const dispatch_source_type_s 
_dispatch_source_type_write = { #endif // DISPATCH_EVENT_BACKEND_KEVENT .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_SET_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -299,7 +294,7 @@ const dispatch_source_type_s _dispatch_source_type_write = { static dispatch_unote_t _dispatch_source_signal_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask) + uintptr_t mask) { if (handle >= NSIG) { return DISPATCH_UNOTE_NULL; @@ -313,7 +308,6 @@ const dispatch_source_type_s _dispatch_source_type_signal = { .dst_flags = DISPATCH_EV_DIRECT|EV_CLEAR, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_signal_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -933,13 +927,13 @@ _dispatch_timer_unote_unregister(dispatch_timer_source_refs_t dt) static dispatch_unote_t _dispatch_source_timer_create(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask) + uintptr_t handle, uintptr_t mask) { dispatch_timer_source_refs_t dt; // normalize flags if (mask & DISPATCH_TIMER_STRICT) { - mask &= ~(unsigned long)DISPATCH_TIMER_BACKGROUND; + mask &= ~(uintptr_t)DISPATCH_TIMER_BACKGROUND; } if (mask & ~dst->dst_mask) { return DISPATCH_UNOTE_NULL; @@ -990,7 +984,6 @@ const dispatch_source_type_s _dispatch_source_type_timer = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -1004,6 +997,7 @@ const dispatch_source_type_s _dispatch_source_type_timer_with_clock = { .dst_timer_flags = 0, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_TIMER, .dst_size = sizeof(struct dispatch_timer_source_refs_s), + .dst_strict = true, .dst_create = 
_dispatch_source_timer_create, .dst_merge_evt = _dispatch_source_merge_evt, diff --git a/src/event/event_config.h b/src/event/event_config.h index f221d0922..0b883b035 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -243,7 +243,7 @@ typedef unsigned int mach_msg_priority_t; # endif // MACH_RCV_SYNC_WAIT # define DISPATCH_MACH_TRAILER_SIZE sizeof(dispatch_mach_trailer_t) -# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_CTX +# define DISPATCH_MACH_RCV_TRAILER MACH_RCV_TRAILER_AV # define DISPATCH_MACH_RCV_OPTIONS ( \ MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ MACH_RCV_TRAILER_ELEMENTS(DISPATCH_MACH_RCV_TRAILER) | \ diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index a5c71c710..759cbba75 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -260,7 +260,6 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) uint32_t events; events = _dispatch_unote_required_events(du); - du._du->du_priority = pri; dmb = _dispatch_unote_muxnote_bucket(du); dmn = _dispatch_unote_muxnote_find(dmb, du); @@ -429,8 +428,9 @@ _dispatch_timeout_program(uint32_t tidx, uint64_t target, } void -_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, - dispatch_timer_delay_s range, dispatch_clock_now_cache_t nows) +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) { dispatch_clock_t clock = DISPATCH_TIMER_CLOCK(tidx); uint64_t target = range.delay + _dispatch_time_now_cached(clock, nows); @@ -438,7 +438,8 @@ _dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth, uint32_t tidx, } void -_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth, uint32_t tidx) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx) { _dispatch_timeout_program(tidx, UINT64_MAX, UINT64_MAX); } @@ -510,7 +511,7 @@ _dispatch_event_merge_signal(dispatch_muxnote_t dmn) // consumed 
by dux_merge_evt() _dispatch_retain_unote_owner(du); dispatch_assert(!dux_needs_rearm(du._du)); - os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed) + os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed); dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0); } } else { @@ -548,6 +549,20 @@ _dispatch_get_buffer_size(dispatch_muxnote_t dmn, bool writer) return (uintptr_t)n; } +static void +_dispatch_event_merge_hangup(dispatch_unote_t du) +{ + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state |= DU_STATE_NEEDS_DELETE; + du_state &= ~DU_STATE_ARMED; + _dispatch_unote_state_set(du, du_state); + uintptr_t data = 0; // EOF + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_DELETE|EV_DISPATCH, data, 0); +} + static void _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) { @@ -564,8 +579,8 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } @@ -577,11 +592,25 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed) - dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0, 0); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } + // SR-9033: EPOLLHUP is an unmaskable event which we must respond to + if (events & EPOLLHUP) { + LIST_FOREACH_SAFE(dul, 
&dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_hangup(du); + } + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_hangup(du); + } + epoll_ctl(_dispatch_epfd, EPOLL_CTL_DEL, dmn->dmn_fd, NULL); + return; + } + events = _dispatch_muxnote_armed_events(dmn); if (events) _dispatch_epoll_update(dmn, events, EPOLL_CTL_MOD); } @@ -628,10 +657,14 @@ _dispatch_event_loop_drain(uint32_t flags) _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); break; - case DISPATCH_EPOLL_CLOCK_MACH: + case DISPATCH_EPOLL_CLOCK_UPTIME: _dispatch_event_merge_timer(DISPATCH_CLOCK_UPTIME); break; + case DISPATCH_EPOLL_CLOCK_MONOTONIC: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MONOTONIC); + break; + default: dmn = ev[i].data.ptr; switch (dmn->dmn_filter) { @@ -668,6 +701,12 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ + (void)wlh; +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) diff --git a/src/event/event_internal.h b/src/event/event_internal.h index d59b303c4..bf5605da7 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -123,11 +123,17 @@ _dispatch_timer_flags_from_clock(dispatch_clock_t clock) return (dispatch_unote_timer_flags_t)(clock << 2); } +#if defined(_WIN32) +typedef uintptr_t dispatch_unote_ident_t; +#else +typedef uint32_t dispatch_unote_ident_t; +#endif + #define DISPATCH_UNOTE_CLASS_HEADER() \ - dispatch_source_type_t du_type; \ + dispatch_source_type_t __ptrauth_objc_isa_pointer du_type; \ uintptr_t du_owner_wref; /* "weak" back reference to the owner object */ \ os_atomic(dispatch_unote_state_t) du_state; \ - uint32_t du_ident; \ + dispatch_unote_ident_t du_ident; \ int8_t du_filter; \ uint8_t 
du_is_direct : 1; \ uint8_t du_is_timer : 1; \ @@ -245,7 +251,7 @@ void dispatch_debug_machport(mach_port_t name, const char *str); // layout must match dispatch_source_refs_s struct dispatch_mach_recv_refs_s { DISPATCH_UNOTE_CLASS_HEADER(); - dispatch_mach_handler_function_t dmrr_handler_func; + dispatch_mach_handler_function_t DISPATCH_FUNCTION_POINTER dmrr_handler_func; void *dmrr_handler_ctxt; }; typedef struct dispatch_mach_recv_refs_s *dispatch_mach_recv_refs_t; @@ -355,7 +361,7 @@ typedef struct dispatch_source_type_s { uint32_t dst_size; dispatch_unote_t (*dst_create)(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t mask); #if DISPATCH_EVENT_BACKEND_KEVENT bool (*dst_update_mux)(struct dispatch_muxnote_s *dmn); #endif @@ -382,9 +388,9 @@ extern const dispatch_source_type_s _dispatch_mach_type_send; extern const dispatch_source_type_s _dispatch_mach_type_recv; extern const dispatch_source_type_s _dispatch_mach_type_reply; extern const dispatch_source_type_s _dispatch_xpc_type_sigterm; -extern const dispatch_source_type_s _dispatch_source_type_timer_with_clock; #define DISPATCH_MACH_TYPE_WAITER ((const dispatch_source_type_s *)-2) #endif +extern const dispatch_source_type_s _dispatch_source_type_timer_with_clock; #pragma mark - #pragma mark deferred items @@ -614,11 +620,11 @@ _dispatch_timer_unote_compute_missed(dispatch_timer_source_refs_t dt, extern struct dispatch_timer_heap_s _dispatch_timers_heap[DISPATCH_TIMER_COUNT]; dispatch_unote_t _dispatch_unote_create_with_handle(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t mask); dispatch_unote_t _dispatch_unote_create_with_fd(dispatch_source_type_t dst, - uintptr_t handle, unsigned long mask); + uintptr_t handle, uintptr_t mask); dispatch_unote_t _dispatch_unote_create_without_handle( - dispatch_source_type_t dst, uintptr_t handle, unsigned long mask); + dispatch_source_type_t dst, uintptr_t handle, uintptr_t 
mask); void _dispatch_unote_dispose(dispatch_unote_t du); /* @@ -670,6 +676,7 @@ void _dispatch_event_loop_wake_owner(struct dispatch_sync_context_s *dsc, dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state); void _dispatch_event_loop_wait_for_ownership( struct dispatch_sync_context_s *dsc); +void _dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh); void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags); #if DISPATCH_WLH_DEBUG diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index ed5ffe0da..b89e4d0cc 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -1298,14 +1298,13 @@ _dispatch_unote_unregister_direct(dispatch_unote_t du, uint32_t flags) enum { DISPATCH_WORKLOOP_ASYNC, DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, - DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC, DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE, DISPATCH_WORKLOOP_ASYNC_LEAVE, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, DISPATCH_WORKLOOP_RETARGET, + DISPATCH_WORKLOOP_SYNC_DISCOVER, DISPATCH_WORKLOOP_SYNC_WAIT, DISPATCH_WORKLOOP_SYNC_WAKE, DISPATCH_WORKLOOP_SYNC_FAKE, @@ -1315,17 +1314,16 @@ enum { static char const * const _dispatch_workloop_actions[] = { [DISPATCH_WORKLOOP_ASYNC] = "async", [DISPATCH_WORKLOOP_ASYNC_FROM_SYNC] = "async (from sync)", - [DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC] = "discover sync", [DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE] = "qos update", [DISPATCH_WORKLOOP_ASYNC_LEAVE] = "leave", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC] = "leave (from sync)", [DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER] = "leave (from transfer)", - [DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP] = "leave (forced)", [DISPATCH_WORKLOOP_RETARGET] = "retarget", + [DISPATCH_WORKLOOP_SYNC_DISCOVER] = "sync-discover", [DISPATCH_WORKLOOP_SYNC_WAIT] = "sync-wait", - [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_WAKE] = 
"sync-wake", + [DISPATCH_WORKLOOP_SYNC_FAKE] = "sync-fake", [DISPATCH_WORKLOOP_SYNC_END] = "sync-end", }; @@ -1420,11 +1418,8 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { - case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC: - case DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC: + case DISPATCH_WORKLOOP_ASYNC_FROM_SYNC: case DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE: dispatch_assert(_dq_state_is_base_wlh(dq_state)); dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); @@ -1432,21 +1427,16 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ROLE_MASK; mask |= DISPATCH_QUEUE_ENQUEUED; mask |= DISPATCH_QUEUE_MAX_QOS_MASK; - if (which == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC) { - dispatch_assert(!_dq_state_in_sync_transfer(dq_state)); - dispatch_assert(_dq_state_drain_locked(dq_state)); - mask |= DISPATCH_QUEUE_SYNC_TRANSFER; + fflags |= NOTE_WL_IGNORE_ESTALE; + fflags |= NOTE_WL_UPDATE_QOS; + if (_dq_state_in_uncontended_sync(dq_state)) { fflags |= NOTE_WL_DISCOVER_OWNER; - } else { - fflags |= NOTE_WL_IGNORE_ESTALE; + mask |= DISPATCH_QUEUE_UNCONTENDED_SYNC; } - fflags |= NOTE_WL_UPDATE_QOS; pp = _dispatch_kevent_workloop_priority(dq, which, qos); break; case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC: - fflags |= NOTE_WL_END_OWNERSHIP; - /* FALLTHROUGH */ case DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER: fflags |= NOTE_WL_IGNORE_ESTALE; /* FALLTHROUGH */ @@ -1456,18 +1446,6 @@ _dispatch_kq_fill_workloop_event(dispatch_kevent_t ke, int which, mask |= DISPATCH_QUEUE_ENQUEUED; break; - case DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP: - // 0 is never a valid queue state, so the knote attach will fail due to - // the debounce. However, NOTE_WL_END_OWNERSHIP is always observed even - // when ESTALE is returned, which is the side effect we're after here. 
- fflags |= NOTE_WL_END_OWNERSHIP; - fflags |= NOTE_WL_IGNORE_ESTALE; - action = EV_ADD | EV_ENABLE; - mask = ~0ull; - dq_state = 0; - pp = _dispatch_kevent_workloop_priority(dq, which, qos); - break; - case DISPATCH_WORKLOOP_RETARGET: action = EV_ADD | EV_DELETE | EV_ENABLE; fflags |= NOTE_WL_END_OWNERSHIP; @@ -1519,6 +1497,16 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, uint16_t action = 0; switch (which) { + case DISPATCH_WORKLOOP_SYNC_DISCOVER: + dispatch_assert(_dq_state_received_sync_wait(dq_state)); + dispatch_assert(_dq_state_in_uncontended_sync(dq_state)); + action = EV_ADD | EV_DISABLE; + fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_DISCOVER_OWNER | + NOTE_WL_IGNORE_ESTALE; + mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + break; + case DISPATCH_WORKLOOP_SYNC_WAIT: action = EV_ADD | EV_DISABLE; fflags = NOTE_WL_SYNC_WAIT; @@ -1526,10 +1514,6 @@ _dispatch_kq_fill_workloop_sync_event(dispatch_kevent_t ke, int which, if (_dispatch_qos_from_pp(pp) == 0) { pp = _dispatch_qos_to_pp(DISPATCH_QOS_DEFAULT); } - if (_dq_state_received_sync_wait(dq_state)) { - fflags |= NOTE_WL_DISCOVER_OWNER; - mask = DISPATCH_QUEUE_ROLE_MASK | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; - } break; case DISPATCH_WORKLOOP_SYNC_FAKE: @@ -1652,9 +1636,6 @@ _dispatch_event_loop_get_action_for_state(uint64_t dq_state) if (!_dq_state_drain_locked(dq_state)) { return DISPATCH_WORKLOOP_ASYNC; } - if (!_dq_state_in_sync_transfer(dq_state)) { - return DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC; - } return DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; } @@ -1728,42 +1709,11 @@ _dispatch_kevent_workloop_poke(dispatch_wlh_t wlh, uint64_t dq_state, dispatch_assert(_dq_state_is_enqueued_on_target(dq_state)); dispatch_assert(!_dq_state_is_enqueued_on_manager(dq_state)); action = _dispatch_event_loop_get_action_for_state(dq_state); -override: _dispatch_kq_fill_workloop_event(&ke, action, wlh, dq_state); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 
1, NULL, NULL, kev_flags)) { - _dispatch_kevent_workloop_drain_error(&ke, - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE); - dispatch_assert(action == DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC); - dq_state = ke.ext[EV_EXTIDX_WL_VALUE]; - // - // There are 4 things that can cause an ESTALE for DISCOVER_SYNC: - // - the queue role changed, we don't want to redrive - // - the queue is no longer enqueued, we don't want to redrive - // - the max QoS changed, whoever changed it is doing the same - // transition, so we don't need to redrive - // - the DISPATCH_QUEUE_IN_SYNC_TRANFER bit got set - // - // The interesting case is the last one, and will only happen in the - // following chain of events: - // 1. uncontended dispatch_sync() - // 2. contended dispatch_sync() - // 3. contended dispatch_async() - // - // And this code is running because of (3). It is possible that (1) - // hands off to (2) while this call is being made, causing the - // DISPATCH_QUEUE_IN_TRANSFER_SYNC to be set, and we don't need to tell - // the kernel about the owner anymore. However, the async in that case - // will have set a QoS on the queue (since dispatch_sync()s don't but - // dispatch_async()s always do), and we need to redrive to tell it - // to the kernel. 
- // - if (_dq_state_is_base_wlh(dq_state) && - _dq_state_is_enqueued_on_target(dq_state) && - _dq_state_in_sync_transfer(dq_state)) { - action = DISPATCH_WORKLOOP_ASYNC; - goto override; - } + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); } if (!(flags & DISPATCH_EVENT_LOOP_OVERRIDE)) { @@ -2042,11 +1992,25 @@ _dispatch_event_loop_leave_deferred(dispatch_deferred_items_t ddi, uint64_t dq_state) { #if DISPATCH_USE_KEVENT_WORKLOOP + if (_dq_state_received_sync_wait(dq_state)) { + dispatch_tid tid = _dq_state_drain_owner(dq_state); + int slot = _dispatch_kq_deferred_find_slot(ddi, EVFILT_WORKLOOP, + (uint64_t)ddi->ddi_wlh, tid); + if (slot == ddi->ddi_nevents) { + dispatch_assert(slot < DISPATCH_DEFERRED_ITEMS_EVENT_COUNT); + ddi->ddi_nevents++; + } + _dispatch_kq_fill_workloop_sync_event(&ddi->ddi_eventlist[slot], + DISPATCH_WORKLOOP_SYNC_DISCOVER, ddi->ddi_wlh, + dq_state, _dq_state_drain_owner(dq_state)); + } + int action = _dispatch_event_loop_get_action_for_state(dq_state); dispatch_assert(ddi->ddi_wlh_needs_delete); ddi->ddi_wlh_needs_delete = false; ddi->ddi_wlh_needs_update = false; _dispatch_kq_fill_ddi_workloop_event(ddi, action, ddi->ddi_wlh, dq_state); + #else (void)ddi; (void)dq_state; #endif // DISPATCH_USE_KEVENT_WORKLOOP @@ -2098,6 +2062,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, int action, n = 0; dispatch_assert(_dq_state_drain_locked_by(new_state, dsc->dsc_waiter)); + dispatch_assert(!dsc->dsc_wlh_self_wakeup); if (wlh != DISPATCH_WLH_ANON && ddi && ddi->ddi_wlh == wlh) { dispatch_assert(ddi->ddi_wlh_needs_delete); @@ -2106,8 +2071,8 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, if (wlh == waiter_wlh) { // async -> sync handoff dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); - dispatch_assert(_dq_state_in_sync_transfer(new_state)); + dispatch_assert(!_dq_state_in_uncontended_sync(old_state)); + 
dispatch_assert(!_dq_state_in_uncontended_sync(new_state)); if (_dq_state_is_enqueued_on_target(new_state)) { action = DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE; @@ -2130,7 +2095,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, if ((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED) { dispatch_assert(_dq_state_is_enqueued_on_target(old_state)); - dispatch_assert(_dq_state_in_sync_transfer(new_state)); + dispatch_assert(!_dq_state_in_uncontended_sync(new_state)); // During the handoff, the waiter noticed there was no work *after* // that last work item, so we want to kill the thread request while // there's an owner around to avoid races betwen knote_process() and @@ -2138,7 +2103,7 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_TRANSFER, wlh, new_state); } - if (_dq_state_in_sync_transfer(new_state)) { + if (_dq_state_is_base_wlh(new_state)) { // Even when waiter_wlh != wlh we can pretend we got woken up // which is a knote we will be able to delete later with a SYNC_END. 
// This allows rectifying incorrect ownership sooner, and also happens @@ -2146,10 +2111,13 @@ _dispatch_event_loop_wake_owner(dispatch_sync_context_t dsc, _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_WAKE, wlh, new_state, dsc->dsc_waiter); } - if (_dq_state_in_sync_transfer(old_state)) { + if (!dsc->dsc_from_async && _dq_state_is_base_wlh(old_state) && + !_dq_state_in_uncontended_sync(old_state)) { + // Note: when coming from dispatch_resume despite having work items + // the caller has an "uncontended sync" ownership dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], - DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); + DISPATCH_WORKLOOP_SYNC_END, wlh, old_state, tid); } // // Past this call it is not safe to look at `wlh` anymore as the callers @@ -2181,7 +2149,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) int i, n = 0; dq_state = os_atomic_load2o((dispatch_queue_t)wlh, dq_state, relaxed); - if (dsc->dsc_wlh_was_first && !_dq_state_drain_locked(dq_state) && + if (!_dq_state_drain_locked(dq_state) && _dq_state_is_enqueued_on_target(dq_state)) { // // @@ -2202,8 +2170,19 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) // lower priority thread, so we need to drive it once to avoid priority // inversions. // + // + // + // Also, it is possible that a low priority async is ahead of us, + // and hasn't made its thread request yet. If this waiter is high + // priority this is a priority inversion, and we need to redrive the + // async. 
+ // _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC, wlh, dq_state); + } else if (_dq_state_received_sync_wait(dq_state)) { + _dispatch_kq_fill_workloop_sync_event(&ke[n++], + DISPATCH_WORKLOOP_SYNC_DISCOVER, wlh, dq_state, + _dq_state_drain_owner(dq_state)); } again: @@ -2213,8 +2192,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) for (i = 0; i < n; i++) { long flags = 0; if (ke[i].fflags & NOTE_WL_SYNC_WAIT) { - flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR | - DISPATCH_KEVENT_WORKLOOP_ALLOW_ESTALE; + flags = DISPATCH_KEVENT_WORKLOOP_ALLOW_EINTR; } _dispatch_kevent_workloop_drain_error(&ke[i], flags); } @@ -2235,6 +2213,25 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) } } +void +_dispatch_event_loop_ensure_ownership(dispatch_wlh_t wlh) +{ +#if DISPATCH_USE_KEVENT_WORKLOOP + uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; + dispatch_tid tid = _dispatch_tid_self(); + dispatch_kevent_s ke; + + _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_WAKE, + wlh, tid, tid); + if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { + _dispatch_kevent_workloop_drain_error(&ke, 0); + __builtin_unreachable(); + } +#else + (void)wlh; +#endif +} + void _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, uint64_t new_state, uint32_t flags) @@ -2242,7 +2239,6 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, #if DISPATCH_USE_KEVENT_WORKLOOP uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke[2]; - bool needs_forceful_end_ownership = false; int n = 0; dispatch_assert(_dq_state_is_base_wlh(new_state)); @@ -2250,50 +2246,15 @@ _dispatch_event_loop_end_ownership(dispatch_wlh_t wlh, uint64_t old_state, _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_FROM_SYNC, wlh, new_state); } else if (_dq_state_is_enqueued_on_target(old_state)) { - // - // Because the 
thread request knote may not - // have made it, DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC may silently - // turn into a no-op. - // - // However, the kernel may know about our ownership anyway, so we need - // to make sure it is forcefully ended. - // - needs_forceful_end_ownership = true; dispatch_assert(_dq_state_is_suspended(new_state)); _dispatch_kq_fill_workloop_event(&ke[n++], DISPATCH_WORKLOOP_ASYNC_LEAVE_FROM_SYNC, wlh, new_state); - } else if (_dq_state_received_sync_wait(old_state)) { - // - // This case happens when the current workloop got waited on by some - // thread calling _dispatch_event_loop_wait_for_ownership. - // - // When the workloop became IDLE, it didn't find the sync waiter - // continuation, didn't have a thread request to cancel either, and so - // we need the kernel to forget about the current thread ownership - // of the workloop. - // - // To forget this ownership, we create a fake WAKE knote that can not - // coalesce with any meaningful one, just so that we can EV_DELETE it - // with the NOTE_WL_END_OWNERSHIP. - // - // This is a gross hack, but this will really only ever happen for - // cases where a sync waiter started to wait on a workloop, but his part - // of the graph got mutated and retargeted onto a different workloop. - // In doing so, that sync waiter has snitched to the kernel about - // ownership, and the workloop he's bogusly waiting on will go through - // this codepath. 
- // - needs_forceful_end_ownership = true; } - if (_dq_state_in_sync_transfer(old_state)) { + if (!_dq_state_in_uncontended_sync(old_state)) { dispatch_tid tid = _dispatch_tid_self(); _dispatch_kq_fill_workloop_sync_event(&ke[n++], DISPATCH_WORKLOOP_SYNC_END, wlh, new_state, tid); - } else if (needs_forceful_end_ownership) { - kev_flags |= KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST; - _dispatch_kq_fill_workloop_event(&ke[n++], - DISPATCH_WORKLOOP_ASYNC_FORCE_END_OWNERSHIP, wlh, new_state); } if (_dispatch_kq_poll(wlh, ke, n, ke, n, NULL, NULL, kev_flags)) { @@ -2437,7 +2398,6 @@ const dispatch_source_type_s _dispatch_source_type_proc = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_proc_create, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2458,7 +2418,6 @@ const dispatch_source_type_s _dispatch_source_type_vnode = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2491,7 +2450,6 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_without_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2517,7 +2475,6 @@ const dispatch_source_type_s _dispatch_source_type_sock = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2532,7 +2489,6 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { .dst_mask = NOTE_FLOW_ADV_UPDATE|NOTE_CHANNEL_EVENT|NOTE_IF_ADV_UPD, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = 
sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_fd, .dst_merge_evt = _dispatch_source_merge_evt, @@ -2591,6 +2547,8 @@ _dispatch_memorypressure_handler(void *context) } } +DISPATCH_STATIC_GLOBAL(dispatch_source_t _dispatch_memorypressure_source); + static void _dispatch_memorypressure_init(void) { @@ -2599,6 +2557,7 @@ _dispatch_memorypressure_init(void) DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_mgr_q._as_dq); dispatch_set_context(ds, ds); dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); + _dispatch_memorypressure_source = ds; dispatch_activate(ds); } #endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE @@ -2650,7 +2609,6 @@ const dispatch_source_type_s _dispatch_source_type_memorypressure = { |NOTE_MEMORYSTATUS_MSL_STATUS, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, #if TARGET_OS_SIMULATOR .dst_create = _dispatch_source_memorypressure_create, @@ -2681,7 +2639,6 @@ const dispatch_source_type_s _dispatch_source_type_vm = { .dst_mask = NOTE_VM_PRESSURE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_vm_create, // redirected to _dispatch_source_type_memorypressure @@ -2701,6 +2658,7 @@ static void _dispatch_mach_host_notify_update(void *context); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_notify_port_pred); DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_mach_calendar_pred); DISPATCH_STATIC_GLOBAL(mach_port_t _dispatch_mach_notify_port); +DISPATCH_STATIC_GLOBAL(dispatch_unote_t _dispatch_mach_notify_unote); static void _dispatch_timers_calendar_change(void) @@ -2810,6 +2768,7 @@ _dispatch_mach_notify_port_init(void *context DISPATCH_UNUSED) dispatch_assume(_dispatch_unote_register(du, DISPATCH_WLH_ANON, DISPATCH_PRIORITY_FLAG_MANAGER)); + _dispatch_mach_notify_unote = du; } static void 
@@ -3101,7 +3060,6 @@ const dispatch_source_type_s _dispatch_source_type_mach_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_source_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3129,7 +3087,6 @@ const dispatch_source_type_s _dispatch_mach_type_send = { .dst_mask = DISPATCH_MACH_SEND_DEAD|DISPATCH_MACH_SEND_POSSIBLE, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_send_refs_s), - .dst_strict = false, .dst_create = _dispatch_mach_send_create, .dst_update_mux = _dispatch_mach_send_update, @@ -3243,7 +3200,6 @@ const dispatch_source_type_s _dispatch_source_type_mach_recv = { , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, .dst_size = sizeof(struct dispatch_source_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_source_merge_evt, @@ -3267,7 +3223,6 @@ const dispatch_source_type_s _dispatch_mach_type_notification = { .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_unote_class_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_notification_event, @@ -3294,7 +3249,6 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { .dst_fflags = DISPATCH_MACH_RCV_OPTIONS, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = sizeof(struct dispatch_mach_recv_refs_s), - .dst_strict = false, // without handle because the mach code will set the ident after connect .dst_create = _dispatch_unote_create_without_handle, @@ -3324,7 +3278,6 @@ const dispatch_source_type_s _dispatch_mach_type_reply = { .dst_fflags = DISPATCH_MACH_RCV_OPTIONS & ~MACH_RCV_VOUCHER, .dst_action = DISPATCH_UNOTE_ACTION_PASS_FFLAGS, .dst_size = 
sizeof(struct dispatch_mach_reply_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_mach_reply_merge_evt, @@ -3340,7 +3293,6 @@ const dispatch_source_type_s _dispatch_xpc_type_sigterm = { .dst_fflags = 0, .dst_action = DISPATCH_UNOTE_ACTION_PASS_DATA, .dst_size = sizeof(struct dispatch_xpc_term_refs_s), - .dst_strict = false, .dst_create = _dispatch_unote_create_with_handle, .dst_merge_evt = _dispatch_xpc_sigterm_merge_evt, diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 2fe968071..3576774b2 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -21,59 +21,877 @@ #include "internal.h" #if DISPATCH_EVENT_BACKEND_WINDOWS +static HANDLE hPort = NULL; +enum _dispatch_windows_port { + DISPATCH_PORT_POKE = 0, + DISPATCH_PORT_TIMER_CLOCK_WALL, + DISPATCH_PORT_TIMER_CLOCK_UPTIME, + DISPATCH_PORT_TIMER_CLOCK_MONOTONIC, + DISPATCH_PORT_FILE_HANDLE, + DISPATCH_PORT_PIPE_HANDLE_READ, + DISPATCH_PORT_PIPE_HANDLE_WRITE, + DISPATCH_PORT_SOCKET_READ, + DISPATCH_PORT_SOCKET_WRITE, +}; + +enum _dispatch_muxnote_events { + DISPATCH_MUXNOTE_EVENT_READ = 1 << 0, + DISPATCH_MUXNOTE_EVENT_WRITE = 1 << 1, +}; + #pragma mark dispatch_unote_t +typedef struct dispatch_muxnote_s { + LIST_ENTRY(dispatch_muxnote_s) dmn_list; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_readers_head; + LIST_HEAD(, dispatch_unote_linkage_s) dmn_writers_head; + + // This refcount solves a race condition that can happen with I/O completion + // ports. When we enqueue packets with muxnote pointers associated with + // them, it's possible that those packets might not be processed until after + // the event has been unregistered. We increment this upon creating a + // muxnote or posting to a completion port, and we decrement it upon + // unregistering the event or processing a packet. When it hits zero, we + // dispose the muxnote. 
+ os_atomic(uintptr_t) dmn_refcount; + + dispatch_unote_ident_t dmn_ident; + int8_t dmn_filter; + enum _dispatch_muxnote_handle_type { + DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID, + DISPATCH_MUXNOTE_HANDLE_TYPE_FILE, + DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE, + DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET, + } dmn_handle_type; + enum _dispatch_muxnote_events dmn_events; + + // For pipes, this event is used to synchronize the monitoring thread with + // I/O completion port processing. For sockets, this is the event used with + // WSAEventSelect(). + HANDLE dmn_event; + + // Pipe monitoring thread control + HANDLE dmn_thread; + os_atomic(bool) dmn_stop; + + // Socket events registered with WSAEventSelect() + long dmn_network_events; + + // Threadpool wait handle for socket events + PTP_WAIT dmn_threadpool_wait; +} *dispatch_muxnote_t; + +static LIST_HEAD(dispatch_muxnote_bucket_s, dispatch_muxnote_s) + _dispatch_sources[DSL_HASH_SIZE]; + +DISPATCH_ALWAYS_INLINE +static inline struct dispatch_muxnote_bucket_s * +_dispatch_unote_muxnote_bucket(uint32_t ident) +{ + return &_dispatch_sources[DSL_HASH(ident)]; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_muxnote_t +_dispatch_unote_muxnote_find(struct dispatch_muxnote_bucket_s *dmb, + dispatch_unote_ident_t ident, int8_t filter) +{ + dispatch_muxnote_t dmn; + if (filter == EVFILT_WRITE) filter = EVFILT_READ; + LIST_FOREACH(dmn, dmb, dmn_list) { + if (dmn->dmn_ident == ident && dmn->dmn_filter == filter) { + break; + } + } + return dmn; +} + +static dispatch_muxnote_t +_dispatch_muxnote_create(dispatch_unote_t du, + enum _dispatch_muxnote_events events) +{ + dispatch_muxnote_t dmn; + int8_t filter = du._du->du_filter; + HANDLE handle = (HANDLE)du._du->du_ident; + + dmn = _dispatch_calloc(1, sizeof(*dmn)); + if (dmn == NULL) { + DISPATCH_INTERNAL_CRASH(0, "_dispatch_calloc"); + } + os_atomic_store(&dmn->dmn_refcount, 1, relaxed); + dmn->dmn_ident = (dispatch_unote_ident_t)handle; + dmn->dmn_filter = filter; + dmn->dmn_events = events; + 
LIST_INIT(&dmn->dmn_readers_head); + LIST_INIT(&dmn->dmn_writers_head); + + switch (filter) { + case EVFILT_SIGNAL: + WIN_PORT_ERROR(); + free(dmn); + return NULL; + + case EVFILT_WRITE: + case EVFILT_READ: + switch (GetFileType(handle)) { + case FILE_TYPE_UNKNOWN: + // ensure that an invalid handle was not passed + (void)dispatch_assume(GetLastError() == NO_ERROR); + DISPATCH_INTERNAL_CRASH(0, "unknown handle type"); + + case FILE_TYPE_REMOTE: + DISPATCH_INTERNAL_CRASH(0, "unused handle type"); + + case FILE_TYPE_CHAR: + // The specified file is a character file, typically a + // LPT device or a console. + WIN_PORT_ERROR(); + free(dmn); + return NULL; + + case FILE_TYPE_DISK: + // The specified file is a disk file + dmn->dmn_handle_type = DISPATCH_MUXNOTE_HANDLE_TYPE_FILE; + break; + + case FILE_TYPE_PIPE: + // The specified file is a socket, a named pipe, or an + // anonymous pipe. + dmn->dmn_handle_type = _dispatch_handle_is_socket(handle) + ? DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET + : DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE; + break; + } + + break; + + default: + DISPATCH_INTERNAL_CRASH(0, "unexpected filter"); + } + + + return dmn; +} + +static void +_dispatch_muxnote_stop(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_thread) { + // Keep trying to cancel ReadFile() until the thread exits + os_atomic_store(&dmn->dmn_stop, true, relaxed); + SetEvent(dmn->dmn_event); + do { + CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); + } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); + CloseHandle(dmn->dmn_thread); + dmn->dmn_thread = NULL; + } + if (dmn->dmn_threadpool_wait) { + SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); + WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, + /* fCancelPendingCallbacks */ FALSE); + CloseThreadpoolWait(dmn->dmn_threadpool_wait); + dmn->dmn_threadpool_wait = NULL; + } + if (dmn->dmn_handle_type == DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET) { + WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); + } +} + +static 
void +_dispatch_muxnote_dispose(dispatch_muxnote_t dmn) +{ + if (dmn->dmn_thread || dmn->dmn_threadpool_wait) { + DISPATCH_INTERNAL_CRASH(0, "disposed a muxnote with an active thread"); + } + if (dmn->dmn_event) { + CloseHandle(dmn->dmn_event); + } + free(dmn); +} + +static void +_dispatch_muxnote_retain(dispatch_muxnote_t dmn) +{ + uintptr_t refcount = os_atomic_inc(&dmn->dmn_refcount, relaxed); + if (refcount == 0) { + DISPATCH_INTERNAL_CRASH(0, "muxnote refcount overflow"); + } + if (refcount == 1) { + DISPATCH_INTERNAL_CRASH(0, "retained a disposing muxnote"); + } +} + +static void +_dispatch_muxnote_release(dispatch_muxnote_t dmn) +{ + uintptr_t refcount = os_atomic_dec(&dmn->dmn_refcount, relaxed); + if (refcount == 0) { + _dispatch_muxnote_dispose(dmn); + } else if (refcount == UINTPTR_MAX) { + DISPATCH_INTERNAL_CRASH(0, "muxnote refcount underflow"); + } +} + +static unsigned WINAPI +_dispatch_pipe_monitor_thread(void *context) +{ + dispatch_muxnote_t dmn = (dispatch_muxnote_t)context; + HANDLE hPipe = (HANDLE)dmn->dmn_ident; + do { + char cBuffer[1]; + DWORD dwNumberOfBytesTransferred; + OVERLAPPED ov = {0}; + BOOL bSuccess = ReadFile(hPipe, cBuffer, /* nNumberOfBytesToRead */ 0, + &dwNumberOfBytesTransferred, &ov); + DWORD dwBytesAvailable; + DWORD dwError = GetLastError(); + if (!bSuccess && dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hPipe, &ov, + &dwNumberOfBytesTransferred, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (bSuccess) { + bSuccess = PeekNamedPipe(hPipe, NULL, 0, NULL, &dwBytesAvailable, + NULL); + dwError = GetLastError(); + } + if (bSuccess) { + if (dwBytesAvailable == 0) { + // This can happen with a zero-byte write. Try again. + continue; + } + } else if (dwError == ERROR_NO_DATA) { + // The pipe is nonblocking. Try again. 
+ Sleep(0); + continue; + } else { + _dispatch_debug("pipe[0x%llx]: GetLastError() returned %lu", + (long long)hPipe, dwError); + if (dwError == ERROR_OPERATION_ABORTED) { + continue; + } + os_atomic_store(&dmn->dmn_stop, true, relaxed); + dwBytesAvailable = 0; + } + + // Make sure the muxnote stays alive until the packet is dequeued + _dispatch_muxnote_retain(dmn); + + // The lpOverlapped parameter does not actually need to point to an + // OVERLAPPED struct. It's really just a pointer to pass back to + // GetQueuedCompletionStatus(). + bSuccess = PostQueuedCompletionStatus(hPort, + dwBytesAvailable, (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_READ, + (LPOVERLAPPED)dmn); + if (!bSuccess) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + + // If data is written into the pipe and not read right away, ReadFile() + // will keep returning immediately and we'll flood the completion port. + // This event lets us synchronize with _dispatch_event_loop_drain() so + // that we only post events when it's ready for them. 
+ WaitForSingleObject(dmn->dmn_event, INFINITE); + } while (!os_atomic_load(&dmn->dmn_stop, relaxed)); + _dispatch_debug("pipe[0x%llx]: monitor exiting", (long long)hPipe); + return 0; +} + +static DWORD +_dispatch_pipe_write_availability(HANDLE hPipe) +{ + IO_STATUS_BLOCK iosb; + FILE_PIPE_LOCAL_INFORMATION fpli; + NTSTATUS status = _dispatch_NtQueryInformationFile(hPipe, &iosb, &fpli, + sizeof(fpli), FilePipeLocalInformation); + if (!NT_SUCCESS(status)) { + return 1; + } + return fpli.WriteQuotaAvailable; +} + +static VOID CALLBACK +_dispatch_socket_callback(PTP_CALLBACK_INSTANCE inst, void *context, + PTP_WAIT pwa, TP_WAIT_RESULT res) +{ + dispatch_muxnote_t dmn = (dispatch_muxnote_t)context; + SOCKET sock = (SOCKET)dmn->dmn_ident; + WSANETWORKEVENTS events; + if (WSAEnumNetworkEvents(sock, (WSAEVENT)dmn->dmn_event, &events) == 0) { + long lNetworkEvents = events.lNetworkEvents; + DWORD dwBytesAvailable = 1; + if (lNetworkEvents & FD_CLOSE) { + dwBytesAvailable = 0; + // Post to all registered read and write handlers + lNetworkEvents |= FD_READ | FD_WRITE; + } else if (lNetworkEvents & FD_READ) { + ioctlsocket(sock, FIONREAD, &dwBytesAvailable); + } + if (lNetworkEvents & FD_READ) { + _dispatch_muxnote_retain(dmn); + if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, + (ULONG_PTR)DISPATCH_PORT_SOCKET_READ, (LPOVERLAPPED)dmn)) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + if (lNetworkEvents & FD_WRITE) { + _dispatch_muxnote_retain(dmn); + if (!PostQueuedCompletionStatus(hPort, dwBytesAvailable, + (ULONG_PTR)DISPATCH_PORT_SOCKET_WRITE, (LPOVERLAPPED)dmn)) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + } else { + _dispatch_debug("socket[0x%llx]: WSAEnumNetworkEvents() failed (%d)", + (long long)sock, WSAGetLastError()); + } + SetThreadpoolWait(pwa, dmn->dmn_event, /* pftTimeout */ NULL); +} + +static BOOL +_dispatch_io_trigger(dispatch_muxnote_t dmn) +{ + BOOL bSuccess; + long 
lNetworkEvents; + + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + _dispatch_muxnote_retain(dmn); + bSuccess = PostQueuedCompletionStatus(hPort, 0, + (ULONG_PTR)DISPATCH_PORT_FILE_HANDLE, (LPOVERLAPPED)dmn); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + if ((dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) && + !dmn->dmn_thread) { + dmn->dmn_thread = (HANDLE)_beginthreadex(/* security */ NULL, + /* stack_size */ 1, _dispatch_pipe_monitor_thread, + (void *)dmn, /* initflag */ 0, /* thrdaddr */ NULL); + if (!dmn->dmn_thread) { + DISPATCH_INTERNAL_CRASH(errno, "_beginthread"); + } + dmn->dmn_event = CreateEventW(NULL, /* bManualReset */ FALSE, + /* bInitialState */ FALSE, NULL); + if (!dmn->dmn_event) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + _dispatch_muxnote_retain(dmn); + DWORD available = + _dispatch_pipe_write_availability((HANDLE)dmn->dmn_ident); + bSuccess = PostQueuedCompletionStatus(hPort, available, + (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_WRITE, + (LPOVERLAPPED)dmn); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + if (!dmn->dmn_event) { + dmn->dmn_event = CreateEventW(NULL, /* bManualReset */ FALSE, + /* bInitialState */ FALSE, NULL); + if (!dmn->dmn_event) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + } + if (!dmn->dmn_threadpool_wait) { + dmn->dmn_threadpool_wait = CreateThreadpoolWait( + _dispatch_socket_callback, dmn, + /* PTP_CALLBACK_ENVIRON */ NULL); + if (!dmn->dmn_threadpool_wait) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateThreadpoolWait"); + } + SetThreadpoolWait(dmn->dmn_threadpool_wait, 
dmn->dmn_event, + /* pftTimeout */ NULL); + } + lNetworkEvents = FD_CLOSE; + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) { + lNetworkEvents |= FD_READ; + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + lNetworkEvents |= FD_WRITE; + } + if (dmn->dmn_network_events != lNetworkEvents) { + if (WSAEventSelect((SOCKET)dmn->dmn_ident, (WSAEVENT)dmn->dmn_event, + lNetworkEvents) != 0) { + DISPATCH_INTERNAL_CRASH(WSAGetLastError(), "WSAEventSelect"); + } + dmn->dmn_network_events = lNetworkEvents; + } + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { + // FD_WRITE is edge-triggered, not level-triggered, so it will only + // be signaled if the socket becomes writable after a send() fails + // with WSAEWOULDBLOCK. We can work around this by performing a + // zero-byte send(). If the socket is writable, the send() will + // succeed and we can immediately post a packet, and if it isn't, it + // will fail with WSAEWOULDBLOCK and WSAEventSelect() will report + // the next time it becomes available. 
+ if (send((SOCKET)dmn->dmn_ident, "", 0, 0) == 0) { + _dispatch_muxnote_retain(dmn); + bSuccess = PostQueuedCompletionStatus(hPort, 1, + (ULONG_PTR)DISPATCH_PORT_SOCKET_WRITE, + (LPOVERLAPPED)dmn); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } + } + } + break; + } + + return TRUE; +} + +DISPATCH_ALWAYS_INLINE +static inline enum _dispatch_muxnote_events +_dispatch_unote_required_events(dispatch_unote_t du) +{ + switch (du._du->du_filter) { + case DISPATCH_EVFILT_CUSTOM_ADD: + case DISPATCH_EVFILT_CUSTOM_OR: + case DISPATCH_EVFILT_CUSTOM_REPLACE: + return 0; + case EVFILT_WRITE: + return DISPATCH_MUXNOTE_EVENT_WRITE; + default: + return DISPATCH_MUXNOTE_EVENT_READ; + } +} + bool -_dispatch_unote_register(dispatch_unote_t du DISPATCH_UNUSED, - dispatch_wlh_t wlh DISPATCH_UNUSED, - dispatch_priority_t pri DISPATCH_UNUSED) +_dispatch_unote_register_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); - return false; + struct dispatch_muxnote_bucket_s *dmb; + dispatch_muxnote_t dmn; + enum _dispatch_muxnote_events events; + + events = _dispatch_unote_required_events(du); + + dmb = _dispatch_unote_muxnote_bucket(du._du->du_ident); + dmn = _dispatch_unote_muxnote_find(dmb, du._du->du_ident, + du._du->du_filter); + if (dmn) { + WIN_PORT_ERROR(); + DISPATCH_INTERNAL_CRASH(0, "muxnote updating is not supported"); + } else { + dmn = _dispatch_muxnote_create(du, events); + if (!dmn) { + return false; + } + if (_dispatch_io_trigger(dmn) == FALSE) { + _dispatch_muxnote_release(dmn); + return false; + } + LIST_INSERT_HEAD(dmb, dmn, dmn_list); + } + + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + if (events & DISPATCH_MUXNOTE_EVENT_READ) { + 
LIST_INSERT_HEAD(&dmn->dmn_readers_head, dul, du_link); + } else if (events & DISPATCH_MUXNOTE_EVENT_WRITE) { + LIST_INSERT_HEAD(&dmn->dmn_writers_head, dul, du_link); + } + break; + } + + dul->du_muxnote = dmn; + _dispatch_unote_state_set(du, DISPATCH_WLH_ANON, DU_STATE_ARMED); + + return true; } void -_dispatch_unote_resume(dispatch_unote_t du DISPATCH_UNUSED) +_dispatch_unote_resume_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + dispatch_assert(_dispatch_unote_registered(du)); + _dispatch_io_trigger(dmn); } bool -_dispatch_unote_unregister(dispatch_unote_t du DISPATCH_UNUSED, - uint32_t flags DISPATCH_UNUSED) +_dispatch_unote_unregister_muxed(dispatch_unote_t du) { - WIN_PORT_ERROR(); - return false; + dispatch_unote_linkage_t dul = _dispatch_unote_get_linkage(du); + dispatch_muxnote_t dmn = dul->du_muxnote; + + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + LIST_REMOVE(dul, du_link); + _LIST_TRASH_ENTRY(dul, du_link); + break; + } + dul->du_muxnote = NULL; + + LIST_REMOVE(dmn, dmn_list); + _dispatch_muxnote_stop(dmn); + _dispatch_muxnote_release(dmn); + + _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); + return true; +} + +static void +_dispatch_event_merge_file_handle(dispatch_muxnote_t dmn) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | 
EV_DISPATCH, 1, 0); + } + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_assert(dux_needs_rearm(du._du)); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_pipe_handle_read(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state &= ~DU_STATE_ARMED; + uintptr_t data = dwBytesAvailable; + uint32_t flags; + if (dwBytesAvailable > 0) { + flags = EV_ADD | EV_ENABLE | EV_DISPATCH; + } else { + du_state |= DU_STATE_NEEDS_DELETE; + flags = EV_DELETE | EV_DISPATCH; + } + _dispatch_unote_state_set(du, du_state); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, flags, data, 0); + } + SetEvent(dmn->dmn_event); + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_pipe_handle_write(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); + uintptr_t data = dwBytesAvailable; + if (dwBytesAvailable > 0) { + os_atomic_store2o(du._dr, ds_pending_data, ~data, 
relaxed); + } else { + os_atomic_store2o(du._dr, ds_pending_data, 0, relaxed); + } + dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, data, 0); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_socket(dispatch_unote_t du, DWORD dwBytesAvailable) +{ + // consumed by dux_merge_evt() + _dispatch_retain_unote_owner(du); + dispatch_unote_state_t du_state = _dispatch_unote_state(du); + du_state &= ~DU_STATE_ARMED; + uintptr_t data = dwBytesAvailable; + uint32_t flags; + if (dwBytesAvailable > 0) { + flags = EV_ADD | EV_ENABLE | EV_DISPATCH; + } else { + du_state |= DU_STATE_NEEDS_DELETE; + flags = EV_DELETE | EV_DISPATCH; + } + _dispatch_unote_state_set(du, du_state); + os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + dux_merge_evt(du._du, flags, data, 0); +} + +static void +_dispatch_event_merge_socket_read(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_readers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_socket(du, dwBytesAvailable); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); +} + +static void +_dispatch_event_merge_socket_write(dispatch_muxnote_t dmn, + DWORD dwBytesAvailable) +{ + dispatch_unote_linkage_t dul, dul_next; + LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { + dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); + _dispatch_event_merge_socket(du, dwBytesAvailable); + } + // Retained when posting the completion packet + _dispatch_muxnote_release(dmn); } #pragma mark timers +typedef struct _dispatch_windows_timeout_s { + PTP_TIMER pTimer; + enum _dispatch_windows_port ullIdent; + bool bArmed; +} *dispatch_windows_timeout_t; + +#define DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(clock) \ + [DISPATCH_CLOCK_##clock] = { \ + .pTimer = NULL, \ + 
.ullIdent = DISPATCH_PORT_TIMER_CLOCK_##clock, \ + .bArmed = FALSE, \ + } + +static struct _dispatch_windows_timeout_s _dispatch_windows_timeout[] = { + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(WALL), + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(UPTIME), + DISPATCH_WINDOWS_TIMEOUT_INITIALIZER(MONOTONIC), +}; + +static void +_dispatch_event_merge_timer(dispatch_clock_t clock) +{ + uint32_t tidx = DISPATCH_TIMER_INDEX(clock, 0); + + _dispatch_windows_timeout[clock].bArmed = FALSE; + + _dispatch_timers_heap_dirty(_dispatch_timers_heap, tidx); + _dispatch_timers_heap[tidx].dth_needs_program = true; + _dispatch_timers_heap[tidx].dth_armed = false; +} + +static void CALLBACK +_dispatch_timer_callback(PTP_CALLBACK_INSTANCE Instance, PVOID Context, + PTP_TIMER Timer) +{ + BOOL bSuccess; + + bSuccess = PostQueuedCompletionStatus(hPort, 0, (ULONG_PTR)Context, + NULL); + if (bSuccess == FALSE) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "PostQueuedCompletionStatus"); + } +} + void -_dispatch_event_loop_timer_arm(uint32_t tidx DISPATCH_UNUSED, - dispatch_timer_delay_s range DISPATCH_UNUSED, - dispatch_clock_now_cache_t nows DISPATCH_UNUSED) +_dispatch_event_loop_timer_arm(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx, dispatch_timer_delay_s range, + dispatch_clock_now_cache_t nows) { - WIN_PORT_ERROR(); + dispatch_windows_timeout_t timer; + FILETIME ftDueTime; + LARGE_INTEGER liTime; + + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_WALL: + timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL]; + liTime.QuadPart = range.delay + + _dispatch_time_now_cached(DISPATCH_TIMER_CLOCK(tidx), nows); + break; + + case DISPATCH_CLOCK_UPTIME: + case DISPATCH_CLOCK_MONOTONIC: + timer = &_dispatch_windows_timeout[DISPATCH_TIMER_CLOCK(tidx)]; + liTime.QuadPart = -((range.delay + 99) / 100); + break; + } + + if (timer->pTimer == NULL) { + timer->pTimer = CreateThreadpoolTimer(_dispatch_timer_callback, + (LPVOID)timer->ullIdent, NULL); + if (timer->pTimer == NULL) { + 
DISPATCH_INTERNAL_CRASH(GetLastError(), + "CreateThreadpoolTimer"); + } + } + + ftDueTime.dwHighDateTime = liTime.HighPart; + ftDueTime.dwLowDateTime = liTime.LowPart; + + SetThreadpoolTimer(timer->pTimer, &ftDueTime, /*msPeriod=*/0, + /*msWindowLength=*/0); + timer->bArmed = TRUE; } void -_dispatch_event_loop_timer_delete(uint32_t tidx DISPATCH_UNUSED) +_dispatch_event_loop_timer_delete(dispatch_timer_heap_t dth DISPATCH_UNUSED, + uint32_t tidx) { - WIN_PORT_ERROR(); + dispatch_windows_timeout_t timer; + + switch (DISPATCH_TIMER_CLOCK(tidx)) { + case DISPATCH_CLOCK_WALL: + timer = &_dispatch_windows_timeout[DISPATCH_CLOCK_WALL]; + break; + + case DISPATCH_CLOCK_UPTIME: + case DISPATCH_CLOCK_MONOTONIC: + timer = &_dispatch_windows_timeout[DISPATCH_TIMER_CLOCK(tidx)]; + break; + } + + SetThreadpoolTimer(timer->pTimer, NULL, /*msPeriod=*/0, + /*msWindowLength=*/0); + timer->bArmed = FALSE; } #pragma mark dispatch_loop +static void +_dispatch_windows_port_init(void *context DISPATCH_UNUSED) +{ + hPort = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1); + if (hPort == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "CreateIoCompletionPort"); + } + +#if DISPATCH_USE_MGR_THREAD + _dispatch_trace_item_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q); + dx_push(_dispatch_mgr_q.do_targetq, &_dispatch_mgr_q, 0); +#endif +} + void _dispatch_event_loop_poke(dispatch_wlh_t wlh DISPATCH_UNUSED, uint64_t dq_state DISPATCH_UNUSED, uint32_t flags DISPATCH_UNUSED) { - WIN_PORT_ERROR(); + static dispatch_once_t _dispatch_windows_port_init_pred; + BOOL bSuccess; + + dispatch_once_f(&_dispatch_windows_port_init_pred, NULL, + _dispatch_windows_port_init); + bSuccess = PostQueuedCompletionStatus(hPort, 0, DISPATCH_PORT_POKE, + NULL); + (void)dispatch_assume(bSuccess); } DISPATCH_NOINLINE void -_dispatch_event_loop_drain(uint32_t flags DISPATCH_UNUSED) +_dispatch_event_loop_drain(uint32_t flags) +{ + DWORD dwNumberOfBytesTransferred; + ULONG_PTR ulCompletionKey; + 
LPOVERLAPPED pOV; + BOOL bSuccess; + + pOV = (LPOVERLAPPED)&pOV; + bSuccess = GetQueuedCompletionStatus(hPort, &dwNumberOfBytesTransferred, + &ulCompletionKey, &pOV, + (flags & KEVENT_FLAG_IMMEDIATE) ? 0 : INFINITE); + while (bSuccess) { + switch (ulCompletionKey) { + case DISPATCH_PORT_POKE: + break; + + case DISPATCH_PORT_TIMER_CLOCK_WALL: + _dispatch_event_merge_timer(DISPATCH_CLOCK_WALL); + break; + + case DISPATCH_PORT_TIMER_CLOCK_UPTIME: + _dispatch_event_merge_timer(DISPATCH_CLOCK_UPTIME); + break; + + case DISPATCH_PORT_TIMER_CLOCK_MONOTONIC: + _dispatch_event_merge_timer(DISPATCH_CLOCK_MONOTONIC); + break; + + case DISPATCH_PORT_FILE_HANDLE: + _dispatch_event_merge_file_handle((dispatch_muxnote_t)pOV); + break; + + case DISPATCH_PORT_PIPE_HANDLE_READ: + _dispatch_event_merge_pipe_handle_read((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + case DISPATCH_PORT_PIPE_HANDLE_WRITE: + _dispatch_event_merge_pipe_handle_write((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + case DISPATCH_PORT_SOCKET_READ: + _dispatch_event_merge_socket_read((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + case DISPATCH_PORT_SOCKET_WRITE: + _dispatch_event_merge_socket_write((dispatch_muxnote_t)pOV, + dwNumberOfBytesTransferred); + break; + + default: + DISPATCH_INTERNAL_CRASH(ulCompletionKey, + "unsupported completion key"); + } + + bSuccess = GetQueuedCompletionStatus(hPort, + &dwNumberOfBytesTransferred, &ulCompletionKey, &pOV, 0); + } + + if (bSuccess == FALSE && pOV != NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), + "GetQueuedCompletionStatus"); + } +} + +void +_dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc DISPATCH_UNUSED) { WIN_PORT_ERROR(); } @@ -109,9 +927,9 @@ _dispatch_event_loop_assert_not_owned(dispatch_wlh_t wlh) #endif void -_dispatch_event_loop_leave_immediate(dispatch_wlh_t wlh, uint64_t dq_state) +_dispatch_event_loop_leave_immediate(uint64_t dq_state) { - (void)wlh; (void)dq_state; + 
(void)dq_state; } #endif // DISPATCH_EVENT_BACKEND_WINDOWS diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 326c3d936..28f167517 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -97,7 +97,6 @@ _dispatch_workq_worker_register(dispatch_queue_global_t root_q) _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); #else (void)root_q; - (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } @@ -124,7 +123,6 @@ _dispatch_workq_worker_unregister(dispatch_queue_global_t root_q) _dispatch_unfair_lock_unlock(&mon->registered_tid_lock); #else (void)root_q; - (void)cls; #endif // HAVE_DISPATCH_WORKQ_MONITORING } @@ -218,7 +216,7 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) int32_t floor = mon->target_runnable - WORKQ_MAX_TRACKED_TIDS; _dispatch_debug("workq: %s has no runnable workers; poking with floor %d", dq->dq_label, floor); - _dispatch_global_queue_poke(dq, 1, floor); + _dispatch_root_queue_poke(dq, 1, floor); global_runnable += 1; // account for poke in global estimate } else if (mon->num_runnable < mon->target_runnable && global_runnable < global_soft_max) { @@ -231,7 +229,7 @@ _dispatch_workq_monitor_pools(void *context DISPATCH_UNUSED) floor = MAX(floor, floor2); _dispatch_debug("workq: %s under utilization target; poking with floor %d", dq->dq_label, floor); - _dispatch_global_queue_poke(dq, 1, floor); + _dispatch_root_queue_poke(dq, 1, floor); global_runnable += 1; // account for poke in global estimate } } @@ -245,7 +243,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) int i, target_runnable = (int)dispatch_hw_config(active_cpus); foreach_qos_bucket_reverse(i) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = _dispatch_get_root_queue(i, false); + mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), false); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; @@ -253,7 +251,7 
@@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) // Create monitoring timer that will periodically run on dispatch_mgr_q dispatch_source_t ds = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, - 0, 0, &_dispatch_mgr_q); + 0, 0, _dispatch_mgr_q._as_dq); dispatch_source_set_timer(ds, dispatch_time(DISPATCH_TIME_NOW, 0), NSEC_PER_SEC, 0); dispatch_source_set_event_handler_f(ds, _dispatch_workq_monitor_pools); diff --git a/src/eventlink.c b/src/eventlink.c new file mode 100644 index 000000000..4a7194b90 --- /dev/null +++ b/src/eventlink.c @@ -0,0 +1,542 @@ +// +// eventlink.c +// libdispatch +// +// Created by Rokhini Prabhu on 12/13/19. +// + +#include "internal.h" +#include + +#if OS_EVENTLINK_USE_MACH_EVENTLINK + +OS_OBJECT_CLASS_DECL(os_eventlink); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_eventlink, + (void (*)(_os_object_t))_os_eventlink_xref_dispose, + (void (*)(_os_object_t))_os_eventlink_dispose); +#endif // USE_OBJC +#define EVENTLINK_CLASS OS_OBJECT_VTABLE(os_eventlink) + +/* Convenience macros for accessing into the struct os_eventlink_s */ +#define ev_local_port port_pair.pair[0] +#define ev_remote_port port_pair.pair[1] +#define ev_port_pair port_pair.desc + +#pragma mark Internal functions + +void +_os_eventlink_xref_dispose(os_eventlink_t ev) { + return _os_object_release_internal(ev->_as_os_obj); +} + +void +_os_eventlink_dispose(os_eventlink_t ev) { + if (ev->ev_state & OS_EVENTLINK_LABEL_NEEDS_FREE) { + free((void *) ev->name); + } + + if (MACH_PORT_VALID(ev->ev_local_port)) { + mach_port_deallocate(mach_task_self(), ev->ev_local_port); + } + if (MACH_PORT_VALID(ev->ev_remote_port)) { + mach_port_deallocate(mach_task_self(), ev->ev_remote_port); + } +} + +static inline os_eventlink_t +_os_eventlink_create_internal(const char *name) +{ + os_eventlink_t ev = NULL; + ev = (os_eventlink_t) _os_object_alloc(EVENTLINK_CLASS, + sizeof(struct os_eventlink_s)); + if (ev == NULL) { + errno = ENOMEM; + return NULL; + } + + if (name) { + const 
char *tmp = _dispatch_strdup_if_mutable(name); + if (tmp != name) { + ev->ev_state |= OS_EVENTLINK_LABEL_NEEDS_FREE; + } + ev->name = tmp; + } + + return ev; +} + +static inline int +_mach_error_to_errno(kern_return_t kr) +{ + int ret = 0; + + switch (kr) { + case KERN_NAME_EXISTS: + ret = EALREADY; + break; + case KERN_INVALID_ARGUMENT: + ret = EINVAL; + break; + case KERN_OPERATION_TIMED_OUT: + ret = ETIMEDOUT; + break; + case KERN_INVALID_NAME: + /* This is most likely due to waiting on a cancelled eventlink but also + * possible to hit this if there is a bug and a double free of the port. */ + case KERN_TERMINATED: /* Other side died */ + ret = ECANCELED; + break; + case KERN_ABORTED: + ret = ECONNABORTED; + break; + case KERN_SUCCESS: + ret = 0; + break; + default: + return -1; + } + + errno = ret; + return ret; +} + +static uint64_t +_os_clockid_normalize_to_machabs(os_clockid_t inclock, uint64_t intimeout) +{ + uint64_t timeout = 0; + + switch (inclock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + timeout = intimeout; + break; + } + + return timeout; +} + +static int +os_eventlink_wait_until_internal(os_eventlink_t ev, os_clockid_t clock, + uint64_t deadline, uint64_t *signals_consumed_out) +{ + int ret = 0; + os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME); + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + kern_return_t kr = KERN_SUCCESS; + uint64_t count_to_exceed = ev->local_count; + + kr = mach_eventlink_wait_until(ev->ev_local_port, &ev->local_count, + MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline); + if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) { + *signals_consumed_out = ev->local_count - count_to_exceed; + } else if (kr == KERN_INVALID_NAME) { + /* This means that the eventlink got cancelled after the cancel check + * 
above but before we waited --> assert that that is indeed the case */ + os_assert(_os_eventlink_is_cancelled(ev->ev_state)); + } + + return _mach_error_to_errno(kr); +} + +static int +os_eventlink_signal_and_wait_until_internal(os_eventlink_t ev, os_clockid_t clock, + uint64_t deadline, uint64_t * _Nullable signals_consumed_out) +{ + int ret = 0; + kern_return_t kr = KERN_SUCCESS; + os_assert(clock == OS_CLOCK_MACH_ABSOLUTE_TIME); + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + uint64_t count_to_exceed = ev->local_count; + kr = mach_eventlink_signal_wait_until(ev->ev_local_port, &ev->local_count, 0, + MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, deadline); + + if (kr == KERN_SUCCESS && (signals_consumed_out != NULL)) { + *signals_consumed_out = ev->local_count - count_to_exceed; + } else if (kr == KERN_INVALID_NAME) { + /* This means that the eventlink got cancelled after the cancel check + * above but before we signal and waited --> assert that that is indeed + * the case */ + os_assert(_os_eventlink_is_cancelled(ev->ev_state)); + } + + return _mach_error_to_errno(kr); +} + + +#pragma mark Private functions + +os_eventlink_t +os_eventlink_create(const char *name) +{ + return _os_eventlink_create_internal(name); +} + +int +os_eventlink_activate(os_eventlink_t ev) +{ + int ret = 0; + + // These checks are racy but allow us to shortcircuit before we make the syscall + if (MACH_PORT_VALID(ev->ev_local_port)) { + return ret; + } + + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + struct os_eventlink_s tmp_ev; + bzero(&tmp_ev, sizeof(tmp_ev)); + + kern_return_t kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN, &tmp_ev.ev_local_port); + if (kr == KERN_SUCCESS) { + // Only atomically store 
the new ports if we have + // EVENTLINK_INACTIVE_PORT there. The only reason this would fail is + // cause it was concurrently activated. + uint64_t dummy; + bool success = os_atomic_cmpxchgv(&ev->ev_port_pair, EVENTLINK_INACTIVE_PORT, tmp_ev.ev_port_pair, &dummy, relaxed); + if (!success) { + // tmp_ev still has valid ports that need to be released + if (MACH_PORT_VALID(tmp_ev.ev_local_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_local_port); + } + if (MACH_PORT_VALID(tmp_ev.ev_remote_port)) { + mach_port_deallocate(mach_task_self(), tmp_ev.ev_remote_port); + } + return EINVAL; + } + } + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_extract_remote_port(os_eventlink_t ev, mach_port_t *port_out) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit and give the right + // errors + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + /* We're giving away our +1 to the remote port */ + mach_port_t port = os_atomic_xchg(&ev->ev_remote_port, EVENTLINK_CLEARED_PORT, relaxed); + if (!MACH_PORT_VALID(port)) { + errno = ret = EINVAL; + return ret; + } + *port_out = port; + + return ret; +} + +os_eventlink_t +os_eventlink_create_with_port(const char *name, mach_port_t port) +{ + os_eventlink_t ev = _os_eventlink_create_internal(name); + if (ev == NULL) { + return NULL; + } + /* Take our own +1 on the port */ + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + + os_assert(ev->ev_local_port == EVENTLINK_INACTIVE_PORT); + ev->ev_local_port = port; + return ev; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char *name, os_eventlink_t template) +{ + mach_port_t mp; + int ret = os_eventlink_extract_remote_port(template, &mp); + if (ret) { + errno = ret; + return NULL; + } + + os_eventlink_t ev = 
os_eventlink_create_with_port(name, mp); + + /* os_eventlink_create_with_port doesn't consume the right it was given, we + * should release our reference */ + mach_port_mod_refs(mach_task_self(), mp, MACH_PORT_RIGHT_SEND, -1); + + return ev; +} + +int +os_eventlink_associate(os_eventlink_t ev, os_eventlink_associate_options_t + options) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + mach_eventlink_associate_option_t mela_options; + mela_options = (options == OE_ASSOCIATE_ON_WAIT) ? + MELA_OPTION_ASSOCIATE_ON_WAIT : MELA_OPTION_NONE; + mach_port_t thread_port = (options == OE_ASSOCIATE_ON_WAIT) ? MACH_PORT_NULL : _dispatch_thread_port(); + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_associate(ev->ev_local_port, thread_port, 0, 0, 0, 0, mela_options); + return _mach_error_to_errno(kr); +} + +int +os_eventlink_disassociate(os_eventlink_t ev) +{ + int ret = 0; + + // These checks are racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + /* Don't bother call mach_eventlink_disassociate since the backing + * eventlink object in the kernel will be gone */ + return ret; + } + + /* TODO: Track the associated thread in the eventlink object and error out + * in user space if the thread calling disassociate isn't the same thread. 
+ * The kernel doesn't enforce this */ + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_disassociate(ev->ev_local_port, MELD_OPTION_NONE); + + if (kr == KERN_TERMINATED) { + /* Absorb this error in libdispatch, knowing that the other side died + * first is not helpful here */ + return 0; + } + + return _mach_error_to_errno(kr); +} + + +int +os_eventlink_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t *signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Convert timeout to deadline */ + return os_eventlink_wait_until_internal(ev, clock, mach_absolute_time() + machabs_timeout, + signals_consumed_out); +} + +int +os_eventlink_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal(os_eventlink_t ev) +{ + int ret = 0; + + // This is racy but allows us to shortcircuit in userspace + if (_os_eventlink_inactive(ev->ev_local_port)) { + errno = ret = EINVAL; + return ret; + } + if (_os_eventlink_is_cancelled(ev->ev_state)) { + errno = ret = ECANCELED; + return ret; + } + + kern_return_t kr = KERN_SUCCESS; + kr = mach_eventlink_signal(ev->ev_local_port, 0); + + return _mach_error_to_errno(kr); +} + +int +os_eventlink_signal_and_wait(os_eventlink_t ev, uint64_t *signals_consumed_out) +{ + /* Passing in deadline = 0 means wait forever */ + return os_eventlink_signal_and_wait_until_internal(ev, OS_CLOCK_MACH_ABSOLUTE_TIME, 0, + signals_consumed_out); +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t ev, os_clockid_t clock, + uint64_t timeout, uint64_t * _Nullable signals_consumed_out) +{ + uint64_t machabs_timeout = _os_clockid_normalize_to_machabs(clock, timeout); + + /* Converts timeout to deadline */ + return os_eventlink_signal_and_wait_until_internal(ev, clock, mach_absolute_time() 
+ machabs_timeout, + signals_consumed_out); +} + +void +os_eventlink_cancel(os_eventlink_t ev) +{ + if (_os_eventlink_is_cancelled(ev->ev_state)) { + return; + } + + os_atomic_or(&ev->ev_state, OS_EVENTLINK_CANCELLED, relaxed); + + + mach_port_t p = ev->ev_local_port; + if (MACH_PORT_VALID(p)) { + /* mach_eventlink_destroy consumes a ref on the ports. We therefore take + * +1 on the local port so that other threads using the ev_local_port have valid + * ports even if it isn't backed by an eventlink object. The last ref of + * the port in the eventlink object will be dropped in xref dispose */ + kern_return_t kr = mach_port_mod_refs(mach_task_self(), p, MACH_PORT_RIGHT_SEND, 1); + os_assert(kr == KERN_SUCCESS); + mach_eventlink_destroy(p); + } + + // If the remote port was valid, then we already called destroy on the + // local port and we don't need to call it again on the remote port. We keep + // the reference we already have on the remote port (if any) and deallocate + // it in xref dispose + +} + +#else /* OS_EVENTLINK_USE_MACH_EVENTLINK */ +#pragma mark Simulator + +void +_os_eventlink_dispose(os_eventlink_t __unused ev) { +} + +os_eventlink_t +os_eventlink_create(const char * __unused name) +{ + return NULL; +} + +int +os_eventlink_activate(os_eventlink_t __unused ev) +{ + return ENOTSUP; +} + +int +os_eventlink_extract_remote_port(os_eventlink_t __unused eventlink, mach_port_t *port_out) +{ + *port_out = MACH_PORT_NULL; + return ENOTSUP; +} + +os_eventlink_t +os_eventlink_create_with_port(const char * __unused name, mach_port_t __unused mach_port) +{ + errno = ENOTSUP; + return NULL; +} + +os_eventlink_t +os_eventlink_create_remote_with_eventlink(const char * __unused name, os_eventlink_t __unused eventlink) +{ + errno = ENOTSUP; + return NULL; +} + +int +os_eventlink_associate(os_eventlink_t __unused eventlink, os_eventlink_associate_options_t __unused options) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_disassociate(os_eventlink_t 
__unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal(os_eventlink_t __unused eventlink) +{ + int ret = errno = ENOTSUP; + return ret; +} + +int +os_eventlink_signal_and_wait(os_eventlink_t __unused eventlink, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +int +os_eventlink_signal_and_wait_until(os_eventlink_t __unused eventlink, os_clockid_t __unused clock, + uint64_t __unused timeout, uint64_t * _Nullable signals_consumed_out) +{ + int ret = errno = ENOTSUP; + *signals_consumed_out = 0; + return ret; +} + +void +os_eventlink_cancel(os_eventlink_t __unused ev) +{ +} + +#endif /* OS_EVENTLINK_USE_MACH_EVENTLINK */ diff --git a/src/eventlink_internal.h b/src/eventlink_internal.h new file mode 100644 index 000000000..4c8f0d288 --- /dev/null +++ b/src/eventlink_internal.h @@ -0,0 +1,67 @@ +// +// eventlink_internal.h +// libdispatch +// +// Created by Rokhini Prabhu on 12/13/19. 
+// + +#ifndef __OS_EVENTLINK_INTERNAL__ +#define __OS_EVENTLINK_INTERNAL__ + +#if OS_EVENTLINK_USE_MACH_EVENTLINK +#include +#endif + +#define OS_EVENTLINK_LABEL_NEEDS_FREE 0x1ull +#define OS_EVENTLINK_CANCELLED 0x2ull + +union eventlink_internal { + mach_port_t pair[2]; + uint64_t desc; +}; + +struct os_eventlink_s { + struct _os_object_s _as_os_obj[0]; + OS_OBJECT_STRUCT_HEADER(eventlink); + + const char *name; + uint64_t ev_state; + + /* Note: We use the union which allows us to write to both local and remote + * port atomically during activate and cancellation APIs. The combination of + * the state of the local_port as well as the ev_state tells us the state of + * the eventlink + * + * local_port = EVENTLINK_INACTIVE_PORT means that it hasn't been created yet. + * local_port = a valid mach port means that it has been created. + * + * If the OS_EVENTLINK_CANCELLED bit is set, that means that the port does + * not point to a valid kernel eventlink object. + * + * The ref of the ports are only dropped when the last external ref is + * dropped. 
+ */ + union eventlink_internal port_pair; + + uint64_t local_count; +}; + +#define EVENTLINK_INACTIVE_PORT ((uint64_t) 0) +#define EVENTLINK_CLEARED_PORT ((uint64_t) 0) + +static inline bool +_os_eventlink_inactive(mach_port_t port) +{ + return port == EVENTLINK_INACTIVE_PORT; +} + +static inline bool +_os_eventlink_is_cancelled(uint64_t ev_state) +{ + return (ev_state & OS_EVENTLINK_CANCELLED) == OS_EVENTLINK_CANCELLED; +} + +void _os_eventlink_xref_dispose(os_eventlink_t ev); +void _os_eventlink_dispose(os_eventlink_t ev); + +#endif /* __OS_EVENTLINK_INTERNAL */ diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index db0db5248..a79053c9d 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -18,7 +18,19 @@ * @APPLE_APACHE_LICENSE_HEADER_END@ */ +#include #include // VM_MEMORY_GENEALOGY + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + #ifdef KERNEL #define OS_VOUCHER_ACTIVITY_SPI_TYPES 1 @@ -26,8 +38,12 @@ #define __OS_EXPOSE_INTERNALS_INDIRECT__ 1 #define DISPATCH_PURE_C 1 +#ifndef os_likely #define os_likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef os_unlikely #define os_unlikely(x) __builtin_expect(!!(x), 0) +#endif #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) @@ -74,7 +90,6 @@ static void _dispatch_firehose_gate_wait(dispatch_gate_t l, uint32_t flags); #include #include #include -#include // os/internal/atomic.h #include // #include // #include // @@ -295,7 +310,7 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) if (old.fbs_atomic_state == new.fbs_atomic_state) { return; } - os_atomic_add2o(&fb->fb_header, fbh_bank.fbb_state.fbs_atomic_state, + 
os_atomic_add(&fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, new.fbs_atomic_state - old.fbs_atomic_state, relaxed); } #endif // !KERNEL @@ -511,11 +526,11 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, #endif } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_mem_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_mem_flushed, reply.fpr_mem_flushed_pos, &old_flushed_pos, relaxed)) { mem_delta = (uint16_t)(reply.fpr_mem_flushed_pos - old_flushed_pos); } - if (firehose_atomic_maxv2o(fbh, fbh_bank.fbb_io_flushed, + if (firehose_atomic_maxv(&fbh->fbh_bank.fbb_io_flushed, reply.fpr_io_flushed_pos, &old_flushed_pos, relaxed)) { io_delta = (uint16_t)(reply.fpr_io_flushed_pos - old_flushed_pos); } @@ -527,14 +542,14 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, if (!mem_delta && !io_delta) { if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; } __firehose_critical_region_enter(); - os_atomic_rmw_loop2o(fbh, fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fbh->fbh_ring_tail.frp_atomic_tail, otail.frp_atomic_tail, ntail.frp_atomic_tail, relaxed, { ntail = otail; // overflow handles the generation wraps @@ -544,18 +559,18 @@ firehose_client_merge_updates(firehose_buffer_t fb, bool async_notif, bank_updates = ((uint64_t)mem_delta << FIREHOSE_BANK_SHIFT(0)) | ((uint64_t)io_delta << FIREHOSE_BANK_SHIFT(1)); - state.fbs_atomic_state = os_atomic_add2o(fbh, - fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); + state.fbs_atomic_state = os_atomic_add( + &fbh->fbh_bank.fbb_state.fbs_atomic_state, bank_updates, release); __firehose_critical_region_leave(); if (state_out) *state_out = state; if (async_notif) { if (io_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_io_notifs, relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_io_notifs, 1u, 
relaxed); } if (mem_delta) { - os_atomic_inc2o(fbh, fbh_bank.fbb_mem_notifs, relaxed); + os_atomic_add(&fbh->fbh_bank.fbb_mem_notifs, 1u, relaxed); } } } @@ -676,8 +691,8 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (state_out) { - state_out->fbs_atomic_state = os_atomic_load2o(&fb->fb_header, - fbh_bank.fbb_state.fbs_atomic_state, relaxed); + state_out->fbs_atomic_state = os_atomic_load( + &fb->fb_header.fbh_bank.fbb_state.fbs_atomic_state, relaxed); } return; @@ -689,9 +704,9 @@ firehose_client_send_push_and_wait(firehose_buffer_t fb, bool for_io, } if (for_io) { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_io_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_io_sync_pushes, relaxed); } else { - os_atomic_inc2o(&fb->fb_header, fbh_bank.fbb_mem_sync_pushes, relaxed); + os_atomic_inc(&fb->fb_header.fbh_bank.fbb_mem_sync_pushes, relaxed); } // TODO // @@ -808,7 +823,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, stamp_and_len = stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)flp_size << 48; - os_atomic_store2o(*lft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&(*lft)->ft_stamp_and_length, stamp_and_len, relaxed); (*lft)->ft_thread = thread; // not really meaningful @@ -828,7 +843,7 @@ firehose_buffer_chunk_init(firehose_chunk_t fc, // write the length before making the chunk visible stamp_and_len = ask->stamp - fc->fc_timestamp; stamp_and_len |= (uint64_t)ask->pubsize << 48; - os_atomic_store2o(ft, ft_stamp_and_length, stamp_and_len, relaxed); + os_atomic_store(&ft->ft_stamp_and_length, stamp_and_len, relaxed); ft->ft_thread = thread; @@ -880,7 +895,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, dispatch_compiler_barrier(); if (ask->stream == firehose_stream_metadata) { - os_atomic_or2o(fbh, fbh_bank.fbb_metadata_bitmap, 1ULL << ref, + os_atomic_or(&fbh->fbh_bank.fbb_metadata_bitmap, 1ULL << ref, relaxed); } @@ -898,13 +913,13 @@ 
firehose_buffer_stream_chunk_install(firehose_buffer_t fb, // event needs to be placed at the beginning of the chunk in addition to // the first actual tracepoint. state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (likely(!state.fss_loss)) { ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, NULL, 0); // release to publish the chunk init - installed = os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + installed = os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { if (state.fss_loss) { os_atomic_rmw_loop_give_up(break); @@ -921,14 +936,14 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, uint64_t loss_start, loss_end; // ensure we can see the start stamp - (void)os_atomic_load2o(fbs, fbs_state.fss_atomic_state, acquire); + (void)os_atomic_load(&fbs->fbs_state.fss_atomic_state, acquire); loss_start = fbs->fbs_loss_start; fbs->fbs_loss_start = 0; // reset under fss_gate loss_end = mach_continuous_time(); ft = firehose_buffer_chunk_init(fc, ask, privptr, thread, &lft, loss_start); - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { // no giving up this time! 
new_state = (firehose_stream_state_u){ @@ -952,19 +967,19 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, } }; // publish the contents of the loss tracepoint - os_atomic_store2o(lft, ft_id.ftid_atomic_value, ftid.ftid_value, + os_atomic_store(&lft->ft_id.ftid_atomic_value, ftid.ftid_value, release); } } else { // the allocator gave up - just clear the allocator and waiter bits and // increment the loss count state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ @@ -975,7 +990,7 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ @@ -1004,9 +1019,9 @@ firehose_buffer_stream_chunk_install(firehose_buffer_t fb, firehose_buffer_update_limits(fb); } - if (unlikely(os_atomic_load2o(fbh, fbh_quarantined_state, relaxed) == + if (unlikely(os_atomic_load(&fbh->fbh_quarantined_state, relaxed) == FBH_QUARANTINE_PENDING)) { - if (os_atomic_cmpxchg2o(fbh, fbh_quarantined_state, + if (os_atomic_cmpxchg(&fbh->fbh_quarantined_state, FBH_QUARANTINE_PENDING, FBH_QUARANTINE_STARTED, relaxed)) { firehose_client_start_quarantine(fb); } @@ -1190,7 +1205,7 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) firehose_chunk_t fc; bool for_io; - os_atomic_rmw_loop2o(&fb->fb_header, fbh_ring_tail.frp_atomic_tail, + os_atomic_rmw_loop(&fb->fb_header.fbh_ring_tail.frp_atomic_tail, old.frp_atomic_tail, pos.frp_atomic_tail, 
relaxed, { pos = old; if (likely(old.frp_mem_tail != old.frp_mem_flushed)) { @@ -1228,13 +1243,13 @@ firehose_buffer_ring_try_recycle(firehose_buffer_t fb) fc = firehose_buffer_ref_to_chunk(fb, ref); if (!for_io && fc->fc_pos.fcp_stream == firehose_stream_metadata) { - os_atomic_and2o(fb, fb_header.fbh_bank.fbb_metadata_bitmap, + os_atomic_and(&fb->fb_header.fbh_bank.fbb_metadata_bitmap, ~(1ULL << ref), relaxed); } - os_atomic_store2o(fc, fc_pos.fcp_atomic_pos, + os_atomic_store(&fc->fc_pos.fcp_atomic_pos, FIREHOSE_CHUNK_POS_FULL_BIT, relaxed); dispatch_compiler_barrier(); - os_atomic_store(&fbh_ring[tail], gen | 0, relaxed); + os_atomic_store(&fbh_ring[tail], gen, relaxed); return ref; } @@ -1256,7 +1271,7 @@ firehose_buffer_tracepoint_reserve_wait_for_chunks_from_logd(firehose_buffer_t f // first wait for our bank to have space, if needed if (unlikely(!ask->is_bank_ok)) { state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); while (!firehose_buffer_bank_try_reserve_slot(fb, for_io, &state)) { if (ask->quarantined) { __FIREHOSE_CLIENT_THROTTLED_DUE_TO_HEAVY_LOGGING__(fb, for_io, @@ -1334,7 +1349,7 @@ firehose_buffer_tracepoint_reserve_slow(firehose_buffer_t fb, #endif // KERNEL state.fbs_atomic_state = - os_atomic_load2o(fbb, fbb_state.fbs_atomic_state, relaxed); + os_atomic_load(&fbb->fbb_state.fbs_atomic_state, relaxed); reserved = firehose_buffer_bank_try_reserve_slot(fb, for_io, &state); #ifndef KERNEL @@ -1415,13 +1430,26 @@ __firehose_buffer_tracepoint_flush(firehose_tracepoint_t ft, return firehose_buffer_tracepoint_flush(kernel_firehose_buffer, ft, ftid); } -void +bool __firehose_merge_updates(firehose_push_reply_t update) { firehose_buffer_t fb = kernel_firehose_buffer; + bool has_more = false; + uint16_t head; + if (likely(fb)) { + firehose_buffer_header_t fbh = &fb->fb_header; firehose_client_merge_updates(fb, true, update, false, NULL); + head = 
os_atomic_load(&fbh->fbh_ring_io_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_io_flushed, relaxed)) { + has_more = true; + } + head = os_atomic_load(&fbh->fbh_ring_mem_head, relaxed); + if (head != (uint16_t)os_atomic_load(&fbh->fbh_bank.fbb_mem_flushed, relaxed)) { + has_more = true; + } } + return has_more; } int diff --git a/src/firehose/firehose_inline_internal.h b/src/firehose/firehose_inline_internal.h index bf64cbc11..ea7632801 100644 --- a/src/firehose/firehose_inline_internal.h +++ b/src/firehose/firehose_inline_internal.h @@ -26,14 +26,14 @@ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) #endif -#define firehose_atomic_maxv2o(p, f, v, o, m) \ - os_atomic_rmw_loop2o(p, f, *(o), (v), m, { \ +#define firehose_atomic_maxv(p, v, o, m) \ + os_atomic_rmw_loop(p, *(o), (v), m, { \ if (*(o) >= (v)) os_atomic_rmw_loop_give_up(break); \ }) -#define firehose_atomic_max2o(p, f, v, m) ({ \ - _os_atomic_basetypeof(&(p)->f) _old; \ - firehose_atomic_maxv2o(p, f, v, &_old, m); \ +#define firehose_atomic_max(p, v, m) ({ \ + _os_atomic_basetypeof(p) _old; \ + firehose_atomic_maxv(p, v, &_old, m); \ }) #ifndef KERNEL @@ -169,6 +169,7 @@ firehose_buffer_ref_to_chunk(firehose_buffer_t fb, firehose_chunk_ref_t ref) #ifndef FIREHOSE_SERVER #if DISPATCH_PURE_C +#if OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY OS_ALWAYS_INLINE static inline void firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) @@ -181,7 +182,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, firehose_stream_t stream) long result; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); ref = old_state.fss_current; if (!ref || ref == FIREHOSE_STREAM_STATE_PRISTINE) { // there is no installed page, nothing to flush, go away @@ -207,7 +208,7 @@ firehose_buffer_stream_flush(firehose_buffer_t fb, 
firehose_stream_t stream) // allocators know how to handle in the first place new_state = old_state; new_state.fss_current = 0; - (void)os_atomic_cmpxchg2o(fbs, fbs_state.fss_atomic_state, + (void)os_atomic_cmpxchg(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, relaxed); } @@ -263,9 +264,9 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, long result; firehose_chunk_ref_t ref; - // cannot use os_atomic_rmw_loop2o, _page_try_reserve does a store + // cannot use os_atomic_rmw_loop, _page_try_reserve does a store old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); for (;;) { new_state = old_state; @@ -298,7 +299,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, new_state.fss_loss = MIN(old_state.fss_loss + 1, FIREHOSE_LOSS_COUNT_MAX); - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (success) { @@ -319,7 +320,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, waited = true; old_state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); #else if (likely(reliable)) { new_state.fss_allocator |= FIREHOSE_GATE_RELIABLE_WAITERS_BIT; @@ -329,8 +330,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, bool already_equal = (new_state.fss_atomic_state == old_state.fss_atomic_state); - success = already_equal || os_atomic_cmpxchgv2o(fbs, - fbs_state.fss_atomic_state, old_state.fss_atomic_state, + success = already_equal || os_atomic_cmpxchgv( + &fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); 
if (success) { @@ -342,8 +343,8 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, DLOCK_LOCK_DATA_CONTENTION); waited = true; - old_state.fss_atomic_state = os_atomic_load2o(fbs, - fbs_state.fss_atomic_state, relaxed); + old_state.fss_atomic_state = os_atomic_load( + &fbs->fbs_state.fss_atomic_state, relaxed); } #endif continue; @@ -359,7 +360,7 @@ firehose_buffer_tracepoint_reserve(firehose_buffer_t fb, uint64_t stamp, #else new_state.fss_allocator = _dispatch_lock_value_for_self(); #endif - success = os_atomic_cmpxchgv2o(fbs, fbs_state.fss_atomic_state, + success = os_atomic_cmpxchgv(&fbs->fbs_state.fss_atomic_state, old_state.fss_atomic_state, new_state.fss_atomic_state, &old_state.fss_atomic_state, relaxed); if (likely(success)) { @@ -441,7 +442,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, new_state = old_state; new_state.fbs_banks[for_io]--; - success = os_atomic_cmpxchgvw(&fbb->fbb_state.fbs_atomic_state, + success = os_atomic_cmpxchgv(&fbb->fbb_state.fbs_atomic_state, old_state.fbs_atomic_state, new_state.fbs_atomic_state, &old_state.fbs_atomic_state, acquire); } while (unlikely(!success)); @@ -449,6 +450,7 @@ firehose_buffer_bank_try_reserve_slot(firehose_buffer_t fb, bool for_io, *state_in_out = new_state; return true; } +#endif // OS_ATOMIC_HAS_STARVATION_FREE_RMW || !OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY #ifndef KERNEL OS_ALWAYS_INLINE @@ -460,17 +462,18 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, firehose_buffer_stream_t fbs = &fb->fb_header.fbh_stream[stream]; state.fss_atomic_state = - os_atomic_load2o(fbs, fbs_state.fss_atomic_state, relaxed); + os_atomic_load(&fbs->fbs_state.fss_atomic_state, relaxed); if (!state.fss_timestamped) { fbs->fbs_loss_start = mach_continuous_time(); // release to publish the timestamp - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, 
new_state.fss_atomic_state, release, { new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, @@ -478,12 +481,13 @@ firehose_buffer_stream_signal_waiting_for_logd(firehose_buffer_t fb, }; }); } else { - os_atomic_rmw_loop2o(fbs, fbs_state.fss_atomic_state, + os_atomic_rmw_loop(&fbs->fbs_state.fss_atomic_state, state.fss_atomic_state, new_state.fss_atomic_state, relaxed, { new_state = (firehose_stream_state_u){ .fss_allocator = (state.fss_allocator & ~FIREHOSE_GATE_UNRELIABLE_WAITERS_BIT), + .fss_current = state.fss_current, .fss_loss = state.fss_loss, .fss_timestamped = true, .fss_waiting_for_logd = true, @@ -507,7 +511,7 @@ firehose_buffer_clear_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_and_orig2o(fbb, fbb_flags, ~bits, relaxed); + orig_flags = os_atomic_and_orig(&fbb->fbb_flags, ~bits, relaxed); if (orig_flags != (orig_flags & ~bits)) { firehose_buffer_update_limits(fb); } @@ -520,7 +524,7 @@ firehose_buffer_set_bank_flags(firehose_buffer_t fb, unsigned long bits) firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; unsigned long orig_flags; - orig_flags = os_atomic_or_orig2o(fbb, fbb_flags, bits, relaxed); + orig_flags = os_atomic_or_orig(&fbb->fbb_flags, bits, relaxed); if (orig_flags != (orig_flags | bits)) { firehose_buffer_update_limits(fb); } @@ -531,7 +535,7 @@ static inline void firehose_buffer_bank_relinquish_slot(firehose_buffer_t fb, bool for_io) { firehose_buffer_bank_t fbb = &fb->fb_header.fbh_bank; - os_atomic_add2o(fbb, fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), + os_atomic_add(&fbb->fbb_state.fbs_atomic_state, FIREHOSE_BANK_INC(for_io), relaxed); } #endif // !KERNEL diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c 
index 1422ba7a9..6fe1a61e4 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -213,9 +213,9 @@ firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) }; kern_return_t kr; - firehose_atomic_max2o(fc, fc_mem_sent_flushed_pos, + firehose_atomic_max(&fc->fc_mem_sent_flushed_pos, push_reply.fpr_mem_flushed_pos, relaxed); - firehose_atomic_max2o(fc, fc_io_sent_flushed_pos, + firehose_atomic_max(&fc->fc_io_sent_flushed_pos, push_reply.fpr_io_flushed_pos, relaxed); if (!fc->fc_pid) { @@ -495,7 +495,7 @@ firehose_client_finalize(firehose_client_t fc OS_OBJECT_CONSUMED) } fc->fc_entry.tqe_next = DISPATCH_OBJECT_LISTLESS; fc->fc_entry.tqe_prev = DISPATCH_OBJECT_LISTLESS; - _os_object_release_without_xref_dispose(&fc->fc_as_os_object); + _os_object_release_without_xref_dispose(&fc->fc_object_header); } OS_NOINLINE @@ -1370,8 +1370,8 @@ firehose_server_register(mach_port_t server_port OS_UNUSED, * Request a no senders notification for the memory channel. * That should indicate the client going away. 
*/ - dispatch_mach_request_no_senders( - fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM]); + dispatch_mach_notify_no_senders( + fc->fc_mach_channel[FIREHOSE_BUFFER_PUSHPORT_MEM], true); firehose_client_resume(fc, &fcci); if (fcci.fcci_size) { diff --git a/src/firehose/firehose_server_internal.h b/src/firehose/firehose_server_internal.h index daba772b5..c3ea87982 100644 --- a/src/firehose/firehose_server_internal.h +++ b/src/firehose/firehose_server_internal.h @@ -31,10 +31,7 @@ struct firehose_snapshot_s { }; struct firehose_client_s { - union { - _OS_OBJECT_HEADER(void *os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); - struct _os_object_s fc_as_os_object; - }; + struct _os_object_s fc_object_header; TAILQ_ENTRY(firehose_client_s) fc_entry; struct firehose_client_s *volatile fc_next[2]; diff --git a/src/init.c b/src/init.c index abaf55d26..ff2b5b6a0 100644 --- a/src/init.c +++ b/src/init.c @@ -29,6 +29,14 @@ #include "protocolServer.h" #endif +#ifdef __linux__ +// The clang compiler in Ubuntu 18.04 has a bug that causes it to crash +// when compiling _dispatch_bug_kevent_vanished(). As a workaround, use a +// less capable version of this function on Linux until a fixed version +// of the compiler is available. 
+#define RDAR_49023449 1 +#endif // __linux__ + #pragma mark - #pragma mark dispatch_init @@ -135,6 +143,8 @@ pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE +pthread_key_t _os_workgroup_key; + #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; mach_voucher_t _voucher_task_mach_voucher; @@ -157,7 +167,7 @@ bool _dispatch_kevent_workqueue_enabled = 1; DISPATCH_HW_CONFIG(); uint8_t _dispatch_unsafe_fork; -uint8_t _dispatch_mode; +uint8_t _dispatch_mode = DISPATCH_MODE_NO_FAULTS; bool _dispatch_child_of_unsafe_fork; #if DISPATCH_USE_MEMORYPRESSURE_SOURCE bool _dispatch_memory_warn; @@ -361,7 +371,7 @@ unsigned long volatile _dispatch_queue_serial_numbers = dispatch_queue_global_t -dispatch_get_global_queue(long priority, unsigned long flags) +dispatch_get_global_queue(intptr_t priority, uintptr_t flags) { dispatch_assert(countof(_dispatch_root_queues) == DISPATCH_ROOT_QUEUE_COUNT); @@ -431,6 +441,12 @@ _dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa) if (dqa < _dispatch_queue_attrs || dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) { +#ifndef __APPLE__ + if (memcmp(dqa, &_dispatch_queue_attrs[0], + sizeof(struct dispatch_queue_attr_s)) == 0) { + dqa = (dispatch_queue_attr_t)&_dispatch_queue_attrs[0]; + } else +#endif // __APPLE__ DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute"); } @@ -527,8 +543,6 @@ dispatch_queue_attr_make_with_autorelease_frequency(dispatch_queue_attr_t dqa, case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM: case DISPATCH_AUTORELEASE_FREQUENCY_NEVER: break; - default: - return (dispatch_queue_attr_t)dqa; } dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa); dqai.dqai_autorelease_frequency = (uint16_t)frequency; @@ -958,6 +972,7 @@ _dispatch_continuation_get_function_symbol(dispatch_continuation_t dc) return dc->dc_func; } +#if HAVE_MACH void _dispatch_bug_kevent_client(const 
char *msg, const char *filter, const char *operation, int err, uint64_t ident, uint64_t udata, @@ -974,9 +989,11 @@ _dispatch_bug_kevent_client(const char *msg, const char *filter, dc = du._dr->ds_handler[DS_EVENT_HANDLER]; if (dc) func = _dispatch_continuation_get_function_symbol(dc); break; +#if HAVE_MACH case DISPATCH_MACH_CHANNEL_TYPE: func = du._dmrr->dmrr_handler_func; break; +#endif // HAVE_MACH } filter = dux_type(du._du)->dst_kind; } @@ -984,21 +1001,38 @@ _dispatch_bug_kevent_client(const char *msg, const char *filter, if (operation && err) { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch client: %s %s: \"%s\" - 0x%x " - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, operation, strerror(err), err, udata, filter, ident, ident, func); } else if (operation) { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch client: %s %s" - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, operation, udata, filter, ident, ident, func); } else { _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_client", "BUG in libdispatch: %s: \"%s\" - 0x%x" - "{ 0x%llx[%s], ident: %lld / 0x%llx, handler: %p }", + "{ 0x%"PRIx64"[%s], ident: %"PRId64" / 0x%"PRIx64", handler: %p }", msg, strerror(err), err, udata, filter, ident, ident, func); } } +#endif // HAVE_MACH + +#if RDAR_49023449 + +// The clang compiler on Ubuntu18.04 crashes when compiling the full version of +// this function. This reduced version avoids the crash but logs less useful +// information. 
+void +_dispatch_bug_kevent_vanished(dispatch_unote_t du) +{ + _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", + "BUG in libdispatch client: %s, monitored resource vanished before " + "the source cancel handler was invoked", + dux_type(du._du)->dst_kind); +} + +#else // RDAR_49023449 void _dispatch_bug_kevent_vanished(dispatch_unote_t du) @@ -1013,19 +1047,27 @@ _dispatch_bug_kevent_vanished(dispatch_unote_t du) dc = du._dr->ds_handler[DS_EVENT_HANDLER]; if (dc) func = _dispatch_continuation_get_function_symbol(dc); break; +#if HAVE_MACH case DISPATCH_MACH_CHANNEL_TYPE: func = du._dmrr->dmrr_handler_func; break; +#endif // MACH } _dispatch_log_fault("LIBDISPATCH_STRICT: _dispatch_bug_kevent_vanished", "BUG in libdispatch client: %s, monitored resource vanished before " "the source cancel handler was invoked " +#if !defined(_WIN32) "{ %p[%s], ident: %d / 0x%x, handler: %p }", +#else // !defined(_WIN32) + "{ %p[%s], ident: %" PRIdPTR " / 0x%" PRIxPTR ", handler: %p }", +#endif // !defined(_WIN32) dux_type(du._du)->dst_kind, dou._dq, dou._dq->dq_label ? 
dou._dq->dq_label : "", du._du->du_ident, du._du->du_ident, func); } +#endif // RDAR_49023449 + DISPATCH_NOINLINE DISPATCH_WEAK void _dispatch_bug_deprecated(const char *msg) @@ -1087,7 +1129,7 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) char path[MAX_PATH + 1] = {0}; DWORD dwLength = GetTempPathA(MAX_PATH, path); dispatch_assert(dwLength <= MAX_PATH + 1); - snprintf(&path[dwLength], MAX_PATH - dwLength, "libdispatch.%d.log", + snprintf(&path[dwLength], MAX_PATH - dwLength, "libdispatch.%lu.log", GetCurrentProcessId()); dispatch_logfile = _open(path, O_WRONLY | O_APPEND | O_CREAT, 0666); #else @@ -1111,15 +1153,19 @@ _dispatch_logv_init(void *context DISPATCH_UNUSED) dispatch_log_basetime = _dispatch_uptime(); #endif #if defined(_WIN32) - FILE *pLogFile = _fdopen(dispatch_logfile, "w"); - char szProgramName[MAX_PATH + 1] = {0}; GetModuleFileNameA(NULL, szProgramName, MAX_PATH); - fprintf(pLogFile, "=== log file opened for %s[%lu] at " - "%ld.%06u ===\n", szProgramName, GetCurrentProcessId(), - tv.tv_sec, (int)tv.tv_usec); - fclose(pLogFile); + char szMessage[512]; + int len = snprintf(szMessage, sizeof(szMessage), + "=== log file opened for %s[%lu] at %ld.%06u ===", + szProgramName, GetCurrentProcessId(), tv.tv_sec, + (int)tv.tv_usec); + if (len > 0) { + len = MIN(len, sizeof(szMessage) - 1); + _write(dispatch_logfile, szMessage, len); + _write(dispatch_logfile, "\n", 1); + } #else dprintf(dispatch_logfile, "=== log file opened for %s[%u] at " "%ld.%06u ===\n", getprogname() ?: "", getpid(), @@ -1514,7 +1560,7 @@ _os_object_t _os_object_alloc(const void *cls, size_t size) { if (!cls) cls = &_os_object_vtable; - return _os_object_alloc_realized(cls, size); + return _os_object_alloc_realized((const void * _Nonnull) cls, size); } void diff --git a/src/inline_internal.h b/src/inline_internal.h index f91e2fe7d..0e30a10e2 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -759,7 +759,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_workloop_t 
_dispatch_wlh_to_workloop(dispatch_wlh_t wlh) { - if (wlh == DISPATCH_WLH_ANON) { + if (wlh == NULL || wlh == DISPATCH_WLH_ANON) { return NULL; } if (dx_metatype((dispatch_workloop_t)wlh) == _DISPATCH_WORKLOOP_TYPE) { @@ -1012,9 +1012,9 @@ _dq_state_is_enqueued_on_manager(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool -_dq_state_in_sync_transfer(uint64_t dq_state) +_dq_state_in_uncontended_sync(uint64_t dq_state) { - return dq_state & DISPATCH_QUEUE_SYNC_TRANSFER; + return dq_state & DISPATCH_QUEUE_UNCONTENDED_SYNC; } DISPATCH_ALWAYS_INLINE @@ -1033,6 +1033,18 @@ _dq_state_received_sync_wait(uint64_t dq_state) (dq_state & DISPATCH_QUEUE_RECEIVED_SYNC_WAIT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_needs_ensure_ownership(uint64_t dq_state) +{ + if (_dq_state_is_base_wlh(dq_state) && + _dq_state_in_uncontended_sync(dq_state)) { + return dq_state & (DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_ENQUEUED); + } + return false; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dq_state_max_qos(uint64_t dq_state) @@ -1110,6 +1122,20 @@ _dq_state_is_runnable(uint64_t dq_state) DISPATCH_ALWAYS_INLINE static inline bool _dq_state_should_override(uint64_t dq_state) +{ + if (_dq_state_is_suspended(dq_state) || + _dq_state_is_enqueued_on_manager(dq_state)) { + return false; + } + if (_dq_state_is_enqueued_on_target(dq_state)) { + return true; + } + return _dq_state_drain_locked(dq_state); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +_dq_state_should_override_for_waiter(uint64_t dq_state) { if (_dq_state_is_suspended(dq_state) || _dq_state_is_enqueued_on_manager(dq_state)) { @@ -1119,6 +1145,11 @@ _dq_state_should_override(uint64_t dq_state) return true; } if (_dq_state_is_base_wlh(dq_state)) { + // _dq_state_should_override is called only when the enqueued bit + // hasn't changed. 
For kqworkloop based code, if there's no thread + // request, then we should not try to assign a QoS/kevent override + // at all, because turnstiles are the only thing needed to resolve + // priority inversions. return false; } return _dq_state_drain_locked(dq_state); @@ -1317,7 +1348,11 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) if (unlikely(_dq_state_is_suspended(old_state))) { new_state &= ~DISPATCH_QUEUE_ENQUEUED; } else if (unlikely(_dq_state_drain_locked(old_state))) { - os_atomic_rmw_loop_give_up(break); + if (_dq_state_in_uncontended_sync(old_state)) { + new_state |= DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; + } else { + os_atomic_rmw_loop_give_up(break); + } } else { new_state &= DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK; new_state |= lock_bits; @@ -1326,7 +1361,7 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) if (unlikely(!_dq_state_is_base_wlh(old_state) || !_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_manager(old_state))) { -#if !__LP64__ +#if DISPATCH_SIZEOF_PTR == 4 old_state >>= 32; #endif DISPATCH_INTERNAL_CRASH(old_state, "Invalid wlh state"); @@ -1356,6 +1391,7 @@ _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, uint64_t init = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | _dispatch_lock_value_from_tid(tid) | + DISPATCH_QUEUE_UNCONTENDED_SYNC | (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL); uint64_t old_state, new_state; @@ -1538,6 +1574,18 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) return true; } +/* + * Clears UNCONTENDED_SYNC and RECEIVED_SYNC_WAIT + */ +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) +{ + uint64_t clearbits = DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | + DISPATCH_QUEUE_UNCONTENDED_SYNC; + os_atomic_and2o(dq, dq_state, ~clearbits, relaxed); +} + 
#pragma mark - #pragma mark os_mpsc_queue @@ -2081,7 +2129,7 @@ _dispatch_set_basepri(dispatch_priority_t dq_dbp) _dispatch_thread_setspecific(dispatch_basepri_key, (void*)(uintptr_t)dbp); return old_dbp; #else - (void)dbp; + (void)dq_dbp; return 0; #endif } @@ -2295,6 +2343,19 @@ _dispatch_queue_need_override(dispatch_queue_class_t dq, dispatch_qos_t qos) #define DISPATCH_PRIORITY_PROPAGATE_CURRENT 0x1 #define DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC 0x2 +DISPATCH_ALWAYS_INLINE +static inline dispatch_qos_t +_dispatch_qos_propagate(dispatch_qos_t qos) +{ +#if HAVE_PTHREAD_WORKQUEUE_QOS + // Cap QOS for propagation at user-initiated + return MIN(qos, DISPATCH_QOS_USER_INITIATED); +#else + (void)qos; + return 0; +#endif +} + DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_priority_compute_propagated(pthread_priority_t pp, diff --git a/src/internal.h b/src/internal.h index 41b2e10e6..9bdb9e890 100644 --- a/src/internal.h +++ b/src/internal.h @@ -35,6 +35,7 @@ #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ +#define __OS_WORKGROUP_INDIRECT__ #ifdef __APPLE__ #include @@ -97,6 +98,19 @@ #include #include +#if __has_feature(ptrauth_calls) +#include +#define DISPATCH_VTABLE_ENTRY(op) \ + (* __ptrauth(ptrauth_key_process_independent_code, true, \ + ptrauth_string_discriminator("dispatch." 
#op)) const op) +#define DISPATCH_FUNCTION_POINTER \ + __ptrauth(ptrauth_key_process_dependent_code, true, \ + ptrauth_string_discriminator("dispatch.handler")) +#else +#define DISPATCH_VTABLE_ENTRY(op) (* const op) +#define DISPATCH_FUNCTION_POINTER +#endif + #define __DISPATCH_HIDE_SYMBOL(sym, version) \ __asm__(".section __TEXT,__const\n\t" \ ".globl $ld$hide$os" #version "$_" #sym "\n\t" \ @@ -203,6 +217,10 @@ upcast(dispatch_object_t dou) #endif // __OBJC__ #include +#include +#include +#include +#include #include #include #include @@ -220,12 +238,16 @@ upcast(dispatch_object_t dou) #include #endif #include "os/object_private.h" +#include "os/eventlink_private.h" +#include "os/workgroup_object_private.h" +#include "os/workgroup_interval_private.h" #include "queue_private.h" #include "channel_private.h" #include "workloop_private.h" #include "source_private.h" #include "mach_private.h" #include "data_private.h" +#include "time_private.h" #include "os/voucher_private.h" #include "os/voucher_activity_private.h" #include "io_private.h" @@ -277,13 +299,14 @@ upcast(dispatch_object_t dou) #if defined(_WIN32) #include #else -#include #include #ifdef __ANDROID__ #include -#else -#include #endif /* __ANDROID__ */ +#if !defined(__linux__) +#include +#include +#endif #include #include #include @@ -314,9 +337,6 @@ upcast(dispatch_object_t dou) #include #include #include -#if defined(_WIN32) -#define _CRT_RAND_S -#endif #include #include #if defined(__unix__) || (defined(__APPLE__) && defined(__MACH__)) @@ -326,6 +346,11 @@ upcast(dispatch_object_t dou) #include #include #endif +#include + +#if __has_include() +#include +#endif /* More #includes at EOF (dependent on the contents of internal.h) ... 
*/ @@ -484,10 +509,12 @@ void _dispatch_bug_mach_client(const char *msg, mach_msg_return_t kr); struct dispatch_unote_class_s; +#if HAVE_MACH DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_kevent_client(const char *msg, const char *filter, const char *operation, int err, uint64_t ident, uint64_t udata, struct dispatch_unote_class_s *du); +#endif // HAVE_MACH DISPATCH_NOINLINE DISPATCH_COLD void _dispatch_bug_kevent_vanished(struct dispatch_unote_class_s *du); @@ -695,8 +722,7 @@ _dispatch_fork_becomes_unsafe(void) #ifndef HAVE_PTHREAD_WORKQUEUE_WORKLOOP #if HAVE_PTHREAD_WORKQUEUE_KEVENT && defined(WORKQ_FEATURE_WORKLOOP) && \ - defined(KEVENT_FLAG_WORKLOOP) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) + defined(KEVENT_FLAG_WORKLOOP) #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 1 #else #define HAVE_PTHREAD_WORKQUEUE_WORKLOOP 0 @@ -704,7 +730,7 @@ _dispatch_fork_becomes_unsafe(void) #endif // !defined(HAVE_PTHREAD_WORKQUEUE_WORKLOOP) #ifndef DISPATCH_USE_WORKQUEUE_NARROWING -#if HAVE_PTHREAD_WORKQUEUES && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_WORKQUEUES #define DISPATCH_USE_WORKQUEUE_NARROWING 1 #else #define DISPATCH_USE_WORKQUEUE_NARROWING 0 @@ -797,10 +823,10 @@ extern int malloc_engaged_nano(void); extern bool _dispatch_memory_warn; #endif -#if defined(MACH_SEND_SYNC_OVERRIDE) && defined(MACH_RCV_SYNC_WAIT) && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) && \ - !defined(DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE) -#define DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE 1 +#if defined(MACH_MSG_QOS_LAST) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 1 +#else +#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 0 #endif #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) @@ -818,7 +844,6 @@ extern bool _dispatch_memory_warn; #endif #endif // MACH_SEND_NOIMPORTANCE - #if HAVE_LIBPROC_INTERNAL_H #include #include @@ -989,7 +1014,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #ifndef 
VOUCHER_USE_PERSONA #if VOUCHER_USE_MACH_VOUCHER && defined(BANK_PERSONA_TOKEN) && \ - !TARGET_OS_SIMULATOR + !TARGET_OS_SIMULATOR && !TARGET_CPU_ARM #define VOUCHER_USE_PERSONA 1 #else #define VOUCHER_USE_PERSONA 0 @@ -1005,6 +1030,14 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_PERSONA 0 #endif // VOUCHER_USE_MACH_VOUCHER +#ifndef OS_EVENTLINK_USE_MACH_EVENTLINK +#if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) && __has_include() +#define OS_EVENTLINK_USE_MACH_EVENTLINK 1 +#else +#define OS_EVENTLINK_USE_MACH_EVENTLINK 0 +#endif +#endif // OS_EVENTLINK_USE_MACH_EVENTLINK + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // @@ -1070,14 +1103,14 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, dispatch_assert(_length != -1); \ _msg = (char *)malloc((unsigned)_length + 1); \ dispatch_assert(_msg); \ - snprintf(_msg, (unsigned)_length + 1, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ + (void)snprintf(_msg, (unsigned)_length + 1, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, ##__VA_ARGS__); \ _dispatch_assert_crash(_msg); \ free(_msg); \ } while (0) #else #define _dispatch_client_assert_fail(fmt, ...) 
do { \ char *_msg = NULL; \ - asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ + (void)asprintf(&_msg, "%s" fmt, DISPATCH_ASSERTION_FAILED_MESSAGE, \ ##__VA_ARGS__); \ _dispatch_assert_crash(_msg); \ free(_msg); \ @@ -1126,6 +1159,8 @@ extern bool _dispatch_kevent_workqueue_enabled; /* #includes dependent on internal.h */ #include "object_internal.h" +#include "workgroup_internal.h" +#include "eventlink_internal.h" #include "semaphore_internal.h" #include "introspection_internal.h" #include "queue_internal.h" diff --git a/src/introspection.c b/src/introspection.c index 27a955be9..f77ddd655 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -427,7 +427,7 @@ dispatch_introspection_get_queues(dispatch_queue_t start, size_t count, dispatch_queue_introspection_context_t next; if (start) { - next = start->do_finalizer; + next = start->do_introspection_ctxt; } else { next = LIST_FIRST(&_dispatch_introspection.queues); } @@ -616,7 +616,7 @@ _dispatch_object_finalizer(dispatch_object_t dou) switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; return dqic->dqic_finalizer; default: return dou._do->do_finalizer; @@ -631,7 +631,7 @@ _dispatch_object_set_finalizer(dispatch_object_t dou, switch (dx_metatype(dou._do)) { case _DISPATCH_LANE_TYPE: case _DISPATCH_WORKLOOP_TYPE: - dqic = dou._dq->do_finalizer; + dqic = dou._dq->do_introspection_ctxt; dqic->dqic_finalizer = finalizer; break; default: @@ -656,7 +656,7 @@ _dispatch_introspection_queue_create(dispatch_queue_t dq) LIST_INIT(&dqic->dqic_order_top_head); LIST_INIT(&dqic->dqic_order_bottom_head); } - dq->do_finalizer = dqic; + dq->do_introspection_ctxt = dqic; _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); LIST_INSERT_HEAD(&_dispatch_introspection.queues, dqic, dqic_list); @@ -689,7 +689,7 @@ _dispatch_introspection_queue_dispose_hook(dispatch_queue_t dq) void 
_dispatch_introspection_queue_dispose(dispatch_queue_t dq) { - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq->do_introspection_ctxt; DISPATCH_INTROSPECTION_INTERPOSABLE_HOOK_CALLOUT(queue_destroy, dq); if (DISPATCH_INTROSPECTION_HOOK_ENABLED(queue_dispose)) { @@ -983,7 +983,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_top_list, te) { otherq = e->dqoe_bottom_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&o_dqic->dqic_order_bottom_head_lock); LIST_REMOVE(e, dqoe_order_bottom_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_bottom_head_lock); @@ -998,7 +998,7 @@ _dispatch_introspection_queue_order_dispose( LIST_FOREACH_SAFE(e, &head, dqoe_order_bottom_list, te) { otherq = e->dqoe_top_tq; - o_dqic = otherq->do_finalizer; + o_dqic = otherq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&o_dqic->dqic_order_top_head_lock); LIST_REMOVE(e, dqoe_order_top_list); _dispatch_unfair_lock_unlock(&o_dqic->dqic_order_top_head_lock); @@ -1070,7 +1070,8 @@ _dispatch_introspection_order_check(dispatch_order_frame_t dof_prev, dispatch_queue_t bottom_q, dispatch_queue_t bottom_tq) { struct dispatch_order_frame_s dof = { .dof_prev = dof_prev }; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t btqic = + bottom_tq->do_introspection_ctxt; // has anyone above bottom_tq ever sync()ed onto top_tq ? 
_dispatch_unfair_lock_lock(&btqic->dqic_order_top_head_lock); @@ -1099,8 +1100,9 @@ _dispatch_introspection_order_record(dispatch_queue_t top_q) dispatch_queue_t top_tq = _dispatch_queue_bottom_target_queue(top_q); dispatch_queue_t bottom_tq = _dispatch_queue_bottom_target_queue(bottom_q); - dispatch_queue_introspection_context_t ttqic = top_tq->do_finalizer; - dispatch_queue_introspection_context_t btqic = bottom_tq->do_finalizer; + dispatch_queue_introspection_context_t ttqic, btqic; + ttqic = top_tq->do_introspection_ctxt; + btqic = bottom_tq->do_introspection_ctxt; _dispatch_unfair_lock_lock(&ttqic->dqic_order_top_head_lock); LIST_FOREACH(it, &ttqic->dqic_order_top_head, dqoe_order_top_list) { @@ -1187,7 +1189,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) [2] = "a recipient", [3] = "both an initiator and a recipient" }; - dispatch_queue_introspection_context_t dqic = dq->do_finalizer; + dispatch_queue_introspection_context_t dqic = dq-> do_introspection_ctxt; bool as_top = !LIST_EMPTY(&dqic->dqic_order_top_head); bool as_bottom = !LIST_EMPTY(&dqic->dqic_order_top_head); @@ -1200,7 +1202,7 @@ _dispatch_introspection_target_queue_changed(dispatch_queue_t dq) "a dispatch_sync", dq, dq->dq_label ?: "", reasons[(int)as_top + 2 * (int)as_bottom]); _dispatch_unfair_lock_lock(&_dispatch_introspection.queues_lock); - _dispatch_introspection_queue_order_dispose(dq->do_finalizer); + _dispatch_introspection_queue_order_dispose(dq->do_introspection_ctxt); _dispatch_unfair_lock_unlock(&_dispatch_introspection.queues_lock); } } diff --git a/src/io.c b/src/io.c index 0624fffd4..d7e04f299 100644 --- a/src/io.c +++ b/src/io.c @@ -151,8 +151,13 @@ enum { #define _dispatch_io_log(x, ...) #endif // DISPATCH_IO_DEBUG +#if !defined(_WIN32) #define _dispatch_fd_debug(msg, fd, ...) \ _dispatch_io_log("fd[0x%x]: " msg, fd, ##__VA_ARGS__) +#else // !defined(_WIN32) +#define _dispatch_fd_debug(msg, fd, ...) 
\ + _dispatch_io_log("fd[0x%" PRIx64 "]: " msg, fd, ##__VA_ARGS__) +#endif // !defined(_WIN32) #define _dispatch_op_debug(msg, op, ...) \ _dispatch_io_log("op[%p]: " msg, op, ##__VA_ARGS__) #define _dispatch_io_channel_debug(msg, channel, ...) \ @@ -406,15 +411,29 @@ dispatch_io_create_f(dispatch_io_type_t type, dispatch_fd_t fd, ^(int error){ cleanup_handler(context, error); }); } +#if defined(_WIN32) +#define _is_separator(ch) ((ch) == '/' || (ch) == '\\') +#else +#define _is_separator(ch) ((ch) == '/') +#endif + dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, int oflag, mode_t mode, dispatch_queue_t queue, void (^cleanup_handler)(int error)) { - if ((type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) || - !(*path == '/')) { + if (type != DISPATCH_IO_STREAM && type != DISPATCH_IO_RANDOM) { return DISPATCH_BAD_INPUT; } +#if defined(_WIN32) + if (PathIsRelativeA(path)) { + return DISPATCH_BAD_INPUT; + } +#else + if (!_is_separator(*path)) { + return DISPATCH_BAD_INPUT; + } +#endif size_t pathlen = strlen(path); dispatch_io_path_data_t path_data = malloc(sizeof(*path_data) + pathlen+1); if (!path_data) { @@ -449,9 +468,15 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, break; default: if ((path_data->oflag & O_CREAT) && - (*(path_data->path + path_data->pathlen - 1) != '/')) { + !_is_separator(*(path_data->path + path_data->pathlen - 1))) { // Check parent directory - char *c = strrchr(path_data->path, '/'); + char *c = NULL; + for (ssize_t i = (ssize_t)path_data->pathlen - 1; i >= 0; i--) { + if (_is_separator(path_data->path[i])) { + c = &path_data->path[i]; + break; + } + } dispatch_assert(c); *c = 0; int perr; @@ -465,7 +490,11 @@ dispatch_io_create_with_path(dispatch_io_type_t type, const char *path, err = 0; break; ); +#if defined(_WIN32) + *c = '\\'; +#else *c = '/'; +#endif } break; ); @@ -1287,18 +1316,31 @@ _dispatch_fd_entry_guarded_open(dispatch_fd_entry_t fd_entry, const char *path, 
#if defined(_WIN32) (void)mode; DWORD dwDesiredAccess = 0; - if (oflag & _O_RDWR) - dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; - else if (oflag & _O_RDONLY) + switch (oflag & (_O_RDONLY | _O_WRONLY | _O_RDWR)) { + case _O_RDONLY: dwDesiredAccess = GENERIC_READ; - else if (oflag & _O_WRONLY) + break; + case _O_WRONLY: dwDesiredAccess = GENERIC_WRITE; + break; + case _O_RDWR: + dwDesiredAccess = GENERIC_READ | GENERIC_WRITE; + break; + } DWORD dwCreationDisposition = OPEN_EXISTING; - if (oflag & _O_CREAT) + if (oflag & _O_CREAT) { dwCreationDisposition = OPEN_ALWAYS; - if (oflag & _O_TRUNC) - dwCreationDisposition = CREATE_ALWAYS; - return (dispatch_fd_t)CreateFile(path, dwDesiredAccess, 0, NULL, dwCreationDisposition, 0, NULL); + if (oflag & _O_EXCL) { + dwCreationDisposition = CREATE_NEW; + } else if (oflag & _O_TRUNC) { + dwCreationDisposition = CREATE_ALWAYS; + } + } else if (oflag & _O_TRUNC) { + dwCreationDisposition = TRUNCATE_EXISTING; + } + return (dispatch_fd_t)CreateFile(path, dwDesiredAccess, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, + dwCreationDisposition, 0, NULL); #else return open(path, oflag, mode); #endif @@ -1385,7 +1427,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) // On fds lock queue dispatch_fd_entry_t fd_entry = _dispatch_fd_entry_create( _dispatch_io_fds_lockq); +#if !defined(_WIN32) _dispatch_fd_entry_debug("create: fd %d", fd_entry, fd); +#else // !defined(_WIN32) + _dispatch_fd_entry_debug("create: fd %"PRId64, fd_entry, fd); +#endif // !defined(_WIN32) fd_entry->fd = fd; LIST_INSERT_HEAD(&_dispatch_io_fds[hash], fd_entry, fd_list); fd_entry->barrier_queue = dispatch_queue_create( @@ -1399,11 +1445,11 @@ _dispatch_fd_entry_create_with_fd(dispatch_fd_t fd, uintptr_t hash) int result = ioctlsocket((SOCKET)fd, (long)FIONBIO, &value); (void)dispatch_assume_zero(result); _dispatch_stream_init(fd_entry, - _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)); + 
_dispatch_get_default_queue(false)); } else { dispatch_suspend(fd_entry->barrier_queue); - dispatch_once_f(&_dispatch_io_devs_lockq_pred, NULL, - _dispatch_io_devs_lockq_init); + dispatch_once_f(&_dispatch_io_init_pred, NULL, + _dispatch_io_queues_init); dispatch_async(_dispatch_io_devs_lockq, ^{ _dispatch_disk_init(fd_entry, 0); dispatch_resume(fd_entry->barrier_queue); @@ -2227,9 +2273,8 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) (void)chunk_size; #else if (_dispatch_io_get_error(op, NULL, true)) return; -#if defined(__linux__) || defined(__FreeBSD__) - // linux does not support fcntl (F_RDAVISE) - // define necessary datastructure and use readahead +#if !defined(F_RDADVISE) + // Compatibility struct whose values may be passed to posix_fadvise() struct radvisory { off_t ra_offset; int ra_count; @@ -2254,13 +2299,7 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) } advise.ra_offset = op->advise_offset; op->advise_offset += advise.ra_count; -#if defined(__linux__) - _dispatch_io_syscall_switch(err, - readahead(op->fd_entry->fd, advise.ra_offset, (size_t)advise.ra_count), - case EINVAL: break; // fd does refer to a non-supported filetype - default: (void)dispatch_assume_zero(err); break; - ); -#else +#if defined(F_RDADVISE) _dispatch_io_syscall_switch(err, fcntl(op->fd_entry->fd, F_RDADVISE, &advise), case EFBIG: break; // advised past the end of the file rdar://10415691 @@ -2268,8 +2307,19 @@ _dispatch_operation_advise(dispatch_operation_t op, size_t chunk_size) // TODO: set disk status on error default: (void)dispatch_assume_zero(err); break; ); -#endif -#endif +#elif defined(HAVE_POSIX_FADVISE) + err = posix_fadvise(op->fd_entry->fd, advise.ra_offset, + (off_t)advise.ra_count, POSIX_FADV_WILLNEED); + switch (err) { + case 0: break; + case EINVAL: break; // unsupported advice or file type + case ESPIPE: break; // fd refers to a pipe or FIFO + default: (void)dispatch_assume_zero(err); break; + } +#else +#error 
"_dispatch_operation_advise not implemented on this platform" +#endif // defined(F_RDADVISE) +#endif // defined(_WIN32) } static int @@ -2312,7 +2362,10 @@ _dispatch_operation_perform(dispatch_operation_t op) } op->buf = _aligned_malloc(op->buf_siz, siInfo.dwPageSize); #else - op->buf = valloc(op->buf_siz); + err = posix_memalign(&op->buf, (size_t)PAGE_SIZE, op->buf_siz); + if (err != 0) { + goto error; + } #endif _dispatch_op_debug("buffer allocated", op); } else if (op->direction == DOP_DIR_WRITE) { @@ -2366,7 +2419,54 @@ _dispatch_operation_perform(dispatch_operation_t op) if (op->direction == DOP_DIR_READ) { if (op->params.type == DISPATCH_IO_STREAM) { #if defined(_WIN32) - ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); + HANDLE hFile = (HANDLE)op->fd_entry->fd; + BOOL bSuccess; + if (_dispatch_handle_is_socket(hFile)) { + processed = recv((SOCKET)hFile, buf, len, 0); + if (processed < 0) { + bSuccess = FALSE; + err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK) { + err = EAGAIN; + } + goto error; + } + bSuccess = TRUE; + } else if (GetFileType(hFile) == FILE_TYPE_PIPE) { + OVERLAPPED ovlOverlapped = {}; + DWORD dwTotalBytesAvail; + bSuccess = PeekNamedPipe(hFile, NULL, 0, NULL, + &dwTotalBytesAvail, NULL); + if (bSuccess) { + if (dwTotalBytesAvail == 0) { + err = EAGAIN; + goto error; + } + len = MIN(len, dwTotalBytesAvail); + bSuccess = ReadFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); + } + if (!bSuccess) { + DWORD dwError = GetLastError(); + if (dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hFile, &ovlOverlapped, + (LPDWORD)&processed, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (dwError == ERROR_BROKEN_PIPE || + dwError == ERROR_NO_DATA) { + bSuccess = TRUE; + processed = 0; + } + } + } else { + bSuccess = ReadFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, NULL); + } + if (!bSuccess) { + err = EIO; + goto error; + } #else processed = read(op->fd_entry->fd, buf, 
len); #endif @@ -2375,7 +2475,8 @@ _dispatch_operation_perform(dispatch_operation_t op) OVERLAPPED ovlOverlapped = {}; ovlOverlapped.Offset = off & 0xffffffff; ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; - ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); + ReadFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); #else processed = pread(op->fd_entry->fd, buf, len, off); #endif @@ -2383,7 +2484,62 @@ _dispatch_operation_perform(dispatch_operation_t op) } else if (op->direction == DOP_DIR_WRITE) { if (op->params.type == DISPATCH_IO_STREAM) { #if defined(_WIN32) - WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, NULL); + HANDLE hFile = (HANDLE)op->fd_entry->fd; + BOOL bSuccess; + if (_dispatch_handle_is_socket(hFile)) { + processed = send((SOCKET)hFile, buf, len, 0); + if (processed < 0) { + bSuccess = FALSE; + err = WSAGetLastError(); + if (err == WSAEWOULDBLOCK) { + err = EAGAIN; + } + goto error; + } + bSuccess = TRUE; + } else if (GetFileType(hFile) == FILE_TYPE_PIPE) { + // Unfortunately there isn't a good way to achieve O_NONBLOCK + // semantics when writing to a pipe. SetNamedPipeHandleState() + // can allow pipes to be switched into a "no wait" mode, but + // that doesn't work on most pipe handles because Windows + // doesn't consistently create pipes with FILE_WRITE_ATTRIBUTES + // access. The best we can do is to try to query the write quota + // and then write as much as we can. 
+ IO_STATUS_BLOCK iosb; + FILE_PIPE_LOCAL_INFORMATION fpli; + NTSTATUS status = _dispatch_NtQueryInformationFile(hFile, &iosb, + &fpli, sizeof(fpli), FilePipeLocalInformation); + if (NT_SUCCESS(status)) { + if (fpli.WriteQuotaAvailable == 0) { + err = EAGAIN; + goto error; + } + len = MIN(len, fpli.WriteQuotaAvailable); + } + OVERLAPPED ovlOverlapped = {}; + bSuccess = WriteFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); + if (!bSuccess) { + DWORD dwError = GetLastError(); + if (dwError == ERROR_IO_PENDING) { + bSuccess = GetOverlappedResult(hFile, &ovlOverlapped, + (LPDWORD)&processed, /* bWait */ TRUE); + dwError = GetLastError(); + } + if (dwError == ERROR_BROKEN_PIPE || + dwError == ERROR_NO_DATA) { + bSuccess = TRUE; + processed = 0; + } + } + } else { + bSuccess = WriteFile(hFile, buf, (DWORD)len, + (LPDWORD)&processed, NULL); + } + if (!bSuccess) { + err = EIO; + goto error; + } #else processed = write(op->fd_entry->fd, buf, len); #endif @@ -2392,7 +2548,8 @@ _dispatch_operation_perform(dispatch_operation_t op) OVERLAPPED ovlOverlapped = {}; ovlOverlapped.Offset = off & 0xffffffff; ovlOverlapped.OffsetHigh = (off >> 32) & 0xffffffff; - WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, (LPDWORD)&processed, &ovlOverlapped); + WriteFile((HANDLE)op->fd_entry->fd, buf, (DWORD)len, + (LPDWORD)&processed, &ovlOverlapped); #else processed = pwrite(op->fd_entry->fd, buf, len, off); #endif @@ -2476,8 +2633,14 @@ _dispatch_operation_deliver_data(dispatch_operation_t op, if (op->direction == DOP_DIR_READ) { if (op->buf_len) { void *buf = op->buf; +#if defined(_WIN32) + // buf is allocated with _aligned_malloc() + data = dispatch_data_create(buf, op->buf_len, NULL, + ^{ _aligned_free(buf); }); +#else data = dispatch_data_create(buf, op->buf_len, NULL, DISPATCH_DATA_DESTRUCTOR_FREE); +#endif op->buf = NULL; op->buf_len = 0; dispatch_data_t d = dispatch_data_create_concat(op->data, data); @@ -2559,11 +2722,11 @@ static size_t 
_dispatch_io_debug_attr(dispatch_io_t channel, char* buf, size_t bufsiz) { dispatch_queue_t target = channel->do_targetq; - return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%x, %sfd_entry = %p, " + return dsnprintf(buf, bufsiz, "type = %s, fd = 0x%" PRIxPTR ", %sfd_entry = %p, " "queue = %p, target = %s[%p], barrier_queue = %p, barrier_group = " "%p, err = 0x%x, low = 0x%zx, high = 0x%zx, interval%s = %llu ", channel->params.type == DISPATCH_IO_STREAM ? "stream" : "random", - channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? + (intptr_t)channel->fd_actual, channel->atomic_flags & DIO_STOPPED ? "stopped, " : channel->atomic_flags & DIO_CLOSED ? "closed, " : "", channel->fd_entry, channel->queue, target && target->dq_label ? target->dq_label : "", target, channel->barrier_queue, @@ -2593,13 +2756,13 @@ _dispatch_operation_debug_attr(dispatch_operation_t op, char* buf, { dispatch_queue_t target = op->do_targetq; dispatch_queue_t oqtarget = op->op_q ? op->op_q->do_targetq : NULL; - return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%x, fd_entry = %p, " + return dsnprintf(buf, bufsiz, "type = %s %s, fd = 0x%" PRIxPTR ", fd_entry = %p, " "channel = %p, queue = %p -> %s[%p], target = %s[%p], " "offset = %lld, length = %zu, done = %zu, undelivered = %zu, " "flags = %u, err = 0x%x, low = 0x%zx, high = 0x%zx, " "interval%s = %llu ", op->params.type == DISPATCH_IO_STREAM ? "stream" : "random", op->direction == DOP_DIR_READ ? "read" : - "write", op->fd_entry ? op->fd_entry->fd : -1, op->fd_entry, + "write", (intptr_t)(op->fd_entry ? op->fd_entry->fd : -1), op->fd_entry, op->channel, op->op_q, oqtarget && oqtarget->dq_label ? oqtarget->dq_label : "", oqtarget, target && target->dq_label ? 
target->dq_label : "", target, (long long)op->offset, op->length, diff --git a/src/mach.c b/src/mach.c index 9d64c0df9..f8a439e3d 100644 --- a/src/mach.c +++ b/src/mach.c @@ -71,6 +71,9 @@ static dispatch_continuation_t _dispatch_mach_msg_async_reply_wrap( static void _dispatch_mach_notification_kevent_unregister(dispatch_mach_t dm); static void _dispatch_mach_notification_kevent_register(dispatch_mach_t dm, mach_port_t send); +static inline mach_msg_option_t +_dispatch_mach_send_msg_prepare(dispatch_mach_t dm, + dispatch_mach_msg_t dmsg, mach_msg_option_t options); // For tests only. DISPATCH_EXPORT void _dispatch_mach_hooks_install_default(void); @@ -196,12 +199,18 @@ dispatch_mach_request_no_senders(dispatch_mach_t dm) _dispatch_queue_setter_assert_inactive(dm); } +void +dispatch_mach_notify_no_senders(dispatch_mach_t dm, bool made_sendrights) +{ + dm->dm_arm_no_senders = true; + dm->dm_made_sendrights = made_sendrights; + _dispatch_queue_setter_assert_inactive(dm); +} + void dispatch_mach_set_flags(dispatch_mach_t dm, dispatch_mach_flags_t flags) { dm->dm_strict_reply = !!(flags & DMF_USE_STRICT_REPLY); - dm->dm_arm_no_senders = !!(flags & DMF_REQUEST_NO_SENDERS); - _dispatch_queue_setter_assert_inactive(dm); } @@ -213,8 +222,28 @@ _dispatch_mach_arm_no_senders(dispatch_mach_t dm, bool allow_previous) kern_return_t kr; if (MACH_PORT_VALID(recvp)) { + // + // + // Establishing a peer-connection can be done in two ways: + // 1) the client makes a receive right with an inserted send right, + // and ships the receive right across in a checkin message, + // + // 2) the server makes a receive right and "make-send" a send right + // in the checkin reply. + // + // While for the case (1) which is the typical XPC case, at the time + // dispatch_mach_connect() is called the send right for the peer + // connection is made, for case (2) it will only be made later. + // + // We use dm->dm_made_sendrights to determine which case we're in. 
If + // (1), sync = 0 since the send right could have gone away and we want + // no-senders to fire immediately. If (2), sync = 1, we want to fire + // no-senders only after creating at least one send right. + + mach_port_mscount_t sync = dm->dm_made_sendrights ? 0 : 1; + kr = mach_port_request_notification(mach_task_self(), recvp, - MACH_NOTIFY_NO_SENDERS, 0, recvp, + MACH_NOTIFY_NO_SENDERS, sync, recvp, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); @@ -472,30 +501,13 @@ _dispatch_mach_reply_kevent_register(dispatch_mach_t dm, mach_port_t reply_port, #pragma mark - #pragma mark dispatch_mach_msg -DISPATCH_ALWAYS_INLINE DISPATCH_CONST -static inline bool -_dispatch_use_mach_special_reply_port(void) -{ -#if DISPATCH_USE_MACH_SEND_SYNC_OVERRIDE - return true; -#else -#define thread_get_special_reply_port() ({__builtin_trap(); MACH_PORT_NULL;}) - return false; -#endif -} - static void _dispatch_destruct_reply_port(mach_port_t reply_port, enum thread_destruct_special_reply_port_rights rights) { kern_return_t kr = KERN_SUCCESS; - if (_dispatch_use_mach_special_reply_port()) { - kr = thread_destruct_special_reply_port(reply_port, rights); - } else if (rights == THREAD_SPECIAL_REPLY_PORT_ALL || - rights == THREAD_SPECIAL_REPLY_PORT_RECEIVE_ONLY) { - kr = mach_port_destruct(mach_task_self(), reply_port, 0, 0); - } + kr = thread_destruct_special_reply_port(reply_port, rights); DISPATCH_VERIFY_MIG(kr); dispatch_assume_zero(kr); } @@ -504,25 +516,16 @@ static mach_port_t _dispatch_get_thread_reply_port(void) { mach_port_t reply_port, mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { reply_port = mrp; _dispatch_debug("machport[0x%08x]: borrowed thread sync reply port", reply_port); } else { - if (_dispatch_use_mach_special_reply_port()) { - reply_port = 
thread_get_special_reply_port(); - _dispatch_set_thread_special_reply_port(reply_port); - } else { - reply_port = mach_reply_port(); - _dispatch_set_thread_mig_reply_port(reply_port); - } + reply_port = thread_get_special_reply_port(); + _dispatch_set_thread_special_reply_port(reply_port); if (unlikely(!MACH_PORT_VALID(reply_port))) { - DISPATCH_CLIENT_CRASH(_dispatch_use_mach_special_reply_port(), + DISPATCH_CLIENT_CRASH(0, "Unable to allocate reply port, possible port leak"); } _dispatch_debug("machport[0x%08x]: allocated thread sync reply port", @@ -535,12 +538,7 @@ _dispatch_get_thread_reply_port(void) static void _dispatch_clear_thread_reply_port(mach_port_t reply_port) { - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (reply_port != mrp) { if (mrp) { _dispatch_debug("machport[0x%08x]: did not clear thread sync reply " @@ -548,11 +546,7 @@ _dispatch_clear_thread_reply_port(mach_port_t reply_port) } return; } - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); - } else { - _dispatch_set_thread_mig_reply_port(MACH_PORT_NULL); - } + _dispatch_set_thread_special_reply_port(MACH_PORT_NULL); _dispatch_debug_machport(reply_port); _dispatch_debug("machport[0x%08x]: cleared thread sync reply port", reply_port); @@ -562,23 +556,14 @@ static void _dispatch_set_thread_reply_port(mach_port_t reply_port) { _dispatch_debug_machport(reply_port); - mach_port_t mrp; - if (_dispatch_use_mach_special_reply_port()) { - mrp = _dispatch_get_thread_special_reply_port(); - } else { - mrp = _dispatch_get_thread_mig_reply_port(); - } + mach_port_t mrp = _dispatch_get_thread_special_reply_port(); if (mrp) { _dispatch_destruct_reply_port(reply_port, THREAD_SPECIAL_REPLY_PORT_ALL); _dispatch_debug("machport[0x%08x]: deallocated sync reply 
port " "(found 0x%08x)", reply_port, mrp); } else { - if (_dispatch_use_mach_special_reply_port()) { - _dispatch_set_thread_special_reply_port(reply_port); - } else { - _dispatch_set_thread_mig_reply_port(reply_port); - } + _dispatch_set_thread_special_reply_port(reply_port); _dispatch_debug("machport[0x%08x]: restored thread sync reply port", reply_port); } @@ -1050,6 +1035,39 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, } } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_mach_send_priority_in_voucher(void) +{ + return DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED; +} + +DISPATCH_ALWAYS_INLINE +static inline mach_msg_priority_t +_dispatch_mach_send_priority(dispatch_mach_msg_t dmsg, + dispatch_qos_t qos_ovr, mach_msg_option_t *opts) +{ + qos_ovr = _dispatch_qos_propagate(qos_ovr); + if (qos_ovr) { +#if DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED + if (!_dispatch_mach_send_priority_in_voucher()) { + mach_msg_qos_t qos; + int relpri; + + qos = (mach_msg_qos_t)_dispatch_qos_from_pp(dmsg->dmsg_priority); + relpri = _pthread_priority_relpri(dmsg->dmsg_priority); + *opts |= MACH_SEND_OVERRIDE; + return mach_msg_priority_encode((mach_msg_qos_t)qos_ovr, qos, relpri); + } +#else + (void)dmsg; +#endif + *opts |= MACH_SEND_OVERRIDE; + return (mach_msg_priority_t)_dispatch_qos_to_pp(qos_ovr); + } + return MACH_MSG_PRIORITY_UNSPECIFIED; +} + DISPATCH_NOINLINE static uint32_t _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, @@ -1077,8 +1095,19 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dm->dm_needs_mgr = true; goto out; } + // Tag the checkin message with a voucher and priority and necessary + // options + (void) _dispatch_mach_send_msg_prepare(dm, dsrr->dmsr_checkin, 0); if (unlikely(!_dispatch_mach_msg_send(dm, dsrr->dmsr_checkin, NULL, qos, DM_SEND_INVOKE_NONE))) { + + // We failed to send the checkin message, clear the voucher on + // it and let the retry tag it with the voucher later. 
+ voucher_t v = dsrr->dmsr_checkin->dmsg_voucher; + if (v) { + _voucher_release(v); + dsrr->dmsr_checkin->dmsg_voucher = NULL; + } goto out; } if (dm->dm_arm_no_senders) { @@ -1108,24 +1137,20 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; - if (dmsg->dmsg_priority != _voucher_get_priority(voucher)) { + if (_dispatch_mach_send_priority_in_voucher() && + dmsg->dmsg_priority != _voucher_get_priority(voucher)) { ipc_kvoucher = _voucher_create_mach_voucher_with_priority( voucher, dmsg->dmsg_priority); } _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (ipc_kvoucher) { + if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { kvoucher_move_send = true; clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, ipc_kvoucher, kvoucher_move_send); } else { clear_voucher = _voucher_mach_msg_set(msg, voucher); } - if (qos) { - opts |= MACH_SEND_OVERRIDE; - msg_priority = (mach_msg_priority_t) - _dispatch_priority_compute_propagated( - _dispatch_qos_to_pp(qos), 0); - } + msg_priority = _dispatch_mach_send_priority(dmsg, qos, &opts); if (reply_port && dm->dm_strict_reply) { opts |= MACH_MSG_STRICT_REPLY; } @@ -1134,9 +1159,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, if (reply_port) _dispatch_debug_machport(reply_port); if (msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) { if (dwr->dwr_refs.dmr_reply_port_owned) { - if (_dispatch_use_mach_special_reply_port()) { - opts |= MACH_SEND_SYNC_OVERRIDE; - } + opts |= MACH_SEND_SYNC_OVERRIDE; _dispatch_clear_thread_reply_port(reply_port); } _dispatch_mach_reply_waiter_register(dm, dwr, reply_port, dmsg); @@ -1169,7 +1192,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, // send kevent must be installed on the manager queue dm->dm_needs_mgr = true; } - if (ipc_kvoucher) { + if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { _dispatch_kvoucher_debug("reuse on re-send", 
ipc_kvoucher); voucher_t ipc_voucher; ipc_voucher = _voucher_create_with_priority_and_mach_voucher( @@ -1391,7 +1414,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, qos = _dmsr_state_max_qos(new_state); if (unlikely(new_state & DISPATCH_MACH_STATE_UNLOCK_MASK)) { os_atomic_thread_fence(dependency); - dmsr = os_atomic_force_dependency_on(dmsr, new_state); + dmsr = os_atomic_inject_dependency(dmsr, new_state); goto again; } @@ -1516,9 +1539,13 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, uint64_t old_state, new_state, state_flags = 0; struct dispatch_object_s *prev; dispatch_wakeup_flags_t wflags = 0; - bool is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)); + bool is_send_barrier = false; dispatch_tid owner; + if (_dispatch_object_has_vtable(dou._dc)) { + is_send_barrier = (dou._dc->do_vtable == DC_VTABLE(MACH_SEND_BARRIER)); + } + // the send queue needs to retain // the mach channel if not empty, for the whole duration of this call // @@ -1759,13 +1786,11 @@ _dispatch_mach_checkin_options(void) return options; } - - static inline mach_msg_option_t _dispatch_mach_send_options(void) { - mach_msg_option_t options = 0; - return options; + //rdar://problem/13740985&47300191&47605096 + return (_dispatch_is_background_thread() ? 
MACH_SEND_NOIMPORTANCE : 0); } DISPATCH_ALWAYS_INLINE @@ -1786,8 +1811,7 @@ _dispatch_mach_send_msg_prepare(dispatch_mach_t dm, dmsg->dmsg_priority = 0; } else { unsigned int flags = DISPATCH_PRIORITY_PROPAGATE_CURRENT; - if ((options & DISPATCH_MACH_WAIT_FOR_REPLY) && - _dispatch_use_mach_special_reply_port()) { + if (options & DISPATCH_MACH_WAIT_FOR_REPLY) { // TODO: remove QoS contribution of sync IPC messages to send queue // rdar://31848737 flags |= DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC; @@ -1924,11 +1948,9 @@ _dispatch_mach_send_and_wait_for_reply(dispatch_mach_t dm, *returned_send_result = _dispatch_mach_send_msg(dm, dmsg, &dc_wait,options); if (dwr->dwr_refs.dmr_reply_port_owned) { _dispatch_clear_thread_reply_port(reply_port); - if (_dispatch_use_mach_special_reply_port()) { - // link special reply port to send right for remote receive right - // TODO: extend to pre-connect phase - send = dm->dm_send_refs->dmsr_send; - } + // link special reply port to send right for remote receive right + // TODO: extend to pre-connect phase + send = dm->dm_send_refs->dmsr_send; } dmsg = _dispatch_mach_msg_reply_recv(dm, dwr, reply_port, send); #if DISPATCH_DEBUG @@ -2231,6 +2253,15 @@ _dispatch_mach_handoff_context(mach_port_t port) return dihc; } +bool +dispatch_mach_can_handoff_4libxpc(void) +{ + dispatch_thread_context_t dtc; + + dtc = _dispatch_thread_context_find(_dispatch_mach_msg_context_key); + return dtc && dtc->dtc_dmsg && dtc->dtc_dih->dih_dc.dc_other == NULL; +} + static void _dispatch_ipc_handoff_release(dispatch_ipc_handoff_t dih) { @@ -2502,7 +2533,7 @@ _dispatch_mach_barrier_set_vtable(dispatch_continuation_t dc, { dc->dc_data = (void *)dc->dc_flags; dc->dc_other = dm; - dc->do_vtable = vtable; // Must be after dc_flags load, dc_vtable aliases + dc->do_vtable = vtable; // Must be after dc_flags load, do_vtable aliases } DISPATCH_NOINLINE @@ -3052,6 +3083,36 @@ _dispatch_mach_msg_async_reply_invoke(dispatch_continuation_t dc, 
_dispatch_continuation_free(dc); } +void +dispatch_mach_msg_get_filter_policy_id(dispatch_mach_msg_t msg, mach_msg_filter_id *filter_id) +{ + mach_msg_trailer_t *tlr = NULL; + mach_msg_mac_trailer_t *mac_tlr; + + if (!filter_id) { + DISPATCH_CLIENT_CRASH((uintptr_t)filter_id, "Filter id should be non-NULL"); + } + + mach_msg_header_t *hdr = dispatch_mach_msg_get_msg(msg, NULL); + if (!hdr) { + DISPATCH_CLIENT_CRASH((uintptr_t)msg, "Messsage should be non-NULL"); + } + tlr = (mach_msg_trailer_t *)((unsigned char *)hdr + + round_msg(hdr->msgh_size)); + + // The trailer should always be of format zero. + if (tlr->msgh_trailer_type != MACH_MSG_TRAILER_FORMAT_0) { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_type, "Trailer format is invalid"); + } + + if (tlr->msgh_trailer_size >= sizeof(mach_msg_mac_trailer_t)) { + mac_tlr = (mach_msg_mac_trailer_t *)tlr; + *filter_id = mac_tlr->msgh_ad; + } else { + DISPATCH_INTERNAL_CRASH(tlr->msgh_trailer_size, "Trailer doesn't contain filter policy id"); + } +} + #pragma mark - #pragma mark dispatch_mig_server @@ -3091,7 +3152,7 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, dispatch_mig_callback_t callback) { mach_msg_options_t options = MACH_RCV_MSG | MACH_RCV_TIMEOUT - | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) + | MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AV) | MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | MACH_RCV_VOUCHER; mach_msg_options_t tmp_options; mig_reply_error_t *bufTemp, *bufRequest, *bufReply; diff --git a/src/object.m b/src/object.m index 936795871..59f271491 100644 --- a/src/object.m +++ b/src/object.m @@ -382,13 +382,16 @@ - (void)_xref_dispose { @end -#define DISPATCH_CLASS_IMPL(name) \ +#define EMPTY_OS_OBJECT_CLASS_IMPL(name) \ OS_OBJECT_NONLAZY_CLASS \ - @implementation DISPATCH_CLASS(name) \ + @implementation name \ OS_OBJECT_NONLAZY_CLASS_LOAD \ DISPATCH_UNAVAILABLE_INIT() \ @end +#define DISPATCH_CLASS_IMPL(name) \ + EMPTY_OS_OBJECT_CLASS_IMPL(DISPATCH_CLASS(name)) + #if 
!DISPATCH_DATA_IS_BRIDGED_TO_NSDATA DISPATCH_CLASS_IMPL(data) #endif @@ -409,6 +412,67 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(operation) DISPATCH_CLASS_IMPL(disk) +#pragma mark os_workgroups + +@implementation OS_OBJECT_CLASS(os_workgroup) +DISPATCH_UNAVAILABLE_INIT() +OS_OBJECT_USES_XREF_DISPOSE() + +- (void)_xref_dispose { + _os_workgroup_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_dispose(self); + [super dealloc]; +} + +- (NSString *) debugDescription { + Class nsstring = objc_lookUpClass("NSString"); + if (!nsstring) return nil; + char buf[2048]; + + os_workgroup_t wg = (os_workgroup_t) self; + _os_workgroup_debug(wg, buf, sizeof(buf)); + + return [nsstring stringWithUTF8String:buf]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_interval) +DISPATCH_UNAVAILABLE_INIT() + +- (void) _xref_dispose { + _os_workgroup_interval_xref_dispose(self); + [super _xref_dispose]; +} + +- (void) dealloc { + _os_workgroup_interval_dispose(self); + [super dealloc]; +} +@end + +@implementation OS_OBJECT_CLASS(os_workgroup_parallel) +DISPATCH_UNAVAILABLE_INIT() +@end + +#pragma mark eventlink + +@implementation OS_OBJECT_CLASS(os_eventlink) +DISPATCH_UNAVAILABLE_INIT() + +- (void) dealloc { + _os_eventlink_dispose(self); + [super dealloc]; +} + +@end + + +#pragma mark vouchers + OS_OBJECT_NONLAZY_CLASS @implementation OS_OBJECT_CLASS(voucher) OS_OBJECT_NONLAZY_CLASS_LOAD diff --git a/src/object_internal.h b/src/object_internal.h index d2126b760..50e07ac1b 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -178,10 +178,12 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t 
DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #else #define DISPATCH_VTABLE_SUBCLASS_INSTANCE(name, ctype, ...) \ OS_OBJECT_VTABLE_SUBCLASS_INSTANCE(dispatch_##name, dispatch_##ctype, \ @@ -191,19 +193,21 @@ #define DISPATCH_OBJECT_VTABLE_HEADER(x) \ unsigned long const do_type; \ const char *const do_kind; \ - void (*const do_dispose)(struct x##_s *, bool *allow_free); \ - size_t (*const do_debug)(struct x##_s *, char *, size_t); \ - void (*const do_invoke)(struct x##_s *, dispatch_invoke_context_t, \ - dispatch_invoke_flags_t) + void DISPATCH_VTABLE_ENTRY(do_dispose)(struct x##_s *, \ + bool *allow_free); \ + size_t DISPATCH_VTABLE_ENTRY(do_debug)(struct x##_s *, \ + char *, size_t); \ + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct x##_s *, \ + dispatch_invoke_context_t, dispatch_invoke_flags_t) #endif #define DISPATCH_QUEUE_VTABLE_HEADER(x); \ DISPATCH_OBJECT_VTABLE_HEADER(x); \ - void (*const dq_activate)(dispatch_queue_class_t); \ - void (*const dq_wakeup)(dispatch_queue_class_t, dispatch_qos_t, \ - dispatch_wakeup_flags_t); \ - void (*const dq_push)(dispatch_queue_class_t, dispatch_object_t, \ - dispatch_qos_t) + void DISPATCH_VTABLE_ENTRY(dq_activate)(dispatch_queue_class_t); \ + void DISPATCH_VTABLE_ENTRY(dq_wakeup)(dispatch_queue_class_t, \ + dispatch_qos_t, dispatch_wakeup_flags_t); \ + void DISPATCH_VTABLE_ENTRY(dq_push)(dispatch_queue_class_t, \ + dispatch_object_t, dispatch_qos_t) #define dx_vtable(x) (&(x)->do_vtable->_os_obj_vtable) #define dx_type(x) dx_vtable(x)->do_type @@ -338,7 +342,7 @@ DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, // @const DISPATCH_INVOKE_THREAD_BOUND // We're draining from the context of a thread-bound queue (main thread) // - // @const DISPATCH_INVOKE_WORKER_DRAIN + // @const DISPATCH_INVOKE_WORKLOOP_DRAIN // The queue at the bottom of this drain is a workloop that supports // 
reordering. // @@ -432,7 +436,7 @@ typedef struct _os_object_vtable_s { typedef struct _os_object_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); } _os_object_s; @@ -447,7 +451,7 @@ typedef struct _os_object_s { #else #define OS_OBJECT_STRUCT_HEADER(x) \ _OS_OBJECT_HEADER(\ - const struct x##_vtable_s *do_vtable, \ + const struct x##_vtable_s *__ptrauth_objc_isa_pointer do_vtable, \ do_ref_cnt, \ do_xref_cnt) #endif @@ -458,7 +462,10 @@ typedef struct _os_object_s { struct dispatch_##x##_s *volatile do_next; \ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ - void *do_finalizer + union { \ + dispatch_function_t DISPATCH_FUNCTION_POINTER do_finalizer; \ + void *do_introspection_ctxt; \ + } #define DISPATCH_OBJECT_HEADER(x) \ struct dispatch_object_s _as_do[0]; \ @@ -533,7 +540,7 @@ OS_OBJECT_OBJC_CLASS_DECL(object); // This is required by the dispatch_data_t/NSData bridging, which is not // supported on the old runtime. #define DISPATCH_OBJECT_TFB(f, o, ...) 
\ - if (unlikely(((uintptr_t)((o)._os_obj->os_obj_isa) & 1) || \ + if (unlikely(((*(uintptr_t *)&((o)._os_obj->os_obj_isa)) & 1) || \ (Class)((o)._os_obj->os_obj_isa) < \ (Class)OS_OBJECT_VTABLE(dispatch_object) || \ (Class)((o)._os_obj->os_obj_isa) >= \ diff --git a/src/queue.c b/src/queue.c index 63f565890..6ac062cff 100644 --- a/src/queue.c +++ b/src/queue.c @@ -36,6 +36,9 @@ static inline void _dispatch_queue_wakeup_with_override( static void _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, struct dispatch_object_s *dc, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, uint64_t owned); +static inline bool +_dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, + uint64_t dq_state); #pragma mark - #pragma mark dispatch_assert_queue @@ -201,10 +204,12 @@ _dispatch_set_priority_and_voucher_slow(pthread_priority_t priority, static void _dispatch_async_redirect_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#if HAVE_PTHREAD_WORKQUEUE_QOS static void _dispatch_queue_override_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); static void _dispatch_workloop_stealer_invoke(dispatch_continuation_t dc, dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags); +#endif // HAVE_PTHREAD_WORKQUEUE_QOS const struct dispatch_continuation_vtable_s _dispatch_continuation_vtables[] = { DC_VTABLE_ENTRY(ASYNC_REDIRECT, @@ -558,7 +563,7 @@ dispatch_block_cancel(dispatch_block_t db) (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); } -long +intptr_t dispatch_block_testcancel(dispatch_block_t db) { dispatch_block_private_data_t dbpd = _dispatch_block_get_data(db); @@ -569,7 +574,7 @@ dispatch_block_testcancel(dispatch_block_t db) return (bool)(dbpd->dbpd_atomic_flags & DBF_CANCELED); } -long +intptr_t dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) { dispatch_block_private_data_t dbpd = 
_dispatch_block_get_data(db); @@ -937,7 +942,7 @@ _dispatch_lane_non_barrier_complete_finish(dispatch_lane_t dq, // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } return _dispatch_lane_barrier_complete(dq, 0, flags); } @@ -1061,9 +1066,10 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, // these bits should be set if the lock was never contended/discovered. const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK | DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY | - DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_SYNC_TRANSFER | + DISPATCH_QUEUE_RECEIVED_OVERRIDE | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; uint64_t old_state, new_state; + dispatch_wakeup_flags_t flags = 0; // similar to _dispatch_queue_drain_try_unlock os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { @@ -1072,7 +1078,7 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; if (unlikely(old_state & fail_unlock_mask)) { os_atomic_rmw_loop_give_up({ - return _dispatch_lane_barrier_complete(dq, 0, 0); + return _dispatch_lane_barrier_complete(dq, 0, flags); }); } }); @@ -1102,7 +1108,6 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, { dispatch_wlh_t waiter_wlh = dsc->dc_data; -#if DISPATCH_USE_KEVENT_WORKLOOP // // We need to interact with a workloop if any of the following 3 cases: // 1. the current owner of the lock has a SYNC_WAIT knote to destroy @@ -1115,10 +1120,9 @@ _dispatch_waiter_wake(dispatch_sync_context_t dsc, dispatch_wlh_t wlh, // without pushing (waiter_wlh == DISPATCH_WLH_ANON), in which case the next // owner is really woken up when the thread event is signaled. 
// -#endif - if (_dq_state_in_sync_transfer(old_state) || - _dq_state_in_sync_transfer(new_state) || - (waiter_wlh != DISPATCH_WLH_ANON)) { + if ((_dq_state_is_base_wlh(old_state) && !dsc->dsc_from_async) || + _dq_state_is_base_wlh(new_state) || + waiter_wlh != DISPATCH_WLH_ANON) { _dispatch_event_loop_wake_owner(dsc, wlh, old_state, new_state); } if (unlikely(waiter_wlh == DISPATCH_WLH_ANON)) { @@ -1183,8 +1187,8 @@ _dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, } if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // _dispatch_barrier_async_and_wait_f_slow() expects dc_other to be the - // bottom queue of the graph + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph dsc->dc_other = dq; } return _dispatch_waiter_wake_wlh_anon(dsc); @@ -1248,17 +1252,10 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, } // passing the QoS of `dq` helps pushing on low priority waiters with // legacy workloops. 
-#if DISPATCH_INTROSPECTION dsc->dsc_from_async = false; -#endif return dx_push(tq, dsc, _dq_state_max_qos(old_state)); } - if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // _dispatch_async_and_wait_f_slow() expects dc_other to be the - // bottom queue of the graph - dsc->dc_other = dq; - } #if DISPATCH_INTROSPECTION if (dsc->dsc_from_async) { _dispatch_trace_runtime_event(async_sync_handoff, dq, 0); @@ -1266,6 +1263,12 @@ _dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu, _dispatch_trace_runtime_event(sync_sync_handoff, dq, 0); } #endif // DISPATCH_INTROSPECTION + + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // Falling into case (2) of _dispatch_async_and_wait_f_slow, dc_other is + // the bottom queue + dsc->dc_other = dq; + } return _dispatch_waiter_wake(dsc, wlh, old_state, new_state); } @@ -1284,13 +1287,18 @@ _dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, transfer_lock_again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } + new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (_dq_state_is_base_wlh(old_state)) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (next_dc) { // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -1332,7 +1340,13 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, enqueue = 0; } +again: os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); + _dispatch_queue_move_to_contended_sync(dq->_as_dq); + os_atomic_rmw_loop_give_up(goto again); + } 
new_state = _dq_state_merge_qos(old_state - owned, qos); new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; if (unlikely(_dq_state_is_suspended(old_state))) { @@ -1379,8 +1393,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dq, old_state, new_state, flags); } @@ -1554,11 +1567,8 @@ _dispatch_wait_prepare(dispatch_queue_t dq) os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { if (_dq_state_is_suspended(old_state) || - !_dq_state_is_base_wlh(old_state)) { - os_atomic_rmw_loop_give_up(return old_state); - } - if (!_dq_state_drain_locked(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_is_base_wlh(old_state) || + !_dq_state_in_uncontended_sync(old_state)) { os_atomic_rmw_loop_give_up(return old_state); } new_state = old_state | DISPATCH_QUEUE_RECEIVED_SYNC_WAIT; @@ -1639,7 +1649,7 @@ __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) _dispatch_trace_runtime_event(sync_wait, dq, 0); if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_wait(&dsc->dsc_event); // acquire - } else { + } else if (!dsc->dsc_wlh_self_wakeup) { _dispatch_event_loop_wait_for_ownership(dsc); } if (dsc->dc_data == DISPATCH_WLH_ANON) { @@ -1724,6 +1734,8 @@ _dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt, __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq); if (dsc.dsc_func == NULL) { + // dsc_func being cleared means that the block ran on another thread ie. + // case (2) as listed in _dispatch_async_and_wait_f_slow. 
dispatch_queue_t stop_dq = dsc.dc_other; return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags); } @@ -1981,6 +1993,34 @@ static void _dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags, dispatch_sync_context_t dsc, dispatch_queue_t tq) { + /* dc_other is an in-out parameter. + * + * As an in-param, it specifies the top queue on which the blocking + * primitive is called. + * + * As an out-param, it refers to the queue up till which we have the drain + * lock. This is slightly different depending on how we come out of + * _WAIT_FOR_QUEUE. + * + * Case 1: + * If the continuation is to be invoked on another thread - for + * async_and_wait, or we ran on a thread bound main queue - then someone + * already called _dispatch_async_and_wait_invoke which invoked the block + * already. dc_other as an outparam here tells the enqueuer the queue up + * till which the enqueuer got the drain lock so that we know what to unlock + * on the way out. This is the case whereby the enqueuer owns part of the + * locks in the queue hierachy (but not all). + * + * Case 2: + * If the continuation is to be invoked on the enqueuing thread - because + * we were contending with another sync or async_and_wait - then enqueuer + * return from _WAIT_FOR_QUEUE without having invoked the block. The + * enqueuer has had the locks for the rest of the queue hierachy handed off + * to it so dc_other specifies the queue up till which it has the locks + * which in this case, is up till the bottom queue in the hierachy. So it + * needs to unlock everything up till the bottom queue, on the way out. 
+ */ + __DISPATCH_WAIT_FOR_QUEUE__(dsc, tq); if (unlikely(dsc->dsc_func == NULL)) { @@ -2008,11 +2048,19 @@ _dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, DISPATCH_ALWAYS_INLINE static inline bool -_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, dispatch_tid tid, - uintptr_t dc_flags) +_dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, + dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags) { uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { + // Remove the async_and_wait flag but drive down the slow path so that + // we do the synchronous wait. We are guaranteed that dq is the base + // queue. + // + // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so + // set dc_other to dq + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return false; } if (likely(dc_flags & DC_FLAG_BARRIER)) { @@ -2032,7 +2080,8 @@ _dispatch_async_and_wait_recurse(dispatch_queue_t top_dq, _dispatch_trace_item_push(top_dq, dsc); for (;;) { - if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, tid, dc_flags))){ + if (unlikely(!_dispatch_async_and_wait_recurse_one(dq, dsc, tid, + dc_flags))) { return _dispatch_async_and_wait_f_slow(top_dq, top_flags, dsc, dq); } @@ -2844,7 +2893,7 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, "Release of a locked queue"); } -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 orig_dq_state >>= 32; #endif DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, @@ -2993,7 +3042,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | - DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_UNCONTENDED_SYNC; // backward 
compatibility: only dispatch sources can abuse // dispatch_resume() to really mean dispatch_activate() @@ -3137,7 +3186,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) // dependency ordering for dq state changes that were flushed // and not acted upon os_atomic_thread_fence(dependency); - dq = os_atomic_force_dependency_on(dq, old_state); + dq = os_atomic_inject_dependency(dq, (unsigned long)old_state); } // Balancing the retain_2 done in suspend() for rdar://8181908 dispatch_wakeup_flags_t flags = DISPATCH_WAKEUP_CONSUME_2; @@ -3149,8 +3198,6 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) } return _dispatch_release_2(dq); } - dispatch_assert(!_dq_state_received_sync_wait(old_state)); - dispatch_assert(!_dq_state_in_sync_transfer(old_state)); return dx_wakeup(dq, _dq_state_max_qos(old_state), flags); over_resume: @@ -3509,7 +3556,7 @@ _dispatch_queue_drain_should_narrow_slow(uint64_t now, if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(pp, "Thread QoS corruption"); } - size_t idx = DISPATCH_QOS_BUCKET(qos); + int idx = DISPATCH_QOS_BUCKET(qos); os_atomic(uint64_t) *deadline = &_dispatch_narrowing_deadlines[idx]; uint64_t oldval, newval = now + _dispatch_narrow_check_interval(); @@ -3738,13 +3785,11 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, struct dispatch_object_s *dc = dic->dic_barrier_waiter; dispatch_qos_t qos = dic->dic_barrier_waiter_bucket; if (dc) { + dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; + dsc->dsc_from_async = true; dic->dic_barrier_waiter = NULL; dic->dic_barrier_waiter_bucket = DISPATCH_QOS_UNSPECIFIED; owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; -#if DISPATCH_INTROSPECTION - dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; - dsc->dsc_from_async = true; -#endif if (qos) { return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl, dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned); @@ -3936,10 +3981,14 @@ 
static void _dispatch_workloop_attributes_dispose(dispatch_workloop_t dwl) { if (dwl->dwl_attr) { + if (dwl->dwl_attr->workgroup) { + _os_object_release(dwl->dwl_attr->workgroup->_as_os_obj); + } free(dwl->dwl_attr); } } +#if TARGET_OS_MAC DISPATCH_ALWAYS_INLINE static bool _dispatch_workloop_has_kernel_attributes(dispatch_workloop_t dwl) @@ -3972,6 +4021,7 @@ dispatch_workloop_set_scheduler_priority(dispatch_workloop_t dwl, int priority, dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } } +#endif // TARGET_OS_MAC void dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, @@ -3990,12 +4040,32 @@ dispatch_workloop_set_qos_class_floor(dispatch_workloop_t dwl, dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS; } +#if TARGET_OS_MAC if (flags & DISPATCH_WORKLOOP_FIXED_PRIORITY) { dwl->dwl_attr->dwla_policy = POLICY_RR; dwl->dwl_attr->dwla_flags |= DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } else { dwl->dwl_attr->dwla_flags &= ~DISPATCH_WORKLOOP_ATTR_HAS_POLICY; } +#else // TARGET_OS_MAC + (void)flags; +#endif // TARGET_OS_MAC +} + +void +dispatch_workloop_set_os_workgroup(dispatch_workloop_t dwl, os_workgroup_t wg) +{ + _dispatch_queue_setter_assert_inactive(dwl); + _dispatch_workloop_attributes_alloc_if_needed(dwl); + + os_workgroup_t old_wg = dwl->dwl_attr->workgroup; + if (old_wg) { + _os_object_release(old_wg->_as_os_obj); + } + + /* Take an external ref count on the workgroup */ + _os_object_retain(wg->_as_os_obj); + dwl->dwl_attr->workgroup = wg; } void @@ -4036,6 +4106,7 @@ _dispatch_workloop_set_observer_hooks_4IOHID(dispatch_workloop_t dwl, } #endif +#if TARGET_OS_MAC static void _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, pthread_attr_t *attr) @@ -4043,8 +4114,11 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, uint64_t old_state, new_state; dispatch_queue_global_t dprq; +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wdeprecated" dprq = 
dispatch_pthread_root_queue_create( "com.apple.libdispatch.workloop_fallback", 0, attr, NULL); +#pragma clang diagnostic pop dwl->do_targetq = dprq->_as_dq; _dispatch_retain(dprq); @@ -4067,10 +4141,12 @@ static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, .dgq_thread_pool_size = 1, }; +#endif // TARGET_OS_MAC static void _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) { +#if defined(_POSIX_THREADS) dispatch_workloop_attr_t dwla = dwl->dwl_attr; pthread_attr_t attr; @@ -4078,6 +4154,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_QOS_CLASS) { dwl->dq_priority |= dwla->dwla_pri | DISPATCH_PRIORITY_FLAG_FLOOR; } +#if TARGET_OS_MAC if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_SCHED) { pthread_attr_setschedparam(&attr, &dwla->dwla_sched); // _dispatch_async_and_wait_should_always_async detects when a queue @@ -4087,13 +4164,28 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) dwl->do_targetq = (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; } + + if (dwla->workgroup != NULL) { + // _dispatch_async_and_wait_should_always_async detects when a queue + // targets a root queue that is not part of the root queues array in + // order to force async_and_wait to async. 
We want this path to always + // be taken on workloops that have an associated workgroup with them + // because there is no easy way to join and leave a workgroup for just a + // single block + dwl->do_targetq = + (dispatch_queue_t)_dispatch_custom_workloop_root_queue._as_dq; + } if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_POLICY) { pthread_attr_setschedpolicy(&attr, dwla->dwla_policy); } +#endif // TARGET_OS_MAC +#if HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP if (dwla->dwla_flags & DISPATCH_WORKLOOP_ATTR_HAS_CPUPERCENT) { pthread_attr_setcpupercent_np(&attr, dwla->dwla_cpupercent.percent, (unsigned long)dwla->dwla_cpupercent.refillms); } +#endif // HAVE_PTHREAD_ATTR_SETCPUPERCENT_NP +#if TARGET_OS_MAC if (_dispatch_workloop_has_kernel_attributes(dwl)) { int rv = _pthread_workloop_create((uint64_t)dwl, 0, &attr); switch (rv) { @@ -4108,7 +4200,9 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) dispatch_assert_zero(rv); } } +#endif // TARGET_OS_MAC pthread_attr_destroy(&attr); +#endif // defined(_POSIX_THREADS) } void @@ -4124,7 +4218,7 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, "Release of a locked workloop"); } -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 dq_state >>= 32; #endif DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, @@ -4152,10 +4246,12 @@ _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) dwl->dwl_timer_heap = NULL; } +#if TARGET_OS_MAC if (dwl->dwl_attr && (dwl->dwl_attr->dwla_flags & DISPATCH_WORKLOOP_ATTR_NEEDS_DESTROY)) { (void)dispatch_assume_zero(_pthread_workloop_destroy((uint64_t)dwl)); } +#endif // TARGET_OS_MAC _dispatch_workloop_attributes_dispose(dwl); _dispatch_queue_dispose(dwl, allow_free); } @@ -4210,11 +4306,13 @@ _dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, new_state |= qos_bits; }); +#if DISPATCH_USE_KEVENT_WORKQUEUE dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); if (likely(ddi)) { 
ddi->ddi_wlh_needs_update = true; _dispatch_return_to_kernel(); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE return true; } @@ -4284,7 +4382,7 @@ _dispatch_workloop_invoke(dispatch_workloop_t dwl, { flags &= ~(dispatch_invoke_flags_t)DISPATCH_INVOKE_REDIRECTING_DRAIN; flags |= DISPATCH_INVOKE_WORKLOOP_DRAIN; - _dispatch_queue_class_invoke(dwl, dic, flags, 0,_dispatch_workloop_invoke2); + _dispatch_queue_class_invoke(dwl, dic, flags, 0, _dispatch_workloop_invoke2); } DISPATCH_ALWAYS_INLINE @@ -4317,13 +4415,17 @@ _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, } os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; new_state |= next_owner; if (likely(_dq_state_is_base_wlh(old_state))) { - new_state |= DISPATCH_QUEUE_SYNC_TRANSFER; if (has_more_work) { // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { @@ -4374,7 +4476,13 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, uint64_t old_state, new_state; +transfer_lock_again: os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { + _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); + _dispatch_queue_move_to_contended_sync(dwl->_as_dq); + os_atomic_rmw_loop_give_up(goto transfer_lock_again); + } new_state = _dq_state_merge_qos(old_state, qos); new_state -= DISPATCH_QUEUE_IN_BARRIER; new_state -= DISPATCH_QUEUE_WIDTH_INTERVAL; @@ -4418,8 +4526,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, 
dispatch_assert(!_dq_state_is_enqueued_on_manager(new_state)); if (_dq_state_is_enqueued_on_target(old_state) || _dq_state_is_enqueued_on_target(new_state) || - _dq_state_received_sync_wait(old_state) || - _dq_state_in_sync_transfer(old_state)) { + !_dq_state_in_uncontended_sync(old_state)) { return _dispatch_event_loop_end_ownership((dispatch_wlh_t)dwl, old_state, new_state, flags); } @@ -4510,6 +4617,9 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (_dq_state_max_qos(new_state)) { + // We need to make sure we have the enqueued bit when we are making + // the syscall to update QoS and we know that we will do it since + // we're at the base anyways new_state |= DISPATCH_QUEUE_ENQUEUED; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { @@ -4520,7 +4630,7 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, }); if (unlikely(_dq_state_is_suspended(old_state))) { -#ifndef __LP64__ +#if DISPATCH_SIZEOF_PTR == 4 old_state >>= 32; #endif DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); @@ -4558,11 +4668,12 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | - DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + DISPATCH_QUEUE_UNCONTENDED_SYNC; uint64_t old_state, new_state; os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { - new_state = _dq_state_merge_qos(old_state, qos); + new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; if (unlikely(_dq_state_drain_locked(old_state))) { // not runnable, so we should just handle overrides @@ -4575,14 +4686,36 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, } }); - dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); 
+ if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && + _dispatch_async_and_wait_should_always_async(dwl, new_state)) { + dsc->dc_other = dwl; + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + } + + if (_dq_state_is_base_wlh(new_state) && dsc->dc_data != DISPATCH_WLH_ANON) { + dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); + } if ((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { - return _dispatch_workloop_barrier_complete(dwl, qos, 0); + dispatch_wakeup_flags_t flags = 0; + // We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element + // we pushed is still at the head, we can cheat, dequeue everything, + // and keep pretending we weren't contended. + if (dsc->dsc_wlh_was_first && _dispatch_workloop_get_head(dwl, qos) == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dwl; + } + _dispatch_workloop_pop_head(dwl, qos, dc); + return; + } + return _dispatch_workloop_barrier_complete(dwl, qos, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dwl, new_state, 0); } } @@ -4840,6 +4973,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target) { dispatch_queue_t dq = dqu._dq; + uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT); if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) { @@ -4865,7 +4999,6 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, } if (target) { - uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED; if (target == DISPATCH_QUEUE_WAKEUP_MGR) { 
enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } @@ -4887,8 +5020,9 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, if (likely(!_dq_state_is_suspended(new_state) && !_dq_state_is_enqueued(old_state) && (!_dq_state_drain_locked(old_state) || - (enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR && - _dq_state_is_base_wlh(old_state))))) { + enqueue != DISPATCH_QUEUE_ENQUEUED_ON_MGR))) { + // Always set the enqueued bit for async enqueues on all queues + // in the hierachy new_state |= enqueue; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { @@ -4897,52 +5031,85 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, os_atomic_rmw_loop_give_up(goto done); } }); - - if (likely((old_state ^ new_state) & enqueue)) { - dispatch_queue_t tq; - if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { - // the rmw_loop above has no acquire barrier, as the last block - // of a queue asyncing to that queue is not an uncommon pattern - // and in that case the acquire would be completely useless - // - // so instead use depdendency ordering to read - // the targetq pointer. - os_atomic_thread_fence(dependency); - tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, - (long)new_state); - } else { - tq = target; - } - dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); - } #if HAVE_PTHREAD_WORKQUEUE_QOS - if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { - return _dispatch_queue_wakeup_with_override(dq, new_state, - flags); - } - } } else if (qos) { // // Someone is trying to override the last work item of the queue. 
// - uint64_t old_state, new_state; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { - if (!_dq_state_drain_locked(old_state) || - !_dq_state_is_enqueued(old_state)) { + // Avoid spurious override if the item was drained before we could + // apply an override + if (!_dq_state_drain_locked(old_state) && + !_dq_state_is_enqueued(old_state)) { os_atomic_rmw_loop_give_up(goto done); } new_state = _dq_state_merge_qos(old_state, qos); + if (_dq_state_is_base_wlh(old_state) && + !_dq_state_is_suspended(old_state) && + /* */ + !_dq_state_is_enqueued_on_manager(old_state)) { + + // Always set the enqueued bit for async enqueues on all queues + // in the hierachy (rdar://62447289) + // + // Scenario: + // - mach channel DM + // - targetting TQ + // + // Thread 1: + // - has the lock on (TQ), uncontended sync + // - causes a wakeup at a low QoS on DM, causing it to have: + // max_qos = UT, enqueued = 1 + // - the enqueue of DM onto TQ hasn't happened yet. + // + // Thread 2: + // - an incoming IN IPC is being merged on the servicer + // - DM having qos=UT, enqueud=1, no further enqueue happens, + // but we need an extra override and go through this code for + // TQ. + // - this causes TQ to be "stashed", which requires the enqueued + // bit set, else try_lock_wlh() will complain and the + // wakeup refcounting will be off. + new_state |= enqueue; + } + if (new_state == old_state) { os_atomic_rmw_loop_give_up(goto done); } }); + + target = DISPATCH_QUEUE_WAKEUP_TARGET; +#endif // HAVE_PTHREAD_WORKQUEUE_QOS + } else { + goto done; + } + + if (likely((old_state ^ new_state) & enqueue)) { + dispatch_queue_t tq; + if (target == DISPATCH_QUEUE_WAKEUP_TARGET) { + // the rmw_loop above has no acquire barrier, as the last block + // of a queue asyncing to that queue is not an uncommon pattern + // and in that case the acquire would be completely useless + // + // so instead use depdendency ordering to read + // the targetq pointer. 
+ os_atomic_thread_fence(dependency); + tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, + (long)new_state); + } else { + tq = target; + } + dispatch_assert(_dq_state_is_enqueued(new_state)); + return _dispatch_queue_push_queue(tq, dq, new_state); + } +#if HAVE_PTHREAD_WORKQUEUE_QOS + if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { if (_dq_state_should_override(new_state)) { - return _dispatch_queue_wakeup_with_override(dq, new_state, flags); + return _dispatch_queue_wakeup_with_override(dq, new_state, + flags); } -#endif // HAVE_PTHREAD_WORKQUEUE_QOS } +#endif // HAVE_PTHREAD_WORKQUEUE_QOS done: if (likely(flags & DISPATCH_WAKEUP_CONSUME_2)) { return _dispatch_release_2_tailcall(dq); @@ -4994,6 +5161,10 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, if (unlikely(_dispatch_queue_push_item(dq, dsc))) { if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { + // If this returns true, we know that we are pushing onto the base + // queue + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + dsc->dc_other = dq; return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -5001,7 +5172,8 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | - DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; + DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | + DISPATCH_QUEUE_UNCONTENDED_SYNC; os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; @@ -5020,16 +5192,30 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, } }); - if (_dq_state_is_base_wlh(old_state)) { + if (_dq_state_is_base_wlh(old_state) && dsc->dc_data != DISPATCH_WLH_ANON) { dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); } if 
((old_state ^ new_state) & DISPATCH_QUEUE_IN_BARRIER) { + struct dispatch_object_s *dc = (struct dispatch_object_s *)dsc; + // We came here from __DISPATCH_WAIT_FOR_QUEUE__, if the element + // we pushed is still at the head, we can cheat, dequeue everything, + // and keep pretending we weren't contended. + if (dsc->dsc_wlh_was_first && dq->dq_items_head == dc) { + dsc->dsc_wlh_self_wakeup = true; + if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { + // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects + // dc_other to be the bottom queue of the graph + dsc->dc_other = dq; + } + _dispatch_queue_pop_head(dq, dc); + return; + } return _dispatch_lane_barrier_complete(dq, qos, 0); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } } @@ -5040,7 +5226,7 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, os_atomic_rmw_loop_give_up(return); } }); - if (_dq_state_should_override(new_state)) { + if (_dq_state_should_override_for_waiter(new_state)) { return _dispatch_queue_wakeup_with_override(dq, new_state, 0); } #endif // HAVE_PTHREAD_WORKQUEUE_QOS @@ -5726,6 +5912,7 @@ _dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) }); } +#if DISPATCH_USE_KEVENT_WORKQUEUE DISPATCH_ALWAYS_INLINE static inline bool _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) @@ -5738,6 +5925,7 @@ _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) }); return _dq_state_is_dirty(old_state); } +#endif // DISPATCH_USE_KEVENT_WORKQUEUE static void _dispatch_mgr_queue_drain(void) @@ -5954,6 +6142,21 @@ _dispatch_wlh_worker_thread_reset(void) } } +static inline os_workgroup_t +_dispatch_wlh_get_workgroup(dispatch_wlh_t wlh) +{ + os_workgroup_t wg = NULL; + dispatch_queue_t dq = (dispatch_queue_t) wlh; + if (wlh 
!= DISPATCH_WLH_ANON && (dx_type(dq) == DISPATCH_WORKLOOP_TYPE)) { + dispatch_workloop_t dwl = (dispatch_workloop_t) dq; + if (dwl->dwl_attr) { + wg = dwl->dwl_attr->workgroup; + } + } + + return wg; +} + DISPATCH_ALWAYS_INLINE static void _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, @@ -5969,6 +6172,13 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, }; bool is_manager; + os_workgroup_t wg = _dispatch_wlh_get_workgroup(wlh); + os_workgroup_join_token_s join_token = {0}; + if (wg) { + int rv = os_workgroup_join(wg, &join_token); + dispatch_assert(rv == 0); + } + is_manager = _dispatch_wlh_worker_thread_init(&ddi); if (!is_manager) { _dispatch_trace_runtime_event(worker_event_delivery, @@ -6003,6 +6213,10 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, } } + if (wg) { + os_workgroup_leave(wg, &join_token); + } + _dispatch_deferred_items_set(NULL); if (!is_manager && !ddi.ddi_stashed_dou._do) { _dispatch_perfmon_end(perfmon_thread_event_no_steal); @@ -6086,7 +6300,9 @@ static void _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) { int remaining = n; +#if !defined(_WIN32) int r = ENOSYS; +#endif _dispatch_root_queues_init(); _dispatch_debug_root_queue(dq, __func__); @@ -6144,7 +6360,7 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) "%p", dq); return; } - } while (!os_atomic_cmpxchgvw2o(dq, dgq_thread_pool_size, t_count, + } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count, t_count - remaining, &t_count, acquire)); #if !defined(_WIN32) @@ -6184,9 +6400,11 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) } _dispatch_temporary_resource_shortage(); } +#if DISPATCH_USE_PTHREAD_ROOT_QUEUES if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) { (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE); } +#endif 
CloseHandle((HANDLE)hThread); } while (--remaining); #endif // defined(_WIN32) @@ -6373,15 +6591,12 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi if (_dispatch_queue_drain_try_lock_wlh(dq, &dq_state)) { dx_invoke(dq, &dic, flags); -#if DISPATCH_USE_KEVENT_WORKLOOP // // dx_invoke() will always return `dq` unlocked or locked by another // thread, and either have consumed the +2 or transferred it to the // other thread. // -#endif if (!ddi->ddi_wlh_needs_delete) { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The fate of the workloop thread request has already been dealt // with, which can happen for 4 reasons, for which we just want @@ -6391,10 +6606,8 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // - the workloop has been re-enqueued on the manager queue // - the workloop ownership has been handed off to a sync owner // -#endif goto park; } -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop has been drained to completion or suspended. // dx_invoke() has cleared the enqueued bit before it returned. @@ -6415,7 +6628,6 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // Take over that +1, and add our own to make the +2 this loop expects, // and drain again. // -#endif // DISPATCH_USE_KEVENT_WORKLOOP dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 goto park; @@ -6426,18 +6638,17 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi goto retry; } } else { -#if DISPATCH_USE_KEVENT_WORKLOOP // // The workloop enters this function with a +2 refcount, however we // couldn't acquire the lock due to suspension or discovering that // the workloop was locked by a sync owner. 
// // We need to give up, and _dispatch_event_loop_leave_deferred() - // will do a DISPATCH_WORKLOOP_ASYNC_DISCOVER_SYNC transition to + // will do a DISPATCH_WORKLOOP_SYNC_DISCOVER and + // a DISPATCH_WORKLOOP_ASYNC_QOS_UPDATE transition to // tell the kernel to stop driving this thread request. We leave // a +1 with the thread request, and consume the extra +1 we have. // -#endif if (_dq_state_is_suspended(dq_state)) { dispatch_assert(!_dq_state_is_enqueued(dq_state)); _dispatch_release_2_no_dispose(dq); @@ -6875,6 +7086,8 @@ _dispatch_runloop_handle_is_valid(dispatch_runloop_handle_t handle) return MACH_PORT_VALID(handle); #elif defined(__linux__) return handle >= 0; +#elif defined(_WIN32) + return handle != NULL; #else #error "runloop support not implemented on this platform" #endif @@ -6889,6 +7102,8 @@ _dispatch_runloop_queue_get_handle(dispatch_lane_t dq) #elif defined(__linux__) // decode: 0 is a valid fd, so offset by 1 to distinguish from NULL return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt) - 1; +#elif defined(_WIN32) + return ((dispatch_runloop_handle_t)(uintptr_t)dq->do_ctxt); #else #error "runloop support not implemented on this platform" #endif @@ -6904,6 +7119,8 @@ _dispatch_runloop_queue_set_handle(dispatch_lane_t dq, #elif defined(__linux__) // encode: 0 is a valid fd, so offset by 1 to distinguish from NULL dq->do_ctxt = (void *)(uintptr_t)(handle + 1); +#elif defined(_WIN32) + dq->do_ctxt = (void *)(uintptr_t)handle; #else #error "runloop support not implemented on this platform" #endif @@ -6958,6 +7175,14 @@ _dispatch_runloop_queue_handle_init(void *ctxt) } } handle = fd; +#elif defined(_WIN32) + HANDLE hEvent; + hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, + /*bInitialState=*/FALSE, NULL); + if (hEvent == NULL) { + DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); + } + handle = hEvent; #else #error "runloop support not implemented on this platform" #endif @@ -6984,6 +7209,10 @@ 
_dispatch_runloop_queue_handle_dispose(dispatch_lane_t dq) #elif defined(__linux__) int rc = close(handle); (void)dispatch_assume_zero(rc); +#elif defined(_WIN32) + BOOL bSuccess; + bSuccess = CloseHandle(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif @@ -7016,6 +7245,10 @@ _dispatch_runloop_queue_class_poke(dispatch_lane_t dq) result = eventfd_write(handle, 1); } while (result == -1 && errno == EINTR); (void)dispatch_assume_zero(result); +#elif defined(_WIN32) + BOOL bSuccess; + bSuccess = SetEvent(handle); + (void)dispatch_assume(bSuccess); #else #error "runloop support not implemented on this platform" #endif @@ -7299,7 +7532,7 @@ _dispatch_runloop_root_queue_wakeup_4CF(dispatch_queue_t dq) _dispatch_runloop_queue_wakeup(upcast(dq)._dl, 0, false); } -#if TARGET_OS_MAC +#if TARGET_OS_MAC || defined(_WIN32) dispatch_runloop_handle_t _dispatch_runloop_root_queue_get_port_4CF(dispatch_queue_t dq) { @@ -7324,13 +7557,11 @@ _dispatch_get_main_queue_handle_4CF(void) return _dispatch_runloop_queue_get_handle(dq->_as_dl); } -#if TARGET_OS_MAC dispatch_runloop_handle_t _dispatch_get_main_queue_port_4CF(void) { return _dispatch_get_main_queue_handle_4CF(); } -#endif void _dispatch_main_queue_callback_4CF( @@ -7397,7 +7628,9 @@ _dispatch_sig_thread(void *ctxt DISPATCH_UNUSED) { // never returns, so burn bridges behind us _dispatch_clear_stack(0); -#if !defined(_WIN32) +#if defined(_WIN32) + Sleep(INFINITE); +#else _dispatch_sigsuspend(); #endif } @@ -7535,6 +7768,7 @@ _dispatch_context_cleanup(void *ctxt) DISPATCH_INTERNAL_CRASH(ctxt, "Premature thread exit while a dispatch context is set"); } + #pragma mark - #pragma mark dispatch_init @@ -7641,14 +7875,6 @@ libdispatch_init(void) if (_dispatch_getenv_bool("LIBDISPATCH_STRICT", false)) { _dispatch_mode |= DISPATCH_MODE_STRICT; } -#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR - if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", 
false)) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; - } else if (getpid() == 1 || - !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { - _dispatch_mode |= DISPATCH_MODE_NO_FAULTS; - } -#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR #if DISPATCH_DEBUG || DISPATCH_PROFILE @@ -7691,7 +7917,7 @@ libdispatch_init(void) _dispatch_thread_key_create(&dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); #endif - + pthread_key_create(&_os_workgroup_key, _os_workgroup_tsd_cleanup); #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); #endif @@ -7719,32 +7945,30 @@ libdispatch_init(void) #include #endif -#ifndef __ANDROID__ #ifdef SYS_gettid DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +_gettid(void) { return (pid_t)syscall(SYS_gettid); } #elif defined(__FreeBSD__) DISPATCH_ALWAYS_INLINE static inline pid_t -gettid(void) +_gettid(void) { return (pid_t)pthread_getthreadid_np(); } #elif defined(_WIN32) DISPATCH_ALWAYS_INLINE static inline DWORD -gettid(void) +_gettid(void) { return GetCurrentThreadId(); } #else #error "SYS_gettid unavailable on this system" #endif /* SYS_gettid */ -#endif /* ! 
__ANDROID__ */ #define _tsd_call_cleanup(k, f) do { \ if ((f) && tsd->k) ((void(*)(void*))(f))(tsd->k); \ @@ -7754,7 +7978,7 @@ gettid(void) static void (*_dispatch_thread_detach_callback)(void); void -_dispatch_install_thread_detach_callback(dispatch_function_t cb) +_dispatch_install_thread_detach_callback(void (*cb)(void)) { if (os_atomic_xchg(&_dispatch_thread_detach_callback, cb, relaxed)) { DISPATCH_CLIENT_CRASH(0, "Installing a thread detach callback twice"); @@ -7852,7 +8076,7 @@ libdispatch_tsd_init(void) #else FlsSetValue(__dispatch_tsd_key, &__dispatch_tsd); #endif // defined(_WIN32) - __dispatch_tsd.tid = gettid(); + __dispatch_tsd.tid = _gettid(); } #endif @@ -7886,7 +8110,7 @@ DISPATCH_NOINLINE void _dispatch_fork_becomes_unsafe_slow(void) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_MULTITHREADED, relaxed); if (value & _DISPATCH_UNSAFE_FORK_PROHIBIT) { DISPATCH_CLIENT_CRASH(0, "Transition to multithreaded is prohibited"); @@ -7898,7 +8122,7 @@ void _dispatch_prohibit_transition_to_multithreaded(bool prohibit) { if (prohibit) { - uint8_t value = os_atomic_or(&_dispatch_unsafe_fork, + uint8_t value = (uint8_t)os_atomic_or(&_dispatch_unsafe_fork, _DISPATCH_UNSAFE_FORK_PROHIBIT, relaxed); if (value & _DISPATCH_UNSAFE_FORK_MULTITHREADED) { DISPATCH_CLIENT_CRASH(0, "The executable is already multithreaded"); diff --git a/src/queue_internal.h b/src/queue_internal.h index 713677301..d9425fcf6 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -318,7 +318,7 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, * * sw: has received sync wait (bit 35, if role DISPATCH_QUEUE_ROLE_BASE_WLH) * Set when a queue owner has been exposed to the kernel because of - * dispatch_sync() contention. + * contention with dispatch_sync(). 
*/ #define DISPATCH_QUEUE_RECEIVED_OVERRIDE 0x0000000800000000ull #define DISPATCH_QUEUE_RECEIVED_SYNC_WAIT 0x0000000800000000ull @@ -334,14 +334,14 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, * drain stealers (like the QoS Override codepath). It holds the identity * (thread port) of the current drainer. * - * st: sync transfer (bit 1 or 30) - * Set when a dispatch_sync() is transferred to + * us: uncontended sync (bit 1 or 30) + * Set when a dispatch_sync() isn't contending * * e: enqueued bit (bit 0 or 31) * Set when a queue is enqueued on its target queue */ #define DISPATCH_QUEUE_DRAIN_OWNER_MASK ((uint64_t)DLOCK_OWNER_MASK) -#define DISPATCH_QUEUE_SYNC_TRANSFER ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) +#define DISPATCH_QUEUE_UNCONTENDED_SYNC ((uint64_t)DLOCK_FAILED_TRYLOCK_BIT) #define DISPATCH_QUEUE_ENQUEUED ((uint64_t)DLOCK_WAITERS_BIT) #define DISPATCH_QUEUE_DRAIN_PRESERVED_BITS_MASK \ @@ -350,7 +350,7 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, #define DISPATCH_QUEUE_DRAIN_UNLOCK_MASK \ (DISPATCH_QUEUE_DRAIN_OWNER_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE | \ - DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_SYNC_TRANSFER) + DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_UNCONTENDED_SYNC) /* ******************************************************************************* @@ -467,12 +467,15 @@ typedef struct dispatch_workloop_attr_s *dispatch_workloop_attr_t; typedef struct dispatch_workloop_attr_s { uint32_t dwla_flags; dispatch_priority_t dwla_pri; +#if TARGET_OS_MAC struct sched_param dwla_sched; +#endif // TARGET_OS_MAC int dwla_policy; struct { uint8_t percent; uint32_t refillms; } dwla_cpupercent; + os_workgroup_t workgroup; dispatch_pthread_root_queue_observer_hooks_s dwla_observers; } dispatch_workloop_attr_s; @@ -962,10 +965,10 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); // If dc_flags is less than 0x1000, then the object is a continuation. 
// Otherwise, the object has a private layout and memory management rules. The // layout until after 'do_next' must align with normal objects. -#if __LP64__ +#if DISPATCH_SIZEOF_PTR == 8 #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ union { \ @@ -989,7 +992,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); }; \ struct voucher_s *dc_voucher; \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ struct dispatch_##x##_s *volatile do_next; \ @@ -999,7 +1002,7 @@ dispatch_queue_attr_info_t _dispatch_queue_attr_to_info(dispatch_queue_attr_t); #else #define DISPATCH_CONTINUATION_HEADER(x) \ union { \ - const void *do_vtable; \ + const void *__ptrauth_objc_isa_pointer do_vtable; \ uintptr_t dc_flags; \ }; \ union { \ @@ -1072,12 +1075,11 @@ typedef struct dispatch_sync_context_s { uint8_t dsc_override_qos; uint16_t dsc_autorelease : 2; uint16_t dsc_wlh_was_first : 1; + uint16_t dsc_wlh_self_wakeup : 1; uint16_t dsc_wlh_is_workloop : 1; uint16_t dsc_waiter_needs_cancel : 1; uint16_t dsc_release_storage : 1; -#if DISPATCH_INTROSPECTION uint16_t dsc_from_async : 1; -#endif } *dispatch_sync_context_t; typedef struct dispatch_continuation_vtable_s { diff --git a/src/semaphore.c b/src/semaphore.c index 8f1a25681..1d164f17f 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -21,13 +21,13 @@ #include "internal.h" DISPATCH_WEAK // rdar://problem/8503746 -long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); +intptr_t _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema); #pragma mark - #pragma mark dispatch_semaphore_t dispatch_semaphore_t -dispatch_semaphore_create(long value) +dispatch_semaphore_create(intptr_t value) { dispatch_semaphore_t dsema; @@ -76,12 +76,12 @@ _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz) 
dsema->dsema_sema); #endif offset += dsnprintf(&buf[offset], bufsiz - offset, - "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig); + "value = %" PRIdPTR ", orig = %" PRIdPTR " }", dsema->dsema_value, dsema->dsema_orig); return offset; } DISPATCH_NOINLINE -long +intptr_t _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) { _dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); @@ -89,7 +89,7 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) return 1; } -long +intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema) { long value = os_atomic_inc2o(dsema, dsema_value, release); @@ -104,7 +104,7 @@ dispatch_semaphore_signal(dispatch_semaphore_t dsema) } DISPATCH_NOINLINE -static long +static intptr_t _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout) { @@ -121,7 +121,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (os_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1, + if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1, &orig, relaxed)) { return _DSEMA4_TIMEOUT(); } @@ -135,7 +135,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, return 0; } -long +intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { long value = os_atomic_dec2o(dsema, dsema_value, acquire); @@ -198,7 +198,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) _dispatch_object_class_name(dg), dg); offset += _dispatch_object_debug_attr(dg, &buf[offset], bufsiz - offset); offset += dsnprintf(&buf[offset], bufsiz - offset, - "count = %d, gen = %d, waiters = %d, notifs = %d }", + "count = %u, gen = %d, waiters = %d, notifs = %d }", _dg_state_value(dg_state), _dg_state_gen(dg_state), (bool)(dg_state & DISPATCH_GROUP_HAS_WAITERS), (bool)(dg_state & DISPATCH_GROUP_HAS_NOTIFS)); @@ -206,7 +206,7 @@ _dispatch_group_debug(dispatch_object_t dou, char *buf, 
size_t bufsiz) } DISPATCH_NOINLINE -static long +static intptr_t _dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, dispatch_time_t timeout) { @@ -221,7 +221,7 @@ _dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, } } -long +intptr_t dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { uint64_t old_state, new_state; diff --git a/src/semaphore_internal.h b/src/semaphore_internal.h index 850792df5..b9b6c7bf2 100644 --- a/src/semaphore_internal.h +++ b/src/semaphore_internal.h @@ -32,8 +32,8 @@ struct dispatch_queue_s; DISPATCH_CLASS_DECL(semaphore, OBJECT); struct dispatch_semaphore_s { DISPATCH_OBJECT_HEADER(semaphore); - long volatile dsema_value; - long dsema_orig; + intptr_t volatile dsema_value; + intptr_t dsema_orig; _dispatch_sema4_t dsema_sema; }; diff --git a/src/shims.h b/src/shims.h index ea5e09812..e2377bc7c 100644 --- a/src/shims.h +++ b/src/shims.h @@ -31,9 +31,12 @@ #include #else // defined(_WIN32) #include "shims/generic_win_stubs.h" -#include "shims/generic_sys_queue.h" #endif // defined(_WIN32) +#if defined(_WIN32) || defined(__linux__) +#include "shims/generic_sys_queue.h" +#endif + #ifdef __ANDROID__ #include "shims/android_stubs.h" #endif // __ANDROID__ @@ -62,10 +65,6 @@ #include #endif -#if __has_include() -#include -#endif - #if !HAVE_DECL_FD_COPY #define FD_COPY(f, t) (void)(*(t) = *(f)) #endif @@ -151,10 +150,7 @@ _pthread_workqueue_should_narrow(pthread_priority_t priority) } #endif -#if HAVE_PTHREAD_QOS_H && __has_include() && \ - defined(PTHREAD_MAX_PARALLELISM_PHYSICAL) && \ - DISPATCH_HAVE_HW_CONFIG_COMMPAGE && \ - DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101300) +#if HAVE_PTHREAD_QOS_H && __has_include() #define DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM 1 #define DISPATCH_MAX_PARALLELISM_PHYSICAL PTHREAD_MAX_PARALLELISM_PHYSICAL #else @@ -213,9 +209,7 @@ void __builtin_trap(void); #endif -#ifndef __OS_INTERNAL_ATOMIC__ #include "shims/atomic.h" -#endif #define DISPATCH_ATOMIC64_ALIGN 
__attribute__((aligned(8))) #include "shims/atomic_sfb.h" diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 0bb27d3de..44af102eb 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -32,22 +32,58 @@ #endif // FreeBSD only defines _Bool in C mode. In C++ mode _Bool is not being defined. -#if defined(__cplusplus) && (defined(__FreeBSD__) || defined(_WIN32)) +#if defined(__cplusplus) #define _Bool bool #endif -#include - -#define memory_order_ordered memory_order_seq_cst -#define memory_order_dependency memory_order_acquire -#define os_atomic(type) type _Atomic +#ifndef os_atomic +#define os_atomic(type) type _Atomic volatile +#endif +#ifndef _os_atomic_c11_atomic #define _os_atomic_c11_atomic(p) \ ((__typeof__(*(p)) _Atomic *)(p)) +#endif // This removes the _Atomic and volatile qualifiers on the type of *p +#ifndef _os_atomic_basetypeof #define _os_atomic_basetypeof(p) \ __typeof__(atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_relaxed)) +#endif + +#if __has_include() +#include + +#ifndef __LP64__ +// libdispatch has too many Double-Wide loads for this to be practical +// so just rename everything to the wide variants +#undef os_atomic_load +#define os_atomic_load os_atomic_load_wide + +#undef os_atomic_store +#define os_atomic_store os_atomic_store_wide +#endif + +#if defined(__arm__) || defined(__arm64__) +#define memory_order_ordered memory_order_relaxed +#define memory_order_ordered_smp memory_order_relaxed +#define _os_atomic_mo_ordered memory_order_relaxed +#define _os_atomic_mo_ordered_smp memory_order_relaxed +#else +#define memory_order_ordered memory_order_seq_cst +#define memory_order_ordered_smp memory_order_seq_cst +#define _os_atomic_mo_ordered memory_order_seq_cst +#define _os_atomic_mo_ordered_smp memory_order_seq_cst +#endif + +#define _os_rel_barrier_ordered memory_order_release +#define _os_acq_barrier_ordered memory_order_acquire + +#else // __has_include() +#include + +#define memory_order_ordered 
memory_order_seq_cst +#define memory_order_dependency memory_order_acquire #define os_atomic_load(p, m) \ atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) @@ -96,14 +132,41 @@ #define os_atomic_xor_orig(p, v, m) \ _os_atomic_c11_op_orig((p), (v), m, xor, ^) -#define os_atomic_force_dependency_on(p, e) (p) +typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; + +#define OS_ATOMIC_DEPENDENCY_NONE ((os_atomic_dependency_t){ 0UL }) +#define os_atomic_make_dependency(v) ((void)(v), OS_ATOMIC_DEPENDENCY_NONE) +#define os_atomic_inject_dependency(p, e) \ + ((typeof(*(p)) *)((p) + _os_atomic_auto_dependency(e).__opaque_zero)) #define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) -#define os_atomic_load_with_dependency_on2o(p, f, e) \ - os_atomic_load_with_dependency_on(&(p)->f, e) + os_atomic_load(os_atomic_inject_dependency(p, e), dependency) #define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) +#define os_atomic_inc(p, m) \ + os_atomic_add((p), 1, m) +#define os_atomic_inc_orig(p, m) \ + os_atomic_add_orig((p), 1, m) +#define os_atomic_dec(p, m) \ + os_atomic_sub((p), 1, m) +#define os_atomic_dec_orig(p, m) \ + os_atomic_sub_orig((p), 1, m) + +#define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ + bool _result = false; \ + __typeof__(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (unlikely(!_result)); \ + _result; \ + }) +#define os_atomic_rmw_loop_give_up(expr) \ + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + +#endif // !__has_include() + #define os_atomic_load2o(p, f, m) \ os_atomic_load(&(p)->f, m) #define os_atomic_store2o(p, f, v, m) \ @@ -114,8 +177,6 @@ os_atomic_cmpxchg(&(p)->f, (e), (v), m) #define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m) #define os_atomic_add2o(p, f, v, m) \ os_atomic_add(&(p)->f, (v), m) #define os_atomic_add_orig2o(p, f, v, m) \ @@ -137,38 +198,22 @@ #define os_atomic_xor_orig2o(p, f, v, m) \ os_atomic_xor_orig(&(p)->f, (v), m) -#define os_atomic_inc(p, m) \ - os_atomic_add((p), 1, m) -#define os_atomic_inc_orig(p, m) \ - os_atomic_add_orig((p), 1, m) +#define os_atomic_load_with_dependency_on2o(p, f, e) \ + os_atomic_load_with_dependency_on(&(p)->f, e) + #define os_atomic_inc2o(p, f, m) \ os_atomic_add2o(p, f, 1, m) #define os_atomic_inc_orig2o(p, f, m) \ os_atomic_add_orig2o(p, f, 1, m) -#define os_atomic_dec(p, m) \ - os_atomic_sub((p), 1, m) -#define os_atomic_dec_orig(p, m) \ - os_atomic_sub_orig((p), 1, m) #define os_atomic_dec2o(p, f, m) \ os_atomic_sub2o(p, f, 1, m) #define os_atomic_dec_orig2o(p, f, m) \ os_atomic_sub_orig2o(p, f, 1, m) -#define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - bool _result = false; \ - __typeof__(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (unlikely(!_result)); \ - _result; \ - }) #define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) 
\ os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) + #define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) -#define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #endif // __DISPATCH_SHIMS_ATOMIC__ diff --git a/src/shims/generic_sys_queue.h b/src/shims/generic_sys_queue.h index 1d9a18d0f..b1edeb2f2 100644 --- a/src/shims/generic_sys_queue.h +++ b/src/shims/generic_sys_queue.h @@ -89,4 +89,62 @@ } \ } while(0) +#define TAILQ_HEAD_INITIALIZER(head) \ + { NULL, (head).tq_first } + +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + if ((head1)->tq_last) { \ + (head1)->tq_last->field.te_next = (head2)->tq_first; \ + } else { \ + (head1)->tq_first = (head2)->tq_first; \ + } \ + (head2)->tq_first->field.te_prev = (head1)->tq_last; \ + (head1)->tq_last = (head2)->tq_last; \ + TAILQ_INIT((head2)); \ + } \ + } while (0) + +#define LIST_HEAD(name, type) struct name { \ + struct type *lh_first; \ + } + +#define LIST_ENTRY(type) struct { \ + struct type *le_next; \ + struct type **le_prev; \ + } + +#define LIST_EMPTY(head) ((head)->lh_first == NULL) + +#define LIST_FIRST(head) ((head)->lh_first) + +#define LIST_FOREACH(var, head, field) \ + for ((var) = LIST_FIRST((head)); \ + (var); \ + (var) = LIST_NEXT((var), field)) + +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST((head)); \ + (var) && ((tvar) = LIST_NEXT((var), field), 1); \ + (var) = (tvar)) + +#define LIST_INIT(head) do { \ + LIST_FIRST((head)) = NULL; \ +} while (0) + +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_REMOVE(elm, field) do { \ + if (LIST_NEXT((elm), field) != NULL) \ + LIST_NEXT((elm), field)->field.le_prev = (elm)->field.le_prev; \ + *(elm)->field.le_prev = LIST_NEXT((elm), field); \ + } while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != 
NULL) \ + LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field); \ + LIST_FIRST((head)) = (elm); \ + (elm)->field.le_prev = &LIST_FIRST((head)); \ + } while (0) + #endif // __DISPATCH_SHIMS_SYS_QUEUE__ diff --git a/src/shims/generic_win_stubs.c b/src/shims/generic_win_stubs.c index 67b6f5134..7781673a4 100644 --- a/src/shims/generic_win_stubs.c +++ b/src/shims/generic_win_stubs.c @@ -1,24 +1,84 @@ #include "internal.h" -/* - * This file contains stubbed out functions we are using during - * the initial Windows port. When the port is complete, this file - * should be empty (and thus removed). - */ +typedef void (WINAPI *_precise_time_fn_t)(PULONGLONG); + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_precise_time_pred); +DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryInterruptTimePrecise_ptr); +DISPATCH_STATIC_GLOBAL(_precise_time_fn_t _dispatch_QueryUnbiasedInterruptTimePrecise_ptr); + +typedef NTSTATUS (NTAPI *_NtQueryInformationFile_fn_t)(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + +DISPATCH_STATIC_GLOBAL(dispatch_once_t _dispatch_ntdll_pred); +DISPATCH_STATIC_GLOBAL(_NtQueryInformationFile_fn_t _dispatch_NtQueryInformationFile_ptr); + +bool +_dispatch_handle_is_socket(HANDLE hFile) +{ + // GetFileType() returns FILE_TYPE_PIPE for both pipes and sockets. We can + // disambiguate by checking if PeekNamedPipe() fails with + // ERROR_INVALID_FUNCTION. 
+ if (GetFileType(hFile) == FILE_TYPE_PIPE && + !PeekNamedPipe(hFile, NULL, 0, NULL, NULL, NULL)) { + return GetLastError() == ERROR_INVALID_FUNCTION; + } + return false; +} + +static void +_dispatch_init_precise_time(void *context DISPATCH_UNUSED) +{ + HMODULE kernelbase = LoadLibraryW(L"KernelBase.dll"); + if (!kernelbase) { + DISPATCH_INTERNAL_CRASH(0, "failed to load KernelBase.dll"); + } + _dispatch_QueryInterruptTimePrecise_ptr = (_precise_time_fn_t) + GetProcAddress(kernelbase, "QueryInterruptTimePrecise"); + _dispatch_QueryUnbiasedInterruptTimePrecise_ptr = (_precise_time_fn_t) + GetProcAddress(kernelbase, "QueryUnbiasedInterruptTimePrecise"); + if (!_dispatch_QueryInterruptTimePrecise_ptr) { + DISPATCH_INTERNAL_CRASH(0, "could not locate QueryInterruptTimePrecise"); + } + if (!_dispatch_QueryUnbiasedInterruptTimePrecise_ptr) { + DISPATCH_INTERNAL_CRASH(0, "could not locate QueryUnbiasedInterruptTimePrecise"); + } +} void -_dispatch_runloop_queue_dispose(dispatch_queue_t dq DISPATCH_UNUSED, - bool *allow_free DISPATCH_UNUSED) +_dispatch_QueryInterruptTimePrecise(PULONGLONG lpInterruptTimePrecise) { - WIN_PORT_ERROR(); + dispatch_once_f(&_dispatch_precise_time_pred, NULL, _dispatch_init_precise_time); + return _dispatch_QueryInterruptTimePrecise_ptr(lpInterruptTimePrecise); } void -_dispatch_runloop_queue_xref_dispose(dispatch_queue_t dq DISPATCH_UNUSED) +_dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePrecise) { - WIN_PORT_ERROR(); + dispatch_once_f(&_dispatch_precise_time_pred, NULL, _dispatch_init_precise_time); + return _dispatch_QueryUnbiasedInterruptTimePrecise_ptr(lpUnbiasedInterruptTimePrecise); } -/* - * Stubbed out static data - */ +static void +_dispatch_init_ntdll(void *context DISPATCH_UNUSED) +{ + HMODULE ntdll = LoadLibraryW(L"ntdll.dll"); + if (!ntdll) { + // ntdll is not required. 
+ return; + } + _dispatch_NtQueryInformationFile_ptr = (_NtQueryInformationFile_fn_t) + GetProcAddress(ntdll, "NtQueryInformationFile"); +} + +NTSTATUS _dispatch_NtQueryInformationFile(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass) +{ + dispatch_once_f(&_dispatch_ntdll_pred, NULL, _dispatch_init_ntdll); + if (!_dispatch_NtQueryInformationFile_ptr) { + return STATUS_NOT_SUPPORTED; + } + return _dispatch_NtQueryInformationFile_ptr(FileHandle, IoStatusBlock, + FileInformation, Length, FileInformationClass); +} diff --git a/src/shims/generic_win_stubs.h b/src/shims/generic_win_stubs.h index c983cdcee..985bbe30b 100644 --- a/src/shims/generic_win_stubs.h +++ b/src/shims/generic_win_stubs.h @@ -4,8 +4,12 @@ #include +#include #include #include +#include +#include +#include #include #include @@ -34,4 +38,36 @@ typedef __typeof__(_Generic((__SIZE_TYPE__)0, \ #define WIN_PORT_ERROR() \ _RPTF1(_CRT_ASSERT, "WIN_PORT_ERROR in %s", __FUNCTION__) +#define strcasecmp _stricmp + +bool _dispatch_handle_is_socket(HANDLE hFile); + +/* + * Wrappers for dynamically loaded Windows APIs + */ + +void _dispatch_QueryInterruptTimePrecise(PULONGLONG lpInterruptTimePrecise); +void _dispatch_QueryUnbiasedInterruptTimePrecise(PULONGLONG lpUnbiasedInterruptTimePrecise); + +enum { + FilePipeLocalInformation = 24, +}; + +typedef struct _FILE_PIPE_LOCAL_INFORMATION { + ULONG NamedPipeType; + ULONG NamedPipeConfiguration; + ULONG MaximumInstances; + ULONG CurrentInstances; + ULONG InboundQuota; + ULONG ReadDataAvailable; + ULONG OutboundQuota; + ULONG WriteQuotaAvailable; + ULONG NamedPipeState; + ULONG NamedPipeEnd; +} FILE_PIPE_LOCAL_INFORMATION, *PFILE_PIPE_LOCAL_INFORMATION; + +NTSTATUS _dispatch_NtQueryInformationFile(HANDLE FileHandle, + PIO_STATUS_BLOCK IoStatusBlock, PVOID FileInformation, ULONG Length, + FILE_INFORMATION_CLASS FileInformationClass); + #endif diff --git a/src/shims/lock.c 
b/src/shims/lock.c index f0e493796..e96408981 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -193,7 +193,10 @@ _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count) void _dispatch_sema4_wait(_dispatch_sema4_t *sema) { - int ret = sem_wait(sema); + int ret = 0; + do { + ret = sem_wait(sema); + } while (ret == -1 && errno == EINTR); DISPATCH_SEMAPHORE_VERIFY_RET(ret); } @@ -257,8 +260,7 @@ _pop_timer_resolution(DWORD ms) if (ms) timeEndPeriod(ms); } -void -_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED) +void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED) { HANDLE tmp; @@ -268,7 +270,7 @@ _dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED) _dispatch_temporary_resource_shortage(); } - if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) { + if (!os_atomic_cmpxchg(sema, 0, tmp, relaxed)) { CloseHandle(tmp); } } @@ -305,7 +307,7 @@ _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout) nsec = _dispatch_timeout(timeout); msec = (DWORD)(nsec / (uint64_t)1000000); resolution = _push_timer_resolution(msec); - wait_result = WaitForSingleObject(sema, msec); + wait_result = WaitForSingleObject(*sema, msec); _pop_timer_resolution(resolution); return wait_result == WAIT_TIMEOUT; } @@ -405,48 +407,69 @@ _dispatch_futex(uint32_t *uaddr, int op, uint32_t val, return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3); } +// returns 0, ETIMEDOUT, EFAULT, EINTR, EWOULDBLOCK +DISPATCH_ALWAYS_INLINE +static inline int +_futex_blocking_op(uint32_t *uaddr, int futex_op, uint32_t val, + const struct timespec *timeout, int flags) +{ + for (;;) { + int rc = _dispatch_futex(uaddr, futex_op, val, timeout, NULL, 0, flags); + if (!rc) { + return 0; + } + switch (errno) { + case EINTR: + /* + * if we have a timeout, we need to return for the caller to + * recompute the new deadline, else just go back to wait. 
+ */ + if (timeout == 0) { + continue; + } + /* FALLTHROUGH */ + case ETIMEDOUT: + case EFAULT: + case EWOULDBLOCK: + return errno; + default: + DISPATCH_INTERNAL_CRASH(errno, "_futex_op() failed"); + } + } +} + static int _dispatch_futex_wait(uint32_t *uaddr, uint32_t val, const struct timespec *timeout, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags), - case 0: case EWOULDBLOCK: case ETIMEDOUT: return err; - default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed"); - ); + return _futex_blocking_op(uaddr, FUTEX_WAIT, val, timeout, opflags); } static void _dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags) { - int rc; - _dlock_syscall_switch(err, - rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed"); - ); + int rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, + opflags); + if (rc >= 0 || errno == ENOENT) return; + DISPATCH_INTERNAL_CRASH(errno, "_dlock_wake() failed"); } static void _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout, - NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed"); - ); + int err = _futex_blocking_op(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, + timeout, opflags); + if (err == 0) return; + DISPATCH_CLIENT_CRASH(err, "futex_lock_pi() failed"); } static void _dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags) { - _dlock_syscall_switch(err, - _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags), - case 0: return; - default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); - ); + int rc = _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags); + if (rc == 0) return; + DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed"); 
} #endif @@ -475,16 +498,17 @@ _dispatch_wait_on_address(uint32_t volatile *_address, uint32_t value, (nsecs = _dispatch_timeout(timeout)) != 0); return rc; #elif HAVE_FUTEX + (void)flags; if (nsecs != DISPATCH_TIME_FOREVER) { struct timespec ts = { - .tv_sec = (__typeof__(ts.tv_sec))(nsec / NSEC_PER_SEC), - .tv_nsec = (__typeof__(ts.tv_nsec))(nsec % NSEC_PER_SEC), + .tv_sec = (__typeof__(ts.tv_sec))(nsecs / NSEC_PER_SEC), + .tv_nsec = (__typeof__(ts.tv_nsec))(nsecs % NSEC_PER_SEC), }; return _dispatch_futex_wait(address, value, &ts, FUTEX_PRIVATE_FLAG); } return _dispatch_futex_wait(address, value, NULL, FUTEX_PRIVATE_FLAG); #elif defined(_WIN32) - WaitOnAddress(address, (PVOID)(uintptr_t)value, sizeof(value), INFINITE); + return WaitOnAddress(address, &value, sizeof(value), INFINITE) == TRUE; #else #error _dispatch_wait_on_address unimplemented for this platform #endif @@ -625,7 +649,9 @@ _dispatch_once_wait(dispatch_once_gate_t dgo) { dispatch_lock self = _dispatch_lock_value_for_self(); uintptr_t old_v, new_v; +#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX dispatch_lock *lock = &dgo->dgo_gate.dgl_lock; +#endif uint32_t timeout = 1; for (;;) { @@ -654,7 +680,7 @@ _dispatch_once_wait(dispatch_once_gate_t dgo) _dispatch_futex_wait(lock, (dispatch_lock)new_v, NULL, FUTEX_PRIVATE_FLAG); #else - _dispatch_thread_switch(new_v, flags, timeout++); + _dispatch_thread_switch(new_v, 0, timeout++); #endif (void)timeout; } diff --git a/src/shims/lock.h b/src/shims/lock.h index f50e5913d..a05dd1152 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -213,9 +213,9 @@ typedef HANDLE _dispatch_sema4_t; #define _DSEMA4_POLICY_LIFO 0 #define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1) -#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0) -#define _dispatch_sema4_is_created(sema) (*(sema) != 0) -void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy); +void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy); +#define _dispatch_sema4_is_created(sema) 
((void)sema, 1) +#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy) #else #error "port has to implement _dispatch_sema4_t" @@ -301,7 +301,7 @@ static inline void _dispatch_thread_event_signal(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_inc_orig(&dte->dte_value, release) == 0) { + if (os_atomic_add_orig(&dte->dte_value, 1u, release) == 0) { // 0 -> 1 transition doesn't need a signal // force a wake even when the value is corrupt, // waiters do the validation @@ -319,7 +319,7 @@ static inline void _dispatch_thread_event_wait(dispatch_thread_event_t dte) { #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX - if (os_atomic_dec(&dte->dte_value, acquire) == 0) { + if (os_atomic_sub(&dte->dte_value, 1u, acquire) == 0) { // 1 -> 0 is always a valid transition, so we can return // for any other value, take the slow path which checks it's not corrupt return; diff --git a/src/shims/priority.h b/src/shims/priority.h index 56ea5ce09..3a79c5efb 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -70,6 +70,10 @@ typedef unsigned long pthread_priority_t; #endif // HAVE_PTHREAD_QOS_H +#if !defined(POLICY_RR) && defined(SCHED_RR) +#define POLICY_RR SCHED_RR +#endif // !defined(POLICY_RR) && defined(SCHED_RR) + typedef uint32_t dispatch_qos_t; typedef uint32_t dispatch_priority_t; @@ -85,7 +89,8 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_QOS_SATURATED ((dispatch_qos_t)15) #define DISPATCH_QOS_NBUCKETS (DISPATCH_QOS_MAX - DISPATCH_QOS_MIN + 1) -#define DISPATCH_QOS_BUCKET(qos) ((qos) - DISPATCH_QOS_MIN) +#define DISPATCH_QOS_BUCKET(qos) ((int)((qos) - DISPATCH_QOS_MIN)) +#define DISPATCH_QOS_FOR_BUCKET(bucket) ((dispatch_qos_t)((uint32_t)bucket + DISPATCH_QOS_MIN)) #define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) #define DISPATCH_PRIORITY_RELPRI_SHIFT 0 @@ -165,7 +170,7 @@ _dispatch_qos_to_qos_class(dispatch_qos_t qos) DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t 
-_dispatch_qos_from_queue_priority(long priority) +_dispatch_qos_from_queue_priority(intptr_t priority) { switch (priority) { case DISPATCH_QUEUE_PRIORITY_BACKGROUND: return DISPATCH_QOS_BACKGROUND; diff --git a/src/shims/target.h b/src/shims/target.h index a59dd3c3b..425279b19 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -38,15 +38,15 @@ #if TARGET_OS_OSX # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "OS X hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "OS X hosts older than OS X 10.14 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #elif TARGET_OS_SIMULATOR # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) -# error "Simulator hosts older than OS X 10.12 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101200) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# error "Simulator hosts older than OS X 10.14 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 # if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 diff --git a/src/shims/time.h b/src/shims/time.h index 063d52397..851b819c4 100644 --- a/src/shims/time.h +++ b/src/shims/time.h @@ -108,13 +108,14 @@ _dispatch_get_nanoseconds(void) dispatch_assume_zero(clock_gettime(CLOCK_REALTIME, &ts)); return _dispatch_timespec_to_nano(ts); #elif defined(_WIN32) + static const uint64_t kNTToUNIXBiasAdjustment = 11644473600 * NSEC_PER_SEC; // FILETIME is 100-nanosecond intervals since January 1, 1601 (UTC). 
FILETIME ft; ULARGE_INTEGER li; - GetSystemTimeAsFileTime(&ft); + GetSystemTimePreciseAsFileTime(&ft); li.LowPart = ft.dwLowDateTime; li.HighPart = ft.dwHighDateTime; - return li.QuadPart * 100ull; + return li.QuadPart * 100ull - kNTToUNIXBiasAdjustment; #else struct timeval tv; dispatch_assert_zero(gettimeofday(&tv, NULL)); @@ -148,9 +149,10 @@ _dispatch_uptime(void) struct timespec ts; dispatch_assume_zero(clock_gettime(CLOCK_UPTIME, &ts)); return _dispatch_timespec_to_nano(ts); -#elif TARGET_OS_WIN32 - LARGE_INTEGER now; - return QueryPerformanceCounter(&now) ? now.QuadPart : 0; +#elif defined(_WIN32) + ULONGLONG ullUnbiasedTime; + _dispatch_QueryUnbiasedInterruptTimePrecise(&ullUnbiasedTime); + return ullUnbiasedTime * 100; #else #error platform needs to implement _dispatch_uptime() #endif @@ -171,9 +173,7 @@ _dispatch_monotonic_time(void) return _dispatch_timespec_to_nano(ts); #elif defined(_WIN32) ULONGLONG ullTime; - if (!QueryUnbiasedInterruptTime(&ullTime)) - return 0; - + _dispatch_QueryInterruptTimePrecise(&ullTime); return ullTime * 100ull; #else #error platform needs to implement _dispatch_monotonic_time() @@ -254,17 +254,34 @@ _dispatch_time_now_cached(dispatch_clock_t clock, DISPATCH_ALWAYS_INLINE static inline void -_dispatch_time_to_clock_and_value(dispatch_time_t time, +_dispatch_time_to_clock_and_value(dispatch_time_t time, bool allow_now, dispatch_clock_t *clock, uint64_t *value) { uint64_t actual_value; + + if (allow_now) { + switch (time) { + case DISPATCH_TIME_NOW: + *clock = DISPATCH_CLOCK_UPTIME; + *value = _dispatch_uptime(); + return; + case DISPATCH_MONOTONICTIME_NOW: + *clock = DISPATCH_CLOCK_MONOTONIC; + *value = _dispatch_monotonic_time(); + return; + case DISPATCH_WALLTIME_NOW: + *clock = DISPATCH_CLOCK_WALL; + *value = _dispatch_get_nanoseconds(); + return; + } + } + if ((int64_t)time < 0) { // Wall time or mach continuous time if (time & DISPATCH_WALLTIME_MASK) { // Wall time (value 11 in bits 63, 62) *clock = DISPATCH_CLOCK_WALL; 
- actual_value = time == DISPATCH_WALLTIME_NOW ? - _dispatch_get_nanoseconds() : (uint64_t)-time; + actual_value = (uint64_t)-time; } else { // Continuous time (value 10 in bits 63, 62). *clock = DISPATCH_CLOCK_MONOTONIC; diff --git a/src/shims/tsd.h b/src/shims/tsd.h index f44d7c863..2207d4cd9 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -40,6 +40,11 @@ #include #endif +#if __has_include() +#include +#endif +#include + #if !defined(OS_GS_RELATIVE) && (defined(__i386__) || defined(__x86_64__)) #define OS_GS_RELATIVE __attribute__((address_space(256))) #endif @@ -65,16 +70,8 @@ typedef struct { void *a; void *b; } dispatch_tsd_pair_t; #endif #if DISPATCH_USE_DIRECT_TSD -#ifndef __TSD_THREAD_QOS_CLASS -#define __TSD_THREAD_QOS_CLASS 4 -#endif -#ifndef __TSD_RETURN_TO_KERNEL -#define __TSD_RETURN_TO_KERNEL 5 -#endif -#ifndef __TSD_MACH_SPECIAL_REPLY -#define __TSD_MACH_SPECIAL_REPLY 8 -#endif - +#undef errno +#define errno (*_pthread_errno_address_direct()) static const unsigned long dispatch_priority_key = __TSD_THREAD_QOS_CLASS; static const unsigned long dispatch_r2k_key = __TSD_RETURN_TO_KERNEL; @@ -112,7 +109,8 @@ DISPATCH_TSD_INLINE static inline void _dispatch_thread_key_create(DWORD *k, void (DISPATCH_TSD_DTOR_CC *d)(void *)) { - dispatch_assert_zero((*k = FlsAlloc(d))); + *k = FlsAlloc(d); + dispatch_assert(*k != FLS_OUT_OF_INDEXES); } extern DWORD __dispatch_tsd_key; diff --git a/src/shims/yield.h b/src/shims/yield.h index 4a1a0effe..53eb80065 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -99,10 +99,15 @@ void *_dispatch_wait_for_enqueuer(void **ptr); ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ (DISPATCH_CONTENTION_SPINS_MIN)) / 2) #elif defined(_WIN32) -#define _dispatch_contention_spins() ({ \ - unsigned int _value; \ - rand_s(&_value); \ - (_value & DISPATCH_CONTENTION_SPINS_MAX) | DISPATCH_CONTENTION_SPINS_MIN; }) +// Use randomness to prevent threads from resonating at the same frequency and +// 
permanently contending. Windows doesn't provide rand_r(), so use a simple +// LCG. (msvcrt has rand_s(), but its security guarantees aren't optimal here.) +#define _dispatch_contention_spins() ({ \ + static os_atomic(unsigned int) _seed = 1; \ + unsigned int _next = os_atomic_load(&_seed, relaxed); \ + os_atomic_store(&_seed, _next * 1103515245 + 12345, relaxed); \ + ((_next >> 24) & (DISPATCH_CONTENTION_SPINS_MAX)) | \ + (DISPATCH_CONTENTION_SPINS_MIN); }) #else // Use randomness to prevent threads from resonating at the same // frequency and permanently contending. @@ -145,17 +150,27 @@ void *_dispatch_wait_for_enqueuer(void **ptr); DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) +#elif HAVE_PTHREAD_YIELD_NP +#define _dispatch_preemption_yield(n) { (void)n; pthread_yield_np(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; pthread_yield_np(); } +#elif defined(_WIN32) +#define _dispatch_preemption_yield(n) { (void)n; Sleep(0); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; Sleep(0); } #else -#define _dispatch_preemption_yield(n) pthread_yield_np() -#define _dispatch_preemption_yield_to(th, n) pthread_yield_np() +#define _dispatch_preemption_yield(n) { (void)n; sched_yield(); } +#define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } #endif // HAVE_MACH #pragma mark - #pragma mark _dispatch_contention_usleep #ifndef DISPATCH_CONTENTION_USLEEP_START +#if defined(_WIN32) +#define DISPATCH_CONTENTION_USLEEP_START 1000 // Must be >= 1ms for Sleep() +#else #define DISPATCH_CONTENTION_USLEEP_START 500 #endif +#endif #ifndef DISPATCH_CONTENTION_USLEEP_MAX #define DISPATCH_CONTENTION_USLEEP_MAX 100000 #endif @@ -170,20 +185,7 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #endif #else #if defined(_WIN32) -DISPATCH_INLINE void -_dispatch_contention_usleep(uint64_t useconds) { - static BOOL 
bQPFExecuted = FALSE; - static LARGE_INTEGER liFreq; - LARGE_INTEGER liStart, liNow; - - if (!bQPFExecuted) - bQPFExecuted = QueryPerformanceFrequency(&liFreq); - - QueryPerformanceCounter(&liStart); - do { - QueryPerformanceCounter(&liNow); - } while ((liNow.QuadPart - liStart.QuadPart) / (float)liFreq.QuadPart * 1000 * 1000 < useconds); -} +#define _dispatch_contention_usleep(u) Sleep((u) / 1000) #else #define _dispatch_contention_usleep(u) usleep((u)) #endif diff --git a/src/source.c b/src/source.c index 96c363cdf..b4005dcf6 100644 --- a/src/source.c +++ b/src/source.c @@ -40,7 +40,7 @@ _dispatch_source_get_handler(dispatch_source_refs_t dr, long kind) dispatch_source_t dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, - unsigned long mask, dispatch_queue_t dq) + uintptr_t mask, dispatch_queue_t dq) { dispatch_source_refs_t dr; dispatch_source_t ds; @@ -90,19 +90,19 @@ _dispatch_source_xref_dispose(dispatch_source_t ds) dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); if (unlikely((dqf & DSF_STRICT) && !(dqf & DSF_CANCELED) && _dispatch_source_get_cancel_handler(ds->ds_refs))) { - DISPATCH_CLIENT_CRASH(ds, "Release of a source that has not been " + DISPATCH_CLIENT_CRASH(dqf, "Release of a source that has not been " "cancelled, but has a mandatory cancel handler"); } dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } -long +intptr_t dispatch_source_testcancel(dispatch_source_t ds) { return (bool)(ds->dq_atomic_flags & DSF_CANCELED); } -unsigned long +uintptr_t dispatch_source_get_mask(dispatch_source_t ds) { dispatch_source_refs_t dr = ds->ds_refs; @@ -144,11 +144,11 @@ dispatch_source_get_handle(dispatch_source_t ds) return dr->du_ident; } -unsigned long +uintptr_t dispatch_source_get_data(dispatch_source_t ds) { -#if DISPATCH_USE_MEMORYSTATUS dispatch_source_refs_t dr = ds->ds_refs; +#if DISPATCH_USE_MEMORYSTATUS if (dr->du_vmpressure_override) { return NOTE_VM_PRESSURE; } @@ -197,7 +197,7 @@ 
dispatch_source_get_extended_data(dispatch_source_t ds, } void -dispatch_source_merge_data(dispatch_source_t ds, unsigned long val) +dispatch_source_merge_data(dispatch_source_t ds, uintptr_t val) { dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); dispatch_source_refs_t dr = ds->ds_refs; @@ -418,6 +418,7 @@ dispatch_source_set_registration_handler_f(dispatch_source_t ds, #pragma mark - #pragma mark dispatch_source_invoke +#if TARGET_OS_MAC bool _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) { @@ -429,6 +430,7 @@ _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) } return _dispatch_unote_needs_rearm(ds->ds_refs); } +#endif // TARGET_OS_MAC static void _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, @@ -511,7 +513,7 @@ _dispatch_source_timer_data(dispatch_timer_source_refs_t dr, uint64_t prev) // We hence need dependency ordering to pair with the release barrier // done by _dispatch_timers_run2() when setting the DISARMED_MARKER bit. os_atomic_thread_fence(dependency); - dr = os_atomic_force_dependency_on(dr, data); + dr = os_atomic_inject_dependency(dr, data); if (dr->dt_timer.target < INT64_MAX) { uint64_t now = _dispatch_time_now(DISPATCH_TIMER_CLOCK(dr->du_ident)); @@ -1230,15 +1232,7 @@ _dispatch_timer_config_create(dispatch_time_t start, // future, this will default to UPTIME if no clock was set. 
clock = _dispatch_timer_flags_to_clock(dt->du_timer_flags); } else { - _dispatch_time_to_clock_and_value(start, &clock, &target); - if (target == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - target = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - target = _dispatch_monotonic_time(); - } - } + _dispatch_time_to_clock_and_value(start, true, &clock, &target); } if (clock != DISPATCH_CLOCK_WALL) { @@ -1397,7 +1391,7 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t dq, dispatch_clock_t clock; uint64_t target; - _dispatch_time_to_clock_and_value(when, &clock, &target); + _dispatch_time_to_clock_and_value(when, false, &clock, &target); if (clock != DISPATCH_CLOCK_WALL) { leeway = _dispatch_time_nano2mach(leeway); } diff --git a/src/source_internal.h b/src/source_internal.h index d953629eb..9297ac5cd 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -47,8 +47,9 @@ DISPATCH_CLASS_DECL(channel, QUEUE); dm_cancel_handler_called:1, \ dm_is_xpc:1, \ dm_arm_no_senders:1, \ + dm_made_sendrights:1, \ dm_strict_reply:1, \ - __ds_flags_pad : 9; \ + __ds_flags_pad : 8; \ uint16_t __dq_flags_separation[0]; \ uint16_t \ /* set under the send queue lock */ \ diff --git a/src/swift/Block.swift b/src/swift/Block.swift index e90396bb1..71d998ba6 100644 --- a/src/swift/Block.swift +++ b/src/swift/Block.swift @@ -40,14 +40,24 @@ public class DispatchWorkItem { internal var _block: _DispatchBlock public init(qos: DispatchQoS = .unspecified, flags: DispatchWorkItemFlags = [], block: @escaping @convention(block) () -> ()) { - _block = dispatch_block_create_with_qos_class(dispatch_block_flags_t(UInt32(flags.rawValue)), +#if os(Windows) && (arch(arm64) || arch(x86_64)) + let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) +#else + let flags: dispatch_block_flags_t = numericCast(flags.rawValue) +#endif + _block = dispatch_block_create_with_qos_class(flags, qos.qosClass.rawValue.rawValue, 
Int32(qos.relativePriority), block) } // Used by DispatchQueue.synchronously to provide a path through // dispatch_block_t, as we know the lifetime of the block in question. internal init(flags: DispatchWorkItemFlags = [], noescapeBlock: () -> ()) { - _block = _swift_dispatch_block_create_noescape(dispatch_block_flags_t(UInt32(flags.rawValue)), noescapeBlock) +#if os(Windows) && (arch(arm64) || arch(x86_64)) + let flags = dispatch_block_flags_t(UInt32(flags.rawValue)) +#else + let flags: dispatch_block_flags_t = numericCast(flags.rawValue) +#endif + _block = _swift_dispatch_block_create_noescape(flags, noescapeBlock) } public func perform() { diff --git a/src/swift/Dispatch.swift b/src/swift/Dispatch.swift index 0fd138d6a..2ba819223 100644 --- a/src/swift/Dispatch.swift +++ b/src/swift/Dispatch.swift @@ -38,10 +38,10 @@ public func _dispatchPreconditionTest(_ condition: DispatchPredicate) -> Bool { @_transparent @available(macOS 10.12, iOS 10.0, tvOS 10.0, watchOS 3.0, *) -public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate) { +public func dispatchPrecondition(condition: @autoclosure () -> DispatchPredicate, file: StaticString = #file, line: UInt = #line) { // precondition is able to determine release-vs-debug asserts where the overlay // cannot, so formulating this into a call that we can call with precondition() - precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure") + precondition(_dispatchPreconditionTest(condition()), "dispatchPrecondition failure", file: file, line: line) } /// qos_class_t diff --git a/src/swift/DispatchStubs.cc b/src/swift/DispatchStubs.cc index aef5505bd..0625cc91f 100644 --- a/src/swift/DispatchStubs.cc +++ b/src/swift/DispatchStubs.cc @@ -13,7 +13,11 @@ #include #include +#if defined(__ELF__) || defined(__MACH__) || defined(__WASM__) #define DISPATCH_RUNTIME_STDLIB_INTERFACE __attribute__((__visibility__("default"))) +#else +#define DISPATCH_RUNTIME_STDLIB_INTERFACE 
__declspec(dllexport) +#endif #if USE_OBJC @protocol OS_dispatch_source; @@ -54,6 +58,7 @@ static void _dispatch_overlay_constructor() { #endif /* USE_OBJC */ #if !USE_OBJC +DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); #endif @@ -67,7 +72,11 @@ extern "C" void * objc_retainAutoreleasedReturnValue(void *obj); // eventually call swift_release to balance the retain below. This is a // workaround until the compiler no longer emits this callout on non-ObjC // platforms. -extern "C" void swift_retain(void *); +extern "C" +#if defined(_WIN32) +__declspec(dllimport) +#endif +void swift_retain(void *); DISPATCH_RUNTIME_STDLIB_INTERFACE extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { @@ -78,4 +87,9 @@ extern "C" void * objc_retainAutoreleasedReturnValue(void *obj) { else return NULL; } +#if defined(_WIN32) +extern "C" void *(*__imp_objc_retainAutoreleasedReturnValue)(void *) = + &objc_retainAutoreleasedReturnValue; +#endif + #endif // !USE_OBJC diff --git a/src/swift/IO.swift b/src/swift/IO.swift index 7b0bb81a9..ad985c944 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -11,6 +11,9 @@ //===----------------------------------------------------------------------===// import CDispatch +#if os(Windows) +import WinSDK +#endif extension DispatchIO { @@ -34,12 +37,28 @@ extension DispatchIO { public static let strictInterval = IntervalFlags(rawValue: 1) } +#if os(Windows) + public class func read(fromHandle: HANDLE, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { + dispatch_read(dispatch_fd_t(bitPattern: fromHandle), maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in + handler(DispatchData(borrowedData: data), error) + } + } +#endif + public class func read(fromFileDescriptor: Int32, maxLength: Int, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData, _ error: Int32) -> Void) { 
dispatch_read(dispatch_fd_t(fromFileDescriptor), maxLength, queue.__wrapped) { (data: dispatch_data_t, error: Int32) in handler(DispatchData(borrowedData: data), error) } } +#if os(Windows) + public class func write(toHandle: HANDLE, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping(_ data: DispatchData??, _ error: Int32) -> Void) { + dispatch_write(dispatch_fd_t(bitPattern: toHandle), data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in + handler(data.map { DispatchData(borrowedData: $0) }, error) + } + } +#endif + public class func write(toFileDescriptor: Int32, data: DispatchData, runningHandlerOn queue: DispatchQueue, handler: @escaping (_ data: DispatchData?, _ error: Int32) -> Void) { dispatch_write(dispatch_fd_t(toFileDescriptor), data.__wrapped.__wrapped, queue.__wrapped) { (data: dispatch_data_t?, error: Int32) in handler(data.map { DispatchData(borrowedData: $0) }, error) diff --git a/src/swift/Queue.swift b/src/swift/Queue.swift index 377e27fdd..fe7406c42 100644 --- a/src/swift/Queue.swift +++ b/src/swift/Queue.swift @@ -371,7 +371,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter qos: the QoS at which the work item should be executed. /// Defaults to `DispatchQoS.unspecified`. @@ -402,7 +402,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter qos: the QoS at which the work item should be executed. 
/// Defaults to `DispatchQoS.unspecified`. @@ -433,7 +433,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(deadline:qos:flags:execute:)` @@ -448,7 +448,7 @@ extension DispatchQueue { /// Submits a work item to a dispatch queue for asynchronous execution after /// a specified time. /// - /// - parameter: deadline the time after which the work item should be executed, + /// - parameter deadline: the time after which the work item should be executed, /// given as a `DispatchWallTime`. /// - parameter execute: The work item to be invoked on the queue. /// - SeeAlso: `asyncAfter(wallDeadline:qos:flags:execute:)` diff --git a/src/swift/Source.swift b/src/swift/Source.swift index fa0b3624e..b4315c6cf 100644 --- a/src/swift/Source.swift +++ b/src/swift/Source.swift @@ -12,6 +12,9 @@ import CDispatch import _SwiftDispatchOverlayShims +#if os(Windows) +import WinSDK +#endif extension DispatchSourceProtocol { @@ -113,7 +116,7 @@ extension DispatchSource { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public struct ProcessEvent : OptionSet, RawRepresentable { public let rawValue: UInt public init(rawValue: UInt) { self.rawValue = rawValue } @@ -171,15 +174,28 @@ extension DispatchSource { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public class func makeProcessSource(identifier: pid_t, eventMask: ProcessEvent, queue: DispatchQueue? 
= nil) -> DispatchSourceProcess { let source = dispatch_source_create(_swift_dispatch_source_type_PROC(), UInt(identifier), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceProcess } #endif +#if os(Windows) + public class func makeReadSource(handle: HANDLE, queue: DispatchQueue? = nil) -> DispatchSourceRead { + let source = dispatch_source_create(_swift_dispatch_source_type_READ(), UInt(bitPattern: handle), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceRead + } +#endif + public class func makeReadSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceRead { - let source = dispatch_source_create(_swift_dispatch_source_type_READ(), UInt(fileDescriptor), 0, queue?.__wrapped) +#if os(Windows) + let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) + if handle == UInt(bitPattern: INVALID_HANDLE_VALUE) { fatalError("unable to get underlying handle from file descriptor") } +#else + let handle: UInt = UInt(fileDescriptor) +#endif + let source = dispatch_source_create(_swift_dispatch_source_type_READ(), handle, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceRead } @@ -208,15 +224,28 @@ extension DispatchSource { return DispatchSource(source: source) as DispatchSourceUserDataReplace } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public class func makeFileSystemObjectSource(fileDescriptor: Int32, eventMask: FileSystemEvent, queue: DispatchQueue? = nil) -> DispatchSourceFileSystemObject { let source = dispatch_source_create(_swift_dispatch_source_type_VNODE(), UInt(fileDescriptor), eventMask.rawValue, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceFileSystemObject } #endif +#if os(Windows) + public class func makeWriteSource(handle: HANDLE, queue: DispatchQueue? 
= nil) -> DispatchSourceWrite { + let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), UInt(bitPattern: handle), 0, queue?.__wrapped) + return DispatchSource(source: source) as DispatchSourceWrite + } +#endif + public class func makeWriteSource(fileDescriptor: Int32, queue: DispatchQueue? = nil) -> DispatchSourceWrite { - let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), UInt(fileDescriptor), 0, queue?.__wrapped) +#if os(Windows) + let handle: UInt = UInt(_get_osfhandle(fileDescriptor)) + if handle == UInt(bitPattern: INVALID_HANDLE_VALUE) { fatalError("unable to get underlying handle from file descriptor") } +#else + let handle: UInt = UInt(fileDescriptor) +#endif + let source = dispatch_source_create(_swift_dispatch_source_type_WRITE(), handle, 0, queue?.__wrapped) return DispatchSource(source: source) as DispatchSourceWrite } } @@ -261,7 +290,7 @@ extension DispatchSourceMemoryPressure { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSourceProcess { public var handle: pid_t { return pid_t(dispatch_source_get_handle(self as! DispatchSource)) @@ -617,7 +646,7 @@ extension DispatchSourceTimer { } } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSourceFileSystemObject { public var handle: Int32 { return Int32(dispatch_source_get_handle((self as! 
DispatchSource).__wrapped)) diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index 649043d95..678631b03 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -181,7 +181,7 @@ extension DispatchSource : DispatchSourceMachSend, } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) extension DispatchSource : DispatchSourceProcess, DispatchSourceFileSystemObject { } @@ -272,7 +272,7 @@ public protocol DispatchSourceMemoryPressure : DispatchSourceProtocol { } #endif -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public protocol DispatchSourceProcess : DispatchSourceProtocol { var handle: pid_t { get } @@ -302,7 +302,7 @@ public protocol DispatchSourceTimer : DispatchSourceProtocol { func scheduleRepeating(wallDeadline: DispatchWallTime, interval: Double, leeway: DispatchTimeInterval) } -#if !os(Linux) && !os(Android) +#if !os(Linux) && !os(Android) && !os(Windows) public protocol DispatchSourceFileSystemObject : DispatchSourceProtocol { var handle: Int32 { get } diff --git a/src/time.c b/src/time.c index b70f81343..7c8f81277 100644 --- a/src/time.c +++ b/src/time.c @@ -43,7 +43,7 @@ _dispatch_mach_host_time_mach2nano(uint64_t machtime) return INT64_MAX; } long double big_tmp = ((long double)machtime * data->frac) + .5L; - if (unlikely(big_tmp >= INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -61,7 +61,7 @@ _dispatch_mach_host_time_nano2mach(uint64_t nsec) return INT64_MAX; } long double big_tmp = ((long double)nsec / data->frac) + .5L; - if (unlikely(big_tmp >= INT64_MAX)) { + if (unlikely(big_tmp >= (long double)INT64_MAX)) { return INT64_MAX; } return (uint64_t)big_tmp; @@ -98,7 +98,7 @@ dispatch_time(dispatch_time_t inval, int64_t delta) dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(inval, &clock, &value); + _dispatch_time_to_clock_and_value(inval, true, &clock, &value); if 
(value == DISPATCH_TIME_FOREVER) { // Out-of-range for this clock. return value; @@ -122,14 +122,6 @@ dispatch_time(dispatch_time_t inval, int64_t delta) // up time or monotonic time. "value" has the clock type removed, // so the test against DISPATCH_TIME_NOW is correct for either clock. - if (value == DISPATCH_TIME_NOW) { - if (clock == DISPATCH_CLOCK_UPTIME) { - value = _dispatch_uptime(); - } else { - dispatch_assert(clock == DISPATCH_CLOCK_MONOTONIC); - value = _dispatch_monotonic_time(); - } - } if (delta >= 0) { offset = _dispatch_time_nano2mach((uint64_t)delta); if ((int64_t)(value += offset) <= 0) { @@ -145,6 +137,37 @@ dispatch_time(dispatch_time_t inval, int64_t delta) } } +bool +dispatch_time_to_nsecs(dispatch_time_t time, + dispatch_clockid_t *clock_out, uint64_t *nsecs_out) +{ + dispatch_clock_t clock; + uint64_t nsecs; + + _dispatch_time_to_clock_and_value(time, true, &clock, &nsecs); + + if (nsecs != DISPATCH_TIME_FOREVER) { + switch (clock) { + case DISPATCH_CLOCK_WALL: + *clock_out = DISPATCH_CLOCKID_WALLTIME; + *nsecs_out = nsecs; + return true; + case DISPATCH_CLOCK_UPTIME: + *clock_out = DISPATCH_CLOCKID_UPTIME; + *nsecs_out = nsecs; + return true; + case DISPATCH_CLOCK_MONOTONIC: + *clock_out = DISPATCH_CLOCKID_MONOTONIC; + *nsecs_out = nsecs; + return true; + } + } + + *clock_out = 0; + *nsecs_out = UINT64_MAX; + return false; +} + dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { @@ -166,16 +189,19 @@ uint64_t _dispatch_timeout(dispatch_time_t when) { dispatch_time_t now; - if (when == DISPATCH_TIME_FOREVER) { + + switch (when) { + case DISPATCH_TIME_FOREVER: return DISPATCH_TIME_FOREVER; - } - if (when == DISPATCH_TIME_NOW) { + case DISPATCH_TIME_NOW: + case DISPATCH_MONOTONICTIME_NOW: + case DISPATCH_WALLTIME_NOW: return 0; } dispatch_clock_t clock; uint64_t value; - _dispatch_time_to_clock_and_value(when, &clock, &value); + _dispatch_time_to_clock_and_value(when, false, &clock, &value); if (clock == 
DISPATCH_CLOCK_WALL) { now = _dispatch_get_nanoseconds(); return now >= value ? 0 : value - now; diff --git a/src/voucher.c b/src/voucher.c index 1fbc24717..46e411c68 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -24,8 +24,6 @@ #define PERSONA_ID_NONE ((uid_t)-1) #endif -#if !DISPATCH_VARIANT_DYLD_STUB - #if VOUCHER_USE_MACH_VOUCHER #if !HAVE_PTHREAD_WORKQUEUE_QOS #error Unsupported configuration, workqueue QoS support is required @@ -1155,6 +1153,21 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) DISPATCH_CLIENT_CRASH(_voucher_libtrace_hooks, "voucher_activity_initialize_4libtrace called twice"); } + + // HACK: we can't call into os_variant until after the initialization of + // dispatch and XPC, but we want to do it before the end of libsystem + // initialization to avoid having to synchronize _dispatch_mode explicitly, + // so this happens to be just the right spot +#if HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR + if (_dispatch_getenv_bool("LIBDISPATCH_NO_FAULTS", false)) { + return; + } else if (getpid() == 1 || + !os_variant_has_internal_diagnostics("com.apple.libdispatch")) { + return; + } + + _dispatch_mode &= ~DISPATCH_MODE_NO_FAULTS; +#endif // HAVE_OS_FAULT_WITH_PAYLOAD && TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR } void @@ -1274,6 +1287,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _voucher_activity_disabled(void) { + dispatch_once_f(&_firehose_task_buffer_pred, NULL, _firehose_task_buffer_init); @@ -1620,7 +1634,7 @@ _voucher_debug(voucher_t v, char *buf, size_t bufsiz) v->v_activity, v->v_activity_creator, v->v_parent_activity); } bufprintf(" }"); - + return offset; } @@ -1633,7 +1647,7 @@ format_hex_data(char *prefix, char *desc, uint8_t *data, size_t data_len, uint8_t *pc = data; if (desc) { - bufprintf("%s%s:\n", prefix, desc); + bufprintf("%s%s:\n", prefix, desc); } ssize_t offset_in_row = -1; @@ -1671,10 +1685,6 @@ format_recipe_detail(mach_voucher_attr_recipe_t recipe, char *buf, 
bufprintf("Content size: %u\n", recipe->content_size); switch (recipe->key) { - case MACH_VOUCHER_ATTR_KEY_ATM: - bufprintprefix(); - bufprintf("ATM ID: %llu", *(uint64_t *)(uintptr_t)recipe->content); - break; case MACH_VOUCHER_ATTR_KEY_IMPORTANCE: bufprintprefix(); bufprintf("IMPORTANCE INFO: %s", (char *)recipe->content); @@ -1739,7 +1749,7 @@ voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, } else { bufprintprefix(); bufprintf("Invalid voucher: 0x%x\n", voucher); - } + } done: return offset; @@ -1872,6 +1882,7 @@ _voucher_dispose(voucher_t voucher) (void)voucher; } +#if __has_include() voucher_t voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) { @@ -1915,6 +1926,7 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf (void)persona_info; return -1; } +#endif // __has_include() void _voucher_activity_debug_channel_init(void) @@ -1935,8 +1947,8 @@ _voucher_init(void) void* voucher_activity_get_metadata_buffer(size_t *length) { - *length = 0; - return NULL; + *length = 0; + return NULL; } voucher_t @@ -2023,17 +2035,3 @@ _voucher_debug(voucher_t v, char* buf, size_t bufsiz) } #endif // VOUCHER_USE_MACH_VOUCHER - -#else // DISPATCH_VARIANT_DYLD_STUB - -firehose_activity_id_t -voucher_get_activity_id_4dyld(void) -{ -#if VOUCHER_USE_MACH_VOUCHER - return _voucher_get_activity_id(_voucher_get(), NULL); -#else - return 0; -#endif -} - -#endif // DISPATCH_VARIANT_DYLD_STUB diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 37d0935ac..ea84ff847 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -155,7 +155,7 @@ OS_ENUM(voucher_fields, uint16_t, typedef struct voucher_s { _OS_OBJECT_HEADER( - struct voucher_vtable_s *os_obj_isa, + struct voucher_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); struct voucher_hash_entry_s { @@ -233,7 +233,7 @@ _voucher_hash_store_to_prev_ptr(uintptr_t prev_ptr, struct voucher_s *v) #if 
VOUCHER_ENABLE_RECIPE_OBJECTS typedef struct voucher_recipe_s { _OS_OBJECT_HEADER( - const _os_object_vtable_s *os_obj_isa, + const _os_object_vtable_s *__ptrauth_objc_isa_pointer os_obj_isa, os_obj_ref_cnt, os_obj_xref_cnt); size_t vr_allocation_size; diff --git a/src/workgroup.c b/src/workgroup.c new file mode 100644 index 000000000..9a1b98883 --- /dev/null +++ b/src/workgroup.c @@ -0,0 +1,1076 @@ +#include "internal.h" + +#include +#include + +/* Declares struct symbols */ +OS_OBJECT_CLASS_DECL(os_workgroup); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup, + (void (*)(_os_object_t))_os_workgroup_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_dispose); +#endif // USE_OBJC +#define WORKGROUP_CLASS OS_OBJECT_VTABLE(os_workgroup) + +OS_OBJECT_CLASS_DECL(os_workgroup_interval); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup_interval, + (void (*)(_os_object_t))_os_workgroup_interval_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_interval_dispose); +#endif // USE_OBJC +#define WORKGROUP_INTERVAL_CLASS OS_OBJECT_VTABLE(os_workgroup_interval) + +OS_OBJECT_CLASS_DECL(os_workgroup_parallel); +#if !USE_OBJC +OS_OBJECT_VTABLE_INSTANCE(os_workgroup_parallel, + (void (*)(_os_object_t))_os_workgroup_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_dispose); +#endif // USE_OBJC +#define WORKGROUP_PARALLEL_CLASS OS_OBJECT_VTABLE(os_workgroup_parallel) + +#pragma mark Internal functions + +/* These are default workgroup attributes to be used when no user attribute is + * passed in in creation APIs. + * + * For all classes, workgroup propagation is currently not supported. 
+ * + * Class Default attribute Eventually supported + * + * os_workgroup_t propagating nonpropagating, propagating + * os_workgroup_interval_t nonpropagating nonpropagating, propagating + * os_workgroup_parallel_t nonpropagating nonpropagating + * + * Class Default attribute supported + * os_workgroup_t differentiated differentiated, undifferentiated + * os_workgroup_interval_t differentiated differentiated + * os_workgroup_parallel_t undifferentiated undifferentiated, differentiated + */ +static const struct os_workgroup_attr_s _os_workgroup_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = 0, +}; + +static const struct os_workgroup_attr_s _os_workgroup_interval_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING +}; + +static const struct os_workgroup_attr_s _os_workgroup_parallel_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_PARALLEL, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING | + OS_WORKGROUP_ATTR_UNDIFFERENTIATED, +}; + +void +_os_workgroup_xref_dispose(os_workgroup_t wg) +{ + os_workgroup_arena_t arena = wg->wg_arena; + + if (arena == NULL) { + return; + } + + arena->destructor(arena->client_arena); + free(arena); +} + +void +_os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi) +{ + uint64_t wg_state = wgi->wg_state; + if (wg_state & OS_WORKGROUP_INTERVAL_STARTED) { + os_crash("BUG IN CLIENT: Releasing last reference to workgroup interval " + "while an interval has been started"); + } +} + +static inline bool +_os_workgroup_is_configurable(uint64_t wg_state) +{ + return (wg_state & OS_WORKGROUP_OWNER) == OS_WORKGROUP_OWNER; +} + +void +_os_workgroup_dispose(os_workgroup_t wg) +{ + dispatch_assert(wg->joined_cnt == 0); + + kern_return_t kr; + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if 
(_os_workgroup_is_configurable(wg_state)) { + kr = work_interval_destroy(wg->wi); + } else { + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, -1); + } + os_assumes(kr == KERN_SUCCESS); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)wg->name); + } +} + +void +_os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size) +{ + snprintf(buf, size, "wg[%p] = {xref = %d, ref = %d, name = %s}", + (void *) wg, wg->do_xref_cnt + 1, wg->do_ref_cnt + 1, wg->name); +} + +void +_os_workgroup_interval_dispose(os_workgroup_interval_t wgi) +{ + work_interval_instance_free(wgi->wii); +} + +#define os_workgroup_inc_refcount(wg) \ + _os_object_retain_internal(wg->_as_os_obj); + +#define os_workgroup_dec_refcount(wg) \ + _os_object_release_internal(wg->_as_os_obj); + +void +_os_workgroup_tsd_cleanup(void *ctxt) /* Destructor for the tsd key */ +{ + os_workgroup_t wg = (os_workgroup_t) ctxt; + if (wg != NULL) { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN CLIENT: Thread exiting without leaving workgroup '%s'", wg->name); + + os_crash(buf); + } +} + +static os_workgroup_t +_os_workgroup_get_current(void) +{ + return (os_workgroup_t) pthread_getspecific(_os_workgroup_key); +} + +static void +_os_workgroup_set_current(os_workgroup_t new_wg) +{ + if (new_wg != NULL) { + os_workgroup_inc_refcount(new_wg); + } + + os_workgroup_t old_wg = _os_workgroup_get_current(); + pthread_setspecific(_os_workgroup_key, new_wg); + + if (old_wg != NULL) { + os_workgroup_dec_refcount(old_wg); + } +} + +static inline bool +_os_workgroup_attr_is_resolved(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_RESOLVED_INIT); +} + +static inline bool +_os_workgroup_client_attr_initialized(os_workgroup_attr_t attr) +{ + return (attr->sig == _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT) || + (attr->sig == _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT); +} + +static inline bool +_os_workgroup_attr_is_propagating(os_workgroup_attr_t attr) +{ + return 
(attr->wg_attr_flags & OS_WORKGROUP_ATTR_NONPROPAGATING) == 0; +} + +static inline bool +_os_workgroup_attr_is_differentiated(os_workgroup_attr_t attr) +{ + return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_UNDIFFERENTIATED) == 0; +} + +static inline bool +_os_workgroup_type_is_interval_type(os_workgroup_type_t wg_type) +{ + return (wg_type >= OS_WORKGROUP_INTERVAL_TYPE_DEFAULT) && + (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA); +} + +static bool +_os_workgroup_type_is_audio_type(os_workgroup_type_t wg_type) +{ + return (wg_type == OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO) || + (wg_type == OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT); +} + +static inline bool +_os_workgroup_type_is_parallel_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_PARALLEL; +} + +static inline bool +_os_workgroup_type_is_default_type(os_workgroup_type_t wg_type) +{ + return wg_type == OS_WORKGROUP_TYPE_DEFAULT; +} + + +static inline bool +_os_workgroup_has_backing_workinterval(os_workgroup_t wg) +{ + return wg->wi != NULL; +} + +#if !TARGET_OS_SIMULATOR +static os_workgroup_type_t +_wi_flags_to_wg_type(uint32_t wi_flags) +{ + uint32_t type = wi_flags & WORK_INTERVAL_TYPE_MASK; + bool is_unrestricted = (wi_flags & WORK_INTERVAL_FLAG_UNRESTRICTED); + + switch (type) { + case WORK_INTERVAL_TYPE_DEFAULT: + /* Technically, this could be OS_WORKGROUP_INTERVAL_TYPE_DEFAULT + * as well but we can't know so we just assume it's a regular + * workgroup + */ + return OS_WORKGROUP_TYPE_DEFAULT; + case WORK_INTERVAL_TYPE_COREAUDIO: + return (is_unrestricted ? 
OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT : + OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO); + case WORK_INTERVAL_TYPE_COREANIMATION: + /* and WORK_INTERVAL_TYPE_CA_RENDER_SERVER */ + + /* We cannot distinguish between + * OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION and + * OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER since + * WORK_INTERVAL_TYPE_COREANIMATION and + * WORK_INTERVAL_TYPE_CA_RENDER_SERVER have the same value */ + return OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION; + case WORK_INTERVAL_TYPE_HID_DELIVERY: + return OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY; + case WORK_INTERVAL_TYPE_COREMEDIA: + return OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA; + case WORK_INTERVAL_TYPE_CA_CLIENT: + return OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT; + default: + { + char buf[512]; + snprintf(buf, sizeof(buf), "BUG IN DISPATCH: Invalid wi flags = %u", wi_flags); + os_crash(buf); + } + } +} +#endif + +static work_interval_t +_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +{ + /* All workgroups are joinable */ + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; + + switch (attr->wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_DEFAULT: + flags |= WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_UNRESTRICTED; + break; + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + flags |= (WORK_INTERVAL_TYPE_COREAUDIO | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + break; + case OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION: + flags |= WORK_INTERVAL_TYPE_COREANIMATION; + break; + case OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER: + flags |= WORK_INTERVAL_TYPE_CA_RENDER_SERVER; + break; + case OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY: + flags |= WORK_INTERVAL_TYPE_HID_DELIVERY; + break; + case OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA: + flags |= WORK_INTERVAL_TYPE_COREMEDIA; + break; + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + flags |= (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED | + WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | + 
WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); + break; + case OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT: + flags |= WORK_INTERVAL_TYPE_CA_CLIENT | WORK_INTERVAL_FLAG_UNRESTRICTED; + break; + case OS_WORKGROUP_TYPE_DEFAULT: + /* Non-interval workgroup types */ + flags |= WORK_INTERVAL_FLAG_UNRESTRICTED; + break; + default: + os_crash("Creating an os_workgroup of unknown type"); + } + + if (_os_workgroup_attr_is_differentiated(attr)) { + flags |= WORK_INTERVAL_FLAG_GROUP; + } + + work_interval_t wi; + int rv = work_interval_create(&wi, flags); + if (rv) { + errno = rv; + return NULL; + } + + return wi; +} + +static inline bool +_os_workgroup_join_token_initialized(os_workgroup_join_token_t token) +{ + return (token->sig == _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT); +} + +static inline void +_os_workgroup_set_name(os_workgroup_t wg, const char *name) +{ + if (name) { + const char *tmp = _dispatch_strdup_if_mutable(name); + if (tmp != name) { + wg->wg_state |= OS_WORKGROUP_LABEL_NEEDS_FREE; + name = tmp; + } + } + wg->name = name; +} + +static inline bool +_os_workgroup_client_attr_is_valid(os_workgroup_attr_t attr) +{ + return (attr && _os_workgroup_client_attr_initialized(attr)); +} + +static inline bool +_start_time_is_in_past(os_clockid_t clock, uint64_t start) +{ + switch (clock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + return start <= mach_absolute_time(); + } +} + +#pragma mark Private functions + +int +os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, + os_workgroup_interval_type_t interval_type) +{ + int ret = 0; + if (_os_workgroup_client_attr_is_valid(attr) && + _os_workgroup_type_is_interval_type(interval_type)) { + attr->wg_type = interval_type; + } else { + ret = EINVAL; + } + return ret; +} + +int +os_workgroup_attr_set_flags(os_workgroup_attr_t attr, + os_workgroup_attr_flags_t flags) +{ + int ret = 0; + if (_os_workgroup_client_attr_is_valid(attr)) { + attr->wg_attr_flags = flags; + } else { + ret = EINVAL; + } + + return ret; +} + +os_workgroup_t 
+os_workgroup_interval_copy_current_4AudioToolbox(void) +{ + os_workgroup_t wg = _os_workgroup_get_current(); + + if (wg) { + if (_os_workgroup_type_is_audio_type(wg->wg_type)) { + wg = os_retain(wg); + } else { + wg = NULL; + } + } + + return wg; +} + +#pragma mark Public functions + +os_workgroup_t +os_workgroup_create(const char *name, os_workgroup_attr_t attr) +{ + os_workgroup_t wg = NULL; + work_interval_t wi = NULL; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + if (attr == NULL) { + wga = _os_workgroup_attr_default; + attr = &wga; + } else { + if (!_os_workgroup_client_attr_is_valid(attr)) { + errno = EINVAL; + return NULL; + } + + // Make a local copy of the attr + wga = *attr; + attr = &wga; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + { + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = _os_workgroup_attr_default.wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = _os_workgroup_attr_default.wg_type; + } + } + // Fallthrough + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + break; + default: + errno = EINVAL; + return NULL; + } + + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_default_type(attr->wg_type)){ + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + wg->wi = wi; + wg->wg_state = OS_WORKGROUP_OWNER; + wg->wg_type = attr->wg_type; + + _os_workgroup_set_name(wg, name); + + return wg; +} + +os_workgroup_interval_t +os_workgroup_interval_create(const char *name, os_clockid_t 
clock, + os_workgroup_attr_t attr) +{ + os_workgroup_interval_t wgi = NULL; + work_interval_t wi = NULL; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + if (attr == NULL) { + wga = _os_workgroup_interval_attr_default; + attr = &wga; + } else { + if (!_os_workgroup_client_attr_is_valid(attr)) { + errno = EINVAL; + return NULL; + } + + // Make a local copy of the attr + wga = *attr; + attr = &wga; + + if (attr->sig == _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT) { + /* Nothing to do, the client built the attr up from scratch */ + } else if (attr->sig == _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT) { + /* For any fields which are 0, we fill in with default values */ + + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = _os_workgroup_interval_attr_default.wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = _os_workgroup_interval_attr_default.wg_type; + } + } else { + errno = EINVAL; + return NULL; + } + + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_interval_type(attr->wg_type) || + !_os_workgroup_attr_is_differentiated(attr)){ + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroup yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, + sizeof(struct os_workgroup_interval_s)); + wgi->wi = wi; + wgi->clock = clock; + wgi->wii = work_interval_instance_alloc(wi); + wgi->wii_lock = OS_UNFAIR_LOCK_INIT; + wgi->wg_type = attr->wg_type; + wgi->wg_state = OS_WORKGROUP_OWNER; + + _os_workgroup_set_name(wgi->_as_wg, name); + + return wgi; +} + +int +os_workgroup_join_self(os_workgroup_t wg, os_workgroup_join_token_t token, + os_workgroup_index * __unused id_out) +{ + return 
os_workgroup_join(wg, token); +} + +void +os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + return os_workgroup_leave(wg, token); +} + +#pragma mark Public functions + +os_workgroup_parallel_t +os_workgroup_parallel_create(const char *name, os_workgroup_attr_t attr) +{ + os_workgroup_parallel_t wgp = NULL; + + // Clients should only specify NULL attributes. + os_workgroup_attr_s wga; + if (attr == NULL) { + wga = _os_workgroup_parallel_attr_default; + attr = &wga; + } else { + // Make a local copy of the attr + if (!_os_workgroup_client_attr_is_valid(attr)) { + errno = EINVAL; + return NULL; + } + + wga = *attr; + attr = &wga; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + { + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = _os_workgroup_parallel_attr_default.wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = _os_workgroup_parallel_attr_default.wg_type; + } + } + // Fallthrough + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + break; + default: + errno = EINVAL; + return NULL; + } + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_parallel_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } + + wgp = (os_workgroup_t) _os_object_alloc(WORKGROUP_PARALLEL_CLASS, + sizeof(struct os_workgroup_parallel_s)); + wgp->wi = NULL; + wgp->wg_state = OS_WORKGROUP_OWNER; + wgp->wg_type = attr->wg_type; + + _os_workgroup_set_name(wgp, name); + + return wgp; +} + +int +os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) +{ + os_assert(wg != NULL); + os_assert(mach_port_out != NULL); + + *mach_port_out = MACH_PORT_NULL; + + uint64_t wg_state = 
os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + return EINVAL; + } + + if (!_os_workgroup_has_backing_workinterval(wg)) { + return EINVAL; + } + + if (_os_workgroup_is_configurable(wg_state)) { + return work_interval_copy_port(wg->wi, mach_port_out); + } + + kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, + MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + *mach_port_out = wg->port; + return 0; +} + +os_workgroup_t +os_workgroup_create_with_port(const char *name, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + return NULL; + } + +#if !TARGET_OS_SIMULATOR + uint32_t wi_flags = 0; + int ret = work_interval_get_flags_from_port(port, &wi_flags); + if (ret != 0) { + errno = ret; + return NULL; + } +#endif + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; +#if !TARGET_OS_SIMULATOR + wg->wg_type = _wi_flags_to_wg_type(wi_flags); +#else + wg->wg_type = OS_WORKGROUP_TYPE_DEFAULT; +#endif + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workgroup(const char *name, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + kern_return_t kr; + if (_os_workgroup_is_configurable(wg_state)) { + kr = work_interval_copy_port(wg->wi, &new_wg->port); + } else { + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 
1); + new_wg->port = wg->port; + } + os_assumes(kr == KERN_SUCCESS); + + } + + return new_wg; +} + +int +os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t __unused attr) +{ + os_assert(wg != NULL); + + qos_class_t qos = QOS_CLASS_USER_INTERACTIVE; + + switch (wg->wg_type) { + case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: + case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: + return pthread_time_constraint_max_parallelism(0); + default: + return pthread_qos_max_parallelism(qos, 0); + } +} + +int +os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg) { + // We currently don't allow joining multiple workgroups at all, period + return EALREADY; + } + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + return EINVAL; + } + + int rv = 0; + + if (_os_workgroup_has_backing_workinterval(wg)) { + if (_os_workgroup_is_configurable(wg_state)) { + rv = work_interval_join(wg->wi); + } else { + rv = work_interval_join_port(wg->port); + } + } + + if (rv) { + errno = rv; + return rv; + } + + os_atomic_inc(&wg->joined_cnt, relaxed); + + bzero(token, sizeof(struct os_workgroup_join_token_s)); + token->sig = _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT; + + token->thread = _dispatch_thread_port(); + token->old_wg = cur_wg; /* should be null */ + token->new_wg = wg; + + _os_workgroup_set_current(wg); + return 0; +} + +void +os_workgroup_leave(os_workgroup_t wg, os_workgroup_join_token_t token) +{ + if (!_os_workgroup_join_token_initialized(token)) { + os_crash("Join token is corrupt"); + } + + if (token->thread != _dispatch_thread_port()) { + os_crash("Join token provided is for a different thread"); + } + + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if ((token->new_wg != cur_wg) || (cur_wg != wg)) { + os_crash("Join token provided is for a different workgroup than the " + "last one joined by thread"); + } + 
os_assert(token->old_wg == NULL); + + if (_os_workgroup_has_backing_workinterval(wg)) { + dispatch_assume(work_interval_leave() == 0); + } + uint32_t old_joined_cnt = os_atomic_dec_orig(&wg->joined_cnt, relaxed); + if (old_joined_cnt == 0) { + DISPATCH_INTERNAL_CRASH(0, "Joined count underflowed"); + } + _os_workgroup_set_current(NULL); +} + +int +os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable client_arena, + uint32_t max_workers, os_workgroup_working_arena_destructor_t destructor) +{ + size_t arena_size; + // We overflowed, we can't allocate this + if (os_mul_and_add_overflow(sizeof(mach_port_t), max_workers, sizeof(struct os_workgroup_arena_s), &arena_size)) { + errno = ENOMEM; + return errno; + } + + os_workgroup_arena_t wg_arena = calloc(arena_size, 1); + if (wg_arena == NULL) { + errno = ENOMEM; + return errno; + } + wg_arena->max_workers = max_workers; + wg_arena->client_arena = client_arena; + wg_arena->destructor = destructor; + + _os_workgroup_atomic_flags old_state, new_state; + os_workgroup_arena_t old_arena = NULL; + + bool success = os_atomic_rmw_loop(&wg->wg_atomic_flags, old_state, new_state, relaxed, { + if (_wg_joined_cnt(old_state) > 0) { // We can't change the arena while it is in use + os_atomic_rmw_loop_give_up(break); + } + old_arena = _wg_arena(old_state); + + // Remove the old arena and put the new one in + new_state = old_state; + new_state &= ~OS_WORKGROUP_ARENA_MASK; + new_state |= (uint64_t) wg_arena; + }); + + if (!success) { + errno = EBUSY; + free(wg_arena); + return errno; + } + + if (old_arena) { + old_arena->destructor(old_arena->client_arena); + free(old_arena); + } + + return 0; +} + +void * +os_workgroup_get_working_arena(os_workgroup_t wg, os_workgroup_index *_Nullable index_out) +{ + if (_os_workgroup_get_current() != wg) { + os_crash("Thread is not a member of the workgroup"); + } + + /* At this point, we know that since this thread is a member of the wg, we + * won't have the arena replaced out from under 
us so we can modify it + * safely */ + dispatch_assert(wg->joined_cnt > 0); + + os_workgroup_arena_t arena = os_atomic_load(&wg->wg_arena, relaxed); + if (arena == NULL) { + return NULL; + } + + /* if the max_workers was 0 and the client wants an index, then they will + * fail */ + if (index_out != NULL && arena->max_workers == 0) { + os_crash("The arena associated with workgroup is not to be partitioned"); + } + + if (index_out) { + /* Find the index of the current thread in the arena */ + uint32_t found_index = 0; + bool found = false; + for (uint32_t i = 0; i < arena->max_workers; i++) { + if (arena->arena_indices[i] == _dispatch_thread_port()) { + found_index = i; + found = true; + break; + } + } + + if (!found) { + /* Current thread doesn't already have an index, give it one */ + found_index = os_atomic_inc_orig(&arena->next_worker_index, relaxed); + + if (found_index >= arena->max_workers) { + os_crash("Exceeded the maximum number of workers who can access the arena"); + } + arena->arena_indices[found_index] = _dispatch_thread_port(); + } + + *index_out = found_index; + } + + return arena->client_arena; +} + +void +os_workgroup_cancel(os_workgroup_t wg) +{ + os_atomic_or(&wg->wg_state, OS_WORKGROUP_CANCELED, relaxed); +} + +bool +os_workgroup_testcancel(os_workgroup_t wg) +{ + return os_atomic_load(&wg->wg_state, relaxed) & OS_WORKGROUP_CANCELED; +} + +int +os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, + uint64_t deadline, os_workgroup_interval_data_t __unused data) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg != wgi->_as_wg) { + os_crash("Thread is not a member of the workgroup"); + } + + if (deadline < start || (!_start_time_is_in_past(wgi->clock, start))) { + return EINVAL; + } + + bool success = os_unfair_lock_trylock(&wgi->wii_lock); + if (!success) { + // Someone else is concurrently in a start, update or finish method. 
We + // can't make progress here + return EBUSY; + } + + int rv = 0; + uint64_t old_state, new_state; + os_atomic_rmw_loop(&wgi->wg_state, old_state, new_state, relaxed, { + if (old_state & (OS_WORKGROUP_CANCELED | OS_WORKGROUP_INTERVAL_STARTED)) { + rv = EINVAL; + os_atomic_rmw_loop_give_up(break); + } + if (!_os_workgroup_is_configurable(old_state)) { + rv = EPERM; + os_atomic_rmw_loop_give_up(break); + } + new_state = old_state | OS_WORKGROUP_INTERVAL_STARTED; + }); + + if (rv) { + os_unfair_lock_unlock(&wgi->wii_lock); + return rv; + } + + work_interval_instance_t wii = wgi->wii; + work_interval_instance_clear(wii); + + work_interval_instance_set_start(wii, start); + work_interval_instance_set_deadline(wii, deadline); + rv = work_interval_instance_start(wii); + if (rv != 0) { + /* If we failed to start the interval in the kernel, clear the started + * field */ + rv = errno; + os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); + } + + os_unfair_lock_unlock(&wgi->wii_lock); + + return rv; +} + +int +os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, + os_workgroup_interval_data_t __unused data) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg != wgi->_as_wg) { + os_crash("Thread is not a member of the workgroup"); + } + + bool success = os_unfair_lock_trylock(&wgi->wii_lock); + if (!success) { + // Someone else is concurrently in a start, update or finish method. We + // can't make progress here + return EBUSY; + } + + uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); + if (!_os_workgroup_is_configurable(wg_state)) { + os_unfair_lock_unlock(&wgi->wii_lock); + return EPERM; + } + + /* Note: We allow updating and finishing an workgroup_interval that has + * already started even if the workgroup has been cancelled - since + * cancellation happens asynchronously and doesn't care about ongoing + * intervals. 
However a subsequent new interval cannot be started */ + if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) { + os_unfair_lock_unlock(&wgi->wii_lock); + return EINVAL; + } + + work_interval_instance_t wii = wgi->wii; + work_interval_instance_set_deadline(wii, deadline); + int rv = work_interval_instance_update(wii); + if (rv != 0) { + rv = errno; + } + + os_unfair_lock_unlock(&wgi->wii_lock); + return rv; +} + +int +os_workgroup_interval_finish(os_workgroup_interval_t wgi, + os_workgroup_interval_data_t __unused data) +{ + os_workgroup_t cur_wg = _os_workgroup_get_current(); + if (cur_wg != wgi->_as_wg) { + os_crash("Thread is not a member of the workgroup"); + } + + bool success = os_unfair_lock_trylock(&wgi->wii_lock); + if (!success) { + // Someone else is concurrently in a start, update or finish method. We + // can't make progress here + return EBUSY; + } + + uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); + if (!_os_workgroup_is_configurable(wg_state)) { + os_unfair_lock_unlock(&wgi->wii_lock); + return EPERM; + } + if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) { + os_unfair_lock_unlock(&wgi->wii_lock); + return EINVAL; + } + + work_interval_instance_t wii = wgi->wii; + uint64_t current_finish = 0; + switch (wgi->clock) { + case OS_CLOCK_MACH_ABSOLUTE_TIME: + current_finish = mach_absolute_time(); + break; + } + + work_interval_instance_set_finish(wii, current_finish); + int rv = work_interval_instance_finish(wii); + if (rv != 0) { + rv = errno; + } else { + /* If we succeeded in finishing, clear the started bit */ + os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); + } + + os_unfair_lock_unlock(&wgi->wii_lock); + return rv; +} diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h new file mode 100644 index 000000000..59090600a --- /dev/null +++ b/src/workgroup_internal.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +/* + * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch + * which are subject to change in future releases of Mac OS X. Any applications + * relying on these interfaces WILL break. + */ + +#ifndef __OS_WORKGROUP_INTERNAL__ +#define __OS_WORKGROUP_INTERNAL__ + +#include +#include +#include +#include + +void _os_workgroup_xref_dispose(os_workgroup_t wg); +void _os_workgroup_dispose(os_workgroup_t wg); +void _os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi); +void _os_workgroup_interval_dispose(os_workgroup_interval_t wgi); +void _os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size); + +extern pthread_key_t _os_workgroup_key; +void _os_workgroup_tsd_cleanup(void *ctxt); + +/* + * os_workgroup_type_t is an internal representation that is a superset of types + * for various types of workgroups. Currently it only includes + * os_workgroup_interval_type_t and the types specified below + * + * Making the workgroup type uint16_t means that we have a total of 64k types + * which is plenty + */ +typedef uint16_t os_workgroup_type_t; +#define OS_WORKGROUP_TYPE_DEFAULT 0x0 +#define OS_WORKGROUP_TYPE_PARALLEL 0x40 + +/* To be set when the caller provided workgroup attribute has been expanded + * and resolved. 
*/ +#define _OS_WORKGROUP_ATTR_RESOLVED_INIT 0x782618DA +struct os_workgroup_attr_s { + uint32_t sig; + uint32_t wg_attr_flags; + os_workgroup_type_t wg_type; + uint16_t empty; + uint32_t reserved[13]; +}; + +#define _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT 0x4D5F5A58 +struct os_workgroup_join_token_s { + uint32_t sig; + mach_port_t thread; + os_workgroup_t old_wg; + os_workgroup_t new_wg; + uint64_t reserved[2]; +}; + +struct os_workgroup_interval_data_s { + uint32_t sig; + uint32_t reserved[14]; +}; + +/* This is lazily allocated if the arena is used by clients */ +typedef struct os_workgroup_arena_s { + void *client_arena; + os_workgroup_working_arena_destructor_t destructor; + uint32_t max_workers; /* Client specified max size */ + uint32_t next_worker_index; + mach_port_t arena_indices[0]; /* Dyanmic depending on max_workers */ +} *os_workgroup_arena_t; + +#define OS_WORKGROUP_OWNER (1 << 0) +#define OS_WORKGROUP_CANCELED (1 << 1) +#define OS_WORKGROUP_LABEL_NEEDS_FREE (1 << 2) +#define OS_WORKGROUP_INTERVAL_STARTED (1 << 3) + + +/* Note that os_workgroup_type_t doesn't have to be in the wg_atomic_flags, we + * just put it there to pack the struct. + * + * We have to put the arena related state in an atomic because the + * joined_cnt is modified in a real time context as part of os_workgroup_join + * and os_workgroup_leave(). We cannot have a lock and so it needs to all be + * part of a single _os_workgroup_atomic_flags sized atomic state */ + +#if !defined(__LP64__) || (__LP64_ && !defined(__arm64__)) +// For 32 bit watches (armv7), we can only do DCAS up to 64 bits so the union +// type is for uint64_t. 
+// +// 16 bits for tracking the type +// 16 bits for max number of threads which have joined a workgroup (64k is plenty) +// 32 bits for arena pointer +// ----- +// 64 bits +typedef uint64_t _os_workgroup_atomic_flags; + +typedef uint16_t os_joined_cnt_t; +#define OS_WORKGROUP_JOINED_COUNT_SHIFT 48 +#define OS_WORKGROUP_JOINED_COUNT_MASK (((uint64_t) 0xffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT) +#define OS_WORKGROUP_ARENA_MASK 0xffffffffull + +#define OS_WORKGROUP_HEADER_INTERNAL \ + DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \ + os_workgroup_arena_t wg_arena, \ + os_workgroup_type_t wg_type, \ + os_joined_cnt_t joined_cnt \ + ) +#else +// For all 64 bit systems (including arm64_32), we can do DCAS (or quad width +// CAS for arm64_32) so 128 bit union type works +// +// 16 bits for tracking the type +// 16 bits for empty +// 32 bits for max number of threads which have joined a workgroup +// 64 bits for arena pointer +// ----- +// 128 bits +typedef __uint128_t _os_workgroup_atomic_flags; + +typedef uint32_t os_joined_cnt_t; +#define OS_WORKGROUP_JOINED_COUNT_SHIFT 96 +#define OS_WORKGROUP_JOINED_COUNT_MASK (((__uint128_t) 0xffffffff) << OS_WORKGROUP_JOINED_COUNT_SHIFT) +#define OS_WORKGROUP_ARENA_MASK 0xffffffffffffffffull + +#define OS_WORKGROUP_HEADER_INTERNAL \ + DISPATCH_UNION_LE(_os_workgroup_atomic_flags volatile wg_atomic_flags, \ + os_workgroup_arena_t wg_arena, \ + os_workgroup_type_t wg_type, \ + const uint16_t empty, \ + os_joined_cnt_t joined_cnt \ + ) +#endif + +static inline os_joined_cnt_t +_wg_joined_cnt(_os_workgroup_atomic_flags wgaf) +{ + return (os_joined_cnt_t) (((wgaf & OS_WORKGROUP_JOINED_COUNT_MASK)) >> OS_WORKGROUP_JOINED_COUNT_SHIFT); +} + +static inline os_workgroup_arena_t +_wg_arena(_os_workgroup_atomic_flags wgaf) +{ + return (os_workgroup_arena_t) (wgaf & OS_WORKGROUP_ARENA_MASK); +} + +#define OS_WORKGROUP_HEADER \ + struct _os_object_s _as_os_obj[0]; \ + OS_OBJECT_STRUCT_HEADER(workgroup); \ + const char 
*name; \ + uint64_t volatile wg_state; \ + union { \ + work_interval_t wi; \ + mach_port_t port; \ + }; \ + OS_WORKGROUP_HEADER_INTERNAL; + +struct os_workgroup_s { + OS_WORKGROUP_HEADER +}; + +struct os_workgroup_interval_s { + struct os_workgroup_s _as_wg[0]; + OS_WORKGROUP_HEADER + os_clockid_t clock; + /* Needed to serialize updates to wii when there are multiple racey calls to + * os_workgroup_interval_update */ + os_unfair_lock wii_lock; + work_interval_instance_t wii; +}; + +struct os_workgroup_parallel_s { + OS_WORKGROUP_HEADER +}; + +_Static_assert(sizeof(struct os_workgroup_attr_s) == sizeof(struct os_workgroup_attr_opaque_s), + "Incorrect size of workgroup attribute structure"); +_Static_assert(sizeof(struct os_workgroup_join_token_s) == sizeof(struct os_workgroup_join_token_opaque_s), + "Incorrect size of workgroup join token structure"); +_Static_assert(sizeof(struct os_workgroup_interval_data_s) == sizeof(struct os_workgroup_interval_data_opaque_s), + "Incorrect size of workgroup interval data structure"); + +#endif /* __OS_WORKGROUP_INTERNAL__ */ diff --git a/xcodeconfig/libdispatch-dyld-stub.xcconfig b/xcodeconfig/libdispatch-dyld-stub.xcconfig deleted file mode 100644 index 763bafe1e..000000000 --- a/xcodeconfig/libdispatch-dyld-stub.xcconfig +++ /dev/null @@ -1,28 +0,0 @@ -// -// Copyright (c) 2016 Apple Inc. All rights reserved. -// -// @APPLE_APACHE_LICENSE_HEADER_START@ -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// @APPLE_APACHE_LICENSE_HEADER_END@ -// - -PRODUCT_NAME = libdispatch_dyld_stub -INSTALL_PATH = /usr/local/lib/dyld_stub -BUILD_VARIANTS = normal -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) DISPATCH_VARIANT_DYLD_STUB=1 $(STATICLIB_PREPROCESSOR_DEFINITIONS) -OTHER_LDFLAGS = -VERSIONING_SYSTEM = -EXCLUDED_SOURCE_FILE_NAMES = * -INCLUDED_SOURCE_FILE_NAMES = voucher.c // minimal with DISPATCH_VARIANT_DYLD_STUB diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index b10789292..20dc7d438 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -63,6 +63,10 @@ __OS_dispatch_disk_vtable # os_object_t classes _OBJC_CLASS_$_OS_object _OBJC_CLASS_$_OS_voucher +_OBJC_CLASS_$_OS_os_eventlink +_OBJC_CLASS_$_OS_os_workgroup +_OBJC_CLASS_$_OS_os_workgroup_interval +_OBJC_CLASS_$_OS_os_workgroup_parallel #_OBJC_CLASS_$_OS_voucher_recipe # non-os_object_t classes _OBJC_CLASS_$_OS_dispatch_data @@ -90,6 +94,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher +_OBJC_METACLASS_$_OS_os_eventlink +_OBJC_METACLASS_$_OS_os_workgroup +_OBJC_METACLASS_$_OS_os_workgroup_interval +_OBJC_METACLASS_$_OS_os_workgroup_parallel #_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty @@ -121,9 +129,11 @@ __dispatch_logv_pred __dispatch_mach_calendar_pred __dispatch_mach_host_port_pred __dispatch_mach_notify_port_pred +__dispatch_mach_notify_unote __dispatch_mach_xpc_hooks __dispatch_main_heap __dispatch_main_q_handle_pred +__dispatch_memorypressure_source __dispatch_mgr_sched_pred __dispatch_queue_serial_numbers __dispatch_root_queues_pred @@ -140,6 +150,7 @@ __dispatch_mach_notify_port __voucher_default_task_mach_voucher __voucher_hash_lock __voucher_task_mach_voucher +__os_workgroup_key # byte-sized __dispatch_is_daemon @@ -154,3 +165,5 @@ __dispatch_io_fds __dispatch_io_devs_lockq 
__dispatch_io_fds_lockq __dispatch_io_init_pred + +__voucher_activity_disabled.disabled diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index 8ea917e20..c603f0d3c 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -63,6 +63,10 @@ __OS_dispatch_disk_vtable # os_object_t classes _OBJC_CLASS_$_OS_object _OBJC_CLASS_$_OS_voucher +_OBJC_CLASS_$_OS_os_eventlink +_OBJC_CLASS_$_OS_os_workgroup +_OBJC_CLASS_$_OS_os_workgroup_interval +_OBJC_CLASS_$_OS_os_workgroup_parallel #_OBJC_CLASS_$_OS_voucher_recipe # non-os_object_t classes _OBJC_CLASS_$_OS_dispatch_data @@ -90,6 +94,10 @@ _OBJC_METACLASS_$_OS_dispatch_operation _OBJC_METACLASS_$_OS_dispatch_disk _OBJC_METACLASS_$_OS_object _OBJC_METACLASS_$_OS_voucher +_OBJC_METACLASS_$_OS_os_eventlink +_OBJC_METACLASS_$_OS_os_workgroup +_OBJC_METACLASS_$_OS_os_workgroup_interval +_OBJC_METACLASS_$_OS_os_workgroup_parallel #_OBJC_METACLASS_$_OS_voucher_recipe _OBJC_METACLASS_$_OS_dispatch_data _OBJC_METACLASS_$_OS_dispatch_data_empty diff --git a/xcodeconfig/libdispatch.xcconfig b/xcodeconfig/libdispatch.xcconfig index d0b3c9f4d..48f35f27a 100644 --- a/xcodeconfig/libdispatch.xcconfig +++ b/xcodeconfig/libdispatch.xcconfig @@ -18,166 +18,6 @@ // @APPLE_APACHE_LICENSE_HEADER_END@ // -#include "/Makefiles/CoreOS/Xcode/BSD.xcconfig" -#include "/AppleInternal/XcodeConfig/PlatformSupport.xcconfig" - -SDKROOT = macosx.internal -SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator -PRODUCT_NAME = libdispatch -EXECUTABLE_PREFIX = - -SDK_INSTALL_VARIANT = $(SDK_INSTALL_VARIANT_$(DRIVERKIT)) -SDK_INSTALL_VARIANT_1 = driverkit -SDK_INSTALL_VARIANT_ = default -SDK_INSTALL_ROOT = $(SDK_INSTALL_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_ROOT_driverkit = $(DRIVERKITROOT) -SDK_INSTALL_HEADERS_ROOT = $(SDK_INSTALL_HEADERS_ROOT_$(SDK_INSTALL_VARIANT)) -SDK_INSTALL_HEADERS_ROOT_driverkit = $(SDK_INSTALL_ROOT)/$(SDK_RUNTIME_HEADERS_PREFIX) 
-SDK_RUNTIME_HEADERS_PREFIX = Runtime - -INSTALL_PATH = $(SDK_INSTALL_ROOT)/usr/lib/system -PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/dispatch -PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/dispatch -OS_PUBLIC_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/include/os -OS_PRIVATE_HEADERS_FOLDER_PATH = $(SDK_INSTALL_HEADERS_ROOT)/usr/local/include/os -HEADER_SEARCH_PATHS = $(PROJECT_DIR) $(PROJECT_DIR)/private $(PROJECT_DIR)/src -LIBRARY_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/lib/system $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/local/lib -SYSTEM_HEADER_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include -SYSTEM_FRAMEWORK_SEARCH_PATHS = $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks - -INSTALLHDRS_SCRIPT_PHASE = YES -ALWAYS_SEARCH_USER_PATHS = NO -USE_HEADERMAP = NO -BUILD_VARIANTS = normal debug profile - -ONLY_ACTIVE_ARCH = NO -CLANG_LINK_OBJC_RUNTIME = NO -GCC_C_LANGUAGE_STANDARD = gnu11 -CLANG_CXX_LANGUAGE_STANDARD = gnu++11 -ENABLE_STRICT_OBJC_MSGSEND = YES -GCC_ENABLE_CPP_EXCEPTIONS = NO -GCC_STRICT_ALIASING = YES -GCC_SYMBOLS_PRIVATE_EXTERN = YES -GCC_ENABLE_PASCAL_STRINGS = NO -GCC_WARN_SHADOW = YES -GCC_WARN_64_TO_32_BIT_CONVERSION = YES -GCC_WARN_ABOUT_RETURN_TYPE = YES -GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES -GCC_WARN_ABOUT_MISSING_NEWLINE = YES -GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES -GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES -GCC_WARN_SIGN_COMPARE = YES -GCC_WARN_STRICT_SELECTOR_MATCH = YES -GCC_WARN_UNDECLARED_SELECTOR = YES -GCC_WARN_UNINITIALIZED_AUTOS = YES -GCC_WARN_UNKNOWN_PRAGMAS = YES -GCC_WARN_UNUSED_FUNCTION = YES -GCC_WARN_UNUSED_LABEL = YES -GCC_WARN_UNUSED_PARAMETER = YES -GCC_WARN_UNUSED_VARIABLE = YES -CLANG_WARN_ASSIGN_ENUM = YES -CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = 
YES -CLANG_WARN_DOCUMENTATION_COMMENTS = YES -CLANG_WARN__DUPLICATE_METHOD_MATCH = YES -CLANG_WARN_EMPTY_BODY = YES -CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES -CLANG_WARN_INFINITE_RECURSION = YES -CLANG_WARN_OBJC_IMPLICIT_ATOMIC_PROPERTIES = YES -CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS = YES -CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES -CLANG_WARN_SUSPICIOUS_MOVE = YES -CLANG_WARN_UNREACHABLE_CODE = YES -CLANG_WARN_UNGUARDED_AVAILABILITY = YES -GCC_TREAT_WARNINGS_AS_ERRORS = YES -GCC_OPTIMIZATION_LEVEL = s -GCC_NO_COMMON_BLOCKS = YES -GCC_PREPROCESSOR_DEFINITIONS = __DARWIN_NON_CANCELABLE=1 -GCC_PREPROCESSOR_DEFINITIONS[sdk=driverkit*] = $(GCC_PREPROCESSOR_DEFINITIONS) USE_OBJC=0 -STATICLIB_PREPROCESSOR_DEFINITIONS = DISPATCH_VARIANT_STATIC=1 USE_OBJC=0 DISPATCH_USE_DTRACE=0 - -WARNING_CFLAGS = - -// warnings we want -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wall -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wextra -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wmost -WARNING_CFLAGS = $(WARNING_CFLAGS) -Warray-bounds-pointer-arithmetic -WARNING_CFLAGS = $(WARNING_CFLAGS) -Watomic-properties -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcomma -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wconditional-uninitialized -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wcovered-switch-default -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdate-time -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdeprecated -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wdouble-promotion -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wduplicate-enum -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wexpansion-to-defined -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wfloat-equal -WARNING_CFLAGS = $(WARNING_CFLAGS) -Widiomatic-parentheses -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wignored-qualifiers -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wnullable-to-nonnull-conversion -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wobjc-interface-ivars -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wover-aligned -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpacked -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wpointer-arith -WARNING_CFLAGS = 
$(WARNING_CFLAGS) -Wselector -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wstatic-in-inline -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wsuper-class-method-mismatch -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wswitch -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wtautological-compare -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wunused - -// silenced warnings -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unknown-warning-option -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-pedantic -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-bad-function-cast -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++-compat -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-c++98-compat-pedantic -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-align -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-cast-qual -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-disabled-macro-expansion -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-documentation-unknown-command -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-format-nonliteral -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-missing-variable-declarations -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-old-style-cast -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-padded -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-reserved-id-macro -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-shift-sign-overflow -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-undef -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unreachable-code-aggressive -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unused-macros -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-used-but-marked-unused -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-vla -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-unguarded-availability-new -WARNING_CFLAGS = $(WARNING_CFLAGS) -Wno-switch-enum // -Wswitch is enough, this forces explicit listing of all cases mandatory - -OTHER_CFLAGS = -fverbose-asm $(PLATFORM_CFLAGS) -OTHER_CFLAGS[arch=i386][sdk=macosx*] = $(OTHER_CFLAGS) -fno-unwind-tables -fno-asynchronous-unwind-tables -fno-exceptions -OTHER_CFLAGS_normal = -momit-leaf-frame-pointer -OTHER_CFLAGS_profile = 
$(OTHER_CFLAGS_normal) -DDISPATCH_PROFILE=1 -DDISPATCH_PERF_MON=1 -OTHER_CFLAGS_debug = -fstack-protector -fno-inline -O0 -DDISPATCH_DEBUG=1 -DOS_DEBUG=1 -GENERATE_PROFILING_CODE = NO -DYLIB_CURRENT_VERSION = $(CURRENT_PROJECT_VERSION) -SIM_SUFFIX[sdk=*simulator*] = _sim -DYLIB_LDFLAGS = -umbrella System -nodefaultlibs -ldyld -lcompiler_rt -lsystem$(SIM_SUFFIX)_kernel -lsystem$(SIM_SUFFIX)_platform -lsystem$(SIM_SUFFIX)_pthread -lsystem_malloc -lsystem_c -lsystem_blocks -UNWIND_LDFLAGS = -lunwind -UNWIND_LDFLAGS[sdk=driverkit*] = -OBJC_LDFLAGS = -Wl,-upward-lobjc -OBJC_LDFLAGS[sdk=driverkit*] = -LIBDARWIN_LDFLAGS = -Wl,-upward-lsystem_darwin -LIBDARWIN_LDFLAGS[sdk=*simulator*] = -LIBDARWIN_LDFLAGS[sdk=driverkit*] = -ORDER_LDFLAGS = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -Wl,-dirty_data_list,$(SRCROOT)/xcodeconfig/libdispatch.dirty -ORDER_LDFLAGS[sdk=macosx*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -ORDER_LDFLAGS[sdk=driverkit*] = -Wl,-order_file,$(SRCROOT)/xcodeconfig/libdispatch.order -ALIASES_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodeconfig/libdispatch.aliases -OTHER_LDFLAGS = $(OTHER_LDFLAGS) $(DYLIB_LDFLAGS) $(LIBDARWIN_LDFLAGS) $(CR_LDFLAGS) $(UNWIND_LDFLAGS) $(OBJC_LDFLAGS) $(ALIASES_LDFLAGS) $(PLATFORM_LDFLAGS) $(ORDER_LDFLAGS) -OTHER_MIGFLAGS = -novouchers -I$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/include -I${SDKROOT}/${SDK_INSTALL_HEADERS_ROOT}/usr/local/include - -OBJC_SOURCE_FILE_NAMES = *.m -EXCLUDED_SOURCE_FILE_NAMES = $(EXCLUDED_SOURCE_FILE_NAMES_$(SDK_INSTALL_VARIANT)) -EXCLUDED_SOURCE_FILE_NAMES_driverkit = $(EXCLUDED_SOURCE_FILE_NAMES_default) $(OBJC_SOURCE_FILE_NAMES) - COPY_HEADERS_RUN_UNIFDEF = YES COPY_HEADERS_UNIFDEF_FLAGS = -U__DISPATCH_BUILDING_DISPATCH__ -U__linux__ -DTARGET_OS_WIN32=0 -U__ANDROID__ diff --git a/xcodeconfig/libfirehose_kernel.xcconfig b/xcodeconfig/libfirehose_kernel.xcconfig index 
e6d83a3aa..b21315812 100644 --- a/xcodeconfig/libfirehose_kernel.xcconfig +++ b/xcodeconfig/libfirehose_kernel.xcconfig @@ -21,7 +21,7 @@ SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos PRODUCT_NAME = $(TARGET_NAME) INSTALL_PATH = /usr/local/lib/kernel/ -GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 +GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) KERNEL=1 DISPATCH_USE_DTRACE=0 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 OS_ATOMIC_CONFIG_MEMORY_ORDER_DEPENDENCY=1 OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY=0 OTHER_MIGFLAGS = -novouchers OTHER_LDFLAGS = OTHER_CFLAGS = -mkernel -nostdinc -Wno-packed diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh index 1fb149b63..212bf74ab 100755 --- a/xcodescripts/install-headers.sh +++ b/xcodescripts/install-headers.sh @@ -25,8 +25,21 @@ fi mkdir -p "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" || true mkdir -p "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" || true + cp -X "${SCRIPT_INPUT_FILE_1}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" -cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" -cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" -cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" -cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_2}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_3}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_4}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_5}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_6}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_7}" "${DSTROOT}${OS_PUBLIC_HEADERS_FOLDER_PATH}" + + +cp -X "${SCRIPT_INPUT_FILE_8}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_9}" 
"${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_10}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_11}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_12}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_13}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_14}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" +cp -X "${SCRIPT_INPUT_FILE_15}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" diff --git a/xcodescripts/postprocess-headers.sh b/xcodescripts/postprocess-headers.sh index 41f466939..c521fbe96 100755 --- a/xcodescripts/postprocess-headers.sh +++ b/xcodescripts/postprocess-headers.sh @@ -19,3 +19,8 @@ # @APPLE_APACHE_LICENSE_HEADER_END@ # + +unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_1}" "${SCRIPT_INPUT_FILE_1}" || true +unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_2}" "${SCRIPT_INPUT_FILE_2}" || true +unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_3}" "${SCRIPT_INPUT_FILE_3}" || true +unifdef ${COPY_HEADERS_UNIFDEF_FLAGS} -o "${SCRIPT_INPUT_FILE_4}" "${SCRIPT_INPUT_FILE_4}" || true From 55c3a68e9ec47f1c1d5bb9909404ce5f0351edef Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Fri, 4 Feb 2022 21:58:53 +0000 Subject: [PATCH 13/18] libdispatch-1324.41.2 Imported from libdispatch-1324.41.2.tar.gz --- CMakeLists.txt | 208 ++--- PATCHES | 22 + cmake/modules/CMakeLists.txt | 7 + cmake/modules/DispatchCompilerWarnings.cmake | 139 ++-- cmake/modules/DispatchUtilities.cmake | 19 - cmake/modules/SwiftSupport.cmake | 217 ------ cmake/modules/dispatchConfig.cmake.in | 7 + config/config.h | 4 + dispatch/CMakeLists.txt | 12 +- dispatch/base.h | 27 + dispatch/generic_static/module.modulemap | 19 + dispatch/queue.h | 4 +- libdispatch.xcodeproj/project.pbxproj | 222 ++++-- man/dispatch_group_create.3 | 2 +- 
os/firehose_buffer_private.h | 6 +- os/voucher_private.h | 71 +- os/workgroup_interval_private.h | 88 ++- os/workgroup_object_private.h | 172 ++++- os/workgroup_private.h | 8 +- private/apply_private.h | 338 +++++++++ private/private.h | 1 + private/queue_private.h | 89 ++- private/source_private.h | 4 + private/time_private.h | 4 + src/BlocksRuntime/CMakeLists.txt | 38 + src/CMakeLists.txt | 409 ++++------ src/apply.c | 467 ++++++++++-- src/event/event_epoll.c | 1 + src/event/event_internal.h | 28 + src/event/event_kevent.c | 39 +- src/event/event_windows.c | 108 ++- src/event/workqueue.c | 2 +- src/eventlink.c | 25 +- src/firehose/firehose_server.c | 3 +- src/init.c | 177 +++-- src/inline_internal.h | 92 ++- src/internal.h | 21 + src/introspection.c | 4 +- src/io.c | 6 +- src/mach.c | 25 +- src/object.c | 8 +- src/object.m | 13 + src/object_internal.h | 45 +- src/queue.c | 389 ++++++++-- src/queue_internal.h | 76 +- src/semaphore.c | 8 +- src/shims.h | 96 +++ src/shims/hw_config.h | 24 +- src/shims/lock.c | 8 +- src/shims/lock.h | 2 +- src/shims/priority.h | 15 +- src/shims/tsd.h | 23 +- src/shims/yield.c | 11 +- src/shims/yield.h | 26 +- src/source.c | 57 +- src/swift/CMakeLists.txt | 65 ++ src/swift/IO.swift | 2 +- src/swift/Private.swift | 2 +- src/swift/Wrapper.swift | 2 +- src/time.c | 12 +- src/trace.h | 11 + src/transform.c | 7 +- src/voucher.c | 65 ++ src/voucher_internal.h | 5 + src/workgroup.c | 760 +++++++++++++++---- src/workgroup_internal.h | 23 + xcodeconfig/libdispatch.clean | 10 +- xcodeconfig/libdispatch.dirty | 7 +- xcodeconfig/libdispatch.order | 3 + 69 files changed, 3660 insertions(+), 1250 deletions(-) create mode 100644 cmake/modules/CMakeLists.txt delete mode 100644 cmake/modules/DispatchUtilities.cmake create mode 100644 cmake/modules/dispatchConfig.cmake.in create mode 100644 dispatch/generic_static/module.modulemap create mode 100644 private/apply_private.h create mode 100644 src/BlocksRuntime/CMakeLists.txt create mode 100644 
src/swift/CMakeLists.txt diff --git a/CMakeLists.txt b/CMakeLists.txt index 9f3f221e6..36da01122 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,110 +1,80 @@ -cmake_minimum_required(VERSION 3.4.3) +cmake_minimum_required(VERSION 3.15.1) -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") +list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules) + +# NOTE(compnerd) enable CMP0091 - select MSVC runtime based on +# CMAKE_MSVC_RUNTIME_LIBRARY. Requires CMake 3.15 or newer. +if(POLICY CMP0091) + cmake_policy(SET CMP0091 NEW) +endif() project(dispatch - VERSION 1.3 - LANGUAGES C CXX) -enable_testing() + VERSION 1.3 + LANGUAGES C CXX) if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") include(ClangClCompileRules) endif() +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + include(DispatchWindowsSupport) + dispatch_windows_arch_spelling(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_MSVC_ARCH) + dispatch_windows_include_for_arch(${DISPATCH_MSVC_ARCH} DISPATCH_INCLUDES) + include_directories(BEFORE SYSTEM ${DISPATCH_INCLUDES}) + dispatch_windows_lib_for_arch(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_LIBDIR) + link_directories(${DISPATCH_LIBDIR}) +endif() + set(CMAKE_C_STANDARD 11) set(CMAKE_C_STANDARD_REQUIRED YES) set(CMAKE_CXX_STANDARD 11) set(CMAKE_C_VISIBILITY_PRESET hidden) +set(CMAKE_C_VISIBILITY_INLINES_HIDDEN YES) + +# NOTE(compnerd) this is a horrible workaround for Windows to ensure that the +# tests can run as there is no rpath equivalent and `PATH` is used to lookup the +# libraries. 
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) +set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}) set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads REQUIRED) +include(CheckCCompilerFlag) include(CheckCSourceCompiles) include(CheckFunctionExists) include(CheckIncludeFiles) include(CheckLibraryExists) include(CheckSymbolExists) include(GNUInstallDirs) -include(SwiftSupport) -include(DispatchUtilities) - -set(SWIFT_LIBDIR "lib" CACHE PATH "Library folder name, defined by swift main buildscript") -set(INSTALL_LIBDIR "${SWIFT_LIBDIR}" CACHE PATH "Path where the libraries should be installed") +include(CTest) include(DispatchAppleOptions) include(DispatchSanitization) - include(DispatchCompilerWarnings) -dispatch_common_warnings() - -option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) -set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) +include(DTrace) +include(SwiftSupport) # NOTE(abdulras) this is the CMake supported way to control whether we generate # shared or static libraries. This impacts the behaviour of `add_library` in # what type of library it generates. 
option(BUILD_SHARED_LIBS "build shared libraries" ON) -option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) -if(ENABLE_SWIFT) - if(NOT CMAKE_SWIFT_COMPILER) - message(FATAL_ERROR "CMAKE_SWIFT_COMPILER must be defined to enable swift") - endif() - - string(TOLOWER ${CMAKE_SYSTEM_NAME} swift_os) - get_swift_host_arch(swift_arch) - - if(BUILD_SHARED_LIBS) - set(swift_dir swift) - else() - set(swift_dir swift_static) - endif() - - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}/${swift_dir}/${swift_os}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") - set(INSTALL_BLOCK_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "${INSTALL_LIBDIR}/${swift_dir}/os" CACHE PATH "Path where the os/ headers will be installed") -endif() - -if(NOT ENABLE_SWIFT) - set(INSTALL_TARGET_DIR "${INSTALL_LIBDIR}" CACHE PATH "Path where the libraries will be installed") - set(INSTALL_DISPATCH_HEADERS_DIR "include/dispatch" CACHE PATH "Path where the headers will be installed") - set(INSTALL_BLOCK_HEADERS_DIR "include" CACHE PATH "Path where the headers will be installed for the blocks runtime") - set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") -endif() - option(DISPATCH_ENABLE_ASSERTS "enable debug assertions" FALSE) -option(ENABLE_DTRACE "enable dtrace support" "") - -option(ENABLE_TESTING "build libdispatch tests" ON) +option(ENABLE_DISPATCH_INIT_CONSTRUCTOR "enable libdispatch_init as a constructor" ON) +set(USE_LIBDISPATCH_INIT_CONSTRUCTOR ${ENABLE_DISPATCH_INIT_CONSTRUCTOR}) -option(USE_LLD_LINKER "use the lld linker" FALSE) +option(ENABLE_DTRACE "enable dtrace support" "") -if(NOT USE_LLD_LINKER AND - (CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR - 
CMAKE_SYSTEM_NAME STREQUAL Android)) - set(USE_GOLD_LINKER_DEFAULT TRUE) +if(CMAKE_SYSTEM_NAME STREQUAL Darwin OR CMAKE_SYSTEM_NAME STREQUAL FreeBSD) + set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF) else() - set(USE_GOLD_LINKER_DEFAULT FALSE) -endif() -option(USE_GOLD_LINKER "use the gold linker" ${USE_GOLD_LINKER_DEFAULT}) - -option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) -set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) - -if(CMAKE_SYSTEM_NAME STREQUAL Linux OR - CMAKE_SYSTEM_NAME STREQUAL Android OR - CMAKE_SYSTEM_NAME STREQUAL FreeBSD OR - CMAKE_SYSTEM_NAME STREQUAL Windows) set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT ON) -else() - set(ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT OFF) endif() option(ENABLE_INTERNAL_PTHREAD_WORKQUEUES "use libdispatch's own implementation of pthread workqueues" ${ENABLE_INTERNAL_PTHREAD_WORKQUEUES_DEFAULT}) if(ENABLE_INTERNAL_PTHREAD_WORKQUEUES) @@ -123,53 +93,15 @@ endif() option(INSTALL_PRIVATE_HEADERS "installs private headers in the same location as the public ones" OFF) -if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) - set(BlocksRuntime_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/src/BlocksRuntime) - - # NOTE(compnerd) use the `BUILD_SHARED_LIBS` variable to determine what type - # of library to build. If it is true, we will generate shared libraries, - # otherwise we will generate static libraries. 
- add_library(BlocksRuntime - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/data.c - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/runtime.c) - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_sources(BlocksRuntime - PRIVATE - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/BlocksRuntime.def) - if(NOT BUILD_SHARED_LIBS) - target_compile_definitions(BlocksRuntime - PRIVATE - BlocksRuntime_STATIC) - endif() - endif() - set_target_properties(BlocksRuntime - PROPERTIES - POSITION_INDEPENDENT_CODE TRUE) - if(HAVE_OBJC AND CMAKE_DL_LIBS) - target_link_libraries(BlocksRuntime - PUBLIC - ${CMAKE_DL_LIBS}) - endif() - - add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) - - install(FILES - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block.h - DESTINATION - "${INSTALL_BLOCK_HEADERS_DIR}") - if(INSTALL_PRIVATE_HEADERS) - install(FILES - ${PROJECT_SOURCE_DIR}/src/BlocksRuntime/Block_private.h - DESTINATION - "${INSTALL_BLOCK_HEADERS_DIR}") - endif() - install(TARGETS - BlocksRuntime - ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} - LIBRARY DESTINATION ${INSTALL_TARGET_DIR} - RUNTIME DESTINATION bin) +option(ENABLE_SWIFT "enable libdispatch swift overlay" OFF) +if(ENABLE_SWIFT) + enable_language(Swift) endif() +option(ENABLE_THREAD_LOCAL_STORAGE "enable usage of thread local storage via _Thread_local" ON) +set(DISPATCH_USE_THREAD_LOCAL_STORAGE ${ENABLE_THREAD_LOCAL_STORAGE}) + + check_symbol_exists(__GNU_LIBRARY__ "features.h" _GNU_SOURCE) if(_GNU_SOURCE) set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE) @@ -181,7 +113,9 @@ if(__BUILTIN_TRAP) set(HAVE_NORETURN_BUILTIN_TRAP 1) endif() -find_package(LibRT) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Android) + find_package(LibRT) +endif() check_function_exists(_pthread_workqueue_init HAVE__PTHREAD_WORKQUEUE_INIT) check_function_exists(getprogname HAVE_GETPROGNAME) @@ -200,8 +134,6 @@ check_function_exists(strlcpy HAVE_STRLCPY) check_function_exists(sysconf HAVE_SYSCONF) check_function_exists(arc4random HAVE_ARC4RANDOM) 
-find_package(Threads REQUIRED) - check_include_files("TargetConditionals.h" HAVE_TARGETCONDITIONALS_H) check_include_files("dlfcn.h" HAVE_DLFCN_H) check_include_files("fcntl.h" HAVE_FCNTL_H) @@ -237,7 +169,7 @@ else() set(USE_MACH_SEM 0) endif() if(CMAKE_SYSTEM_NAME STREQUAL Windows) - add_definitions(-DUSE_WIN32_SEM) + add_compile_definitions($<$,$>:USE_WIN32_SEM>) endif() check_library_exists(pthread sem_init "" USE_POSIX_SEM) # NOTE: android has not always provided a libpthread, but uses the pthreads API @@ -267,7 +199,7 @@ check_symbol_exists(VQ_FREE_SPACE_CHANGE "sys/mount.h" HAVE_DECL_VQ_FREE_SPACE_C check_symbol_exists(strlcpy "string.h" HAVE_STRLCPY) check_symbol_exists(program_invocation_name "errno.h" HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME) if (HAVE_DECL_PROGRAM_INVOCATION_SHORT_NAME) - add_definitions(-D_GNU_SOURCE=1) + add_compile_definitions($<$,$>:_GNU_SOURCE=1>) endif() check_symbol_exists(__printflike "bsd/sys/cdefs.h" HAVE_PRINTFLIKE) @@ -276,24 +208,20 @@ if(CMAKE_SYSTEM_NAME STREQUAL Android) endif() if(CMAKE_SYSTEM_NAME STREQUAL FreeBSD) - add_definitions(-D_WITH_DPRINTF) + add_compile_definitions($<$,$>:_WITH_DPRINTF>) endif() -if(ENABLE_DTRACE STREQUAL "") - find_program(dtrace_EXECUTABLE dtrace) - if(dtrace_EXECUTABLE) - add_definitions(-DDISPATCH_USE_DTRACE=1) - else() - add_definitions(-DDISPATCH_USE_DTRACE=0) - endif() -elseif(ENABLE_DTRACE) +if(ENABLE_DTRACE) find_program(dtrace_EXECUTABLE dtrace) - if(NOT dtrace_EXECUTABLE) + if(NOT dtrace_EXECUTABLE AND NOT ENABLE_DTRACE STREQUAL "") message(FATAL_ERROR "dtrace not found but explicitly requested") endif() - add_definitions(-DDISPATCH_USE_DTRACE=1) +endif() + +if(dtrace_EXECUTABLE) + add_compile_definitions($<$,$>:DISPATCH_USE_DTRACE=1>) else() - add_definitions(-DDISPATCH_USE_DTRACE=0) + add_compile_definitions($<$,$>:DISPATCH_USE_DTRACE=0>) endif() find_program(leaks_EXECUTABLE leaks) @@ -301,6 +229,7 @@ if(leaks_EXECUTABLE) set(HAVE_LEAKS TRUE) endif() + if(CMAKE_SYSTEM_NAME 
STREQUAL Darwin) add_custom_command(OUTPUT "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" @@ -322,25 +251,32 @@ add_custom_target(module-maps ALL DEPENDS "${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" "${PROJECT_SOURCE_DIR}/private/module.modulemap") + configure_file("${PROJECT_SOURCE_DIR}/cmake/config.h.in" "${PROJECT_BINARY_DIR}/config/config_ac.h") -add_definitions(-DHAVE_CONFIG_H) +add_compile_definitions($<$,$>:HAVE_CONFIG_H>) -if(CMAKE_SYSTEM_NAME STREQUAL Windows) - include(DispatchWindowsSupport) - dispatch_windows_arch_spelling(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_MSVC_ARCH) - dispatch_windows_include_for_arch(${DISPATCH_MSVC_ARCH} DISPATCH_INCLUDES) - include_directories(BEFORE SYSTEM ${DISPATCH_INCLUDES}) - dispatch_windows_lib_for_arch(${CMAKE_SYSTEM_PROCESSOR} DISPATCH_LIBDIR) - link_directories(${DISPATCH_LIBDIR}) + +if(ENABLE_SWIFT) + set(INSTALL_TARGET_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$>:_static>/$" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$>:_static>/dispatch" CACHE PATH "Path where the headers will be installed for libdispatch") + set(INSTALL_BLOCK_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$>:_static>/Block" CACHE PATH "Path where the headers will be installed for the blocks runtime") + set(INSTALL_OS_HEADERS_DIR "${CMAKE_INSTALL_LIBDIR}/swift$<$>:_static>/os" CACHE PATH "Path where the os/ headers will be installed") +else() + set(INSTALL_TARGET_DIR "${CMAKE_INSTALL_LIBDIR}" CACHE PATH "Path where the libraries will be installed") + set(INSTALL_DISPATCH_HEADERS_DIR "include/dispatch" CACHE PATH "Path where the headers will be installed") + set(INSTALL_BLOCK_HEADERS_DIR "include" CACHE PATH "Path where the headers will be installed for the blocks runtime") + set(INSTALL_OS_HEADERS_DIR "include/os" CACHE PATH "Path where the headers will be installed") endif() + add_subdirectory(dispatch) add_subdirectory(man) add_subdirectory(os) add_subdirectory(private) 
add_subdirectory(src) -if(ENABLE_TESTING) +if(BUILD_TESTING) add_subdirectory(tests) endif() +add_subdirectory(cmake/modules) diff --git a/PATCHES b/PATCHES index 10277a45c..42b01784b 100644 --- a/PATCHES +++ b/PATCHES @@ -522,3 +522,25 @@ github commits starting with 29bdc2f from [68875cb] APPLIED rdar://54572081 [fc73866] APPLIED rdar://54572081 [3cf1bf3] APPLIED rdar://54572081 +[3da29dd] APPLIED rdar://81276248 +[90a45ce] APPLIED rdar://81276248 +[37c8c28] APPLIED rdar://81276248 +[c023edd] APPLIED rdar://81276248 +[ab8a151] APPLIED rdar://81276248 +[c66cb25] APPLIED rdar://81276248 +[289e552] APPLIED rdar://81276248 +[afd6b6d] APPLIED rdar://81276248 +[4c91d20] APPLIED rdar://81276248 +[2accb0b] APPLIED rdar://81276248 +[b0b314c] APPLIED rdar://81276248 +[c992dac] APPLIED rdar://81276248 +[80b1772] APPLIED rdar://81276248 +[1986f39] APPLIED rdar://81276248 +[598ce42] APPLIED rdar://81276248 +[feb4421] APPLIED rdar://81276248 +[f152471] APPLIED rdar://81276248 +[457b110] APPLIED rdar://81276248 +[f13ea5d] APPLIED rdar://81276248 +[1c303fa] APPLIED rdar://81276248 +[34f383d] APPLIED rdar://81276248 +[7870521] APPLIED rdar://81276248 diff --git a/cmake/modules/CMakeLists.txt b/cmake/modules/CMakeLists.txt new file mode 100644 index 000000000..10cc0e100 --- /dev/null +++ b/cmake/modules/CMakeLists.txt @@ -0,0 +1,7 @@ + +set(DISPATCH_EXPORTS_FILE ${CMAKE_CURRENT_BINARY_DIR}/dispatchExports.cmake) +configure_file(dispatchConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/dispatchConfig.cmake) + +get_property(DISPATCH_EXPORTS GLOBAL PROPERTY DISPATCH_EXPORTS) +export(TARGETS ${DISPATCH_EXPORTS} FILE ${DISPATCH_EXPORTS_FILE}) diff --git a/cmake/modules/DispatchCompilerWarnings.cmake b/cmake/modules/DispatchCompilerWarnings.cmake index d568c721a..35b80f3ec 100644 --- a/cmake/modules/DispatchCompilerWarnings.cmake +++ b/cmake/modules/DispatchCompilerWarnings.cmake @@ -1,79 +1,76 @@ if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") # TODO: someone needs to provide the msvc 
equivalent warning flags - macro(dispatch_common_warnings) - endmacro() else() - macro(dispatch_common_warnings) - add_compile_options(-Werror) - add_compile_options(-Wall) - add_compile_options(-Wextra) + add_compile_options($<$,$>:-Werror>) + add_compile_options($<$,$>:-Wall>) + add_compile_options($<$,$>:-Wextra>) - add_compile_options(-Warray-bounds-pointer-arithmetic) - add_compile_options(-Wassign-enum) - add_compile_options(-Watomic-properties) - add_compile_options(-Wcomma) - add_compile_options(-Wconditional-uninitialized) - add_compile_options(-Wconversion) - add_compile_options(-Wcovered-switch-default) - add_compile_options(-Wdate-time) - add_compile_options(-Wdeprecated) - add_compile_options(-Wdocumentation) - add_compile_options(-Wdouble-promotion) - add_compile_options(-Wduplicate-enum) - add_compile_options(-Wexpansion-to-defined) - add_compile_options(-Wfloat-equal) - add_compile_options(-Widiomatic-parentheses) - add_compile_options(-Winfinite-recursion) - add_compile_options(-Wmissing-prototypes) - add_compile_options(-Wnewline-eof) - add_compile_options(-Wnullable-to-nonnull-conversion) - add_compile_options(-Wobjc-interface-ivars) - add_compile_options(-Wover-aligned) - add_compile_options(-Wpacked) - add_compile_options(-Wpointer-arith) - add_compile_options(-Wselector) - add_compile_options(-Wshadow) - add_compile_options(-Wshorten-64-to-32) - add_compile_options(-Wsign-conversion) - add_compile_options(-Wstatic-in-inline) - add_compile_options(-Wsuper-class-method-mismatch) - add_compile_options(-Wswitch) - add_compile_options(-Wunguarded-availability) - add_compile_options(-Wunreachable-code) - add_compile_options(-Wunused) + add_compile_options($<$,$>:-Warray-bounds-pointer-arithmetic>) + add_compile_options($<$,$>:-Wassign-enum>) + add_compile_options($<$,$>:-Watomic-properties>) + add_compile_options($<$,$>:-Wcomma>) + add_compile_options($<$,$>:-Wconditional-uninitialized>) + add_compile_options($<$,$>:-Wconversion>) + 
add_compile_options($<$,$>:-Wcovered-switch-default>) + add_compile_options($<$,$>:-Wdate-time>) + add_compile_options($<$,$>:-Wdeprecated>) + add_compile_options($<$,$>:-Wdocumentation>) + add_compile_options($<$,$>:-Wdouble-promotion>) + add_compile_options($<$,$>:-Wduplicate-enum>) + add_compile_options($<$,$>:-Wexpansion-to-defined>) + add_compile_options($<$,$>:-Wfloat-equal>) + add_compile_options($<$,$>:-Widiomatic-parentheses>) + add_compile_options($<$,$>:-Winfinite-recursion>) + add_compile_options($<$,$>:-Wmissing-prototypes>) + add_compile_options($<$,$>:-Wnewline-eof>) + add_compile_options($<$,$>:-Wnullable-to-nonnull-conversion>) + add_compile_options($<$,$>:-Wobjc-interface-ivars>) + add_compile_options($<$,$>:-Wover-aligned>) + add_compile_options($<$,$>:-Wpacked>) + add_compile_options($<$,$>:-Wpointer-arith>) + add_compile_options($<$,$>:-Wselector>) + add_compile_options($<$,$>:-Wshadow>) + add_compile_options($<$,$>:-Wshorten-64-to-32>) + add_compile_options($<$,$>:-Wsign-conversion>) + add_compile_options($<$,$>:-Wstatic-in-inline>) + add_compile_options($<$,$>:-Wsuper-class-method-mismatch>) + add_compile_options($<$,$>:-Wswitch>) + add_compile_options($<$,$>:-Wunguarded-availability>) + add_compile_options($<$,$>:-Wunreachable-code>) + add_compile_options($<$,$>:-Wunused>) - add_compile_options(-Wno-unknown-warning-option) - add_compile_options(-Wno-trigraphs) - add_compile_options(-Wno-four-char-constants) - add_compile_options(-Wno-disabled-macro-expansion) - add_compile_options(-Wno-pedantic) - add_compile_options(-Wno-bad-function-cast) - add_compile_options(-Wno-c++-compat) - add_compile_options(-Wno-c++98-compat) - add_compile_options(-Wno-c++98-compat-pedantic) - add_compile_options(-Wno-cast-align) - add_compile_options(-Wno-cast-qual) - add_compile_options(-Wno-documentation-unknown-command) - add_compile_options(-Wno-format-nonliteral) - add_compile_options(-Wno-missing-variable-declarations) - 
add_compile_options(-Wno-old-style-cast) - add_compile_options(-Wno-padded) - add_compile_options(-Wno-reserved-id-macro) - add_compile_options(-Wno-shift-sign-overflow) - add_compile_options(-Wno-undef) - add_compile_options(-Wno-unreachable-code-aggressive) - add_compile_options(-Wno-unused-macros) - add_compile_options(-Wno-used-but-marked-unused) - add_compile_options(-Wno-vla) + add_compile_options($<$,$>:-Wno-unknown-warning-option>) + add_compile_options($<$,$>:-Wno-trigraphs>) + add_compile_options($<$,$>:-Wno-four-char-constants>) + add_compile_options($<$,$>:-Wno-disabled-macro-expansion>) + add_compile_options($<$,$>:-Wno-pedantic>) + add_compile_options($<$,$>:-Wno-bad-function-cast>) + add_compile_options($<$,$>:-Wno-c++-compat>) + add_compile_options($<$,$>:-Wno-c++98-compat>) + add_compile_options($<$,$>:-Wno-c++98-compat-pedantic>) + add_compile_options($<$,$>:-Wno-cast-align>) + add_compile_options($<$,$>:-Wno-cast-qual>) + add_compile_options($<$,$>:-Wno-documentation-unknown-command>) + add_compile_options($<$,$>:-Wno-format-nonliteral>) + add_compile_options($<$,$>:-Wno-missing-variable-declarations>) + add_compile_options($<$,$>:-Wno-old-style-cast>) + add_compile_options($<$,$>:-Wno-padded>) + add_compile_options($<$,$>:-Wno-reserved-id-macro>) + add_compile_options($<$,$>:-Wno-shift-sign-overflow>) + add_compile_options($<$,$>:-Wno-undef>) + add_compile_options($<$,$>:-Wno-unreachable-code-aggressive>) + add_compile_options($<$,$>:-Wno-unused-macros>) + add_compile_options($<$,$>:-Wno-used-but-marked-unused>) + add_compile_options($<$,$>:-Wno-void-pointer-to-int-cast>) + add_compile_options($<$,$>:-Wno-vla>) - if(CMAKE_SYSTEM_NAME STREQUAL Android) - add_compile_options(-Wno-incompatible-function-pointer-types) - add_compile_options(-Wno-implicit-function-declaration) - add_compile_options(-Wno-conversion) - add_compile_options(-Wno-int-conversion) - add_compile_options(-Wno-shorten-64-to-32) - endif() - 
add_compile_options(-Wno-error=assign-enum) - endmacro() + if(CMAKE_SYSTEM_NAME STREQUAL Android) + add_compile_options($<$,$>:-Wno-incompatible-function-pointer-types>) + add_compile_options($<$,$>:-Wno-implicit-function-declaration>) + add_compile_options($<$,$>:-Wno-conversion>) + add_compile_options($<$,$>:-Wno-int-conversion>) + add_compile_options($<$,$>:-Wno-shorten-64-to-32>) + endif() + add_compile_options($<$,$>:-Wno-error=assign-enum>) endif() diff --git a/cmake/modules/DispatchUtilities.cmake b/cmake/modules/DispatchUtilities.cmake deleted file mode 100644 index fea3622ec..000000000 --- a/cmake/modules/DispatchUtilities.cmake +++ /dev/null @@ -1,19 +0,0 @@ - -function(dispatch_set_linker target) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(CMAKE_HOST_EXECUTABLE_SUFFIX .exe) - endif() - - if(USE_GOLD_LINKER) - set_property(TARGET ${target} - APPEND_STRING - PROPERTY LINK_FLAGS - -fuse-ld=gold${CMAKE_HOST_EXECUTABLE_SUFFIX}) - endif() - if(USE_LLD_LINKER) - set_property(TARGET ${target} - APPEND_STRING - PROPERTY LINK_FLAGS - -fuse-ld=lld${CMAKE_HOST_EXECUTABLE_SUFFIX}) - endif() -endfunction() diff --git a/cmake/modules/SwiftSupport.cmake b/cmake/modules/SwiftSupport.cmake index da7a201e3..4310b54c9 100644 --- a/cmake/modules/SwiftSupport.cmake +++ b/cmake/modules/SwiftSupport.cmake @@ -1,221 +1,4 @@ -include(CMakeParseArguments) - -function(add_swift_target target) - set(options LIBRARY;SHARED;STATIC) - set(single_value_options MODULE_NAME;MODULE_LINK_NAME;MODULE_PATH;MODULE_CACHE_PATH;OUTPUT;TARGET) - set(multiple_value_options CFLAGS;DEPENDS;LINK_FLAGS;RESOURCES;SOURCES;SWIFT_FLAGS) - - cmake_parse_arguments(AST "${options}" "${single_value_options}" "${multiple_value_options}" ${ARGN}) - - set(compile_flags ${CMAKE_SWIFT_FLAGS}) - set(link_flags ${CMAKE_SWIFT_LINK_FLAGS}) - - if(AST_TARGET) - list(APPEND compile_flags -target;${AST_TARGET}) - list(APPEND link_flags -target;${AST_TARGET}) - endif() - if(AST_MODULE_NAME) - list(APPEND 
compile_flags -module-name;${AST_MODULE_NAME}) - else() - list(APPEND compile_flags -module-name;${target}) - endif() - if(AST_MODULE_LINK_NAME) - list(APPEND compile_flags -module-link-name;${AST_MODULE_LINK_NAME}) - endif() - if(AST_MODULE_CACHE_PATH) - list(APPEND compile_flags -module-cache-path;${AST_MODULE_CACHE_PATH}) - endif() - if(CMAKE_BUILD_TYPE MATCHES Debug OR CMAKE_BUILD_TYPE MATCHES RelWithDebInfo) - list(APPEND compile_flags -g) - endif() - if(AST_SWIFT_FLAGS) - foreach(flag ${AST_SWIFT_FLAGS}) - list(APPEND compile_flags ${flag}) - endforeach() - endif() - if(AST_CFLAGS) - foreach(flag ${AST_CFLAGS}) - list(APPEND compile_flags -Xcc;${flag}) - endforeach() - endif() - if(AST_LINK_FLAGS) - foreach(flag ${AST_LINK_FLAGS}) - list(APPEND link_flags ${flag}) - endforeach() - endif() - if(AST_LIBRARY) - if(AST_STATIC AND AST_SHARED) - message(SEND_ERROR "add_swift_target asked to create library as STATIC and SHARED") - elseif(AST_STATIC OR NOT BUILD_SHARED_LIBS) - set(library_kind STATIC) - elseif(AST_SHARED OR BUILD_SHARED_LIBS) - set(library_kind SHARED) - endif() - else() - if(AST_STATIC OR AST_SHARED) - message(SEND_ERROR "add_swift_target asked to create executable as STATIC or SHARED") - endif() - endif() - if(NOT AST_OUTPUT) - if(AST_LIBRARY) - if(AST_SHARED OR BUILD_SHARED_LIBS) - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) - else() - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_STATIC_LIBRARY_PREFIX}${target}${CMAKE_STATIC_LIBRARY_SUFFIX}) - endif() - else() - set(AST_OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${target}${CMAKE_EXECUTABLE_SUFFIX}) - endif() - endif() - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - if(AST_SHARED OR BUILD_SHARED_LIBS) - set(IMPORT_LIBRARY ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${CMAKE_IMPORT_LIBRARY_PREFIX}${target}${CMAKE_IMPORT_LIBRARY_SUFFIX}) - endif() - endif() - - set(sources) - foreach(source 
${AST_SOURCES}) - get_filename_component(location ${source} PATH) - if(IS_ABSOLUTE ${location}) - list(APPEND sources ${source}) - else() - list(APPEND sources ${CMAKE_CURRENT_SOURCE_DIR}/${source}) - endif() - endforeach() - - set(objs) - set(mods) - set(docs) - set(i 0) - foreach(source ${sources}) - get_filename_component(name ${source} NAME) - - set(obj ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}${CMAKE_C_OUTPUT_EXTENSION}) - set(mod ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftmodule) - set(doc ${CMAKE_CURRENT_BINARY_DIR}/${target}.dir/${name}.swiftdoc) - - set(all_sources ${sources}) - list(INSERT all_sources ${i} -primary-file) - - add_custom_command(OUTPUT - ${obj} - ${mod} - ${doc} - DEPENDS - ${source} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -emit-module-path ${mod} -emit-module-doc-path ${doc} -o ${obj} -c ${all_sources}) - - list(APPEND objs ${obj}) - list(APPEND mods ${mod}) - list(APPEND docs ${doc}) - - math(EXPR i "${i}+1") - endforeach() - - if(AST_LIBRARY) - get_filename_component(module_directory ${AST_MODULE_PATH} DIRECTORY) - - set(module ${AST_MODULE_PATH}) - set(documentation ${module_directory}/${AST_MODULE_NAME}.swiftdoc) - - add_custom_command(OUTPUT - ${module} - ${documentation} - DEPENDS - ${mods} - ${docs} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} -frontend ${compile_flags} -sil-merge-partial-modules -emit-module ${mods} -o ${module} -emit-module-doc-path ${documentation}) - endif() - - if(AST_LIBRARY) - if(CMAKE_SYSTEM_NAME STREQUAL Windows OR CMAKE_SYSTEM_NAME STREQUAL Darwin) - set(emit_library -emit-library) - else() - set(emit_library -emit-library -Xlinker -soname -Xlinker ${CMAKE_SHARED_LIBRARY_PREFIX}${target}${CMAKE_SHARED_LIBRARY_SUFFIX}) - endif() - endif() - if(NOT AST_LIBRARY OR library_kind STREQUAL SHARED) - add_custom_command(OUTPUT - ${AST_OUTPUT} - DEPENDS - ${objs} - ${AST_DEPENDS} - COMMAND - ${CMAKE_SWIFT_COMPILER} ${emit_library} ${link_flags} -o 
${AST_OUTPUT} ${objs}) - add_custom_target(${target} - ALL - DEPENDS - ${AST_OUTPUT} - ${module} - ${documentation}) - else() - add_library(${target}-static STATIC ${objs}) - add_dependencies(${target}-static ${AST_DEPENDS}) - get_filename_component(ast_output_bn ${AST_OUTPUT} NAME) - if(NOT CMAKE_STATIC_LIBRARY_PREFIX STREQUAL "") - string(REGEX REPLACE "^${CMAKE_STATIC_LIBRARY_PREFIX}" "" ast_output_bn ${ast_output_bn}) - endif() - if(NOT CMAKE_STATIC_LIBRARY_SUFFIX STREQUAL "") - string(REGEX REPLACE "${CMAKE_STATIC_LIBRARY_SUFFIX}$" "" ast_output_bn ${ast_output_bn}) - endif() - get_filename_component(ast_output_dn ${AST_OUTPUT} DIRECTORY) - set_target_properties(${target}-static - PROPERTIES - LINKER_LANGUAGE C - ARCHIVE_OUTPUT_DIRECTORY ${ast_output_dn} - OUTPUT_DIRECTORY ${ast_output_dn} - OUTPUT_NAME ${ast_output_bn}) - add_custom_target(${target} - ALL - DEPENDS - ${target}-static - ${module} - ${documentation}) - endif() - - if(AST_RESOURCES) - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target} - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}/${target} - COMMAND - ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_RESOURCES} ${CMAKE_CURRENT_BINARY_DIR}/${target}/Resources) - else() - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E copy ${AST_OUTPUT} ${CMAKE_CURRENT_BINARY_DIR}) - if(CMAKE_SYSTEM_NAME STREQUAL Windows) - if(AST_SHARED OR BUILD_SHARED_LIBS) - add_custom_command(TARGET - ${target} - POST_BUILD - COMMAND - ${CMAKE_COMMAND} -E copy ${IMPORT_LIBRARY} ${CMAKE_CURRENT_BINARY_DIR}) - endif() - endif() - endif() -endfunction() - -function(add_swift_library library) - add_swift_target(${library} LIBRARY ${ARGN}) -endfunction() - -function(add_swift_executable executable) - add_swift_target(${executable} ${ARGN}) -endfunction() - 
# Returns the current achitecture name in a variable # # Usage: diff --git a/cmake/modules/dispatchConfig.cmake.in b/cmake/modules/dispatchConfig.cmake.in new file mode 100644 index 000000000..81228f271 --- /dev/null +++ b/cmake/modules/dispatchConfig.cmake.in @@ -0,0 +1,7 @@ + +set(DISPATCH_HAS_SWIFT_SDK_OVERLAY @ENABLE_SWIFT@) + +if(NOT TARGET dispatch) + include(@DISPATCH_EXPORTS_FILE@) +endif() + diff --git a/config/config.h b/config/config.h index b50565aa9..c1ef8aaeb 100644 --- a/config/config.h +++ b/config/config.h @@ -61,6 +61,10 @@ you don't. */ #define HAVE_DECL_VQ_VERYLOWDISK 1 +/* Define to 1 if you have the declaration of `VQ_SERVEREVENT', and to 0 if + you don't. */ +#define HAVE_DECL_VQ_SERVEREVENT 1 + /* Define to 1 if you have the declaration of `VQ_QUOTA', and to 0 if you don't. */ #define HAVE_DECL_VQ_QUOTA 1 diff --git a/dispatch/CMakeLists.txt b/dispatch/CMakeLists.txt index 8b8be8cfb..7f68ed381 100644 --- a/dispatch/CMakeLists.txt +++ b/dispatch/CMakeLists.txt @@ -16,7 +16,17 @@ install(FILES DESTINATION "${INSTALL_DISPATCH_HEADERS_DIR}") if(ENABLE_SWIFT) - get_filename_component(MODULE_MAP module.modulemap REALPATH) + set(base_dir "${CMAKE_CURRENT_SOURCE_DIR}") + if(NOT BUILD_SHARED_LIBS) + set(base_dir "${CMAKE_CURRENT_SOURCE_DIR}/generic_static") + endif() + + get_filename_component( + MODULE_MAP + module.modulemap + REALPATH + BASE_DIR "${base_dir}") + install(FILES ${MODULE_MAP} DESTINATION diff --git a/dispatch/base.h b/dispatch/base.h index e6c71b0e0..0a2370bd8 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -127,6 +127,33 @@ #define DISPATCH_UNAVAILABLE_MSG(msg) #endif +#if defined(__cplusplus) +# if __cplusplus >= 201703L +# define DISPATCH_FALLTHROUGH [[fallthrough]] +# elif __cplusplus >= 201103L +# if defined(__clang__) +# define DISPATCH_FALLTHROUGH [[clang::fallthrough]] +# elif defined(__GNUC__) && __GNUC__ >= 7 +# define DISPATCH_FALLTHROUGH [[gnu::fallthrough]] +# else +# define DISPATCH_FALLTHROUGH +# endif +# else +# 
define DISPATCH_FALLTHROUGH +# endif +#elif defined(__GNUC__) && __GNUC__ >= 7 +# define DISPATCH_FALLTHROUGH __attribute__((__fallthrough__)) +#elif defined(__clang__) +# if __has_attribute(fallthrough) && __clang_major__ >= 5 +# define DISPATCH_FALLTHROUGH __attribute__((__fallthrough__)) +# else +# define DISPATCH_FALLTHROUGH +# endif +#else +# define DISPATCH_FALLTHROUGH +#endif + + #ifdef __linux__ #define DISPATCH_LINUX_UNAVAILABLE() \ DISPATCH_UNAVAILABLE_MSG( \ diff --git a/dispatch/generic_static/module.modulemap b/dispatch/generic_static/module.modulemap new file mode 100644 index 000000000..d5d64d2d1 --- /dev/null +++ b/dispatch/generic_static/module.modulemap @@ -0,0 +1,19 @@ +module Dispatch { + requires blocks + export * + link "dispatch" + link "BlocksRuntime" + link "DispatchStubs" +} + +module DispatchIntrospection [system] [extern_c] { + header "introspection.h" + export * +} + +module CDispatch [system] [extern_c] { + umbrella header "dispatch.h" + export * + requires blocks + link "dispatch" +} diff --git a/dispatch/queue.h b/dispatch/queue.h index dc5aae79a..c4820b6c4 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -496,7 +496,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW void dispatch_apply(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, - DISPATCH_NOESCAPE void (^block)(size_t)); + DISPATCH_NOESCAPE void (^block)(size_t iteration)); #endif /*! @@ -531,7 +531,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW void dispatch_apply_f(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, - void *_Nullable context, void (*work)(void *_Nullable, size_t)); + void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration)); /*! 
* @function dispatch_get_current_queue diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index 2dbad2992..a4f706e60 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -224,16 +224,19 @@ 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; 9B3713F623D24594001C5C88 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + 9B404D6C255A191A0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DAA255A1E6F0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B404DC0255A1E7D0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 9B81557B234AFC9800DB5CA3 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; - 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; 9B8ED5A6235183D100507521 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; - 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = 
(Public, ); }; }; - 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BA7221623E293FD0058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 
9BA7221723E294140058472E /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; 9BCAF77123A8550100E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; 9BCAF77223A8550B00E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; 9BCAF77323A8551300E4F685 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; @@ -245,26 +248,23 @@ 9BE3E57423CE62C2006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; 9BE3E57523CE62C9006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; 9BE3E57623CE62D8006FE059 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; - 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* 
workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 9BE3E58523CE638D006FE059 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 9BE3E58623CE63A3006FE059 /* (null) in Headers */ = {isa = PBXBuildFile; }; - 9BE52545238747D30041C2A0 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; settings = {ATTRIBUTES = (Public, ); }; }; - 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; - 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; + 9BE52545238747D30041C2A0 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + 9BE5254B238747ED0041C2A0 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + 9BE5254C238747ED0041C2A0 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */; }; + 9BE5254E238747F90041C2A0 /* workgroup_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA656DF236BB55000D13FAE /* workgroup_private.h */; }; 9BE525502387480F0041C2A0 /* workgroup.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B815576234AFC9800DB5CA3 /* workgroup.c */; }; - 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* 
eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BFD342C23C94F2500B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; - 9BFD342E23C94F4E00B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; - 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */; }; 9BFD343023C94F8C00B08420 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; 9BFD343C23CD032800B08420 /* eventlink_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */; }; B609581E221DFA2A00F39D1F /* workloop.h in Headers */ = {isa = PBXBuildFile; fileRef = B6095819221DFA2A00F39D1F /* workloop.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -297,6 +297,30 @@ C93D6165143E190E00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C93D6167143E190F00EB9023 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; C9C5F80E143C1771006DC718 /* transform.c in Sources */ = {isa = PBXBuildFile; fileRef = C9C5F80D143C1771006DC718 /* transform.c */; }; + E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */ = {isa = 
PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */; }; + E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A5E26EAF16600362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A5F26EAF16700362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6226EAF1B000362F72 /* workgroup_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */; }; + E4053A6326EAF25500362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6426EAF25600362F72 /* mach_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E4BACC91D48A89500B562AE /* mach_internal.h */; }; + E4053A6526EAF27A00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6626EAF27B00362F72 /* target.h in Headers */ = {isa = PBXBuildFile; fileRef = F7DC045A2060BBBE00C90737 /* target.h */; }; + E4053A6726EAF2A000362F72 /* time.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A3109923C7003403D5 /* time.h */; }; + E4053A6826EAF2A700362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E4053A6926EAF2A800362F72 /* priority.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EFBDA4A1D61A0D600282887 /* priority.h */; }; + E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = 
C901445E1C73A7FE002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; + E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B38A012234C6D0400E6B90F /* workgroup_base.h */; }; + E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */ = {isa = PBXBuildFile; fileRef = 9BA7221023E293CB0058472E /* workgroup_parallel.h */; }; + E4053A6D26EAF55000362F72 /* workgroup.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B81556E234AF0D200DB5CA3 /* workgroup.h */; }; + E4053A6E26EAF55000362F72 /* workgroup_interval.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */; }; + E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B8ED5782350C79100507521 /* workgroup_object.h */; }; + E4053A7026EAF55000362F72 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; + E4053A7126EAF55000362F72 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; + E4053A7226EAF67D00362F72 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = C90144641C73A845002638FC /* module.modulemap */; settings = {ATTRIBUTES = (Private, ); }; }; E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */ = {isa = PBXBuildFile; fileRef = E4128ED513BA9A1700ABB2CB /* hw_config.h */; }; E417A38512A472C5004D659D /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; @@ -440,8 +464,6 @@ E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; E44F9DBE1654405B001DCD38 /* tsd.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A4109923C7003403D5 /* tsd.h */; }; E44F9DBF165440EF001DCD38 /* config.h in Headers */ 
= {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; - E44F9DC016544115001DCD38 /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = E4EB4A2614C35ECE00AA0FA9 /* object.h */; }; - E44F9DC116544115001DCD38 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; }; E454569314746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E454569414746F1B00106147 /* object_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E454569214746F1B00106147 /* object_private.h */; settings = {ATTRIBUTES = (); }; }; E4630251176162D200E11F4C /* atomic_sfb.h in Headers */ = {isa = PBXBuildFile; fileRef = E463024F1761603C00E11F4C /* atomic_sfb.h */; }; @@ -760,8 +782,8 @@ 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; 5AAB45BF10D30B79004407EA /* data.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = data.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; - 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; tabWidth = 8; }; - 5AAB45C510D30D0C004407EA /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; tabWidth = 8; }; + 5AAB45C310D30CC7004407EA /* io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io.h; sourceTree = ""; }; + 5AAB45C510D30D0C004407EA /* 
data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = ""; }; 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libfirehose_kernel.a; sourceTree = BUILT_PRODUCTS_DIR; }; 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose_kernel.xcconfig; sourceTree = ""; }; 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_queue_label.c; sourceTree = ""; }; @@ -778,7 +800,6 @@ 6E326ABB1C229895002A6505 /* dispatch_read2.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_read2.c; sourceTree = ""; }; 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io_net.c; sourceTree = ""; }; 6E326ABE1C22A577002A6505 /* dispatch_io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_io.c; sourceTree = ""; }; - 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = dispatch_sync_gc.m; sourceTree = ""; }; 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_sync_on_main.c; sourceTree = ""; }; 6E326ADC1C234396002A6505 /* dispatch_readsync.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_readsync.c; sourceTree = ""; }; 6E326ADE1C23451A002A6505 /* dispatch_concur.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_concur.c; sourceTree = ""; }; @@ -833,7 +854,6 @@ 
6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = libfirehose.xcconfig; sourceTree = ""; }; 6EB60D291BBB19640092FA94 /* firehose_inline_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_inline_internal.h; sourceTree = ""; }; 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_deadname.c; sourceTree = ""; }; - 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_network_event_thread.c; sourceTree = ""; }; 6EC670C71E37E201004F10D6 /* perf_mach_async.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_mach_async.c; sourceTree = ""; }; 6EC670C81E37E201004F10D6 /* perf_pipepingpong.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = perf_pipepingpong.c; sourceTree = ""; }; 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_channel.c; sourceTree = ""; }; @@ -881,19 +901,20 @@ 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 9B2A588023A412B400A7BB27 /* eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventlink.c; sourceTree = ""; }; 9B3713F123D24594001C5C88 /* clock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = ""; }; - 9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_base.h; path 
= os/workgroup_base.h; sourceTree = SOURCE_ROOT; }; + 9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_base.h; sourceTree = ""; }; + 9B404D6B255A191A0014912B /* apply_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = apply_private.h; sourceTree = ""; }; 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = "queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = workgroup_object_private.h; sourceTree = ""; }; - 9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup.h; path = os/workgroup.h; sourceTree = SOURCE_ROOT; }; + 9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup.h; sourceTree = ""; }; 9B815576234AFC9800DB5CA3 /* workgroup.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = workgroup.c; path = src/workgroup.c; sourceTree = SOURCE_ROOT; }; - 9B8ED5782350C79100507521 /* workgroup_object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_object.h; path = os/workgroup_object.h; sourceTree = SOURCE_ROOT; }; - 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_interval.h; path = os/workgroup_interval.h; sourceTree = SOURCE_ROOT; }; - 9BA656DF236BB55000D13FAE /* workgroup_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_private.h; path = os/workgroup_private.h; sourceTree = SOURCE_ROOT; 
}; - 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_interval_private.h; path = os/workgroup_interval_private.h; sourceTree = SOURCE_ROOT; }; + 9B8ED5782350C79100507521 /* workgroup_object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_object.h; sourceTree = ""; }; + 9B9DB6F4234ECE92003F962B /* workgroup_interval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval.h; sourceTree = ""; }; + 9BA656DF236BB55000D13FAE /* workgroup_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_private.h; sourceTree = ""; }; + 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_interval_private.h; sourceTree = ""; }; 9BA7221023E293CB0058472E /* workgroup_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_parallel.h; sourceTree = ""; }; 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_private.h; sourceTree = ""; }; - 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = eventlink_internal.h; path = src/eventlink_internal.h; sourceTree = SOURCE_ROOT; }; - 9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = workgroup_internal.h; path = src/workgroup_internal.h; sourceTree = SOURCE_ROOT; }; + 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eventlink_internal.h; sourceTree = ""; }; + 
9BCAF79523AAEDF700E4F685 /* workgroup_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_internal.h; sourceTree = ""; }; B6095819221DFA2A00F39D1F /* workloop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workloop.h; sourceTree = ""; }; B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_no_blocks.c; sourceTree = ""; }; B68330BC1EBCF6080003E71C /* dispatch_wl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_wl.c; sourceTree = ""; }; @@ -910,7 +931,7 @@ C01866BF1C5976C90040FC07 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = ""; }; C901445E1C73A7FE002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; C90144641C73A845002638FC /* module.modulemap */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = darwin/module.modulemap; sourceTree = ""; }; - C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; tabWidth = 8; }; + C913AC0E143BD34800B78976 /* data_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_private.h; sourceTree = ""; }; C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = ddt.xcodeproj; path = tools/ddt/ddt.xcodeproj; sourceTree = ""; }; C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = dispatch_objc.m; sourceTree = ""; }; 
C9C5F80D143C1771006DC718 /* transform.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = transform.c; sourceTree = ""; }; @@ -938,10 +959,10 @@ E44EBE3B1251659900645D88 /* init.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = init.c; sourceTree = ""; }; E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; - E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; + E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; - E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = io_private.h; path = private/io_private.h; sourceTree = SOURCE_ROOT; tabWidth = 8; }; + E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_private.h; sourceTree = ""; }; E48EC97B1835BADD00EAC4F1 /* yield.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yield.h; sourceTree = ""; }; E49BB6F21E70748100868613 /* libdispatch_armv81.a */ = {isa = PBXFileReference; 
explicitFileType = archive.ar; includeInIndex = 0; path = libdispatch_armv81.a; sourceTree = BUILT_PRODUCTS_DIR; }; E49F24DF125D57FA0057C971 /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; @@ -958,6 +979,20 @@ E4BA743813A8900B0095BDF1 /* dispatch_read.3 */ = {isa = PBXFileReference; explicitFileType = text.man; path = dispatch_read.3; sourceTree = ""; }; E4BA743913A8911B0095BDF1 /* getprogname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getprogname.h; sourceTree = ""; }; E4C1ED6E1263E714000D3C8B /* data_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_internal.h; sourceTree = ""; }; + E4C97EFF263868F800628947 /* dispatch_once.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_once.c; sourceTree = ""; }; + E4C97F04263868F800628947 /* dispatch_async_and_wait.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_async_and_wait.c; sourceTree = ""; }; + E4C97F05263868F800628947 /* os_workgroup_multilang.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_multilang.c; sourceTree = ""; }; + E4C97F06263868F800628947 /* os_eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink.c; sourceTree = ""; }; + E4C97F07263868F800628947 /* os_workgroup_basic.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_basic.c; sourceTree = ""; }; + E4C97F08263868F800628947 /* dispatch_qos_cf.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_qos_cf.c; sourceTree = ""; }; + E4C97F09263868F800628947 /* os_workgroup_empty2.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty2.c; sourceTree = ""; }; + 
E4C97F0A263868F800628947 /* os_workgroup_entitled.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_entitled.c; sourceTree = ""; }; + E4C97F0B263868F800628947 /* dispatch_plusplus.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = dispatch_plusplus.cpp; sourceTree = ""; }; + E4C97F0C263868F800628947 /* os_eventlink_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_eventlink_empty.c; sourceTree = ""; }; + E4C97F0D263868F800628947 /* os_workgroup_empty.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = os_workgroup_empty.c; sourceTree = ""; }; + E4C97F0E263868F800628947 /* dispatch_mach.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_mach.c; sourceTree = ""; }; + E4C97F0F263868F800628947 /* dispatch_workloop.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_workloop.c; sourceTree = ""; }; + E4C97F10263868F800628947 /* dispatch_cooperative.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = dispatch_cooperative.c; sourceTree = ""; }; E4D76A9218E325D200B1F98B /* block.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block.h; sourceTree = ""; }; E4EB4A2614C35ECE00AA0FA9 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = ""; }; E4EB4A2A14C36F4E00AA0FA9 /* install-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-headers.sh"; sourceTree = ""; }; @@ -1033,14 +1068,13 @@ 08FB7795FE84155DC02AAC07 /* Dispatch Source */ = { isa = PBXGroup; children = ( - 9B815576234AFC9800DB5CA3 /* workgroup.c */, - 9B2A588023A412B400A7BB27 /* eventlink.c */, 2BBF5A62154B64F5002B20F9 /* allocator.c */, 9676A0E00F3E755D00713ADB /* apply.c */, 965CD6340F3E806200D4E28D /* benchmark.c */, 
E43A724F1AF85BBC00BAA921 /* block.cpp */, 5AAB45BF10D30B79004407EA /* data.c */, E420866F16027AE500EEE210 /* data.m */, + 9B2A588023A412B400A7BB27 /* eventlink.c */, E44EBE3B1251659900645D88 /* init.c */, E4B515DC164B32E000E003AF /* introspection.c */, 5A27262510F26F1900751FBC /* io.c */, @@ -1056,6 +1090,7 @@ C9C5F80D143C1771006DC718 /* transform.c */, 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, + 9B815576234AFC9800DB5CA3 /* workgroup.c */, 6E9C6CA220F9848000EA81C0 /* yield.c */, 6EA283D01CAB93270041B2E0 /* libdispatch.codes */, 6E29394C1FB9526E00FDAC90 /* libdispatch.plist */, @@ -1162,6 +1197,7 @@ 6E8E4EC31C1A57760004F5CC /* dispatch_after.c */, 92F3FE8F1BEC686300025962 /* dispatch_api.c */, 6E67D8D31C16C20B00FC98AC /* dispatch_apply.c */, + E4C97F04263868F800628947 /* dispatch_async_and_wait.c */, 6E9926711D01295F000CB89A /* dispatch_block.c */, 924D8EAA1C116B9F002AC2BC /* dispatch_c99.c */, 6E326AB11C224830002A6505 /* dispatch_cascade.c */, @@ -1169,6 +1205,7 @@ 6EC8DBE61E3E832C0044B652 /* dispatch_channel.c */, 6E326ADE1C23451A002A6505 /* dispatch_concur.c */, 6E326AEF1C239303002A6505 /* dispatch_context_for_key.c */, + E4C97F10263868F800628947 /* dispatch_cooperative.c */, 6E8E4EC71C1A61680004F5CC /* dispatch_data.m */, 6EC5ABE31D4436E4004F8674 /* dispatch_deadname.c */, 6E67D90D1C16CCEB00FC98AC /* dispatch_debug.c */, @@ -1177,15 +1214,18 @@ 6E326ABD1C22A577002A6505 /* dispatch_io_net.c */, 6E326ABE1C22A577002A6505 /* dispatch_io.c */, 6EDB888D1CB73BDC006776D6 /* dispatch_kevent_cancel_races.c */, - 6EC670C61E37E201004F10D6 /* dispatch_network_event_thread.c */, + E4C97F0E263868F800628947 /* dispatch_mach.c */, B63B793F1E8F004F0060C1E1 /* dispatch_no_blocks.c */, C96CE17A1CEB851600F4B8E6 /* dispatch_objc.m */, + E4C97EFF263868F800628947 /* dispatch_once.c */, 6E67D9131C17676D00FC98AC /* dispatch_overcommit.c */, 6E67D9151C1768B300FC98AC /* dispatch_pingpong.c */, + E4C97F0B263868F800628947 /* dispatch_plusplus.cpp */, 
6E326B441C239B61002A6505 /* dispatch_priority.c */, 6E326AB51C225477002A6505 /* dispatch_proc.c */, B6FA01801F0AD522004479BF /* dispatch_pthread_root_queue.c */, 6E326AB31C224870002A6505 /* dispatch_qos.c */, + E4C97F08263868F800628947 /* dispatch_qos_cf.c */, B6AE9A4A1D7F53B300AC007F /* dispatch_queue_create.c */, 6E67D9111C17669C00FC98AC /* dispatch_queue_finalizer.c */, 6E1612691C79606E006FC9A9 /* dispatch_queue_label.c */, @@ -1199,7 +1239,6 @@ 6E326AE01C234780002A6505 /* dispatch_starfish.c */, 6EE89F3D1BFAF5B000EB140D /* dispatch_state_machine.c */, 6E326B121C239431002A6505 /* dispatch_suspend_timer.c */, - 6E326AD81C233209002A6505 /* dispatch_sync_gc.m */, 6E326AD91C233209002A6505 /* dispatch_sync_on_main.c */, 6E326B131C239431002A6505 /* dispatch_timer_bit.c */, 6E326B151C239431002A6505 /* dispatch_timer_set_time.c */, @@ -1211,8 +1250,16 @@ 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */, 6E326AB71C225FCA002A6505 /* dispatch_vnode.c */, B68330BC1EBCF6080003E71C /* dispatch_wl.c */, + E4C97F0F263868F800628947 /* dispatch_workloop.c */, 6E67D9171C17BA7200FC98AC /* nsoperation.m */, + E4C97F0C263868F800628947 /* os_eventlink_empty.c */, + E4C97F06263868F800628947 /* os_eventlink.c */, 6E4FC9D11C84123600520351 /* os_venture_basic.c */, + E4C97F07263868F800628947 /* os_workgroup_basic.c */, + E4C97F0D263868F800628947 /* os_workgroup_empty.c */, + E4C97F09263868F800628947 /* os_workgroup_empty2.c */, + E4C97F0A263868F800628947 /* os_workgroup_entitled.c */, + E4C97F05263868F800628947 /* os_workgroup_multilang.c */, B6AE9A561D7F53C100AC007F /* perf_async_bench.m */, B6AE9A581D7F53CB00AC007F /* perf_bench.m */, 6EC670C71E37E201004F10D6 /* perf_mach_async.c */, @@ -1241,7 +1288,7 @@ 9BCAF78F23AAEDEC00E4F685 /* eventlink_internal.h */, ); name = "OS Project Headers"; - path = os; + path = src; sourceTree = ""; }; C6A0FF2B0290797F04C91782 /* Documentation */ = { @@ -1349,10 +1396,10 @@ 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */, 6E9955571C3AF7710071D40C /* 
venture_private.h */, 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */, + 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */, 9BA656DF236BB55000D13FAE /* workgroup_private.h */, E44A8E711805C473009FFDB6 /* voucher_private.h */, E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */, - 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */, ); name = "OS Private Headers"; path = os; @@ -1402,19 +1449,20 @@ FC7BEDAF0E83626100161930 /* Dispatch Private Headers */ = { isa = PBXGroup; children = ( - FC7BED930E8361E600161930 /* private.h */, + 9B404D6B255A191A0014912B /* apply_private.h */, + 961B99350F3E83980006BC96 /* benchmark.h */, + 6EC8DC261E3E84610044B652 /* channel_private.h */, C913AC0E143BD34800B78976 /* data_private.h */, E48AF55916E70FD9004105FF /* io_private.h */, + 2BE17C6318EA305E002CA4E8 /* layout_private.h */, + E4ECBAA415253C25002C313C /* mach_private.h */, + C90144641C73A845002638FC /* module.modulemap */, + FC7BED930E8361E600161930 /* private.h */, 96BC39BC0F3EBAB100C59689 /* queue_private.h */, - 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, - 6EC8DC261E3E84610044B652 /* channel_private.h */, FCEF047F0F5661960067401F /* source_private.h */, - E4ECBAA415253C25002C313C /* mach_private.h */, B683588A1FA77F4900AA0D58 /* time_private.h */, - C90144641C73A845002638FC /* module.modulemap */, - 961B99350F3E83980006BC96 /* benchmark.h */, + 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, - 2BE17C6318EA305E002CA4E8 /* layout_private.h */, ); name = "Dispatch Private Headers"; path = private; @@ -1424,20 +1472,20 @@ isa = PBXGroup; children = ( 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */, - FC7BED8F0E8361E600161930 /* internal.h */, - E44757D917F4572600B82CA1 /* inline_internal.h */, E4C1ED6E1263E714000D3C8B /* data_internal.h */, + E44757D917F4572600B82CA1 /* inline_internal.h */, + FC7BED8F0E8361E600161930 /* internal.h */, 5A0095A110F274B0000E2A31 /* io_internal.h 
*/, 6E4BACC91D48A89500B562AE /* mach_internal.h */, 965ECC200F3EAB71004DDD89 /* object_internal.h */, 96929D950F3EA2170041FF5D /* queue_internal.h */, 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */, + 96929D830F3EA1020041FF5D /* shims.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, + E422A0D412A557B5005E5BDB /* trace.h */, 6E9956061C3B21AA0071D40C /* venture_internal.h */, E44A8E7418066276009FFDB6 /* voucher_internal.h */, - E422A0D412A557B5005E5BDB /* trace.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, - 96929D830F3EA1020041FF5D /* shims.h */, 6E5ACCAE1D3BF27F007DA2B4 /* event */, 6EF0B2661BA8C43D007FA4F6 /* firehose */, FC1832A0109923B3003403D5 /* shims */, @@ -1480,7 +1528,6 @@ 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, 9B9DB6F9234ECE92003F962B /* workgroup_interval.h in Headers */, - E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, 6EC8DC271E3E84610044B652 /* channel_private.h in Headers */, FC7BED9A0E8361E600161930 /* queue.h in Headers */, 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */, @@ -1489,37 +1536,43 @@ E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, - 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, - E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, C90144651C73A8A3002638FC /* module.modulemap in Headers */, - E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, 5AAB45C610D30D0C004407EA /* data.h in Headers */, - 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */, + E4053A5D26EAF12D00362F72 /* workgroup_base.h in Headers */, 96032E4D0F5CC8D100241C5F /* time.h in Headers */, FC7BEDA20E8361E600161930 /* private.h in Headers */, 
E4D76A9318E325D200B1F98B /* block.h in Headers */, 9BA656E4236BB55000D13FAE /* workgroup_private.h in Headers */, - 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */, C913AC0F143BD34800B78976 /* data_private.h in Headers */, 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */, + 9B404D6C255A191A0014912B /* apply_private.h in Headers */, C90144661C73A9F6002638FC /* module.modulemap in Headers */, FCEF04800F5661960067401F /* source_private.h in Headers */, - F7DC045B2060BBBE00C90737 /* target.h in Headers */, + 9BA656E6236BB56700D13FAE /* workgroup_interval_private.h in Headers */, 961B99360F3E83980006BC96 /* benchmark.h in Headers */, - FC7BED9E0E8361E600161930 /* internal.h in Headers */, + E4053A5A26EAF06C00362F72 /* workgroup_object_private.h in Headers */, B609581E221DFA2A00F39D1F /* workloop.h in Headers */, 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */, 9B3713F623D24594001C5C88 /* clock.h in Headers */, + E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, + E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, + E454569314746F1B00106147 /* object_private.h in Headers */, 965ECC210F3EAB71004DDD89 /* object_internal.h in Headers */, + FC7BED9E0E8361E600161930 /* internal.h in Headers */, + E44A8E7518066276009FFDB6 /* voucher_internal.h in Headers */, 96929D960F3EA2170041FF5D /* queue_internal.h in Headers */, + E4630253176162D400E11F4C /* atomic_sfb.h in Headers */, + E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */, FC0B34790FA2851C0080FFA0 /* source_internal.h in Headers */, 9BCAF79623AAEDF700E4F685 /* workgroup_internal.h in Headers */, + 6E393F981BD60F8D005A551E /* firehose_internal.h in Headers */, + 6EFBDA4B1D61A0D600282887 /* priority.h in Headers */, + F7DC045B2060BBBE00C90737 /* target.h in Headers */, 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */, E4C1ED6F1263E714000D3C8B /* data_internal.h in Headers */, - E44A8E721805C473009FFDB6 /* voucher_private.h in Headers */, 
5A0095A210F274B0000E2A31 /* io_internal.h in Headers */, FC1832A8109923C7003403D5 /* tsd.h in Headers */, 6EA793891D458A5800929B1B /* event_config.h in Headers */, @@ -1540,10 +1593,8 @@ 6ED64B571BBD8A3B00C35F4D /* firehose_inline_internal.h in Headers */, E4128ED613BA9A1700ABB2CB /* hw_config.h in Headers */, 9BCAF79423AAEDED00E4F685 /* eventlink_internal.h in Headers */, - E454569314746F1B00106147 /* object_private.h in Headers */, B683588F1FA77F5A00AA0D58 /* time_private.h in Headers */, 6E5662E11F8C2E3E00BC2474 /* workqueue_internal.h in Headers */, - E4EB4A2714C35ECE00AA0FA9 /* object.h in Headers */, E48AF55A16E70FD9004105FF /* io_private.h in Headers */, E4ECBAA515253C25002C313C /* mach_private.h in Headers */, 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */, @@ -1577,6 +1628,7 @@ 9BFD342F23C94F6D00B08420 /* eventlink_private.h in Headers */, E43B88402241F19000215272 /* voucher_internal.h in Headers */, E43B88412241F19000215272 /* module.modulemap in Headers */, + E4053A6226EAF1B000362F72 /* workgroup_internal.h in Headers */, E43B88422241F19000215272 /* atomic_sfb.h in Headers */, E43B88432241F19000215272 /* data.h in Headers */, E43B88442241F19000215272 /* firehose_internal.h in Headers */, @@ -1584,6 +1636,7 @@ E43B88462241F19000215272 /* private.h in Headers */, E43B88472241F19000215272 /* block.h in Headers */, E43B88482241F19000215272 /* data_private.h in Headers */, + 9B404DC0255A1E7D0014912B /* apply_private.h in Headers */, E43B88492241F19000215272 /* queue_private.h in Headers */, E43B884A2241F19000215272 /* module.modulemap in Headers */, 9BE5254D238747F90041C2A0 /* workgroup_interval_private.h in Headers */, @@ -1600,6 +1653,7 @@ E43B88522241F19000215272 /* source_internal.h in Headers */, E43B88532241F19000215272 /* semaphore_internal.h in Headers */, E43B88542241F19000215272 /* data_internal.h in Headers */, + E4053A5C26EAF07900362F72 /* workgroup_object_private.h in Headers */, E43B88552241F19000215272 /* voucher_private.h in 
Headers */, E43B88562241F19000215272 /* io_internal.h in Headers */, E43B88572241F19000215272 /* tsd.h in Headers */, @@ -1626,6 +1680,7 @@ E43B886C2241F19000215272 /* object.h in Headers */, E43B886D2241F19000215272 /* io_private.h in Headers */, E43B886E2241F19000215272 /* mach_private.h in Headers */, + E4053A5F26EAF16700362F72 /* clock.h in Headers */, E43B886F2241F19000215272 /* allocator_internal.h in Headers */, E43B88702241F19000215272 /* introspection_internal.h in Headers */, ); @@ -1639,12 +1694,16 @@ 6EC8DC281E3E847A0044B652 /* channel_private.h in Headers */, E49F24AB125D57FA0057C971 /* dispatch.h in Headers */, E49F24AC125D57FA0057C971 /* base.h in Headers */, + E4053A6326EAF25500362F72 /* mach_internal.h in Headers */, 6E5ACCBB1D3C4D0E007DA2B4 /* event_internal.h in Headers */, 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */, 9BE3E57723CE62E9006FE059 /* workgroup_interval.h in Headers */, 9BE3E57823CE62E9006FE059 /* workgroup_object.h in Headers */, + E4053A6526EAF27A00362F72 /* target.h in Headers */, 9BE3E57923CE62E9006FE059 /* workgroup_base.h in Headers */, + E4053A5E26EAF16600362F72 /* clock.h in Headers */, 9BE3E57A23CE62E9006FE059 /* workgroup.h in Headers */, + E4053A6A26EAF4BD00362F72 /* module.modulemap in Headers */, E49F24AD125D57FA0057C971 /* object.h in Headers */, E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, @@ -1657,6 +1716,7 @@ E49F24B2125D57FA0057C971 /* once.h in Headers */, E49F24B3125D57FA0057C971 /* io.h in Headers */, 6E5662E21F8C2E4F00BC2474 /* workqueue_internal.h in Headers */, + E4053A7226EAF67D00362F72 /* module.modulemap in Headers */, E44A8E7618066276009FFDB6 /* voucher_internal.h in Headers */, E4630252176162D300E11F4C /* atomic_sfb.h in Headers */, E49F24B4125D57FA0057C971 /* data.h in Headers */, @@ -1664,11 +1724,11 @@ 9BE3E58323CE637F006FE059 /* workgroup_interval_private.h in Headers */, E49F24B6125D57FA0057C971 /* private.h in Headers 
*/, 9BE3E58423CE637F006FE059 /* workgroup_private.h in Headers */, + 9B404DAA255A1E6F0014912B /* apply_private.h in Headers */, E4D76A9418E325D200B1F98B /* block.h in Headers */, E49F24B7125D57FA0057C971 /* queue_private.h in Headers */, E49F24B8125D57FA0057C971 /* source_private.h in Headers */, 9BFD342D23C94F3500B08420 /* eventlink_internal.h in Headers */, - 9BE3E58523CE638D006FE059 /* (null) in Headers */, E49F24B9125D57FA0057C971 /* benchmark.h in Headers */, E49F24BA125D57FA0057C971 /* internal.h in Headers */, E49F24BC125D57FA0057C971 /* object_internal.h in Headers */, @@ -1693,6 +1753,7 @@ E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */, + E4053A6926EAF2A800362F72 /* priority.h in Headers */, 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, E4128ED713BA9A1700ABB2CB /* hw_config.h in Headers */, @@ -1701,6 +1762,8 @@ E4EB4A2814C35ECE00AA0FA9 /* object.h in Headers */, E4ECBAA615253D17002C313C /* mach_private.h in Headers */, E48AF55B16E72D44004105FF /* io_private.h in Headers */, + E4053A5B26EAF07700362F72 /* workgroup_object_private.h in Headers */, + E4053A6026EAF1A600362F72 /* workgroup_internal.h in Headers */, 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */, E43A710615783F7E0012D38D /* data_private.h in Headers */, E44F9DAD1654400E001DCD38 /* introspection_internal.h in Headers */, @@ -1713,8 +1776,13 @@ files = ( E4B515D8164B2DFB00E003AF /* introspection_private.h in Headers */, E44F9DAF16544026001DCD38 /* internal.h in Headers */, - 9BFD342E23C94F4E00B08420 /* eventlink_internal.h in Headers */, - 9BE3E58623CE63A3006FE059 /* (null) in Headers */, + E4053A6B26EAF54F00362F72 /* workgroup_base.h in Headers */, + E4053A6C26EAF55000362F72 /* workgroup_parallel.h in Headers */, + E4053A6D26EAF55000362F72 /* workgroup.h in Headers */, + E4053A6E26EAF55000362F72 /* 
workgroup_interval.h in Headers */, + E4053A6F26EAF55000362F72 /* workgroup_object.h in Headers */, + E4053A7026EAF55000362F72 /* object.h in Headers */, + E4053A7126EAF55000362F72 /* clock.h in Headers */, E421E5F91716ADA10090DC9B /* introspection.h in Headers */, 6E5662E31F8C2E5100BC2474 /* workqueue_internal.h in Headers */, E44F9DB216544032001DCD38 /* object_internal.h in Headers */, @@ -1722,9 +1790,11 @@ 6ED64B531BBD8A2300C35F4D /* firehose_buffer_internal.h in Headers */, E44F9DB51654403F001DCD38 /* source_internal.h in Headers */, E44F9DB41654403B001DCD38 /* semaphore_internal.h in Headers */, + E4053A6726EAF2A000362F72 /* time.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */, 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, + E4053A6826EAF2A700362F72 /* priority.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, E44F9DBE1654405B001DCD38 /* tsd.h in Headers */, @@ -1738,12 +1808,12 @@ E44F9DBC1654405B001DCD38 /* perfmon.h in Headers */, E44F9DBF165440EF001DCD38 /* config.h in Headers */, E44A8E7718066276009FFDB6 /* voucher_internal.h in Headers */, + E4053A6426EAF25600362F72 /* mach_internal.h in Headers */, E44F9DB616544043001DCD38 /* trace.h in Headers */, E44F9DB916544056001DCD38 /* getprogname.h in Headers */, E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */, E44F9DBA1654405B001DCD38 /* hw_config.h in Headers */, - E44F9DC116544115001DCD38 /* object_private.h in Headers */, - E44F9DC016544115001DCD38 /* object.h in Headers */, + E4053A6626EAF27B00362F72 /* target.h in Headers */, E44F9DAE16544022001DCD38 /* allocator_internal.h in Headers */, E44F9DAB16543F94001DCD38 /* introspection_internal.h in Headers */, ); diff --git a/man/dispatch_group_create.3 b/man/dispatch_group_create.3 index fc98fb09b..954df2117 100644 --- a/man/dispatch_group_create.3 +++ 
b/man/dispatch_group_create.3 @@ -180,5 +180,5 @@ In this case, the behavior is deterministic: a waiting thread will wake up until the newly submitted blocks have also finished. .Pp All of the foregoing also applies to -.Fn dispath_group_notify +.Fn dispatch_group_notify as well, with "block to be submitted" substituted for "waiting thread". diff --git a/os/firehose_buffer_private.h b/os/firehose_buffer_private.h index 2674a26dc..7ee0541ba 100644 --- a/os/firehose_buffer_private.h +++ b/os/firehose_buffer_private.h @@ -97,9 +97,11 @@ static inline const uint8_t * _firehose_tracepoint_reader_init(firehose_chunk_t fc, const uint8_t **endptr) { const uint8_t *start = fc->fc_data; - const uint8_t *end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + const uint8_t *end; - if (end > fc->fc_start + FIREHOSE_CHUNK_SIZE) { + if (fc->fc_pos.fcp_next_entry_offs <= FIREHOSE_CHUNK_SIZE) { + end = fc->fc_start + fc->fc_pos.fcp_next_entry_offs; + } else { end = start; } *endptr = end; diff --git a/os/voucher_private.h b/os/voucher_private.h index 1211c7ac6..3e72c919a 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -24,6 +24,9 @@ #if __APPLE__ #include #include + +#include +#define OS_VOUCHER_TSD_KEY __PTK_LIBDISPATCH_KEY8 #endif #if __has_include() #include @@ -101,12 +104,41 @@ OS_OBJECT_DECL_CLASS(voucher); * @result * The previously adopted voucher object. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT_NEEDS_RELEASE OS_NOTHROW voucher_t _Nullable voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); +/*! + * @function voucher_needs_adopt + * + * @abstract + * An inline check to determine if the input voucher matches the one + * on the current thread. This can be used to shortcircuit calls to + * voucher_adopt() and avoid a cross library jump. If this function returns + * true, then the client should make sure to follow up with a voucher_adopt() + * call. 
+ * + * This check must only be in code that ships with the operating system since + * the TSD key assignment is not ABI. + * + * @param voucher + * The input voucher being tested + */ + +SPI_AVAILABLE(macos(12.0), ios(15.0)) +__header_always_inline bool +voucher_needs_adopt(voucher_t _Nullable voucher) +{ +#if __APPLE__ + if (_pthread_has_direct_tsd()) { + return (((void *) voucher) != _pthread_getspecific_direct(OS_VOUCHER_TSD_KEY)); + } +#endif + return true; +} + /*! * @function voucher_copy * @@ -117,7 +149,7 @@ voucher_adopt(voucher_t _Nullable voucher OS_OBJECT_CONSUMED); * @result * The currently adopted voucher object. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); @@ -136,7 +168,7 @@ voucher_copy(void); * @result * A copy of the currently adopted voucher object, with importance removed. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy_without_importance(void); @@ -162,7 +194,7 @@ voucher_copy_without_importance(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_replace_default_voucher(void); @@ -180,7 +212,7 @@ voucher_replace_default_voucher(void); * * CAUTION: Do NOT use this SPI without contacting the Darwin Runtime team. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_NOTHROW void voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); @@ -264,7 +296,7 @@ voucher_decrement_importance_count4CF(voucher_t _Nullable voucher); * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. 
*/ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -345,7 +377,7 @@ dispatch_block_create_with_voucher(dispatch_block_flags_t flags, * When not building with Objective-C ARC, must be released with a -[release] * message or the Block_release() function. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL5 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_block_t @@ -363,7 +395,7 @@ dispatch_block_create_with_voucher_and_qos_class(dispatch_block_flags_t flags, * @abstract * Deprecated, do not use, will abort process if called. */ -API_DEPRECATED("removed SPI", \ +SPI_DEPRECATED("removed SPI", \ macos(10.11,10.13), ios(9.0,11.0), watchos(2.0,4.0), tvos(9.0,11.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW @@ -397,7 +429,7 @@ dispatch_queue_create_with_accounting_override_voucher( * The newly created voucher object or NULL if the message was not carrying a * mach voucher. */ -API_AVAILABLE(macos(10.10), ios(8.0)) +SPI_AVAILABLE(macos(10.10), ios(8.0)) OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_create_with_mach_msg(mach_msg_header_t *msg); @@ -444,7 +476,7 @@ voucher_create_with_mach_msg(mach_msg_header_t *msg); * The offset of the first byte in the buffer following the formatted voucher * representation. 
*/ -API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +SPI_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW DISPATCH_COLD size_t voucher_kvoucher_debug(mach_port_t task, mach_port_name_t voucher, char *buf, @@ -479,7 +511,7 @@ struct proc_persona_info; * or the persona identifier of the current process * or PERSONA_ID_NONE */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW uid_t voucher_get_current_persona(void); @@ -502,7 +534,7 @@ voucher_get_current_persona(void); * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_originator_info( @@ -526,12 +558,23 @@ voucher_get_current_persona_originator_info( * 0 on success: currently adopted voucher has a PERSONA_TOKEN * -1 on failure: persona_info is untouched/uninitialized */ -API_AVAILABLE(macos(10.14), ios(9.2)) +SPI_AVAILABLE(macos(10.15), ios(9.2)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 int voucher_get_current_persona_proximate_info( struct proc_persona_info *persona_info); +/*! + * @function voucher_process_can_use_arbitrary_personas + * + * @abstract + * Returns true if the current process is able to use arbitrary personas + */ +SPI_AVAILABLE(macos(12.0), ios(15.0)) +OS_VOUCHER_EXPORT OS_WARN_RESULT +bool +voucher_process_can_use_arbitrary_personas(void); + /*! 
* @function voucher_copy_with_persona_mach_voucher * @@ -578,7 +621,7 @@ voucher_copy_with_persona_mach_voucher( * KERN_RESOURCE_SHORTAGE: mach voucher creation failed due to * lack of free space */ -API_AVAILABLE(macos(10.14), ios(12)) +SPI_AVAILABLE(macos(10.15), ios(12)) OS_VOUCHER_EXPORT OS_WARN_RESULT OS_NOTHROW OS_NONNULL1 kern_return_t mach_voucher_persona_self(mach_voucher_t *persona_mach_voucher); diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h index 79d1c4fb9..48ddc7301 100644 --- a/os/workgroup_interval_private.h +++ b/os/workgroup_interval_private.h @@ -1,7 +1,7 @@ #ifndef __OS_WORKGROUP_INTERVAL_PRIVATE__ #define __OS_WORKGROUP_INTERVAL_PRIVATE__ -#ifndef __OS_WORKGROUP_INDIRECT__ +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ #error "Please #include instead of this file directly." #include // For header doc #endif @@ -38,6 +38,8 @@ OS_ENUM(os_workgroup_interval_type, uint16_t, OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER, OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY, OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA, + + OS_WORKGROUP_INTERVAL_TYPE_ARKIT, ); /* @@ -46,9 +48,13 @@ OS_ENUM(os_workgroup_interval_type, uint16_t, * @abstract * Specifies that the os_workgroup_interval_t to be created should be of a * specialized type. These types should only be specified when creating an - * os_workgroup_interval_t using the os_workgroup_interval_create API - using it - * with any other workgroup creation API will result in an error at creation - * time. + * os_workgroup_interval_t using the os_workgroup_interval_create or + * os_workgroup_interval_create_with_workload_id APIs - using it with any other + * workgroup creation API will result in an error at creation time. + * + * When used with os_workgroup_interval_create_with_workload_id, the type + * specified via this attribute must match the one configured by the system for + * the provided workload identifier (if that identifier is known). 
* * Setting type OS_WORKGROUP_INTERVAL_TYPE_DEFAULT on an os_workgroup_interval_t * is a no-op. @@ -59,9 +65,11 @@ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_WORKGROUP_EXPORT int os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, - os_workgroup_interval_type_t type); + os_workgroup_interval_type_t type); /* + * @function os_workgroup_interval_create + * * @abstract * Creates an os_workgroup_interval_t with the specified name and attributes. * This object tracks a repeatable workload characterized by a start time, end @@ -96,6 +104,76 @@ os_workgroup_interval_t _Nullable os_workgroup_interval_create(const char * _Nullable name, os_clockid_t clock, os_workgroup_attr_t _Nullable attr); +/* + * @function os_workgroup_interval_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_interval_t with the specified name and workload + * identifier. + * This object tracks a repeatable workload characterized by a start time, end + * time and targeted deadline. Example use cases include audio and graphics + * rendering workloads. + * + * The newly created os_workgroup_interval_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_interval_t + * implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * If the specified identifier is known, it must refer to a workload configured + * as being of interval type, or this function will return NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. 
+ * + * @param clockid + * The clockid in which timestamps passed to the os_workgroup_interval_start() + * and os_workgroup_interval_update() functions are specified. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * The interval type specified by the attributes will be used as a fallback in + * case the provided workload identifier is unknown. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * or explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). 
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_interval_t _Nullable +os_workgroup_interval_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_clockid_t clock, + os_workgroup_attr_t _Nullable attr); + /* This SPI is for use by Audio Toolbox only. This function returns a reference * which is the responsibility of the caller to manage. */ diff --git a/os/workgroup_object_private.h b/os/workgroup_object_private.h index c0c263d65..ec7ebee71 100644 --- a/os/workgroup_object_private.h +++ b/os/workgroup_object_private.h @@ -1,7 +1,7 @@ #ifndef __OS_WORKGROUP_OBJECT_PRIVATE__ #define __OS_WORKGROUP_OBJECT_PRIVATE__ -#ifndef __OS_WORKGROUP_INDIRECT__ +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ #error "Please #include instead of this file directly." #include // For header doc #endif @@ -67,7 +67,6 @@ int os_workgroup_attr_set_flags(os_workgroup_attr_t wga, os_workgroup_attr_flags_t flags); - /*! * @function os_workgroup_create * @@ -94,7 +93,174 @@ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED os_workgroup_t _Nullable os_workgroup_create(const char * _Nullable name, - os_workgroup_attr_t _Nullable wga); + os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id + * + * @abstract + * Creates an os_workgroup_t with the specified name and workload identifier. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. 
+ * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @param wga + * The requested set of os_workgroup_t attributes. NULL is to be specified for + * the default set of attributes. By default, a workgroup created with workload + * identifier is nonpropagating with asynchronous work and differentiated from + * other threads in the process (see os_workgroup_attr_flags_t). + * Currently NULL or the default set of attributes are the only valid + * attributes for this function. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wga` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and attributes, applied in order: + * - If the provided attributes are NULL or equal to the default set of + * attributes, no parameters are considered to be explicitly specified via + * attribute. + * - If the provided workload identifier is known, and the provided attributes + * explicitly specify a parameter that is also configured by the identifier, + * the two parameter values must match or this function will fail and return + * an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters specified + * via the provided attributes will be used as a fallback. + * - If a given parameter is neither configured by a known workload identifier + * nor explicitly specified via an attribute, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). 
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id(const char * _Nullable name, + const char *workload_id, os_workgroup_attr_t _Nullable wga); + +/*! + * @function os_workgroup_create_with_workload_id_and_port + * + * @abstract + * Create an os_workgroup_t object with the specified name and workload + * identifier from a send right returned by a previous call to + * os_workgroup_copy_port, potentially in a different process. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @param mach_port + * The send right to create the workgroup from. No reference is consumed + * on the specified send right. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `mach_port` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and send right, applied in order: + * - If the provided workload identifier is known, and the provided send right + * references a workgroup that was created with a parameter that is also + * configured by the identifier, the parameter value configured by the + * identifier will be used. 
For certain parameters such as the kernel + * work_interval type underlying a workgroup interval type, it is required + * that the two parameter values must match, or this function will fail and + * return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the workgroup referenced by the provided send right are used. + * - If a given parameter is neither configured by a known workload identifier + * nor was used to create the workgroup referenced by the provided send right, + * a system-dependent fallback value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_port(const char * _Nullable name, + const char *workload_id, mach_port_t mach_port); + +/*! + * @function os_workgroup_create_with_workload_id_and_workgroup + * + * @abstract + * Create a new os_workgroup object with the specified name and workload + * identifier from an existing os_workgroup. + * + * The newly created os_workgroup_t has no initial member threads - in + * particular the creating thread does not join the os_workgroup_t implicitly. + * + * @param name + * A client specified string for labelling the workgroup. This parameter is + * optional and can be NULL. + * + * @param workload_id + * A system-defined workload identifier string determining the configuration + * parameters to apply to the workgroup and its member threads. + * Must not be NULL. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. 
+ * + * @param wg + * The existing workgroup to create a new workgroup object from. + * See discussion for the detailed rules used to combine the information + * specified by the `workload_id` and `wg` arguments. + * + * @discussion + * Rules used for resolution of configuration parameters potentially specified + * by both workload identifier and existing workgroup, applied in order: + * - If the provided workload identifier is known, and the provided workgroup + * was created with a parameter that is also configured by the identifier, + * the parameter value configured by the identifier will be used. For certain + * parameters such as the kernel work_interval type underlying a workgroup + * interval type, it is required that the two parameter values must match, or + * this function will fail and return an error. + * - If the provided workload identifier is known, the parameters configured by + * the identifier will be used. + * - If the provided workload identifier is unknown, the parameters used to + * create the provided workgroup will be used. + * - If a given parameter is neither configured by a known workload identifier + * nor was used to create the provided workgroup, a system-dependent fallback + * value will be used. + * + * @result + * The newly created workgroup object, or NULL if invalid arguments were + * specified (in which case errno is also set). 
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED +os_workgroup_t _Nullable +os_workgroup_create_with_workload_id_and_workgroup(const char * _Nullable name, + const char *workload_id, os_workgroup_t wg); /* To be deprecated once coreaudio adopts */ #define OS_WORKGROUP_ATTR_INITIALIZER OS_WORKGROUP_ATTR_INITIALIZER_DEFAULT diff --git a/os/workgroup_private.h b/os/workgroup_private.h index 961908d87..255fd5079 100644 --- a/os/workgroup_private.h +++ b/os/workgroup_private.h @@ -3,15 +3,15 @@ #ifndef __DISPATCH_BUILDING_DISPATCH__ -#ifndef __OS_WORKGROUP_INDIRECT__ -#define __OS_WORKGROUP_INDIRECT__ -#endif /* __OS_WORKGROUP_INDIRECT__ */ +#ifndef __OS_WORKGROUP_PRIVATE_INDIRECT__ +#define __OS_WORKGROUP_PRIVATE_INDIRECT__ +#endif /* __OS_WORKGROUP_PRIVATE_INDIRECT__ */ #include #include #include -#undef __OS_WORKGROUP_INDIRECT__ +#undef __OS_WORKGROUP_PRIVATE_INDIRECT__ #endif /* __DISPATCH_BUILDING_DISPATCH__ */ #endif /* __OS_WORKGROUP_PRIVATE__ */ diff --git a/private/apply_private.h b/private/apply_private.h new file mode 100644 index 000000000..195e5a4de --- /dev/null +++ b/private/apply_private.h @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_APPLY_PRIVATE__ +#define __DISPATCH_APPLY_PRIVATE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +__BEGIN_DECLS + +DISPATCH_ASSUME_NONNULL_BEGIN +/*! + * @typedef dispatch_apply_attr_s dispatch_apply_attr_t + * + * @abstract + * Pointer to an opaque structure for describing the workload to be executed by + * dispatch_apply_with_attr. + * + * This struct must be initialized with dispatch_apply_attr_init before use + * and must not be copied once initialized. It must be destroyed with + * dispatch_apply_attr_destroy before going out of scope or being freed, to + * avoid leaking associated system resources. + */ +#define __DISPATCH_APPLY_ATTR_SIZE__ 64 + +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_apply_attr_s dispatch_apply_attr_s; +typedef struct dispatch_apply_attr_s *dispatch_apply_attr_t; +#else +struct dispatch_apply_attr_opaque_s { + char opaque[__DISPATCH_APPLY_ATTR_SIZE__]; +}; +typedef struct dispatch_apply_attr_opaque_s dispatch_apply_attr_s; +typedef struct dispatch_apply_attr_opaque_s *dispatch_apply_attr_t; +#endif + +/*! + * @function dispatch_apply_attr_init, dispatch_apply_attr_destroy + * + * @abstract + * Initializer and destructor functions for the attribute structure. The + * attribute structure must be initialized before calling any setters on it. + * + * Every call to dispatch_apply_attr_init must be paired with a corresponding + * call to dispatch_apply_attr_destroy. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_apply_attr_init(dispatch_apply_attr_t attr); + +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 +void +dispatch_apply_attr_destroy(dispatch_apply_attr_t attr); + +/*! 
+ * @enum dispatch_apply_attr_entity_t + * + * @abstract + * This enum describes an entity in the hardware for which parallelism via + * dispatch_apply is being requested + */ +DISPATCH_ENUM(dispatch_apply_attr_entity, unsigned long, + DISPATCH_APPLY_ATTR_ENTITY_CPU = 1, + DISPATCH_APPLY_ATTR_ENTITY_CLUSTER = 2, +); + +/*! + * @function dispatch_apply_attr_set_parallelism + * + * @param attr + * The dispatch_apply attribute to be modified + * + * @param entity + * The named entity the requested configuration applies to. + * + * @param threads_per_entity + * The number of worker threads to be created per named entity on the system. + * + * @abstract + * Adds a request for the system to start enough worker threads such that + * threads_per_entity number of threads will share each named entity. The + * system will make a best effort to spread such worker threads evenly + * across the available entity. + * + * @notes + * At the present time, the only supported value of threads_per_entity is 1. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT +void +dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t attr, + dispatch_apply_attr_entity_t entity, size_t threads_per_entity); + +/*! + * @typedef dispatch_apply_attr_query_flags_t + * + * @abstract + * Flags that affect calls to dispatch_apply_attr_query(). + * + * @const DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE + * Modifies DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS so that it takes into + * account the current execution context. This may produce a tighter upper bound + * on the number of worker threads. If dispatch_apply_with_attr is called from + * the current execution context, it is guaranteed that the worker_index will + * not exceed the result of this query. However if the current execution context + * is changed (for example with dispatch or pthread functions) or the current + * scope is left, that guarantee will not hold. 
+ */ +DISPATCH_ENUM(dispatch_apply_attr_query_flags, unsigned long, + DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1, +); + +/*! + * @typedef dispatch_apply_attr_query_t + * + * @abstract + * Enumeration indicating which question dispatch_apply_attr_query() should answer + * about its arguments. + * + * @const DISPATCH_APPLY_ATTR_QUERY_VALID + * Query if the properties requested by this attribute are invalid or + * unsatisfiable. For example, some properties may describe how the workload will + * use certain hardware resources. On machines which lack that hardware, an + * attribute with those properties may be invalid. + * Passing an invalid attribute to dispatch_apply_with_attr will have undefined + * behaviour. + * If the attribute is valid, the query returns 1. If it is not valid, the query + * returns 0. + * + * @const DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS + * Calculates an upper bound of how many parallel worker threads + * dispatch_apply_with_attr could create when running a workload with the + * specified attribute. This will include the thread calling + * dispatch_apply_with_attr as a worker. This is an upper bound; depending on + * conditions, such as the load of other work on the system and the execution + * context where dispatch_apply_with_attr is called, fewer parallel worker + * threads may actually be created. + * + * A good use of this query is to determine the size of a working arena + * (such as preallocated memory space or other resources) appropriate for + * the maximum number of workers. This API can be used in coordination + * with the worker_index block argument in dispatch_apply_with_attr to provide + * each parallel worker thread with their own slice of the arena. 
+ * + * @const DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS + * Calculates a good guess of how many parallel worker threads + * dispatch_apply_with_attr would likely create when running a workload with + * the specified attribute. This will include the thread calling + * dispatch_apply_with_attr as a worker. This is only a guess; depending on + * conditions, dispatch_apply_with_attr may actually create more or fewer + * parallel worker threads than this value. + * + * Compared to QUERY_MAXIMUM_WORKERS, this query tries to predict the behavior + * of dispatch_apply_with_attr more faithfully. The number of parallel worker + * threads to be used may be affected by aspects of the current execution context + * like the thread's QOS class, scheduling priority, queue hierarchy, and current + * workloop; as well as transitory aspects of the system like power state and + * computational loads from other tasks. For those reasons, repeating this query + * for the same attribute may produce a different result. + */ +DISPATCH_ENUM(dispatch_apply_attr_query, unsigned long, + DISPATCH_APPLY_ATTR_QUERY_VALID DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 0, + DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 1, + DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS DISPATCH_ENUM_API_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) = 2, +); + +/*! + * @function dispatch_apply_attr_query + * + * @abstract + * Query how dispatch_apply_with_attr will respond to a certain attr, such + * as how the attr may affect its choice of how many parallel worker threads + * to use. + * + * @param attr + * The dispatch_apply attribute describing a workload + * + * @param which + * An enumeration value indicating which question this function should answer + * about its arguments. See dispatch_apply_attr_query_t for possible values and + * explanations. 
+ * + * @param flags + * Flags for the query that describe factors beyond the workload (which + * is described by the attr). See dispatch_apply_attr_query_flags_t for + * valid values. Pass 0 if no flags are needed. + * + * @return + * Returns the numerical answer to the query. See dispatch_apply_attr_query_t. + * Most types of query return 0 if the properties requested by this attribute + * are invalid or unsatisfiable. (Exceptions will be described in + * dispatch_apply_attr_query_t entries). + */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT +size_t +dispatch_apply_attr_query(dispatch_apply_attr_t attr, + dispatch_apply_attr_query_t which, + dispatch_apply_attr_query_flags_t flags); + +/*! + * @function dispatch_apply_with_attr + * + * @abstract + * Submits a block for parallel invocation, with an attribute structure + * describing the workload. + * + * @discussion + * Submits a block for parallel invocation. The system will try to use worker + * threads that match the configuration of the current thread. The system will + * try to start an appropriate number of worker threads to maximize + * throughput given the available hardware and current system conditions. An + * attribute structure that describes the nature of the workload may be passed. + * The system will use the attribute's properties to improve its scheduling + * choices, such as how many worker threads to create and how to distribute them + * across processors. + * + * This function waits for all invocations of the task block to complete before + * returning. + * + * Each invocation of the block will be passed 2 arguments: + * - the current index of iteration + * - the index of the worker thread invoking the block + * + * The worker index will be in the range [0, n) + * where n = dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, 0) + * + * Worker threads may start in any order. 
Some worker indexes within the + * permissible range may not actually be used, depending on conditions. + * Generally, one worker thread will use one worker index, but this is not + * guaranteed; worker index MAY NOT match thread one-to-one. No assumptions + * should be made about which CPU a worker runs on. Two invocations of + * the block MAY have different worker indexes even if they run on the same + * thread or the same processor. However, two invocations of the block running + * at the same time WILL NEVER have the same worker index. + * + * When this API is called inside another dispatch_apply_with_attr or + * dispatch_apply, it will execute as a serial loop. + * + * @param iterations + * The number of iterations to perform. + * + * The choice of how to divide a large workload into a number of iterations can + * have substantial effects on the performance of executing that workload. + * If the number of iterations is very small, the system may not effectively + * spread and balance the work across the available hardware. As a rough + * guideline, the number of iterations should be at least three times the maximum + * worker index. On the other hand, a workload should not be finely divided into + * a huge number of iterations, each doing only a miniscule amount of work, since + * there is a small overhead cost of accounting and invocation for each iteration. + * + * @param attr + * The dispatch_apply_attr_t describing specialized properties of the workload. + * This value can be NULL. If non-NULL, the attribute must have been initialized + * with dispatch_apply_attr_init(). + * + * If the attribute requests properties that are invalid or meaningless on this + * system, the function will have undefined behaviour. This is a programming + * error. An attribute's validity can be checked with dispatch_apply_attr_query. + * + * @param block + * The block to be invoked the specified number of iterations. + * The result of passing NULL in this parameter is undefined. 
+ */ +#ifdef __BLOCKS__ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT +void +dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t _Nullable attr, + DISPATCH_NOESCAPE void (^block)(size_t iteration, size_t worker_index)); +#endif + +/*! + * @function dispatch_apply_with_attr_f + * + * @abstract + * Submits a function for parallel invocation, with an attribute structure + * describing the workload. + * + * @discussion + * See dispatch_apply_with_attr() for details. + * + * @param iterations + * The number of iterations to perform. + * + * @param attr + * The dispatch_apply_attr_t describing specialized properties of the workload. + * This value can be NULL. If non-NULL, the attribute must have been initialized + * with dispatch_apply_attr_init(). + * + * If the attribute requests properties that are invalid or meaningless on this + * system, the function will have undefined behaviour. This is a programming + * error. An attribute's validity can be checked with dispatch_apply_attr_query. + * + * @param context + * The application-defined context parameter to pass to the function. + * + * @param work + * The application-defined function to invoke on the specified queue. The first + * parameter passed to this function is the context provided to + * dispatch_apply_with_attr_f(). The second parameter passed to this function is + * the current index of iteration. The third parameter passed to this function is + * the index of the worker thread invoking the function. + * See dispatch_apply_with_attr() for details. + * The result of passing NULL in this parameter is undefined. 
+ */ +SPI_AVAILABLE(macos(12.0), ios(15.0), tvos(15.0), watchos(8.0)) +DISPATCH_EXPORT +void +dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t _Nullable attr, + void *_Nullable context, void (*work)(void *_Nullable context, size_t iteration, size_t worker_index)); + +DISPATCH_ASSUME_NONNULL_END + +__END_DECLS +#endif /* __DISPATCH_APPLY_PRIVATE__ */ diff --git a/private/private.h b/private/private.h index b40a36c0f..e49d15c95 100644 --- a/private/private.h +++ b/private/private.h @@ -72,6 +72,7 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ diff --git a/private/queue_private.h b/private/queue_private.h index 86075d8e7..199fcaeed 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -41,10 +41,27 @@ __BEGIN_DECLS * * @constant DISPATCH_QUEUE_OVERCOMMIT * The queue will create a new thread for invoking blocks, regardless of how - * busy the computer is. + * busy the computer is. It is invalid to pass in both the + * DISPATCH_QUEUE_OVERCOMMIT as well as the DISPATCH_QUEUE_COOPERATIVE + * flags. + * + * @constant DISPATCH_QUEUE_COOPERATIVE + * The queue will not bring up threads beyond a specific limit even if + * there are pending work items on the queue. + * + * The width of the queue is determined based on the hardware the code is + * running on and may change dynamically depending on the load of the system. + * Blocking any thread working on this queue will therefore reduce the + * throughput of the queue as a whole. Work running on this queue should be + * able to make progress till completion even if just 1 thread is available to + * process this queue. + * + * It is invalid to pass in both the DISPATCH_QUEUE_OVERCOMMIT as well as the + * DISPATCH_QUEUE_COOPERATIVE flags. */ enum { DISPATCH_QUEUE_OVERCOMMIT = 0x2ull, + DISPATCH_QUEUE_COOPERATIVE = 0x4ull, }; /*! 
@@ -452,6 +469,76 @@ DISPATCH_EXPORT void _dispatch_install_thread_detach_callback(void (*cb)(void)); #endif +/* The SPIs below are for the use of the Swift Concurrency Runtime ONLY */ + +DISPATCH_OPTIONS(dispatch_swift_job_invoke_flags, uint32_t, + /*! + * @const DISPATCH_SWIFT_JOB_INVOKE_NONE + * + * No specific requirements for how the object invokes itself. + */ + DISPATCH_SWIFT_JOB_INVOKE_NONE, + + /*! + * @const DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE + * + * This swift job is invoked on a cooperative queue. It should periodically + * check dispatch_swift_job_should_yield() to determine if the object + * ought to yield the thread to other objects in the cooperative queue + */ + DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE, +); + +/*! + * @function dispatch_swift_job_should_yield() + * + * @abstract + * This function is only to be called by the Swift concurrency runtime. + * + * If this function returns true, then the currently draining object + * should reach the next safest stopping point, perform necessary cleanups, and + * return from its invocation. + * + * If more work is present, it should reenqueue itself using the + * dispatch_enqueue_swift_job SPI. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0)) +DISPATCH_EXPORT +bool +dispatch_swift_job_should_yield(void); + +/*! + * @function dispatch_async_swift_job + * + * @abstract + * This function is only to be called by the Swift concurrency runtime to + * enqueue work to run on dispatch's thread pool. + * + * @param queue + * The queue onto which to enqueue the swift object. All enqueues are + * asynchronous and do not block the thread. + * + * @param swift_job + * The swift concurrency runtime job that is to be enqueued into dispatch. This + * object needs to adhere to a specific structure and have a specific vtable + * layout that dispatch expects. 
+ * + * The refcount and lifetime of the object is managed by the enqueuer, who + * needs to make sure that it is live for the duration it is enqueued on + * the dispatch queue. + * + * The swift job can only be enqueued on a single queue at any + * given time. + * + * @param qos + * The QoS at which the object should be enqueued. + */ +SPI_AVAILABLE(macos(12.0), ios(15.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 +void +dispatch_async_swift_job(dispatch_queue_t queue, void *swift_job, + qos_class_t qos); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/private/source_private.h b/private/source_private.h index d6b7266a1..fab9b9854 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -240,6 +240,9 @@ enum { * @constant DISPATCH_VFS_NEARLOWDISK * Filesystem is nearly full (below NEARLOWDISK level). * + * @constant DISPATCH_VFS_SERVEREVENT + * Server issued a notification/warning + * * @constant DISPATCH_VFS_DESIREDDISK * Filesystem has exceeded the DESIREDDISK level * @@ -257,6 +260,7 @@ enum { DISPATCH_VFS_NOTRESPLOCK = 0x0080, DISPATCH_VFS_UPDATE = 0x0100, DISPATCH_VFS_VERYLOWDISK = 0x0200, + DISPATCH_VFS_SERVEREVENT = 0x0800, DISPATCH_VFS_QUOTA = 0x1000, DISPATCH_VFS_NEARLOWDISK = 0x2000, DISPATCH_VFS_DESIREDDISK = 0x4000, diff --git a/private/time_private.h b/private/time_private.h index 7270f8bce..e8dd1accf 100644 --- a/private/time_private.h +++ b/private/time_private.h @@ -32,6 +32,8 @@ #include // for HeaderDoc #endif +__BEGIN_DECLS + /* * @constant DISPATCH_MONOTONICTIME_NOW * A dispatch_time_t value that corresponds to the current value of the @@ -114,5 +116,7 @@ bool dispatch_time_to_nsecs(dispatch_time_t time, dispatch_clockid_t *clock, uint64_t *nsecs); +__END_DECLS + #endif diff --git a/src/BlocksRuntime/CMakeLists.txt b/src/BlocksRuntime/CMakeLists.txt new file mode 100644 index 000000000..1bed20210 --- /dev/null +++ b/src/BlocksRuntime/CMakeLists.txt @@ -0,0 +1,38 @@ + +add_library(BlocksRuntime + data.c 
+ runtime.c) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(BlocksRuntime PRIVATE + BlocksRuntime.def) + + if(NOT BUILD_SHARED_LIBS) + target_compile_definitions(BlocksRuntime PRIVATE + BlocksRuntime_STATIC) + endif() +endif() + +target_include_directories(BlocksRuntime PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}) +if(HAVE_OBJC AND CMAKE_DL_LIBS) + target_link_libraries(BlocksRuntime PUBLIC + ${CMAKE_DL_LIBS}) +endif() + +set_target_properties(BlocksRuntime PROPERTIES + POSITION_INDEPENDENT_CODE TRUE) + +add_library(BlocksRuntime::BlocksRuntime ALIAS BlocksRuntime) + +install(FILES Block.h + DESTINATION ${INSTALL_BLOCK_HEADERS_DIR}) +if(INSTALL_PRIVATE_HEADERS) + install(FILES Block_private.h + DESTINATION ${INSTALL_BLOCK_HEADERS_DIR}) +endif() +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS BlocksRuntime) +install(TARGETS BlocksRuntime + EXPORT dispatchExports + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index f71b68f45..adc989d42 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -1,310 +1,171 @@ -include(CheckCCompilerFlag) -include(SwiftSupport) -include(DTrace) +if(NOT CMAKE_SYSTEM_NAME STREQUAL Darwin) + add_subdirectory(BlocksRuntime) +endif() add_library(dispatch - allocator.c - apply.c - benchmark.c - data.c - init.c - introspection.c - io.c - mach.c - object.c - once.c - queue.c - semaphore.c - source.c - time.c - transform.c - voucher.c - shims.c - protocol.defs - provider.d - allocator_internal.h - data_internal.h - inline_internal.h - internal.h - introspection_internal.h - io_internal.h - mach_internal.h - object_internal.h - queue_internal.h - semaphore_internal.h - shims.h - source_internal.h - trace.h - voucher_internal.h - event/event.c - event/event_config.h - event/event_epoll.c - event/event_internal.h - event/event_kevent.c - event/event_windows.c - firehose/firehose_internal.h - shims/android_stubs.h - 
shims/atomic.h - shims/atomic_sfb.h - shims/getprogname.h - shims/hw_config.h - shims/lock.c - shims/lock.h - shims/perfmon.h - shims/time.h - shims/tsd.h - shims/yield.c - shims/yield.h) - -set_target_properties(dispatch - PROPERTIES - POSITION_INDEPENDENT_CODE YES) + allocator.c + apply.c + benchmark.c + data.c + init.c + introspection.c + io.c + mach.c + object.c + once.c + queue.c + semaphore.c + source.c + time.c + transform.c + voucher.c + shims.c + protocol.defs + provider.d + allocator_internal.h + data_internal.h + inline_internal.h + internal.h + introspection_internal.h + io_internal.h + mach_internal.h + object_internal.h + queue_internal.h + semaphore_internal.h + shims.h + source_internal.h + trace.h + voucher_internal.h + event/event.c + event/event_config.h + event/event_epoll.c + event/event_internal.h + event/event_kevent.c + event/event_windows.c + firehose/firehose_internal.h + shims/android_stubs.h + shims/atomic.h + shims/atomic_sfb.h + shims/getprogname.h + shims/hw_config.h + shims/lock.c + shims/lock.h + shims/perfmon.h + shims/time.h + shims/tsd.h + shims/yield.c + shims/yield.h) -if(WIN32) - target_sources(dispatch - PRIVATE - shims/generic_sys_queue.h - shims/generic_win_stubs.c - shims/generic_win_stubs.h - shims/getprogname.c) +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_sources(dispatch PRIVATE + shims/generic_sys_queue.h + shims/generic_win_stubs.c + shims/generic_win_stubs.h + shims/getprogname.c) endif() + if(DISPATCH_USE_INTERNAL_WORKQUEUE) - target_sources(dispatch - PRIVATE - event/workqueue.c - event/workqueue_internal.h) + target_sources(dispatch PRIVATE + event/workqueue.c + event/workqueue_internal.h) endif() -target_sources(dispatch - PRIVATE - block.cpp) + +target_sources(dispatch PRIVATE + block.cpp) + +if(ENABLE_DTRACE) + dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d OUTPUT_SOURCES + dispatch_dtrace_provider_headers) + target_sources(dispatch PRIVATE + ${dispatch_dtrace_provider_headers}) +endif() + 
if(HAVE_OBJC) # TODO(compnerd) split DispatchStubs.cc into a separate component for the ObjC # registration and a separate component for the swift compiler's emission of a # call to the ObjC autorelease elision entry point. - target_sources(dispatch - PRIVATE - data.m - object.m - swift/DispatchStubs.cc) + target_sources(dispatch PRIVATE + data.m + object.m + swift/DispatchStubs.cc) endif() -if(ENABLE_SWIFT) - set(swift_optimization_flags) - if(NOT CMAKE_BUILD_TYPE MATCHES Debug) - set(swift_optimization_flags -O) - endif() - # NOTE(compnerd) Today regardless of whether or not ObjC interop is enabled, - # swift will use an autoreleased return value convention for certain CF - # functions (including some that are used/related to dispatch). This means - # that the swift compiler in callers to such functions will call the function, - # and then pass the result of the function to - # objc_retainAutoreleasedReturnValue. In a context where we have ObjC interop - # disabled, we do not have access to the objc runtime so an implementation of - # objc_retainAutoreleasedReturnValue is not available. To work around this, we - # provide a shim for objc_retainAutoreleasedReturnValue in DispatchStubs.cc - # that just calls retain on the object. Once we fix the swift compiler to - # switch to a different model for handling these arguments with objc-interop - # disabled these shims can be eliminated. 
- add_library(DispatchStubs - STATIC - swift/DispatchStubs.cc) - target_include_directories(DispatchStubs - PRIVATE - ${PROJECT_SOURCE_DIR}) - set_target_properties(DispatchStubs - PROPERTIES - POSITION_INDEPENDENT_CODE YES) - if(USE_LLD_LINKER) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(use_ld_flag -use-ld=lld.exe) - else() - set(use_ld_flag -use-ld=lld) - endif() - elseif(USE_GOLD_LINKER) - if(CMAKE_HOST_SYSTEM_NAME STREQUAL Windows) - set(use_ld_flag -use-ld=gold.exe) - else() - set(use_ld_flag -use-ld=gold) - endif() - endif() +set_target_properties(dispatch PROPERTIES + POSITION_INDEPENDENT_CODE YES) - add_swift_library(swiftDispatch - CFLAGS - -fblocks - -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap - DEPENDS - module-maps - DispatchStubs - LINK_FLAGS - ${use_ld_flag} - -lDispatchStubs - -L $ - -lBlocksRuntime - -L $ - -ldispatch - $<$,$>:-lmsvcrtd> - $<$,$>>:-lmsvcrt> - MODULE_NAME - Dispatch - MODULE_LINK_NAME - swiftDispatch - MODULE_PATH - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - SOURCES - swift/Block.swift - swift/Data.swift - swift/Dispatch.swift - swift/IO.swift - swift/Private.swift - swift/Queue.swift - swift/Source.swift - swift/Time.swift - swift/Wrapper.swift - SWIFT_FLAGS - -I ${PROJECT_SOURCE_DIR} - ${swift_optimization_flags} - $<$:-Xcc> - $<$:-D_MT> - # TODO(compnerd) handle /MT builds - $<$:-Xcc> - $<$:-D_DLL> - TARGET - ${CMAKE_SWIFT_COMPILER_TARGET}) -endif() -if(ENABLE_DTRACE) - dtrace_usdt_probe(${CMAKE_CURRENT_SOURCE_DIR}/provider.d - OUTPUT_SOURCES - dispatch_dtrace_provider_headers) - target_sources(dispatch - PRIVATE - ${dispatch_dtrace_provider_headers}) +target_include_directories(dispatch PUBLIC + ${PROJECT_BINARY_DIR} + ${PROJECT_SOURCE_DIR} + ${CMAKE_CURRENT_SOURCE_DIR} + ${CMAKE_CURRENT_BINARY_DIR}) +target_include_directories(dispatch PRIVATE + ${PROJECT_SOURCE_DIR}/private) + +if(CMAKE_SYSTEM_NAME STREQUAL Windows) + target_compile_definitions(dispatch PRIVATE + 
_CRT_NONSTDC_NO_WARNINGS + _CRT_SECURE_NO_WARNINGS) +elseif(CMAKE_SYSTEM_NAME STREQUAL Android) + target_compile_options(dispatch PRIVATE + -U_GNU_SOURCE) endif() -target_include_directories(dispatch - PRIVATE - ${PROJECT_BINARY_DIR} - ${PROJECT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_BINARY_DIR} - ${PROJECT_SOURCE_DIR}/private) -target_include_directories(dispatch - SYSTEM BEFORE PRIVATE - "${BlocksRuntime_INCLUDE_DIR}") -if(WIN32) - target_compile_definitions(dispatch - PRIVATE - _CRT_NONSTDC_NO_WARNINGS) +if(DISPATCH_ENABLE_ASSERTS) + target_compile_definitions(dispatch PRIVATE + DISPATCH_DEBUG=1) endif() + if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") target_compile_options(dispatch PRIVATE /EHs-c-) + target_compile_options(dispatch PRIVATE /W3) else() target_compile_options(dispatch PRIVATE -fno-exceptions) + target_compile_options(dispatch PRIVATE -Wall) endif() -if(DISPATCH_ENABLE_ASSERTS) - target_compile_definitions(dispatch - PRIVATE - -DDISPATCH_DEBUG=1) -endif() -if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_compile_definitions(dispatch - PRIVATE - -D_CRT_SECURE_NO_WARNINGS) -elseif(CMAKE_SYSTEM_NAME STREQUAL Android) - target_compile_options(dispatch - PRIVATE - -U_GNU_SOURCE) -endif() -if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") - target_compile_options(dispatch - PRIVATE - /W3) -else() - target_compile_options(dispatch - PRIVATE - -Wall) -endif() + # FIXME(compnerd) add check for -fblocks? 
-if("${CMAKE_C_SIMULATE_ID}" STREQUAL "MSVC") - target_compile_options(dispatch - PRIVATE - -Xclang -fblocks) -else() - check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) - target_compile_options(dispatch PRIVATE -fblocks) - if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) - target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) - endif() +target_compile_options(dispatch PRIVATE -fblocks) + +check_c_compiler_flag("-momit-leaf-frame-pointer -Werror -Wall -O3" C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) +if (C_SUPPORTS_OMIT_LEAF_FRAME_POINTER) + target_compile_options(dispatch PRIVATE -momit-leaf-frame-pointer) endif() + if(LibRT_FOUND) target_link_libraries(dispatch PRIVATE RT::rt) endif() -target_link_libraries(dispatch - PRIVATE - Threads::Threads - BlocksRuntime::BlocksRuntime) +target_link_libraries(dispatch PRIVATE + Threads::Threads) +target_link_libraries(dispatch PUBLIC + BlocksRuntime::BlocksRuntime) if(CMAKE_SYSTEM_NAME STREQUAL Windows) - target_link_libraries(dispatch - PRIVATE - ShLwApi - WS2_32 - WinMM - synchronization) + target_link_libraries(dispatch PRIVATE + ShLwApi + WS2_32 + WinMM + synchronization) endif() + if(CMAKE_SYSTEM_NAME STREQUAL Darwin) - set_property(TARGET dispatch - APPEND_STRING - PROPERTY LINK_FLAGS - "-Xlinker -compatibility_version -Xlinker 1" - "-Xlinker -current_version -Xlinker ${VERSION}" - "-Xlinker -dead_strip" - "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") + set_property(TARGET dispatch APPEND_STRING PROPERTY LINK_FLAGS + "-Xlinker -compatibility_version -Xlinker 1" + "-Xlinker -current_version -Xlinker ${VERSION}" + "-Xlinker -dead_strip" + "-Xlinker -alias_list -Xlinker ${PROJECT_SOURCE_DIR}/xcodeconfig/libdispatch.aliases") endif() -dispatch_set_linker(dispatch) -install(TARGETS - dispatch - ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} - LIBRARY DESTINATION ${INSTALL_TARGET_DIR} - RUNTIME DESTINATION bin) +if(NOT CMAKE_SYSTEM_NAME 
MATCHES "Darwin|Windows") + set_target_properties(dispatch PROPERTIES INSTALL_RPATH "$ORIGIN") +endif() if(ENABLE_SWIFT) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule - ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc - DESTINATION - ${INSTALL_TARGET_DIR}/${swift_arch}) - - if(BUILD_SHARED_LIBS) - set(library_kind SHARED) - else() - set(library_kind STATIC) - endif() - - set(swiftDispatch_OUTPUT_FILE - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_${library_kind}_LIBRARY_PREFIX}swiftDispatch${CMAKE_${library_kind}_LIBRARY_SUFFIX}) - - if(CMAKE_SYSTEM_NAME STREQUAL Windows AND BUILD_SHARED_LIBS) - install(FILES - ${swiftDispatch_OUTPUT_FILE} - DESTINATION - bin) - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_IMPORT_LIBRARY_PREFIX}swiftDispatch${CMAKE_IMPORT_LIBRARY_SUFFIX} - DESTINATION - ${INSTALL_TARGET_DIR}) - else() - install(FILES - ${swiftDispatch_OUTPUT_FILE} - DESTINATION - ${INSTALL_TARGET_DIR}) - endif() - - if(NOT BUILD_SHARED_LIBS) - install(FILES - $ - DESTINATION - ${INSTALL_TARGET_DIR}) - endif() + add_subdirectory(swift) endif() +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS dispatch) +install(TARGETS dispatch + EXPORT dispatchExports + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) diff --git a/src/apply.c b/src/apply.c index 9c7d60ffd..160874f4c 100644 --- a/src/apply.c +++ b/src/apply.c @@ -21,11 +21,97 @@ #include "internal.h" typedef void (*dispatch_apply_function_t)(void *, size_t); + static char const * const _dispatch_apply_key = "apply"; #define DISPATCH_APPLY_INVOKE_REDIRECT 0x1 #define DISPATCH_APPLY_INVOKE_WAIT 0x2 +/* flags for da_dc->dc_data + * + * continuation func is a dispatch_apply_function_t (args: item) + */ +#define DA_FLAG_APPLY 0x01ul +// contin func is a dispatch_apply_attr_function_t (args: item, worker idx) +#define DA_FLAG_APPLY_WITH_ATTR 0x02ul + +#if __LP64__ +/* Our continuation allocator is a bit more performant than the 
default system + * malloc (especially with our per-thread cache), so let's use it if we can. + * On 32-bit platforms, dispatch_apply_s is bigger than dispatch_continuation_s + * so we can't use the cont allocator, but we're okay with the slight perf + * degradation there. + */ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 1 +dispatch_static_assert(sizeof(struct dispatch_apply_s) <= sizeof(struct dispatch_continuation_s), + "Apply struct should fit inside continuation struct so we can borrow the continuation allocator"); +#else // __LP64__ +#define DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR 0 +#endif // __LP64__ + +DISPATCH_ALWAYS_INLINE DISPATCH_MALLOC +static inline dispatch_apply_t +_dispatch_apply_alloc(void) +{ + dispatch_apply_t da; +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = (__typeof__(da))_dispatch_continuation_alloc(); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + da = _dispatch_calloc(1, sizeof(*da)); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + return da; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_free(dispatch_apply_t da) +{ +#if DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + _dispatch_continuation_free((dispatch_continuation_t)da); +#else // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR + free(da); +#endif // DISPATCH_APPLY_USE_CONTINUATION_ALLOCATOR +} + +static void _dispatch_apply_da_copy_attr(dispatch_apply_t, dispatch_apply_attr_t _Nullable); +static bool _dispatch_attr_is_initialized(dispatch_apply_attr_t attr); + +static void +_dispatch_apply_set_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + _dispatch_attr_apply_cluster_set(worker_index, attr->per_cluster_parallelism); + } +} + +static void +_dispatch_apply_clear_attr_behavior(dispatch_apply_attr_t _Nullable attr, size_t worker_index) +{ + if (!attr) { + return; + } + if (attr->per_cluster_parallelism > 0) { + 
_dispatch_attr_apply_cluster_clear(worker_index, attr->per_cluster_parallelism); + } +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_apply_destroy(dispatch_apply_t da) +{ +#if DISPATCH_INTROSPECTION + _dispatch_continuation_free(da->da_dc); +#endif + if (da->da_attr) { + dispatch_apply_attr_destroy(da->da_attr); + free(da->da_attr); + } + _dispatch_apply_free(da); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) @@ -33,12 +119,34 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) size_t const iter = da->da_iterations; size_t idx, done = 0; + /* workers start over time but never quit until the job is done, so + * we can allocate an index simply by incrementing + */ + uint32_t worker_index = 0; + worker_index = os_atomic_inc_orig2o(da, da_worker_index, relaxed); + + _dispatch_apply_set_attr_behavior(da->da_attr, worker_index); + idx = os_atomic_inc_orig2o(da, da_index, acquire); if (unlikely(idx >= iter)) goto out; - - // da_dc is only safe to access once the 'index lock' has been acquired - dispatch_apply_function_t const func = (void *)da->da_dc->dc_func; + /* + * da_dc is only safe to access once the 'index lock' has been acquired + * because it lives on the stack of the thread calling dispatch_apply. + * + * da lives until the last worker thread has finished (protected by + * da_thr_cnt), but da_dc only lives until the calling thread returns + * after the last work item is complete, which may be sooner than that. + * (In fact, the calling thread could do all the workitems itself and + * return before the worker threads even start.) + * + * Therefore the increment (reserving a valid workitem index from + * da_index) protects our access to da_dc. + * + * We also need an acquire barrier, and this is a good place to have one. 
+ */ + dispatch_function_t const func = da->da_dc->dc_func; void *const da_ctxt = da->da_dc->dc_ctxt; + uintptr_t apply_flags = (uintptr_t)da->da_dc->dc_data; _dispatch_perfmon_workitem_dec(); // this unit executes many items @@ -52,7 +160,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) dispatch_thread_frame_s dtf; dispatch_priority_t old_dbp = 0; if (invoke_flags & DISPATCH_APPLY_INVOKE_REDIRECT) { - dispatch_queue_t dq = da->da_dc->dc_data; + dispatch_queue_t dq = da->da_dc->dc_other; _dispatch_thread_frame_push(&dtf, dq); old_dbp = _dispatch_set_basepri(dq->dq_priority); } @@ -61,7 +169,13 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) // Striding is the responsibility of the caller. do { dispatch_invoke_with_autoreleasepool(flags, { - _dispatch_client_callout2(da_ctxt, idx, func); + if (apply_flags & DA_FLAG_APPLY) { + _dispatch_client_callout2(da_ctxt, idx, (dispatch_apply_function_t)func); + } else if (apply_flags & DA_FLAG_APPLY_WITH_ATTR) { + _dispatch_client_callout3_a(da_ctxt, idx, worker_index, (dispatch_apply_attr_function_t)func); + } else { + DISPATCH_INTERNAL_CRASH(apply_flags, "apply continuation has invalid flags"); + } _dispatch_perfmon_workitem_inc(); done++; idx = os_atomic_inc_orig2o(da, da_index, relaxed); @@ -75,21 +189,21 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) _dispatch_thread_context_pop(&apply_ctxt); - // The thread that finished the last workitem wakes up the possibly waiting - // thread that called dispatch_apply. They could be one and the same. - if (!os_atomic_sub2o(da, da_todo, done, release)) { + /* The thread that finished the last workitem wakes up the possibly waiting + * thread that called dispatch_apply. They could be one and the same. 
+ */ + if (os_atomic_sub2o(da, da_todo, done, release) == 0) { _dispatch_thread_event_signal(&da->da_event); } out: + _dispatch_apply_clear_attr_behavior(da->da_attr, worker_index); + if (invoke_flags & DISPATCH_APPLY_INVOKE_WAIT) { _dispatch_thread_event_wait(&da->da_event); _dispatch_thread_event_destroy(&da->da_event); } if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) { -#if DISPATCH_INTROSPECTION - _dispatch_continuation_free(da->da_dc); -#endif - _dispatch_continuation_free((dispatch_continuation_t)da); + _dispatch_apply_destroy(da); } } @@ -138,19 +252,25 @@ _dispatch_apply_serial(void *ctxt) dispatch_invoke_flags_t flags; size_t idx = 0; + // no need yet for _set_attr_behavior() for serial applies _dispatch_perfmon_workitem_dec(); // this unit executes many items - flags = _dispatch_apply_autorelease_frequency(dc->dc_data); + flags = _dispatch_apply_autorelease_frequency(dc->dc_other); do { dispatch_invoke_with_autoreleasepool(flags, { - _dispatch_client_callout2(dc->dc_ctxt, idx, (void*)dc->dc_func); + if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY) { + _dispatch_client_callout2(dc->dc_ctxt, idx, (dispatch_apply_function_t)dc->dc_func); + } else if ((uintptr_t)dc->dc_data & DA_FLAG_APPLY_WITH_ATTR) { + // when running serially, the only worker is worker number 0 + _dispatch_client_callout3_a(dc->dc_ctxt, idx, 0, (dispatch_apply_attr_function_t)dc->dc_func); + } else { + DISPATCH_INTERNAL_CRASH(dc->dc_data, "apply continuation has invalid flags"); + } + _dispatch_perfmon_workitem_inc(); }); } while (++idx < iter); -#if DISPATCH_INTROSPECTION - _dispatch_continuation_free(da->da_dc); -#endif - _dispatch_continuation_free((dispatch_continuation_t)da); + _dispatch_apply_destroy(da); } DISPATCH_ALWAYS_INLINE @@ -234,7 +354,7 @@ _dispatch_apply_redirect(void *ctxt) { dispatch_apply_t da = (dispatch_apply_t)ctxt; int32_t da_width = da->da_thr_cnt - 1; - dispatch_queue_t top_dq = da->da_dc->dc_data, dq = top_dq; + dispatch_queue_t top_dq = da->da_dc->dc_other, dq = 
top_dq; do { int32_t width = _dispatch_queue_try_reserve_apply_width(dq, da_width); @@ -249,9 +369,10 @@ _dispatch_apply_redirect(void *ctxt) da->da_thr_cnt -= excess; } if (!da->da_flags) { - // find first queue in descending target queue order that has - // an autorelease frequency set, and use that as the frequency for - // this continuation. + /* find first queue in descending target queue order that has + * an autorelease frequency set, and use that as the frequency for + * this continuation. + */ da->da_flags = _dispatch_queue_autorelease_frequency(dq); } dq = dq->do_targetq; @@ -267,29 +388,101 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_queue_global_t _dispatch_apply_root_queue(dispatch_queue_t dq) { + dispatch_queue_t tq = NULL; + if (dq) { while (unlikely(dq->do_targetq)) { - dq = dq->do_targetq; - } - // if the current root queue is a pthread root queue, select it - if (!_dispatch_is_in_root_queues_array(dq)) { - return upcast(dq)._dgq; + tq = dq->do_targetq; + + // If the current root is a custom pri workloop, select it. We have + // to this check here because custom pri workloops have a fake + // bottom targetq. + if (_dispatch_is_custom_pri_workloop(dq)) { + return upcast(dq)._dgq; + } + + dq = tq; } } + // if the current root queue is a pthread root queue, select it + if (dq && !_dispatch_is_in_root_queues_array(dq)) { + return upcast(dq)._dgq; + } + pthread_priority_t pp = _dispatch_get_priority(); dispatch_qos_t qos = _dispatch_qos_from_pp(pp); - return _dispatch_get_root_queue(qos ? qos : DISPATCH_QOS_DEFAULT, false); + return _dispatch_get_root_queue(qos ? 
qos : DISPATCH_QOS_DEFAULT, 0); } -DISPATCH_NOINLINE -void -dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, - void (*func)(void *, size_t)) +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count_for_cluster(dispatch_apply_attr_t _Nullable attr, dispatch_qos_t qos) +{ + size_t cluster_max = SIZE_MAX; + if (attr && attr->per_cluster_parallelism > 0) { + uint32_t rc = _dispatch_cluster_max_parallelism(qos); + if (likely(rc > 0)) { + cluster_max = rc * (uint32_t) (attr->per_cluster_parallelism); + } else { + /* if there's no cluster resource parallelism, then our return value + * is 0 which means "attr is a meaningless request" + */ + cluster_max = 0; + } + } + return cluster_max; +} + +DISPATCH_ALWAYS_INLINE +static inline size_t +_dispatch_apply_calc_thread_count(dispatch_apply_attr_t _Nullable attr, size_t nested, dispatch_qos_t qos, bool active) +{ + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + size_t thr_cnt = 0; + + if (likely(!attr)) { + /* Normal apply: Start with as many threads as the QOS class would + * allow. If we are nested inside another apply, account for the fact + * that it's calling us N times, so we need to use 1/Nth the threads + * we usually would, to stay under the useful parallelism limit. + */ + unsigned long flags = active ? DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + thr_cnt = _dispatch_qos_max_parallelism(qos, flags); + if (unlikely(nested)) { + thr_cnt = nested < thr_cnt ? thr_cnt / nested : 1; + } + } else { + /* apply_with_attr: if we are already nested, just go serial. + * We should use the minimum of, the max allowed threads for this QOS + * level, and the max useful parallel workers based on the requested + * attributes (e.g. the number of cluster level resources). + */ + if (unlikely(nested)) { + thr_cnt = 1; + } else { + unsigned long flags = active ? 
DISPATCH_MAX_PARALLELISM_ACTIVE : 0; + size_t qos_max = _dispatch_qos_max_parallelism(qos, flags); + size_t cluster_max = _dispatch_apply_calc_thread_count_for_cluster(attr, qos); + thr_cnt = MIN(qos_max, cluster_max); + } + } + return thr_cnt; +} + +static void +_dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, + dispatch_queue_t _dq, void *ctxt, dispatch_function_t func, uintptr_t da_flags) { if (unlikely(iterations == 0)) { return; } + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); size_t nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; @@ -304,46 +497,92 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, dispatch_qos_t qos = _dispatch_priority_qos(dq->dq_priority) ?: _dispatch_priority_fallback_qos(dq->dq_priority); if (unlikely(dq->do_targetq)) { - // if the queue passed-in is not a root queue, use the current QoS - // since the caller participates in the work anyway + /* if the queue passed-in is not a root queue, use the current QoS + * since the caller participates in the work anyway + */ qos = _dispatch_qos_from_pp(_dispatch_get_priority()); } - int32_t thr_cnt = (int32_t)_dispatch_qos_max_parallelism(qos, - DISPATCH_MAX_PARALLELISM_ACTIVE); - if (likely(!nested)) { - nested = iterations; + size_t thr_cnt = _dispatch_apply_calc_thread_count(attr, nested, qos, true); + if (thr_cnt == 0) { + DISPATCH_CLIENT_CRASH(attr, "attribute's properties are invalid or meaningless on this system"); + } + + /* dispatch_apply's nesting behavior is a little complicated; it tries to + * account for the multiplicative effect of the applies above it to bring + * up just the right number of total threads. 
+ * dispatch_apply_with_attr is much simpler: it just goes serial if it is + * nested at all, and it sets the nested TSD to the max value to indicate + * that we are already saturating the CPUs so any applies nested inside + * it will also go serial. + */ + size_t new_nested; + if (attr) { + new_nested = DISPATCH_APPLY_MAX; } else { - thr_cnt = nested < (size_t)thr_cnt ? thr_cnt / (int32_t)nested : 1; - nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX - ? nested * iterations : DISPATCH_APPLY_MAX; + if (likely(!nested)) { + new_nested = iterations; + } else { + /* DISPATCH_APPLY_MAX is sqrt(size_max) so we can do this + * multiplication without checking for overlow. The actual magnitude + * isn't important, it just needs to be >> ncpu. + */ + new_nested = nested < DISPATCH_APPLY_MAX && iterations < DISPATCH_APPLY_MAX + ? nested * iterations : DISPATCH_APPLY_MAX; + } } - if (iterations < (size_t)thr_cnt) { - thr_cnt = (int32_t)iterations; + + /* Notwithstanding any of the above, we should never try to start more + * threads than the number of work items. (The excess threads would have + * no work to do.) 
+ */ + if (iterations < thr_cnt) { + thr_cnt = iterations; } + struct dispatch_continuation_s dc = { .dc_func = (void*)func, .dc_ctxt = ctxt, - .dc_data = dq, + .dc_other = dq, + .dc_data = (void *)da_flags, }; - dispatch_apply_t da = (__typeof__(da))_dispatch_continuation_alloc(); - da->da_index = 0; - da->da_todo = iterations; + dispatch_apply_t da = _dispatch_apply_alloc(); + os_atomic_init(&da->da_index, 0); + os_atomic_init(&da->da_todo, iterations); da->da_iterations = iterations; - da->da_nested = nested; - da->da_thr_cnt = thr_cnt; + da->da_nested = new_nested; + da->da_thr_cnt = (int32_t)thr_cnt; + os_atomic_init(&da->da_worker_index, 0); + _dispatch_apply_da_copy_attr(da, attr); #if DISPATCH_INTROSPECTION da->da_dc = _dispatch_continuation_alloc(); - *da->da_dc = dc; + da->da_dc->dc_func = (void *) dc.dc_func; + da->da_dc->dc_ctxt = dc.dc_ctxt; + da->da_dc->dc_other = dc.dc_other; + da->da_dc->dc_data = dc.dc_data; + da->da_dc->dc_flags = DC_FLAG_ALLOCATED; #else da->da_dc = &dc; #endif da->da_flags = 0; + if (unlikely(_dispatch_is_custom_pri_workloop(dq))) { + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); + + if (_dq_state_drain_locked_by_self(dq_state)) { + // We're already draining on the custom priority workloop, don't go + // wide, just call inline serially + return _dispatch_apply_serial(da); + } else { + return dispatch_async_and_wait_f(dq, da, _dispatch_apply_serial); + } + } + if (unlikely(dq->dq_width == 1 || thr_cnt <= 1)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); } + if (unlikely(dq->do_targetq)) { if (unlikely(dq == old_dq)) { return dispatch_sync_f(dq, da, _dispatch_apply_serial); @@ -358,6 +597,21 @@ dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, _dispatch_thread_frame_pop(&dtf); } +DISPATCH_NOINLINE +void +dispatch_apply_f(size_t iterations, dispatch_queue_t _dq, void *ctxt, + void (*func)(void *, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, NULL, _dq, ctxt, 
(dispatch_function_t)func, DA_FLAG_APPLY); +} + +void +dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, void *ctxt, + void (*func)(void *, size_t, size_t)) +{ + _dispatch_apply_with_attr_f(iterations, attr, DISPATCH_APPLY_AUTO, ctxt, (dispatch_function_t)func, DA_FLAG_APPLY_WITH_ATTR); +} + #ifdef __BLOCKS__ void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) @@ -365,4 +619,117 @@ dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); } + +void +dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t attr, + void (^work)(size_t iteration, size_t worker_index)) +{ + dispatch_apply_with_attr_f(iterations, attr, work, + (dispatch_apply_attr_function_t)_dispatch_Block_invoke(work)); +} #endif + +static bool +_dispatch_attr_is_initialized(dispatch_apply_attr_t attr) +{ + return (attr->sig == DISPATCH_APPLY_ATTR_SIG) && (~(attr->guard) == (uintptr_t) attr); +} + +void +dispatch_apply_attr_init(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); + + attr->sig = DISPATCH_APPLY_ATTR_SIG; + attr->guard = ~ (uintptr_t) (attr); /* To prevent leaks from picking it up */ +} + +void +dispatch_apply_attr_destroy(dispatch_apply_attr_t _Nonnull attr) +{ + bzero(attr, sizeof(*attr)); +} + +static void +_dispatch_apply_da_copy_attr(dispatch_apply_t da, dispatch_apply_attr_t _Nullable src) +{ + if (src == NULL) { + da->da_attr = NULL; + return; + } + dispatch_apply_attr_t dst = _dispatch_calloc(1, sizeof(struct dispatch_apply_attr_s)); + dispatch_apply_attr_init(dst); + + dst->per_cluster_parallelism = src->per_cluster_parallelism; + dst->flags = src->flags; + // if there were non-POD types, we would manage them here + + da->da_attr = dst; +} + +static void +dispatch_apply_attr_set_per_cluster_parallelism(dispatch_apply_attr_t _Nonnull attr, + size_t threads_per_cluster) +{ + if 
(threads_per_cluster == 0) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "0 is an invalid threads_per_cluster value"); + } + if (threads_per_cluster > 1) { + DISPATCH_CLIENT_CRASH(threads_per_cluster, "Invalid threads_per_cluster value, only acceptable value is 1"); + } + + if (attr && !_dispatch_attr_is_initialized(attr)) { + DISPATCH_CLIENT_CRASH(attr, "dispatch_apply_attr not initialized using dispatch_apply_attr_init"); + } + + attr->per_cluster_parallelism = threads_per_cluster; +} + +void +dispatch_apply_attr_set_parallelism(dispatch_apply_attr_t _Nonnull attr, + dispatch_apply_attr_entity_t entity, size_t threads_per_entity) +{ + switch (entity) { + case DISPATCH_APPLY_ATTR_ENTITY_CPU: + if (threads_per_entity != 1) { + DISPATCH_CLIENT_CRASH(threads_per_entity, "Invalid threads_per_entity value for CPU entity"); + } + break; + case DISPATCH_APPLY_ATTR_ENTITY_CLUSTER: + return dispatch_apply_attr_set_per_cluster_parallelism(attr, threads_per_entity); + default: + DISPATCH_CLIENT_CRASH(entity, "Unknown entity"); + } +} + +size_t +dispatch_apply_attr_query(dispatch_apply_attr_t attr, + dispatch_apply_attr_query_t which, + dispatch_apply_attr_query_flags_t flags) +{ + dispatch_thread_context_t dtctxt = _dispatch_thread_context_find(_dispatch_apply_key); + size_t current_nested = dtctxt ? dtctxt->dtc_apply_nesting : 0; + dispatch_queue_t old_dq = _dispatch_queue_get_current(); + dispatch_queue_t dq = _dispatch_apply_root_queue(old_dq)->_as_dq; + dispatch_qos_t current_qos = _dispatch_priority_qos(dq->dq_priority) ?: _dispatch_priority_fallback_qos(dq->dq_priority); + + switch (which) { + case DISPATCH_APPLY_ATTR_QUERY_VALID: + return (dispatch_apply_attr_query(attr, DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS, flags) == 0 ? 
0 : 1); + case DISPATCH_APPLY_ATTR_QUERY_LIKELY_WORKERS: + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + case DISPATCH_APPLY_ATTR_QUERY_MAXIMUM_WORKERS: + if (flags & DISPATCH_APPLY_ATTR_QUERY_FLAGS_MAX_CURRENT_SCOPE) { + return _dispatch_apply_calc_thread_count(attr, current_nested, current_qos, true); + } else { + /* we SHOULD pass DISPATCH_QOS_UNSPECIFIED - the intention is "at any + * possible QOS", more exactly, "at the QOS which has highest limits". + * bsdthread_ctl_qos_max_parallelism doesn't accept unspecified, + * though, so let's say USER_INTERACTIVE assuming the highest QOS + * will be the least limited one. + * + */ + return _dispatch_apply_calc_thread_count(attr, 0, DISPATCH_QOS_USER_INTERACTIVE, false); + } + } +} diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index 759cbba75..e3578a095 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -174,6 +174,7 @@ _dispatch_muxnote_create(dispatch_unote_t du, uint32_t events) } case EVFILT_WRITE: filter = EVFILT_READ; + DISPATCH_FALLTHROUGH; case EVFILT_READ: if (fstat(fd, &sb) < 0) { return NULL; diff --git a/src/event/event_internal.h b/src/event/event_internal.h index bf5605da7..305cf931e 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -456,6 +456,34 @@ _dispatch_set_return_to_kernel(void) _dispatch_thread_setspecific(dispatch_r2k_key, (void *)1); } +DISPATCH_ALWAYS_INLINE +static inline uintptr_t +_dispatch_get_quantum_expiry_action(void) +{ + return (uintptr_t) _dispatch_thread_getspecific(dispatch_quantum_key); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_ack_quantum_expiry_action(void) +{ + return _dispatch_thread_setspecific(dispatch_quantum_key, (void *) 0); +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_set_current_dsc(void *dsc) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, dsc); +} + +DISPATCH_ALWAYS_INLINE +static inline void 
+_dispatch_clear_current_dsc(void) +{ + return _dispatch_thread_setspecific(dispatch_dsc_key, NULL); +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_clear_return_to_kernel(void) diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index b89e4d0cc..790d72408 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -559,8 +559,8 @@ _dispatch_kevent_drain(dispatch_kevent_t ke) // when the process exists but is a zombie. As a workaround, we // simulate an exit event for any EVFILT_PROC with an invalid pid. ke->flags = EV_UDATA_SPECIFIC | EV_ONESHOT | EV_DELETE; - ke->fflags = NOTE_EXIT; - ke->data = 0; + ke->fflags = NOTE_EXIT | NOTE_EXITSTATUS; + ke->data = 0; // Fake exit status _dispatch_kevent_debug("synthetic NOTE_EXIT", ke); } else { return _dispatch_kevent_print_error(ke); @@ -1402,6 +1402,11 @@ _dispatch_kevent_workloop_priority(dispatch_queue_t dq, int which, qos = DISPATCH_QOS_MAINTENANCE; } pthread_priority_t pp = _dispatch_qos_to_pp(qos); + + if (rq_pri & DISPATCH_PRIORITY_FLAG_COOPERATIVE) { + DISPATCH_INTERNAL_CRASH(rq_pri, "Waking up a kq with cooperative thread request is not supported"); + } + return pp | (rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } @@ -2024,11 +2029,24 @@ _dispatch_event_loop_cancel_waiter(dispatch_sync_context_t dsc) uint32_t kev_flags = KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS; dispatch_kevent_s ke; +again: _dispatch_kq_fill_workloop_sync_event(&ke, DISPATCH_WORKLOOP_SYNC_END, wlh, 0, dsc->dsc_waiter); if (_dispatch_kq_poll(wlh, &ke, 1, &ke, 1, NULL, NULL, kev_flags)) { _dispatch_kevent_workloop_drain_error(&ke, dsc->dsc_waiter_needs_cancel ? 0 : DISPATCH_KEVENT_WORKLOOP_ALLOW_ENOENT); + // + // quick hack for 78288114 + // + // something with DISPATCH_WORKLOOP_SYNC_FAKE is not quite right + // we can at least make the thread in the way finish the syscall + // it's trying to make with directed handoffs. + // + // it's inefficient but doesn't have a priority inversion. 
+ // + _dispatch_preemption_yield_to(dsc->dsc_waiter, 1); + goto again; + // // Our deletion attempt is opportunistic as in most cases we will find // the matching knote and break the waiter out. @@ -2435,6 +2453,9 @@ const dispatch_source_type_s _dispatch_source_type_vfs = { #if HAVE_DECL_VQ_VERYLOWDISK |VQ_VERYLOWDISK #endif +#if HAVE_DECL_VQ_SERVEREVENT + |VQ_SERVEREVENT +#endif #if HAVE_DECL_VQ_QUOTA |VQ_QUOTA #endif @@ -2571,7 +2592,7 @@ _dispatch_ios_simulator_memorypressure_init(void *context DISPATCH_UNUSED) if (!e) return; _dispatch_ios_simulator_memory_warnings_fd = open(e, O_EVTONLY); if (_dispatch_ios_simulator_memory_warnings_fd == -1) { - (void)dispatch_assume_zero(errno); + DISPATCH_INTERNAL_CRASH(errno, "Failed to create fd to simulator memory pressure file"); } } @@ -2588,8 +2609,12 @@ _dispatch_source_memorypressure_create(dispatch_source_type_t dst, dst = &_dispatch_source_type_vnode; handle = (uintptr_t)_dispatch_ios_simulator_memory_warnings_fd; + if (handle < 0) { + return DISPATCH_UNOTE_NULL; + } mask = NOTE_ATTRIB; + dispatch_unote_t du = dux_create(dst, handle, mask); if (du._du) { du._du->du_memorypressure_override = true; @@ -3261,14 +3286,18 @@ const dispatch_source_type_s _dispatch_mach_type_recv = { DISPATCH_NORETURN static void _dispatch_mach_reply_merge_evt(dispatch_unote_t du DISPATCH_UNUSED, - uint32_t flags, uintptr_t data DISPATCH_UNUSED, + uint32_t flags, uintptr_t data, pthread_priority_t pp DISPATCH_UNUSED) { if (flags & EV_VANISHED) { DISPATCH_CLIENT_CRASH(0, "Unexpected EV_VANISHED (do not destroy random mach ports)"); } - DISPATCH_INTERNAL_CRASH(flags, "Unexpected event"); +#if __LP64__ + data = (uintptr_t)(kern_return_t)data; + data |= (uintptr_t)flags << 32; +#endif + DISPATCH_INTERNAL_CRASH(data, "Unexpected event"); } const dispatch_source_type_s _dispatch_mach_type_reply = { diff --git a/src/event/event_windows.c b/src/event/event_windows.c index 3576774b2..ce322258a 100644 --- a/src/event/event_windows.c +++ 
b/src/event/event_windows.c @@ -174,27 +174,62 @@ _dispatch_muxnote_create(dispatch_unote_t du, } static void -_dispatch_muxnote_stop(dispatch_muxnote_t dmn) +_dispatch_muxnote_disarm_events(dispatch_muxnote_t dmn, + enum _dispatch_muxnote_events events) { - if (dmn->dmn_thread) { - // Keep trying to cancel ReadFile() until the thread exits - os_atomic_store(&dmn->dmn_stop, true, relaxed); - SetEvent(dmn->dmn_event); - do { - CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); - } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); - CloseHandle(dmn->dmn_thread); - dmn->dmn_thread = NULL; - } - if (dmn->dmn_threadpool_wait) { - SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); - WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, - /* fCancelPendingCallbacks */ FALSE); - CloseThreadpoolWait(dmn->dmn_threadpool_wait); - dmn->dmn_threadpool_wait = NULL; - } - if (dmn->dmn_handle_type == DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET) { - WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); + long lNetworkEvents; + dmn->dmn_events &= ~events; + switch (dmn->dmn_handle_type) { + case DISPATCH_MUXNOTE_HANDLE_TYPE_INVALID: + DISPATCH_INTERNAL_CRASH(0, "invalid handle"); + + case DISPATCH_MUXNOTE_HANDLE_TYPE_FILE: + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_PIPE: + if ((events & DISPATCH_MUXNOTE_EVENT_READ) && dmn->dmn_thread) { + // Keep trying to cancel ReadFile() until the thread exits + os_atomic_store(&dmn->dmn_stop, true, relaxed); + SetEvent(dmn->dmn_event); + do { + CancelIoEx((HANDLE)dmn->dmn_ident, /* lpOverlapped */ NULL); + } while (WaitForSingleObject(dmn->dmn_thread, 1) == WAIT_TIMEOUT); + CloseHandle(dmn->dmn_thread); + dmn->dmn_thread = NULL; + } + break; + + case DISPATCH_MUXNOTE_HANDLE_TYPE_SOCKET: + lNetworkEvents = dmn->dmn_network_events; + if (events & DISPATCH_MUXNOTE_EVENT_READ) { + lNetworkEvents &= ~FD_READ; + } + if (events & DISPATCH_MUXNOTE_EVENT_WRITE) { + lNetworkEvents &= ~FD_WRITE; + } + if (lNetworkEvents == 
dmn->dmn_network_events) { + break; + } + int iResult; + if (lNetworkEvents & (FD_READ | FD_WRITE)) { + iResult = WSAEventSelect((SOCKET)dmn->dmn_ident, + (WSAEVENT)dmn->dmn_event, lNetworkEvents); + } else { + lNetworkEvents = 0; + iResult = WSAEventSelect((SOCKET)dmn->dmn_ident, NULL, 0); + } + if (iResult != 0) { + DISPATCH_INTERNAL_CRASH(WSAGetLastError(), "WSAEventSelect"); + } + dmn->dmn_network_events = lNetworkEvents; + if (!lNetworkEvents && dmn->dmn_threadpool_wait) { + SetThreadpoolWait(dmn->dmn_threadpool_wait, NULL, NULL); + WaitForThreadpoolWaitCallbacks(dmn->dmn_threadpool_wait, + /* fCancelPendingCallbacks */ FALSE); + CloseThreadpoolWait(dmn->dmn_threadpool_wait); + dmn->dmn_threadpool_wait = NULL; + } + break; } } @@ -389,8 +424,16 @@ _dispatch_io_trigger(dispatch_muxnote_t dmn) } if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_WRITE) { _dispatch_muxnote_retain(dmn); - DWORD available = + DWORD available; + if (dmn->dmn_events & DISPATCH_MUXNOTE_EVENT_READ) { + // We can't query a pipe which has a read source open on it + // because the ReadFile() in the background thread might cause + // NtQueryInformationFile() to block + available = 1; + } else { + available = _dispatch_pipe_write_availability((HANDLE)dmn->dmn_ident); + } bSuccess = PostQueuedCompletionStatus(hPort, available, (ULONG_PTR)DISPATCH_PORT_PIPE_HANDLE_WRITE, (LPOVERLAPPED)dmn); @@ -487,8 +530,12 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) dmn = _dispatch_unote_muxnote_find(dmb, du._du->du_ident, du._du->du_filter); if (dmn) { - WIN_PORT_ERROR(); - DISPATCH_INTERNAL_CRASH(0, "muxnote updating is not supported"); + if (events & ~dmn->dmn_events) { + dmn->dmn_events |= events; + if (_dispatch_io_trigger(dmn) == FALSE) { + return false; + } + } } else { dmn = _dispatch_muxnote_create(du, events); if (!dmn) { @@ -551,9 +598,18 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) } dul->du_muxnote = NULL; - LIST_REMOVE(dmn, dmn_list); - _dispatch_muxnote_stop(dmn); - 
_dispatch_muxnote_release(dmn); + enum _dispatch_muxnote_events disarmed = 0; + if (LIST_EMPTY(&dmn->dmn_readers_head)) { + disarmed |= DISPATCH_MUXNOTE_EVENT_READ; + } + if (LIST_EMPTY(&dmn->dmn_writers_head)) { + disarmed |= DISPATCH_MUXNOTE_EVENT_WRITE; + } + _dispatch_muxnote_disarm_events(dmn, disarmed); + if (!dmn->dmn_events) { + LIST_REMOVE(dmn, dmn_list); + _dispatch_muxnote_release(dmn); + } _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); return true; diff --git a/src/event/workqueue.c b/src/event/workqueue.c index 28f167517..afc82c02a 100644 --- a/src/event/workqueue.c +++ b/src/event/workqueue.c @@ -243,7 +243,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED) int i, target_runnable = (int)dispatch_hw_config(active_cpus); foreach_qos_bucket_reverse(i) { dispatch_workq_monitor_t mon = &_dispatch_workq_monitors[i]; - mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), false); + mon->dq = _dispatch_get_root_queue(DISPATCH_QOS_FOR_BUCKET(i), 0); void *buf = _dispatch_calloc(WORKQ_MAX_TRACKED_TIDS, sizeof(dispatch_tid)); mon->registered_tids = buf; mon->target_runnable = target_runnable; diff --git a/src/eventlink.c b/src/eventlink.c index 4a7194b90..ffba90002 100644 --- a/src/eventlink.c +++ b/src/eventlink.c @@ -1,9 +1,22 @@ -// -// eventlink.c -// libdispatch -// -// Created by Rokhini Prabhu on 12/13/19. -// +/* + * Copyright (c) 2019-2020 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ #include "internal.h" #include diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index 6fe1a61e4..11e3f3fa8 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -764,7 +764,8 @@ firehose_client_create(firehose_buffer_t fb, firehose_token_t token, server_config.fs_mem_drain_queue, server_config.fs_io_drain_queue }; - fc->fc_mach_channel_refcnt = FIREHOSE_BUFFER_NPUSHPORTS; + + os_atomic_init(&fc->fc_mach_channel_refcnt, FIREHOSE_BUFFER_NPUSHPORTS); for (int i = 0; i < FIREHOSE_BUFFER_NPUSHPORTS; i++) { fc->fc_recvp[i] = recvp[i]; firehose_mach_port_guard(fc->fc_recvp[i], true, &fc->fc_recvp[i]); diff --git a/src/init.c b/src/init.c index ff2b5b6a0..08f790828 100644 --- a/src/init.c +++ b/src/init.c @@ -66,6 +66,7 @@ void dispatch_atfork_parent(void) { _os_object_atfork_parent(); + _voucher_atfork_parent(); } DISPATCH_EXPORT DISPATCH_NOTHROW @@ -141,9 +142,10 @@ pthread_key_t dispatch_bcounter_key; pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; +pthread_key_t dispatch_enqueue_key; +pthread_key_t os_workgroup_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE -pthread_key_t _os_workgroup_key; #if VOUCHER_USE_MACH_VOUCHER dispatch_once_t _voucher_task_mach_voucher_pred; @@ -158,6 +160,10 @@ uint64_t _voucher_unique_pid; voucher_activity_hooks_t _voucher_libtrace_hooks; dispatch_mach_t _voucher_activity_debug_channel; #endif + +dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +bool _voucher_process_can_use_arbitrary_personas = false; + #if HAVE_PTHREAD_WORKQUEUE_QOS && DISPATCH_DEBUG bool _dispatch_set_qos_class_enabled; #endif @@ -301,10 +307,12 @@ static struct dispatch_pthread_root_queue_context_s // renaming this symbol struct dispatch_queue_global_s 
_dispatch_root_queues[] = { #define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \ - ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ + (((flags) & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \ DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \ - DISPATCH_ROOT_QUEUE_IDX_##n##_QOS) -#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \ + (((flags) & DISPATCH_PRIORITY_FLAG_COOPERATIVE) ? \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_COOPERATIVE : \ + DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)) +#define _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(n, flags, ...) \ [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \ .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ @@ -315,83 +323,127 @@ struct dispatch_queue_global_s _dispatch_root_queues[] = { _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ __VA_ARGS__ \ } - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + DISPATCH_GLOBAL_OBJECT_HEADER(queue_cooperative), \ + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \ + .do_ctxt = NULL, \ + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + /* We initialize the rest of the fields in + * _dispatch_cooperative_root_queue_init_fallback */ +#define _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(n, flags, ...) \ + [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \ + .do_vtable = DISPATCH_VTABLE(queue_concurrent), \ + .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
\ + _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \ + _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \ + __VA_ARGS__ \ + } +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, 0, .dq_label = "com.apple.root.maintenance-qos", .dq_serialnum = 4, ), - _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.maintenance-qos.overcommit", .dq_serialnum = 5, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0, - .dq_label = "com.apple.root.background-qos", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.maintenance-qos.cooperative", .dq_serialnum = 6, ), - _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.background-qos.overcommit", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, 0, + .dq_label = "com.apple.root.background-qos", .dq_serialnum = 7, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0, - .dq_label = "com.apple.root.utility-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = "com.apple.root.background-qos.overcommit", .dq_serialnum = 8, ), - _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, - .dq_label = "com.apple.root.utility-qos.overcommit", + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.background-qos.cooperative", .dq_serialnum = 9, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, - .dq_label = "com.apple.root.default-qos", + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, 0, + .dq_label = "com.apple.root.utility-qos", .dq_serialnum = 10, ), - _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + .dq_label = 
"com.apple.root.utility-qos.overcommit", + .dq_serialnum = 11, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.utility-qos.cooperative", + .dq_serialnum = 12, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos", + .dq_serialnum = 13, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.default-qos.overcommit", - .dq_serialnum = 11, + .dq_serialnum = 14, + ), + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(DEFAULT, + DISPATCH_PRIORITY_FLAG_COOPERATIVE | DISPATCH_PRIORITY_FLAG_FALLBACK, + .dq_label = "com.apple.root.default-qos.cooperative", + .dq_serialnum = 15, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, 0, .dq_label = "com.apple.root.user-initiated-qos", - .dq_serialnum = 12, + .dq_serialnum = 16, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-initiated-qos.overcommit", - .dq_serialnum = 13, + .dq_serialnum = 17, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, + _DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-initiated-qos.cooperative", + .dq_serialnum = 18, + ), + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0, .dq_label = "com.apple.root.user-interactive-qos", - .dq_serialnum = 14, + .dq_serialnum = 19, ), - _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, + _DISPATCH_GLOBAL_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT, .dq_label = "com.apple.root.user-interactive-qos.overcommit", - .dq_serialnum = 15, + .dq_serialnum = 20, + ), + 
_DISPATCH_COOPERATIVE_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_COOPERATIVE, + .dq_label = "com.apple.root.user-interactive-qos.cooperative", + .dq_serialnum = 21, ), }; -unsigned long volatile _dispatch_queue_serial_numbers = - DISPATCH_QUEUE_SERIAL_NUMBER_INIT; - +__dispatch_is_array(_dispatch_root_queues); +_Static_assert(sizeof(_dispatch_root_queues) == + sizeof(struct dispatch_queue_global_s) * DISPATCH_ROOT_QUEUE_COUNT, + "_dispatch_root_queues array size mismatch"); -dispatch_queue_global_t -dispatch_get_global_queue(intptr_t priority, uintptr_t flags) -{ - dispatch_assert(countof(_dispatch_root_queues) == - DISPATCH_ROOT_QUEUE_COUNT); +const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { + DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), + .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, + .do_ctxt = NULL, + .dq_label = "com.apple.root.workloop-custom", + .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), + .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | + DISPATCH_PRIORITY_SATURATED_OVERRIDE, + .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, + .dgq_thread_pool_size = 1, +}; - if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) { - return DISPATCH_BAD_INPUT; - } - dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); -#if !HAVE_PTHREAD_WORKQUEUE_QOS - if (qos == QOS_CLASS_MAINTENANCE) { - qos = DISPATCH_QOS_BACKGROUND; - } else if (qos == QOS_CLASS_USER_INTERACTIVE) { - qos = DISPATCH_QOS_USER_INITIATED; - } -#endif - if (qos == DISPATCH_QOS_UNSPECIFIED) { - return DISPATCH_BAD_INPUT; - } - return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT); -} +unsigned long volatile _dispatch_queue_serial_numbers = + DISPATCH_QUEUE_SERIAL_NUMBER_INIT; dispatch_queue_t dispatch_get_current_queue(void) @@ -706,6 +758,17 @@ DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane, .dq_push = _dispatch_root_queue_push, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_cooperative, 
lane, + .do_type = DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE, + .do_dispose = _dispatch_object_no_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_object_no_invoke, + + .dq_activate = _dispatch_queue_no_activate, + .dq_wakeup = _dispatch_root_queue_wakeup, + .dq_push = _dispatch_root_queue_push, +); + #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_pthread_root, lane, .do_type = DISPATCH_QUEUE_PTHREAD_ROOT_TYPE, @@ -1529,6 +1592,20 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +DISPATCH_NOINLINE +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + _dispatch_get_tsd_base(); + void *u = _dispatch_get_unwind_tsd(); + if (likely(!u)) return f(ctxt, i, w); + _dispatch_set_unwind_tsd(NULL); + f(ctxt, i, w); + _dispatch_free_unwind_tsd(); + _dispatch_set_unwind_tsd(u); +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #pragma mark - diff --git a/src/inline_internal.h b/src/inline_internal.h index 0e30a10e2..a78e50277 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -48,6 +48,11 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, dispatch_mach_handler_function_t f); #endif // HAVE_MACH +typedef void (*dispatch_apply_attr_function_t)(void *, size_t, size_t); + +DISPATCH_NOTHROW void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f); + #else // !DISPATCH_USE_CLIENT_CALLOUT DISPATCH_ALWAYS_INLINE @@ -83,6 +88,13 @@ _dispatch_client_callout4(void *ctxt, dispatch_mach_reason_t reason, } #endif // HAVE_MACH +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t, size_t)) +{ + return f(ctxt, i, w); +} + #endif // !DISPATCH_USE_CLIENT_CALLOUT #pragma mark - @@ -1574,6 +1586,15 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) 
return true; } +DISPATCH_ALWAYS_INLINE +static inline +dispatch_swift_job_invoke_flags_t +_dispatch_invoke_flags_to_swift_invoke_flags(dispatch_invoke_flags_t invoke_flags) +{ + return (invoke_flags & DISPATCH_INVOKE_COOPERATIVE_DRAIN) ? + DISPATCH_SWIFT_JOB_INVOKE_COOPERATIVE : DISPATCH_SWIFT_JOB_INVOKE_NONE; +} + /* * Clears UNCONTENDED_SYNC and RECEIVED_SYNC_WAIT */ @@ -1604,6 +1625,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) #define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ os_mpsc_node_type(Q) _tl = (tail); \ os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ + _dispatch_set_enqueuer_for(_os_mpsc_tail Q); \ os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \ }) @@ -1616,6 +1638,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) } else { \ (void)os_atomic_store(_os_mpsc_head Q, (head), relaxed); \ } \ + _dispatch_clear_enqueuer(); \ }) #define os_mpsc_push_list(Q, head, tail, _o_next) ({ \ @@ -1643,17 +1666,19 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) os_mpsc_node_type(Q) _node; \ _node = os_atomic_load(__n, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)__n); \ + _node = _dispatch_wait_for_enqueuer((void **)__n, \ + (void **) _os_mpsc_tail Q); \ } \ _node; \ }) -#define os_mpsc_get_next(_n, _o_next) ({ \ +#define os_mpsc_get_next(_n, _o_next, tailp) ({ \ __typeof__(_n) __n = (_n); \ _os_atomic_basetypeof(&__n->_o_next) _node; \ _node = os_atomic_load(&__n->_o_next, dependency); \ if (unlikely(_node == NULL)) { \ - _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next); \ + _node = _dispatch_wait_for_enqueuer((void **)&__n->_o_next, \ + (void **) tailp); \ } \ _node; \ }) @@ -1666,7 +1691,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) /* to head above doesn't clobber head from concurrent enqueuer */ \ if (unlikely(!_n && \ !os_atomic_cmpxchg(_os_mpsc_tail Q, _head, NULL, release))) { \ - _n = os_mpsc_get_next(_head, _o_next); \ + _n = 
os_mpsc_get_next(_head, _o_next, _os_mpsc_tail Q); \ os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ } \ _n; \ @@ -1699,7 +1724,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) #define os_mpsc_pop_snapshot_head(head, tail, _o_next) ({ \ __typeof__(head) _head = (head), _tail = (tail), _n = NULL; \ - if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next); \ + if (_head != _tail) _n = os_mpsc_get_next(_head, _o_next, NULL); \ _n; \ }) @@ -1957,6 +1982,15 @@ _dispatch_queue_class_probe(dispatch_lane_class_t dqu) return unlikely(tail != NULL); } +extern const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue; + +DISPATCH_ALWAYS_INLINE DISPATCH_CONST +inline bool +_dispatch_is_custom_pri_workloop(dispatch_queue_t dq) +{ + return (dq->do_targetq) == (dispatch_queue_t) _dispatch_custom_workloop_root_queue._as_dq; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline bool _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) @@ -1965,14 +1999,28 @@ _dispatch_is_in_root_queues_array(dispatch_queue_class_t dqu) (dqu._dgq < _dispatch_root_queues + _DISPATCH_ROOT_QUEUE_IDX_COUNT); } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_is_cooperative(dispatch_queue_class_t dqu) +{ + return (dqu._dgq)->dq_priority & DISPATCH_PRIORITY_FLAG_COOPERATIVE; +} + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_global_t -_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit) +_dispatch_get_root_queue(dispatch_qos_t qos, uintptr_t flags) { if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) { DISPATCH_CLIENT_CRASH(qos, "Corrupted priority"); } - return &_dispatch_root_queues[2 * (qos - 1) + overcommit]; + unsigned int add_on = 0; + if (flags & DISPATCH_QUEUE_OVERCOMMIT) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT; + } else if (flags & DISPATCH_QUEUE_COOPERATIVE) { + add_on = DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE; + } + + return &_dispatch_root_queues[3 * (qos - 1) + add_on]; } 
#define _dispatch_get_default_queue(overcommit) \ @@ -2103,7 +2151,7 @@ _dispatch_set_basepri(dispatch_priority_t dq_dbp) dbp = dq_dbp & ~DISPATCH_PRIORITY_OVERRIDE_MASK; } else if (dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK) { dbp &= (DISPATCH_PRIORITY_OVERRIDE_MASK | - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + DISPATCH_PRIORITY_THREAD_TYPE_MASK); dbp |= MAX(old_dbp & DISPATCH_PRIORITY_REQUESTED_MASK, dq_dbp & DISPATCH_PRIORITY_REQUESTED_MASK); if (_dispatch_priority_fallback_qos(dq_dbp) > @@ -2218,24 +2266,29 @@ _dispatch_priority_compute_update(pthread_priority_t pp) { dispatch_assert(pp != DISPATCH_NO_PRIORITY); if (!_dispatch_set_qos_class_enabled) return 0; - // the priority in _dispatch_get_priority() only tracks manager-ness - // and overcommit, which is inherited from the current value for each update - // however if the priority had the NEEDS_UNBIND flag set we need to clear it - // the first chance we get + // the priority in _dispatch_get_priority() only tracks manager-ness and + // thread request type, which is inherited from the current value for each + // update however if the priority had the NEEDS_UNBIND flag set we need to + // clear it the first chance we get // // the manager bit is invalid input, but we keep it to get meaningful // assertions in _dispatch_set_priority_and_voucher_slow() pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; pthread_priority_t cur_priority = _dispatch_get_priority(); pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - pthread_priority_t overcommit = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; + pthread_priority_t thread_type = _PTHREAD_PRIORITY_THREAD_TYPE_MASK; + + // The thread request type only matters if we have NEEDS_UNBIND. For the + // rest, we don't consider the thread request type when deciding if we need + // to consider changing current thread's priority. 
+ if (unlikely(cur_priority & unbind)) { - // else we always need an update if the NEEDS_UNBIND flag is set - // the slow path in _dispatch_set_priority_and_voucher_slow() will + // if the NEEDS_UNBIND flag is set, we always need to update and take + // the slow path in _dispatch_set_priority_and_voucher_slow() which will // adjust the priority further with the proper overcommitness return pp ? pp : (cur_priority & ~unbind); } else { - cur_priority &= ~overcommit; + cur_priority &= ~thread_type; } if (unlikely(pp != cur_priority)) return pp; return 0; @@ -2586,7 +2639,12 @@ _dispatch_continuation_pop_inline(dispatch_object_t dou, if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq); flags &= _DISPATCH_INVOKE_PROPAGATE_MASK; if (_dispatch_object_has_vtable(dou)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, dqu); } diff --git a/src/internal.h b/src/internal.h index 9bdb9e890..d22a3ac09 100644 --- a/src/internal.h +++ b/src/internal.h @@ -36,6 +36,7 @@ #define __DISPATCH_BUILDING_DISPATCH__ #define __DISPATCH_INDIRECT__ #define __OS_WORKGROUP_INDIRECT__ +#define __OS_WORKGROUP_PRIVATE_INDIRECT__ #ifdef __APPLE__ #include @@ -195,6 +196,7 @@ typedef union { struct dispatch_io_s *_dchannel; struct dispatch_continuation_s *_dc; + struct dispatch_swift_continuation_s *_dsjc; struct dispatch_sync_context_s *_dsc; struct dispatch_operation_s *_doperation; struct dispatch_disk_s *_ddisk; @@ -241,6 +243,7 @@ upcast(dispatch_object_t dou) #include "os/eventlink_private.h" #include "os/workgroup_object_private.h" #include "os/workgroup_interval_private.h" +#include "apply_private.h" #include "queue_private.h" #include "channel_private.h" #include "workloop_private.h" @@ -497,6 +500,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void 
dispatch_atfork_child(void); #define DISPATCH_MODE_STRICT (1U << 0) #define DISPATCH_MODE_NO_FAULTS (1U << 1) +#define DISPATCH_COOPERATIVE_POOL_STRICT (1U << 2) extern uint8_t _dispatch_mode; DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD @@ -737,6 +741,14 @@ _dispatch_fork_becomes_unsafe(void) #endif #endif // !defined(DISPATCH_USE_WORKQUEUE_NARROWING) +#ifndef DISPATCH_USE_COOPERATIVE_WORKQUEUE +#if defined(WORKQ_FEATURE_COOPERATIVE_WORKQ) && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 1 +#else +#define DISPATCH_USE_COOPERATIVE_WORKQUEUE 0 +#endif +#endif + #ifndef DISPATCH_USE_PTHREAD_ROOT_QUEUES #if defined(__BLOCKS__) && defined(__APPLE__) #define DISPATCH_USE_PTHREAD_ROOT_QUEUES 1 // @@ -1030,6 +1042,15 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #define VOUCHER_USE_PERSONA 0 #endif // VOUCHER_USE_MACH_VOUCHER +#ifndef VOUCHER_USE_PERSONA_ADOPT_ANY +#if VOUCHER_USE_PERSONA && defined(BANK_PERSONA_ADOPT_ANY) && \ + DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) +#define VOUCHER_USE_PERSONA_ADOPT_ANY 1 +#else +#define VOUCHER_USE_PERSONA_ADOPT_ANY 0 +#endif +#endif + #ifndef OS_EVENTLINK_USE_MACH_EVENTLINK #if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) && __has_include() #define OS_EVENTLINK_USE_MACH_EVENTLINK 1 diff --git a/src/introspection.c b/src/introspection.c index f77ddd655..bee263917 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -256,9 +256,9 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; - if (da->da_todo) { + if (os_atomic_load2o(da, da_todo, relaxed)) { dc = da->da_dc; - dq = dc->dc_data; + dq = dc->dc_other; ctxt = dc->dc_ctxt; func = dc->dc_func; apply = true; diff --git a/src/io.c b/src/io.c index d7e04f299..fa721bdd0 100644 --- a/src/io.c +++ b/src/io.c @@ -2094,12 +2094,12 @@ _dispatch_stream_handler(void *ctx) switch (result) { 
case DISPATCH_OP_DELIVER: flags = DOP_DEFAULT; - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_DELIVER_AND_COMPLETE: flags = (flags != DOP_DEFAULT) ? DOP_DELIVER | DOP_NO_EMPTY : DOP_DEFAULT; _dispatch_operation_deliver_data(op, flags); - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_COMPLETE: if (flags != DOP_DEFAULT) { _dispatch_stream_complete_operation(stream, op); @@ -2111,7 +2111,7 @@ _dispatch_stream_handler(void *ctx) break; case DISPATCH_OP_COMPLETE_RESUME: _dispatch_stream_complete_operation(stream, op); - // Fall through + DISPATCH_FALLTHROUGH; case DISPATCH_OP_RESUME: if (_dispatch_stream_operation_avail(stream)) { stream->source_running = true; diff --git a/src/mach.c b/src/mach.c index f8a439e3d..3a39d8d9c 100644 --- a/src/mach.c +++ b/src/mach.c @@ -146,6 +146,9 @@ _dispatch_mach_create(const char *label, dispatch_queue_t q, void *context, if (unlikely(!q)) { q = _dispatch_get_default_queue(true); } else { + if (_dispatch_queue_is_cooperative(q)) { + DISPATCH_CLIENT_CRASH(q, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_retain(q); } dm->do_targetq = q; @@ -1186,6 +1189,24 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, } } if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { + if (msg->msgh_remote_port == MACH_PORT_DEAD) { + // It's possible that the remote port may have died after the + // attempt to enqueue the message timed out. In this case, the + // pseudo-receive will copy-out MOVE_SEND over the disposition and + // MACH_PORT_DEAD for the remote port name, without giving us a + // deadname ref for the send right name. + // + // When we next attempt to resend this message, we'll overwrite the + // remote port back to the channel send right. It is therefore + // crucial that we reset the disposition to COPY_SEND, since the ref + // the MOVE_SEND was referring to never actually arrived. 
+ // + // rdar://77994175 + + msg->msgh_bits &= ~((mach_msg_bits_t)MACH_MSGH_BITS_REMOTE_MASK); + msg->msgh_bits |= MACH_MSG_TYPE_COPY_SEND; + } + if (opts & MACH_SEND_NOTIFY) { _dispatch_mach_notification_set_armed(dsrr); } else { @@ -3178,7 +3199,9 @@ dispatch_mig_server(dispatch_source_t ds, size_t maxmsgsz, tmp_options = options; // XXX FIXME -- change this to not starve out the target queue for (;;) { - if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (--cnt == 0)) { + dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); + if (DISPATCH_QUEUE_IS_SUSPENDED(ds) || (dqf & DSF_CANCELED) || + (--cnt == 0)) { options &= ~MACH_RCV_MSG; tmp_options &= ~MACH_RCV_MSG; diff --git a/src/object.c b/src/object.c index 4eb49fda8..8fad3ebad 100644 --- a/src/object.c +++ b/src/object.c @@ -226,9 +226,11 @@ _dispatch_xref_dispose(dispatch_object_t dou) _dispatch_mach_xref_dispose(dou._dm); break; #endif +#if DISPATCH_COCOA_COMPAT case DISPATCH_QUEUE_RUNLOOP_TYPE: _dispatch_runloop_queue_xref_dispose(dou._dl); break; +#endif } } return _dispatch_release_tailcall(dou._os_obj); @@ -249,7 +251,7 @@ _dispatch_dispose(dispatch_object_t dou) if (unlikely(tq && tq->dq_serialnum == DISPATCH_QUEUE_SERIAL_NUMBER_WLF)) { // the workloop fallback global queue is never serviced, so redirect // the finalizer onto a global queue - tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, false)->_as_dq; + tq = _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT, 0)->_as_dq; } dx_dispose(dou._do, &allow_free); @@ -313,6 +315,10 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) if (tq == DISPATCH_TARGET_QUEUE_DEFAULT) { tq = _dispatch_get_default_queue(false); } + + if (_dispatch_queue_is_cooperative(tq)) { + DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); + } _dispatch_object_set_target_queue_inline(dou._do, tq); } diff --git a/src/object.m b/src/object.m index 59f271491..273c5fa3f 100644 --- a/src/object.m +++ b/src/object.m @@ -405,6 
+405,7 @@ - (void)_xref_dispose { #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_CLASS_IMPL(queue_pthread_root) #endif +DISPATCH_CLASS_IMPL(queue_cooperative) DISPATCH_CLASS_IMPL(queue_mgr) DISPATCH_CLASS_IMPL(queue_attr) DISPATCH_CLASS_IMPL(mach_msg) @@ -604,6 +605,18 @@ - (NSString *)debugDescription { } #endif // HAVE_MACH +#undef _dispatch_client_callout3_a +void +_dispatch_client_callout3_a(void *ctxt, size_t i, size_t w, dispatch_apply_attr_function_t f) +{ + @try { + return f(ctxt, i, w); + } + @catch (...) { + objc_terminate(); + } +} + #endif // DISPATCH_USE_CLIENT_CALLOUT #endif // USE_OBJC diff --git a/src/object_internal.h b/src/object_internal.h index 50e07ac1b..f11b9c66c 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -285,14 +285,22 @@ typedef struct dispatch_invoke_context_s { #if DISPATCH_USE_WORKQUEUE_NARROWING #define DISPATCH_THREAD_IS_NARROWING 1 -#define dispatch_with_disabled_narrowing(dic, ...) ({ \ +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ + flags |= DISPATCH_INVOKE_DISABLED_NARROWING; \ + __VA_ARGS__; \ + flags &= ~DISPATCH_INVOKE_DISABLED_NARROWING; \ +}) +#else /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ +#define dispatch_with_disabled_narrowing(dic, flags, ...) ({ \ uint64_t suspend_narrow_check = dic->dic_next_narrow_check; \ dic->dic_next_narrow_check = 0; \ __VA_ARGS__; \ dic->dic_next_narrow_check = suspend_narrow_check; \ }) +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ #else -#define dispatch_with_disabled_narrowing(dic, ...) __VA_ARGS__ +#define dispatch_with_disabled_narrowing(dic, flags, ...) __VA_ARGS__ #endif DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, @@ -346,11 +354,15 @@ DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, // The queue at the bottom of this drain is a workloop that supports // reordering. 
// + // @const DISPATCH_INVOKE_COOPERATIVE_DRAIN + // The queue at the bottom of this drain is a cooperative global queue + // DISPATCH_INVOKE_WORKER_DRAIN = 0x00010000, DISPATCH_INVOKE_REDIRECTING_DRAIN = 0x00020000, DISPATCH_INVOKE_MANAGER_DRAIN = 0x00040000, DISPATCH_INVOKE_THREAD_BOUND = 0x00080000, DISPATCH_INVOKE_WORKLOOP_DRAIN = 0x00100000, + DISPATCH_INVOKE_COOPERATIVE_DRAIN = 0x00200000, #define _DISPATCH_INVOKE_DRAIN_MODE_MASK 0x00ff0000u // Autoreleasing modes @@ -364,6 +376,10 @@ DISPATCH_OPTIONS(dispatch_invoke_flags, uint32_t, DISPATCH_INVOKE_AUTORELEASE_ALWAYS = 0x01000000, DISPATCH_INVOKE_AUTORELEASE_NEVER = 0x02000000, #define _DISPATCH_INVOKE_AUTORELEASE_MASK 0x03000000u + + // @const DISPATCH_INVOKE_DISABLED_NARROWING + // Don't check for narrowing during this invoke + DISPATCH_INVOKE_DISABLED_NARROWING = 0x4000000, ); DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, @@ -374,11 +390,12 @@ DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_OBJECT_CLUSTER = 0x00000000, // dispatch object cluster _DISPATCH_CONTINUATION_TYPE = 0x00000000, // meta-type for continuations - _DISPATCH_SEMAPHORE_TYPE = 0x00000001, // meta-type for semaphores - _DISPATCH_NODE_TYPE = 0x00000002, // meta-type for data node - _DISPATCH_IO_TYPE = 0x00000003, // meta-type for io channels - _DISPATCH_OPERATION_TYPE = 0x00000004, // meta-type for io operations - _DISPATCH_DISK_TYPE = 0x00000005, // meta-type for io disks + _DISPATCH_SWIFT_JOB_TYPE = 0x00000001, // meta-type for swift jobs + _DISPATCH_SEMAPHORE_TYPE = 0x00000002, // meta-type for semaphores + _DISPATCH_NODE_TYPE = 0x00000003, // meta-type for data node + _DISPATCH_IO_TYPE = 0x00000004, // meta-type for io channels + _DISPATCH_OPERATION_TYPE = 0x00000005, // meta-type for io operations + _DISPATCH_DISK_TYPE = 0x00000006, // meta-type for io disks _DISPATCH_QUEUE_CLUSTER = 0x00000010, // dispatch queue cluster _DISPATCH_LANE_TYPE = 0x00000011, // meta-type for lanes @@ -407,6 +424,8 @@ 
DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, DISPATCH_OPERATION_TYPE = DISPATCH_OBJECT_SUBTYPE(0, OPERATION), DISPATCH_DISK_TYPE = DISPATCH_OBJECT_SUBTYPE(0, DISK), + DISPATCH_SWIFT_JOB_TYPE = DISPATCH_OBJECT_SUBTYPE(0, SWIFT_JOB), + DISPATCH_QUEUE_SERIAL_TYPE = DISPATCH_OBJECT_SUBTYPE(1, LANE), DISPATCH_QUEUE_CONCURRENT_TYPE = DISPATCH_OBJECT_SUBTYPE(2, LANE), DISPATCH_QUEUE_GLOBAL_ROOT_TYPE = DISPATCH_OBJECT_SUBTYPE(3, LANE) | @@ -421,6 +440,8 @@ DISPATCH_OPTIONS(dispatch_object_flags, unsigned long, _DISPATCH_QUEUE_BASE_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_QUEUE_NETWORK_EVENT_TYPE = DISPATCH_OBJECT_SUBTYPE(8, LANE) | _DISPATCH_QUEUE_BASE_TYPEFLAG, + DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE= DISPATCH_OBJECT_SUBTYPE(9, LANE) | + _DISPATCH_QUEUE_ROOT_TYPEFLAG | _DISPATCH_NO_CONTEXT_TYPEFLAG, DISPATCH_WORKLOOP_TYPE = DISPATCH_OBJECT_SUBTYPE(0, WORKLOOP) | _DISPATCH_QUEUE_BASE_TYPEFLAG, @@ -456,10 +477,14 @@ typedef struct _os_object_s { do_xref_cnt) #endif -#define _DISPATCH_OBJECT_HEADER(x) \ +#define _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct _os_object_s _as_os_obj[0]; \ OS_OBJECT_STRUCT_HEADER(dispatch_##x); \ - struct dispatch_##x##_s *volatile do_next; \ + struct dispatch_##x##_s *volatile do_next; + + +#define _DISPATCH_OBJECT_HEADER(x) \ + _DISPATCH_OBJECT_HEADER_INTERNAL(x) \ struct dispatch_queue_s *do_targetq; \ void *do_ctxt; \ union { \ @@ -596,7 +621,7 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); */ #define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ __typeof__(o) _o = (o); \ - int _ref_cnt = _o->f; \ + int _ref_cnt = os_atomic_load(&_o->f, relaxed); \ if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ } \ diff --git a/src/queue.c b/src/queue.c index 6ac062cff..44cdb4aa5 100644 --- a/src/queue.c +++ b/src/queue.c @@ -136,9 +136,14 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, // it from the defaultpri, see 
_dispatch_priority_compute_update pp |= (_dispatch_get_basepri() & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + + // TODO (rokhinip): Right now there is no binding and unbinding + // to a kqueue for a cooperative thread. We'll need to do this + // right once we get that support } else { - // else we need to keep the one that is set in the current pri - pp |= (old_pri & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + // else we need to keep the overcommit/cooperative one that is set on the current + // thread + pp |= (old_pri & _PTHREAD_PRIORITY_THREAD_TYPE_MASK); } if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { pflags |= _PTHREAD_SET_SELF_QOS_FLAG; @@ -302,6 +307,22 @@ _dispatch_block_flags_valid(dispatch_block_flags_t flags) return ((flags & ~DISPATCH_BLOCK_API_MASK) == 0); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_block_remember_async_queue(dispatch_block_private_data_t dbpd, + dispatch_queue_t dq) +{ + // balanced in d_block_sync_invoke or d_block_wait + // + // Note: we need to retain _before_ we publish it, + // because dispatch_block_wait() will eagerly + // consume the refcounts. 
+ _dispatch_retain_2(dq); + if (!os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + _dispatch_release_2(dq); + } +} + DISPATCH_ALWAYS_INLINE static inline dispatch_block_flags_t _dispatch_block_normalize_flags(dispatch_block_flags_t flags) @@ -666,10 +687,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, uintptr_t dc_flags = dc->dc_flags; pthread_priority_t pp = 0; - // balanced in d_block_async_invoke_and_release or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); if (dc_flags & DC_FLAG_CONSUME) { dc->dc_func = _dispatch_block_async_invoke_and_release; @@ -1645,13 +1663,19 @@ __DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq) (uint8_t)_dispatch_get_basepri_override_qos_floor(); _dispatch_thread_event_init(&dsc->dsc_event); } + + _dispatch_set_current_dsc((void *) dsc); dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority)); + _dispatch_trace_runtime_event(sync_wait, dq, 0); if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_wait(&dsc->dsc_event); // acquire } else if (!dsc->dsc_wlh_self_wakeup) { _dispatch_event_loop_wait_for_ownership(dsc); } + + _dispatch_clear_current_dsc(); + if (dsc->dc_data == DISPATCH_WLH_ANON) { _dispatch_thread_event_destroy(&dsc->dsc_event); // If _dispatch_sync_waiter_wake() gave this thread an override, @@ -1896,10 +1920,8 @@ _dispatch_sync_block_with_privdata(dispatch_queue_t dq, dispatch_block_t work, } ov = _dispatch_set_priority_and_voucher(p, v, 0); - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); + if (dc_flags & DC_FLAG_BARRIER) { _dispatch_barrier_sync_f(dq, work, _dispatch_block_sync_invoke, dc_flags); @@ -2177,10 +2199,7 @@ _dispatch_async_and_wait_block_with_privdata(dispatch_queue_t dq, v = 
_voucher_get(); } - // balanced in d_block_sync_invoke or d_block_wait - if (os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { - _dispatch_retain_2(dq); - } + _dispatch_block_remember_async_queue(dbpd, dq); dispatch_tid tid = _dispatch_tid_self(); struct dispatch_sync_context_s dsc = { @@ -2453,6 +2472,11 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) { uint64_t old_state, new_state, role; + /* TODO (rokhinip): We're going to have to change this in the future when we + * allow targetting queues to a cooperative pool and need to figure out what + * kind of a role that gives the queue */ + dispatch_assert(!_dispatch_queue_is_cooperative(tq)); + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { role = DISPATCH_QUEUE_ROLE_INNER; } else if (_dispatch_base_lane_is_wlh(dq, tq)) { @@ -2560,7 +2584,7 @@ _dispatch_queue_compute_priority_and_wlh(dispatch_queue_t dq, rqp &= DISPATCH_PRIORITY_REQUESTED_MASK; if (p < rqp) p = rqp; - p |= (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + p |= (tq->dq_priority & DISPATCH_PRIORITY_THREAD_TYPE_MASK); if ((dpri & DISPATCH_PRIORITY_FLAG_FLOOR) || !(dpri & DISPATCH_PRIORITY_REQUESTED_MASK)) { p |= (dpri & DISPATCH_PRIORITY_FLAG_FLOOR); @@ -2664,8 +2688,12 @@ _dispatch_queue_priority_inherit_from_target(dispatch_lane_class_t dq, if (_dispatch_is_in_root_queues_array(tq)) { dispatch_qos_t qos = _dispatch_priority_qos(pri); if (!qos) qos = DISPATCH_QOS_DEFAULT; - tq = _dispatch_get_root_queue(qos, - pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT)->_as_dq; + + // TODO (rokhinip): In future, might want to consider whether dq + // itself might be tagged cooperative and therefore we need to + // adjust tq accordingly + uintptr_t flags = (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? 
DISPATCH_QUEUE_OVERCOMMIT : 0; + tq = _dispatch_get_root_queue(qos, flags)->_as_dq; } return tq; } @@ -2731,6 +2759,8 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa, qos = _dispatch_priority_qos(tq->dq_priority); } tq = NULL; + } else if (tq && _dispatch_queue_is_cooperative(tq)) { + DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); } else if (tq && !tq->do_targetq) { // target is a pthread or runloop root queue, setting QoS or overcommit // is disallowed @@ -2747,9 +2777,10 @@ _dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa, } } if (!tq) { + uintptr_t flags = (overcommit == _dispatch_queue_attr_overcommit_enabled) ? DISPATCH_QUEUE_OVERCOMMIT : 0; tq = _dispatch_get_root_queue( qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos, - overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq; + flags)->_as_dq; if (unlikely(!tq)) { DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute"); } @@ -3513,11 +3544,13 @@ _dispatch_poll_for_events_4launchd(void) } #if DISPATCH_USE_WORKQUEUE_NARROWING + +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE DISPATCH_STATIC_GLOBAL(os_atomic(uint64_t) _dispatch_narrowing_deadlines[DISPATCH_QOS_NBUCKETS]); #if !DISPATCH_TIME_UNIT_USES_NANOSECONDS DISPATCH_STATIC_GLOBAL(uint64_t _dispatch_narrow_check_interval_cache); -#endif +#endif /* !DISPATCH_TIME_UNIT_USES_NANOSECONDS */ DISPATCH_ALWAYS_INLINE static inline uint64_t @@ -3588,9 +3621,50 @@ _dispatch_queue_drain_should_narrow(dispatch_invoke_context_t dic) } return false; } + +bool +dispatch_swift_job_should_yield(void) +{ + return false; +} + +#else /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_drain_should_narrow(dispatch_invoke_context_t __unused dic) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + return (quantum_expiry_action & PTHREAD_WQ_QUANTUM_EXPIRY_NARROW) != 0; +} +#define 
_dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) + +bool +dispatch_swift_job_should_yield(void) +{ + uint64_t quantum_expiry_action = _dispatch_get_quantum_expiry_action(); + /* We want to return true here regardless of what the quantum expiry action + * is. There will be specific logic in root queue drain to handle the + * various specific reasons. + * + * TODO (rokhinip): There is room for some potential optmization to return + * false here if there is nothing else enqueued on the root queue we're + * draining + */ + return quantum_expiry_action != 0; +} + +#endif /* !DISPATCH_USE_COOPERATIVE_WORKQUEUE */ + #else #define _dispatch_queue_drain_init_narrowing_check_deadline(rq, dic) ((void)0) #define _dispatch_queue_drain_should_narrow(dic) false + +bool +dispatch_swift_job_should_yield(void) +{ + return false; +} #endif /* @@ -3660,7 +3734,8 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, if (unlikely(serial_drain != (dq->dq_width == 1))) { break; } - if (unlikely(_dispatch_queue_drain_should_narrow(dic))) { + if (unlikely(!(flags & DISPATCH_INVOKE_DISABLED_NARROWING) && + _dispatch_queue_drain_should_narrow(dic))) { break; } if (likely(flags & DISPATCH_INVOKE_WORKLOOP_DRAIN)) { @@ -4129,18 +4204,6 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); } - -static const struct dispatch_queue_global_s _dispatch_custom_workloop_root_queue = { - DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), - .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, - .do_ctxt = NULL, - .dq_label = "com.apple.root.workloop-custom", - .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), - .dq_priority = _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT) | - DISPATCH_PRIORITY_SATURATED_OVERRIDE, - .dq_serialnum = DISPATCH_QUEUE_SERIAL_NUMBER_WLF, - .dgq_thread_pool_size = 1, -}; #endif // TARGET_OS_MAC static void @@ -4771,7 +4834,12 @@ 
_dispatch_queue_override_invoke(dispatch_continuation_t dc, } _dispatch_continuation_pop_forwarded(dc, dc_flags, assumed_rq, { if (_dispatch_object_has_vtable(dou._do)) { - dx_invoke(dou._dq, dic, flags); + if (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) { + dx_invoke(dou._dsjc, NULL, + _dispatch_invoke_flags_to_swift_invoke_flags(flags)); + } else { + dx_invoke(dou._dq, dic, flags); + } } else { _dispatch_continuation_invoke_inline(dou, flags, assumed_rq); } @@ -4799,8 +4867,13 @@ static void _dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq, dispatch_object_t dou, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = dou._dc; if (_dispatch_object_is_redirection(dc)) { @@ -4824,8 +4897,13 @@ static void _dispatch_root_queue_push_override_stealer(dispatch_queue_global_t orig_rq, dispatch_queue_t dq, dispatch_qos_t qos) { - bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit); + uintptr_t flags = 0; + if (orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) { + flags |= DISPATCH_QUEUE_OVERCOMMIT; + } else if (_dispatch_queue_is_cooperative(orig_rq)) { + flags |= DISPATCH_QUEUE_COOPERATIVE; + } + dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, flags); dispatch_continuation_t dc = _dispatch_continuation_alloc(); dc->do_vtable = DC_VTABLE(OVERRIDE_STEALING); @@ -5284,6 +5362,15 @@ void _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, dispatch_qos_t qos) { + if 
(unlikely(_dispatch_queue_is_cooperative(dq))) { + /* If we're here, means that we're in the simulator fallback case. We + * still restrict what can target the cooperative thread pool */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + // reserving non barrier width // doesn't fail if only the ENQUEUED bit is set (unlike its barrier // width equivalent), so we have to check that this thread hasn't @@ -5298,6 +5385,21 @@ _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, _dispatch_lane_push(dq, dou, qos); } +void +dispatch_async_swift_job(dispatch_queue_t dq, void *object, qos_class_t qos) +{ + dispatch_swift_continuation_t swift_dc; + swift_dc = (dispatch_swift_continuation_t) object; + + dispatch_object_flags_t object_flags = dx_type(swift_dc); + if (object_flags != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(object_flags, + "Used Swift only SPI to enqueue non-Swift runtime objects into dispatch"); + } + + dx_push(dq, swift_dc->_as_do, _dispatch_qos_from_qos_class(qos)); +} + #pragma mark - #pragma mark dispatch_channel_t @@ -5474,7 +5576,7 @@ dispatch_channel_foreach_work_item_peek_f( if (dc == dch->dq_items_tail) { break; } - dc = os_mpsc_get_next(dc, do_next); + dc = os_mpsc_get_next(dc, do_next, &dch->dq_items_tail); } } @@ -6082,7 +6184,8 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) // // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + + pp &= _PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK; if (ddi->ddi_wlh == DISPATCH_WLH_ANON) { pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; } else { @@ -6163,6 +6266,15 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, int *nevents) { 
_dispatch_introspection_thread_add(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field will make sure that we have no bad state lingering. + * + * If the thread is part of the cooperative workq quantum world, we know + * that the thread has just had its workq quantum armed before coming out to + * userspace, so we clobber this to make sure that we start fresh */ + _dispatch_ack_quantum_expiry_action(); +#endif DISPATCH_PERF_MON_VAR_INIT @@ -6223,6 +6335,15 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, } _dispatch_debug("returning %d deferred kevents", ddi.ddi_nevents); _dispatch_clear_return_to_kernel(); +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* If this thread is not part of the cooperative workq quantum world, + * clearing this field should be a noop. + * + * If the thread is part of the cooperative workq quantum world, the thread + * is not going to take any action on the workq quantum action regardless + * since it is going to park so we clear it anyways */ + _dispatch_ack_quantum_expiry_action(); +#endif *nevents = ddi.ddi_nevents; _dispatch_trace_runtime_event(worker_park, NULL, 0); @@ -6319,6 +6440,15 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); (void)dispatch_assume_zero(r); return; +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + } else if (dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE) { + _dispatch_root_queue_debug("requesting new worker thread for cooperative global " + "queue: %p", dq); + r = _pthread_workqueue_add_cooperativethreads(remaining, + _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority)); + (void)dispatch_assume_zero(r); + return; +#endif /* DISPATCH_USE_COOPERATIVE_WORKQUEUE */ } #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE #if DISPATCH_USE_PTHREAD_POOL @@ -6388,13 +6518,8 @@ 
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) #endif do { _dispatch_retain(dq); // released in _dispatch_worker_thread -#if DISPATCH_DEBUG - unsigned dwStackSize = 0; -#else - unsigned dwStackSize = 64 * 1024; -#endif uintptr_t hThread = 0; - while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { + while (!(hThread = _beginthreadex(NULL, /* stack_size */ 0, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) { if (errno != EAGAIN) { (void)dispatch_assume(hThread); } @@ -6422,7 +6547,8 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) } #if !DISPATCH_USE_INTERNAL_WORKQUEUE #if DISPATCH_USE_PTHREAD_POOL - if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)) + if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || + dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE)) #endif { if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { @@ -6559,7 +6685,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) goto out; } // There must be a next item now. - next = os_mpsc_get_next(head, do_next); + next = os_mpsc_get_next(head, do_next, &dq->dq_items_tail); } os_atomic_store2o(dq, dq_items_head, next, relaxed); @@ -6733,6 +6859,16 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { break; } + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* There is no need to check to see if we need to shuffle since by + * virtue of the fact that we're here, we're timesharing between the + * work items anyways - just eat the quantum expiry action. + * + * In the future, we'd expand this to include more checks for various + * other quantum expiry actions */ + _dispatch_ack_quantum_expiry_action(); +#endif } // overcommit or not. 
worker thread @@ -6754,22 +6890,42 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, static void _dispatch_worker_thread2(pthread_priority_t pp) { +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif + bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; - dispatch_queue_global_t dq; + bool cooperative = pp & _PTHREAD_PRIORITY_COOPERATIVE_FLAG; - pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp &= (_PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK); _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); - dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit); + + dispatch_queue_global_t dq; + dispatch_invoke_flags_t invoke_flags = 0; + + uintptr_t rq_flags = 0; + if (cooperative) { + rq_flags |= DISPATCH_QUEUE_COOPERATIVE; + invoke_flags |= DISPATCH_INVOKE_COOPERATIVE_DRAIN; + } else { + rq_flags |= (overcommit ? DISPATCH_QUEUE_OVERCOMMIT : 0); + } + dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), rq_flags); _dispatch_introspection_thread_add(); _dispatch_trace_runtime_event(worker_unpark, dq, 0); int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); dispatch_assert(pending >= 0); - _dispatch_root_queue_drain(dq, dq->dq_priority, - DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN); + + invoke_flags |= DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN; + _dispatch_root_queue_drain(dq, dq->dq_priority, invoke_flags); _dispatch_voucher_debug("root queue clear", NULL); _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK); + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + _dispatch_ack_quantum_expiry_action(); +#endif _dispatch_trace_runtime_event(worker_park, NULL, 0); } #endif // !DISPATCH_USE_INTERNAL_WORKQUEUE @@ -6824,6 +6980,13 @@ _dispatch_worker_thread(void *context) _dispatch_set_pthread_root_queue_observer_hooks( &pqc->dpq_observer_hooks); } + + /* Set it up before the configure block so that it 
can get overridden by + * client if they want to name their threads differently */ + if (dq->_as_dq->dq_label) { + pthread_setname_np(dq->_as_dq->dq_label); + } + if (pqc->dpq_thread_configure) { pqc->dpq_thread_configure(); } @@ -6913,6 +7076,15 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, dispatch_priority_t rq_overcommit; rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; + // TODO (rokhinip): When we add kevent support for the cooperative pool, + // we need to fix this logic to make sure that we have the following + // ranking: + // + // non_overcommit < cooperative < overcommit + + // After parsing kevents, we could have stashed a non-overcommit work + // item to do but if an overcommit/cooperative request comes in, prefer + // that. if (likely(!old_dou._do || rq_overcommit)) { dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq; dispatch_qos_t old_qos = ddi->ddi_stashed_qos; @@ -6934,6 +7106,16 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, } } #endif + + if (_dispatch_queue_is_cooperative(rq)) { + /* We only allow enqueueing of continuations or swift job objects on the + * cooperative pool, no other objects */ + if (_dispatch_object_has_vtable(dou) && + dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { + DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); + } + } + #if HAVE_PTHREAD_WORKQUEUE_QOS if (_dispatch_root_queue_push_needs_override(rq, qos)) { return _dispatch_root_queue_push_override(rq, dou, qos); @@ -7177,7 +7359,7 @@ _dispatch_runloop_queue_handle_init(void *ctxt) handle = fd; #elif defined(_WIN32) HANDLE hEvent; - hEvent = CreateEventW(NULL, /*bManualReset=*/TRUE, + hEvent = CreateEventW(NULL, /*bManualReset=*/FALSE, /*bInitialState=*/FALSE, NULL); if (hEvent == NULL) { DISPATCH_INTERNAL_CRASH(GetLastError(), "CreateEventW"); @@ -7271,6 +7453,7 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, 
_dispatch_runloop_queue_handle_init); } + qos = _dispatch_queue_wakeup_qos(dq, qos); os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { new_state = _dq_state_merge_qos(old_state, qos); if (old_state == new_state) { @@ -7772,6 +7955,38 @@ _dispatch_context_cleanup(void *ctxt) #pragma mark - #pragma mark dispatch_init +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE +static void +_dispatch_cooperative_root_queue_init_fallback(dispatch_queue_global_t dq) +{ + uint16_t max_cpus = (uint16_t) dispatch_hw_config(logical_cpus); + uint16_t width_per_cooperative_queue; + + if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) { + /* We want width 1 for a strict runtime - implement it as a width 1 + * concurrent queue */ + width_per_cooperative_queue = 1; + } else { + /* Concurrent queue with limited width */ + width_per_cooperative_queue = MAX(max_cpus/DISPATCH_QOS_NBUCKETS, 1); + } + + dispatch_priority_t pri = dq->dq_priority; + dispatch_qos_t qos = (pri & DISPATCH_PRIORITY_FLAG_FALLBACK) ? 
+ _dispatch_priority_fallback_qos(pri) : _dispatch_priority_qos(pri); + + /* _dispatch_queue_init will clobber the serial num so just save it and + * restore it back */ + unsigned long dq_serialnum = dq->dq_serialnum; + _dispatch_queue_init(dq, 0, width_per_cooperative_queue, DISPATCH_QUEUE_ROLE_BASE_ANON); + dq->dq_serialnum = dq_serialnum; + + dispatch_queue_t tq = _dispatch_get_root_queue(qos, 0)->_as_dq; + _dispatch_retain(tq); + dq->do_targetq = tq; +} +#endif + static void _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) { @@ -7791,6 +8006,14 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) "QoS Maintenance support required"); } +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE + for (int i = DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE; + i < _DISPATCH_ROOT_QUEUE_IDX_COUNT; i += DISPATCH_ROOT_QUEUE_FLAVORS) + { + _dispatch_cooperative_root_queue_init_fallback(&_dispatch_root_queues[i]); + } +#endif + #if DISPATCH_USE_KEVENT_SETUP struct pthread_workqueue_config cfg = { .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, @@ -7853,6 +8076,19 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported, "Root queue initialization failed"); } + +#if DISPATCH_USE_COOPERATIVE_WORKQUEUE + if (_dispatch_mode & DISPATCH_COOPERATIVE_POOL_STRICT) { + int pool_size_limit = -1; /* strict per QoS bucket */ + r = sysctlbyname("kern.wq_limit_cooperative_threads", NULL, NULL, &pool_size_limit, + sizeof(int)); + + if (r != 0) { + DISPATCH_INTERNAL_CRASH(errno, "Unable to limit cooperative pool size"); + } + } +#endif + #endif // DISPATCH_USE_INTERNAL_WORKQUEUE } @@ -7865,6 +8101,38 @@ _dispatch_root_queues_init(void) _dispatch_root_queues_init_once); } +dispatch_queue_global_t +dispatch_get_global_queue(intptr_t priority, uintptr_t flags) +{ + if (flags & ~(unsigned long)(DISPATCH_QUEUE_OVERCOMMIT | DISPATCH_QUEUE_COOPERATIVE)) { + return DISPATCH_BAD_INPUT; + } + + if ((flags & 
DISPATCH_QUEUE_OVERCOMMIT) && (flags & DISPATCH_QUEUE_COOPERATIVE)) { + return DISPATCH_BAD_INPUT; + } + + dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority); +#if !HAVE_PTHREAD_WORKQUEUE_QOS + if (qos == QOS_CLASS_MAINTENANCE) { + qos = DISPATCH_QOS_BACKGROUND; + } else if (qos == QOS_CLASS_USER_INTERACTIVE) { + qos = DISPATCH_QOS_USER_INITIATED; + } +#endif + if (qos == DISPATCH_QOS_UNSPECIFIED) { + return DISPATCH_BAD_INPUT; + } + +#if !DISPATCH_USE_COOPERATIVE_WORKQUEUE + /* The fallback implementation of the cooperative root queues need to be + * fully initialized before work can be enqueued on these queues */ + _dispatch_root_queues_init(); +#endif + + return _dispatch_get_root_queue(qos, flags); +} + DISPATCH_EXPORT DISPATCH_NOTHROW void libdispatch_init(void) @@ -7876,6 +8144,10 @@ libdispatch_init(void) _dispatch_mode |= DISPATCH_MODE_STRICT; } + if (_dispatch_getenv_bool("LIBDISPATCH_COOPERATIVE_POOL_STRICT", false)) { + _dispatch_mode |= DISPATCH_COOPERATIVE_POOL_STRICT; + } + #if DISPATCH_DEBUG || DISPATCH_PROFILE #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -7916,8 +8188,11 @@ libdispatch_init(void) _dispatch_thread_key_create(&dispatch_voucher_key, _voucher_thread_cleanup); _dispatch_thread_key_create(&dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _dispatch_thread_key_create(&dispatch_quantum_key, NULL); + _dispatch_thread_key_create(&dispatch_dsc_key, NULL); + _dispatch_thread_key_create(&os_workgroup_key, _os_workgroup_tsd_cleanup); + _dispatch_thread_key_create(&dispatch_enqueue_key, NULL); #endif - pthread_key_create(&_os_workgroup_key, _os_workgroup_tsd_cleanup); #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); #endif @@ -7934,6 +8209,9 @@ libdispatch_init(void) _dispatch_vtable_init(); _os_object_init(); _voucher_init(); +#if TARGET_OS_MAC + _workgroup_init(); +#endif _dispatch_introspection_init(); } @@ -8059,6 +8337,9 @@ _libdispatch_tsd_cleanup(void 
*ctx) _tsd_call_cleanup(dispatch_voucher_key, _voucher_thread_cleanup); _tsd_call_cleanup(dispatch_deferred_items_key, _dispatch_deferred_items_cleanup); + _tsd_call_cleanup(dispatch_quantum_key, NULL); + _tsd_call_cleanup(dispatch_enqueue_key, NULL); + _tsd_call_cleanup(dispatch_dsc_key, NULL); #ifdef __ANDROID__ if (_dispatch_thread_detach_callback) { _dispatch_thread_detach_callback(); diff --git a/src/queue_internal.h b/src/queue_internal.h index d9425fcf6..68a5fec23 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -740,6 +740,7 @@ DISPATCH_SUBCLASS_DECL(queue_global, queue, lane); #if DISPATCH_USE_PTHREAD_ROOT_QUEUES DISPATCH_INTERNAL_SUBCLASS_DECL(queue_pthread_root, queue, lane); #endif +DISPATCH_INTERNAL_SUBCLASS_DECL(queue_cooperative, queue, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_runloop, queue_serial, lane); DISPATCH_INTERNAL_SUBCLASS_DECL(queue_mgr, queue_serial, lane); @@ -864,45 +865,56 @@ DISPATCH_COLD size_t _dispatch_queue_debug_attr(dispatch_queue_t dq, char *buf, size_t bufsiz); -#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * 2) +#define DISPATCH_ROOT_QUEUE_FLAVORS 3 +#define DISPATCH_ROOT_QUEUE_COUNT (DISPATCH_QOS_NBUCKETS * DISPATCH_ROOT_QUEUE_FLAVORS) // must be in lowest to highest qos order (as encoded in dispatch_qos_t) -// overcommit qos index values need bit 1 set enum { DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS = 0, DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS, DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_BACKGROUND_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS, DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_UTILITY_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS, DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_DEFAULT_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS, 
DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INITIATED_QOS_COOPERATIVE, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS, DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_OVERCOMMIT, + DISPATCH_ROOT_QUEUE_IDX_USER_INTERACTIVE_QOS_COOPERATIVE, _DISPATCH_ROOT_QUEUE_IDX_COUNT, }; +#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_OVERCOMMIT \ + (DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_OVERCOMMIT - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS) +#define DISPATCH_ROOT_QUEUE_IDX_OFFSET_COOPERATIVE \ + (DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS_COOPERATIVE - DISPATCH_ROOT_QUEUE_IDX_MAINTENANCE_QOS) + // skip zero // 1 - main_q // 2 - mgr_q // 3 - mgr_root_q -// 4,5,6,7,8,9,10,11,12,13,14,15 - global queues -// 17 - workloop_fallback_q +// 4 - 21 - global queues +// 22 - workloop_fallback_q // we use 'xadd' on Intel, so the initial value == next assigned -#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 17 +#define DISPATCH_QUEUE_SERIAL_NUMBER_INIT 22 extern unsigned long volatile _dispatch_queue_serial_numbers; // mark the workloop fallback queue to avoid finalizing objects on the base // queue of custom outside-of-qos workloops -#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 16 +#define DISPATCH_QUEUE_SERIAL_NUMBER_WLF 22 extern struct dispatch_queue_static_s _dispatch_mgr_q; // serial 2 #if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES extern struct dispatch_queue_global_s _dispatch_mgr_root_queue; // serial 3 #endif -extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 15 +extern struct dispatch_queue_global_s _dispatch_root_queues[]; // serials 4 - 21 #if DISPATCH_DEBUG #define DISPATCH_ASSERT_ON_MANAGER_QUEUE() \ @@ -1063,6 +1075,36 @@ typedef struct dispatch_continuation_s { dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_next); dispatch_assert_aliases(dispatch_continuation_s, dispatch_object_s, do_vtable); +/* Swift runtime objects to be enqueued into dispatch */ +struct dispatch_swift_continuation_s; + 
+struct dispatch_swift_continuation_extra_vtable_s { + unsigned long const do_type; + void DISPATCH_VTABLE_ENTRY(do_invoke)(struct dispatch_swift_continuation_s *, + void *, dispatch_swift_job_invoke_flags_t flags); +}; + +typedef struct dispatch_swift_continuation_vtable_s { + _OS_OBJECT_CLASS_HEADER(); + struct dispatch_swift_continuation_extra_vtable_s _os_obj_vtable; +} const *dispatch_swift_continuation_vtable_t; + +/* This is the internal representation of a Swift object that will be enqueued + * onto dispatch. The actual object may be bigger but we only care about this + * piece of it. The vtable the continuation points to, will be interpreted as a + * dispatch_swift_continuation_vtable_t even if it is bigger. + */ +typedef struct dispatch_swift_continuation_s { + struct dispatch_object_s _as_do[0]; + _DISPATCH_OBJECT_HEADER_INTERNAL(swift_continuation); + void *opaque1; + void *opaque2; + void *opaque3; +} *dispatch_swift_continuation_t; + +dispatch_static_assert(sizeof(struct dispatch_swift_continuation_s) == + sizeof(struct dispatch_object_s)); + typedef struct dispatch_sync_context_s { struct dispatch_continuation_s _as_dc[0]; DISPATCH_CONTINUATION_HEADER(continuation); @@ -1174,7 +1216,8 @@ struct dispatch_apply_s { #if !OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; #endif - size_t volatile da_index, da_todo; + size_t _Atomic da_index; + size_t _Atomic da_todo; size_t da_iterations; #if OS_OBJECT_HAVE_OBJC1 dispatch_continuation_t da_dc; @@ -1183,12 +1226,29 @@ struct dispatch_apply_s { dispatch_thread_event_s da_event; dispatch_invoke_flags_t da_flags; int32_t da_thr_cnt; + uint32_t _Atomic da_worker_index; + dispatch_apply_attr_t da_attr; }; dispatch_static_assert(offsetof(struct dispatch_continuation_s, dc_flags) == offsetof(struct dispatch_apply_s, da_dc), "These fields must alias so that leaks instruments work"); typedef struct dispatch_apply_s *dispatch_apply_t; +#define DISPATCH_APPLY_ATTR_SIG 0xA11AB000 +struct dispatch_apply_attr_s { + 
uint32_t sig; + uint32_t flags; + size_t per_cluster_parallelism; + uintptr_t guard; /* To prevent copying */ +#if defined(__LP64__) + uint8_t unused[40]; +#else + uint8_t unused[48]; +#endif +}; +dispatch_static_assert(sizeof(struct dispatch_apply_attr_s) == __DISPATCH_APPLY_ATTR_SIZE__, + "Opaque dispatch apply attr and internal apply attr size should match"); + #pragma mark - #pragma mark dispatch_block_t diff --git a/src/semaphore.c b/src/semaphore.c index 1d164f17f..987333740 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -116,8 +116,8 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) { break; } - // Fall through and try to undo what the fast path did to - // dsema->dsema_value + // Try to undo what the fast path did to dsema->dsema_value + DISPATCH_FALLTHROUGH; case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { @@ -126,8 +126,8 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, return _DSEMA4_TIMEOUT(); } } - // Another thread called semaphore_signal(). - // Fall through and drain the wakeup. + // Another thread called semaphore_signal(). Drain the wakeup. 
+ DISPATCH_FALLTHROUGH; case DISPATCH_TIME_FOREVER: _dispatch_sema4_wait(&dsema->dsema_sema); break; diff --git a/src/shims.h b/src/shims.h index e2377bc7c..b611a5a73 100644 --- a/src/shims.h +++ b/src/shims.h @@ -55,8 +55,12 @@ #endif #ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT +#if defined(__APPLE__) +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 64 +#else #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 #endif +#endif /* DISPATCH_WORKQ_MAX_PTHREAD_COUNT */ #include "shims/hw_config.h" #include "shims/priority.h" @@ -192,6 +196,98 @@ _dispatch_qos_max_parallelism(dispatch_qos_t qos, unsigned long flags) return p; } +#if DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#include + +#if defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 1 +#else // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // defined(_PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC) && __arm64__ + +#else // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR +#define DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM 0 +#endif // DISPATCH_USE_PTHREAD_QOS_MAX_PARALLELISM && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(120000) && !TARGET_OS_SIMULATOR + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM +extern int __bsdthread_ctl(uintptr_t cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); +#include +// the sysctl wants thread_qos_t not dispatch_qos_t +DISPATCH_ALWAYS_INLINE +static inline uint8_t +_dispatch_qos2threadqos(dispatch_qos_t q) +{ + switch (q) { + case DISPATCH_QOS_USER_INTERACTIVE: return THREAD_QOS_USER_INTERACTIVE; + case DISPATCH_QOS_USER_INITIATED: return THREAD_QOS_USER_INITIATED; + case DISPATCH_QOS_DEFAULT: return THREAD_QOS_LEGACY; + case DISPATCH_QOS_UTILITY: return THREAD_QOS_UTILITY; + case DISPATCH_QOS_BACKGROUND: return THREAD_QOS_BACKGROUND; + 
case DISPATCH_QOS_MAINTENANCE: return THREAD_QOS_MAINTENANCE; + default: return THREAD_QOS_UNSPECIFIED; + } +} +#endif + +DISPATCH_ALWAYS_INLINE +static inline uint32_t +_dispatch_cluster_max_parallelism(dispatch_qos_t qos) +{ + uint32_t cluster_count = 0; + +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int r = pthread_qos_max_parallelism(_dispatch_qos_to_qos_class(qos), PTHREAD_MAX_PARALLELISM_CLUSTER); + if (likely(r > 0)) { + cluster_count = (uint32_t) r; + } +#else + (void)qos; +#endif + return cluster_count; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_set(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET, worker_index, cluster_concurrency); + if (rc != 0) { + if (errno != ENOTSUP) { + /* ENOTSUP = Trying to get on a cluster it is not recommended for. + * + * Other error means something very bad has happened! On things + * like the Simulator we shouldn't even be in here. 
+ * DISPATCH_INTERNAL_CRASH isn't available here + */ + __builtin_trap(); + } + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_attr_apply_cluster_clear(size_t worker_index, size_t cluster_concurrency) +{ +#if DISPATCH_USE_PTHREAD_CLUSTER_PARALLELISM + int rc = 0; + rc = __bsdthread_ctl(BSDTHREAD_CTL_DISPATCH_APPLY_ATTR, _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR, worker_index, cluster_concurrency); + if (rc != 0) { + __builtin_trap(); + } +#else + (void)worker_index; + (void)cluster_concurrency; +#endif + return; +} + #if !HAVE_NORETURN_BUILTIN_TRAP /* * XXXRW: Work-around for possible clang bug in which __builtin_trap() is not diff --git a/src/shims/hw_config.h b/src/shims/hw_config.h index 89b7f8f61..788064964 100644 --- a/src/shims/hw_config.h +++ b/src/shims/hw_config.h @@ -124,8 +124,7 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiInfo = NULL; PSYSTEM_LOGICAL_PROCESSOR_INFORMATION slpiCurrent = NULL; DWORD dwProcessorLogicalCount = 0; - DWORD dwProcessorPackageCount = 0; - DWORD dwProcessorCoreCount = 0; + DWORD dwProcessorPhysicalCount = 0; DWORD dwSize = 0; while (true) { @@ -154,13 +153,17 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) slpiCurrent++, dwSize -= sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION)) { switch (slpiCurrent->Relationship) { case RelationProcessorCore: - ++dwProcessorCoreCount; + ++dwProcessorPhysicalCount; dwProcessorLogicalCount += __popcnt64(slpiCurrent->ProcessorMask); break; +#if defined(RelationProcessorDie) + case RelationProcessorDie: +#endif case RelationProcessorPackage: - ++dwProcessorPackageCount; - break; case RelationNumaNode: +#if defined(RelationNumaNodeEx) + case RelationNumaNodeEx: +#endif case RelationCache: case RelationGroup: case RelationAll: @@ -172,11 +175,10 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) switch (c) { case _dispatch_hw_config_logical_cpus: 
+ case _dispatch_hw_config_active_cpus: return dwProcessorLogicalCount; case _dispatch_hw_config_physical_cpus: - return dwProcessorPackageCount; - case _dispatch_hw_config_active_cpus: - return dwProcessorCoreCount; + return dwProcessorPhysicalCount; } #else const char *name = NULL; @@ -191,12 +193,16 @@ _dispatch_hw_get_config(_dispatch_hw_config_t c) name = "hw.activecpu"; break; } #elif defined(__FreeBSD__) - (void)c; name = "kern.smp.cpus"; + (void)c; name = "kern.smp.cpus"; +#elif defined(__OpenBSD__) + (void)c; #endif if (name) { size_t valsz = sizeof(val); +#if !defined(__OpenBSD__) r = sysctlbyname(name, &val, &valsz, NULL, 0); (void)dispatch_assume_zero(r); +#endif dispatch_assert(valsz == sizeof(uint32_t)); } else { #if HAVE_SYSCONF && defined(_SC_NPROCESSORS_ONLN) diff --git a/src/shims/lock.c b/src/shims/lock.c index e96408981..4a750b3bd 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -59,7 +59,7 @@ _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags, #pragma mark - semaphores #if USE_MACH_SEM -#if __has_include() +#if __has_include() && !TARGET_OS_SIMULATOR #include #define DISPATCH_USE_OS_SEMAPHORE_CACHE 1 #else @@ -109,7 +109,7 @@ _dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy) } void -_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy) +_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int __unused policy) { semaphore_t sema_port = *sema; *sema = MACH_PORT_DEAD; @@ -338,7 +338,7 @@ _dlock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) if (timeout == 0) { continue; } - /* FALLTHROUGH */ + DISPATCH_FALLTHROUGH; case ETIMEDOUT: case EFAULT: return -rc; @@ -427,7 +427,7 @@ _futex_blocking_op(uint32_t *uaddr, int futex_op, uint32_t val, if (timeout == 0) { continue; } - /* FALLTHROUGH */ + DISPATCH_FALLTHROUGH; case ETIMEDOUT: case EFAULT: case EWOULDBLOCK: diff --git a/src/shims/lock.h b/src/shims/lock.h index a05dd1152..9c602724c 100644 --- a/src/shims/lock.h +++ 
b/src/shims/lock.h @@ -564,7 +564,7 @@ typedef struct dispatch_once_gate_s { * effect of the dispatch once initialization. * * Per Lemma 2, when the DONE transition happens in a thread zone { N+3, N+4 }, - * then threads can observe this transiton in their { N+2, N+3 } zone at the + * then threads can observe this transition in their { N+2, N+3 } zone at the * earliest. * * Hence for an initializer bracket of { N, N+1 }, the first safe bracket for diff --git a/src/shims/priority.h b/src/shims/priority.h index 3a79c5efb..aa0008ce2 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -45,6 +45,13 @@ #ifndef _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #endif +#ifndef _PTHREAD_PRIORITY_COOPERATIVE_FLAG +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 +#endif +#ifndef _PTHREAD_PRIORITY_THREAD_TYPE_MASK +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK 0x88000000 +#endif + #else // HAVE_PTHREAD_QOS_H OS_ENUM(qos_class, unsigned int, QOS_CLASS_USER_INTERACTIVE = 0x21, @@ -64,9 +71,12 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 #define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 #define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 #define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK \ + (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | _PTHREAD_PRIORITY_COOPERATIVE_FLAG) #endif // HAVE_PTHREAD_QOS_H @@ -108,9 +118,12 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG #define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_FALLBACK_FLAG #define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // 
_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG +#define DISPATCH_PRIORITY_FLAG_COOPERATIVE ((dispatch_priority_t)0x08000000) // _PTHREAD_PRIORITY_COOPERATIVE_FLAG #define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_FALLBACK | \ - DISPATCH_PRIORITY_FLAG_MANAGER) + DISPATCH_PRIORITY_FLAG_MANAGER | DISPATCH_PRIORITY_FLAG_COOPERATIVE) +#define DISPATCH_PRIORITY_THREAD_TYPE_MASK \ + (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_COOPERATIVE) // not passed to pthread #define DISPATCH_PRIORITY_FLAG_FLOOR ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG diff --git a/src/shims/tsd.h b/src/shims/tsd.h index 2207d4cd9..cf568d90f 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -91,8 +91,13 @@ static const unsigned long dispatch_introspection_key = __PTK_LIBDISPATCH_KEY6; static const unsigned long dispatch_bcounter_key = __PTK_LIBDISPATCH_KEY6; #endif static const unsigned long dispatch_wlh_key = __PTK_LIBDISPATCH_KEY7; -static const unsigned long dispatch_voucher_key = __PTK_LIBDISPATCH_KEY8; +static const unsigned long dispatch_voucher_key = OS_VOUCHER_TSD_KEY; static const unsigned long dispatch_deferred_items_key = __PTK_LIBDISPATCH_KEY9; +static const unsigned long dispatch_quantum_key = __PTK_LIBDISPATCH_KEY10; +static const unsigned long dispatch_dsc_key = __PTK_LIBDISPATCH_KEY11; +static const unsigned long dispatch_enqueue_key = __PTK_LIBDISPATCH_KEY12; + +static const unsigned long os_workgroup_key = __PTK_LIBDISPATCH_WORKGROUP_KEY0; DISPATCH_TSD_INLINE static inline void @@ -150,6 +155,11 @@ struct dispatch_tsd { void *dispatch_wlh_key; void *dispatch_voucher_key; void *dispatch_deferred_items_key; + void *dispatch_quantum_key; + void *dispatch_dsc_key; + void *dispatch_enqueue_key; + + void *os_workgroup_key; }; extern _Thread_local struct dispatch_tsd __dispatch_tsd; @@ -206,6 +216,11 @@ extern pthread_key_t dispatch_bcounter_key; extern pthread_key_t dispatch_wlh_key; 
extern pthread_key_t dispatch_voucher_key; extern pthread_key_t dispatch_deferred_items_key; +extern pthread_key_t dispatch_quantum_key; +extern pthread_key_t dispatch_dsc_key; +extern pthread_key_t dispatch_enqueue_key; + +extern pthread_key_t os_workgroup_key; DISPATCH_TSD_INLINE static inline void @@ -350,7 +365,11 @@ DISPATCH_TSD_INLINE DISPATCH_CONST static inline unsigned int _dispatch_cpu_number(void) { -#if __has_include() +#if TARGET_OS_SIMULATOR + size_t n; + pthread_cpu_number_np(&n); + return (unsigned int)n; +#elif __has_include() return _os_cpu_number(); #elif defined(__x86_64__) || defined(__i386__) struct { uintptr_t p1, p2; } p; diff --git a/src/shims/yield.c b/src/shims/yield.c index 43f0017ee..cd7e1acf8 100644 --- a/src/shims/yield.c +++ b/src/shims/yield.c @@ -22,18 +22,19 @@ DISPATCH_NOINLINE static void * -__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr) +__DISPATCH_WAIT_FOR_ENQUEUER__(void **ptr, void **tailp) { - int spins = 0; + unsigned int spins = 0; void *value; while ((value = os_atomic_load(ptr, relaxed)) == NULL) { - _dispatch_preemption_yield(++spins); + /* ptr == &prev->do_next */ + _dispatch_yield_to_enqueuer(tailp, ++spins); } return value; } void * -_dispatch_wait_for_enqueuer(void **ptr) +_dispatch_wait_for_enqueuer(void **ptr, void **tailp) { #if !DISPATCH_HW_CONFIG_UP #if defined(__arm__) || defined(__arm64__) @@ -57,5 +58,5 @@ _dispatch_wait_for_enqueuer(void **ptr) } #endif #endif // DISPATCH_HW_CONFIG_UP - return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr); + return __DISPATCH_WAIT_FOR_ENQUEUER__(ptr, tailp); } diff --git a/src/shims/yield.h b/src/shims/yield.h index 53eb80065..aeb429d44 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -80,7 +80,7 @@ #endif DISPATCH_NOT_TAIL_CALLED DISPATCH_EXPORT -void *_dispatch_wait_for_enqueuer(void **ptr); +void *_dispatch_wait_for_enqueuer(void **ptr, void **tailp); #pragma mark - #pragma mark _dispatch_contention_wait_until @@ -140,12 +140,22 @@ void *_dispatch_wait_for_enqueuer(void 
**ptr); #pragma mark - #pragma mark _dispatch_preemption_yield +/* Don't allow directed yield to enqueuer if !_pthread_has_direct_tsd() */ +#ifndef DISPATCH_HAVE_YIELD_TO_ENQUEUER +#if PTHREAD_HAVE_YIELD_TO_ENQUEUER && !TARGET_OS_SIMULATOR +#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 1 +#else +#define DISPATCH_HAVE_YIELD_TO_ENQUEUER 0 +#endif +#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */ + #if HAVE_MACH #if defined(SWITCH_OPTION_OSLOCK_DEPRESS) #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_OSLOCK_DEPRESS #else #define DISPATCH_YIELD_THREAD_SWITCH_OPTION SWITCH_OPTION_DEPRESS #endif + #define _dispatch_preemption_yield(n) thread_switch(MACH_PORT_NULL, \ DISPATCH_YIELD_THREAD_SWITCH_OPTION, (mach_msg_timeout_t)(n)) #define _dispatch_preemption_yield_to(th, n) thread_switch(th, \ @@ -161,6 +171,20 @@ void *_dispatch_wait_for_enqueuer(void **ptr); #define _dispatch_preemption_yield_to(th, n) { (void)n; sched_yield(); } #endif // HAVE_MACH +#if DISPATCH_HAVE_YIELD_TO_ENQUEUER +#define _dispatch_set_enqueuer_for(ptr) \ + _dispatch_thread_setspecific(dispatch_enqueue_key, (void *) (ptr)); +#define _dispatch_clear_enqueuer() \ + _dispatch_thread_setspecific(dispatch_enqueue_key, NULL); +#define _dispatch_yield_to_enqueuer(q, n) \ + (void) _pthread_yield_to_enqueuer_4dispatch(dispatch_enqueue_key, q, n) +#else +#define _dispatch_set_enqueuer_for(ptr) +#define _dispatch_clear_enqueuer(ptr) +#define _dispatch_yield_to_enqueuer(q, n) \ + ((void) (q), _dispatch_preemption_yield(n)) +#endif /* DISPATCH_HAVE_YIELD_TO_ENQUEUER */ + #pragma mark - #pragma mark _dispatch_contention_usleep diff --git a/src/source.c b/src/source.c index b4005dcf6..9af2a4a8b 100644 --- a/src/source.c +++ b/src/source.c @@ -60,6 +60,9 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, if (unlikely(!dq)) { dq = _dispatch_get_default_queue(true); } else { + if (_dispatch_queue_is_cooperative(dq)) { + DISPATCH_CLIENT_CRASH(dq, "Cannot target object to cooperative root queue - 
not implemented"); + } _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; @@ -441,7 +444,10 @@ _dispatch_source_registration_callout(dispatch_source_t ds, dispatch_queue_t cq, dc = _dispatch_source_handler_take(ds->ds_refs, DS_REGISTN_HANDLER); if (ds->dq_atomic_flags & (DSF_CANCELED | DQF_RELEASED)) { // no registration callout if source is canceled rdar://problem/8955246 - return _dispatch_source_handler_dispose(dc); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_source_handler_dispose(dc); + }); + return; } if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { dc->dc_ctxt = ds->do_ctxt; @@ -458,22 +464,33 @@ _dispatch_source_cancel_callout(dispatch_source_t ds, dispatch_queue_t cq, dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc; - dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); - dr->ds_pending_data = 0; - dr->ds_data = 0; - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); - _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); - if (!dc) { - return; - } - if (!(ds->dq_atomic_flags & DSF_CANCELED)) { - return _dispatch_source_handler_dispose(dc); - } - if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { - dc->dc_ctxt = ds->do_ctxt; - } - _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); - _dispatch_continuation_pop(dc, NULL, flags, cq); + dispatch_invoke_with_autoreleasepool(flags, { + dc = _dispatch_source_handler_take(dr, DS_CANCEL_HANDLER); + dr->ds_pending_data = 0; + dr->ds_data = 0; + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + _dispatch_source_handler_free(dr, DS_REGISTN_HANDLER); + if (!dc) { + /* nothing to do here */ + } else if (!(ds->dq_atomic_flags & DSF_CANCELED)) { + _dispatch_source_handler_dispose(dc); + } else { + if (dc->dc_flags & DC_FLAG_FETCH_CONTEXT) { + dc->dc_ctxt = ds->do_ctxt; + } + _dispatch_trace_source_callout_entry(ds, DS_CANCEL_HANDLER, cq, dc); + + // + // Make sure _dispatch_continuation_pop() will not + // add its own autoreleasepool since we 
have one, + // and there's magic in objc that makes _one_ + // autoreleasepool cheap. + // + flags &= ~DISPATCH_INVOKE_AUTORELEASE_ALWAYS; + _dispatch_continuation_pop(dc, NULL, flags, cq); + } + + }); } DISPATCH_ALWAYS_INLINE @@ -580,7 +597,9 @@ _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, } if (dr->du_timer_flags & DISPATCH_TIMER_AFTER) { _dispatch_trace_item_complete(dc); // see _dispatch_after - _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + dispatch_invoke_with_autoreleasepool(flags, { + _dispatch_source_handler_free(dr, DS_EVENT_HANDLER); + }); dispatch_release(ds); // dispatch_after sources are one-shot } } @@ -730,7 +749,7 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, // Intentionally always drain even when on the manager queue // and not the source's regular target queue: we need to be able // to drain timer setting and the like there. - dispatch_with_disabled_narrowing(dic, { + dispatch_with_disabled_narrowing(dic, flags, { retq = _dispatch_lane_serial_drain(ds, dic, flags, owned); }); } diff --git a/src/swift/CMakeLists.txt b/src/swift/CMakeLists.txt new file mode 100644 index 000000000..53924723e --- /dev/null +++ b/src/swift/CMakeLists.txt @@ -0,0 +1,65 @@ + +# NOTE(compnerd) Today regardless of whether or not ObjC interop is enabled, +# swift will use an autoreleased return value convention for certain CF +# functions (including some that are used/related to dispatch). This means that +# the swift compiler in callers to such functions will call the function, and +# then pass the result of the function to objc_retainAutoreleasedReturnValue. In +# a context where we have ObjC interop disabled, we do not have access to the +# objc runtime so an implementation of objc_retainAutoreleasedReturnValue is not +# available. To work around this, we provide a shim for +# objc_retainAutoreleasedReturnValue in DispatchStubs.cc that just calls retain +# on the object. 
Once we fix the swift compiler to switch to a different model +# for handling these arguments with objc-interop disabled these shims can be +# eliminated. +add_library(DispatchStubs STATIC + DispatchStubs.cc) +target_include_directories(DispatchStubs PRIVATE + ${PROJECT_SOURCE_DIR}) +set_target_properties(DispatchStubs PROPERTIES + POSITION_INDEPENDENT_CODE YES) + +add_library(swiftDispatch + Block.swift + Data.swift + Dispatch.swift + IO.swift + Private.swift + Queue.swift + Source.swift + Time.swift + Wrapper.swift) +target_compile_options(swiftDispatch PRIVATE + "SHELL:-Xcc -fblocks" + "SHELL:-Xcc -fmodule-map-file=${PROJECT_SOURCE_DIR}/dispatch/module.modulemap" + "SHELL:-Xcc -I${PROJECT_SOURCE_DIR}") +set_target_properties(swiftDispatch PROPERTIES + Swift_MODULE_NAME Dispatch + Swift_MODULE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/swift + INTERFACE_INCLUDE_DIRECTORIES ${CMAKE_CURRENT_BINARY_DIR}/swift) +target_link_libraries(swiftDispatch PRIVATE + DispatchStubs + BlocksRuntime::BlocksRuntime) +target_link_libraries(swiftDispatch PUBLIC + dispatch) +add_dependencies(swiftDispatch module-maps) + +get_swift_host_arch(swift_arch) +install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftmodule + ${CMAKE_CURRENT_BINARY_DIR}/swift/Dispatch.swiftdoc + DESTINATION ${INSTALL_TARGET_DIR}/${swift_arch}) +set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS swiftDispatch) +install(TARGETS swiftDispatch + EXPORT dispatchExports + ARCHIVE DESTINATION ${INSTALL_TARGET_DIR} + LIBRARY DESTINATION ${INSTALL_TARGET_DIR} + RUNTIME DESTINATION bin) +if(NOT BUILD_SHARED_LIBS) + set_property(GLOBAL APPEND PROPERTY DISPATCH_EXPORTS DispatchStubs) + install(TARGETS DispatchStubs + EXPORT dispatchExports + DESTINATION ${INSTALL_TARGET_DIR}) +elseif(NOT CMAKE_SYSTEM_NAME MATCHES "Darwin|Windows") + target_link_options(swiftDispatch PRIVATE "SHELL:-no-toolchain-stdlib-rpath") + set_target_properties(swiftDispatch PROPERTIES INSTALL_RPATH "$ORIGIN") +endif() diff --git 
a/src/swift/IO.swift b/src/swift/IO.swift index ad985c944..3c0e22484 100644 --- a/src/swift/IO.swift +++ b/src/swift/IO.swift @@ -67,7 +67,7 @@ extension DispatchIO { public convenience init( type: StreamType, - fileDescriptor: Int32, + fileDescriptor: dispatch_fd_t, queue: DispatchQueue, cleanupHandler: @escaping (_ error: Int32) -> Void) { diff --git a/src/swift/Private.swift b/src/swift/Private.swift index 89b1bb2f4..1683e2e51 100644 --- a/src/swift/Private.swift +++ b/src/swift/Private.swift @@ -27,7 +27,7 @@ public func dispatch_queue_create_with_target(_ label: UnsafePointer?, _ a } @available(*, unavailable, renamed:"DispatchIO.init(type:fileDescriptor:queue:cleanupHandler:)") -public func dispatch_io_create(_ type: UInt, _ fd: Int32, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO +public func dispatch_io_create(_ type: UInt, _ fd: dispatch_fd_t, _ queue: DispatchQueue, _ cleanup_handler: @escaping (Int32) -> Void) -> DispatchIO { fatalError() } diff --git a/src/swift/Wrapper.swift b/src/swift/Wrapper.swift index 678631b03..1bf26b184 100644 --- a/src/swift/Wrapper.swift +++ b/src/swift/Wrapper.swift @@ -91,7 +91,7 @@ public class DispatchIO : DispatchObject { return unsafeBitCast(__wrapped, to: dispatch_object_t.self) } - internal init(__type: UInt, fd: Int32, queue: DispatchQueue, + internal init(__type: UInt, fd: dispatch_fd_t, queue: DispatchQueue, handler: @escaping (_ error: Int32) -> Void) { __wrapped = dispatch_io_create(dispatch_io_type_t(__type), dispatch_fd_t(fd), queue.__wrapped, handler) } diff --git a/src/time.c b/src/time.c index 7c8f81277..30ed53b26 100644 --- a/src/time.c +++ b/src/time.c @@ -142,23 +142,23 @@ dispatch_time_to_nsecs(dispatch_time_t time, dispatch_clockid_t *clock_out, uint64_t *nsecs_out) { dispatch_clock_t clock; - uint64_t nsecs; + uint64_t value; - _dispatch_time_to_clock_and_value(time, true, &clock, &nsecs); + if (time != DISPATCH_TIME_FOREVER) { + 
_dispatch_time_to_clock_and_value(time, true, &clock, &value); - if (nsecs != DISPATCH_TIME_FOREVER) { switch (clock) { case DISPATCH_CLOCK_WALL: *clock_out = DISPATCH_CLOCKID_WALLTIME; - *nsecs_out = nsecs; + *nsecs_out = value; return true; case DISPATCH_CLOCK_UPTIME: *clock_out = DISPATCH_CLOCKID_UPTIME; - *nsecs_out = nsecs; + *nsecs_out = _dispatch_time_mach2nano(value); return true; case DISPATCH_CLOCK_MONOTONIC: *clock_out = DISPATCH_CLOCKID_MONOTONIC; - *nsecs_out = nsecs; + *nsecs_out = _dispatch_time_mach2nano(value); return true; } } diff --git a/src/trace.h b/src/trace.h index ed69e1b56..e4303dfd3 100644 --- a/src/trace.h +++ b/src/trace.h @@ -71,8 +71,19 @@ _dispatch_trace_client_callout2(void *ctxt, size_t i, void (*f)(void *, size_t)) _dispatch_introspection_callout_return(ctxt, func); } +DISPATCH_ALWAYS_INLINE +static inline void +_dispatch_trace_client_callout3_a(void *ctxt, size_t i, size_t w, void (*f)(void *, size_t, size_t)) +{ + dispatch_function_t func = (dispatch_function_t)f; + _dispatch_introspection_callout_entry(ctxt, func); + _dispatch_trace_callout(ctxt, func, _dispatch_client_callout3_a(ctxt, i, w, f)); + _dispatch_introspection_callout_return(ctxt, func); +} + #define _dispatch_client_callout _dispatch_trace_client_callout #define _dispatch_client_callout2 _dispatch_trace_client_callout2 +#define _dispatch_client_callout3_a _dispatch_trace_client_callout3_a #endif // DISPATCH_USE_DTRACE_INTROSPECTION || DISPATCH_INTROSPECTION #ifdef _COMM_PAGE_KDEBUG_ENABLE diff --git a/src/transform.c b/src/transform.c index 39147fa7a..6e65567ad 100644 --- a/src/transform.c +++ b/src/transform.c @@ -26,7 +26,7 @@ #include #define OSLittleEndian __LITTLE_ENDIAN #define OSBigEndian __BIG_ENDIAN -#elif defined(__FreeBSD__) +#elif defined(__FreeBSD__) || defined(__OpenBSD__) #include #define OSLittleEndian _LITTLE_ENDIAN #define OSBigEndian _BIG_ENDIAN @@ -35,7 +35,7 @@ #define OSBigEndian 4321 #endif -#if defined(__linux__) || defined(__FreeBSD__) +#if 
defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) #define OSSwapLittleToHostInt16 le16toh #define OSSwapBigToHostInt16 be16toh #define OSSwapHostToLittleInt16 htole16 @@ -781,11 +781,14 @@ _dispatch_transform_to_base32_with_table(dispatch_data_t data, const unsigned ch case 1: *ptr++ = '='; // c *ptr++ = '='; // d + DISPATCH_FALLTHROUGH; case 2: *ptr++ = '='; // e + DISPATCH_FALLTHROUGH; case 3: *ptr++ = '='; // f *ptr++ = '='; // g + DISPATCH_FALLTHROUGH; case 4: *ptr++ = '='; // h break; diff --git a/src/voucher.c b/src/voucher.c index 46e411c68..61f1643df 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -233,6 +233,7 @@ _voucher_insert(voucher_t v) { mach_voucher_t kv = v->v_ipc_kvoucher; if (!kv) return; + _voucher_hash_lock_lock(); if (unlikely(_voucher_hash_is_enqueued(v))) { _dispatch_voucher_debug("corruption", v); @@ -827,6 +828,31 @@ _voucher_activity_debug_channel_init(void) } } +static bool +_voucher_hash_is_empty() { + _voucher_hash_lock_lock(); + + bool empty = true; + for (unsigned int i = 0; i < VL_HASH_SIZE; i++) { + voucher_hash_head_s *head = &_voucher_hash[i]; + if (_voucher_hash_get_next(head->vhh_first) != VOUCHER_NULL) { + empty = false; + break; + } + } + _voucher_hash_lock_unlock(); + + return empty; +} + +void +_voucher_atfork_parent(void) +{ + if (!_voucher_hash_is_empty()){ + _dispatch_fork_becomes_unsafe(); + } +} + void _voucher_atfork_child(void) { @@ -841,6 +867,39 @@ _voucher_atfork_child(void) _firehose_task_buffer = NULL; // firehose buffer is VM_INHERIT_NONE } +static void +_voucher_process_can_use_arbitrary_personas_init(void *__unused ctxt) +{ +#if VOUCHER_USE_PERSONA_ADOPT_ANY + mach_voucher_t kv = _voucher_get_task_mach_voucher(); + kern_return_t kr; + + mach_voucher_attr_content_t result_out; + mach_msg_type_number_t result_out_size; + + boolean_t local_result; + result_out = (mach_voucher_attr_content_t) &local_result; + result_out_size = sizeof(local_result); + + kr = mach_voucher_attr_command(kv, 
MACH_VOUCHER_ATTR_KEY_BANK, + BANK_PERSONA_ADOPT_ANY, NULL, 0, result_out, &result_out_size); + if (kr != KERN_SUCCESS) { + DISPATCH_INTERNAL_CRASH(kr, "mach_voucher_attr_command(BANK_PERSONA_ADOPT_ANY) failed"); + } + + _voucher_process_can_use_arbitrary_personas = !!local_result; +#endif /* VOUCHER_USE_PERSONA_ADOPT_ANY */ +} + +bool +voucher_process_can_use_arbitrary_personas(void) +{ + dispatch_once_f(&_voucher_process_can_use_arbitrary_personas_pred, NULL, + _voucher_process_can_use_arbitrary_personas_init); + + return _voucher_process_can_use_arbitrary_personas; +} + voucher_t voucher_copy_with_persona_mach_voucher(mach_voucher_t persona_mach_voucher) { @@ -1928,6 +1987,12 @@ voucher_get_current_persona_proximate_info(struct proc_persona_info *persona_inf } #endif // __has_include() +bool +voucher_process_can_use_arbitrary_personas(void) +{ + return false; +} + void _voucher_activity_debug_channel_init(void) { diff --git a/src/voucher_internal.h b/src/voucher_internal.h index ea84ff847..c50c36ca4 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -89,6 +89,7 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); +void _voucher_atfork_parent(void); void _voucher_activity_debug_channel_init(void); #if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, @@ -450,6 +451,10 @@ _voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; + +extern dispatch_once_t _voucher_process_can_use_arbitrary_personas_pred; +extern bool _voucher_process_can_use_arbitrary_personas; + #if VOUCHER_USE_EMPTY_MACH_BASE_VOUCHER #define _voucher_default_task_mach_voucher MACH_VOUCHER_NULL #else diff --git a/src/workgroup.c b/src/workgroup.c index 9a1b98883..ae47870f5 100644 --- a/src/workgroup.c 
+++ b/src/workgroup.c @@ -1,29 +1,53 @@ +/* + * Copyright (c) 2019-2021 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#define PTHREAD_WORKGROUP_SPI 1 + #include "internal.h" #include #include +#include /* Declares struct symbols */ + OS_OBJECT_CLASS_DECL(os_workgroup); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_workgroup, - (void (*)(_os_object_t))_os_workgroup_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, (void (*)(_os_object_t))_os_workgroup_dispose); -#endif // USE_OBJC +#endif // !USE_OBJC #define WORKGROUP_CLASS OS_OBJECT_VTABLE(os_workgroup) OS_OBJECT_CLASS_DECL(os_workgroup_interval); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_workgroup_interval, - (void (*)(_os_object_t))_os_workgroup_interval_xref_dispose, - (void (*)(_os_object_t))_os_workgroup_interval_dispose); + (void (*)(_os_object_t))_os_workgroup_interval_explicit_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_interval_explicit_dispose); #endif // USE_OBJC #define WORKGROUP_INTERVAL_CLASS OS_OBJECT_VTABLE(os_workgroup_interval) OS_OBJECT_CLASS_DECL(os_workgroup_parallel); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_workgroup_parallel, - (void (*)(_os_object_t))_os_workgroup_xref_dispose, + (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, (void (*)(_os_object_t))_os_workgroup_dispose); #endif // USE_OBJC #define 
WORKGROUP_PARALLEL_CLASS OS_OBJECT_VTABLE(os_workgroup_parallel) @@ -52,6 +76,12 @@ static const struct os_workgroup_attr_s _os_workgroup_attr_default = { .wg_attr_flags = 0, }; +static const struct os_workgroup_attr_s _os_workgroup_with_workload_id_attr_default = { + .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, + .wg_type = OS_WORKGROUP_TYPE_DEFAULT, + .wg_attr_flags = OS_WORKGROUP_ATTR_NONPROPAGATING, +}; + static const struct os_workgroup_attr_s _os_workgroup_interval_attr_default = { .sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT, .wg_type = OS_WORKGROUP_INTERVAL_TYPE_DEFAULT, @@ -88,6 +118,22 @@ _os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi) } } +#if !USE_OBJC +void +_os_workgroup_explicit_xref_dispose(os_workgroup_t wg) +{ + _os_workgroup_xref_dispose(wg); + _os_object_release_internal(wg->_as_os_obj); +} + +void +_os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_xref_dispose(wgi); + _os_workgroup_explicit_xref_dispose(wgi->_as_wg); +} +#endif + static inline bool _os_workgroup_is_configurable(uint64_t wg_state) { @@ -125,6 +171,15 @@ _os_workgroup_interval_dispose(os_workgroup_interval_t wgi) work_interval_instance_free(wgi->wii); } +#if !USE_OBJC +void +_os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi) +{ + _os_workgroup_interval_dispose(wgi); + _os_workgroup_dispose(wgi->_as_wg); +} +#endif + #define os_workgroup_inc_refcount(wg) \ _os_object_retain_internal(wg->_as_os_obj); @@ -146,7 +201,7 @@ _os_workgroup_tsd_cleanup(void *ctxt) /* Destructor for the tsd key */ static os_workgroup_t _os_workgroup_get_current(void) { - return (os_workgroup_t) pthread_getspecific(_os_workgroup_key); + return (os_workgroup_t) _dispatch_thread_getspecific(os_workgroup_key); } static void @@ -157,7 +212,7 @@ _os_workgroup_set_current(os_workgroup_t new_wg) } os_workgroup_t old_wg = _os_workgroup_get_current(); - pthread_setspecific(_os_workgroup_key, new_wg); + 
_dispatch_thread_setspecific(os_workgroup_key, new_wg); if (old_wg != NULL) { os_workgroup_dec_refcount(old_wg); @@ -193,7 +248,7 @@ static inline bool _os_workgroup_type_is_interval_type(os_workgroup_type_t wg_type) { return (wg_type >= OS_WORKGROUP_INTERVAL_TYPE_DEFAULT) && - (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA); + (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_ARKIT); } static bool @@ -222,11 +277,17 @@ _os_workgroup_has_backing_workinterval(os_workgroup_t wg) return wg->wi != NULL; } +static inline uint32_t +_wi_flags_to_wi_type(uint32_t wi_flags) +{ + return wi_flags & WORK_INTERVAL_TYPE_MASK; +} + #if !TARGET_OS_SIMULATOR static os_workgroup_type_t _wi_flags_to_wg_type(uint32_t wi_flags) { - uint32_t type = wi_flags & WORK_INTERVAL_TYPE_MASK; + uint32_t type = _wi_flags_to_wi_type(wi_flags); bool is_unrestricted = (wi_flags & WORK_INTERVAL_FLAG_UNRESTRICTED); switch (type) { @@ -252,6 +313,8 @@ _wi_flags_to_wg_type(uint32_t wi_flags) return OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY; case WORK_INTERVAL_TYPE_COREMEDIA: return OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA; + case WORK_INTERVAL_TYPE_ARKIT: + return OS_WORKGROUP_INTERVAL_TYPE_ARKIT; case WORK_INTERVAL_TYPE_CA_CLIENT: return OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT; default: @@ -264,48 +327,79 @@ _wi_flags_to_wg_type(uint32_t wi_flags) } #endif -static work_interval_t -_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +static uint32_t +_wg_type_to_wi_flags(os_workgroup_type_t wg_type) { - /* All workgroups are joinable */ - uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; - - switch (attr->wg_type) { + switch (wg_type) { case OS_WORKGROUP_INTERVAL_TYPE_DEFAULT: - flags |= WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_UNRESTRICTED; - break; + return WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_UNRESTRICTED; case OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO: - flags |= (WORK_INTERVAL_TYPE_COREAUDIO | + return (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | 
WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); - break; case OS_WORKGROUP_INTERVAL_TYPE_COREANIMATION: - flags |= WORK_INTERVAL_TYPE_COREANIMATION; - break; + return WORK_INTERVAL_TYPE_COREANIMATION; case OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER: - flags |= WORK_INTERVAL_TYPE_CA_RENDER_SERVER; - break; + return WORK_INTERVAL_TYPE_CA_RENDER_SERVER; case OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY: - flags |= WORK_INTERVAL_TYPE_HID_DELIVERY; - break; + return WORK_INTERVAL_TYPE_HID_DELIVERY; case OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA: - flags |= WORK_INTERVAL_TYPE_COREMEDIA; - break; + return WORK_INTERVAL_TYPE_COREMEDIA; + case OS_WORKGROUP_INTERVAL_TYPE_ARKIT: + return (WORK_INTERVAL_TYPE_ARKIT | + WORK_INTERVAL_FLAG_FINISH_AT_DEADLINE); case OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT: - flags |= (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED | + return (WORK_INTERVAL_TYPE_COREAUDIO | WORK_INTERVAL_FLAG_UNRESTRICTED | WORK_INTERVAL_FLAG_ENABLE_AUTO_JOIN | WORK_INTERVAL_FLAG_ENABLE_DEFERRED_FINISH); - break; case OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT: - flags |= WORK_INTERVAL_TYPE_CA_CLIENT | WORK_INTERVAL_FLAG_UNRESTRICTED; - break; + return WORK_INTERVAL_TYPE_CA_CLIENT | WORK_INTERVAL_FLAG_UNRESTRICTED; case OS_WORKGROUP_TYPE_DEFAULT: /* Non-interval workgroup types */ - flags |= WORK_INTERVAL_FLAG_UNRESTRICTED; - break; + return WORK_INTERVAL_FLAG_UNRESTRICTED; default: os_crash("Creating an os_workgroup of unknown type"); } +} + +static inline uint32_t +_wg_type_to_wi_type(os_workgroup_type_t wg_type) +{ + return _wi_flags_to_wi_type(_wg_type_to_wi_flags(wg_type)); +} + +static inline int +_os_workgroup_get_wg_wi_types_from_port(mach_port_t port, + os_workgroup_type_t *out_wg_type, uint32_t *out_wi_type) +{ + os_workgroup_type_t wg_type = OS_WORKGROUP_TYPE_DEFAULT; + uint32_t wi_type = WORK_INTERVAL_TYPE_DEFAULT; + +#if !TARGET_OS_SIMULATOR + uint32_t wi_flags = 0; + int ret = work_interval_get_flags_from_port(port, &wi_flags); + if (ret != 0) { + return 
ret; + } + wg_type = _wi_flags_to_wg_type(wi_flags); + wi_type = _wi_flags_to_wi_type(wi_flags); +#else + (void)port; +#endif + + if (out_wg_type) *out_wg_type = wg_type; + if (out_wi_type) *out_wi_type = wi_type; + + return 0; +} + +static work_interval_t +_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +{ + /* All workgroups are joinable */ + uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; + + flags |= _wg_type_to_wi_flags(attr->wg_type); if (_os_workgroup_attr_is_differentiated(attr)) { flags |= WORK_INTERVAL_FLAG_GROUP; @@ -314,13 +408,101 @@ _os_workgroup_create_work_interval(os_workgroup_attr_t attr) work_interval_t wi; int rv = work_interval_create(&wi, flags); if (rv) { - errno = rv; return NULL; } return wi; } +struct os_workgroup_workload_id_table_entry_s { + const char* wl_id; + os_workgroup_type_t wl_type; +}; + +#if !TARGET_OS_SIMULATOR +static const struct os_workgroup_workload_id_table_entry_s + _os_workgroup_workload_id_table[] = { + { + .wl_id = "com.apple.coreaudio.hal.iothread", + .wl_type = OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO, + }, + { + .wl_id = "com.apple.coreaudio.hal.clientthread", + .wl_type = OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + }, +}; +#endif // !TARGET_OS_SIMULATOR + +static os_workgroup_type_t +_os_workgroup_lookup_type_from_workload_id(const char *workload_id) +{ + os_workgroup_type_t workload_type = OS_WORKGROUP_TYPE_DEFAULT; + + if (!workload_id) { + DISPATCH_CLIENT_CRASH(0, "Workload identifier must not be NULL"); + } +#if !TARGET_OS_SIMULATOR + for (size_t i = 0; i < countof(_os_workgroup_workload_id_table); i++) { + if (!strcasecmp(workload_id, _os_workgroup_workload_id_table[i].wl_id)){ + workload_type = _os_workgroup_workload_id_table[i].wl_type; + if (_os_workgroup_type_is_default_type(workload_type)) { + DISPATCH_INTERNAL_CRASH(i, "Invalid workload ID type"); + } + break; + } + } +#if OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID + if (_os_workgroup_type_is_default_type(workload_type)) { + _dispatch_log("WARNING: 
os_workgroup: Unknown workload ID \"%s\"", + workload_id); + } +#endif +#endif // !TARGET_OS_SIMULATOR + return workload_type; +} + +static inline os_workgroup_attr_t +_os_workgroup_workload_id_attr_resolve(const char *workload_id, + os_workgroup_attr_t attr, + const os_workgroup_attr_s *default_attr) +{ + /* N.B: expects to be called with the attr pointer returned by + * _os_workgroup_client_attr_resolve() (i.e. a mutable local copy) */ + os_workgroup_type_t wl_type = + _os_workgroup_lookup_type_from_workload_id(workload_id); + if (_os_workgroup_type_is_default_type(wl_type)) { + /* Unknown workload ID, fallback to attribute type */ + return attr; + } + /* Require matching types between workload ID and attribute. + * Use workload ID type as the type implied by the default attribute */ + if (attr->wg_type == default_attr->wg_type) { + attr->wg_type = wl_type; + } else if (wl_type != attr->wg_type) { + /* Workload ID and attribute type mismatch */ + return NULL; + } + return attr; +} + +static inline bool +_os_workgroup_workload_id_is_valid_for_wi_type(const char *workload_id, + uint32_t wi_type) +{ + os_workgroup_type_t wl_type = + _os_workgroup_lookup_type_from_workload_id(workload_id); + if (_os_workgroup_type_is_default_type(wl_type)) { + /* Unknown workload ID, nothing to match */ + return true; + } + /* Require matching workinterval types between workload ID and passed in + * type of port or workgroup object. 
*/ + if (_wg_type_to_wi_type(wl_type) != wi_type) { + return false; + } + return true; +} + static inline bool _os_workgroup_join_token_initialized(os_workgroup_join_token_t token) { @@ -346,6 +528,46 @@ _os_workgroup_client_attr_is_valid(os_workgroup_attr_t attr) return (attr && _os_workgroup_client_attr_initialized(attr)); } +static inline os_workgroup_attr_t +_os_workgroup_client_attr_resolve(os_workgroup_attr_t attr, + os_workgroup_attr_t client_attr, + const os_workgroup_attr_s *default_attr) +{ + if (client_attr == NULL) { + *attr = *default_attr; + } else { + if (!_os_workgroup_client_attr_is_valid(client_attr)) { + return NULL; + } + + // Make a local copy of the attr + *attr = *client_attr; + + switch (attr->sig) { + case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: + /* For any fields which are 0, we fill in with default values */ + if (attr->wg_attr_flags == 0) { + attr->wg_attr_flags = default_attr->wg_attr_flags; + } + if (attr->wg_type == 0) { + attr->wg_type = default_attr->wg_type; + } + break; + case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: + /* Nothing to do, the client built the attr up from scratch */ + break; + default: + return NULL; + } + + /* Mark it as resolved */ + attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + } + + os_assert(_os_workgroup_attr_is_resolved(attr)); + return attr; +} + static inline bool _start_time_is_in_past(os_clockid_t clock, uint64_t start) { @@ -355,6 +577,69 @@ _start_time_is_in_past(os_clockid_t clock, uint64_t start) } } +struct os_workgroup_pthread_ctx_s { + os_workgroup_t wg; + void *(*start_routine)(void *); + void *arg; +}; + +static void * +_os_workgroup_pthread_start(void *wrapper_arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = wrapper_arg; + os_workgroup_t wg = ctx->wg; + void *(*start_routine)(void *) = ctx->start_routine; + void *arg = ctx->arg; + + free(ctx); + + os_workgroup_join_token_s token; + int rc = os_workgroup_join(wg, &token); + if (rc != 0) { + DISPATCH_CLIENT_CRASH(rc, "pthread_start os_workgroup_join 
failed"); + } + + void *result = start_routine(arg); + + os_workgroup_leave(wg, &token); + os_workgroup_dec_refcount(wg); + + return result; +} + +static int +_os_workgroup_pthread_create_with_workgroup(pthread_t *thread, + os_workgroup_t wg, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *arg) +{ + struct os_workgroup_pthread_ctx_s *ctx = _dispatch_calloc(1, sizeof(*ctx)); + + os_workgroup_inc_refcount(wg); + + ctx->wg = wg; + ctx->start_routine = start_routine; + ctx->arg = arg; + + int rc = pthread_create(thread, attr, _os_workgroup_pthread_start, ctx); + if (rc != 0) { + os_workgroup_dec_refcount(wg); + free(ctx); + } + + return rc; +} + +static const struct pthread_workgroup_functions_s _os_workgroup_pthread_functions = { + .pwgf_version = PTHREAD_WORKGROUP_FUNCTIONS_VERSION, + .pwgf_create_with_workgroup = _os_workgroup_pthread_create_with_workgroup, +}; + +void +_workgroup_init(void) +{ + pthread_install_workgroup_functions_np(&_os_workgroup_pthread_functions); +} + #pragma mark Private functions int @@ -411,46 +696,15 @@ os_workgroup_create(const char *name, os_workgroup_attr_t attr) /* Resolve the input attributes */ os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, + &_os_workgroup_attr_default); if (attr == NULL) { - wga = _os_workgroup_attr_default; - attr = &wga; - } else { - if (!_os_workgroup_client_attr_is_valid(attr)) { - errno = EINVAL; - return NULL; - } - - // Make a local copy of the attr - wga = *attr; - attr = &wga; - - switch (attr->sig) { - case _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT: - { - /* For any fields which are 0, we fill in with default values */ - if (attr->wg_attr_flags == 0) { - attr->wg_attr_flags = _os_workgroup_attr_default.wg_attr_flags; - } - if (attr->wg_type == 0) { - attr->wg_type = _os_workgroup_attr_default.wg_type; - } - } - // Fallthrough - case _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT: - break; - default: - errno = EINVAL; - return NULL; - } - - /* Mark it as resolved */ - 
attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + errno = EINVAL; + return NULL; } - os_assert(_os_workgroup_attr_is_resolved(attr)); - /* Do some sanity checks */ - if (!_os_workgroup_type_is_default_type(attr->wg_type)){ + if (!_os_workgroup_type_is_default_type(attr->wg_type)) { errno = EINVAL; return NULL; } @@ -486,50 +740,174 @@ os_workgroup_interval_create(const char *name, os_clockid_t clock, /* Resolve the input attributes */ os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, + &_os_workgroup_interval_attr_default); if (attr == NULL) { - wga = _os_workgroup_interval_attr_default; - attr = &wga; - } else { - if (!_os_workgroup_client_attr_is_valid(attr)) { - errno = EINVAL; - return NULL; - } + errno = EINVAL; + return NULL; + } - // Make a local copy of the attr - wga = *attr; - attr = &wga; + /* Do some sanity checks */ + if (!_os_workgroup_type_is_interval_type(attr->wg_type)) { + errno = EINVAL; + return NULL; + } - if (attr->sig == _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT) { - /* Nothing to do, the client built the attr up from scratch */ - } else if (attr->sig == _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT) { - /* For any fields which are 0, we fill in with default values */ + if (!_os_workgroup_attr_is_differentiated(attr)) { + errno = EINVAL; + return NULL; + } - if (attr->wg_attr_flags == 0) { - attr->wg_attr_flags = _os_workgroup_interval_attr_default.wg_attr_flags; - } - if (attr->wg_type == 0) { - attr->wg_type = _os_workgroup_interval_attr_default.wg_type; - } - } else { - errno = EINVAL; - return NULL; - } + /* We don't support propagating workgroup yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + errno = ENOTSUP; + return NULL; + } - /* Mark it as resolved */ - attr->sig = _OS_WORKGROUP_ATTR_RESOLVED_INIT; + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; } - os_assert(_os_workgroup_attr_is_resolved(attr)); + wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, + 
sizeof(struct os_workgroup_interval_s)); + wgi->wi = wi; + wgi->clock = clock; + wgi->wii = work_interval_instance_alloc(wi); + wgi->wii_lock = OS_UNFAIR_LOCK_INIT; + wgi->wg_type = attr->wg_type; + wgi->wg_state = OS_WORKGROUP_OWNER; + + _os_workgroup_set_name(wgi->_as_wg, name); + + return wgi; +} + +os_workgroup_t +os_workgroup_create_with_workload_id(const char * name, + const char *workload_id, os_workgroup_attr_t attr) +{ + os_workgroup_t wg = NULL; + work_interval_t wi = NULL; + + const os_workgroup_attr_s *default_attr = + &_os_workgroup_with_workload_id_attr_default; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Invalid attribute pointer"); + errno = EINVAL; + return NULL; + } + + /* Resolve workload ID */ + attr = _os_workgroup_workload_id_attr_resolve(workload_id, attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. 
*/ + if (attr->wg_attr_flags != default_attr->wg_attr_flags) { + _os_workgroup_error_log("Non-default attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } + + /* Do some sanity checks */ + if (!_os_workgroup_type_is_default_type(attr->wg_type)) { + _os_workgroup_error_log("Non-default workload type: %s (%hd)", + workload_id, attr->wg_type); + errno = EINVAL; + return NULL; + } + + /* We don't support propagating workgroups yet */ + if (_os_workgroup_attr_is_propagating(attr)) { + _os_workgroup_error_log("Unsupported attribute flags: 0x%x", + attr->wg_attr_flags); + errno = ENOTSUP; + return NULL; + } + + wi = _os_workgroup_create_work_interval(attr); + if (wi == NULL) { + return NULL; + } + + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + wg->wi = wi; + wg->wg_state = OS_WORKGROUP_OWNER; + wg->wg_type = attr->wg_type; + + _os_workgroup_set_name(wg, name); + + return wg; +} + +os_workgroup_interval_t +os_workgroup_interval_create_with_workload_id(const char *name, + const char *workload_id, os_clockid_t clock, os_workgroup_attr_t attr) +{ + os_workgroup_interval_t wgi = NULL; + work_interval_t wi = NULL; + + const os_workgroup_attr_s *default_attr = + &_os_workgroup_interval_attr_default; + + /* Resolve the input attributes */ + os_workgroup_attr_s wga; + attr = _os_workgroup_client_attr_resolve(&wga, attr, default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Invalid attribute pointer"); + errno = EINVAL; + return NULL; + } + + /* Resolve workload ID */ + attr = _os_workgroup_workload_id_attr_resolve(workload_id, attr, + default_attr); + if (attr == NULL) { + _os_workgroup_error_log("Mismatched workload ID and attribute " + "interval type: %s vs %hd", workload_id, wga.wg_type); + errno = EINVAL; + return NULL; + } + + /* Require default attribute flags. 
*/ + if (attr->wg_attr_flags != default_attr->wg_attr_flags) { + _os_workgroup_error_log("Non-default attribute flags: 0x%x", + attr->wg_attr_flags); + errno = EINVAL; + return NULL; + } /* Do some sanity checks */ - if (!_os_workgroup_type_is_interval_type(attr->wg_type) || - !_os_workgroup_attr_is_differentiated(attr)){ + if (!_os_workgroup_type_is_interval_type(attr->wg_type)) { + _os_workgroup_error_log("Invalid workload interval type: %s (%hd)", + workload_id, attr->wg_type); + errno = EINVAL; + return NULL; + } + + if (!_os_workgroup_attr_is_differentiated(attr)) { + _os_workgroup_error_log("Invalid attribute flags: 0x%x", + attr->wg_attr_flags); errno = EINVAL; return NULL; } /* We don't support propagating workgroup yet */ if (_os_workgroup_attr_is_propagating(attr)) { + _os_workgroup_error_log("Unsupported attribute flags: 0x%x", + attr->wg_attr_flags); errno = ENOTSUP; return NULL; } @@ -642,6 +1020,7 @@ os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) os_assert(mach_port_out != NULL); *mach_port_out = MACH_PORT_NULL; + int rv = 0; uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); if (wg_state & OS_WORKGROUP_CANCELED) { @@ -653,31 +1032,33 @@ os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) } if (_os_workgroup_is_configurable(wg_state)) { - return work_interval_copy_port(wg->wi, mach_port_out); + rv = work_interval_copy_port(wg->wi, mach_port_out); + if (rv < 0) { + rv = errno; + } + return rv; } kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); os_assumes(kr == KERN_SUCCESS); *mach_port_out = wg->port; - return 0; + return rv; } os_workgroup_t os_workgroup_create_with_port(const char *name, mach_port_t port) { if (!MACH_PORT_VALID(port)) { + errno = EINVAL; return NULL; } -#if !TARGET_OS_SIMULATOR - uint32_t wi_flags = 0; - int ret = work_interval_get_flags_from_port(port, &wi_flags); + os_workgroup_type_t wg_type; + int ret = 
_os_workgroup_get_wg_wi_types_from_port(port, &wg_type, NULL); if (ret != 0) { - errno = ret; return NULL; } -#endif os_workgroup_t wg = NULL; wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, @@ -688,11 +1069,47 @@ os_workgroup_create_with_port(const char *name, mach_port_t port) kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); os_assumes(kr == KERN_SUCCESS); wg->port = port; -#if !TARGET_OS_SIMULATOR - wg->wg_type = _wi_flags_to_wg_type(wi_flags); -#else - wg->wg_type = OS_WORKGROUP_TYPE_DEFAULT; -#endif + wg->wg_type = wg_type; + + return wg; +} + +os_workgroup_t +os_workgroup_create_with_workload_id_and_port(const char *name, + const char *workload_id, mach_port_t port) +{ + if (!MACH_PORT_VALID(port)) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + errno = EINVAL; + return NULL; + } + + os_workgroup_type_t wg_type; + uint32_t wi_type; + int ret = _os_workgroup_get_wg_wi_types_from_port(port, &wg_type, &wi_type); + if (ret != 0) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + return NULL; + } + + /* Validate workload ID is compatible with port workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, wi_type)) { + _os_workgroup_error_log("Mismatched workload ID and port " + "interval type: %s vs %hd", workload_id, wg_type); + errno = EINVAL; + return NULL; + } + + os_workgroup_t wg = NULL; + wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(wg, name); + + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + os_assumes(kr == KERN_SUCCESS); + wg->port = port; + wg->wg_type = wg_type; return wg; } @@ -717,18 +1134,95 @@ os_workgroup_create_with_workgroup(const char *name, os_workgroup_t wg) if (_os_workgroup_has_backing_workinterval(wg)) { - kern_return_t kr; if (_os_workgroup_is_configurable(wg_state)) { - kr = work_interval_copy_port(wg->wi, &new_wg->port); + int rv 
= work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + goto error; + } } else { + kern_return_t kr; kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + goto error; + } new_wg->port = wg->port; } - os_assumes(kr == KERN_SUCCESS); + } + + return new_wg; + +error: + wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; +} +os_workgroup_t +os_workgroup_create_with_workload_id_and_workgroup(const char *name, + const char *workload_id, os_workgroup_t wg) +{ + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_CANCELED) { + _os_workgroup_error_log("Workgroup already cancelled"); + errno = EINVAL; + return NULL; + } + + /* Validate workload ID is compatible with workgroup workinterval type */ + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, + _wg_type_to_wi_type(wg->wg_type))) { + _os_workgroup_error_log("Mismatched workload ID and workgroup " + "interval type: %s vs %hd", workload_id, wg->wg_type); + errno = EINVAL; + return NULL; + } + + os_workgroup_t new_wg = NULL; + + new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, + sizeof(struct os_workgroup_s)); + _os_workgroup_set_name(new_wg, name); + new_wg->wg_type = wg->wg_type; + + /* We intentionally don't copy the context */ + + if (_os_workgroup_has_backing_workinterval(wg)) { + + if (_os_workgroup_is_configurable(wg_state)) { + int rv = work_interval_copy_port(wg->wi, &new_wg->port); + + if (rv < 0) { + _os_workgroup_error_log("Invalid workgroup work_interval"); + goto error; + } + } else { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + + if (kr != KERN_SUCCESS) { + _os_workgroup_error_log("Invalid workgroup port 0x%x", wg->port); + goto error; + } + new_wg->port = wg->port; + } } return new_wg; + +error: + 
wg_state = os_atomic_load(&new_wg->wg_state, relaxed); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { + free((void *)new_wg->name); + } + free(new_wg); + + return NULL; } int @@ -753,12 +1247,14 @@ os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) os_workgroup_t cur_wg = _os_workgroup_get_current(); if (cur_wg) { // We currently don't allow joining multiple workgroups at all, period - return EALREADY; + errno = EALREADY; + return errno; } uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); if (wg_state & OS_WORKGROUP_CANCELED) { - return EINVAL; + errno = EINVAL; + return errno; } int rv = 0; @@ -772,7 +1268,7 @@ os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) } if (rv) { - errno = rv; + rv = errno; return rv; } @@ -786,7 +1282,7 @@ os_workgroup_join(os_workgroup_t wg, os_workgroup_join_token_t token) token->new_wg = wg; _os_workgroup_set_current(wg); - return 0; + return rv; } void @@ -853,8 +1349,8 @@ os_workgroup_set_working_arena(os_workgroup_t wg, void * _Nullable client_arena, }); if (!success) { - errno = EBUSY; free(wg_arena); + errno = EBUSY; return errno; } @@ -939,14 +1435,16 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, } if (deadline < start || (!_start_time_is_in_past(wgi->clock, start))) { - return EINVAL; + errno = EINVAL; + return errno; } bool success = os_unfair_lock_trylock(&wgi->wii_lock); if (!success) { // Someone else is concurrently in a start, update or finish method. 
We // can't make progress here - return EBUSY; + errno = EBUSY; + return errno; } int rv = 0; @@ -965,6 +1463,7 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, if (rv) { os_unfair_lock_unlock(&wgi->wii_lock); + errno = rv; return rv; } @@ -977,7 +1476,6 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, if (rv != 0) { /* If we failed to start the interval in the kernel, clear the started * field */ - rv = errno; os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); } @@ -999,13 +1497,15 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, if (!success) { // Someone else is concurrently in a start, update or finish method. We // can't make progress here - return EBUSY; + errno = EBUSY; + return errno; } uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); if (!_os_workgroup_is_configurable(wg_state)) { os_unfair_lock_unlock(&wgi->wii_lock); - return EPERM; + errno = EPERM; + return errno; } /* Note: We allow updating and finishing an workgroup_interval that has @@ -1014,7 +1514,8 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, * intervals. However a subsequent new interval cannot be started */ if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) { os_unfair_lock_unlock(&wgi->wii_lock); - return EINVAL; + errno = EINVAL; + return errno; } work_interval_instance_t wii = wgi->wii; @@ -1041,17 +1542,20 @@ os_workgroup_interval_finish(os_workgroup_interval_t wgi, if (!success) { // Someone else is concurrently in a start, update or finish method. 
We // can't make progress here - return EBUSY; + errno = EBUSY; + return errno; } uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); if (!_os_workgroup_is_configurable(wg_state)) { os_unfair_lock_unlock(&wgi->wii_lock); - return EPERM; + errno = EPERM; + return errno; } if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) { os_unfair_lock_unlock(&wgi->wii_lock); - return EINVAL; + errno = EINVAL; + return errno; } work_interval_instance_t wii = wgi->wii; diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h index 59090600a..e19df6467 100644 --- a/src/workgroup_internal.h +++ b/src/workgroup_internal.h @@ -38,9 +38,32 @@ void _os_workgroup_interval_xref_dispose(os_workgroup_interval_t wgi); void _os_workgroup_interval_dispose(os_workgroup_interval_t wgi); void _os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size); +#if !USE_OBJC +void _os_workgroup_explicit_xref_dispose(os_workgroup_t wg); +void _os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi); +void _os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi); +#endif + extern pthread_key_t _os_workgroup_key; void _os_workgroup_tsd_cleanup(void *ctxt); +void _workgroup_init(void); + +#if 1 || DISPATCH_DEBUG // log workload_id API adoption errors by default for now +#define OS_WORKGROUP_LOG_ERRORS 1 +#endif + +#if 1 || DISPATCH_DEBUG // log workload_id lookup failures by default for now +#define OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID 1 +#endif + +#if OS_WORKGROUP_LOG_ERRORS +#define _os_workgroup_error_log(m, ...) \ + _dispatch_log("BUG IN CLIENT of %s: " m, __func__, ##__VA_ARGS__); +#else +#define _os_workgroup_error_log(m, ...) (void)m; +#endif + /* * os_workgroup_type_t is an internal representation that is a superset of types * for various types of workgroups. 
Currently it only includes diff --git a/xcodeconfig/libdispatch.clean b/xcodeconfig/libdispatch.clean index 25a5711a2..1e2bff8c9 100644 --- a/xcodeconfig/libdispatch.clean +++ b/xcodeconfig/libdispatch.clean @@ -22,8 +22,13 @@ __MergedGlobals __dispatch_bug.last_seen __dispatch_bug_deprecated.last_seen __dispatch_bug_kevent_client.last_seen -__dispatch_bug_kevent_client.last_seen.37 -__dispatch_bug_kevent_client.last_seen.39 +#if defined(__x86_64__) +__dispatch_bug_kevent_client.last_seen.44 +__dispatch_bug_kevent_client.last_seen.46 +#else +__dispatch_bug_kevent_client.last_seen.38 +__dispatch_bug_kevent_client.last_seen.40 +#endif __dispatch_bug_kevent_vanished.last_seen __dispatch_bug_mach_client.last_seen @@ -32,6 +37,7 @@ __dispatch_build __dispatch_child_of_unsafe_fork __dispatch_continuation_cache_limit +__dispatch_custom_workloop_root_queue __dispatch_data_empty __dispatch_host_time_data.0 __dispatch_host_time_data.1 diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index 20dc7d438..53cd19f74 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent __OS_dispatch_queue_concurrent_vtable +_OBJC_CLASS_$_OS_dispatch_queue_cooperative +__OS_dispatch_queue_cooperative_vtable _OBJC_CLASS_$_OS_dispatch_queue_global __OS_dispatch_queue_global_vtable _OBJC_CLASS_$_OS_dispatch_queue_pthread_root @@ -79,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent +_OBJC_METACLASS_$_OS_dispatch_queue_cooperative _OBJC_METACLASS_$_OS_dispatch_queue_global _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root _OBJC_METACLASS_$_OS_dispatch_queue_main @@ -143,6 +146,7 @@ __firehose_task_buffer_pred __voucher_activity_debug_channel __voucher_libtrace_hooks __voucher_task_mach_voucher_pred 
+__voucher_process_can_use_arbitrary_personas_pred # 32bits __dispatch_mach_host_port @@ -150,7 +154,6 @@ __dispatch_mach_notify_port __voucher_default_task_mach_voucher __voucher_hash_lock __voucher_task_mach_voucher -__os_workgroup_key # byte-sized __dispatch_is_daemon @@ -165,5 +168,5 @@ __dispatch_io_fds __dispatch_io_devs_lockq __dispatch_io_fds_lockq __dispatch_io_init_pred - __voucher_activity_disabled.disabled +__voucher_process_can_use_arbitrary_personas diff --git a/xcodeconfig/libdispatch.order b/xcodeconfig/libdispatch.order index c603f0d3c..c61d6b280 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -34,6 +34,8 @@ _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent __OS_dispatch_queue_concurrent_vtable +_OBJC_CLASS_$_OS_dispatch_queue_cooperative +__OS_dispatch_queue_cooperative_vtable _OBJC_CLASS_$_OS_dispatch_queue_global __OS_dispatch_queue_global_vtable _OBJC_CLASS_$_OS_dispatch_queue_pthread_root @@ -79,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent +_OBJC_METACLASS_$_OS_dispatch_queue_cooperative _OBJC_METACLASS_$_OS_dispatch_queue_global _OBJC_METACLASS_$_OS_dispatch_queue_pthread_root _OBJC_METACLASS_$_OS_dispatch_queue_main From c5a8b9353b880a6f31cc24f631578a4b05222868 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Thu, 31 Mar 2022 22:53:30 +0000 Subject: [PATCH 14/18] libdispatch-1325.100.36 Imported from libdispatch-1325.100.36.tar.gz --- dispatch/queue.h | 3 + os/voucher_private.h | 19 +++++ private/time_private.h | 33 +++++++- src/apply.c | 7 ++ src/event/event_kevent.c | 12 +-- src/eventlink.c | 8 +- src/eventlink_internal.h | 1 + src/inline_internal.h | 21 +++++ src/internal.h | 24 +----- src/mach.c | 56 ++------------ src/object.c | 10 ++- 
src/queue.c | 68 +++++++++-------- src/shims/target.h | 12 +-- src/source.c | 13 +++- src/source_internal.h | 1 + src/time.c | 25 +++++- src/voucher.c | 131 +++----------------------------- src/voucher_internal.h | 26 +------ src/workgroup.c | 15 +++- src/workgroup_internal.h | 2 + xcodeconfig/libdispatch.aliases | 1 + 21 files changed, 214 insertions(+), 274 deletions(-) diff --git a/dispatch/queue.h b/dispatch/queue.h index c4820b6c4..5cb502465 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -489,6 +489,9 @@ dispatch_async_and_wait_f(dispatch_queue_t queue, * @param block * The block to be invoked the specified number of iterations. * The result of passing NULL in this parameter is undefined. + * This function performs a Block_copy() and Block_release() of the input block + * on behalf of the callers. To elide the additional block allocation, + * dispatch_apply_f may be used instead. */ #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) diff --git a/os/voucher_private.h b/os/voucher_private.h index 3e72c919a..34b4f250b 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -154,6 +154,7 @@ OS_VOUCHER_EXPORT OS_OBJECT_RETURNS_RETAINED OS_WARN_RESULT OS_NOTHROW voucher_t _Nullable voucher_copy(void); + /*! * @function voucher_copy_without_importance * @@ -673,6 +674,24 @@ mach_voucher_persona_for_originator(uid_t persona_id, #endif // __has_include() +/*! + * @function voucher_retain, voucher_release + * + * @discussion + * Functions to retain and release a voucher reference. This should only be + * used by the Swift concurrency runtime. 
All other clients should use + * os_retain/os_release to manipulate the lifetime of vouchers + */ +SPI_AVAILABLE(macos(12.4), ios(15.4)) +OS_VOUCHER_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC") +void +voucher_release(voucher_t voucher); + +SPI_AVAILABLE(macos(12.4), ios(15.4)) +OS_VOUCHER_EXPORT OS_SWIFT_UNAVAILABLE("Can't be used with ARC") +voucher_t +voucher_retain(voucher_t voucher); + __END_DECLS DISPATCH_ASSUME_NONNULL_END diff --git a/private/time_private.h b/private/time_private.h index e8dd1accf..011f744e8 100644 --- a/private/time_private.h +++ b/private/time_private.h @@ -86,7 +86,7 @@ enum { #endif // __APPLE__ /*! - * @function dispatch_time_to_nsecs + * @function dispatch_time_to_nsec * * @abstract * Returns the clock and nanoseconds of a given dispatch_time_t. @@ -110,12 +110,39 @@ enum { * Returns false if the dispatch_time_t value was invalid, * or DISPATCH_TIME_FOREVER. */ -API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) +SPI_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW bool -dispatch_time_to_nsecs(dispatch_time_t time, +dispatch_time_to_nsec(dispatch_time_t time, dispatch_clockid_t *clock, uint64_t *nsecs); + +/*! + * @function dispatch_time_from_nsec + * + * @abstract + * Returns a dispatch_time_t given a clock and an absolute deadline in + * nanoseconds. This is the opposite of dispatch_time_to_nsec. + * + * @discussion + * This interface allows to encode dispatch_time_t when given an absolute + * deadline and a clock. + * + * @param clock + * A clockid for this time. + * + * @param deadline + * Number of nanoseconds denoting the absolute deadline in time starting from to + * the epoch of the clock ID + * + * @result + * The dispatch_time_t encoding of the deadline in the clock id given. 
+ */ +SPI_AVAILABLE(macos(12.3), ios(15.4), tvos(15.4), watchos(8.4)) +DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +dispatch_time_t +dispatch_time_from_nsec(dispatch_clockid_t clock, uint64_t deadline); + __END_DECLS #endif diff --git a/src/apply.c b/src/apply.c index 160874f4c..1a9b563a0 100644 --- a/src/apply.c +++ b/src/apply.c @@ -616,16 +616,23 @@ dispatch_apply_with_attr_f(size_t iterations, dispatch_apply_attr_t attr, void * void dispatch_apply(size_t iterations, dispatch_queue_t dq, void (^work)(size_t)) { + // We need to do Block_copy here since any __block variables in work need to + // be copied over to the heap in a single threaded context. See + // rdar://77167979 + work = _dispatch_Block_copy(work); dispatch_apply_f(iterations, dq, work, (dispatch_apply_function_t)_dispatch_Block_invoke(work)); + Block_release(work); } void dispatch_apply_with_attr(size_t iterations, dispatch_apply_attr_t attr, void (^work)(size_t iteration, size_t worker_index)) { + work = _dispatch_Block_copy(work); dispatch_apply_with_attr_f(iterations, attr, work, (dispatch_apply_attr_function_t)_dispatch_Block_invoke(work)); + Block_release(work); } #endif diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 790d72408..4b990a8f4 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -959,7 +959,7 @@ void _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, uint64_t _Atomic *addr) { -#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP dispatch_kevent_s ke = { .ident = port, .filter = EVFILT_WORKLOOP, @@ -976,13 +976,13 @@ _dispatch_sync_ipc_handoff_begin(dispatch_wlh_t wlh, mach_port_t port, } #else (void)wlh; (void)port; (void)addr; -#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#endif // DISPATCH_USE_KEVENT_WORKLOOP } void _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) { -#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP dispatch_kevent_s ke = { .ident = 
port, .filter = EVFILT_WORKLOOP, @@ -993,7 +993,7 @@ _dispatch_sync_ipc_handoff_end(dispatch_wlh_t wlh, mach_port_t port) _dispatch_kq_deferred_update(wlh, &ke); #else (void)wlh; (void)port; -#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#endif // DISPATCH_USE_KEVENT_WORKLOOP } #endif @@ -1651,12 +1651,12 @@ _dispatch_kevent_workloop_poke_drain(dispatch_kevent_t ke) dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); dispatch_wlh_t wlh = (dispatch_wlh_t)ke->udata; -#if DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#if DISPATCH_USE_KEVENT_WORKLOOP if (ke->fflags & NOTE_WL_SYNC_IPC) { dispatch_assert((ke->flags & EV_ERROR) && ke->data == ENOENT); return _dispatch_kevent_wlh_debug("ignoring", ke); } -#endif // DISPATCH_USE_WL_SYNC_IPC_HANDOFF +#endif // DISPATCH_USE_KEVENT_WORKLOOP dispatch_assert(ke->fflags & NOTE_WL_THREAD_REQUEST); if (ke->flags & EV_ERROR) { diff --git a/src/eventlink.c b/src/eventlink.c index ffba90002..9614babf6 100644 --- a/src/eventlink.c +++ b/src/eventlink.c @@ -27,7 +27,7 @@ OS_OBJECT_CLASS_DECL(os_eventlink); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_eventlink, (void (*)(_os_object_t))_os_eventlink_xref_dispose, - (void (*)(_os_object_t))_os_eventlink_dispose); + (void (*)(_os_object_t))_os_eventlink_explicit_dispose); #endif // USE_OBJC #define EVENTLINK_CLASS OS_OBJECT_VTABLE(os_eventlink) @@ -57,6 +57,12 @@ _os_eventlink_dispose(os_eventlink_t ev) { } } +void +_os_eventlink_explicit_dispose(os_eventlink_t ev) { + _os_eventlink_dispose(ev); + free(ev); +} + static inline os_eventlink_t _os_eventlink_create_internal(const char *name) { diff --git a/src/eventlink_internal.h b/src/eventlink_internal.h index 4c8f0d288..ddf019aec 100644 --- a/src/eventlink_internal.h +++ b/src/eventlink_internal.h @@ -63,5 +63,6 @@ _os_eventlink_is_cancelled(uint64_t ev_state) void _os_eventlink_xref_dispose(os_eventlink_t ev); void _os_eventlink_dispose(os_eventlink_t ev); +void _os_eventlink_explicit_dispose(os_eventlink_t ev); #endif /* 
__OS_EVENTLINK_INTERNAL */ diff --git a/src/inline_internal.h b/src/inline_internal.h index a78e50277..9a33f2eaf 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -2006,6 +2006,27 @@ _dispatch_queue_is_cooperative(dispatch_queue_class_t dqu) return (dqu._dgq)->dq_priority & DISPATCH_PRIORITY_FLAG_COOPERATIVE; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_object_supported_on_cooperative_queue(dispatch_object_t dou) +{ + /* We only allow enqueueing of objects of a few types on the cooperative + * pool: + * + * (a) continuations + * (b) swift jobs + * (c) kevent timer sources + */ + if (_dispatch_object_has_vtable(dou)) { + return (dx_type(dou._do) == DISPATCH_SWIFT_JOB_TYPE) || + ((dx_metatype(dou._do) == _DISPATCH_SOURCE_TYPE) && + _dispatch_source_is_timer(dou._ds)); + } else { + return true; + } +} + + DISPATCH_ALWAYS_INLINE DISPATCH_CONST static inline dispatch_queue_global_t _dispatch_get_root_queue(dispatch_qos_t qos, uintptr_t flags) diff --git a/src/internal.h b/src/internal.h index d22a3ac09..6fc0a2750 100644 --- a/src/internal.h +++ b/src/internal.h @@ -795,22 +795,6 @@ _dispatch_fork_becomes_unsafe(void) #endif #endif // !defined(DISPATCH_USE_KEVENT_WORKLOOP) -#ifndef DISPATCH_USE_WL_SYNC_IPC_HANDOFF -#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) -#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 1 -#else -#define DISPATCH_USE_WL_SYNC_IPC_HANDOFF 0 -#endif -#endif // !defined DISPATCH_USE_WL_SYNC_IPC_HANDOFF - -#ifndef DISPATCH_USE_KEVENT_SETUP -#if DISPATCH_USE_KEVENT_WORKLOOP && DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(109900) -#define DISPATCH_USE_KEVENT_SETUP 1 -#else -#define DISPATCH_USE_KEVENT_SETUP 0 -#endif -#endif // !defined(DISPATCH_USE_KEVENT_SETUP) - #ifdef EVFILT_MEMORYSTATUS #ifndef DISPATCH_USE_MEMORYSTATUS #define DISPATCH_USE_MEMORYSTATUS 1 @@ -835,12 +819,6 @@ extern int malloc_engaged_nano(void); extern bool _dispatch_memory_warn; #endif -#if defined(MACH_MSG_QOS_LAST) && 
DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) -#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 1 -#else -#define DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED 0 -#endif - #if defined(F_SETNOSIGPIPE) && defined(F_GETNOSIGPIPE) #ifndef DISPATCH_USE_SETNOSIGPIPE #define DISPATCH_USE_SETNOSIGPIPE 1 @@ -1052,7 +1030,7 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #endif #ifndef OS_EVENTLINK_USE_MACH_EVENTLINK -#if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101600) && __has_include() +#if __has_include() #define OS_EVENTLINK_USE_MACH_EVENTLINK 1 #else #define OS_EVENTLINK_USE_MACH_EVENTLINK 0 diff --git a/src/mach.c b/src/mach.c index 3a39d8d9c..aa9c04b6d 100644 --- a/src/mach.c +++ b/src/mach.c @@ -1038,13 +1038,6 @@ _dispatch_mach_msg_not_sent(dispatch_mach_t dm, dispatch_object_t dou, } } -DISPATCH_ALWAYS_INLINE -static inline bool -_dispatch_mach_send_priority_in_voucher(void) -{ - return DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED; -} - DISPATCH_ALWAYS_INLINE static inline mach_msg_priority_t _dispatch_mach_send_priority(dispatch_mach_msg_t dmsg, @@ -1052,21 +1045,13 @@ _dispatch_mach_send_priority(dispatch_mach_msg_t dmsg, { qos_ovr = _dispatch_qos_propagate(qos_ovr); if (qos_ovr) { -#if DISPATCH_USE_MACH_MSG_PRIORITY_COMBINED - if (!_dispatch_mach_send_priority_in_voucher()) { - mach_msg_qos_t qos; - int relpri; + mach_msg_qos_t qos; + int relpri; - qos = (mach_msg_qos_t)_dispatch_qos_from_pp(dmsg->dmsg_priority); - relpri = _pthread_priority_relpri(dmsg->dmsg_priority); - *opts |= MACH_SEND_OVERRIDE; - return mach_msg_priority_encode((mach_msg_qos_t)qos_ovr, qos, relpri); - } -#else - (void)dmsg; -#endif + qos = (mach_msg_qos_t)_dispatch_qos_from_pp(dmsg->dmsg_priority); + relpri = _pthread_priority_relpri(dmsg->dmsg_priority); *opts |= MACH_SEND_OVERRIDE; - return (mach_msg_priority_t)_dispatch_qos_to_pp(qos_ovr); + return mach_msg_priority_encode((mach_msg_qos_t)qos_ovr, qos, relpri); } return MACH_MSG_PRIORITY_UNSPECIFIED; } @@ -1081,9 +1066,8 @@ 
_dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, dispatch_mach_msg_t dmsg = dou._dmsg, dmsgr = NULL; voucher_t voucher = dmsg->dmsg_voucher; dispatch_queue_t drq = NULL; - mach_voucher_t ipc_kvoucher = MACH_VOUCHER_NULL; uint32_t send_status = 0; - bool clear_voucher = false, kvoucher_move_send = false; + bool clear_voucher = false; mach_msg_header_t *msg = _dispatch_mach_msg_get_msg(dmsg); bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); @@ -1140,19 +1124,8 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; - if (_dispatch_mach_send_priority_in_voucher() && - dmsg->dmsg_priority != _voucher_get_priority(voucher)) { - ipc_kvoucher = _voucher_create_mach_voucher_with_priority( - voucher, dmsg->dmsg_priority); - } _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); - if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { - kvoucher_move_send = true; - clear_voucher = _voucher_mach_msg_set_mach_voucher(msg, - ipc_kvoucher, kvoucher_move_send); - } else { - clear_voucher = _voucher_mach_msg_set(msg, voucher); - } + clear_voucher = _voucher_mach_msg_set(msg, voucher); msg_priority = _dispatch_mach_send_priority(dmsg, qos, &opts); if (reply_port && dm->dm_strict_reply) { opts |= MACH_MSG_STRICT_REPLY; @@ -1184,8 +1157,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, DISPATCH_CLIENT_CRASH(kr, "Voucher port corruption"); } mach_voucher_t kv; - kv = _voucher_mach_msg_clear(msg, kvoucher_move_send); - if (kvoucher_move_send) ipc_kvoucher = kv; + kv = _voucher_mach_msg_clear(msg, false); } } if (kr == MACH_SEND_TIMED_OUT && (opts & MACH_SEND_TIMEOUT)) { @@ -1213,19 +1185,7 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, // send kevent must be installed on the manager queue dm->dm_needs_mgr = true; } - if (_dispatch_mach_send_priority_in_voucher() && ipc_kvoucher) { - 
_dispatch_kvoucher_debug("reuse on re-send", ipc_kvoucher); - voucher_t ipc_voucher; - ipc_voucher = _voucher_create_with_priority_and_mach_voucher( - voucher, dmsg->dmsg_priority, ipc_kvoucher); - _dispatch_voucher_debug("mach-msg[%p] replace voucher[%p]", - ipc_voucher, dmsg, voucher); - if (dmsg->dmsg_voucher) _voucher_release(dmsg->dmsg_voucher); - dmsg->dmsg_voucher = ipc_voucher; - } goto out; - } else if (ipc_kvoucher && (kr || !kvoucher_move_send)) { - _voucher_dealloc_mach_voucher(ipc_kvoucher); } dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; if (!(msg_opts & DISPATCH_MACH_WAIT_FOR_REPLY) && !kr && reply_port && diff --git a/src/object.c b/src/object.c index 8fad3ebad..67f2c1cd6 100644 --- a/src/object.c +++ b/src/object.c @@ -305,6 +305,13 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) _dispatch_object_is_root_or_base_queue(dou))) { return; } + + if (unlikely(tq && _dispatch_queue_is_cooperative(tq) && + !_dispatch_object_supported_on_cooperative_queue(dou))) { + DISPATCH_CLIENT_CRASH(dou._do, + "Cannot target the cooperative root queue - not implemented"); + } + if (dx_cluster(dou._do) == _DISPATCH_QUEUE_CLUSTER) { return _dispatch_lane_set_target_queue(dou._dl, tq); } @@ -316,9 +323,6 @@ dispatch_set_target_queue(dispatch_object_t dou, dispatch_queue_t tq) tq = _dispatch_get_default_queue(false); } - if (_dispatch_queue_is_cooperative(tq)) { - DISPATCH_CLIENT_CRASH(tq, "Cannot target object to cooperative root queue - not implemented"); - } _dispatch_object_set_target_queue_inline(dou._do, tq); } diff --git a/src/queue.c b/src/queue.c index 44cdb4aa5..4231b64fb 100644 --- a/src/queue.c +++ b/src/queue.c @@ -168,6 +168,18 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, pflags |= _PTHREAD_SET_SELF_VOUCHER_FLAG; #endif } + +#if DISPATCH_USE_KEVENT_WORKQUEUE + dispatch_deferred_items_t ddi = _dispatch_deferred_items_get(); + if (ddi && ddi->ddi_wlh_needs_update) { + /* If we have deferred creation of TR 
for the current thread, make sure + * to do that first before we do anything to adjust our priority. + * rdar://86110240 + */ + _dispatch_event_loop_drain(KEVENT_FLAG_IMMEDIATE); + } +#endif + if (!pflags) return; int r = _pthread_set_properties_self(pflags, pp, kv); if (r == EINVAL) { @@ -2472,11 +2484,6 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) { uint64_t old_state, new_state, role; - /* TODO (rokhinip): We're going to have to change this in the future when we - * allow targetting queues to a cooperative pool and need to figure out what - * kind of a role that gives the queue */ - dispatch_assert(!_dispatch_queue_is_cooperative(tq)); - if (!dx_hastypeflag(tq, QUEUE_ROOT)) { role = DISPATCH_QUEUE_ROLE_INNER; } else if (_dispatch_base_lane_is_wlh(dq, tq)) { @@ -5362,13 +5369,11 @@ void _dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou, dispatch_qos_t qos) { - if (unlikely(_dispatch_queue_is_cooperative(dq))) { - /* If we're here, means that we're in the simulator fallback case. 
We - * still restrict what can target the cooperative thread pool */ - if (_dispatch_object_has_vtable(dou) && - dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { - DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); - } + /* Simulator fallback path for cooperative queue */ + if (unlikely(_dispatch_queue_is_cooperative(dq) && + !_dispatch_object_supported_on_cooperative_queue(dou))) { + DISPATCH_CLIENT_CRASH(dou._do, + "Cannot target the cooperative root queue - not implemented"); } // reserving non barrier width @@ -7107,13 +7112,10 @@ _dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou, } #endif - if (_dispatch_queue_is_cooperative(rq)) { - /* We only allow enqueueing of continuations or swift job objects on the - * cooperative pool, no other objects */ - if (_dispatch_object_has_vtable(dou) && - dx_type(dou._do) != DISPATCH_SWIFT_JOB_TYPE) { - DISPATCH_CLIENT_CRASH(dou._do, "Cannot target the cooperative global queue - not implemented"); - } + if (unlikely(_dispatch_queue_is_cooperative(rq) && + !_dispatch_object_supported_on_cooperative_queue(dou))) { + DISPATCH_CLIENT_CRASH(dou._do, + "Cannot target the cooperative root queue - not implemented"); } #if HAVE_PTHREAD_WORKQUEUE_QOS @@ -7825,6 +7827,15 @@ dispatch_main(void) #if HAVE_PTHREAD_MAIN_NP if (pthread_main_np()) { #endif + // Make sure to drain the main queue before exiting main thread. + // rdar://80474924&52978527. 
+ // + // We also need to guard against reentrant calls back to drain the main + // queue + _dispatch_main_q.dq_side_suspend_cnt = true; + _dispatch_main_queue_drain(&_dispatch_main_q); + _dispatch_main_q.dq_side_suspend_cnt = false; + _dispatch_object_debug(&_dispatch_main_q, "%s", __func__); _dispatch_program_is_probably_callback_driven = true; _dispatch_ktrace0(ARIADNE_ENTER_DISPATCH_MAIN_CODE); @@ -8014,7 +8025,7 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) } #endif -#if DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP struct pthread_workqueue_config cfg = { .version = PTHREAD_WORKQUEUE_CONFIG_VERSION, .flags = 0, @@ -8031,32 +8042,23 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunreachable-code" if (unlikely(!_dispatch_kevent_workqueue_enabled)) { -#if DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP cfg.workq_cb = _dispatch_worker_thread2; r = pthread_workqueue_setup(&cfg, sizeof(cfg)); #else r = _pthread_workqueue_init(_dispatch_worker_thread2, offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP +#endif // DISPATCH_USE_KEVENT_WORKLOOP #if DISPATCH_USE_KEVENT_WORKLOOP } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) { -#if DISPATCH_USE_KEVENT_SETUP cfg.workq_cb = _dispatch_worker_thread2; cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread; r = pthread_workqueue_setup(&cfg, sizeof(cfg)); -#else - r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2, - (pthread_workqueue_function_kevent_t) - _dispatch_kevent_worker_thread, - (pthread_workqueue_function_workloop_t) - _dispatch_workloop_worker_thread, - offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP #endif // DISPATCH_USE_KEVENT_WORKLOOP #if 
DISPATCH_USE_KEVENT_WORKQUEUE } else if (wq_supported & WORKQ_FEATURE_KEVENT) { -#if DISPATCH_USE_KEVENT_SETUP +#if DISPATCH_USE_KEVENT_WORKLOOP cfg.workq_cb = _dispatch_worker_thread2; cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread; r = pthread_workqueue_setup(&cfg, sizeof(cfg)); @@ -8065,7 +8067,7 @@ _dispatch_root_queues_init_once(void *context DISPATCH_UNUSED) (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread, offsetof(struct dispatch_queue_s, dq_serialnum), 0); -#endif // DISPATCH_USE_KEVENT_SETUP +#endif // DISPATCH_USE_KEVENT_WORKLOOP #endif } else { DISPATCH_INTERNAL_CRASH(wq_supported, "Missing Kevent WORKQ support"); diff --git a/src/shims/target.h b/src/shims/target.h index 425279b19..e5dc5d199 100644 --- a/src/shims/target.h +++ b/src/shims/target.h @@ -38,15 +38,15 @@ #if TARGET_OS_OSX # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (__MAC_OS_X_VERSION_MIN_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) -# error "OS X hosts older than OS X 10.14 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(110000) +# error "OS X hosts older than OS X 11.00 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(110000) #elif TARGET_OS_SIMULATOR # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) \ (IPHONE_SIMULATOR_HOST_MIN_VERSION_REQUIRED >= (x)) -# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) -# error "Simulator hosts older than OS X 10.14 aren't supported anymore" -# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(101400) +# if !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(110000) +# error "Simulator hosts older than OS X 11.00 aren't supported anymore" +# endif // !DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(100000) #else # define DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(x) 1 # if !TARGET_OS_DRIVERKIT && __IPHONE_OS_VERSION_MIN_REQUIRED < 90000 diff --git a/src/source.c b/src/source.c index 9af2a4a8b..6c22e5136 100644 --- 
a/src/source.c +++ b/src/source.c @@ -60,9 +60,12 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, if (unlikely(!dq)) { dq = _dispatch_get_default_queue(true); } else { - if (_dispatch_queue_is_cooperative(dq)) { - DISPATCH_CLIENT_CRASH(dq, "Cannot target object to cooperative root queue - not implemented"); + // Should match up with conditions checked in + // dispatch_object_supported_on_cooperative_queue() + if (_dispatch_queue_is_cooperative(dq) && !dr->du_is_timer) { + DISPATCH_CLIENT_CRASH(ds, "Cannot target source to the cooperative root queue - not implemented"); } + _dispatch_retain((dispatch_queue_t _Nonnull)dq); } ds->do_targetq = dq; @@ -73,6 +76,12 @@ dispatch_source_create(dispatch_source_type_t dst, uintptr_t handle, return ds; } +bool +_dispatch_source_is_timer(dispatch_source_t ds) +{ + return ds->ds_refs->du_is_timer; +} + void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free) { diff --git a/src/source_internal.h b/src/source_internal.h index 9297ac5cd..96fdf47c9 100644 --- a/src/source_internal.h +++ b/src/source_internal.h @@ -69,6 +69,7 @@ struct dispatch_channel_s { dispatch_assert_valid_lane_type(dispatch_channel_s); dispatch_static_assert(sizeof(struct dispatch_channel_s) <= 128); +bool _dispatch_source_is_timer(dispatch_source_t ds); void _dispatch_source_xref_dispose(dispatch_source_t ds); void _dispatch_source_dispose(dispatch_source_t ds, bool *allow_free); void _dispatch_source_activate(dispatch_source_t ds); diff --git a/src/time.c b/src/time.c index 30ed53b26..e19d2fba8 100644 --- a/src/time.c +++ b/src/time.c @@ -138,7 +138,7 @@ dispatch_time(dispatch_time_t inval, int64_t delta) } bool -dispatch_time_to_nsecs(dispatch_time_t time, +dispatch_time_to_nsec(dispatch_time_t time, dispatch_clockid_t *clock_out, uint64_t *nsecs_out) { dispatch_clock_t clock; @@ -168,6 +168,29 @@ dispatch_time_to_nsecs(dispatch_time_t time, return false; } +dispatch_time_t +dispatch_time_from_nsec(dispatch_clockid_t 
clock, uint64_t deadline) +{ + // We can't easily make sense of whether deadline 0 is DISPATCH_TIME_NOW or + // DISPATCH_TIME_FOREVER. dispatch_time() uses underflow/overflow logic to + // differentiate but we don't have that information available so we always + // reject values of 0 or 1 and round them up to 2. + if (deadline < 2) { + deadline = 2; + } + + uint64_t value = _dispatch_time_nano2mach((uint64_t)deadline); + + switch (clock) { + case DISPATCH_CLOCKID_WALLTIME: + return _dispatch_clock_and_value_to_time(DISPATCH_CLOCK_WALL, deadline); + case DISPATCH_CLOCKID_MONOTONIC: + return _dispatch_clock_and_value_to_time(DISPATCH_CLOCK_MONOTONIC, value); + case DISPATCH_CLOCKID_UPTIME: + return _dispatch_clock_and_value_to_time(DISPATCH_CLOCK_UPTIME, value); + } +} + dispatch_time_t dispatch_walltime(const struct timespec *inval, int64_t delta) { diff --git a/src/voucher.c b/src/voucher.c index 61f1643df..581bbc0ba 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -99,9 +99,6 @@ _voucher_clone(const voucher_t ov, voucher_fields_t ignore_fields) v->v_kvoucher = kvb->v_kvoucher; v->v_kv_has_importance = kvb->v_kv_has_importance; } - if (fields & VOUCHER_FIELD_PRIORITY) { - v->v_priority = ov->v_priority; - } if (fields & VOUCHER_FIELD_ACTIVITY) { v->v_activity = ov->v_activity; v->v_activity_creator = ov->v_activity_creator; @@ -354,15 +351,11 @@ voucher_replace_default_voucher(void) DISPATCH_ALWAYS_INLINE static inline mach_voucher_attr_recipe_size_t _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, - mach_voucher_t kvb, pthread_priority_t pp) + mach_voucher_t kvb) { mach_voucher_attr_recipe_size_t extra = _voucher_extra_size(v); mach_voucher_attr_recipe_size_t size = 0; - // normalize to just the QoS class and 0 relative priority - pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; - if (pp) pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; - *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ .key = MACH_VOUCHER_ATTR_KEY_ALL, .command = MACH_VOUCHER_ATTR_COPY, 
@@ -370,18 +363,7 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, }; size += _voucher_mach_recipe_size(0); - if (pp) { - ipc_pthread_priority_value_t value = (ipc_pthread_priority_value_t)pp; - *mvar_buf++ = (mach_voucher_attr_recipe_data_t){ - .key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, - .command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE, - .content_size = sizeof(value), - }; - mvar_buf = _dispatch_memappend(mvar_buf, &value); - size += _voucher_mach_recipe_size(sizeof(value)); - } - - if ((v && v->v_activity) || pp) { + if (v && v->v_activity) { _voucher_mach_udata_s *udata_buf; unsigned udata_size = 0; @@ -397,20 +379,12 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, }; udata_buf = (_voucher_mach_udata_s *)(mvar_buf->content); - if (v && v->v_activity) { - *udata_buf = (_voucher_mach_udata_s){ - .vmu_magic = VOUCHER_MAGIC_V3, - .vmu_priority = (_voucher_priority_t)pp, - .vmu_activity = v->v_activity, - .vmu_activity_pid = v->v_activity_creator, - .vmu_parent_activity = v->v_parent_activity, - }; - } else { - *udata_buf = (_voucher_mach_udata_s){ - .vmu_magic = VOUCHER_MAGIC_V3, - .vmu_priority = (_voucher_priority_t)pp, - }; - } + *udata_buf = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + .vmu_activity = v->v_activity, + .vmu_activity_pid = v->v_activity_creator, + .vmu_parent_activity = v->v_parent_activity, + }; mvar_buf = (mach_voucher_attr_recipe_t)(mvar_buf->content + udata_size); size += _voucher_mach_recipe_size(udata_size); @@ -430,8 +404,7 @@ _voucher_get_mach_voucher(voucher_t voucher) if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher; mach_voucher_t kvb = voucher->v_kvoucher; if (!kvb) kvb = _voucher_get_task_mach_voucher(); - if (!voucher->v_activity && !voucher->v_priority && - !_voucher_extra_size(voucher)) { + if (!voucher->v_activity && !_voucher_extra_size(voucher)) { return kvb; } @@ -440,7 +413,7 @@ _voucher_get_mach_voucher(voucher_t voucher) mach_voucher_t 
kv, kvo; kern_return_t kr; - size = _voucher_mach_recipe_init(mvar, voucher, kvb, voucher->v_priority); + size = _voucher_mach_recipe_init(mvar, voucher, kvb); kr = _voucher_create_mach_voucher(mvar, size, &kv); if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; @@ -460,30 +433,6 @@ _voucher_get_mach_voucher(voucher_t voucher) return kv; } -mach_voucher_t -_voucher_create_mach_voucher_with_priority(voucher_t voucher, - pthread_priority_t priority) -{ - if (priority == _voucher_get_priority(voucher)) { - return MACH_VOUCHER_NULL; // caller will use _voucher_get_mach_voucher - } - kern_return_t kr; - mach_voucher_t kv, kvb = voucher ? voucher->v_kvoucher : MACH_VOUCHER_NULL; - if (!kvb) kvb = _voucher_get_task_mach_voucher(); - - mach_voucher_attr_recipe_t mvar = _voucher_mach_recipe_alloca(voucher); - mach_voucher_attr_recipe_size_t size; - - size = _voucher_mach_recipe_init(mvar, voucher, kvb, priority); - kr = _voucher_create_mach_voucher(mvar, size, &kv); - if (dispatch_assume_zero(kr) || !kv) { - return MACH_VOUCHER_NULL; - } - _dispatch_kvoucher_debug("create with priority from voucher[%p]", kv, - voucher); - return kv; -} - static voucher_t _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) { @@ -519,11 +468,6 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) v->v_ipc_kvoucher = v->v_kvoucher = kv; v->v_kv_has_importance = !!(msgh_bits & MACH_MSGH_BITS_RAISEIMP); - if (udata_sz >= offsetof(_voucher_mach_udata_s,_vmu_after_priority)){ - if (udata->vmu_magic == VOUCHER_MAGIC_V3) { - v->v_priority = udata->vmu_priority; - } - } bool remove_kv_userdata = false; if (udata_sz >= offsetof(_voucher_mach_udata_s, _vmu_after_activity)) { #if !RDAR_25050791 @@ -577,39 +521,6 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) return v; } -voucher_t -_voucher_create_with_priority_and_mach_voucher(voucher_t ov, - pthread_priority_t priority, mach_voucher_t kv) -{ - if 
(priority == _voucher_get_priority(ov)) { - if (kv) _voucher_dealloc_mach_voucher(kv); - return ov ? _voucher_retain(ov) : NULL; - } - voucher_t v = _voucher_find_and_retain(kv); - voucher_fields_t ignore_fields = VOUCHER_FIELD_PRIORITY; - - if (v) { - _dispatch_voucher_debug("kvoucher[0x%08x] find", v, kv); - _voucher_dealloc_mach_voucher(kv); - return v; - } - - if (kv) ignore_fields |= VOUCHER_FIELD_KVOUCHER; - v = _voucher_clone(ov, ignore_fields); - if (priority) { - v->v_priority = (_voucher_priority_t)priority; - } - if (kv) { - v->v_ipc_kvoucher = v->v_kvoucher = kv; - _voucher_insert(v); - _dispatch_voucher_debug("kvoucher[0x%08x] create with priority from " - "voucher[%p]", v, kv, ov); - _dispatch_voucher_debug_machport(kv); - } - _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); - return v; -} - voucher_t _voucher_create_without_importance(voucher_t ov) { @@ -790,7 +701,6 @@ _voucher_dispose(voucher_t voucher) voucher->v_activity = 0; voucher->v_activity_creator = 0; voucher->v_parent_activity = 0; - voucher->v_priority = 0; #if VOUCHER_ENABLE_RECIPE_OBJECTS voucher->v_recipe_extra_size = 0; voucher->v_recipe_extra_offset = 0; @@ -1024,7 +934,7 @@ mach_voucher_persona_for_originator(uid_t persona_id, _dispatch_memappend(bank_modify_recipe[1].content, &modify_info); kr = _voucher_create_mach_voucher(bank_modify_recipe, bank_modify_recipe_size, &bkv); - if (dispatch_assume_zero(kr)) { + if (kr) { bkv = MACH_VOUCHER_NULL; } #else // VOUCHER_USE_PERSONA @@ -1685,9 +1595,6 @@ _voucher_debug(voucher_t v, char *buf, size_t bufsiz) buf, bufsiz, offset, VOUCHER_DETAIL_PREFIX, MAX_HEX_DATA_SIZE); bufprintf("]"); } - if (v->v_priority) { - bufprintf(", QOS 0x%x", v->v_priority); - } if (v->v_activity) { bufprintf(", activity 0x%llx (pid: 0x%16llx, parent 0x%llx)", v->v_activity, v->v_activity_creator, v->v_parent_activity); @@ -1888,22 +1795,6 @@ _voucher_get_mach_voucher(voucher_t voucher) return MACH_VOUCHER_NULL; } -mach_voucher_t 
-_voucher_create_mach_voucher_with_priority(voucher_t voucher, - pthread_priority_t priority) -{ - (void)voucher; (void)priority; - return MACH_VOUCHER_NULL; -} - -voucher_t -_voucher_create_with_priority_and_mach_voucher(voucher_t voucher, - pthread_priority_t priority, mach_voucher_t kv) -{ - (void)voucher; (void)priority; (void)kv; - return NULL; -} - voucher_t _voucher_create_accounting_voucher(voucher_t voucher) { diff --git a/src/voucher_internal.h b/src/voucher_internal.h index c50c36ca4..b5ee42c28 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -102,19 +102,12 @@ void DISPATCH_TSD_DTOR_CC _voucher_thread_cleanup(void *voucher); mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher); voucher_t _voucher_create_without_importance(voucher_t voucher); voucher_t _voucher_create_accounting_voucher(voucher_t voucher); -mach_voucher_t _voucher_create_mach_voucher_with_priority(voucher_t voucher, - pthread_priority_t priority); -voucher_t _voucher_create_with_priority_and_mach_voucher(voucher_t voucher, - pthread_priority_t priority, mach_voucher_t kv); void _voucher_dealloc_mach_voucher(mach_voucher_t kv); #if VOUCHER_ENABLE_RECIPE_OBJECTS _OS_OBJECT_DECL_SUBCLASS_INTERFACE(voucher_recipe, object) #endif -voucher_t voucher_retain(voucher_t voucher); -void voucher_release(voucher_t voucher); - #define VOUCHER_NO_MACH_VOUCHER MACH_PORT_DEAD #if VOUCHER_USE_MACH_VOUCHER @@ -144,8 +137,7 @@ typedef struct _voucher_mach_udata_s { OS_ENUM(voucher_fields, uint16_t, VOUCHER_FIELD_NONE = 0, VOUCHER_FIELD_KVOUCHER = 1u << 0, - VOUCHER_FIELD_PRIORITY = 1u << 1, - VOUCHER_FIELD_ACTIVITY = 1u << 2, + VOUCHER_FIELD_ACTIVITY = 1u << 1, #if VOUCHER_ENABLE_RECIPE_OBJECTS VOUCHER_FIELD_EXTRA = 1u << 15, @@ -168,7 +160,6 @@ typedef struct voucher_s { firehose_activity_id_t v_activity; uint64_t v_activity_creator; firehose_activity_id_t v_parent_activity; - _voucher_priority_t v_priority; unsigned int v_kv_has_importance:1; #if VOUCHER_ENABLE_RECIPE_OBJECTS 
size_t v_recipe_extra_offset; @@ -433,13 +424,6 @@ _voucher_clear(void) _voucher_replace(NULL); } -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_voucher_get_priority(voucher_t v) -{ - return v ? (pthread_priority_t)v->v_priority : 0; -} - DISPATCH_ALWAYS_INLINE static inline firehose_activity_id_t _voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) @@ -699,14 +683,6 @@ _voucher_clear(void) { } -DISPATCH_ALWAYS_INLINE -static inline pthread_priority_t -_voucher_get_priority(voucher_t voucher) -{ - (void)voucher; - return 0; -} - DISPATCH_ALWAYS_INLINE static inline bool _voucher_mach_msg_set_mach_voucher(mach_msg_header_t *msg, mach_voucher_t kv, diff --git a/src/workgroup.c b/src/workgroup.c index ae47870f5..46f55c380 100644 --- a/src/workgroup.c +++ b/src/workgroup.c @@ -32,7 +32,7 @@ OS_OBJECT_CLASS_DECL(os_workgroup); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_workgroup, (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, - (void (*)(_os_object_t))_os_workgroup_dispose); + (void (*)(_os_object_t))_os_workgroup_explicit_dispose); #endif // !USE_OBJC #define WORKGROUP_CLASS OS_OBJECT_VTABLE(os_workgroup) @@ -48,7 +48,7 @@ OS_OBJECT_CLASS_DECL(os_workgroup_parallel); #if !USE_OBJC OS_OBJECT_VTABLE_INSTANCE(os_workgroup_parallel, (void (*)(_os_object_t))_os_workgroup_explicit_xref_dispose, - (void (*)(_os_object_t))_os_workgroup_dispose); + (void (*)(_os_object_t))_os_workgroup_explicit_dispose); #endif // USE_OBJC #define WORKGROUP_PARALLEL_CLASS OS_OBJECT_VTABLE(os_workgroup_parallel) @@ -140,6 +140,15 @@ _os_workgroup_is_configurable(uint64_t wg_state) return (wg_state & OS_WORKGROUP_OWNER) == OS_WORKGROUP_OWNER; } +#if !USE_OBJC +void +_os_workgroup_explicit_dispose(os_workgroup_t wg) +{ + _os_workgroup_dispose(wg); + free(wg); +} +#endif + void _os_workgroup_dispose(os_workgroup_t wg) { @@ -176,7 +185,7 @@ void _os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi) { _os_workgroup_interval_dispose(wgi); - 
_os_workgroup_dispose(wgi->_as_wg); + _os_workgroup_explicit_dispose(wgi->_as_wg); } #endif diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h index e19df6467..c7b2000e6 100644 --- a/src/workgroup_internal.h +++ b/src/workgroup_internal.h @@ -40,6 +40,8 @@ void _os_workgroup_debug(os_workgroup_t wg, char *buf, size_t size); #if !USE_OBJC void _os_workgroup_explicit_xref_dispose(os_workgroup_t wg); +void _os_workgroup_explicit_dispose(os_workgroup_t wg); + void _os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi); void _os_workgroup_interval_explicit_dispose(os_workgroup_interval_t wgi); #endif diff --git a/xcodeconfig/libdispatch.aliases b/xcodeconfig/libdispatch.aliases index 24cbc6b2b..5f1be8825 100644 --- a/xcodeconfig/libdispatch.aliases +++ b/xcodeconfig/libdispatch.aliases @@ -29,3 +29,4 @@ _dispatch_queue_create_with_target$V2 _dispatch_queue_create_with_target _dispatch_source_cancel _dispatch_channel_cancel _dispatch_source_set_timer __dispatch_source_set_runloop_timer_4CF _dispatch_source_testcancel _dispatch_channel_testcancel +_dispatch_time_to_nsec _dispatch_time_to_nsecs From 7b021dab75c36bbbfbd57ba4da113f1eb0891f68 Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Tue, 18 Oct 2022 20:47:57 +0000 Subject: [PATCH 15/18] libdispatch-1412 Imported from libdispatch-1412.tar.gz --- dispatch/base.h | 18 ++ dispatch/block.h | 2 + dispatch/data.h | 8 +- dispatch/group.h | 2 + dispatch/introspection.h | 2 + dispatch/io.h | 6 +- dispatch/object.h | 8 +- dispatch/once.h | 2 + dispatch/queue.h | 14 +- dispatch/semaphore.h | 6 +- dispatch/source.h | 2 + dispatch/time.h | 2 + dispatch/workloop.h | 10 +- libdispatch.xcodeproj/project.pbxproj | 88 +----- man/dispatch_read.3 | 2 +- os/object.h | 5 + os/voucher_activity_private.h | 7 +- os/voucher_private.h | 4 - os/workgroup_base.h | 7 + os/workgroup_interval.h | 2 + os/workgroup_interval_private.h | 67 +++- 
os/workgroup_object.h | 6 +- os/workgroup_parallel.h | 4 +- private/mach_private.h | 2 +- private/source_private.h | 4 + private/swift_concurrency_private.h | 207 ++++++++++++ src/allocator_internal.h | 4 +- src/event/event_config.h | 2 +- src/event/event_internal.h | 4 +- src/event/event_kevent.c | 75 ++++- src/firehose/firehose_buffer.c | 11 +- src/init.c | 7 +- src/inline_internal.h | 36 ++- src/internal.h | 15 +- src/io_internal.h | 2 +- src/mach.c | 56 +++- src/mach_internal.h | 4 +- src/queue.c | 200 +++++++++--- src/queue_internal.h | 44 ++- src/shims.c | 13 +- src/shims.h | 2 +- src/shims/lock.c | 2 + src/shims/priority.h | 232 ++++++++++++-- src/shims/tsd.h | 8 + src/shims/yield.h | 2 +- src/source.c | 30 +- src/voucher.c | 143 ++++++++- src/voucher_internal.h | 12 +- src/workgroup.c | 437 +++++++++++++++++++------- src/workgroup_internal.h | 23 +- xcodescripts/install-headers.sh | 1 - 51 files changed, 1491 insertions(+), 361 deletions(-) create mode 100644 private/swift_concurrency_private.h diff --git a/dispatch/base.h b/dispatch/base.h index 0a2370bd8..ae5cdaef0 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -244,6 +244,20 @@ #define DISPATCH_ASSUME_NONNULL_END #endif +#if __has_feature(bounds_attributes) +#define DISPATCH_ASSUME_ABI_SINGLE_BEGIN _Pragma("clang abi_ptr_attr set(single)") +#define DISPATCH_ASSUME_ABI_SINGLE_END _Pragma("clang abi_ptr_attr set(unsafe_indexable)") +#define DISPATCH_UNSAFE_INDEXABLE __attribute__((__unsafe_indexable__)) +#define DISPATCH_COUNTED_BY(X) __attribute__((__counted_by__(X))) +#define DISPATCH_SIZED_BY(X) __attribute__((__sized_by__(X))) +#else +#define DISPATCH_ASSUME_ABI_SINGLE_BEGIN +#define DISPATCH_ASSUME_ABI_SINGLE_END +#define DISPATCH_UNSAFE_INDEXABLE +#define DISPATCH_COUNTED_BY(X) +#define DISPATCH_SIZED_BY(X) +#endif + #if !__has_feature(nullability) #ifndef _Nullable #define _Nullable @@ -342,6 +356,10 @@ #define DISPATCH_TRANSPARENT_UNION #endif +DISPATCH_ASSUME_ABI_SINGLE_BEGIN + typedef void 
(*dispatch_function_t)(void *_Nullable); +DISPATCH_ASSUME_ABI_SINGLE_END + #endif diff --git a/dispatch/block.h b/dispatch/block.h index 6aa3c8f2d..288ff2964 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -33,6 +33,7 @@ */ DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN __BEGIN_DECLS @@ -421,6 +422,7 @@ dispatch_block_testcancel(dispatch_block_t block); __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif // __BLOCKS__ diff --git a/dispatch/data.h b/dispatch/data.h index 825066918..a5211cd7b 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN __BEGIN_DECLS @@ -115,7 +116,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t -dispatch_data_create(const void *buffer, +dispatch_data_create(const void *DISPATCH_SIZED_BY(size) buffer, size_t size, dispatch_queue_t _Nullable queue, dispatch_block_t _Nullable destructor); @@ -158,7 +159,7 @@ DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_data_t dispatch_data_create_map(dispatch_data_t data, - const void *_Nullable *_Nullable buffer_ptr, + const void *_Nullable DISPATCH_SIZED_BY(*size_ptr) *_Nullable buffer_ptr, size_t *_Nullable size_ptr); /*! @@ -219,7 +220,7 @@ dispatch_data_create_subrange(dispatch_data_t data, */ typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, size_t offset, - const void *buffer, + const void *DISPATCH_SIZED_BY(size) buffer, size_t size); /*! 
@@ -273,6 +274,7 @@ dispatch_data_copy_region(dispatch_data_t data, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif /* __DISPATCH_DATA__ */ diff --git a/dispatch/group.h b/dispatch/group.h index 6b30b26c6..06ae76eff 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef dispatch_group_t @@ -274,6 +275,7 @@ dispatch_group_leave(dispatch_group_t group); __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/introspection.h b/dispatch/introspection.h index ea7dcd8f5..464be1fe8 100644 --- a/dispatch/introspection.h +++ b/dispatch/introspection.h @@ -24,6 +24,7 @@ #include DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! * @header @@ -183,6 +184,7 @@ dispatch_introspection_hook_queue_callout_end(dispatch_queue_t queue, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/io.h b/dispatch/io.h index db9733d82..2df1bcdb3 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN __BEGIN_DECLS @@ -256,8 +257,8 @@ DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, - const char *path, int oflag, mode_t mode, - dispatch_queue_t queue, + const char *DISPATCH_UNSAFE_INDEXABLE path, int oflag, + mode_t mode, dispatch_queue_t queue, void (^cleanup_handler)(int error)); /*! 
@@ -592,6 +593,7 @@ dispatch_io_set_interval(dispatch_io_t channel, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif /* __DISPATCH_IO__ */ diff --git a/dispatch/object.h b/dispatch/object.h index 8211fbd49..031f09d86 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -31,6 +31,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef dispatch_object_t @@ -593,16 +594,19 @@ API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW DISPATCH_COLD __attribute__((__format__(printf,2,3))) void -dispatch_debug(dispatch_object_t object, const char *message, ...); +dispatch_debug(dispatch_object_t object, + const char *DISPATCH_UNSAFE_INDEXABLE message, ...); API_DEPRECATED("unsupported interface", macos(10.6,10.9), ios(4.0,6.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW DISPATCH_COLD __attribute__((__format__(printf,2,0))) void -dispatch_debugv(dispatch_object_t object, const char *message, va_list ap); +dispatch_debugv(dispatch_object_t object, + const char *DISPATCH_UNSAFE_INDEXABLE message, va_list ap); __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/once.h b/dispatch/once.h index fbce4b111..bad03f125 100644 --- a/dispatch/once.h +++ b/dispatch/once.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN __BEGIN_DECLS @@ -120,6 +121,7 @@ _dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/queue.h b/dispatch/queue.h index 5cb502465..9e18474a1 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! 
* @header @@ -352,7 +353,7 @@ dispatch_sync_f(dispatch_queue_t queue, * Differences with dispatch_sync() * * Work items submitted to a queue with dispatch_async_and_wait() observe all - * queue attributes of that queue when invoked (inluding autorelease frequency + * queue attributes of that queue when invoked (including autorelease frequency * or QOS class). * * When the runtime has brought up a thread to invoke the asynchronous workitems @@ -1008,7 +1009,7 @@ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_queue_create_with_target(const char *_Nullable label, +dispatch_queue_create_with_target(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) DISPATCH_ALIAS_V2(dispatch_queue_create_with_target); @@ -1043,6 +1044,8 @@ dispatch_queue_create_with_target(const char *_Nullable label, * When no quality of service class is specified, the target queue of a newly * created dispatch queue is the default priority global concurrent queue. * + * Unless explicitly specified via the attribute, queues are created active. + * * @param label * A string label to attach to the queue. * This parameter is optional and may be NULL. @@ -1059,7 +1062,7 @@ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_queue_t -dispatch_queue_create(const char *_Nullable label, +dispatch_queue_create(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, dispatch_queue_attr_t _Nullable attr); /*! @@ -1177,7 +1180,9 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, * terminated. * * If a dispatch queue is active and targeted by other dispatch objects, - * changing its target queue results in undefined behavior. + * changing its target queue results in undefined behavior. 
Instead, it is + * recommended to create dispatch objects in an inactive state, set up the + * relevant target queues and then activate them. * * @param object * The object to modify. @@ -1689,6 +1694,7 @@ dispatch_assert_queue_not(dispatch_queue_t queue) __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index a6f9394f9..d329503f8 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef dispatch_semaphore_t @@ -71,7 +72,9 @@ dispatch_semaphore_create(intptr_t value); * * @discussion * Decrement the counting semaphore. If the resulting value is less than zero, - * this function waits for a signal to occur before returning. + * this function waits for a signal to occur before returning. If the timeout is + * reached without a signal being received, the semaphore is re-incremented + * before the function returns. * * @param dsema * The semaphore. The result of passing NULL in this parameter is undefined. @@ -112,6 +115,7 @@ dispatch_semaphore_signal(dispatch_semaphore_t dsema); __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif /* __DISPATCH_SEMAPHORE__ */ diff --git a/dispatch/source.h b/dispatch/source.h index 5ce826022..c15c020dc 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -36,6 +36,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN /*! 
* @header @@ -775,6 +776,7 @@ dispatch_source_set_registration_handler_f(dispatch_source_t source, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/time.h b/dispatch/time.h index 02dd27f6e..3268fffa9 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -34,6 +34,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN #ifdef NSEC_PER_SEC #undef NSEC_PER_SEC @@ -130,6 +131,7 @@ dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/dispatch/workloop.h b/dispatch/workloop.h index 98c4f8a41..e792a8a53 100644 --- a/dispatch/workloop.h +++ b/dispatch/workloop.h @@ -27,6 +27,7 @@ #endif DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN __BEGIN_DECLS @@ -39,7 +40,9 @@ __BEGIN_DECLS * @discussion * A dispatch workloop is a flavor of dispatch_queue_t that is a priority * ordered queue (using the QOS class of the submitted workitems as the - * ordering). + * ordering). Dispatch workloops are an exclusion context and it is guaranteed + * that only one work item submitted to the dispatch workloop will be invoked at + * a time. * * Between each workitem invocation, the workloop will evaluate whether higher * priority workitems have since been submitted, either directly to the @@ -79,7 +82,7 @@ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_workloop_t -dispatch_workloop_create(const char *_Nullable label); +dispatch_workloop_create(const char *DISPATCH_UNSAFE_INDEXABLE _Nullable label); /*! 
* @function dispatch_workloop_create_inactive @@ -105,7 +108,7 @@ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW dispatch_workloop_t -dispatch_workloop_create_inactive(const char *_Nullable label); +dispatch_workloop_create_inactive(const char *DISPATCH_UNSAFE_INDEXABLE _Nullable label); /*! * @function dispatch_workloop_set_autorelease_frequency @@ -162,6 +165,7 @@ dispatch_workloop_set_os_workgroup(dispatch_workloop_t workloop, __END_DECLS +DISPATCH_ASSUME_ABI_SINGLE_END DISPATCH_ASSUME_NONNULL_END #endif diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index a4f706e60..b2c487ffb 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -90,7 +90,6 @@ buildPhases = ( ); dependencies = ( - 9B2A11A32032494E0060E7D4 /* PBXTargetDependency */, C927F36910FD7F1A00C5AB8B /* PBXTargetDependency */, ); name = libdispatch_tools; @@ -137,15 +136,6 @@ 6E7018211F4EB51B0077C1DC /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6E7018221F4EB5220077C1DC /* workloop_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E70181C1F4EB51B0077C1DC /* workloop_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 6E90269C1BB9BD50004DC3AD /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; settings = {ATTRIBUTES = (Server, ); }; }; - 6E9955581C3AF7710071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; - 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; - 6E9955CF1C3B218E0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 
6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956021C3B21990071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956041C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956051C3B219B0071D40C /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; - 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; - 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; - 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; 6E9B6B5F1BB4F3C8009E324D /* firehose_buffer_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */; }; 6E9C6CA720F9848100EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; 6E9C6CA820F9848C00EA81C0 /* yield.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9C6CA220F9848000EA81C0 /* yield.c */; }; @@ -222,6 +212,7 @@ 96BC39BD0F3EBAB100C59689 /* queue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 96BC39BC0F3EBAB100C59689 /* queue_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 96C9553A0F3EAEDD000D2CA4 /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; 96DF70BE0F38FE3C0074BD99 /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 96DF70BD0F38FE3C0074BD99 /* once.c */; }; + 9B1C2A4F278BE2D1007555A8 /* swift_concurrency_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B1C2A4A278BDF96007555A8 /* swift_concurrency_private.h */; settings = {ATTRIBUTES = (Private, 
); }; }; 9B2A588123A412B400A7BB27 /* eventlink.c in Sources */ = {isa = PBXBuildFile; fileRef = 9B2A588023A412B400A7BB27 /* eventlink.c */; }; 9B3713F623D24594001C5C88 /* clock.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B3713F123D24594001C5C88 /* clock.h */; }; 9B404D6C255A191A0014912B /* apply_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 9B404D6B255A191A0014912B /* apply_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -344,7 +335,6 @@ E43B88362241F19000215272 /* channel_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EC8DC261E3E84610044B652 /* channel_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E43B88372241F19000215272 /* queue.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8B0E8361E600161930 /* queue.h */; settings = {ATTRIBUTES = (Public, ); }; }; E43B88382241F19000215272 /* source.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED8D0E8361E600161930 /* source.h */; settings = {ATTRIBUTES = (Public, ); }; }; - E43B88392241F19000215272 /* venture_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9955571C3AF7710071D40C /* venture_private.h */; }; E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4B3C3FD18C50D000039F49F /* voucher_activity_private.h */; }; E43B883B2241F19000215272 /* semaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 721F5C5C0F15520500FF03A6 /* semaphore.h */; settings = {ATTRIBUTES = (Public, ); }; }; E43B883C2241F19000215272 /* group.h in Headers */ = {isa = PBXBuildFile; fileRef = FC5C9C1D0EADABE3006E462D /* group.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -385,7 +375,6 @@ E43B885F2241F19000215272 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; E43B88602241F19000215272 /* perfmon.h in Headers */ = {isa = PBXBuildFile; fileRef = FC1832A2109923C7003403D5 /* perfmon.h */; }; 
E43B88612241F19000215272 /* config.h in Headers */ = {isa = PBXBuildFile; fileRef = FC9C70E7105EC9620074F9CA /* config.h */; }; - E43B88622241F19000215272 /* venture_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 6E9956061C3B21AA0071D40C /* venture_internal.h */; }; E43B88632241F19000215272 /* lock.h in Headers */ = {isa = PBXBuildFile; fileRef = 6EF2CAA41C88998A001ABE83 /* lock.h */; }; E43B88642241F19000215272 /* trace.h in Headers */ = {isa = PBXBuildFile; fileRef = E422A0D412A557B5005E5BDB /* trace.h */; }; E43B88652241F19000215272 /* getprogname.h in Headers */ = {isa = PBXBuildFile; fileRef = E4BA743913A8911B0095BDF1 /* getprogname.h */; }; @@ -429,7 +418,6 @@ E43B888C2241F19000215272 /* time.c in Sources */ = {isa = PBXBuildFile; fileRef = 96032E4A0F5CC8C700241C5F /* time.c */; }; E43B888D2241F19000215272 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; E43B888E2241F19000215272 /* benchmark.c in Sources */ = {isa = PBXBuildFile; fileRef = 965CD6340F3E806200D4E28D /* benchmark.c */; }; - E43B888F2241F19000215272 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E44757DA17F4572600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DB17F4573500B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = E44757D917F4572600B82CA1 /* inline_internal.h */; }; @@ -477,7 +465,6 @@ E48EC97E1835BADD00EAC4F1 /* yield.h in Headers */ = {isa = PBXBuildFile; fileRef = E48EC97B1835BADD00EAC4F1 /* yield.h */; }; E49BB6D11E70748100868613 /* provider.d in Sources */ = {isa = PBXBuildFile; fileRef = E43570B8126E93380097AB9F /* provider.d */; }; E49BB6D21E70748100868613 /* protocol.defs in Sources */ = {isa = 
PBXBuildFile; fileRef = FC7BED950E8361E600161930 /* protocol.defs */; settings = {ATTRIBUTES = (Client, Server, ); }; }; - E49BB6D31E70748100868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49BB6D41E70748100868613 /* firehose.defs in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA9B1AE1B0BD00289540 /* firehose.defs */; }; E49BB6D51E70748100868613 /* firehose_buffer.c in Sources */ = {isa = PBXBuildFile; fileRef = 72DEAA971AE181D300289540 /* firehose_buffer.c */; }; E49BB6D61E70748100868613 /* event_kevent.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */; }; @@ -504,7 +491,6 @@ E49BB6EB1E70748100868613 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; E49BB6EC1E70748100868613 /* data.m in Sources */ = {isa = PBXBuildFile; fileRef = E420866F16027AE500EEE210 /* data.m */; }; E49BB6ED1E70748100868613 /* voucher.c in Sources */ = {isa = PBXBuildFile; fileRef = E44A8E6A1805C3E0009FFDB6 /* voucher.c */; }; - E49BB7091E70A39700868613 /* venture.c in Sources */ = {isa = PBXBuildFile; fileRef = 6E9955CE1C3B218E0071D40C /* venture.c */; }; E49F2423125D3C960057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F2499125D48D80057C971 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = E44EBE371251656400645D88 /* resolver.c */; }; E49F24AB125D57FA0057C971 /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = FC7BED960E8361E600161930 /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -680,20 +666,6 @@ remoteGlobalIDString = 92F3FECA1BEC69E500025962; remoteInfo = darwintests; }; - 9B2A11A22032494E0060E7D4 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; - proxyType = 1; - remoteGlobalIDString = 9BECABC71E944C0400ED341E; - remoteInfo = 
"queue-tip"; - }; - 9B2A11A92032494E0060E7D4 /* PBXContainerItemProxy */ = { - isa = PBXContainerItemProxy; - containerPortal = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; - proxyType = 2; - remoteGlobalIDString = 9BECABC81E944C0400ED341E; - remoteInfo = "queue-tip"; - }; 9BEBA57720127D4400E6FD0D /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */; @@ -816,7 +788,6 @@ 6E49BF2920E34B44002624FC /* libdispatch.dirty */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.dirty; sourceTree = ""; }; 6E4BACBC1D48A41500B562AE /* mach.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach.c; sourceTree = ""; }; 6E4BACC91D48A89500B562AE /* mach_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mach_internal.h; sourceTree = ""; }; - 6E4FC9D11C84123600520351 /* os_venture_basic.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = os_venture_basic.c; sourceTree = ""; }; 6E5662DC1F8C2E3E00BC2474 /* workqueue_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workqueue_internal.h; sourceTree = ""; }; 6E5662E41F8C2E5B00BC2474 /* workqueue.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = workqueue.c; sourceTree = ""; }; 6E5ACCB01D3C4CFB007DA2B4 /* event_kevent.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = event_kevent.c; sourceTree = ""; }; @@ -841,9 +812,6 @@ 6E8E4EC91C1A670B0004F5CC /* dispatch_vm.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_vm.c; sourceTree = ""; }; 6E8E4ECB1C1A72650004F5CC /* dispatch_drift.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_drift.c; sourceTree = ""; 
}; 6E9926711D01295F000CB89A /* dispatch_block.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_block.c; sourceTree = ""; }; - 6E9955571C3AF7710071D40C /* venture_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_private.h; sourceTree = ""; }; - 6E9955CE1C3B218E0071D40C /* venture.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = venture.c; sourceTree = ""; }; - 6E9956061C3B21AA0071D40C /* venture_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = venture_internal.h; sourceTree = ""; }; 6E9B6B201BB4CC73009E324D /* firehose_buffer_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = firehose_buffer_internal.h; sourceTree = ""; }; 6E9C6CA220F9848000EA81C0 /* yield.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = yield.c; path = shims/yield.c; sourceTree = ""; }; 6EA283D01CAB93270041B2E0 /* libdispatch.codes */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libdispatch.codes; sourceTree = ""; }; @@ -877,6 +845,7 @@ 72DEAA9B1AE1B0BD00289540 /* firehose.defs */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.mig; path = firehose.defs; sourceTree = ""; }; 72DEAA9D1AE1BB7300289540 /* firehose_server_object.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = firehose_server_object.m; sourceTree = ""; }; 72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = firehose_server_private.h; sourceTree = ""; }; + 89EB85E327612F2500963B76 /* bounds_attributes_verifier.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = bounds_attributes_verifier.c; sourceTree = ""; }; 924D8EAA1C116B9F002AC2BC /* 
dispatch_c99.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_c99.c; sourceTree = ""; }; 92F3FE8F1BEC686300025962 /* dispatch_api.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = dispatch_api.c; sourceTree = ""; }; 92F3FE921BEC686300025962 /* Makefile */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = ""; }; @@ -899,11 +868,11 @@ 96BC39BC0F3EBAB100C59689 /* queue_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_private.h; sourceTree = ""; }; 96C9553A0F3EAEDD000D2CA4 /* once.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = once.h; sourceTree = ""; }; 96DF70BD0F38FE3C0074BD99 /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = once.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; + 9B1C2A4A278BDF96007555A8 /* swift_concurrency_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = swift_concurrency_private.h; sourceTree = ""; }; 9B2A588023A412B400A7BB27 /* eventlink.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = eventlink.c; sourceTree = ""; }; 9B3713F123D24594001C5C88 /* clock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clock.h; sourceTree = ""; }; 9B38A012234C6D0400E6B90F /* workgroup_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup_base.h; sourceTree = ""; }; 9B404D6B255A191A0014912B /* apply_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = apply_private.h; sourceTree = ""; }; - 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = 
"queue-tip.xcodeproj"; path = "tools/queue-tip/queue-tip.xcodeproj"; sourceTree = ""; }; 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = workgroup_object_private.h; sourceTree = ""; }; 9B81556E234AF0D200DB5CA3 /* workgroup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workgroup.h; sourceTree = ""; }; 9B815576234AFC9800DB5CA3 /* workgroup.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = workgroup.c; path = src/workgroup.c; sourceTree = SOURCE_ROOT; }; @@ -960,6 +929,7 @@ E44F9DA816543F79001DCD38 /* introspection_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_internal.h; sourceTree = ""; }; E454569214746F1B00106147 /* object_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_private.h; sourceTree = ""; }; E463024F1761603C00E11F4C /* atomic_sfb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic_sfb.h; sourceTree = ""; }; + E47BA11A27B35DC400FD39D4 /* os_workgroup_workload_config.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = os_workgroup_workload_config.plist; sourceTree = ""; }; E47D6BB5125F0F800070D91C /* resolved.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolved.h; sourceTree = ""; }; E482F1CD12DBAB590030614D /* postprocess-headers.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "postprocess-headers.sh"; sourceTree = ""; }; E48AF55916E70FD9004105FF /* io_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_private.h; sourceTree = ""; }; @@ -1055,7 +1025,6 @@ 92F3FEC91BEC687200025962 /* Darwin Tests */, C6A0FF2B0290797F04C91782 /* Documentation */, 
1AB674ADFE9D54B511CA2CBB /* Products */, - 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */, C927F35F10FD7F1000C5AB8B /* ddt.xcodeproj */, 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */, ); @@ -1088,7 +1057,6 @@ 96A8AA860F41E7A400CD570B /* source.c */, 96032E4A0F5CC8C700241C5F /* time.c */, C9C5F80D143C1771006DC718 /* transform.c */, - 6E9955CE1C3B218E0071D40C /* venture.c */, E44A8E6A1805C3E0009FFDB6 /* voucher.c */, 9B815576234AFC9800DB5CA3 /* workgroup.c */, 6E9C6CA220F9848000EA81C0 /* yield.c */, @@ -1193,6 +1161,7 @@ 92F3FEC91BEC687200025962 /* Darwin Tests */ = { isa = PBXGroup; children = ( + 89EB85E327612F2500963B76 /* bounds_attributes_verifier.c */, 6E8E4EC51C1A5D450004F5CC /* cf_file_descriptor.c */, 6E8E4EC31C1A57760004F5CC /* dispatch_after.c */, 92F3FE8F1BEC686300025962 /* dispatch_api.c */, @@ -1254,12 +1223,12 @@ 6E67D9171C17BA7200FC98AC /* nsoperation.m */, E4C97F0C263868F800628947 /* os_eventlink_empty.c */, E4C97F06263868F800628947 /* os_eventlink.c */, - 6E4FC9D11C84123600520351 /* os_venture_basic.c */, E4C97F07263868F800628947 /* os_workgroup_basic.c */, E4C97F0D263868F800628947 /* os_workgroup_empty.c */, E4C97F09263868F800628947 /* os_workgroup_empty2.c */, E4C97F0A263868F800628947 /* os_workgroup_entitled.c */, E4C97F05263868F800628947 /* os_workgroup_multilang.c */, + E47BA11A27B35DC400FD39D4 /* os_workgroup_workload_config.plist */, B6AE9A561D7F53C100AC007F /* perf_async_bench.m */, B6AE9A581D7F53CB00AC007F /* perf_bench.m */, 6EC670C71E37E201004F10D6 /* perf_mach_async.c */, @@ -1273,14 +1242,6 @@ path = tests; sourceTree = ""; }; - 9B6A42E11FE098430000D146 /* Products */ = { - isa = PBXGroup; - children = ( - 9B2A11AA2032494E0060E7D4 /* queue-tip */, - ); - name = Products; - sourceTree = ""; - }; 9BCAF77023A8544100E4F685 /* OS Project Headers */ = { isa = PBXGroup; children = ( @@ -1394,7 +1355,6 @@ 9BCAF76A23A8540A00E4F685 /* eventlink_private.h */, 6EDF10831BBB487E007F14BF /* firehose_buffer_private.h */, 
72EA3FBA1AF41EA400BBA227 /* firehose_server_private.h */, - 6E9955571C3AF7710071D40C /* venture_private.h */, 9BA656E5236BB56700D13FAE /* workgroup_interval_private.h */, 9B7D0906247EF7E600C1B0B7 /* workgroup_object_private.h */, 9BA656DF236BB55000D13FAE /* workgroup_private.h */, @@ -1462,6 +1422,7 @@ FCEF047F0F5661960067401F /* source_private.h */, B683588A1FA77F4900AA0D58 /* time_private.h */, 6E70181C1F4EB51B0077C1DC /* workloop_private.h */, + 9B1C2A4A278BDF96007555A8 /* swift_concurrency_private.h */, E4B515D7164B2DFB00E003AF /* introspection_private.h */, ); name = "Dispatch Private Headers"; @@ -1483,7 +1444,6 @@ 96929D830F3EA1020041FF5D /* shims.h */, FC0B34780FA2851C0080FFA0 /* source_internal.h */, E422A0D412A557B5005E5BDB /* trace.h */, - 6E9956061C3B21AA0071D40C /* venture_internal.h */, E44A8E7418066276009FFDB6 /* voucher_internal.h */, E44F9DA816543F79001DCD38 /* introspection_internal.h */, 6E5ACCAE1D3BF27F007DA2B4 /* event */, @@ -1532,10 +1492,10 @@ FC7BED9A0E8361E600161930 /* queue.h in Headers */, 9BE3E57B23CE6325006FE059 /* workgroup.h in Headers */, FC7BED9C0E8361E600161930 /* source.h in Headers */, - 6E9955581C3AF7710071D40C /* venture_private.h in Headers */, E4B3C3FE18C50D000039F49F /* voucher_activity_private.h in Headers */, 721F5C5D0F15520500FF03A6 /* semaphore.h in Headers */, FC5C9C1E0EADABE3006E462D /* group.h in Headers */, + 9B1C2A4F278BE2D1007555A8 /* swift_concurrency_private.h in Headers */, 9BCAF76F23A8540A00E4F685 /* eventlink_private.h in Headers */, 96C9553B0F3EAEDD000D2CA4 /* once.h in Headers */, 5AAB45C410D30CC7004407EA /* io.h in Headers */, @@ -1585,7 +1545,6 @@ 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */, FC1832A6109923C7003403D5 /* perfmon.h in Headers */, FC9C70E8105EC9620074F9CA /* config.h in Headers */, - 6E9956071C3B21AA0071D40C /* venture_internal.h in Headers */, 6EF2CAA51C88998A001ABE83 /* lock.h in Headers */, E422A0D512A557B5005E5BDB /* trace.h in Headers */, E4BA743B13A8911B0095BDF1 /* 
getprogname.h in Headers */, @@ -1614,7 +1573,6 @@ E43B88362241F19000215272 /* channel_private.h in Headers */, E43B88372241F19000215272 /* queue.h in Headers */, E43B88382241F19000215272 /* source.h in Headers */, - E43B88392241F19000215272 /* venture_private.h in Headers */, E43B883A2241F19000215272 /* voucher_activity_private.h in Headers */, E43B883B2241F19000215272 /* semaphore.h in Headers */, 9BE5254A238747ED0041C2A0 /* workgroup_interval.h in Headers */, @@ -1667,7 +1625,6 @@ E43B885F2241F19000215272 /* layout_private.h in Headers */, E43B88602241F19000215272 /* perfmon.h in Headers */, E43B88612241F19000215272 /* config.h in Headers */, - E43B88622241F19000215272 /* venture_internal.h in Headers */, E43B88632241F19000215272 /* lock.h in Headers */, E43B88642241F19000215272 /* trace.h in Headers */, E43B88652241F19000215272 /* getprogname.h in Headers */, @@ -1708,7 +1665,6 @@ E44757DC17F4573600B82CA1 /* inline_internal.h in Headers */, E49F24AE125D57FA0057C971 /* queue.h in Headers */, E49F24AF125D57FA0057C971 /* source.h in Headers */, - 6E99558A1C3AF7900071D40C /* venture_private.h in Headers */, E4B3C3FF18C50D0E0039F49F /* voucher_activity_private.h in Headers */, E49F24B0125D57FA0057C971 /* semaphore.h in Headers */, E49F24B1125D57FA0057C971 /* group.h in Headers */, @@ -1752,7 +1708,6 @@ B68358901FA77F5B00AA0D58 /* time_private.h in Headers */, E49F24C6125D57FA0057C971 /* config.h in Headers */, E422A0D612A557B5005E5BDB /* trace.h in Headers */, - 6E9956091C3B21B40071D40C /* venture_internal.h in Headers */, E4053A6926EAF2A800362F72 /* priority.h in Headers */, 6EF2CAB41C889D65001ABE83 /* lock.h in Headers */, E4BA743C13A8911B0095BDF1 /* getprogname.h in Headers */, @@ -1793,7 +1748,6 @@ E4053A6726EAF2A000362F72 /* time.h in Headers */, E44F9DB01654402B001DCD38 /* data_internal.h in Headers */, 6E5ACCBC1D3C4D0F007DA2B4 /* event_internal.h in Headers */, - 6E9956081C3B21B30071D40C /* venture_internal.h in Headers */, E4053A6826EAF2A700362F72 /* 
priority.h in Headers */, E44F9DB11654402E001DCD38 /* io_internal.h in Headers */, E4630251176162D200E11F4C /* atomic_sfb.h in Headers */, @@ -2083,10 +2037,6 @@ ProductGroup = 4552536F19B1384900B88766 /* Products */; ProjectRef = 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */; }, - { - ProductGroup = 9B6A42E11FE098430000D146 /* Products */; - ProjectRef = 9B6A42E01FE098430000D146 /* queue-tip.xcodeproj */; - }, ); projectRoot = ""; targets = ( @@ -2141,13 +2091,6 @@ remoteRef = 4552540819B1384900B88766 /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - 9B2A11AA2032494E0060E7D4 /* queue-tip */ = { - isa = PBXReferenceProxy; - fileType = "compiled.mach-o.executable"; - path = "queue-tip"; - remoteRef = 9B2A11A92032494E0060E7D4 /* PBXContainerItemProxy */; - sourceTree = BUILT_PRODUCTS_DIR; - }; C927F36710FD7F1000C5AB8B /* ddt */ = { isa = PBXReferenceProxy; fileType = "compiled.mach-o.executable"; @@ -2265,7 +2208,6 @@ "$(SRCROOT)/os/workgroup_parallel.h", "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", "$(SRCROOT)/os/workgroup_private.h", @@ -2373,7 +2315,6 @@ "$(SRCROOT)/os/workgroup_parallel.h", "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", "$(SRCROOT)/os/workgroup_private.h", @@ -2404,7 +2345,6 @@ "$(SRCROOT)/os/workgroup_parallel.h", "$(SRCROOT)/os/clock.h", "$(SRCROOT)/os/object_private.h", - "$(SRCROOT)/os/venture_private.h", "$(SRCROOT)/os/voucher_private.h", "$(SRCROOT)/os/voucher_activity_private.h", "$(SRCROOT)/os/workgroup_interval_private.h", @@ -2537,7 +2477,6 @@ C01866B51C5973210040FC07 /* time.c in Sources */, C01866B91C5973210040FC07 /* allocator.c in Sources */, C01866B31C5973210040FC07 /* benchmark.c in Sources */, - E49BB7091E70A39700868613 /* venture.c in Sources */, 
); runOnlyForDeploymentPostprocessing = 0; }; @@ -2576,7 +2515,6 @@ 96032E4B0F5CC8C700241C5F /* time.c in Sources */, 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */, 965CD6350F3E806200D4E28D /* benchmark.c in Sources */, - 6E9955CF1C3B218E0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2615,7 +2553,6 @@ E43B888C2241F19000215272 /* time.c in Sources */, E43B888D2241F19000215272 /* allocator.c in Sources */, E43B888E2241F19000215272 /* benchmark.c in Sources */, - E43B888F2241F19000215272 /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2654,7 +2591,6 @@ E49BB6E41E70748100868613 /* time.c in Sources */, E49BB6EB1E70748100868613 /* allocator.c in Sources */, E49BB6E11E70748100868613 /* benchmark.c in Sources */, - E49BB6D31E70748100868613 /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2693,7 +2629,6 @@ E49F24D2125D57FA0057C971 /* time.c in Sources */, 2BBF5A64154B64F5002B20F9 /* allocator.c in Sources */, E49F24D0125D57FA0057C971 /* benchmark.c in Sources */, - 6E9956051C3B219B0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2732,7 +2667,6 @@ E4B515C8164B2DA300E003AF /* time.c in Sources */, E4B515CD164B2DA300E003AF /* allocator.c in Sources */, E4B515C6164B2DA300E003AF /* benchmark.c in Sources */, - 6E9956021C3B21990071D40C /* venture.c in Sources */, E4B515DD164B32E000E003AF /* introspection.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; @@ -2772,7 +2706,6 @@ E4EC122112514715000DDBD1 /* time.c in Sources */, 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */, E4EC121F12514715000DDBD1 /* benchmark.c in Sources */, - 6E9956041C3B219B0071D40C /* venture.c in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -2809,11 +2742,6 @@ target = 92F3FECA1BEC69E500025962 /* darwintests */; targetProxy = 92F3FECE1BEC6F1000025962 /* PBXContainerItemProxy */; }; - 9B2A11A32032494E0060E7D4 /* PBXTargetDependency */ 
= { - isa = PBXTargetDependency; - name = "queue-tip"; - targetProxy = 9B2A11A22032494E0060E7D4 /* PBXContainerItemProxy */; - }; 9BEBA57820127D4400E6FD0D /* PBXTargetDependency */ = { isa = PBXTargetDependency; name = ddt; diff --git a/man/dispatch_read.3 b/man/dispatch_read.3 index 42e915f54..20f8cc36b 100644 --- a/man/dispatch_read.3 +++ b/man/dispatch_read.3 @@ -62,7 +62,7 @@ If an error occurred while reading from the file descriptor, the parameter to the block will be set to the appropriate POSIX error code and .Va data will contain any data that could be read successfully. -If the file pointer position is at end-of-file, emtpy +If the file pointer position is at end-of-file, empty .Va data and zero .Va error diff --git a/os/object.h b/os/object.h index e2ce3f467..5f2868d1b 100644 --- a/os/object.h +++ b/os/object.h @@ -55,6 +55,9 @@ * or iOS 6.0 deployment target. */ +#define OS_OBJECT_ASSUME_ABI_SINGLE_BEGIN OS_ASSUME_PTR_ABI_SINGLE_BEGIN +#define OS_OBJECT_ASSUME_ABI_SINGLE_END OS_ASSUME_PTR_ABI_SINGLE_END + #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT #if !defined(__OBJC__) || defined(__OBJC_GC__) # define OS_OBJECT_HAVE_OBJC_SUPPORT 0 @@ -250,6 +253,7 @@ OS_OBJECT_DECL_BASE(object, NSObject); #define OS_OBJECT_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object)) __BEGIN_DECLS +OS_OBJECT_ASSUME_ABI_SINGLE_BEGIN /*! 
* @function os_retain @@ -298,6 +302,7 @@ os_release(void *object); #define os_release(object) [object release] #endif +OS_OBJECT_ASSUME_ABI_SINGLE_END __END_DECLS #endif diff --git a/os/voucher_activity_private.h b/os/voucher_activity_private.h index 706ae75f1..bd17efc1e 100644 --- a/os/voucher_activity_private.h +++ b/os/voucher_activity_private.h @@ -24,7 +24,10 @@ #if OS_VOUCHER_ACTIVITY_SPI #if __has_include() #include -#include +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#include +#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ +#include #endif #if __APPLE__ #include @@ -32,7 +35,7 @@ #endif #include #include -#include "voucher_private.h" +#include #define OS_VOUCHER_ACTIVITY_SPI_VERSION 20161003 diff --git a/os/voucher_private.h b/os/voucher_private.h index 34b4f250b..1a2383a6e 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -697,7 +697,3 @@ __END_DECLS DISPATCH_ASSUME_NONNULL_END #endif // __OS_VOUCHER_PRIVATE__ - -#if OS_VOUCHER_ACTIVITY_SPI -#include "voucher_activity_private.h" -#endif diff --git a/os/workgroup_base.h b/os/workgroup_base.h index 3983f002a..c9b06590e 100644 --- a/os/workgroup_base.h +++ b/os/workgroup_base.h @@ -29,6 +29,9 @@ #define OS_WORKGROUP_WARN_RESULT __attribute__((__warn_unused_result__)) #define OS_WORKGROUP_EXPORT OS_EXPORT #define OS_WORKGROUP_RETURNS_RETAINED OS_OBJECT_RETURNS_RETAINED +#define OS_WORKGROUP_ASSUME_ABI_SINGLE_BEGIN OS_ASSUME_PTR_ABI_SINGLE_BEGIN +#define OS_WORKGROUP_ASSUME_ABI_SINGLE_END OS_ASSUME_PTR_ABI_SINGLE_END +#define OS_WORKGROUP_UNSAFE_INDEXABLE OS_UNSAFE_INDEXABLE #define OS_WORKGROUP_DECL(name, swift_name) \ OS_SWIFT_NAME(swift_name) \ @@ -59,6 +62,8 @@ #define _OS_WORKGROUP_ATTR_SIG_DEFAULT_INIT 0x2FA863B4 #define _OS_WORKGROUP_ATTR_SIG_EMPTY_INIT 0x2FA863C4 +OS_WORKGROUP_ASSUME_ABI_SINGLE_BEGIN + struct OS_REFINED_FOR_SWIFT os_workgroup_attr_opaque_s { uint32_t sig; char opaque[__OS_WORKGROUP_ATTR_SIZE__]; @@ -75,4 +80,6 @@ struct OS_REFINED_FOR_SWIFT os_workgroup_join_token_opaque_s { 
char opaque[__OS_WORKGROUP_JOIN_TOKEN_SIZE__]; }; +OS_WORKGROUP_ASSUME_ABI_SINGLE_END + #endif /* __OS_WORKGROUP_BASE__ */ diff --git a/os/workgroup_interval.h b/os/workgroup_interval.h index b056f82cf..0edc12928 100644 --- a/os/workgroup_interval.h +++ b/os/workgroup_interval.h @@ -29,6 +29,7 @@ __BEGIN_DECLS OS_WORKGROUP_ASSUME_NONNULL_BEGIN +OS_WORKGROUP_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef os_workgroup_interval_t @@ -157,6 +158,7 @@ int os_workgroup_interval_finish(os_workgroup_interval_t wg, os_workgroup_interval_data_t _Nullable data); +OS_WORKGROUP_ASSUME_ABI_SINGLE_END OS_WORKGROUP_ASSUME_NONNULL_END __END_DECLS diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h index 48ddc7301..2c08ce511 100644 --- a/os/workgroup_interval_private.h +++ b/os/workgroup_interval_private.h @@ -40,6 +40,7 @@ OS_ENUM(os_workgroup_interval_type, uint16_t, OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA, OS_WORKGROUP_INTERVAL_TYPE_ARKIT, + OS_WORKGROUP_INTERVAL_TYPE_FRAME_COMPOSITOR, ); /* @@ -67,6 +68,70 @@ int os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, os_workgroup_interval_type_t type); +/* + * @typedef os_workgroup_interval_data_flags_t + * + * @abstract + * Set of flags that can be passed to os_workgroup_interval_data_set_flags() to + * configure calls to os_workgroup_interval_start(), + * os_workgroup_interval_update() and os_workgroup_interval_finish() to + * indicate to the system that a specific instance of a repeatable workload + * has one of the following properties: + * + * OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_DEFAULT: + * specific instance has default complexity (same as using data initialized + * with OS_WORKGROUP_INTERVAL_DATA_INITIALIZER resp not calling + * os_workgroup_interval_data_set_flags(), or specifying NULL + * os_workgroup_interval_data_t). + * OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_HIGH: + * specific instance has high complexity. 
May only be called on an + * os_workgroup_interval_t created with a workload identifier that is known + * and is configured by the system to be allowed to use complexity. + */ +OS_OPTIONS(os_workgroup_interval_data_flags, uint32_t, + OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_DEFAULT = 0x0u, + OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_HIGH = 0x1u, +); + +/* + * @function os_workgroup_interval_data_set_flags + * + * @abstract + * Setter for os_workgroup_interval_data_t, can specify an ORd combination of + * flags from os_workgroup_interval_data_flags_t to indicate to the system that + * a specific instance of a repeatable workload has custom properties by + * passing the resulting data pointer to os_workgroup_interval_start(), + * os_workgroup_interval_update() and os_workgroup_interval_finish(). + * + * The flags chosen for a given instance of the repeatable workload are allowed + * to be different at each of these three calls made for the instance, and they + * are determined wholly by the `data` value passed to the specific call. + * + * @discussion + * In particular this means that once a non-default flag is set with this + * function, the resulting data pointer must be passed to every subsequent + * call of update() or finish() for that instance if the goal is to keep that + * flag present (as opposed to e.g. being reset to the default by passing a + * NULL data pointer). + * + * @param data + * Pointer to workgroup interval data structure initialized with + * OS_WORKGROUP_INTERVAL_DATA_INITIALIZER. + * + * @param flags + * ORd combination of flags from os_workgroup_interval_data_flags_t to set in + * the specified interval data. + * + * @result + * EINVAL is returned if the interval data passed in hasn't been initialized, + * or if unknown or an invalid combination of flag values are passed. 
+ */ +API_AVAILABLE(macos(13.0), ios(16.0), tvos(16.0), watchos(9.0)) +OS_WORKGROUP_EXPORT +int +os_workgroup_interval_data_set_flags(os_workgroup_interval_data_t data, + os_workgroup_interval_data_flags_t flags); + /* * @function os_workgroup_interval_create * @@ -179,7 +244,7 @@ os_workgroup_interval_create_with_workload_id(const char * _Nullable name, */ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED -os_workgroup_t +os_workgroup_t _Nullable os_workgroup_interval_copy_current_4AudioToolbox(void); OS_WORKGROUP_ASSUME_NONNULL_END diff --git a/os/workgroup_object.h b/os/workgroup_object.h index 5c8bd4f1a..73ebd69fb 100644 --- a/os/workgroup_object.h +++ b/os/workgroup_object.h @@ -29,6 +29,7 @@ __BEGIN_DECLS OS_WORKGROUP_ASSUME_NONNULL_BEGIN +OS_WORKGROUP_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef os_workgroup_t @@ -123,7 +124,7 @@ API_AVAILABLE(macos(10.16)) SPI_AVAILABLE(ios(14.0), tvos(14.0), watchos(7.0)) OS_SWIFT_NAME(WorkGroup.init(__name:port:)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED os_workgroup_t _Nullable -os_workgroup_create_with_port(const char *_Nullable name, mach_port_t mach_port); +os_workgroup_create_with_port(const char *OS_WORKGROUP_UNSAFE_INDEXABLE _Nullable name, mach_port_t mach_port); /*! * @function os_workgroup_create_with_workgroup @@ -144,7 +145,7 @@ os_workgroup_create_with_port(const char *_Nullable name, mach_port_t mach_port) API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED os_workgroup_t _Nullable -os_workgroup_create_with_workgroup(const char * _Nullable name, os_workgroup_t wg); +os_workgroup_create_with_workgroup(const char * OS_WORKGROUP_UNSAFE_INDEXABLE _Nullable name, os_workgroup_t wg); /*! 
* @typedef os_workgroup_join_token, os_workgroup_join_token_t @@ -364,6 +365,7 @@ int os_workgroup_max_parallel_threads(os_workgroup_t wg, os_workgroup_mpt_attr_t _Nullable attr); +OS_WORKGROUP_ASSUME_ABI_SINGLE_END OS_WORKGROUP_ASSUME_NONNULL_END __END_DECLS diff --git a/os/workgroup_parallel.h b/os/workgroup_parallel.h index 2aca7f861..0a745a27e 100644 --- a/os/workgroup_parallel.h +++ b/os/workgroup_parallel.h @@ -31,6 +31,7 @@ __BEGIN_DECLS OS_WORKGROUP_ASSUME_NONNULL_BEGIN +OS_WORKGROUP_ASSUME_ABI_SINGLE_BEGIN /*! * @typedef os_workgroup_parallel_t @@ -68,9 +69,10 @@ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_WORKGROUP_EXPORT OS_WORKGROUP_RETURNS_RETAINED OS_SWIFT_NAME(WorkGroupParallel.init(__name:attr:)) os_workgroup_parallel_t _Nullable -os_workgroup_parallel_create(const char * _Nullable name, +os_workgroup_parallel_create(const char *OS_WORKGROUP_UNSAFE_INDEXABLE _Nullable name, os_workgroup_attr_t _Nullable attr); +OS_WORKGROUP_ASSUME_ABI_SINGLE_END OS_WORKGROUP_ASSUME_NONNULL_END __END_DECLS diff --git a/private/mach_private.h b/private/mach_private.h index bed88c0bd..05c42ed5e 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -193,7 +193,7 @@ DISPATCH_DECL(dispatch_mach_msg); * @const DISPATCH_MACH_MSG_DESTRUCTOR_FREE * Message buffer will be deallocated with free(3). * - * @const DISPATCH_MACH_MSG_DESTRUCTOR_FREE + * @const DISPATCH_MACH_MSG_DESTRUCTOR_VM_DEALLOCATE * Message buffer will be deallocated with vm_deallocate. 
*/ DISPATCH_ENUM(dispatch_mach_msg_destructor, unsigned int, diff --git a/private/source_private.h b/private/source_private.h index fab9b9854..fb0c28da2 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -166,6 +166,9 @@ __END_DECLS * * @constant DISPATCH_SOCK_NOTIFY_ACK * Notify acknowledgement + * + * @constant DISPATCH_SOCK_WAKE_PKT + * Notify reception of wake packet */ enum { DISPATCH_SOCK_CONNRESET = 0x00000001, @@ -183,6 +186,7 @@ enum { DISPATCH_SOCK_DISCONNECTED = 0x00001000, DISPATCH_SOCK_CONNINFO_UPDATED = 0x00002000, DISPATCH_SOCK_NOTIFY_ACK = 0x00004000, + DISPATCH_SOCK_WAKE_PKT DISPATCH_ENUM_API_AVAILABLE(macos(13.0), ios(16.0), tvos(16.0), watchos(9.0))= 0x00008000, }; /*! diff --git a/private/swift_concurrency_private.h b/private/swift_concurrency_private.h new file mode 100644 index 000000000..1ceb15a94 --- /dev/null +++ b/private/swift_concurrency_private.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2022 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_SWIFT_CONCURRENCY_PRIVATE__ +#define __DISPATCH_SWIFT_CONCURRENCY_PRIVATE__ + +#include + +#ifndef __BEGIN_DECLS +#if defined(__cplusplus) +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } +#else +#define __BEGIN_DECLS +#define __END_DECLS +#endif +#endif + +#include +#include + +/* + * IMPORTANT: This header file describes PRIVATE interfaces between libdispatch + * and the swift concurrency runtime which are subject to change in future + * releases of Mac OS X. No other client can use or rely on these interfaces. + */ + +/* + * The encoding of a dispatch_tid_t and a dispatch_lock_t for Apple platforms + * only. + * + * This is the PRIVATE representation of a lock and thread that is understood by + * just dispatch, the kernel and the Swift concurrency runtime and is used to + * track a thread that is executing a swift job - be it a task or an actor. + * + * THIS SHOULD NOT BE USED BY ANYONE THAT IS NOT THE SWIFT CONCURRENCY RUNTIME. 
+ */ +DISPATCH_ASSUME_NONNULL_BEGIN +__BEGIN_DECLS + +typedef mach_port_t dispatch_tid_t; +typedef uint32_t dispatch_lock_t; + +#if !defined(__DISPATCH_BUILDING_DISPATCH__) +#define DLOCK_OWNER_MASK ((dispatch_lock_t)0xfffffffc) +// These bottom two bits are free and can be used by the Concurrency runtime for +// tracking other information +#define DLOCK_FREE_BITS_MASK ((dispatch_lock_t)0x00000003) + +#define DLOCK_OWNER_NULL ((dispatch_tid_t)MACH_PORT_NULL) +#define dispatch_tid_self() ((dispatch_tid_t)_pthread_mach_thread_self_direct()) + +DISPATCH_ALWAYS_INLINE +static inline dispatch_tid_t +dispatch_lock_owner(dispatch_lock_t lock_value) +{ + if (lock_value & DLOCK_OWNER_MASK) { + return lock_value | DLOCK_FREE_BITS_MASK; + } + return DLOCK_OWNER_NULL; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_t +dispatch_lock_value_from_tid(dispatch_tid_t tid) +{ + return tid & DLOCK_OWNER_MASK; +} + +DISPATCH_ALWAYS_INLINE +static inline dispatch_lock_t +dispatch_lock_value_for_self(void) +{ + return dispatch_lock_value_from_tid(dispatch_tid_self()); +} + +DISPATCH_ALWAYS_INLINE +static inline bool +dispatch_lock_is_locked(dispatch_lock_t lock_value) +{ + // equivalent to dispatch_lock_owner(lock_value) == 0 + return (lock_value & DLOCK_OWNER_MASK) != 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +dispatch_lock_is_locked_by(dispatch_lock_t lock_value, dispatch_tid_t tid) +{ + // equivalent to dispatch_lock_owner(lock_value) == tid + return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0; +} + +DISPATCH_ALWAYS_INLINE +static inline bool +dispatch_lock_is_locked_by_self(dispatch_lock_t lock_value) +{ + // equivalent to dispatch_lock_owner(lock_value) == self_tid + return ((lock_value ^ dispatch_tid_self()) & DLOCK_OWNER_MASK) == 0; +} +#else +// This file is exporting the encoding of dispatch_lock and a subset of the +// functions vended by shims/lock.h for the use of Swift concurrency. 
We will be +// using the internal functions for dispatch's own internal usage. +#endif + + +/*! + * @typedef dispatch_thread_override_info_s + * + * @abstract + * Structure for returning information about the thread's current override + * status + */ +typedef struct dispatch_thread_override_info { + uint32_t can_override:1, + unused:31; + qos_class_t override_qos_floor; +} dispatch_thread_override_info_s; + +/*! + * @function dispatch_thread_get_current_override_qos_floor + * + * @abstract + * Returns information about whether the current thread can be overridden and if + * so, what it's current override floor is. + */ +SPI_AVAILABLE(macos(13.0), ios(16.0)) +DISPATCH_EXPORT +dispatch_thread_override_info_s +dispatch_thread_get_current_override_qos_floor(void); + +/*! + * @function dispatch_thread_override_self + * + * @abstract + * If the current thread can be overridden, this function applies the input QoS + * as an override to itself. + * + * @param override_qos + * The override to apply to the current thread + */ +SPI_AVAILABLE(macos(13.0), ios(16.0)) +DISPATCH_EXPORT +int +dispatch_thread_override_self(qos_class_t override_qos); + +/*! + * @function dispatch_lock_override_start_with_debounce + * + * @abstract + * This function applies a dispatch workqueue override of the specified QoS on + * the thread if the lock owner tracked in the swift object's atomic state + * matches the expected thread. Otherwise, it returns -1 with an errno set. + * + * @param lock_addr + * The address to the lock used to synchronize the swift object. This lock + * should follow the encoding of the dispatch_lock mentioned above. + * + * @param expected_thread + * The thread which is expected to be the owner of the swift object. 
If this + * does not match the thread which is tracked in (*lock_addr) + * + * @param override_to_apply + * The QoS override to apply on the thread + */ +SPI_AVAILABLE(macos(13.0), ios(16.0)) +DISPATCH_EXPORT DISPATCH_NONNULL1 +int +dispatch_lock_override_start_with_debounce(dispatch_lock_t *lock_addr, + dispatch_tid_t expected_thread, qos_class_t override_to_apply); + +/*! + * @function dispatch_lock_override_end + * + * @abstract + * This function asynchronously removes the qos override that has been applied + * on the calling thread from a previous call to + * dispatch_lock_override_start_with_debounce. + * + * @param override_to_end + * The QoS override on the current thread. + */ +SPI_AVAILABLE(macos(13.0), ios(16.0)) +DISPATCH_EXPORT +int +dispatch_lock_override_end(qos_class_t override_to_end); + +__END_DECLS +DISPATCH_ASSUME_NONNULL_END +#endif diff --git a/src/allocator_internal.h b/src/allocator_internal.h index 2b5a6061b..62cb7a1ae 100644 --- a/src/allocator_internal.h +++ b/src/allocator_internal.h @@ -72,7 +72,7 @@ #define MAGAZINES_PER_HEAP (NUM_CPU) // Do you care about compaction or performance? 
-#if TARGET_OS_IPHONE +#if TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED #define PACK_FIRST_PAGE_WITH_CONTINUATIONS 1 #else #define PACK_FIRST_PAGE_WITH_CONTINUATIONS 0 @@ -88,7 +88,7 @@ #define DISPATCH_ALLOCATOR_PAGE_MASK PAGE_MAX_MASK -#if TARGET_OS_IPHONE +#if TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED #define PAGES_PER_MAGAZINE 64 #else #define PAGES_PER_MAGAZINE 512 diff --git a/src/event/event_config.h b/src/event/event_config.h index 0b883b035..df9733f5d 100644 --- a/src/event/event_config.h +++ b/src/event/event_config.h @@ -189,7 +189,7 @@ # define EV_VANISHED 0x0000 #endif -#define DISPATCH_EV_MSG_NEEDS_FREE 0x10000 // mach message needs to be freed() +#define DISPATCH_EV_MSG_NEEDS_FREE 0x10000 // mach message and aux needs to be freed() #define DISPATCH_EVFILT_TIMER (-EVFILT_SYSCOUNT - 1) #define DISPATCH_EVFILT_TIMER_WITH_CLOCK (-EVFILT_SYSCOUNT - 2) diff --git a/src/event/event_internal.h b/src/event/event_internal.h index 305cf931e..13b186c4d 100644 --- a/src/event/event_internal.h +++ b/src/event/event_internal.h @@ -323,7 +323,7 @@ typedef union dispatch_unote_u { #define DISPATCH_UNOTE_NULL ((dispatch_unote_t){ ._du = NULL }) -#if TARGET_OS_IPHONE +#if TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED #define DSL_HASH_SIZE 64u // must be a power of two #else #define DSL_HASH_SIZE 256u // must be a power of two @@ -369,7 +369,7 @@ typedef struct dispatch_source_type_s { pthread_priority_t pp); #if HAVE_MACH void (*dst_merge_msg)(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t sz, + mach_msg_header_t *msg, mach_msg_size_t sz, mach_msg_aux_header_t *aux, pthread_priority_t msg_pp, pthread_priority_t override_pp); #endif } dispatch_source_type_s; diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 4b990a8f4..1b5dd0587 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -320,15 +320,24 @@ _dispatch_kevent_mach_msg_size(dispatch_kevent_t ke) { // buffer size in the successful 
receive case, but message size (like // msgh_size) in the MACH_RCV_TOO_LARGE case, i.e. add trailer size. + + /* reserve top 32 bits for future */ return (mach_msg_size_t)ke->ext[1]; } +static inline mach_msg_size_t +_dispatch_kevent_mach_msg_aux_size(dispatch_kevent_t ke) +{ + /* reserve top 32 bits for future */ + return (mach_msg_size_t)ke->ext[3]; +} + static inline bool _dispatch_kevent_has_machmsg_rcv_error(dispatch_kevent_t ke) { -#define MACH_ERROR_RCV_SUB 0x4 +#define MACH_ERROR_RCV_SUB 0x1 mach_error_t kr = (mach_error_t) ke->fflags; - return (err_get_system(kr) == err_mach_ipc) && + return ((kr & system_emask) == err_mach_ipc) && (err_get_sub(kr) == MACH_ERROR_RCV_SUB); #undef MACH_ERROR_RCV_SUB } @@ -518,7 +527,7 @@ _dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke) _dispatch_kevent_merge_ev_flags(du, ke->flags); #if DISPATCH_USE_KEVENT_QOS - pp = ((pthread_priority_t)ke->qos) & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_all_flags((pthread_priority_t)ke->qos); #endif return dux_merge_evt(du._du, ke->flags, data, pp); } @@ -1407,7 +1416,7 @@ _dispatch_kevent_workloop_priority(dispatch_queue_t dq, int which, DISPATCH_INTERNAL_CRASH(rq_pri, "Waking up a kq with cooperative thread request is not supported"); } - return pp | (rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + return _pthread_priority_modify_flags(pp, 0, rq_pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); } DISPATCH_ALWAYS_INLINE_NDEBUG @@ -2492,6 +2501,9 @@ const dispatch_source_type_s _dispatch_source_type_sock = { #endif #ifdef NOTE_NOTIFY_ACK |NOTE_NOTIFY_ACK +#endif +#ifdef NOTE_WAKE_PKT + |NOTE_WAKE_PKT #endif , .dst_action = DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS, @@ -2731,6 +2743,7 @@ DISPATCH_NOINLINE static void _dispatch_mach_notification_merge_msg(dispatch_unote_t du, uint32_t flags, mach_msg_header_t *hdr, mach_msg_size_t msgsz DISPATCH_UNUSED, + mach_msg_aux_header_t *aux_hdr DISPATCH_UNUSED, pthread_priority_t msg_pp DISPATCH_UNUSED, pthread_priority_t 
ovr_pp DISPATCH_UNUSED) { @@ -3124,8 +3137,8 @@ const dispatch_source_type_s _dispatch_mach_type_send = { static void _dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr, pthread_priority_t msg_pp, - pthread_priority_t ovr_pp) + mach_msg_header_t *hdr, mach_msg_aux_header_t *aux_hdr, /* Nullable */ + pthread_priority_t msg_pp, pthread_priority_t ovr_pp) { mach_port_t name = hdr->msgh_local_port; mach_msg_size_t siz; @@ -3142,7 +3155,7 @@ _dispatch_kevent_mach_msg_recv(dispatch_unote_t du, uint32_t flags, // consumed by dux_merge_evt() _dispatch_retain_unote_owner(du); _dispatch_kevent_merge_ev_flags(du, flags); - return dux_merge_msg(du._du, flags, hdr, siz, msg_pp, ovr_pp); + return dux_merge_msg(du._du, flags, hdr, siz, aux_hdr, msg_pp, ovr_pp); } DISPATCH_NOINLINE @@ -3151,6 +3164,8 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) { mach_msg_header_t *hdr = _dispatch_kevent_mach_msg_buf(ke); mach_msg_size_t siz = _dispatch_kevent_mach_msg_size(ke); + mach_msg_aux_header_t *aux_hdr = NULL; + mach_msg_size_t aux_siz = _dispatch_kevent_mach_msg_aux_size(ke); dispatch_unote_t du = _dispatch_kevent_get_unote(ke); pthread_priority_t msg_pp = (pthread_priority_t)(ke->ext[2] >> 32); pthread_priority_t ovr_pp = (pthread_priority_t)ke->qos; @@ -3169,7 +3184,8 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) DISPATCH_INTERNAL_CRASH(kr, "EVFILT_MACHPORT with no message"); } if (likely(!kr)) { - return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + aux_hdr = aux_siz ? 
(mach_msg_aux_header_t *)((uintptr_t)hdr + siz) : NULL; + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, aux_hdr, msg_pp, ovr_pp); } goto out; } @@ -3177,8 +3193,10 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) if (!ke->data) { DISPATCH_INTERNAL_CRASH(0, "MACH_RCV_LARGE_IDENTITY with no identity"); } - if (unlikely(ke->ext[1] > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) { - DISPATCH_INTERNAL_CRASH(ke->ext[1], + + /* iOS 16: Changed to use accessor function so we can reuse top 32 bits of ext[1] in future */ + if (unlikely(_dispatch_kevent_mach_msg_size(ke) > (UINT_MAX - DISPATCH_MACH_TRAILER_SIZE))) { + DISPATCH_INTERNAL_CRASH(_dispatch_kevent_mach_msg_size(ke), "EVFILT_MACHPORT with overlarge message"); } @@ -3188,14 +3206,47 @@ _dispatch_kevent_mach_msg_drain(dispatch_kevent_t ke) } const mach_msg_option_t options = ((DISPATCH_MACH_RCV_OPTIONS | MACH_RCV_TIMEOUT | extra_options) & ~MACH_RCV_LARGE); - siz += DISPATCH_MACH_TRAILER_SIZE; + siz += DISPATCH_MACH_TRAILER_SIZE; /* if TOO_LARGE, ext1 does not account for trailer */ hdr = malloc(siz); // mach_msg will return TOO_LARGE if hdr/siz is NULL/0 + +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + mach_msg_vector_t rcv_vects[2]; + + aux_siz = DISPATCH_MSGV_AUX_MAX_SIZE; + aux_hdr = alloca(aux_siz); /* on stack */ + + rcv_vects[MACH_MSGV_IDX_MSG] = (mach_msg_vector_t){ + .msgv_data = (mach_vm_address_t)hdr, + .msgv_rcv_addr = 0, + .msgv_send_size = 0, + .msgv_rcv_size = dispatch_assume(hdr) ? siz : 0, + }; + rcv_vects[MACH_MSGV_IDX_AUX] = (mach_msg_vector_t){ + .msgv_data = (mach_vm_address_t)aux_hdr, + .msgv_rcv_addr = 0, + .msgv_send_size = 0, + .msgv_rcv_size = dispatch_assume(aux_hdr) ? 
aux_siz: 0, + }; + + kr = mach_msg2(rcv_vects, (mach_msg_option64_t)options | MACH64_MSG_VECTOR, + MACH_MSG_HEADER_EMPTY, 0, 2, (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, 0); + + if (likely(!kr)) { + flags |= DISPATCH_EV_MSG_NEEDS_FREE; + if (aux_hdr->msgdh_size == 0) { + aux_hdr = NULL; + } + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, aux_hdr, msg_pp, ovr_pp); + } +#else + /* silently drop auxiliary data on the floor */ kr = mach_msg(hdr, options, 0, dispatch_assume(hdr) ? siz : 0, (mach_port_name_t)ke->data, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); if (likely(!kr)) { flags |= DISPATCH_EV_MSG_NEEDS_FREE; - return _dispatch_kevent_mach_msg_recv(du, flags, hdr, msg_pp, ovr_pp); + return _dispatch_kevent_mach_msg_recv(du, flags, hdr, NULL, msg_pp, ovr_pp); } +#endif if (kr == MACH_RCV_TOO_LARGE) { _dispatch_log("BUG in libdispatch client: " diff --git a/src/firehose/firehose_buffer.c b/src/firehose/firehose_buffer.c index a79053c9d..90e8734fc 100644 --- a/src/firehose/firehose_buffer.c +++ b/src/firehose/firehose_buffer.c @@ -296,7 +296,11 @@ firehose_buffer_update_limits_unlocked(firehose_buffer_t fb) } total = MAX(total, FIREHOSE_BUFFER_CHUNK_PREALLOCATED_COUNT); if (!(fbb_flags & FIREHOSE_BUFFER_BANK_FLAG_LOW_MEMORY)) { - total = MAX(total, TARGET_OS_IPHONE ? 
8 : 12); +#if TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED + total = MAX(total, 8); +#else // TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED + total = MAX(total, 12); +#endif // TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED } new = (firehose_bank_state_u) { @@ -1175,10 +1179,7 @@ firehose_buffer_ring_enqueue(firehose_buffer_t fb, firehose_chunk_ref_t ref) })); } - pthread_priority_t pp = fc_pos.fcp_qos; - pp <<= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; - firehose_client_send_push_async(fb, _pthread_qos_class_decode(pp, NULL, NULL), - for_io); + firehose_client_send_push_async(fb, QOS_CLASS_UNSPECIFIED, for_io); #endif } diff --git a/src/init.c b/src/init.c index 08f790828..120699b1e 100644 --- a/src/init.c +++ b/src/init.c @@ -111,9 +111,9 @@ _dispatch_sigmask(void) #pragma mark - #pragma mark dispatch_globals -DISPATCH_HIDE_SYMBOL(dispatch_assert_queue, 10.12, 10.0, 10.0, 3.0); -DISPATCH_HIDE_SYMBOL(dispatch_assert_queue_not, 10.12, 10.0, 10.0, 3.0); -DISPATCH_HIDE_SYMBOL(dispatch_queue_create_with_target, 10.12, 10.0, 10.0, 3.0); +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue, 10.12, 10.0, 10.0, 3.0, 1.0); +DISPATCH_HIDE_SYMBOL(dispatch_assert_queue_not, 10.12, 10.0, 10.0, 3.0, 1.0); +DISPATCH_HIDE_SYMBOL(dispatch_queue_create_with_target, 10.12, 10.0, 10.0, 3.0, 1.0); #if DISPATCH_COCOA_COMPAT void *(*_dispatch_begin_NSAutoReleasePool)(void); @@ -143,6 +143,7 @@ pthread_key_t dispatch_wlh_key; pthread_key_t dispatch_voucher_key; pthread_key_t dispatch_deferred_items_key; pthread_key_t dispatch_enqueue_key; +pthread_key_t dispatch_msgv_aux_key; pthread_key_t os_workgroup_key; #endif // !DISPATCH_USE_DIRECT_TSD && !DISPATCH_USE_THREAD_LOCAL_STORAGE diff --git a/src/inline_internal.h b/src/inline_internal.h index 9a33f2eaf..4c4d93605 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -1621,7 +1621,8 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) // Multi Producer calls, can be used safely concurrently // -// Returns true when the queue 
was empty and the head must be set +// Returns true when the queue was empty and the head must be set. xchg with +// release forces visibility of updates to the item before update to the tail #define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ os_mpsc_node_type(Q) _tl = (tail); \ os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ @@ -1835,7 +1836,7 @@ _dispatch_root_queue_push_inline(dispatch_queue_global_t dq, { struct dispatch_object_s *hd = _head._do, *tl = _tail._do; if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) { - return _dispatch_root_queue_poke(dq, n, 0); + return _dispatch_root_queue_poke_and_wakeup(dq, n, 0); } } @@ -1940,7 +1941,7 @@ _dispatch_queue_class_invoke(dispatch_queue_class_t dqu, // Either dc is set, which is a deferred invoke case // // or only tq is and it means a reenqueue is required, because of: - // a retarget, a suspension, or a width change. + // a retarget, a suspension, a width change or narrowing. // // In both cases, we want to bypass the check for DIRTY. // That may cause us to leave DIRTY in place but all drain lock @@ -2125,8 +2126,18 @@ _dispatch_get_basepri_override_qos_floor(void) { dispatch_priority_t dbp = _dispatch_get_basepri(); dispatch_qos_t qos = _dispatch_priority_qos(dbp); + // The fallback QoS is considered here because this function is only used in + // the path of taking the drain lock on dispatch objects and determining + // whether to self override or not. The fallback QoS therefore does factor + // in here since we want the current priority of the thread - whether that + // came from requested QoS, fallback QoS or override QoS. + // + // Otherwise, we could have a DEF thread (UN req with DEF fallback) trying + // to take the drain lock on a UT queue and applying a UT override on itself + // in the drain lock path when it doesn't need to. 
+ dispatch_qos_t fallback_qos = _dispatch_priority_fallback_qos(dbp); dispatch_qos_t oqos = _dispatch_priority_override_qos(dbp); - return MAX(qos, oqos); + return MAX(MAX(qos, fallback_qos), oqos); } DISPATCH_ALWAYS_INLINE @@ -2244,11 +2255,10 @@ _dispatch_priority_adopt(pthread_priority_t pp, unsigned long flags) #if HAVE_PTHREAD_WORKQUEUE_QOS dispatch_priority_t dbp = _dispatch_get_basepri(); pthread_priority_t basepp = _dispatch_priority_to_pp_strip_flags(dbp); - pthread_priority_t minbasepp = basepp & - ~(pthread_priority_t)_PTHREAD_PRIORITY_PRIORITY_MASK; + pthread_priority_t minbasepp = _pthread_priority_strip_relpri(basepp); bool enforce = (flags & DISPATCH_PRIORITY_ENFORCE) || (pp & _PTHREAD_PRIORITY_ENFORCE_FLAG); - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_all_flags(pp); if (unlikely(!pp)) { dispatch_qos_t fallback = _dispatch_priority_fallback_qos(dbp); @@ -2294,7 +2304,7 @@ _dispatch_priority_compute_update(pthread_priority_t pp) // // the manager bit is invalid input, but we keep it to get meaningful // assertions in _dispatch_set_priority_and_voucher_slow() - pp &= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_flags(pp, ~_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG); pthread_priority_t cur_priority = _dispatch_get_priority(); pthread_priority_t unbind = _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; pthread_priority_t thread_type = _PTHREAD_PRIORITY_THREAD_TYPE_MASK; @@ -2307,9 +2317,9 @@ _dispatch_priority_compute_update(pthread_priority_t pp) // if the NEEDS_UNBIND flag is set, we always need to update and take // the slow path in _dispatch_set_priority_and_voucher_slow() which will // adjust the priority further with the proper overcommitness - return pp ? pp : (cur_priority & ~unbind); + return pp ? 
pp : _pthread_priority_strip_flags(cur_priority, unbind); } else { - cur_priority &= ~thread_type; + cur_priority = _pthread_priority_strip_flags(cur_priority, thread_type); } if (unlikely(pp != cur_priority)) return pp; return 0; @@ -2439,7 +2449,7 @@ _dispatch_priority_compute_propagated(pthread_priority_t pp, if (flags & DISPATCH_PRIORITY_PROPAGATE_CURRENT) { pp = _dispatch_get_priority(); } - pp &= ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_all_flags(pp); if (!(flags & DISPATCH_PRIORITY_PROPAGATE_FOR_SYNC_IPC) && pp > _dispatch_qos_to_pp(DISPATCH_QOS_USER_INITIATED)) { // Cap QOS for propagation at user-initiated @@ -2494,8 +2504,8 @@ _dispatch_block_invoke_should_set_priority(dispatch_block_flags_t flags, if ((flags & DISPATCH_BLOCK_HAS_PRIORITY) && ((flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS) || !(flags & DISPATCH_BLOCK_INHERIT_QOS_CLASS))) { - new_pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK; - old_pri = _dispatch_get_priority() & ~_PTHREAD_PRIORITY_FLAGS_MASK; + new_pri = _pthread_priority_strip_all_flags(new_pri); + old_pri = _pthread_priority_strip_all_flags(_dispatch_get_priority()); if (old_pri && old_pri < new_pri) p = old_pri; } return p; diff --git a/src/internal.h b/src/internal.h index 6fc0a2750..ee60d4064 100644 --- a/src/internal.h +++ b/src/internal.h @@ -122,10 +122,10 @@ #ifndef DISPATCH_HIDE_SYMBOL #if TARGET_OS_MAC && !TARGET_OS_IPHONE -#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) \ +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos, ros) \ __DISPATCH_HIDE_SYMBOL(sym, osx) #else -#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos) +#define DISPATCH_HIDE_SYMBOL(sym, osx, ios, tvos, watchos, ros) #endif #endif @@ -251,6 +251,7 @@ upcast(dispatch_object_t dou) #include "mach_private.h" #include "data_private.h" #include "time_private.h" +#include "swift_concurrency_private.h" #include "os/voucher_private.h" #include "os/voucher_activity_private.h" #include "io_private.h" @@ -501,6 +502,7 @@ DISPATCH_EXPORT 
DISPATCH_NOTHROW void dispatch_atfork_child(void); #define DISPATCH_MODE_STRICT (1U << 0) #define DISPATCH_MODE_NO_FAULTS (1U << 1) #define DISPATCH_COOPERATIVE_POOL_STRICT (1U << 2) +#define DISPATCH_MODE_VOUCHER_DYNAMIC (1U << 3) extern uint8_t _dispatch_mode; DISPATCH_EXPORT DISPATCH_NOINLINE DISPATCH_COLD @@ -1037,6 +1039,15 @@ _dispatch_ktrace_impl(uint32_t code, uint64_t a, uint64_t b, #endif #endif // OS_EVENTLINK_USE_MACH_EVENTLINK +#ifndef DISPATCH_SEND_ACTIVITY_IN_MSGV +#error __OPEN_SOURCE__ unhandled OS version check +#if DISPATCH_MIN_REQUIRED_OSX_AT_LEAST(130000) && (defined(__arm64__) || defined(__LP64__)) +#define DISPATCH_SEND_ACTIVITY_IN_MSGV 1 +#else +#define DISPATCH_SEND_ACTIVITY_IN_MSGV 0 +#endif +#endif // DISPATCH_SEND_ACTIVITY_IN_MSGV + #define _dispatch_hardware_crash() \ __asm__(""); __builtin_trap() // diff --git a/src/io_internal.h b/src/io_internal.h index c076cfc69..235d75202 100644 --- a/src/io_internal.h +++ b/src/io_internal.h @@ -34,7 +34,7 @@ #define _DISPATCH_IO_LABEL_SIZE 16 -#if TARGET_OS_IPHONE // rdar://problem/9032036 +#if TARGET_OS_IPHONE || DISPATCH_TARGET_DK_EMBEDDED // rdar://problem/9032036 #define DIO_MAX_CHUNK_SIZE (512u * 1024) #define DIO_HASH_SIZE 64u // must be a power of two #else diff --git a/src/mach.c b/src/mach.c index aa9c04b6d..b6c9d187f 100644 --- a/src/mach.c +++ b/src/mach.c @@ -614,7 +614,8 @@ _dispatch_mach_msg_get_reason(dispatch_mach_msg_t dmsg, mach_error_t *err_ptr) static inline dispatch_mach_msg_t _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, - dispatch_mach_reply_refs_t dmr, uint32_t flags, pthread_priority_t pp) + mach_msg_aux_header_t *aux_hdr, dispatch_mach_reply_refs_t dmr, + uint32_t flags, pthread_priority_t pp) { dispatch_mach_msg_destructor_t destructor; dispatch_mach_msg_t dmsg; @@ -626,7 +627,7 @@ _dispatch_mach_msg_create_recv(mach_msg_header_t *hdr, mach_msg_size_t siz, voucher = dmr->dmr_voucher; dmr->dmr_voucher = NULL; // transfer reference } else { - 
voucher = voucher_create_with_mach_msg(hdr); + voucher = _voucher_create_with_mach_msgv(hdr, aux_hdr); pp = _dispatch_priority_compute_propagated(pp, 0); } @@ -661,7 +662,7 @@ _dispatch_mach_no_senders_invoke(dispatch_mach_t dm) void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr, mach_msg_size_t siz, + mach_msg_header_t *hdr, mach_msg_size_t siz, mach_msg_aux_header_t *aux_hdr, pthread_priority_t msg_pp, pthread_priority_t ovr_pp) { @@ -705,7 +706,7 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, // out to keep the promise that DISPATCH_MACH_DISCONNECTED is the last // event sent. dispatch_mach_msg_t dmsg; - dmsg = _dispatch_mach_msg_create_recv(hdr, siz, NULL, flags, msg_pp); + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, aux_hdr, NULL, flags, msg_pp); _dispatch_mach_handle_or_push_received_msg(dm, dmsg, ovr_pp); } @@ -723,7 +724,7 @@ _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *hdr, mach_msg_size_t siz, + mach_msg_header_t *hdr, mach_msg_size_t siz, mach_msg_aux_header_t *aux_hdr, pthread_priority_t msg_pp, pthread_priority_t ovr_pp) { dispatch_mach_reply_refs_t dmr = du._dmr; @@ -736,7 +737,7 @@ _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, hdr->msgh_local_port, hdr->msgh_id, hdr->msgh_remote_port); if (!canceled) { - dmsg = _dispatch_mach_msg_create_recv(hdr, siz, dmr, flags, msg_pp); + dmsg = _dispatch_mach_msg_create_recv(hdr, siz, aux_hdr, dmr, flags, msg_pp); } if (dmsg) { @@ -1072,6 +1073,11 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, bool is_reply = (MACH_MSGH_BITS_REMOTE(msg->msgh_bits) == MACH_MSG_TYPE_MOVE_SEND_ONCE); mach_port_t reply_port = dmsg->dmsg_reply; +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + mach_msg_vector_t send_vecs[2] = {}; + _voucher_mach_udata_aux_s aux = {}; + mach_msg_size_t udata_sz = 0; +#endif if (!is_reply) { 
dm->dm_needs_mgr = 0; if (unlikely(dsrr->dmsr_checkin && dmsg != dsrr->dmsr_checkin)) { @@ -1124,7 +1130,25 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, opts |= MACH_SEND_NOTIFY; } opts |= MACH_SEND_TIMEOUT; +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + if (voucher && voucher->v_activity) { + udata_sz = offsetof(_voucher_mach_udata_s, _vmu_after_activity); + + aux.udata = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + /* .vmu_priority is unused */ + .vmu_activity = voucher->v_activity, + .vmu_activity_pid = voucher->v_activity_creator, + .vmu_parent_activity = voucher->v_parent_activity, + }; + } + if (udata_sz) { + aux.header.msgdh_size = sizeof(mach_msg_aux_header_t) + udata_sz; + } +#endif _dispatch_voucher_debug("mach-msg[%p] msg_set", voucher, dmsg); + // _voucher_get_mach_voucher() skips mach voucher creation if + // activity_in_msgv is true. clear_voucher = _voucher_mach_msg_set(msg, voucher); msg_priority = _dispatch_mach_send_priority(dmsg, qos, &opts); if (reply_port && dm->dm_strict_reply) { @@ -1140,8 +1164,28 @@ _dispatch_mach_msg_send(dispatch_mach_t dm, dispatch_object_t dou, } _dispatch_mach_reply_waiter_register(dm, dwr, reply_port, dmsg); } + +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + send_vecs[MACH_MSGV_IDX_MSG] = (mach_msg_vector_t){ + .msgv_data = (mach_vm_address_t)msg, + .msgv_rcv_addr = 0, + .msgv_send_size = msg->msgh_size, + .msgv_rcv_size = 0, + }; + send_vecs[MACH_MSGV_IDX_AUX] = (mach_msg_vector_t){ + .msgv_data = (mach_vm_address_t)&aux, + .msgv_rcv_addr = 0, + .msgv_send_size = udata_sz ? 
aux.header.msgdh_size : 0, + .msgv_rcv_size = 0, + }; + + kr = mach_msg2(send_vecs, (mach_msg_option64_t)opts | + MACH64_MSG_VECTOR | MACH64_SEND_USER_CALL, + *msg, 2, 0, MACH_PORT_NULL, 0, msg_priority); +#else kr = mach_msg(msg, opts, msg->msgh_size, 0, MACH_PORT_NULL, 0, msg_priority); +#endif _dispatch_debug("machport[0x%08x]: sent msg id 0x%x, ctxt %p, " "opts 0x%x, msg_opts 0x%x, kvoucher 0x%08x, reply on 0x%08x: " "%s - 0x%x", msg->msgh_remote_port, msg->msgh_id, dmsg->do_ctxt, diff --git a/src/mach_internal.h b/src/mach_internal.h index 9f1840eac..07a3f03a5 100644 --- a/src/mach_internal.h +++ b/src/mach_internal.h @@ -109,10 +109,10 @@ size_t _dispatch_mach_debug(dispatch_mach_t dm, char* buf, size_t bufsiz); void _dispatch_mach_notification_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); void _dispatch_mach_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t msgsz, + mach_msg_header_t *msg, mach_msg_size_t msgsz, mach_msg_aux_header_t *aux, pthread_priority_t msg_pp, pthread_priority_t ovr_pp); void _dispatch_mach_reply_merge_msg(dispatch_unote_t du, uint32_t flags, - mach_msg_header_t *msg, mach_msg_size_t msgsz, + mach_msg_header_t *msg, mach_msg_size_t msgsz, mach_msg_aux_header_t *aux, pthread_priority_t msg_pp, pthread_priority_t ovr_pp); void _dispatch_xpc_sigterm_merge_evt(dispatch_unote_t du, uint32_t flags, uintptr_t data, pthread_priority_t pp); diff --git a/src/queue.c b/src/queue.c index 4231b64fb..9858c166a 100644 --- a/src/queue.c +++ b/src/queue.c @@ -48,8 +48,8 @@ static void _dispatch_assert_queue_fail(dispatch_queue_t dq, bool expected) { _dispatch_client_assert_fail( - "Block was %sexpected to execute on queue [%s]", - expected ? "" : "not ", dq->dq_label ?: ""); + "Block was %sexpected to execute on queue [%s (%p)]", + expected ? 
"" : "not ", dq->dq_label ?: "", dq); } DISPATCH_NOINLINE DISPATCH_NORETURN @@ -57,8 +57,8 @@ static void _dispatch_assert_queue_barrier_fail(dispatch_queue_t dq) { _dispatch_client_assert_fail( - "Block was expected to act as a barrier on queue [%s]", - dq->dq_label ?: ""); + "Block was expected to act as a barrier on queue [%s (%p)]", + dq->dq_label ?: "", dq); } void @@ -134,8 +134,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, pflags |= _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND; // when we unbind, overcomitness can flip, so we need to learn // it from the defaultpri, see _dispatch_priority_compute_update - pp |= (_dispatch_get_basepri() & - DISPATCH_PRIORITY_FLAG_OVERCOMMIT); + pp = _pthread_priority_modify_flags(pp, 0, _dispatch_get_basepri() & DISPATCH_PRIORITY_FLAG_OVERCOMMIT); // TODO (rokhinip): Right now there is no binding and unbinding // to a kqueue for a cooperative thread. We'll need to do this @@ -143,9 +142,9 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, } else { // else we need to keep the overcommit/cooperative one that is set on the current // thread - pp |= (old_pri & _PTHREAD_PRIORITY_THREAD_TYPE_MASK); + pp = _pthread_priority_modify_flags(pp, 0, old_pri & _PTHREAD_PRIORITY_THREAD_TYPE_MASK); } - if (likely(old_pri & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { + if (likely(_pthread_priority_strip_all_flags(old_pri))) { pflags |= _PTHREAD_SET_SELF_QOS_FLAG; } uint64_t mgr_dq_state = @@ -709,7 +708,7 @@ _dispatch_continuation_init_slow(dispatch_continuation_t dc, flags |= block_flags; if (block_flags & DISPATCH_BLOCK_HAS_PRIORITY) { - pp = dbpd->dbpd_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_all_flags(dbpd->dbpd_priority); } else if (flags & DISPATCH_BLOCK_HAS_PRIORITY) { // _dispatch_source_handler_alloc is calling is and doesn't want us // to propagate priorities @@ -1169,8 +1168,8 @@ _dispatch_async_waiter_update(dispatch_sync_context_t dsc, dispatch_priority_t p = 
dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK; if (p) { pthread_priority_t pp = _dispatch_priority_to_pp_strip_flags(p); - if (pp > (dsc->dc_priority & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - dsc->dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG; + if (pp > _pthread_priority_strip_all_flags(dsc->dc_priority)) { + dsc->dc_priority = _pthread_priority_modify_flags(pp, 0, _PTHREAD_PRIORITY_ENFORCE_FLAG); } } @@ -3820,6 +3819,7 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, owned += dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; } if (dc) { + // We still have pending work items owned = _dispatch_queue_adjust_owned(dq, owned, dc); } *owned_ptr &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; @@ -5052,6 +5052,45 @@ _dispatch_queue_wakeup_with_override(dispatch_queue_class_t dq, } #endif // HAVE_PTHREAD_WORKQUEUE_QOS +dispatch_thread_override_info_s +dispatch_thread_get_current_override_qos_floor() +{ + dispatch_thread_override_info_s override_info = {0}; + + dispatch_qos_t override_qos_floor = _dispatch_get_basepri_override_qos_floor(); + if (override_qos_floor != DISPATCH_QOS_SATURATED) { + override_info.can_override = true; + override_info.override_qos_floor = _dispatch_qos_to_qos_class(override_qos_floor); + } + + return override_info; +} + +int +dispatch_thread_override_self(qos_class_t override_qos) +{ + dispatch_qos_t qos = _dispatch_qos_from_qos_class(override_qos); + _dispatch_wqthread_override_start(_dispatch_tid_self(), qos); + // ensure that the root queue sees that this thread was overridden. 
+ _dispatch_set_basepri_override_qos(qos); + return 0; +} + +int +dispatch_lock_override_start_with_debounce(dispatch_lock_t *lock_addr, + dispatch_tid_t expected_thread, qos_class_t override_to_apply) +{ + return _dispatch_wqthread_override_start_check_owner(expected_thread, + _dispatch_qos_from_qos_class(override_to_apply), lock_addr); +} + +int +dispatch_lock_override_end(qos_class_t override_to_end) +{ + _dispatch_set_basepri_override_qos(_dispatch_qos_from_qos_class(override_to_end)); + return 0; +} + DISPATCH_NOINLINE void _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, @@ -5111,6 +5150,10 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, new_state |= enqueue; } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { + // Always do the store unconditionally with release even if we already + // have the dirty bit set. This will make visible the publishing of the + // updated tail pointer and provide atomic ordering with any concurrent + // stores to the dq_state. 
new_state |= DISPATCH_QUEUE_DIRTY; } else if (new_state == old_state) { os_atomic_rmw_loop_give_up(goto done); @@ -6189,14 +6232,12 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) // // Also add the NEEDS_UNBIND flag so that // _dispatch_priority_compute_update knows it has to unbind + // + // Remove all flags except thread type + pp = _pthread_priority_strip_flags(pp, ~_PTHREAD_PRIORITY_THREAD_TYPE_MASK); - pp &= _PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK; if (ddi->ddi_wlh == DISPATCH_WLH_ANON) { pp |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; - } else { - // pthread sets the flag when it is an event delivery thread - // so we need to explicitly clear it - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; } _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); @@ -6208,9 +6249,8 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) return false; } - if ((pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) || - !(pp & ~_PTHREAD_PRIORITY_FLAGS_MASK)) { - // When the phtread kext is delivering kevents to us, and pthread + if (pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) { + // When the pthread kext is delivering kevents to us, and pthread // root queues are in use, then the pthread priority TSD is set // to a sched pri with the _PTHREAD_PRIORITY_SCHED_PRI_FLAG bit set. // @@ -6218,16 +6258,16 @@ _dispatch_wlh_worker_thread_init(dispatch_deferred_items_t ddi) // and the best option is to clear the qos/priority bits which tells // us to not do any QoS related calls on this thread. // - // However, in that case the manager thread is opted out of QoS, + // However, in that case, the manager thread is opted out of QoS, // as far as pthread is concerned, and can't be turned into // something else, so we can't stash. 
- pp &= (pthread_priority_t)_PTHREAD_PRIORITY_FLAGS_MASK; + pp = _pthread_priority_strip_qos_and_relpri(pp); } // Managers always park without mutating to a regular worker thread, and // hence never need to unbind from userland, and when draining a manager, // the NEEDS_UNBIND flag would cause the mutation to happen. // So we need to strip this flag - pp &= ~(pthread_priority_t)_PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG; + pp = _pthread_priority_strip_flags(pp, _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG); _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); // ensure kevents registered from this thread are registered at manager QoS @@ -6293,7 +6333,10 @@ _dispatch_wlh_worker_thread(dispatch_wlh_t wlh, dispatch_kevent_t events, os_workgroup_join_token_s join_token = {0}; if (wg) { int rv = os_workgroup_join(wg, &join_token); - dispatch_assert(rv == 0); + if (rv != 0) { + DISPATCH_CLIENT_CRASH(rv, "dispatch_workloop " + "os_workgroup_join failed"); + } } is_manager = _dispatch_wlh_worker_thread_init(&ddi); @@ -6556,7 +6599,48 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE)) #endif { - if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) { + if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, release))) { + _dispatch_root_queue_debug("worker thread request still pending " + "for global queue: %p", dq); + return; + } + } +#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE + return _dispatch_root_queue_poke_slow(dq, n, floor); +} + +// TODO (rokhinip): Rename this to dispatch_root_queue_wakeup and kill the +// existing wakeup code for root queues which seems to be dead +DISPATCH_NOINLINE +void +_dispatch_root_queue_poke_and_wakeup(dispatch_queue_global_t dq, int n, int floor) +{ +#if !DISPATCH_USE_INTERNAL_WORKQUEUE +#if DISPATCH_USE_PTHREAD_POOL + if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE || + dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE)) +#endif 
+ { + // 97049903: We need to use an RMW loop with an unconditional store release + // instead of a CAS with release here. + // + // 1. We rely on memory ordering between dgq_pending and updates to + // the dq_items_tail. Therefore, we need a release barrier so that the update to + // dq_items_tail is visible before the update to the dgq_pending. This + // release needs to happen unconditionally and pairs with any acquire loads + // on dgq_pending by drainers who are then going to inspect the + // dq_items_tail afterwards to find any items. + // + // 2. We rely on total ordering of all stores to dgq_pending to make sure + // that a race between a concurrent enqueue and dequeuer doesn't leave us + // in a state where neither thread handles the latest item enqueued. A CAS on + // dgq_pending does NOT order with a store to dgq_pending if the CAS fails + // which is why we do an unconditional store. + int old_pending, new_pending; + os_atomic_rmw_loop2o(dq, dgq_pending, old_pending, new_pending, release, { + new_pending = old_pending ?: n; + }); + if (old_pending > 0) { _dispatch_root_queue_debug("worker thread request still pending " "for global queue: %p", dq); return; @@ -6569,8 +6653,13 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) #define DISPATCH_ROOT_QUEUE_MEDIATOR ((struct dispatch_object_s *)~0ul) enum { + // The queue is not quiesced yet and we are still racing with other enqueuers + // and drainers, continue waiting DISPATCH_ROOT_QUEUE_DRAIN_WAIT, + // The head and tail of the queue have quiesced to both having non-NULL values, + // we can attempt a drain DISPATCH_ROOT_QUEUE_DRAIN_READY, + // Don't bother draining, no work present DISPATCH_ROOT_QUEUE_DRAIN_ABORT, }; @@ -6590,10 +6679,13 @@ _dispatch_root_queue_head_tail_quiesced(dispatch_queue_global_t dq) tail = os_atomic_load2o(dq, dq_items_tail, relaxed); if ((head == NULL) == (tail == NULL)) { if (tail == NULL) { // + // This is the case of head and tail both being NULL -- queue 
is empty. return DISPATCH_ROOT_QUEUE_DRAIN_ABORT; } + // Head and tail are both non-empty we are ready to drain return DISPATCH_ROOT_QUEUE_DRAIN_READY; } + // Head and tail are not matching yet keep waiting return DISPATCH_ROOT_QUEUE_DRAIN_WAIT; } @@ -6602,6 +6694,8 @@ static bool __DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, int (*predicate)(dispatch_queue_global_t dq)) { + // See also dgq_pending semantics in queue_internal.h + unsigned int sleep_time = DISPATCH_CONTENTION_USLEEP_START; int status = DISPATCH_ROOT_QUEUE_DRAIN_READY; bool pending = false; @@ -6615,11 +6709,11 @@ __DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, } // Since we have serious contention, we need to back off. if (!pending) { - // Mark this queue as pending to avoid requests for further threads - (void)os_atomic_inc2o(dq, dgq_pending, relaxed); + (void)os_atomic_inc2o(dq, dgq_pending, release); pending = true; } _dispatch_contention_usleep(sleep_time); + if (likely(status = predicate(dq))) goto out; sleep_time *= 2; } while (sleep_time < DISPATCH_CONTENTION_USLEEP_MAX); @@ -6631,9 +6725,18 @@ __DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, _dispatch_debug("contention on global queue: %p", dq); out: if (pending) { - (void)os_atomic_dec2o(dq, dgq_pending, relaxed); + (void)os_atomic_dec2o(dq, dgq_pending, acquire); + // Make sure to resample the queue post-decrement to make sure that we are + // seeing latest updates. We can use relaxed loads on the queue probe and + // piggyback on the acquire dec of dgq_pending. 
+ if (_dispatch_queue_class_probe(dq)) { + status = DISPATCH_ROOT_QUEUE_DRAIN_READY; + } } + if (status == DISPATCH_ROOT_QUEUE_DRAIN_WAIT) { + // Queue hasn't quiesced, make another TR to handle this while we go and + // park _dispatch_root_queue_poke(dq, 1, 0); } return status == DISPATCH_ROOT_QUEUE_DRAIN_READY; @@ -6647,18 +6750,31 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) start: // The MEDIATOR value acts both as a "lock" and a signal + head = os_atomic_xchg2o(dq, dq_items_head, DISPATCH_ROOT_QUEUE_MEDIATOR, relaxed); + // Queue head was empty, check to see if we are racing with concurrent + // enqueuer who has only set the tail if (unlikely(head == NULL)) { + // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. if (unlikely(!os_atomic_cmpxchg2o(dq, dq_items_head, DISPATCH_ROOT_QUEUE_MEDIATOR, NULL, relaxed))) { + // We raced with concurrent enqueuer who made queue non-empty who + // overwrote our mediator value in head. Enqueuer has succeeded in setting + // head and tail (which is why our CAS failed), we can just retry our + // drain goto start; } + if (unlikely(dq->dq_items_tail)) { // + // We set the mediator value on head which means head was NULL previously + // but we are seeing that there is a tail value -- we are racing with a + // concurrent enqueuer who made the queue non-empty and who hasn't yet + // finished the full enqueue if (__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, _dispatch_root_queue_head_tail_quiesced)) { goto start; @@ -6669,7 +6785,8 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) } if (unlikely(head == DISPATCH_ROOT_QUEUE_MEDIATOR)) { - // This thread lost the race for ownership of the queue. + // Racing with another thread and this thread lost the race for + // ownership of the queue. 
if (likely(__DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dq, _dispatch_root_queue_mediator_is_gone))) { goto start; @@ -6680,11 +6797,12 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) // Restore the head pointer to a sane value before returning. // If 'next' is NULL, then this item _might_ be the last item. next = head->do_next; - if (unlikely(!next)) { + os_atomic_store2o(dq, dq_items_head, NULL, relaxed); - // 22708742: set tail to NULL with release, so that NULL write to head - // above doesn't clobber head from concurrent enqueuer + // 22708742: set tail to NULL with release, so that NULL write to head above + // doesn't clobber head from concurrent enqueuer ie - if the CAS succeeds, + // someone else must also see the head as NULL. if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { // both head and tail are NULL now goto out; @@ -6862,6 +6980,9 @@ _dispatch_root_queue_drain(dispatch_queue_global_t dq, _dispatch_continuation_pop_inline(item, &dic, flags, dq); reset = _dispatch_reset_basepri_override(); if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) { + // We can just shortcircuit and don't need to worry about checking for + // more work because _dispatch_root_queue_drain_one should have requested + // for more threads if there was more work break; } @@ -6902,7 +7023,7 @@ _dispatch_worker_thread2(pthread_priority_t pp) bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG; bool cooperative = pp & _PTHREAD_PRIORITY_COOPERATIVE_FLAG; - pp &= (_PTHREAD_PRIORITY_THREAD_TYPE_MASK | ~_PTHREAD_PRIORITY_FLAGS_MASK); + pp = _pthread_priority_strip_flags(pp, ~_PTHREAD_PRIORITY_THREAD_TYPE_MASK); _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp); dispatch_queue_global_t dq; @@ -6920,7 +7041,7 @@ _dispatch_worker_thread2(pthread_priority_t pp) _dispatch_introspection_thread_add(); _dispatch_trace_runtime_event(worker_unpark, dq, 0); - int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + int pending = 
os_atomic_dec2o(dq, dgq_pending, acquire); dispatch_assert(pending >= 0); invoke_flags |= DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN; @@ -6969,14 +7090,14 @@ _dispatch_root_queue_init_pthread_pool(dispatch_queue_global_t dq, } // 6618342 Contact the team that owns the Instrument DTrace probe before -// renaming this symbol +// renaming this symbol static void * _dispatch_worker_thread(void *context) { dispatch_queue_global_t dq = context; dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - int pending = os_atomic_dec2o(dq, dgq_pending, relaxed); + int pending = os_atomic_dec2o(dq, dgq_pending, acquire); if (unlikely(pending < 0)) { DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); } @@ -7016,7 +7137,7 @@ _dispatch_worker_thread(void *context) DISPATCH_PRIORITY_FLAG_FLOOR | DISPATCH_PRIORITY_REQUESTED_MASK)) == 0) { pri &= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + if (_pthread_priority_has_qos(pp)) { pri |= _dispatch_priority_from_pp(pp); } else { pri |= _dispatch_priority_make_override(DISPATCH_QOS_SATURATED); @@ -7667,7 +7788,7 @@ _dispatch_runloop_root_queue_create_4CF(const char *label, unsigned long flags) DISPATCH_QUEUE_ROLE_BASE_ANON); dq->do_targetq = _dispatch_get_default_queue(true); dq->dq_label = label ? 
label : "runloop-queue"; // no-copy contract - if (pp & _PTHREAD_PRIORITY_QOS_CLASS_MASK) { + if (_pthread_priority_has_qos(pp)) { dq->dq_priority = _dispatch_priority_from_pp_strip_flags(pp); } _dispatch_runloop_queue_handle_init(dq); @@ -8194,6 +8315,7 @@ libdispatch_init(void) _dispatch_thread_key_create(&dispatch_dsc_key, NULL); _dispatch_thread_key_create(&os_workgroup_key, _os_workgroup_tsd_cleanup); _dispatch_thread_key_create(&dispatch_enqueue_key, NULL); + _dispatch_thread_key_create(&dispatch_msgv_aux_key, free); #endif #if DISPATCH_USE_RESOLVERS // rdar://problem/8541707 _dispatch_main_q.do_targetq = _dispatch_get_default_queue(true); @@ -8201,6 +8323,9 @@ libdispatch_init(void) _dispatch_queue_set_current(&_dispatch_main_q); _dispatch_queue_set_bound_thread(&_dispatch_main_q); + // Mark thread as having DISPATCH_QOS_SATURATED override since main thread + // at this point, can't be overridden + _dispatch_set_basepri_override_qos(DISPATCH_QOS_SATURATED); #if DISPATCH_USE_PTHREAD_ATFORK (void)dispatch_assume_zero(pthread_atfork(dispatch_atfork_prepare, @@ -8341,6 +8466,7 @@ _libdispatch_tsd_cleanup(void *ctx) _dispatch_deferred_items_cleanup); _tsd_call_cleanup(dispatch_quantum_key, NULL); _tsd_call_cleanup(dispatch_enqueue_key, NULL); + _tsd_call_cleanup(dispatch_msgv_aux_key, free); _tsd_call_cleanup(dispatch_dsc_key, NULL); #ifdef __ANDROID__ if (_dispatch_thread_detach_callback) { diff --git a/src/queue_internal.h b/src/queue_internal.h index 68a5fec23..a7425cd35 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -644,7 +644,46 @@ struct dispatch_queue_static_s { struct dispatch_object_s *volatile dq_items_tail); \ int volatile dgq_thread_pool_size; \ struct dispatch_object_s *volatile dq_items_head; \ - int volatile dgq_pending + int volatile dgq_pending; + +// 97049903: dgq_pending has semaphore-like semantics: +// +// dgq_pending = 0 means semaphore is available +// If we are on the push side, take the semaphore and also the 
responsibility +// of moving state machine forward by requesting for a thread. +// +// If we are a drainer for the root queue who has just come out and we +// see dgq_pending = 0, there has been an underflow, error out. +// +// dgq_pending > 0 means semaphore is taken. +// If we are on the push side, dgq_pending > 0 has a dirty-bit like +// semantic since the enqueuer has updated the queue head/tail, sees that +// there are {concurrent contended, requested} drainers and therefore lets +// the drainers do the rest of the evaluation. We don't need an +// explicit DIRTY_BIT for enqueuers to set in this case since we know that we +// are racing with drainers who are already doing loads off the dq_items_tail +// and head which is what the enqueuer modified. +// +// We however need to make sure that the publish to the dq_items_tail is +// always visible for the enqueuer. +// +// If we're on the drain side, +// +// (1) If we have just entered userspace, we drop the semaphore (ie +// decrement dgq_pending with acquire) and attempt to drain the +// specified global root queue +// +// (2) If we don't hit contention and get to drain an item, great - we are +// now working on the workitem and we have already released our hold on +// the semaphore. +// +// (3) If we hit contention, we retake the semaphore (store release +// dgq_pending increment) so that more threads aren't requested by +// enqueuers. We now hold the responsibility of taking state machine +// forward due to any changes done to the queue including by concurrent +// enqueuers. Spin/backoff and evaluate if the queue has quiesced enough. +// If it looks to have elements on it, goto (1) as if we had just entered +// userspace and repeat. Else return and park. 
struct dispatch_queue_global_s { DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane); @@ -819,6 +858,7 @@ void _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor); +void _dispatch_root_queue_poke_and_wakeup(dispatch_queue_global_t dq, int n, int floor); void _dispatch_root_queue_wakeup(dispatch_queue_global_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags); void _dispatch_root_queue_push(dispatch_queue_global_t dq, @@ -1130,7 +1170,7 @@ typedef struct dispatch_continuation_vtable_s { } const *dispatch_continuation_vtable_t; #ifndef DISPATCH_CONTINUATION_CACHE_LIMIT -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) || DISPATCH_TARGET_DK_EMBEDDED #define DISPATCH_CONTINUATION_CACHE_LIMIT 112 // one 256k heap for 64 threads #define DISPATCH_CONTINUATION_CACHE_LIMIT_MEMORYPRESSURE_PRESSURE_WARN 16 #else diff --git a/src/shims.c b/src/shims.c index 3914d9c62..e1345507b 100644 --- a/src/shims.c +++ b/src/shims.c @@ -24,12 +24,13 @@ #if !HAVE_STRLCPY size_t strlcpy(char *dst, const char *src, size_t size) { - size_t res = strlen(dst) + strlen(src) + 1; - if (size > 0) { - size_t n = size - 1; - strncpy(dst, src, n); - dst[n] = 0; + size_t srclen = strlen(src); + if (srclen < size) { + strncpy(dst, src, srclen + 1); + } else if (size > 0) { + strncpy(dst, src, size-1); + dst[size-1] = '\0'; } - return res; + return srclen; } #endif diff --git a/src/shims.h b/src/shims.h index b611a5a73..89a47f4d3 100644 --- a/src/shims.h +++ b/src/shims.h @@ -56,7 +56,7 @@ #ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT #if defined(__APPLE__) -#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 64 +#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 32 #else #define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255 #endif diff --git a/src/shims/lock.c b/src/shims/lock.c index 4a750b3bd..da5ba0c6f 100644 --- a/src/shims/lock.c +++ b/src/shims/lock.c @@ -342,6 +342,8 @@ 
_dlock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout, uint32_t flags) case ETIMEDOUT: case EFAULT: return -rc; + case EOWNERDEAD: + DISPATCH_CLIENT_CRASH(val, "Owner in ulock is unknown - possible memory corruption"); default: DISPATCH_INTERNAL_CRASH(-rc, "ulock_wait() failed"); } diff --git a/src/shims/priority.h b/src/shims/priority.h index aa0008ce2..b4699672e 100644 --- a/src/shims/priority.h +++ b/src/shims/priority.h @@ -22,11 +22,25 @@ * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch * which are subject to change in future releases of Mac OS X. Any applications * relying on these interfaces WILL break. + * + * This file shares knowledge with libpthread and xnu on the pthread_priority_t + * encoding. This is intentional to make the priority manipulations in dispatch + * as fast as possible instead of having to make cross library calls when + * encoding and decoding a pthread_priority_t. In addition, dispatch has its own + * representation of priority via dispatch_priority_t which has some extra + * information that is not of interest to pthread or xnu. + * + * This file encapsulates all the priority related manipulations and encodings + * to and from the various formats that dispatch deals with. 
*/ #ifndef __DISPATCH_SHIMS_PRIORITY__ #define __DISPATCH_SHIMS_PRIORITY__ +#ifdef __APPLE__ +#include +#endif // __APPLE__ + #if HAVE_PTHREAD_QOS_H && __has_include() #include #include @@ -49,10 +63,12 @@ #define _PTHREAD_PRIORITY_COOPERATIVE_FLAG 0x08000000 #endif #ifndef _PTHREAD_PRIORITY_THREAD_TYPE_MASK -#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK 0x88000000 +#define _PTHREAD_PRIORITY_THREAD_TYPE_MASK \ + (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | _PTHREAD_PRIORITY_COOPERATIVE_FLAG) #endif #else // HAVE_PTHREAD_QOS_H + OS_ENUM(qos_class, unsigned int, QOS_CLASS_USER_INTERACTIVE = 0x21, QOS_CLASS_USER_INITIATED = 0x19, @@ -64,10 +80,6 @@ OS_ENUM(qos_class, unsigned int, ); typedef unsigned long pthread_priority_t; #define QOS_MIN_RELATIVE_PRIORITY (-15) -#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) -#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 -#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) -#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff #define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 #define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 @@ -78,15 +90,38 @@ typedef unsigned long pthread_priority_t; #define _PTHREAD_PRIORITY_THREAD_TYPE_MASK \ (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | _PTHREAD_PRIORITY_COOPERATIVE_FLAG) +// Mask values +#if !TARGET_OS_SIMULATOR +#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff) +#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#endif + #endif // HAVE_PTHREAD_QOS_H #if !defined(POLICY_RR) && defined(SCHED_RR) #define POLICY_RR SCHED_RR #endif // !defined(POLICY_RR) && defined(SCHED_RR) +/* + * dispatch_priority_t encoding + * + * flags unused override fallback req qos relpri + * |----------------------|----------|----------|---------|---------|----------| + * 24 - 31 20-23 16-19 12-15 8-11 0-7 + * + * Some of the fields here are laid out similar to what is laid out in 
+ * pthread_priority_t - namely flags and relpri. This is enforced via static + * asserts on the mask and shift values. If the layout of pthread_priority_t + * changes to add more flags etc, we need to make relevant changes in + * dispatch_priority_t as well. + */ typedef uint32_t dispatch_qos_t; typedef uint32_t dispatch_priority_t; +// QoS encoding unique to dispatch_priority_t - used for req, fallback and +// override #define DISPATCH_QOS_UNSPECIFIED ((dispatch_qos_t)0) #define DISPATCH_QOS_MAINTENANCE ((dispatch_qos_t)1) #define DISPATCH_QOS_BACKGROUND ((dispatch_qos_t)2) @@ -102,8 +137,6 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_QOS_BUCKET(qos) ((int)((qos) - DISPATCH_QOS_MIN)) #define DISPATCH_QOS_FOR_BUCKET(bucket) ((dispatch_qos_t)((uint32_t)bucket + DISPATCH_QOS_MIN)) -#define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) -#define DISPATCH_PRIORITY_RELPRI_SHIFT 0 #define DISPATCH_PRIORITY_QOS_MASK ((dispatch_priority_t)0x00000f00) #define DISPATCH_PRIORITY_QOS_SHIFT 8 #define DISPATCH_PRIORITY_REQUESTED_MASK ((dispatch_priority_t)0x00000fff) @@ -111,25 +144,46 @@ typedef uint32_t dispatch_priority_t; #define DISPATCH_PRIORITY_FALLBACK_QOS_SHIFT 12 #define DISPATCH_PRIORITY_OVERRIDE_MASK ((dispatch_priority_t)0x000f0000) #define DISPATCH_PRIORITY_OVERRIDE_SHIFT 16 -#define DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) - #define DISPATCH_PRIORITY_SATURATED_OVERRIDE DISPATCH_PRIORITY_OVERRIDE_MASK -#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) // _PTHREAD_PRIORITY_OVERCOMMIT_FLAG -#define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) // _PTHREAD_PRIORITY_FALLBACK_FLAG -#define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) // _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG -#define DISPATCH_PRIORITY_FLAG_COOPERATIVE ((dispatch_priority_t)0x08000000) // _PTHREAD_PRIORITY_COOPERATIVE_FLAG +// not passed to pthread +#define DISPATCH_PRIORITY_FLAG_FLOOR 
((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG +#define DISPATCH_PRIORITY_FLAG_ENFORCE ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG +#define DISPATCH_PRIORITY_FLAG_INHERITED ((dispatch_priority_t)0x20000000) + +// Stuff which overlaps between dispatch_priority_t and pthread_priority_t + +// relpri +#define DISPATCH_PRIORITY_RELPRI_MASK ((dispatch_priority_t)0x000000ff) +#define DISPATCH_PRIORITY_RELPRI_SHIFT 0 +#if !TARGET_OS_SIMULATOR +dispatch_static_assert(DISPATCH_PRIORITY_RELPRI_MASK == _PTHREAD_PRIORITY_PRIORITY_MASK, "relpri masks match"); +dispatch_static_assert(DISPATCH_PRIORITY_RELPRI_SHIFT == _PTHREAD_PRIORITY_PRIORITY_SHIFT, "relpri shift match"); +#endif + +// flags +#define DISPATCH_PRIORITY_FLAGS_MASK ((dispatch_priority_t)0xff000000) +#if !TARGET_OS_SIMULATOR +dispatch_static_assert(DISPATCH_PRIORITY_FLAGS_MASK == _PTHREAD_PRIORITY_FLAGS_MASK, "pthread priority flags mask match"); +#endif +#define DISPATCH_PRIORITY_FLAG_OVERCOMMIT ((dispatch_priority_t)0x80000000) +dispatch_static_assert(DISPATCH_PRIORITY_FLAG_OVERCOMMIT == _PTHREAD_PRIORITY_OVERCOMMIT_FLAG, "overcommit flags match"); +#define DISPATCH_PRIORITY_FLAG_FALLBACK ((dispatch_priority_t)0x04000000) +dispatch_static_assert(DISPATCH_PRIORITY_FLAG_FALLBACK == _PTHREAD_PRIORITY_FALLBACK_FLAG, "fallback flags match"); +#define DISPATCH_PRIORITY_FLAG_MANAGER ((dispatch_priority_t)0x02000000) +dispatch_static_assert(DISPATCH_PRIORITY_FLAG_MANAGER == _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG, "manager flags match"); +#define DISPATCH_PRIORITY_FLAG_COOPERATIVE ((dispatch_priority_t)0x08000000) +dispatch_static_assert(DISPATCH_PRIORITY_FLAG_COOPERATIVE == _PTHREAD_PRIORITY_COOPERATIVE_FLAG, "cooperative flags match"); + +// Subset of pthread priority flags that we care about in dispatch_priority_t #define DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK \ (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_FALLBACK | \ DISPATCH_PRIORITY_FLAG_MANAGER | 
DISPATCH_PRIORITY_FLAG_COOPERATIVE) + +// Subset of pthread priority flags about thread req type #define DISPATCH_PRIORITY_THREAD_TYPE_MASK \ (DISPATCH_PRIORITY_FLAG_OVERCOMMIT | DISPATCH_PRIORITY_FLAG_COOPERATIVE) -// not passed to pthread -#define DISPATCH_PRIORITY_FLAG_FLOOR ((dispatch_priority_t)0x40000000) // _PTHREAD_PRIORITY_INHERIT_FLAG -#define DISPATCH_PRIORITY_FLAG_ENFORCE ((dispatch_priority_t)0x10000000) // _PTHREAD_PRIORITY_ENFORCE_FLAG -#define DISPATCH_PRIORITY_FLAG_INHERITED ((dispatch_priority_t)0x20000000) - DISPATCH_ALWAYS_INLINE static inline bool _dispatch_qos_class_valid(qos_class_t cls, int relpri) @@ -199,28 +253,44 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dispatch_qos_from_pp(pthread_priority_t pp) { +#if !TARGET_OS_SIMULATOR pp &= _PTHREAD_PRIORITY_QOS_CLASS_MASK; pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; return (dispatch_qos_t)__builtin_ffs((int)pp); +#else + qos_class_t qos = _pthread_qos_class_decode(pp, NULL, NULL); + return _dispatch_qos_from_qos_class(qos); +#endif } DISPATCH_ALWAYS_INLINE static inline dispatch_qos_t _dispatch_qos_from_pp_unsafe(pthread_priority_t pp) { +#if !TARGET_OS_SIMULATOR // this assumes we know there is a QOS and pp has been masked off properly pp >>= _PTHREAD_PRIORITY_QOS_CLASS_SHIFT; DISPATCH_COMPILER_CAN_ASSUME(pp); return (dispatch_qos_t)__builtin_ffs((int)pp); +#else + qos_class_t qos = _pthread_qos_class_decode(pp, NULL, NULL); + return _dispatch_qos_from_qos_class(qos); +#endif } DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_qos_to_pp(dispatch_qos_t qos) { - pthread_priority_t pp; - pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT); +#if !TARGET_OS_SIMULATOR + pthread_priority_t pp = 0; + if (qos) { + pp = 1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT); + } return pp | _PTHREAD_PRIORITY_PRIORITY_MASK; +#else + return _pthread_qos_class_encode(_dispatch_qos_to_qos_class(qos), 0, 0); +#endif } // including maintenance @@ -231,6 +301,84 @@ 
_dispatch_qos_is_background(dispatch_qos_t qos) return qos && qos <= DISPATCH_QOS_BACKGROUND; } +#pragma mark pthread_priority_t + +#if TARGET_OS_SIMULATOR +// We need to define these fallbacks for the simulator use case - we use the +// definitions provided by the priority_private.h xnu header in the non-simulator +// case +DISPATCH_ALWAYS_INLINE +static inline bool +_pthread_priority_has_qos(pthread_priority_t pp) +{ + return _pthread_qos_class_decode(pp, NULL, NULL) != QOS_CLASS_UNSPECIFIED; +} + +DISPATCH_ALWAYS_INLINE +static inline int +_pthread_priority_relpri(pthread_priority_t pp) +{ + int relpri; + (void) _pthread_qos_class_decode(pp, &relpri, NULL); + return relpri; +} +#endif + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_pthread_priority_modify_flags(pthread_priority_t pp, unsigned long flags_to_strip, + unsigned long flags_to_add) +{ +#if !TARGET_OS_SIMULATOR + return (pp & (~flags_to_strip | ~_PTHREAD_PRIORITY_FLAGS_MASK)) | flags_to_add; +#else + qos_class_t qos; int relpri; unsigned long flags; + qos = _pthread_qos_class_decode(pp, &relpri, &flags); + return _pthread_qos_class_encode(qos, relpri, (flags & ~flags_to_strip) | flags_to_add); +#endif +} +// For removing specific flags without adding any +#define _pthread_priority_strip_flags(pp, flags) _pthread_priority_modify_flags(pp, flags, 0) + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_pthread_priority_strip_all_flags(pthread_priority_t pp) +{ +#if !TARGET_OS_SIMULATOR + return (pp & ~_PTHREAD_PRIORITY_FLAGS_MASK); +#else + qos_class_t qos; int relpri; + qos = _pthread_qos_class_decode(pp, &relpri, NULL); + return _pthread_qos_class_encode(qos, relpri, 0); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_pthread_priority_strip_relpri(pthread_priority_t pp) +{ +#if !TARGET_OS_SIMULATOR + return (pp & ~_PTHREAD_PRIORITY_PRIORITY_MASK); +#else + qos_class_t qos; unsigned long flags; + qos = _pthread_qos_class_decode(pp, NULL, &flags); + return 
_pthread_qos_class_encode(qos, 0, flags); +#endif +} + +DISPATCH_ALWAYS_INLINE +static inline pthread_priority_t +_pthread_priority_strip_qos_and_relpri(pthread_priority_t pp) +{ +#if !TARGET_OS_SIMULATOR + return (pp & _PTHREAD_PRIORITY_FLAGS_MASK); +#else + unsigned long flags; + (void) _pthread_qos_class_decode(pp, NULL, &flags); + return _pthread_qos_class_encode(QOS_CLASS_UNSPECIFIED, 0, flags); +#endif +} + #pragma mark dispatch_priority #define _dispatch_priority_make(qos, relpri) \ @@ -298,6 +446,7 @@ _dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) { dispatch_assert(!(pp & _PTHREAD_PRIORITY_SCHED_PRI_FLAG)); +#if !TARGET_OS_SIMULATOR dispatch_priority_t dbp; if (keep_flags) { dbp = pp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | @@ -306,7 +455,19 @@ _dispatch_priority_from_pp_impl(pthread_priority_t pp, bool keep_flags) dbp = pp & DISPATCH_PRIORITY_RELPRI_MASK; } - dbp |= _dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT; + dbp |= (_dispatch_qos_from_pp(pp) << DISPATCH_PRIORITY_QOS_SHIFT); + return dbp; +#else + int relpri; unsigned long flags; + dispatch_priority_t dbp; + + qos_class_t qos = _pthread_qos_class_decode(pp, &relpri, &flags); + + dbp = _dispatch_priority_make(_dispatch_qos_from_qos_class(qos), relpri); + if (keep_flags) { + dbp |= (flags & DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK); + } +#endif return dbp; } #define _dispatch_priority_from_pp(pp) \ @@ -321,11 +482,17 @@ DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_priority_to_pp_strip_flags(dispatch_priority_t dbp) { - pthread_priority_t pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; dispatch_qos_t qos = _dispatch_priority_qos(dbp); + pthread_priority_t pp; +#if !TARGET_OS_SIMULATOR + pp = dbp & DISPATCH_PRIORITY_RELPRI_MASK; // relpri if (qos) { pp |= (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); } +#else + int relpri = _dispatch_priority_relpri(dbp); + pp = _pthread_qos_class_encode(_dispatch_qos_to_qos_class(qos), relpri, 
0); +#endif return pp; } @@ -333,21 +500,38 @@ DISPATCH_ALWAYS_INLINE static inline pthread_priority_t _dispatch_priority_to_pp_prefer_fallback(dispatch_priority_t dbp) { - pthread_priority_t pp; dispatch_qos_t qos; +#if !TARGET_OS_SIMULATOR + pthread_priority_t pp; if (dbp & DISPATCH_PRIORITY_FLAG_FALLBACK) { pp = dbp & DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK; pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; qos = _dispatch_priority_fallback_qos(dbp); + + dispatch_assert(qos != DISPATCH_QOS_UNSPECIFIED); } else { pp = dbp & (DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK | DISPATCH_PRIORITY_RELPRI_MASK); qos = _dispatch_priority_qos(dbp); if (unlikely(!qos)) return pp; } - return pp | (1ul << ((qos - 1) + _PTHREAD_PRIORITY_QOS_CLASS_SHIFT)); +#else + unsigned long flags; + int relpri; + + if (dbp & DISPATCH_PRIORITY_FLAG_FALLBACK) { + flags = dbp & DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK; + relpri = 0; + qos = _dispatch_priority_fallback_qos(dbp); + } else { + flags = dbp & DISPATCH_PRIORITY_PTHREAD_PRIORITY_FLAGS_MASK; + relpri = _dispatch_priority_relpri(dbp); + qos = _dispatch_priority_qos(dbp); + } + return _pthread_qos_class_encode(_dispatch_qos_to_qos_class(qos), relpri, flags); +#endif } #endif // __DISPATCH_SHIMS_PRIORITY__ diff --git a/src/shims/tsd.h b/src/shims/tsd.h index cf568d90f..132a2b1a0 100644 --- a/src/shims/tsd.h +++ b/src/shims/tsd.h @@ -97,6 +97,12 @@ static const unsigned long dispatch_quantum_key = __PTK_LIBDISPATCH_KEY10; static const unsigned long dispatch_dsc_key = __PTK_LIBDISPATCH_KEY11; static const unsigned long dispatch_enqueue_key = __PTK_LIBDISPATCH_KEY12; +#if __has_include() +_Static_assert(__PTK_LIBDISPATCH_KEY13 == __TSD_MACH_MSG_AUX, + "libsyscall and libdispatch mach msgv TSD value mismatch"); +#endif +static const unsigned long dispatch_msgv_aux_key = __PTK_LIBDISPATCH_KEY13; + static const unsigned long os_workgroup_key = __PTK_LIBDISPATCH_WORKGROUP_KEY0; DISPATCH_TSD_INLINE @@ -158,6 +164,7 @@ struct dispatch_tsd { void 
*dispatch_quantum_key; void *dispatch_dsc_key; void *dispatch_enqueue_key; + void *dispatch_msgv_aux_key; void *os_workgroup_key; }; @@ -219,6 +226,7 @@ extern pthread_key_t dispatch_deferred_items_key; extern pthread_key_t dispatch_quantum_key; extern pthread_key_t dispatch_dsc_key; extern pthread_key_t dispatch_enqueue_key; +extern pthread_key_t dispatch_msgv_aux_key; extern pthread_key_t os_workgroup_key; diff --git a/src/shims/yield.h b/src/shims/yield.h index aeb429d44..eb7afcd1f 100644 --- a/src/shims/yield.h +++ b/src/shims/yield.h @@ -94,7 +94,7 @@ void *_dispatch_wait_for_enqueuer(void **ptr, void **tailp); #ifndef DISPATCH_CONTENTION_SPINS_MIN #define DISPATCH_CONTENTION_SPINS_MIN (32 - 1) #endif -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) || DISPATCH_TARGET_DK_EMBEDDED #define _dispatch_contention_spins() \ ((DISPATCH_CONTENTION_SPINS_MIN) + ((DISPATCH_CONTENTION_SPINS_MAX) - \ (DISPATCH_CONTENTION_SPINS_MIN)) / 2) diff --git a/src/source.c b/src/source.c index 6c22e5136..a187ec76e 100644 --- a/src/source.c +++ b/src/source.c @@ -1374,6 +1374,34 @@ dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, #pragma mark - #pragma mark dispatch_after +static uint64_t +_dispatch_after_leeway(uint64_t delta) +{ + // + uint64_t leeway = 0; + pthread_priority_t pp = _dispatch_get_priority(); + + // 10% leeway for BG and UT, 6.7% leeway for DEF and IN, 5% leeway for UI and + // above + switch (_dispatch_qos_from_pp(pp)) { + case DISPATCH_QOS_UNSPECIFIED: + case DISPATCH_QOS_MAINTENANCE: + case DISPATCH_QOS_BACKGROUND: + case DISPATCH_QOS_UTILITY: + leeway = delta / 10; + break; + case DISPATCH_QOS_DEFAULT: + case DISPATCH_QOS_USER_INITIATED: + leeway = delta / 15; + break; + default: + leeway = delta / 20; + break; + } + + return leeway; +} + DISPATCH_ALWAYS_INLINE static inline void _dispatch_after(dispatch_time_t when, dispatch_queue_t dq, @@ -1397,7 +1425,7 @@ _dispatch_after(dispatch_time_t when, 
dispatch_queue_t dq, } return dispatch_async_f(dq, ctxt, handler); } - leeway = delta / 10; // + leeway = _dispatch_after_leeway(delta); if (leeway < NSEC_PER_MSEC) leeway = NSEC_PER_MSEC; if (leeway > 60 * NSEC_PER_SEC) leeway = 60 * NSEC_PER_SEC; diff --git a/src/voucher.c b/src/voucher.c index 581bbc0ba..f7cb290bf 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -348,6 +348,7 @@ voucher_replace_default_voucher(void) _voucher_mach_recipe_size(sizeof(_voucher_mach_udata_s)) + \ _voucher_extra_size(v))) +#if !DISPATCH_SEND_ACTIVITY_IN_MSGV DISPATCH_ALWAYS_INLINE static inline mach_voucher_attr_recipe_size_t _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, @@ -396,6 +397,7 @@ _voucher_mach_recipe_init(mach_voucher_attr_recipe_t mvar_buf, voucher_s *v, } return size; } +#endif mach_voucher_t _voucher_get_mach_voucher(voucher_t voucher) @@ -404,6 +406,9 @@ _voucher_get_mach_voucher(voucher_t voucher) if (voucher->v_ipc_kvoucher) return voucher->v_ipc_kvoucher; mach_voucher_t kvb = voucher->v_kvoucher; if (!kvb) kvb = _voucher_get_task_mach_voucher(); +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + return kvb; +#else if (!voucher->v_activity && !_voucher_extra_size(voucher)) { return kvb; } @@ -431,10 +436,63 @@ _voucher_get_mach_voucher(voucher_t voucher) _dispatch_voucher_debug("kvoucher[0x%08x] create", voucher, kv); } return kv; +#endif } +#if DISPATCH_SEND_ACTIVITY_IN_MSGV +static voucher_t +_voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits, + _voucher_mach_udata_s *udata, mach_msg_size_t udata_sz) +{ + voucher_t v; + voucher_t bv = VOUCHER_NULL; + + if (kv) { + bv = _voucher_find_and_retain(kv); + if (bv) { + _dispatch_voucher_debug("kvoucher[0x%08x] found", bv, kv); + _voucher_dealloc_mach_voucher(kv); + } else { + bv = _voucher_alloc(0); + bv->v_ipc_kvoucher = bv->v_kvoucher = kv; + bv->v_kv_has_importance = !!(msgh_bits & MACH_MSGH_BITS_RAISEIMP); + _voucher_insert(bv); + _voucher_trace(CREATE, bv, kv, 0); + } + 
} + + if (udata_sz < offsetof(_voucher_mach_udata_s, _vmu_after_activity) || + udata->vmu_magic != VOUCHER_MAGIC_V3 || !udata->vmu_activity) { + return bv; + } + + if (bv) { + /* base voucher should not contain activity data */ + if (bv->v_activity != 0) { + DISPATCH_INTERNAL_CRASH(bv->v_activity, "base voucher has non-zero activity value"); + } + if (bv->v_kvbase != VOUCHER_NULL) { + DISPATCH_INTERNAL_CRASH(bv->v_kvbase, "base voucher has nested base voucher"); + } + v = _voucher_clone(bv, VOUCHER_FIELD_ACTIVITY); + voucher_release(bv); + } else { + v = _voucher_alloc(0); + } + + /* set up activity data on voucher_t */ + v->v_activity = udata->vmu_activity; + v->v_activity_creator = udata->vmu_activity_pid; + v->v_parent_activity = udata->vmu_parent_activity; + + _voucher_trace(CREATE, v, v->v_kvoucher, v->v_activity); + _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); + return v; +} +#else static voucher_t -_voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) +_voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits, + DISPATCH_UNUSED _voucher_mach_udata_s *unused_udata, DISPATCH_UNUSED mach_msg_size_t unused_udata_sz) { if (!kv) return NULL; kern_return_t kr; @@ -520,6 +578,7 @@ _voucher_create_with_mach_voucher(mach_voucher_t kv, mach_msg_bits_t msgh_bits) _dispatch_voucher_debug("kvoucher[0x%08x] create", v, kv); return v; } +#endif /* DISPATCH_SEND_ACTIVITY_IN_MSGV */ voucher_t _voucher_create_without_importance(voucher_t ov) @@ -618,12 +677,33 @@ _voucher_create_accounting_voucher(voucher_t ov) return v; } +voucher_t +_voucher_create_with_mach_msgv(mach_msg_header_t *msg, mach_msg_aux_header_t *aux) +{ + mach_msg_bits_t msgh_bits; + + _voucher_mach_udata_aux_s *msgv_aux = (_voucher_mach_udata_aux_s *)aux; + _voucher_mach_udata_s *udata = NULL; + mach_msg_size_t udata_sz = 0, aux_sz = 0; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); + + if (msgv_aux) { + aux_sz = 
msgv_aux->header.msgdh_size; + if (unlikely(aux_sz < sizeof(mach_msg_aux_header_t))) { + DISPATCH_INTERNAL_CRASH(aux_sz, "Invalid msg aux data size."); + } + udata_sz = aux_sz - sizeof(mach_msg_aux_header_t); + udata = udata_sz ? &msgv_aux->udata: NULL; + } + return _voucher_create_with_mach_voucher(kv, msgh_bits, udata, udata_sz); +} + voucher_t voucher_create_with_mach_msg(mach_msg_header_t *msg) { mach_msg_bits_t msgh_bits; mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); - return _voucher_create_with_mach_voucher(kv, msgh_bits); + return _voucher_create_with_mach_voucher(kv, msgh_bits, NULL, 0); } void @@ -1076,9 +1156,25 @@ voucher_mach_msg_state_t voucher_mach_msg_adopt(mach_msg_header_t *msg) { mach_msg_bits_t msgh_bits; + _voucher_mach_udata_s *udata = NULL; + mach_msg_size_t udata_sz = 0; + mach_voucher_t kv = _voucher_mach_msg_get(msg, &msgh_bits); if (!kv) return VOUCHER_MACH_MSG_STATE_UNCHANGED; - voucher_t v = _voucher_create_with_mach_voucher(kv, msgh_bits); + +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + _voucher_mach_udata_aux_s *aux = _dispatch_thread_getspecific(dispatch_msgv_aux_key); + if (aux) { + mach_msg_size_t aux_sz = aux->header.msgdh_size; + if (aux_sz >= sizeof(mach_msg_aux_header_t)) { + udata_sz = aux_sz - sizeof(mach_msg_aux_header_t); + udata = udata_sz ? 
&aux->udata : NULL; + } + } +#endif + + voucher_t v = _voucher_create_with_mach_voucher(kv, msgh_bits, udata, udata_sz); + return (voucher_mach_msg_state_t)_voucher_adopt(v); } @@ -1092,13 +1188,48 @@ voucher_mach_msg_revert(voucher_mach_msg_state_t state) #if DISPATCH_USE_LIBKERNEL_VOUCHER_INIT #include <_libkernel_init.h> +#if DISPATCH_SEND_ACTIVITY_IN_MSGV +static mach_msg_size_t +voucher_mach_msg_fill_aux(mach_msg_aux_header_t *aux, mach_msg_size_t aux_sz) +{ + voucher_t v = _voucher_get(); + + if (!(v && v->v_activity)) return 0; + if (aux_sz < DISPATCH_MSGV_AUX_MAX_SIZE) return 0; + + _Static_assert(LIBSYSCALL_MSGV_AUX_MAX_SIZE >= DISPATCH_MSGV_AUX_MAX_SIZE, + "aux buffer size in libsyscall too small"); + _voucher_mach_udata_aux_s *udata_aux = (_voucher_mach_udata_aux_s *)aux; + + udata_aux->header.msgdh_size = DISPATCH_MSGV_AUX_MAX_SIZE; + udata_aux->header.msgdh_reserved = 0; + + udata_aux->udata = (_voucher_mach_udata_s){ + .vmu_magic = VOUCHER_MAGIC_V3, + /* .vmu_priority is unused */ + .vmu_activity = v->v_activity, + .vmu_activity_pid = v->v_activity_creator, + .vmu_parent_activity = v->v_parent_activity, + }; + + return DISPATCH_MSGV_AUX_MAX_SIZE; +} +#endif + static const struct _libkernel_voucher_functions _voucher_libkernel_functions = { - .version = 1, + .version = 3, .voucher_mach_msg_set = voucher_mach_msg_set, .voucher_mach_msg_clear = voucher_mach_msg_clear, .voucher_mach_msg_adopt = voucher_mach_msg_adopt, .voucher_mach_msg_revert = voucher_mach_msg_revert, + + /* available starting version 3 */ +#if DISPATCH_SEND_ACTIVITY_IN_MSGV + .voucher_mach_msg_fill_aux = voucher_mach_msg_fill_aux, +#else + .voucher_mach_msg_fill_aux = NULL, +#endif }; static void @@ -1111,6 +1242,7 @@ _voucher_libkernel_init(void) #define _voucher_libkernel_init() #endif + void voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) { @@ -1123,6 +1255,7 @@ voucher_activity_initialize_4libtrace(voucher_activity_hooks_t hooks) 
"voucher_activity_initialize_4libtrace called twice"); } + // HACK: we can't call into os_variant until after the initialization of // dispatch and XPC, but we want to do it before the end of libsystem // initialization to avoid having to synchronize _dispatch_mode explicitly, @@ -1252,6 +1385,7 @@ _firehose_task_buffer_init(void *ctx OS_UNUSED) } } + DISPATCH_ALWAYS_INLINE static inline bool _voucher_activity_disabled(void) @@ -1350,6 +1484,7 @@ voucher_activity_create_with_data(firehose_tracepoint_id_t *trace_id, goto done; } + static const firehose_stream_t streams[2] = { firehose_stream_metadata, firehose_stream_persist, diff --git a/src/voucher_internal.h b/src/voucher_internal.h index b5ee42c28..2d2a0839f 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -134,6 +134,13 @@ typedef struct _voucher_mach_udata_s { uint8_t _vmu_after_activity[0]; } _voucher_mach_udata_s; +typedef struct _voucher_mach_udata_aux_s { + mach_msg_aux_header_t header; + _voucher_mach_udata_s udata; +} _voucher_mach_udata_aux_s; + +#define DISPATCH_MSGV_AUX_MAX_SIZE sizeof(_voucher_mach_udata_aux_s) + OS_ENUM(voucher_fields, uint16_t, VOUCHER_FIELD_NONE = 0, VOUCHER_FIELD_KVOUCHER = 1u << 0, @@ -234,7 +241,7 @@ typedef struct voucher_recipe_s { } voucher_recipe_s; #endif -#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR +#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR) || DISPATCH_TARGET_DK_EMBEDDED #define VL_HASH_SIZE 64u // must be a power of two #else #define VL_HASH_SIZE 256u // must be a power of two @@ -432,6 +439,7 @@ _voucher_get_activity_id(voucher_t v, uint64_t *creator_pid) return v ? 
v->v_activity : 0; } +voucher_t _voucher_create_with_mach_msgv(mach_msg_header_t *msg, mach_msg_aux_header_t *aux); void _voucher_task_mach_voucher_init(void* ctxt); extern dispatch_once_t _voucher_task_mach_voucher_pred; extern mach_voucher_t _voucher_task_mach_voucher; @@ -697,7 +705,7 @@ DISPATCH_ALWAYS_INLINE static inline bool _voucher_mach_msg_set(mach_msg_header_t *msg, voucher_t voucher) { - (void)msg; (void)voucher; + (void)msg; (void)voucher;; return false; } diff --git a/src/workgroup.c b/src/workgroup.c index 46f55c380..85fac9329 100644 --- a/src/workgroup.c +++ b/src/workgroup.c @@ -132,6 +132,13 @@ _os_workgroup_interval_explicit_xref_dispose(os_workgroup_interval_t wgi) _os_workgroup_interval_xref_dispose(wgi); _os_workgroup_explicit_xref_dispose(wgi->_as_wg); } + +void +_os_workgroup_explicit_dispose(os_workgroup_t wg) +{ + _os_workgroup_dispose(wg); + free(wg); +} #endif static inline bool @@ -140,28 +147,34 @@ _os_workgroup_is_configurable(uint64_t wg_state) return (wg_state & OS_WORKGROUP_OWNER) == OS_WORKGROUP_OWNER; } -#if !USE_OBJC -void -_os_workgroup_explicit_dispose(os_workgroup_t wg) +static inline bool +_os_workgroup_has_workload_id(uint64_t wg_state) { - _os_workgroup_dispose(wg); - free(wg); + return (wg_state & OS_WORKGROUP_HAS_WORKLOAD_ID); +} + +static inline bool +_os_workgroup_has_backing_workinterval(os_workgroup_t wg) +{ + return wg->port != MACH_PORT_NULL; } -#endif void _os_workgroup_dispose(os_workgroup_t wg) { dispatch_assert(wg->joined_cnt == 0); - kern_return_t kr; uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); - if (_os_workgroup_is_configurable(wg_state)) { - kr = work_interval_destroy(wg->wi); - } else { - kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, -1); + if (_os_workgroup_has_backing_workinterval(wg)) { + kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, + MACH_PORT_RIGHT_SEND, -1); + os_assumes(kr == KERN_SUCCESS); + if 
(_os_workgroup_is_configurable(wg_state)) { + kr = work_interval_destroy(wg->wi); + os_assumes(kr == KERN_SUCCESS); + } } - os_assumes(kr == KERN_SUCCESS); + if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { free((void *)wg->name); } @@ -228,6 +241,32 @@ _os_workgroup_set_current(os_workgroup_t new_wg) } } +static inline bool +_os_workgroup_client_interval_data_initialized( + os_workgroup_interval_data_t data) +{ + return (data->sig == _OS_WORKGROUP_INTERVAL_DATA_SIG_INIT); +} + +static inline bool +_os_workgroup_client_interval_data_is_valid(os_workgroup_interval_data_t data) +{ + return (data && _os_workgroup_client_interval_data_initialized(data)); +} + +static inline uint64_t +_os_workgroup_interval_data_complexity(os_workgroup_interval_data_t data) +{ + uint64_t complexity = 0; + + if (_os_workgroup_client_interval_data_is_valid(data)) { + if (data->wgid_flags & OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_HIGH) { + complexity = 1; + } + } + return complexity; +} + static inline bool _os_workgroup_attr_is_resolved(os_workgroup_attr_t attr) { @@ -253,11 +292,17 @@ _os_workgroup_attr_is_differentiated(os_workgroup_attr_t attr) return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_UNDIFFERENTIATED) == 0; } +static inline bool +_os_workgroup_attr_has_workload_id(os_workgroup_attr_t attr) +{ + return (attr->internal_wl_id_flags & WORK_INTERVAL_WORKLOAD_ID_HAS_ID) != 0; +} + static inline bool _os_workgroup_type_is_interval_type(os_workgroup_type_t wg_type) { return (wg_type >= OS_WORKGROUP_INTERVAL_TYPE_DEFAULT) && - (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_ARKIT); + (wg_type <= OS_WORKGROUP_INTERVAL_TYPE_FRAME_COMPOSITOR); } static bool @@ -279,13 +324,6 @@ _os_workgroup_type_is_default_type(os_workgroup_type_t wg_type) return wg_type == OS_WORKGROUP_TYPE_DEFAULT; } - -static inline bool -_os_workgroup_has_backing_workinterval(os_workgroup_t wg) -{ - return wg->wi != NULL; -} - static inline uint32_t _wi_flags_to_wi_type(uint32_t wi_flags) { @@ -324,6 +362,8 @@ 
_wi_flags_to_wg_type(uint32_t wi_flags) return OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA; case WORK_INTERVAL_TYPE_ARKIT: return OS_WORKGROUP_INTERVAL_TYPE_ARKIT; + case WORK_INTERVAL_TYPE_FRAME_COMPOSITOR: + return OS_WORKGROUP_INTERVAL_TYPE_FRAME_COMPOSITOR; case WORK_INTERVAL_TYPE_CA_CLIENT: return OS_WORKGROUP_INTERVAL_TYPE_CA_CLIENT; default: @@ -350,6 +390,9 @@ _wg_type_to_wi_flags(os_workgroup_type_t wg_type) return WORK_INTERVAL_TYPE_COREANIMATION; case OS_WORKGROUP_INTERVAL_TYPE_CA_RENDER_SERVER: return WORK_INTERVAL_TYPE_CA_RENDER_SERVER; + case OS_WORKGROUP_INTERVAL_TYPE_FRAME_COMPOSITOR: + return (WORK_INTERVAL_TYPE_FRAME_COMPOSITOR | + WORK_INTERVAL_FLAG_FINISH_AT_DEADLINE); case OS_WORKGROUP_INTERVAL_TYPE_HID_DELIVERY: return WORK_INTERVAL_TYPE_HID_DELIVERY; case OS_WORKGROUP_INTERVAL_TYPE_COREMEDIA: @@ -403,7 +446,8 @@ _os_workgroup_get_wg_wi_types_from_port(mach_port_t port, } static work_interval_t -_os_workgroup_create_work_interval(os_workgroup_attr_t attr) +_os_workgroup_create_work_interval(os_workgroup_attr_t attr, + mach_port_t *mach_port_out) { /* All workgroups are joinable */ uint32_t flags = WORK_INTERVAL_FLAG_JOINABLE; @@ -414,18 +458,109 @@ _os_workgroup_create_work_interval(os_workgroup_attr_t attr) flags |= WORK_INTERVAL_FLAG_GROUP; } + if (_os_workgroup_attr_has_workload_id(attr)) { + flags |= WORK_INTERVAL_FLAG_HAS_WORKLOAD_ID; + } + work_interval_t wi; int rv = work_interval_create(&wi, flags); if (rv) { return NULL; } + rv = work_interval_copy_port(wi, mach_port_out); + if (rv < 0) { + work_interval_destroy(wi); + return NULL; + } return wi; } +static void +_os_workgroup_set_work_interval_name(os_workgroup_t wg, const char *name) +{ + if (!MACH_PORT_VALID(wg->port)) { + DISPATCH_INTERNAL_CRASH(wg->port, "Invalid workgroup port"); + } + /* kernel requires NUL-terminated string in buffer of capped size */ + char wi_name[WORK_INTERVAL_NAME_MAX]; + size_t len = name ? 
strlcpy(wi_name, name, sizeof(wi_name)) : 0; + if (!len) { + return; + } + +#if !TARGET_OS_SIMULATOR + int ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_SET_NAME, wg->port, + wi_name, sizeof(wi_name)); + if (ret == -1) { + ret = errno; + (void)dispatch_assume_zero(ret); + } +#endif // !TARGET_OS_SIMULATOR +} + +static int +_os_workgroup_set_work_interval_workload_id(os_workgroup_t wg, + const char *workload_id, uint32_t workload_id_flags) +{ + int ret = 0; + + if (!MACH_PORT_VALID(wg->port)) { + DISPATCH_INTERNAL_CRASH(wg->port, "Invalid workgroup port"); + } + /* We use the WORKLOAD_ID_HAS_ID flag to indicate that the workload ID was + * valid in _os_workgroup_lookup_type_from_workload_id, don't call the + * setter syscall otherwise, and strip off that flag before calling the + * kernel, as it is an (error) out flag only for the syscall. */ + if (!workload_id_flags) { + return ret; + } + workload_id_flags &= ~WORK_INTERVAL_WORKLOAD_ID_HAS_ID; + + /* kernel requires NUL-terminated string in buffer of capped size */ + char wlid_name[WORK_INTERVAL_WORKLOAD_ID_NAME_MAX]; + strlcpy(wlid_name, workload_id, sizeof(wlid_name)); + +#if !TARGET_OS_SIMULATOR + /* SET_WORKLOAD_ID cross-checks the workinterval type in the original + * work_interval create flags against the ones specified here to ensure the + * requested workload ID is consistent & compatible. 
*/ + uint32_t create_flags = _wg_type_to_wi_flags(wg->wg_type); + struct work_interval_workload_id_params wlid_params = { + .wlidp_flags = workload_id_flags, + .wlidp_wicreate_flags = create_flags, + .wlidp_name = (uintptr_t)wlid_name, + }; + + ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_SET_WORKLOAD_ID, + wg->port, &wlid_params, sizeof(wlid_params)); + if (ret == -1) { + ret = errno; + (void)dispatch_assume_zero(ret); + } + if (ret || (wlid_params.wlidp_flags & WORK_INTERVAL_WORKLOAD_ID_HAS_ID)) { + _os_workgroup_error_log("Unable to set kernel workload ID: %s (0x%x)" + " -> %d (0x%x)", workload_id, workload_id_flags, ret, + !ret ? wlid_params.wlidp_flags : 0); + if (!ret) { + /* Seeing WORK_INTERVAL_WORKLOAD_ID_HAS_ID in the out flags + * indicates that the set ID operation failed because the work + * interval already had a workload ID set previously. This should + * only ever occur if a workgroup is created from an existing + * workinterval port or workgroup object (that had an ID set). 
*/ + ret = EALREADY; + } + } else { + wg->wg_state |= OS_WORKGROUP_HAS_WORKLOAD_ID; + } +#endif // !TARGET_OS_SIMULATOR + return ret; +} + struct os_workgroup_workload_id_table_entry_s { const char* wl_id; - os_workgroup_type_t wl_type; + os_workgroup_type_t wl_type, wl_compatibility_type; + uint32_t wl_id_flags; }; #if !TARGET_OS_SIMULATOR @@ -434,18 +569,25 @@ static const struct os_workgroup_workload_id_table_entry_s { .wl_id = "com.apple.coreaudio.hal.iothread", .wl_type = OS_WORKGROUP_INTERVAL_TYPE_COREAUDIO, + .wl_id_flags = WORK_INTERVAL_WORKLOAD_ID_RT_ALLOWED | + WORK_INTERVAL_WORKLOAD_ID_RT_CRITICAL, }, { .wl_id = "com.apple.coreaudio.hal.clientthread", .wl_type = OS_WORKGROUP_INTERVAL_TYPE_AUDIO_CLIENT, + .wl_id_flags = WORK_INTERVAL_WORKLOAD_ID_RT_ALLOWED, }, }; #endif // !TARGET_OS_SIMULATOR static os_workgroup_type_t -_os_workgroup_lookup_type_from_workload_id(const char *workload_id) +_os_workgroup_lookup_type_from_workload_id(const char *workload_id, + uint32_t *out_workload_id_flags, + os_workgroup_type_t *out_workload_compatibility_type) { os_workgroup_type_t workload_type = OS_WORKGROUP_TYPE_DEFAULT; + os_workgroup_type_t workload_compatibility_type = OS_WORKGROUP_TYPE_DEFAULT; + uint32_t workload_id_flags = 0; if (!workload_id) { DISPATCH_CLIENT_CRASH(0, "Workload identifier must not be NULL"); @@ -454,19 +596,29 @@ _os_workgroup_lookup_type_from_workload_id(const char *workload_id) for (size_t i = 0; i < countof(_os_workgroup_workload_id_table); i++) { if (!strcasecmp(workload_id, _os_workgroup_workload_id_table[i].wl_id)){ workload_type = _os_workgroup_workload_id_table[i].wl_type; + workload_compatibility_type = + _os_workgroup_workload_id_table[i].wl_compatibility_type; + if (_os_workgroup_type_is_default_type(workload_compatibility_type)){ + workload_compatibility_type = workload_type; + } + workload_id_flags = WORK_INTERVAL_WORKLOAD_ID_HAS_ID; // entry found + workload_id_flags |= _os_workgroup_workload_id_table[i].wl_id_flags; + 
workload_id_flags &= ~WORK_INTERVAL_WORKLOAD_ID_RT_CRITICAL; if (_os_workgroup_type_is_default_type(workload_type)) { DISPATCH_INTERNAL_CRASH(i, "Invalid workload ID type"); } break; } } -#if OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID - if (_os_workgroup_type_is_default_type(workload_type)) { - _dispatch_log("WARNING: os_workgroup: Unknown workload ID \"%s\"", - workload_id); + if (!workload_id_flags) { + /* Entry not found in the userspace config table, but mark the flags as + * having seen an ID anyway since it may be present in the kernel config + * table. */ + workload_id_flags = WORK_INTERVAL_WORKLOAD_ID_HAS_ID; } -#endif #endif // !TARGET_OS_SIMULATOR + *out_workload_id_flags = workload_id_flags; + *out_workload_compatibility_type = workload_compatibility_type; return workload_type; } @@ -477,8 +629,9 @@ _os_workgroup_workload_id_attr_resolve(const char *workload_id, { /* N.B: expects to be called with the attr pointer returned by * _os_workgroup_client_attr_resolve() (i.e. a mutable local copy) */ - os_workgroup_type_t wl_type = - _os_workgroup_lookup_type_from_workload_id(workload_id); + os_workgroup_type_t wl_compatibility_type = OS_WORKGROUP_TYPE_DEFAULT; + os_workgroup_type_t wl_type = _os_workgroup_lookup_type_from_workload_id( + workload_id, &attr->internal_wl_id_flags, &wl_compatibility_type); if (_os_workgroup_type_is_default_type(wl_type)) { /* Unknown workload ID, fallback to attribute type */ return attr; @@ -487,6 +640,9 @@ _os_workgroup_workload_id_attr_resolve(const char *workload_id, * Use workload ID type as the type implied by the default attribute */ if (attr->wg_type == default_attr->wg_type) { attr->wg_type = wl_type; + } else if (attr->wg_type == wl_compatibility_type) { + /* Allow type override from the table if compatibility type matches */ + attr->wg_type = wl_type; } else if (wl_type != attr->wg_type) { /* Workload ID and attribute type mismatch */ return NULL; @@ -496,14 +652,21 @@ _os_workgroup_workload_id_attr_resolve(const char 
*workload_id, static inline bool _os_workgroup_workload_id_is_valid_for_wi_type(const char *workload_id, - uint32_t wi_type) + uint32_t wi_type, uint32_t *out_workload_id_flags) { - os_workgroup_type_t wl_type = - _os_workgroup_lookup_type_from_workload_id(workload_id); + os_workgroup_type_t wl_compatibility_type = OS_WORKGROUP_TYPE_DEFAULT; + os_workgroup_type_t wl_type = _os_workgroup_lookup_type_from_workload_id( + workload_id, out_workload_id_flags, &wl_compatibility_type); if (_os_workgroup_type_is_default_type(wl_type)) { /* Unknown workload ID, nothing to match */ return true; } + if (_wg_type_to_wi_type(wl_compatibility_type) == wi_type) { + /* Check if the compatibility type matches the passed in type of + * port or workgroup object. */ + return true; + } + /* Require matching workinterval types between workload ID and passed in * type of port or workgroup object. */ if (_wg_type_to_wi_type(wl_type) != wi_type) { @@ -529,6 +692,12 @@ _os_workgroup_set_name(os_workgroup_t wg, const char *name) } } wg->name = name; + + uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); + if (_os_workgroup_has_backing_workinterval(wg) && + _os_workgroup_is_configurable(wg_state)) { + _os_workgroup_set_work_interval_name(wg, name); + } } static inline bool @@ -651,6 +820,20 @@ _workgroup_init(void) #pragma mark Private functions +int +os_workgroup_interval_data_set_flags(os_workgroup_interval_data_t data, + os_workgroup_interval_data_flags_t flags) +{ + int ret = 0; + if (_os_workgroup_client_interval_data_is_valid(data) && + (flags & ~OS_WORKGROUP_INTERVAL_DATA_FLAGS_MASK) == 0) { + data->wgid_flags = flags; + } else { + ret = EINVAL; + } + return ret; +} + int os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, os_workgroup_interval_type_t interval_type) @@ -724,7 +907,8 @@ os_workgroup_create(const char *name, os_workgroup_attr_t attr) return NULL; } - wi = _os_workgroup_create_work_interval(attr); + mach_port_t port = MACH_PORT_NULL; + wi = 
_os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { return NULL; } @@ -732,6 +916,7 @@ os_workgroup_create(const char *name, os_workgroup_attr_t attr) wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); wg->wi = wi; + wg->port = port; wg->wg_state = OS_WORKGROUP_OWNER; wg->wg_type = attr->wg_type; @@ -773,7 +958,8 @@ os_workgroup_interval_create(const char *name, os_clockid_t clock, return NULL; } - wi = _os_workgroup_create_work_interval(attr); + mach_port_t port = MACH_PORT_NULL; + wi = _os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { return NULL; } @@ -781,6 +967,7 @@ os_workgroup_interval_create(const char *name, os_clockid_t clock, wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, sizeof(struct os_workgroup_interval_s)); wgi->wi = wi; + wgi->port = port; wgi->clock = clock; wgi->wii = work_interval_instance_alloc(wi); wgi->wii_lock = OS_UNFAIR_LOCK_INIT; @@ -845,7 +1032,8 @@ os_workgroup_create_with_workload_id(const char * name, return NULL; } - wi = _os_workgroup_create_work_interval(attr); + mach_port_t port = MACH_PORT_NULL; + wi = _os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { return NULL; } @@ -853,9 +1041,16 @@ os_workgroup_create_with_workload_id(const char * name, wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); wg->wi = wi; + wg->port = port; wg->wg_state = OS_WORKGROUP_OWNER; wg->wg_type = attr->wg_type; + int ret = _os_workgroup_set_work_interval_workload_id(wg, workload_id, + attr->internal_wl_id_flags); + if (ret) { + _os_object_release(wg->_as_os_obj); + return NULL; + } _os_workgroup_set_name(wg, name); return wg; @@ -921,7 +1116,8 @@ os_workgroup_interval_create_with_workload_id(const char *name, return NULL; } - wi = _os_workgroup_create_work_interval(attr); + mach_port_t port = MACH_PORT_NULL; + wi = _os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { return NULL; } @@ -929,12 
+1125,20 @@ os_workgroup_interval_create_with_workload_id(const char *name, wgi = (os_workgroup_interval_t) _os_object_alloc(WORKGROUP_INTERVAL_CLASS, sizeof(struct os_workgroup_interval_s)); wgi->wi = wi; + wgi->port = port; wgi->clock = clock; wgi->wii = work_interval_instance_alloc(wi); wgi->wii_lock = OS_UNFAIR_LOCK_INIT; wgi->wg_type = attr->wg_type; wgi->wg_state = OS_WORKGROUP_OWNER; + int ret = _os_workgroup_set_work_interval_workload_id(wgi->_as_wg, + workload_id, attr->internal_wl_id_flags); + if (ret) { + _os_object_release(wgi->_as_os_obj); + return NULL; + } + _os_workgroup_set_name(wgi->_as_wg, name); return wgi; @@ -953,8 +1157,6 @@ os_workgroup_leave_self(os_workgroup_t wg, os_workgroup_join_token_t token) return os_workgroup_leave(wg, token); } -#pragma mark Public functions - os_workgroup_parallel_t os_workgroup_parallel_create(const char *name, os_workgroup_attr_t attr) { @@ -1029,7 +1231,6 @@ os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) os_assert(mach_port_out != NULL); *mach_port_out = MACH_PORT_NULL; - int rv = 0; uint64_t wg_state = os_atomic_load(&wg->wg_state, relaxed); if (wg_state & OS_WORKGROUP_CANCELED) { @@ -1040,19 +1241,14 @@ os_workgroup_copy_port(os_workgroup_t wg, mach_port_t *mach_port_out) return EINVAL; } - if (_os_workgroup_is_configurable(wg_state)) { - rv = work_interval_copy_port(wg->wi, mach_port_out); - if (rv < 0) { - rv = errno; - } - return rv; - } - kern_return_t kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); - os_assumes(kr == KERN_SUCCESS); - *mach_port_out = wg->port; - return rv; + if (dispatch_assume(kr == KERN_SUCCESS)) { + *mach_port_out = wg->port; + } else { + return ENOMEM; + } + return 0; } os_workgroup_t @@ -1069,17 +1265,20 @@ os_workgroup_create_with_port(const char *name, mach_port_t port) return NULL; } + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + if (!dispatch_assume(kr == KERN_SUCCESS)) { + 
return NULL; + } + os_workgroup_t wg = NULL; wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); - _os_workgroup_set_name(wg, name); - - kern_return_t kr; - kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); - os_assumes(kr == KERN_SUCCESS); wg->port = port; wg->wg_type = wg_type; + _os_workgroup_set_name(wg, name); + return wg; } @@ -1102,24 +1301,37 @@ os_workgroup_create_with_workload_id_and_port(const char *name, } /* Validate workload ID is compatible with port workinterval type */ - if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, wi_type)) { + uint32_t wl_id_flags; + if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, wi_type, + &wl_id_flags)) { _os_workgroup_error_log("Mismatched workload ID and port " "interval type: %s vs %hd", workload_id, wg_type); errno = EINVAL; return NULL; } + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); + if (!dispatch_assume(kr == KERN_SUCCESS)) { + _os_workgroup_error_log("Invalid mach port 0x%x", port); + return NULL; + } + os_workgroup_t wg = NULL; wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); - _os_workgroup_set_name(wg, name); - - kern_return_t kr; - kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, 1); - os_assumes(kr == KERN_SUCCESS); wg->port = port; wg->wg_type = wg_type; + ret = _os_workgroup_set_work_interval_workload_id(wg, workload_id, + wl_id_flags); + if (ret && ret != EALREADY) { + _os_object_release(wg->_as_os_obj); + return NULL; + } + + _os_workgroup_set_name(wg, name); + return wg; } @@ -1136,40 +1348,24 @@ os_workgroup_create_with_workgroup(const char *name, os_workgroup_t wg) new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); - _os_workgroup_set_name(new_wg, name); new_wg->wg_type = wg->wg_type; /* We intentionally don't copy the context */ if 
(_os_workgroup_has_backing_workinterval(wg)) { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); - if (_os_workgroup_is_configurable(wg_state)) { - int rv = work_interval_copy_port(wg->wi, &new_wg->port); - - if (rv < 0) { - goto error; - } - } else { - kern_return_t kr; - kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); - - if (kr != KERN_SUCCESS) { - goto error; - } - new_wg->port = wg->port; + if (kr != KERN_SUCCESS) { + free(new_wg); + return NULL; } + new_wg->port = wg->port; } - return new_wg; - -error: - wg_state = os_atomic_load(&new_wg->wg_state, relaxed); - if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { - free((void *)new_wg->name); - } - free(new_wg); + _os_workgroup_set_name(new_wg, name); - return NULL; + return new_wg; } os_workgroup_t @@ -1184,8 +1380,9 @@ os_workgroup_create_with_workload_id_and_workgroup(const char *name, } /* Validate workload ID is compatible with workgroup workinterval type */ + uint32_t wl_id_flags; if (!_os_workgroup_workload_id_is_valid_for_wi_type(workload_id, - _wg_type_to_wi_type(wg->wg_type))) { + _wg_type_to_wi_type(wg->wg_type), &wl_id_flags)) { _os_workgroup_error_log("Mismatched workload ID and workgroup " "interval type: %s vs %hd", workload_id, wg->wg_type); errno = EINVAL; @@ -1196,42 +1393,32 @@ os_workgroup_create_with_workload_id_and_workgroup(const char *name, new_wg = (os_workgroup_t) _os_object_alloc(WORKGROUP_CLASS, sizeof(struct os_workgroup_s)); - _os_workgroup_set_name(new_wg, name); new_wg->wg_type = wg->wg_type; /* We intentionally don't copy the context */ if (_os_workgroup_has_backing_workinterval(wg)) { + kern_return_t kr; + kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); - if (_os_workgroup_is_configurable(wg_state)) { - int rv = work_interval_copy_port(wg->wi, &new_wg->port); - - if (rv < 0) { - _os_workgroup_error_log("Invalid workgroup work_interval"); - goto error; - } - } else { - 
kern_return_t kr; - kr = mach_port_mod_refs(mach_task_self(), wg->port, MACH_PORT_RIGHT_SEND, 1); + if (kr != KERN_SUCCESS) { + _os_workgroup_error_log("Invalid workgroup port 0x%x", wg->port); + free(new_wg); + return NULL; + } + new_wg->port = wg->port; - if (kr != KERN_SUCCESS) { - _os_workgroup_error_log("Invalid workgroup port 0x%x", wg->port); - goto error; - } - new_wg->port = wg->port; + int ret = _os_workgroup_set_work_interval_workload_id(new_wg, + workload_id, wl_id_flags); + if (ret && ret != EALREADY) { + _os_object_release(new_wg->_as_os_obj); + return NULL; } } - return new_wg; - -error: - wg_state = os_atomic_load(&new_wg->wg_state, relaxed); - if (wg_state & OS_WORKGROUP_LABEL_NEEDS_FREE) { - free((void *)new_wg->name); - } - free(new_wg); + _os_workgroup_set_name(new_wg, name); - return NULL; + return new_wg; } int @@ -1436,7 +1623,7 @@ os_workgroup_testcancel(os_workgroup_t wg) int os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, - uint64_t deadline, os_workgroup_interval_data_t __unused data) + uint64_t deadline, os_workgroup_interval_data_t data) { os_workgroup_t cur_wg = _os_workgroup_get_current(); if (cur_wg != wgi->_as_wg) { @@ -1456,6 +1643,7 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, return errno; } + uint64_t complexity = _os_workgroup_interval_data_complexity(data); int rv = 0; uint64_t old_state, new_state; os_atomic_rmw_loop(&wgi->wg_state, old_state, new_state, relaxed, { @@ -1467,6 +1655,10 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, rv = EPERM; os_atomic_rmw_loop_give_up(break); } + if (complexity > 0 && !_os_workgroup_has_workload_id(old_state)) { + errno = EINVAL; + os_atomic_rmw_loop_give_up(break); + } new_state = old_state | OS_WORKGROUP_INTERVAL_STARTED; }); @@ -1481,6 +1673,7 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, work_interval_instance_set_start(wii, start); work_interval_instance_set_deadline(wii, 
deadline); + work_interval_instance_set_complexity(wii, complexity); rv = work_interval_instance_start(wii); if (rv != 0) { /* If we failed to start the interval in the kernel, clear the started @@ -1495,7 +1688,7 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, int os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, - os_workgroup_interval_data_t __unused data) + os_workgroup_interval_data_t data) { os_workgroup_t cur_wg = _os_workgroup_get_current(); if (cur_wg != wgi->_as_wg) { @@ -1510,12 +1703,18 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, return errno; } + uint64_t complexity = _os_workgroup_interval_data_complexity(data); uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); if (!_os_workgroup_is_configurable(wg_state)) { os_unfair_lock_unlock(&wgi->wii_lock); errno = EPERM; return errno; } + if (complexity > 0 && !_os_workgroup_has_workload_id(wg_state)) { + os_unfair_lock_unlock(&wgi->wii_lock); + errno = EINVAL; + return errno; + } /* Note: We allow updating and finishing an workgroup_interval that has * already started even if the workgroup has been cancelled - since @@ -1529,6 +1728,7 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, work_interval_instance_t wii = wgi->wii; work_interval_instance_set_deadline(wii, deadline); + work_interval_instance_set_complexity(wii, complexity); int rv = work_interval_instance_update(wii); if (rv != 0) { rv = errno; @@ -1540,7 +1740,7 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, int os_workgroup_interval_finish(os_workgroup_interval_t wgi, - os_workgroup_interval_data_t __unused data) + os_workgroup_interval_data_t data) { os_workgroup_t cur_wg = _os_workgroup_get_current(); if (cur_wg != wgi->_as_wg) { @@ -1555,12 +1755,18 @@ os_workgroup_interval_finish(os_workgroup_interval_t wgi, return errno; } + uint64_t complexity = 
_os_workgroup_interval_data_complexity(data); uint64_t wg_state = os_atomic_load(&wgi->wg_state, relaxed); if (!_os_workgroup_is_configurable(wg_state)) { os_unfair_lock_unlock(&wgi->wii_lock); errno = EPERM; return errno; } + if (complexity > 0 && !_os_workgroup_has_workload_id(wg_state)) { + os_unfair_lock_unlock(&wgi->wii_lock); + errno = EINVAL; + return errno; + } if (!(wg_state & OS_WORKGROUP_INTERVAL_STARTED)) { os_unfair_lock_unlock(&wgi->wii_lock); errno = EINVAL; @@ -1576,6 +1782,7 @@ os_workgroup_interval_finish(os_workgroup_interval_t wgi, } work_interval_instance_set_finish(wii, current_finish); + work_interval_instance_set_complexity(wii, complexity); int rv = work_interval_instance_finish(wii); if (rv != 0) { rv = errno; diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h index c7b2000e6..0366d9c77 100644 --- a/src/workgroup_internal.h +++ b/src/workgroup_internal.h @@ -55,10 +55,6 @@ void _workgroup_init(void); #define OS_WORKGROUP_LOG_ERRORS 1 #endif -#if 1 || DISPATCH_DEBUG // log workload_id lookup failures by default for now -#define OS_WORKGROUP_LOG_UKNOWN_WORKLOAD_ID 1 -#endif - #if OS_WORKGROUP_LOG_ERRORS #define _os_workgroup_error_log(m, ...) 
\ _dispatch_log("BUG IN CLIENT of %s: " m, __func__, ##__VA_ARGS__); @@ -86,7 +82,8 @@ struct os_workgroup_attr_s { uint32_t wg_attr_flags; os_workgroup_type_t wg_type; uint16_t empty; - uint32_t reserved[13]; + uint32_t internal_wl_id_flags; + uint32_t reserved[12]; }; #define _OS_WORKGROUP_JOIN_TOKEN_SIG_INIT 0x4D5F5A58 @@ -98,9 +95,15 @@ struct os_workgroup_join_token_s { uint64_t reserved[2]; }; +#define OS_WORKGROUP_INTERVAL_DATA_FLAGS_MASK ( \ + OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_DEFAULT | \ + OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_HIGH \ + ) + struct os_workgroup_interval_data_s { uint32_t sig; - uint32_t reserved[14]; + uint32_t wgid_flags; + uint32_t reserved[13]; }; /* This is lazily allocated if the arena is used by clients */ @@ -116,7 +119,7 @@ typedef struct os_workgroup_arena_s { #define OS_WORKGROUP_CANCELED (1 << 1) #define OS_WORKGROUP_LABEL_NEEDS_FREE (1 << 2) #define OS_WORKGROUP_INTERVAL_STARTED (1 << 3) - +#define OS_WORKGROUP_HAS_WORKLOAD_ID (1 << 4) /* Note that os_workgroup_type_t doesn't have to be in the wg_atomic_flags, we * just put it there to pack the struct. 
@@ -191,10 +194,8 @@ _wg_arena(_os_workgroup_atomic_flags wgaf) OS_OBJECT_STRUCT_HEADER(workgroup); \ const char *name; \ uint64_t volatile wg_state; \ - union { \ - work_interval_t wi; \ - mach_port_t port; \ - }; \ + work_interval_t wi; \ + mach_port_t port; \ OS_WORKGROUP_HEADER_INTERNAL; struct os_workgroup_s { diff --git a/xcodescripts/install-headers.sh b/xcodescripts/install-headers.sh index 212bf74ab..6530fdef5 100755 --- a/xcodescripts/install-headers.sh +++ b/xcodescripts/install-headers.sh @@ -42,4 +42,3 @@ cp -X "${SCRIPT_INPUT_FILE_11}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_12}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_13}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" cp -X "${SCRIPT_INPUT_FILE_14}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" -cp -X "${SCRIPT_INPUT_FILE_15}" "${DSTROOT}${OS_PRIVATE_HEADERS_FOLDER_PATH}" From a641558f447659d90ad002ef0ee6f0212273fbfc Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Tue, 13 Jun 2023 23:26:34 +0000 Subject: [PATCH 16/18] libdispatch-1415.100.11 Imported from libdispatch-1415.100.11.tar.gz --- config/config.h | 4 + libdispatch.xcodeproj/project.pbxproj | 20 +++ src/event/event.c | 4 +- src/inline_internal.h | 10 +- src/queue.c | 171 ++++++++++++++++++-------- src/queue_internal.h | 3 + 6 files changed, 158 insertions(+), 54 deletions(-) diff --git a/config/config.h b/config/config.h index c1ef8aaeb..945e488b8 100644 --- a/config/config.h +++ b/config/config.h @@ -77,6 +77,10 @@ you don't. */ #define HAVE_DECL_VQ_DESIRED_DISK 1 +/* Define to 1 if you have the declaration of `VQ_FREE_SPACE_CHANGE`, and to 0 + * if you don't */ +#define HAVE_DECL_VQ_FREE_SPACE_CHANGE 1 + /* Define to 1 if you have the header file. 
*/ #define HAVE_DLFCN_H 1 diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index b2c487ffb..f0ff3996a 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -2789,6 +2789,11 @@ isa = XCBuildConfiguration; baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; buildSettings = { + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-Xclang", + "-reorder-cxx-includes-hack", + ); }; name = Release; }; @@ -3028,6 +3033,11 @@ isa = XCBuildConfiguration; baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; buildSettings = { + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-Xclang", + "-reorder-cxx-includes-hack", + ); }; name = Release; }; @@ -3035,6 +3045,11 @@ isa = XCBuildConfiguration; baseConfigurationReference = E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */; buildSettings = { + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-Xclang", + "-reorder-cxx-includes-hack", + ); }; name = Debug; }; @@ -3049,6 +3064,11 @@ isa = XCBuildConfiguration; baseConfigurationReference = E40041AA125D705F0022B135 /* libdispatch-resolver.xcconfig */; buildSettings = { + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-Xclang", + "-reorder-cxx-includes-hack", + ); }; name = Debug; }; diff --git a/src/event/event.c b/src/event/event.c index b908419d2..edda80a23 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -595,7 +595,9 @@ _dispatch_timer_heap_max_target_before(dispatch_timer_heap_t dth, uint64_t limit // skip subtree since none of the targets below can be before limit idx = _dispatch_timer_heap_walk_skip(idx, count); } else { - target = tmp; + if (tmp > target) { + target = tmp; + } idx = _dispatch_timer_heap_walk_next(idx, count); } } diff --git a/src/inline_internal.h b/src/inline_internal.h index 4c4d93605..399a200de 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -705,6 
+705,13 @@ _dispatch_queue_label_needs_free(dispatch_queue_class_t dqu) return _dispatch_queue_atomic_flags(dqu) & DQF_LABEL_NEEDS_FREE; } +DISPATCH_ALWAYS_INLINE +static inline bool +_dispatch_queue_targets_special_wlh(dispatch_queue_class_t dqu) +{ + return _dispatch_queue_atomic_flags(dqu) & DQF_TARGET_SPECIAL_WLH; +} + DISPATCH_ALWAYS_INLINE static inline dispatch_invoke_flags_t _dispatch_queue_autorelease_frequency(dispatch_queue_class_t dqu) @@ -1845,8 +1852,9 @@ _dispatch_root_queue_push_inline(dispatch_queue_global_t dq, DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_class_t dq, - uint64_t dq_state) + uint64_t dq_state, dispatch_wakeup_flags_t flags) { + dispatch_assert(flags & DISPATCH_EVENT_LOOP_CONSUME_2); #if DISPATCH_USE_KEVENT_WORKLOOP if (likely(_dq_state_is_base_wlh(dq_state))) { _dispatch_trace_runtime_event(worker_request, dq._dq, 1); diff --git a/src/queue.c b/src/queue.c index 9858c166a..97542a344 100644 --- a/src/queue.c +++ b/src/queue.c @@ -1033,6 +1033,7 @@ _dispatch_sync_function_invoke(dispatch_queue_class_t dq, void *ctxt, _dispatch_sync_function_invoke_inline(dq, ctxt, func); } +// stop_dq == NULL implies we are unlocking the entire hierarchy DISPATCH_NOINLINE static void _dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq, @@ -1442,8 +1443,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, if (tq) { if (likely((old_state ^ new_state) & enqueue)) { dispatch_assert(_dq_state_is_enqueued(new_state)); - dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); - return _dispatch_queue_push_queue(tq, dq, new_state); + return _dispatch_queue_push_queue(tq, dq, new_state, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS // when doing sync to async handoff @@ -2023,10 +2023,11 @@ _dispatch_async_and_wait_invoke_and_complete_recurse(dispatch_queue_t dq, DISPATCH_NOINLINE static void -_dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t 
top_dc_flags, +_dispatch_async_and_wait_f_slow(dispatch_queue_t top_dq, uintptr_t top_dc_flags, dispatch_sync_context_t dsc, dispatch_queue_t tq) { - /* dc_other is an in-out parameter. + /* dc_other is an in-out parameter used to communicate information between the + * enqueuer and the drainer. * * As an in-param, it specifies the top queue on which the blocking * primitive is called. @@ -2039,19 +2040,26 @@ _dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags, * If the continuation is to be invoked on another thread - for * async_and_wait, or we ran on a thread bound main queue - then someone * already called _dispatch_async_and_wait_invoke which invoked the block - * already. dc_other as an outparam here tells the enqueuer the queue up - * till which the enqueuer got the drain lock so that we know what to unlock - * on the way out. This is the case whereby the enqueuer owns part of the - * locks in the queue hierachy (but not all). + * already. + * + * dc_other as an outparam here tells the enqueuer that it got the drain lock + * starting from the top_dq up until but not including the queue in dc_other. + * This way the enqueuer knows which queues to unlock on the way out in + * dispatch_sync_complete_recurse since it owns locks in part of the queue + * hierachy (but not necessarily all). * * Case 2: * If the continuation is to be invoked on the enqueuing thread - because * we were contending with another sync or async_and_wait - then enqueuer - * return from _WAIT_FOR_QUEUE without having invoked the block. The - * enqueuer has had the locks for the rest of the queue hierachy handed off - * to it so dc_other specifies the queue up till which it has the locks - * which in this case, is up till the bottom queue in the hierachy. So it - * needs to unlock everything up till the bottom queue, on the way out. + * return from _WAIT_FOR_QUEUE without having invoked the block. 
+ * + * The enqueuer has had the locks for the rest of the queue hierachy handed + * off to it. + * + * dc_other here ends up pointing to the bottom queue in the hierarchy + * which the enqueuer has the drain lock for, and which it needs to then use + * to "fake" the wlh TSD. The enqueuer ends up recursing back out of the + * entire hierarchy and unlocking the whole thing. */ __DISPATCH_WAIT_FOR_QUEUE__(dsc, tq); @@ -2059,12 +2067,12 @@ _dispatch_async_and_wait_f_slow(dispatch_queue_t dq, uintptr_t top_dc_flags, if (unlikely(dsc->dsc_func == NULL)) { // see _dispatch_async_and_wait_invoke dispatch_queue_t stop_dq = dsc->dc_other; - return _dispatch_sync_complete_recurse(dq, stop_dq, top_dc_flags); + return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags); } // see _dispatch_*_redirect_or_wake dispatch_queue_t bottom_q = dsc->dc_other; - return _dispatch_async_and_wait_invoke_and_complete_recurse(dq, dsc, + return _dispatch_async_and_wait_invoke_and_complete_recurse(top_dq, dsc, bottom_q, top_dc_flags); } @@ -2073,12 +2081,13 @@ static inline bool _dispatch_async_and_wait_should_always_async(dispatch_queue_class_t dqu, uint64_t dq_state) { - // If the queue is anchored at a pthread root queue for which we can't - // mirror attributes, then we need to take the async path. - return !_dq_state_is_inner_queue(dq_state) && - !_dispatch_is_in_root_queues_array(dqu._dq->do_targetq); + // If the queue is anchored at a workloop with special properties which we + // can't mimic, or if it's targetting a pthread root queue, always async up + // front. 
+ return _dispatch_queue_targets_special_wlh(dqu) || + (!_dq_state_is_inner_queue(dq_state) && + !_dispatch_is_in_root_queues_array(dqu._dq->do_targetq)); } - DISPATCH_ALWAYS_INLINE static inline bool _dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, @@ -2086,12 +2095,11 @@ _dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, { uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { - // Remove the async_and_wait flag but drive down the slow path so that - // we do the synchronous wait. We are guaranteed that dq is the base - // queue. + // We want to async away the dsc which means that we will go through case + // (1) of _dispatch_async_and_wait_f_slow. // - // We're falling down to case (1) of _dispatch_async_and_wait_f_slow so - // set dc_other to dq + // Indicate to the async guy the dc_other till which enqueuer has the drain + // lock. dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; dsc->dc_other = dq; return false; @@ -2505,19 +2513,36 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) _dispatch_event_loop_leave_immediate(new_state); } } + if (!dx_hastypeflag(tq, QUEUE_ROOT)) { - dispatch_queue_flags_t clear = 0, set = DQF_TARGETED; + dispatch_queue_flags_t tq_clear = 0, tq_set = DQF_TARGETED; + dispatch_queue_flags_t dq_set = 0; + if (dx_metatype(tq) == _DISPATCH_WORKLOOP_TYPE) { - clear |= DQF_MUTABLE; + tq_clear |= DQF_MUTABLE; + + // Workloop is specially configured - annotate dq as targetting such a wlh + if (!_dispatch_is_in_root_queues_array(tq->do_targetq)) { + dq_set |= DQF_TARGET_SPECIAL_WLH; + } #if !DISPATCH_ALLOW_NON_LEAF_RETARGET } else { - clear |= DQF_MUTABLE; + tq_clear |= DQF_MUTABLE; #endif + } else { + // Target queue is not a workloop - inherit information about the + // hierarchy from the immediate thing we are targetting + dq_set |= (_dispatch_queue_atomic_flags(tq) & DQF_TARGET_SPECIAL_WLH); } - if (clear) { - 
_dispatch_queue_atomic_flags_set_and_clear(tq, set, clear); + + if (tq_clear) { + _dispatch_queue_atomic_flags_set_and_clear(tq, tq_set, tq_clear); } else { - _dispatch_queue_atomic_flags_set(tq, set); + _dispatch_queue_atomic_flags_set(tq, tq_set); + } + + if (dq_set) { + _dispatch_queue_atomic_flags_set(dq, dq_set); } } } @@ -3866,6 +3891,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, { struct dispatch_object_s *dc = dic->dic_barrier_waiter; dispatch_qos_t qos = dic->dic_barrier_waiter_bucket; + dispatch_wakeup_flags_t wakeup_flags = DISPATCH_WAKEUP_CONSUME_2; if (dc) { dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc; dsc->dsc_from_async = true; @@ -3874,10 +3900,10 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, owned &= DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_ENQUEUED_ON_MGR; if (qos) { return _dispatch_workloop_drain_barrier_waiter(upcast(dq)._dwl, - dc, qos, DISPATCH_WAKEUP_CONSUME_2, owned); + dc, qos, wakeup_flags, owned); } return _dispatch_lane_drain_barrier_waiter(upcast(dq)._dl, dc, - DISPATCH_WAKEUP_CONSUME_2, owned); + wakeup_flags, owned); } uint64_t old_state, new_state, enqueued = DISPATCH_QUEUE_ENQUEUED; @@ -3902,7 +3928,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, } if ((old_state ^ new_state) & enqueued) { dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); + return _dispatch_queue_push_queue(tq, dq, new_state, wakeup_flags); } return _dispatch_release_2_tailcall(dq); } @@ -4613,8 +4639,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, if (target) { if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) { dispatch_assert(_dq_state_is_enqueued(new_state)); - dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2); - return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state); + return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS // when doing sync to async 
handoff @@ -4706,7 +4731,7 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, DISPATCH_CLIENT_CRASH(old_state, "Waking up an inactive workloop"); } if (likely((old_state ^ new_state) & DISPATCH_QUEUE_ENQUEUED)) { - return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state); + return _dispatch_queue_push_queue(dwl->do_targetq, dwl, new_state, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (likely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { @@ -4736,6 +4761,22 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, _dispatch_workloop_push_update_prev(dwl, qos, prev, dc); if (likely(!os_mpsc_push_was_empty(prev))) return; + // similar to _dispatch_async_and_wait_should_always_async() + if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && + !_dispatch_is_in_root_queues_array(dwl->do_targetq)) { + // We want to async away the dsc which means that we will go through case + // (1) of _dispatch_async_and_wait_f_slow. + // + // Indicate to the async guy the dc_other till which enqueuer has the drain + // lock + dsc->dc_other = dwl; + dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; + + _dispatch_retain_2_unsafe(dwl); + return _dispatch_workloop_wakeup(dwl, qos, DISPATCH_WAKEUP_MAKE_DIRTY | + DISPATCH_WAKEUP_CONSUME_2); + } + uint64_t set_owner_and_set_full_width_and_in_barrier = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | @@ -4756,12 +4797,6 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, } }); - if ((dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) && - _dispatch_async_and_wait_should_always_async(dwl, new_state)) { - dsc->dc_other = dwl; - dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; - } - if (_dq_state_is_base_wlh(new_state) && dsc->dc_data != DISPATCH_WLH_ANON) { dsc->dsc_wlh_was_first = (dsc->dsc_waiter == _dispatch_tid_self()); } @@ -4774,8 +4809,9 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, if (dsc->dsc_wlh_was_first && _dispatch_workloop_get_head(dwl, qos) == dc) { 
dsc->dsc_wlh_self_wakeup = true; if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects - // dc_other to be the bottom queue of the graph + // We managed to get the drain lock of the dwl above so update dsc so + // that we can unlock it on the way out. Case (2) of + // _dispatch_async_and_wait_f_slow dsc->dc_other = dwl; } _dispatch_workloop_pop_head(dwl, qos, dc); @@ -5207,6 +5243,36 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, }); target = DISPATCH_QUEUE_WAKEUP_TARGET; + if (((old_state ^ new_state) & enqueue) && + !(flags & DISPATCH_WAKEUP_CONSUME_2)) { + // Scenario: + // DQ targetting TQ + // + // Thread 1: + // Has lock on DQ and TQ in contended sync + // + // Thread 2: + // Has pushed async work onto DQ + // Causes a wakeup of DQ which sets enqueued bit, dirty bit and max QoS + // Enqueue of DQ on TQ has not yet happened + // + // Thread 1: + // Tries to unlock DQ, sees follow on work in DQ + // Redrives max QoS on DQ + // Causes wakeup of TQ at new max QoS but TQ is empty with max QoS = UN + // (due to thread 2 preemption) + // + // dx_wakeup(TQ, qos, 0) probe of dq_items_tail fails. We go down QoS + // wakeup path for TQ. + // + // TQ's enqueued = 1 (due to case listed above) and we make TR for TQ + // and consume a +2 on TQ that wasn't taken. 
+ // + // rdar://103191389 + _dispatch_retain_2(dq); + flags |= DISPATCH_WAKEUP_CONSUME_2; + } + #endif // HAVE_PTHREAD_WORKQUEUE_QOS } else { goto done; @@ -5228,7 +5294,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, tq = target; } dispatch_assert(_dq_state_is_enqueued(new_state)); - return _dispatch_queue_push_queue(tq, dq, new_state); + return _dispatch_queue_push_queue(tq, dq, new_state, flags); } #if HAVE_PTHREAD_WORKQUEUE_QOS if (unlikely((old_state ^ new_state) & DISPATCH_QUEUE_MAX_QOS_MASK)) { @@ -5289,8 +5355,9 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, if (unlikely(_dispatch_queue_push_item(dq, dsc))) { if (unlikely(_dispatch_lane_push_waiter_should_wakeup(dq, dsc))) { - // If this returns true, we know that we are pushing onto the base - // queue + // We are going through an async path from now on, indicate the last queue + // till which we got the sync drain lock. This is case (1) of + // _dispatch_async_and_wait_f_slow dsc->dc_flags &= ~DC_FLAG_ASYNC_AND_WAIT; dsc->dc_other = dq; return dx_wakeup(dq, qos, DISPATCH_WAKEUP_MAKE_DIRTY); @@ -5332,8 +5399,8 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, if (dsc->dsc_wlh_was_first && dq->dq_items_head == dc) { dsc->dsc_wlh_self_wakeup = true; if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - // We're in case (2) of _dispatch_async_and_wait_f_slow() which expects - // dc_other to be the bottom queue of the graph + // Update the dc_other since we've actually gotten this queue's drain + // lock as well - case (2) of _dispatch_async_and_wait_f_slow dsc->dc_other = dq; } _dispatch_queue_pop_head(dq, dc); @@ -8099,8 +8166,8 @@ _dispatch_cooperative_root_queue_init_fallback(dispatch_queue_global_t dq) * concurrent queue */ width_per_cooperative_queue = 1; } else { - /* Concurrent queue with limited width */ - width_per_cooperative_queue = MAX(max_cpus/DISPATCH_QOS_NBUCKETS, 1); + /* Concurrent queue with limited width of 
max CPUs */ + width_per_cooperative_queue = max_cpus; } dispatch_priority_t pri = dq->dq_priority; diff --git a/src/queue_internal.h b/src/queue_internal.h index a7425cd35..d20da86cc 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -46,6 +46,9 @@ DISPATCH_OPTIONS(dispatch_queue_flags, uint32_t, DQF_LABEL_NEEDS_FREE = 0x00200000, // queue label was strdup()ed DQF_MUTABLE = 0x00400000, DQF_RELEASED = 0x00800000, // xref_cnt == -1 + // queue is targetting a specially configured wlh at the bottom. DQF_MUTABLE + // must be false. + DQF_TARGET_SPECIAL_WLH = 0x01000000, // // Only applies to sources From e8b6b45992bdeb06e094571fc2b9c0646780a41b Mon Sep 17 00:00:00 2001 From: Apple OSS Distributions <91980991+AppleOSSDistributions@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:13:48 +0000 Subject: [PATCH 17/18] libdispatch-1462.0.4 Imported from libdispatch-1462.0.4.tar.gz --- config/config.h | 2 +- dispatch/Dispatch.apinotes | 2 + dispatch/base.h | 8 + dispatch/block.h | 26 ++- dispatch/darwin/module.modulemap | 5 +- dispatch/data.h | 12 +- dispatch/dispatch.h | 1 + dispatch/dispatch_swift_shims.h | 59 ++++++ dispatch/generic/module.modulemap | 4 +- dispatch/generic_static/module.modulemap | 4 +- dispatch/group.h | 10 +- dispatch/io.h | 20 +- dispatch/object.h | 73 ++++++- dispatch/queue.h | 81 ++++++- dispatch/semaphore.h | 5 +- dispatch/source.h | 48 +++-- dispatch/time.h | 6 + dispatch/workloop.h | 7 +- libdispatch.xcodeproj/project.pbxproj | 182 ++++++++++++++++ os/eventlink_private.h | 2 +- os/firehose_server_private.h | 2 +- os/object.h | 22 ++ os/voucher_private.h | 2 +- os/workgroup_interval.h | 3 +- os/workgroup_interval_private.h | 118 +++++++++++ private/darwin/module.modulemap | 7 +- private/generic/module.modulemap | 4 +- private/mach_private.h | 4 +- private/queue_private.h | 1 + private/source_private.h | 2 +- src/apply.c | 14 +- src/data.c | 2 +- src/event/event.c | 18 +- src/event/event_epoll.c | 8 +- src/event/event_kevent.c | 72 
++++--- src/event/event_windows.c | 12 +- src/exclavekit/dispatch/dispatch.h | 48 +++++ src/exclavekit/dispatch/module.modulemap | 4 + src/exclavekit/dispatch/object.h | 80 +++++++ src/exclavekit/dispatch/once.h | 80 +++++++ src/exclavekit/internal.h | 44 ++++ src/exclavekit/once.c | 39 ++++ src/firehose/firehose_server.c | 28 ++- src/init.c | 12 ++ src/inline_internal.h | 68 +++--- src/introspection.c | 10 +- src/io.c | 10 +- src/mach.c | 28 +-- src/object.c | 2 +- src/object.m | 1 + src/object_internal.h | 42 ++-- src/queue.c | 223 +++++++++++--------- src/queue_internal.h | 3 +- src/semaphore.c | 28 +-- src/shims/atomic.h | 46 ---- src/source.c | 34 +-- src/voucher.c | 65 ++++-- src/voucher_internal.h | 9 +- src/workgroup.c | 152 ++++++++++++- src/workgroup_internal.h | 19 +- xcodeconfig/libdispatch-exclavekit.xcconfig | 22 ++ xcodeconfig/libdispatch.dirty | 3 + xcodeconfig/libdispatch.order | 3 + 63 files changed, 1531 insertions(+), 420 deletions(-) create mode 100644 dispatch/Dispatch.apinotes create mode 100644 dispatch/dispatch_swift_shims.h create mode 100644 src/exclavekit/dispatch/dispatch.h create mode 100644 src/exclavekit/dispatch/module.modulemap create mode 100644 src/exclavekit/dispatch/object.h create mode 100644 src/exclavekit/dispatch/once.h create mode 100644 src/exclavekit/internal.h create mode 100644 src/exclavekit/once.c create mode 100644 xcodeconfig/libdispatch-exclavekit.xcconfig diff --git a/config/config.h b/config/config.h index 945e488b8..9f70fd793 100644 --- a/config/config.h +++ b/config/config.h @@ -7,7 +7,7 @@ /* Define to 1 if you have the declaration of `CLOCK_REALTIME', and to 0 if you don't. */ -#define CLOCK_REALTIME 0 +#define HAVE_DECL_CLOCK_REALTIME 0 /* Define to 1 if you have the declaration of `CLOCK_UPTIME', and to 0 if you don't. 
*/ diff --git a/dispatch/Dispatch.apinotes b/dispatch/Dispatch.apinotes new file mode 100644 index 000000000..671b1e455 --- /dev/null +++ b/dispatch/Dispatch.apinotes @@ -0,0 +1,2 @@ +--- +Name: Dispatch diff --git a/dispatch/base.h b/dispatch/base.h index ae5cdaef0..33f84ae5f 100644 --- a/dispatch/base.h +++ b/dispatch/base.h @@ -258,6 +258,13 @@ #define DISPATCH_SIZED_BY(X) #endif +#define DISPATCH_OSX_SUPPORTS_AT_LEAST(macos, ios, tvos, watchos) \ + ( (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= macos) \ + || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= ios) \ + || (defined(__TV_OS_VERSION_MIN_REQUIRED) && __TV_OS_VERSION_MIN_REQUIRED >= tvos) \ + || (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && __WATCH_OS_VERSION_MIN_REQUIRED >= watchos) \ + ) + #if !__has_feature(nullability) #ifndef _Nullable #define _Nullable @@ -358,6 +365,7 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") typedef void (*dispatch_function_t)(void *_Nullable); DISPATCH_ASSUME_ABI_SINGLE_END diff --git a/dispatch/block.h b/dispatch/block.h index 288ff2964..45b9076f0 100644 --- a/dispatch/block.h +++ b/dispatch/block.h @@ -101,19 +101,26 @@ __BEGIN_DECLS * for synchronous execution or when the dispatch block object is invoked * directly. 
*/ +DISPATCH_REFINED_FOR_SWIFT DISPATCH_OPTIONS(dispatch_block_flags, unsigned long, DISPATCH_BLOCK_BARRIER - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x1, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.barrier") = 0x1, DISPATCH_BLOCK_DETACHED - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x2, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.detached") = 0x2, DISPATCH_BLOCK_ASSIGN_CURRENT - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x4, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.assignCurrentContext") = 0x4, DISPATCH_BLOCK_NO_QOS_CLASS - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x8, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.noQoS") = 0x8, DISPATCH_BLOCK_INHERIT_QOS_CLASS - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x10, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.inheritQoS") = 0x10, DISPATCH_BLOCK_ENFORCE_QOS_CLASS - DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) = 0x20, + DISPATCH_ENUM_API_AVAILABLE(macos(10.10), ios(8.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItemFlags.enforceQoS") = 0x20, ); /*! 
@@ -169,6 +176,7 @@ DISPATCH_OPTIONS(dispatch_block_flags, unsigned long, API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem()") dispatch_block_t dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block); @@ -241,6 +249,7 @@ dispatch_block_create(dispatch_block_flags_t flags, dispatch_block_t block); API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_RETURNS_RETAINED_BLOCK DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem()") dispatch_block_t dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, dispatch_qos_class_t qos_class, int relative_priority, @@ -273,6 +282,7 @@ dispatch_block_create_with_qos_class(dispatch_block_flags_t flags, */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem.perform()") void dispatch_block_perform(dispatch_block_flags_t flags, DISPATCH_NOESCAPE dispatch_block_t block); @@ -324,6 +334,7 @@ dispatch_block_perform(dispatch_block_flags_t flags, */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem.wait(timeout:)") intptr_t dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); @@ -365,6 +376,7 @@ dispatch_block_wait(dispatch_block_t block, dispatch_time_t timeout); */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem.notify(queue:execute:)") void dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, dispatch_block_t notification_block); @@ -397,6 +409,7 @@ dispatch_block_notify(dispatch_block_t block, dispatch_queue_t queue, */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL 
DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem.cancel()") void dispatch_block_cancel(dispatch_block_t block); @@ -417,6 +430,7 @@ dispatch_block_cancel(dispatch_block_t block); API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchWorkItem.isCancelled") intptr_t dispatch_block_testcancel(dispatch_block_t block); diff --git a/dispatch/darwin/module.modulemap b/dispatch/darwin/module.modulemap index e30807f91..6688433ea 100644 --- a/dispatch/darwin/module.modulemap +++ b/dispatch/darwin/module.modulemap @@ -1,9 +1,10 @@ -module Dispatch [system] [extern_c] { +module Dispatch [system] { umbrella header "dispatch.h" export * + exclude header "introspection.h" } -module DispatchIntrospection [system] [extern_c] { +module DispatchIntrospection [system] { header "introspection.h" export * } diff --git a/dispatch/data.h b/dispatch/data.h index a5211cd7b..a938ead14 100644 --- a/dispatch/data.h +++ b/dispatch/data.h @@ -42,7 +42,7 @@ __BEGIN_DECLS * @typedef dispatch_data_t * A dispatch object representing memory regions. */ -DISPATCH_DATA_DECL(dispatch_data); +DISPATCH_DATA_DECL_SWIFT(dispatch_data, __DispatchData); /*! * @var dispatch_data_empty @@ -80,6 +80,7 @@ DISPATCH_EXPORT struct dispatch_data_s _dispatch_data_empty; */ #define DISPATCH_DATA_DESTRUCTOR_FREE (_dispatch_data_destructor_free) API_AVAILABLE(macos(10.7), ios(5.0)) +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); /*! 
@@ -89,6 +90,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(free); */ #define DISPATCH_DATA_DESTRUCTOR_MUNMAP (_dispatch_data_destructor_munmap) API_AVAILABLE(macos(10.9), ios(7.0)) +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); #ifdef __BLOCKS__ @@ -115,6 +117,7 @@ DISPATCH_DATA_DESTRUCTOR_TYPE_DECL(munmap); */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchData.init(bytes:)") dispatch_data_t dispatch_data_create(const void *DISPATCH_SIZED_BY(size) buffer, size_t size, @@ -132,6 +135,7 @@ dispatch_data_create(const void *DISPATCH_SIZED_BY(size) buffer, */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT size_t dispatch_data_get_size(dispatch_data_t data); @@ -157,6 +161,7 @@ dispatch_data_get_size(dispatch_data_t data); API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_data_t dispatch_data_create_map(dispatch_data_t data, const void *_Nullable DISPATCH_SIZED_BY(*size_ptr) *_Nullable buffer_ptr, @@ -180,6 +185,7 @@ dispatch_data_create_map(dispatch_data_t data, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_data_t dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); @@ -201,6 +207,7 @@ dispatch_data_create_concat(dispatch_data_t data1, dispatch_data_t data2); API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_data_t dispatch_data_create_subrange(dispatch_data_t data, size_t offset, @@ -218,6 +225,7 @@ dispatch_data_create_subrange(dispatch_data_t 
data, * @param size The size of the memory for the current region. * @result A Boolean indicating whether traversal should continue. */ +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, size_t offset, const void *DISPATCH_SIZED_BY(size) buffer, @@ -245,6 +253,7 @@ typedef bool (^dispatch_data_applier_t)(dispatch_data_t region, */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT bool dispatch_data_apply(dispatch_data_t data, DISPATCH_NOESCAPE dispatch_data_applier_t applier); @@ -267,6 +276,7 @@ dispatch_data_apply(dispatch_data_t data, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_data_t dispatch_data_copy_region(dispatch_data_t data, size_t location, diff --git a/dispatch/dispatch.h b/dispatch/dispatch.h index 9b517f36c..f6d3b290e 100644 --- a/dispatch/dispatch.h +++ b/dispatch/dispatch.h @@ -75,6 +75,7 @@ #include #include #include +#include #undef __DISPATCH_INDIRECT__ #endif /* !__DISPATCH_BUILDING_DISPATCH__ */ diff --git a/dispatch/dispatch_swift_shims.h b/dispatch/dispatch_swift_shims.h new file mode 100644 index 000000000..cbbe3d8b7 --- /dev/null +++ b/dispatch/dispatch_swift_shims.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2023 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef _DISPATCH_SWIFT_SHIMS_ +#define _DISPATCH_SWIFT_SHIMS_ + +#ifndef __DISPATCH_INDIRECT__ +#error "This file is intended to be used only for Dispatch Swift Overlay." +#include // for HeaderDoc +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN + +__BEGIN_DECLS + +#ifdef __swift__ +DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchSerialQueue.init(__label:attr:queue:)) +static inline dispatch_queue_serial_t +dispatch_serial_queue_create_with_target_4swift(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, + dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) { + return dispatch_queue_create_with_target(label, attr, target); +} + +DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT +DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchConcurrentQueue.init(__label:attr:queue:)) +static inline dispatch_queue_concurrent_t +dispatch_concurrent_queue_create_with_target_4swift(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, + dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) { + return dispatch_queue_create_with_target(label, attr, target); +} +#endif + +__END_DECLS + +DISPATCH_ASSUME_ABI_SINGLE_END +DISPATCH_ASSUME_NONNULL_END + +#endif /* _DISPATCH_SWIFT_SHIMS_ */ diff --git a/dispatch/generic/module.modulemap b/dispatch/generic/module.modulemap index f7fdaae76..9b0414866 100644 --- a/dispatch/generic/module.modulemap +++ b/dispatch/generic/module.modulemap @@ -5,12 +5,12 @@ module Dispatch { link "BlocksRuntime" } -module DispatchIntrospection [system] [extern_c] { +module DispatchIntrospection [system] { header "introspection.h" export * } -module CDispatch [system] [extern_c] { +module CDispatch [system] { umbrella 
header "dispatch.h" export * requires blocks diff --git a/dispatch/generic_static/module.modulemap b/dispatch/generic_static/module.modulemap index d5d64d2d1..b4ea60157 100644 --- a/dispatch/generic_static/module.modulemap +++ b/dispatch/generic_static/module.modulemap @@ -6,12 +6,12 @@ module Dispatch { link "DispatchStubs" } -module DispatchIntrospection [system] [extern_c] { +module DispatchIntrospection [system] { header "introspection.h" export * } -module CDispatch [system] [extern_c] { +module CDispatch [system] { umbrella header "dispatch.h" export * requires blocks diff --git a/dispatch/group.h b/dispatch/group.h index 06ae76eff..53befcc01 100644 --- a/dispatch/group.h +++ b/dispatch/group.h @@ -34,7 +34,7 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN * @abstract * A group of blocks submitted to queues for asynchronous invocation. */ -DISPATCH_DECL(dispatch_group); +DISPATCH_DECL_SWIFT(dispatch_group, DispatchGroup); __BEGIN_DECLS @@ -55,6 +55,7 @@ __BEGIN_DECLS API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchGroup.init()) dispatch_group_t dispatch_group_create(void); @@ -84,6 +85,7 @@ dispatch_group_create(void); #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_group_async(dispatch_group_t group, dispatch_queue_t queue, @@ -119,6 +121,7 @@ dispatch_group_async(dispatch_group_t group, API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.async(self:group:qos:flags:execute:)") void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t queue, @@ -161,6 +164,7 @@ dispatch_group_async_f(dispatch_group_t group, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT 
intptr_t dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); @@ -197,6 +201,7 @@ dispatch_group_wait(dispatch_group_t group, dispatch_time_t timeout); #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_group_notify(dispatch_group_t group, dispatch_queue_t queue, @@ -228,6 +233,7 @@ dispatch_group_notify(dispatch_group_t group, API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchGroup.notify(self:qos:flags:queue:execute:)") void dispatch_group_notify_f(dispatch_group_t group, dispatch_queue_t queue, @@ -251,6 +257,7 @@ dispatch_group_notify_f(dispatch_group_t group, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchGroup.enter(self:)) void dispatch_group_enter(dispatch_group_t group); @@ -270,6 +277,7 @@ dispatch_group_enter(dispatch_group_t group); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchGroup.leave(self:)) void dispatch_group_leave(dispatch_group_t group); diff --git a/dispatch/io.h b/dispatch/io.h index 2df1bcdb3..b4115b865 100644 --- a/dispatch/io.h +++ b/dispatch/io.h @@ -54,6 +54,7 @@ __BEGIN_DECLS #if defined(_WIN32) typedef intptr_t dispatch_fd_t; #else +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") typedef int dispatch_fd_t; #endif @@ -109,6 +110,7 @@ typedef int dispatch_fd_t; */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_read(dispatch_fd_t fd, size_t length, @@ -148,6 +150,7 @@ dispatch_read(dispatch_fd_t fd, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void 
dispatch_write(dispatch_fd_t fd, dispatch_data_t data, @@ -165,7 +168,7 @@ dispatch_write(dispatch_fd_t fd, * file descriptor. I/O channels are first class dispatch objects and may be * retained and released, suspended and resumed, etc. */ -DISPATCH_DECL(dispatch_io); +DISPATCH_DECL_SWIFT(dispatch_io, DispatchIO); /*! * @typedef dispatch_io_type_t @@ -188,6 +191,7 @@ DISPATCH_DECL(dispatch_io); #define DISPATCH_IO_STREAM 0 #define DISPATCH_IO_RANDOM 1 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchIO.StreamType") typedef unsigned long dispatch_io_type_t; #ifdef __BLOCKS__ @@ -219,6 +223,7 @@ typedef unsigned long dispatch_io_type_t; API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchIO.init(__type:fd:queue:handler:)) dispatch_io_t dispatch_io_create(dispatch_io_type_t type, dispatch_fd_t fd, @@ -255,6 +260,7 @@ dispatch_io_create(dispatch_io_type_t type, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchIO.init(__type:path:oflag:mode:queue:handler:)) dispatch_io_t dispatch_io_create_with_path(dispatch_io_type_t type, const char *DISPATCH_UNSAFE_INDEXABLE path, int oflag, @@ -295,6 +301,7 @@ dispatch_io_create_with_path(dispatch_io_type_t type, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchIO.init(__type:io:queue:handler:)) dispatch_io_t dispatch_io_create_with_io(dispatch_io_type_t type, dispatch_io_t io, @@ -309,6 +316,7 @@ dispatch_io_create_with_io(dispatch_io_type_t type, * @param data The data object to be handled. * @param error An errno condition for the operation. 
*/ +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, int error); @@ -357,6 +365,7 @@ typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t _Nullable data, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_io_read(dispatch_io_t channel, off_t offset, @@ -410,6 +419,7 @@ dispatch_io_read(dispatch_io_t channel, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NONNULL4 DISPATCH_NONNULL5 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_io_write(dispatch_io_t channel, off_t offset, @@ -427,6 +437,7 @@ dispatch_io_write(dispatch_io_t channel, */ #define DISPATCH_IO_STOP 0x1 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchIO.CloseFlags") typedef unsigned long dispatch_io_close_flags_t; /*! @@ -448,6 +459,7 @@ typedef unsigned long dispatch_io_close_flags_t; */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); @@ -475,6 +487,7 @@ dispatch_io_close(dispatch_io_t channel, dispatch_io_close_flags_t flags); */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchIO.barrier(self:execute:)) void dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); #endif /* __BLOCKS__ */ @@ -495,6 +508,7 @@ dispatch_io_barrier(dispatch_io_t channel, dispatch_block_t barrier); */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(getter:DispatchIO.fileDescriptor(self:)) dispatch_fd_t dispatch_io_get_descriptor(dispatch_io_t channel); @@ -516,6 +530,7 @@ dispatch_io_get_descriptor(dispatch_io_t channel); */ API_AVAILABLE(macos(10.7), 
ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchIO.setLimit(self:highWater:)) void dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); @@ -547,6 +562,7 @@ dispatch_io_set_high_water(dispatch_io_t channel, size_t high_water); */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchIO.setLimit(self:lowWater:)) void dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water); @@ -560,6 +576,7 @@ dispatch_io_set_low_water(dispatch_io_t channel, size_t low_water); */ #define DISPATCH_IO_STRICT_INTERVAL 0x1 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchIO.IntervalFlags") typedef unsigned long dispatch_io_interval_flags_t; /*! @@ -586,6 +603,7 @@ typedef unsigned long dispatch_io_interval_flags_t; */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_io_set_interval(dispatch_io_t channel, uint64_t interval, diff --git a/dispatch/object.h b/dispatch/object.h index 031f09d86..83f7aba8f 100644 --- a/dispatch/object.h +++ b/dispatch/object.h @@ -53,16 +53,42 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN * analyzer, and enables them to be added to Cocoa collections. * See for details. */ -OS_OBJECT_DECL_CLASS(dispatch_object); +DISPATCH_SWIFT_NAME(DispatchObject) OS_OBJECT_DECL_CLASS(dispatch_object); #if OS_OBJECT_SWIFT3 -#define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, dispatch_object) -#define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SUBCLASS_SWIFT(name, base) +#define DISPATCH_DECL(name) OS_OBJECT_DECL_SENDABLE_SUBCLASS_SWIFT(name, dispatch_object) +#define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SENDABLE_SUBCLASS_SWIFT(name, base) +/* + * DISPATCH_DECL_FACTORY_CLASS_SWIFT adopts _hasMissingDesignatedInitializers swift attribute. + * That makes subclasses of this class stop inheriting its initializers. 
+ */ +#define DISPATCH_DECL_FACTORY_CLASS_SWIFT(name, swift_name) \ + OS_OBJECT_SWIFT_HAS_MISSING_DESIGNATED_INIT DISPATCH_DECL_SWIFT(name, swift_name) +#define DISPATCH_DECL_SWIFT(name, swift_name) DISPATCH_SWIFT_NAME(swift_name) DISPATCH_DECL(name) +#define DISPATCH_DECL_SUBCLASS_SWIFT(name, base, swift_name) \ + DISPATCH_SWIFT_NAME(swift_name) DISPATCH_DECL_SUBCLASS(name, base) + +/* + * DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT is for declaring subclasses of a serial executor base class. + */ +#if DISPATCH_OSX_SUPPORTS_AT_LEAST(140000, 170000, 170000, 100000) +#define DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(name, swift_name) \ + DISPATCH_DECL_SUBCLASS_SWIFT(name, dispatch_queue_serial_executor, swift_name) +#else +#define DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(name, swift_name) \ + DISPATCH_DECL_SUBCLASS_SWIFT(name, dispatch_queue, swift_name) +#endif + #else // OS_OBJECT_SWIFT3 #define DISPATCH_DECL(name) OS_OBJECT_DECL_SUBCLASS(name, dispatch_object) #define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SUBCLASS(name, base) - +#define DISPATCH_DECL_FACTORY_CLASS_SWIFT(name, swift_name) DISPATCH_DECL_SWIFT(name, swift_name) +#define DISPATCH_DECL_SWIFT(name, swift_name) DISPATCH_DECL(name) +#define DISPATCH_DECL_SUBCLASS_SWIFT(name, base, swift_name) DISPATCH_DECL_SUBCLASS(name, base) +#define DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(name, swift_name) \ + DISPATCH_DECL_SUBCLASS_SWIFT(name, dispatch_queue, swift_name) DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void _dispatch_object_validate(dispatch_object_t object) { @@ -89,6 +115,11 @@ typedef struct dispatch_object_s { typedef struct name##_s : public dispatch_object_s {} *name##_t #define DISPATCH_DECL_SUBCLASS(name, base) \ typedef struct name##_s : public base##_s {} *name##_t +#define DISPATCH_DECL_FACTORY_CLASS_SWIFT(name, swift_name) DISPATCH_DECL_SWIFT(name, swift_name) +#define DISPATCH_DECL_SWIFT(name, swift_name) 
DISPATCH_DECL(name) +#define DISPATCH_DECL_SUBCLASS_SWIFT(name, base, swift_name) DISPATCH_DECL_SUBCLASS(name, base) +#define DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(name, swift_name) \ + DISPATCH_DECL_SUBCLASS_SWIFT(name, dispatch_queue, swift_name) #define DISPATCH_GLOBAL_OBJECT(type, object) (static_cast(&(object))) #define DISPATCH_RETURNS_RETAINED #else /* Plain C */ @@ -110,6 +141,11 @@ typedef union { #endif // !__DISPATCH_BUILDING_DISPATCH__ #define DISPATCH_DECL(name) typedef struct name##_s *name##_t #define DISPATCH_DECL_SUBCLASS(name, base) typedef base##_t name##_t +#define DISPATCH_DECL_FACTORY_CLASS_SWIFT(name, swift_name) DISPATCH_DECL_SWIFT(name, swift_name) +#define DISPATCH_DECL_SWIFT(name, swift_name) DISPATCH_DECL(name) +#define DISPATCH_DECL_SUBCLASS_SWIFT(name, base, swift_name) DISPATCH_DECL_SUBCLASS(name, base) +#define DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(name, swift_name) \ + DISPATCH_DECL_SUBCLASS_SWIFT(name, dispatch_queue, swift_name) #define DISPATCH_GLOBAL_OBJECT(type, object) ((type)&(object)) #define DISPATCH_RETURNS_RETAINED #endif @@ -121,20 +157,40 @@ typedef union { OS_OBJECT_DECL_PROTOCOL(dispatch_source_##name, ); \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL( \ dispatch_source, dispatch_source_##name) +#define DISPATCH_SOURCE_TYPE_DECL_SWIFT(name, swift_name) \ + DISPATCH_EXPORT struct dispatch_source_type_s \ + _dispatch_source_type_##name; \ + DISPATCH_SWIFT_NAME(swift_name) \ + OS_OBJECT_DECL_PROTOCOL(dispatch_source_##name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL( \ + dispatch_source, dispatch_source_##name) #define DISPATCH_SOURCE_DECL(name) \ DISPATCH_DECL(name); \ OS_OBJECT_DECL_PROTOCOL(name, ); \ OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, name) +#define DISPATCH_SOURCE_DECL_SWIFT(name, swift_name, protocol_name) \ + DISPATCH_SWIFT_NAME(swift_name) \ + DISPATCH_DECL(name); \ + DISPATCH_SWIFT_NAME(protocol_name) \ + OS_OBJECT_DECL_PROTOCOL(name, ); \ + OS_OBJECT_CLASS_IMPLEMENTS_PROTOCOL(name, name) #ifndef DISPATCH_DATA_DECL 
#define DISPATCH_DATA_DECL(name) OS_OBJECT_DECL_SWIFT(name) #endif // DISPATCH_DATA_DECL +#define DISPATCH_DATA_DECL_SWIFT(name, swift_name) \ + DISPATCH_SWIFT_NAME(swift_name) \ + DISPATCH_DATA_DECL(name) #else #define DISPATCH_SOURCE_DECL(name) \ DISPATCH_DECL(name); +#define DISPATCH_SOURCE_DECL_SWIFT(name, swift_name, protocol_name) DISPATCH_SOURCE_DECL(name) #define DISPATCH_DATA_DECL(name) DISPATCH_DECL(name) +#define DISPATCH_DATA_DECL_SWIFT(name, swift_name) DISPATCH_DATA_DECL(name) #define DISPATCH_SOURCE_TYPE_DECL(name) \ DISPATCH_EXPORT const struct dispatch_source_type_s \ _dispatch_source_type_##name +#define DISPATCH_SOURCE_TYPE_DECL_SWIFT(name, swift_name) \ + DISPATCH_SOURCE_TYPE_DECL(name) #endif #ifdef __BLOCKS__ @@ -179,6 +235,7 @@ typedef union { * Instead, the block literal must be copied to the heap with the Block_copy() * function or by sending it a -[copy] message. */ +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") typedef void (^dispatch_block_t)(void); #endif // __BLOCKS__ @@ -188,6 +245,7 @@ __BEGIN_DECLS * @typedef dispatch_qos_class_t * Alias for qos_class_t type. 
*/ +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQoS") #if __has_include() typedef qos_class_t dispatch_qos_class_t; #else @@ -263,6 +321,7 @@ dispatch_release(dispatch_object_t object); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void *_Nullable dispatch_get_context(dispatch_object_t object); @@ -281,6 +340,7 @@ dispatch_get_context(dispatch_object_t object); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void dispatch_set_context(dispatch_object_t object, void *_Nullable context); @@ -307,6 +367,7 @@ dispatch_set_context(dispatch_object_t object, void *_Nullable context); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void dispatch_set_finalizer_f(dispatch_object_t object, dispatch_function_t _Nullable finalizer); @@ -335,6 +396,7 @@ dispatch_set_finalizer_f(dispatch_object_t object, */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchObject.activate(self:)) void dispatch_activate(dispatch_object_t object); @@ -358,6 +420,7 @@ dispatch_activate(dispatch_object_t object); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchObject.suspend(self:)) void dispatch_suspend(dispatch_object_t object); @@ -387,6 +450,7 @@ dispatch_suspend(dispatch_object_t object); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchObject.resume(self:)) void dispatch_resume(dispatch_object_t object); @@ -429,6 +493,7 @@ dispatch_resume(dispatch_object_t object); */ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NOTHROW 
+DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void dispatch_set_qos_class_floor(dispatch_object_t object, dispatch_qos_class_t qos_class, int relative_priority); diff --git a/dispatch/queue.h b/dispatch/queue.h index 9e18474a1..786b3285b 100644 --- a/dispatch/queue.h +++ b/dispatch/queue.h @@ -67,7 +67,7 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN * reference to the queue until they have finished. Once all references to a * queue have been released, the queue will be deallocated by the system. */ -DISPATCH_DECL(dispatch_queue); +DISPATCH_DECL_FACTORY_CLASS_SWIFT(dispatch_queue, DispatchQueue); /*! * @typedef dispatch_queue_global_t @@ -99,12 +99,37 @@ DISPATCH_DECL(dispatch_queue); * Calls to dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., * will have no effect when used with queues of this type. */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) #if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) typedef struct dispatch_queue_global_s *dispatch_queue_global_t; #else DISPATCH_DECL_SUBCLASS(dispatch_queue_global, dispatch_queue); #endif +/*! + * @typedef dispatch_queue_serial_executor_t + * + * @abstract + * An abstract class of dispatch queues which conform to the serial executor + * protocol. + * + * @discussion + * A serial executor in Swift Concurrency represents a mutual exclusion context. + * Queues with a singular owner, which invoke only one workItem at a time + * provide such a mutual exclusion context and are serial executors. + * + * Subclasses of this abstract class can therefore be set up as Custom + * Executors for Swift Actors. + * + * See dispatch_queue_serial_t and dispatch_workloop_t. 
+ */ +API_AVAILABLE(macos(14.0), ios(17.0), tvos(17.0), watchos(10.0)) +#if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) +typedef struct dispatch_lane_s *dispatch_queue_serial_executor_t; +#else +DISPATCH_DECL_SUBCLASS_SWIFT(dispatch_queue_serial_executor, dispatch_queue, _DispatchSerialExecutorQueue); +#endif + /*! * @typedef dispatch_queue_serial_t * @@ -129,10 +154,11 @@ DISPATCH_DECL_SUBCLASS(dispatch_queue_global, dispatch_queue); * Serial queues are created by passing a dispatch queue attribute derived from * DISPATCH_QUEUE_SERIAL to dispatch_queue_create_with_target(). */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) #if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) typedef struct dispatch_lane_s *dispatch_queue_serial_t; #else -DISPATCH_DECL_SUBCLASS(dispatch_queue_serial, dispatch_queue); +DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(dispatch_queue_serial, DispatchSerialQueue); #endif /*! @@ -155,6 +181,7 @@ DISPATCH_DECL_SUBCLASS(dispatch_queue_serial, dispatch_queue); * dispatch_suspend(), dispatch_resume(), dispatch_set_context(), etc., will * have no effect when used on the main queue. */ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) #if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) typedef struct dispatch_queue_static_s *dispatch_queue_main_t; #else @@ -189,10 +216,11 @@ DISPATCH_DECL_SUBCLASS(dispatch_queue_main, dispatch_queue_serial); * avoidance when lower priority regular workitems (readers) are being invoked * and are preventing a higher priority barrier (writer) from being invoked. 
*/ +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) #if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) typedef struct dispatch_lane_s *dispatch_queue_concurrent_t; #else -DISPATCH_DECL_SUBCLASS(dispatch_queue_concurrent, dispatch_queue); +DISPATCH_DECL_SUBCLASS_SWIFT(dispatch_queue_concurrent, dispatch_queue, DispatchConcurrentQueue); #endif __BEGIN_DECLS @@ -228,6 +256,7 @@ __BEGIN_DECLS #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_async(dispatch_queue_t queue, dispatch_block_t block); #endif @@ -258,6 +287,7 @@ dispatch_async(dispatch_queue_t queue, dispatch_block_t block); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.async(self:execute:)") void dispatch_async_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -301,6 +331,7 @@ dispatch_async_f(dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchQueue.sync(self:execute:)) void dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); #endif @@ -329,6 +360,7 @@ dispatch_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.sync(self:execute:)") void dispatch_sync_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -396,6 +428,7 @@ dispatch_sync_f(dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchQueue.asyncAndWait(self:execute:)) void dispatch_async_and_wait(dispatch_queue_t 
queue, DISPATCH_NOESCAPE dispatch_block_t block); @@ -425,6 +458,7 @@ dispatch_async_and_wait(dispatch_queue_t queue, */ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.asyncAndWait(self:execute:)") void dispatch_async_and_wait_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -497,6 +531,7 @@ dispatch_async_and_wait_f(dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_apply(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, @@ -532,6 +567,7 @@ dispatch_apply(size_t iterations, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.concurrentPerform(iterations:execute:).") void dispatch_apply_f(size_t iterations, dispatch_queue_t DISPATCH_APPLY_QUEUE_ARG_NULLABILITY queue, @@ -604,6 +640,7 @@ struct dispatch_queue_s _dispatch_main_q; * the main thread before main() is called. */ DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use getter:DispatchQueue.main()") dispatch_queue_main_t dispatch_get_main_queue(void) { @@ -643,6 +680,7 @@ dispatch_get_main_queue(void) #define DISPATCH_QUEUE_PRIORITY_LOW (-2) #define DISPATCH_QUEUE_PRIORITY_BACKGROUND INT16_MIN +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.GlobalQueuePriority") typedef long dispatch_queue_priority_t; /*! 
@@ -684,6 +722,7 @@ typedef long dispatch_queue_priority_t; */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_CONST DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_queue_global_t dispatch_get_global_queue(intptr_t identifier, uintptr_t flags); @@ -693,6 +732,7 @@ dispatch_get_global_queue(intptr_t identifier, uintptr_t flags); * @abstract * Attribute for dispatch queues. */ +DISPATCH_REFINED_FOR_SWIFT DISPATCH_DECL(dispatch_queue_attr); /*! @@ -779,6 +819,7 @@ struct dispatch_queue_attr_s _dispatch_queue_attr_concurrent; */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_queue_attr_t dispatch_queue_attr_make_initially_inactive( dispatch_queue_attr_t _Nullable attr); @@ -833,10 +874,14 @@ dispatch_queue_attr_make_initially_inactive( * autorelease pool around the execution of a block that is submitted to it * asynchronously. This is the behavior of the global concurrent queues. 
*/ +DISPATCH_REFINED_FOR_SWIFT DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, - DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 0, - DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 1, - DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) = 2, + DISPATCH_AUTORELEASE_FREQUENCY_INHERIT DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.AutoreleaseFrequency.inherit") = 0, + DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.AutoreleaseFrequency.workItem") = 1, + DISPATCH_AUTORELEASE_FREQUENCY_NEVER DISPATCH_ENUM_API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) + DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.AutoreleaseFrequency.never") = 2, ); /*! @@ -848,7 +893,7 @@ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, * * @discussion * When a queue uses the per-workitem autorelease frequency (either directly - * or inherithed from its target queue), any block submitted asynchronously to + * or inherited from its target queue), any block submitted asynchronously to * this queue (via dispatch_async(), dispatch_barrier_async(), * dispatch_group_notify(), etc...) is executed as if surrounded by a individual * Objective-C @autoreleasepool scope. 
@@ -878,6 +923,7 @@ DISPATCH_ENUM(dispatch_autorelease_frequency, unsigned long, */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_queue_attr_t dispatch_queue_attr_make_with_autorelease_frequency( dispatch_queue_attr_t _Nullable attr, @@ -942,6 +988,7 @@ dispatch_queue_attr_make_with_autorelease_frequency( */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_queue_attr_t dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, dispatch_qos_class_t qos_class, int relative_priority); @@ -1008,6 +1055,7 @@ dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr, API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchQueue.init(__label:attr:queue:)) dispatch_queue_t dispatch_queue_create_with_target(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, dispatch_queue_attr_t _Nullable attr, dispatch_queue_t _Nullable target) @@ -1061,6 +1109,7 @@ dispatch_queue_create_with_target(const char *_Nullable DISPATCH_UNSAFE_INDEXABL API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchQueue.init(__label:attr:)) dispatch_queue_t dispatch_queue_create(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, dispatch_queue_attr_t _Nullable attr); @@ -1090,6 +1139,7 @@ dispatch_queue_create(const char *_Nullable DISPATCH_UNSAFE_INDEXABLE label, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT const char * dispatch_queue_get_label(dispatch_queue_t 
_Nullable queue); @@ -1129,6 +1179,7 @@ dispatch_queue_get_label(dispatch_queue_t _Nullable queue); */ API_AVAILABLE(macos(10.10), ios(8.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_qos_class_t dispatch_queue_get_qos_class(dispatch_queue_t queue, int *_Nullable relative_priority_ptr); @@ -1196,6 +1247,7 @@ dispatch_queue_get_qos_class(dispatch_queue_t queue, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchObject.setTarget(self:queue:)) void dispatch_set_target_queue(dispatch_object_t object, dispatch_queue_t _Nullable queue); @@ -1215,6 +1267,7 @@ dispatch_set_target_queue(dispatch_object_t object, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NOTHROW DISPATCH_NORETURN +DISPATCH_SWIFT_NAME(dispatchMain()) void dispatch_main(void); @@ -1243,6 +1296,7 @@ dispatch_main(void); #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_after(dispatch_time_t when, dispatch_queue_t queue, dispatch_block_t block); @@ -1275,6 +1329,7 @@ dispatch_after(dispatch_time_t when, dispatch_queue_t queue, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL2 DISPATCH_NONNULL4 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.asyncAfter(self:deadline:qos:flags:execute:)") void dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -1321,6 +1376,7 @@ dispatch_after_f(dispatch_time_t when, dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); #endif @@ -1356,6 +1412,7 @@ dispatch_barrier_async(dispatch_queue_t queue, dispatch_block_t block); */ 
API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.async(self:group:qos:flags:execute:)") void dispatch_barrier_async_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -1384,6 +1441,7 @@ dispatch_barrier_async_f(dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_barrier_sync(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); @@ -1416,6 +1474,7 @@ dispatch_barrier_sync(dispatch_queue_t queue, */ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchQueue.sync(self:flags:execute:)") void dispatch_barrier_sync_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -1444,6 +1503,7 @@ dispatch_barrier_sync_f(dispatch_queue_t queue, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void dispatch_barrier_async_and_wait(dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block); @@ -1477,6 +1537,7 @@ dispatch_barrier_async_and_wait(dispatch_queue_t queue, */ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Unavailable in Swift") void dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, void *_Nullable context, dispatch_function_t work); @@ -1519,6 +1580,7 @@ dispatch_barrier_async_and_wait_f(dispatch_queue_t queue, */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, void *_Nullable context, 
dispatch_function_t _Nullable destructor); @@ -1549,6 +1611,7 @@ dispatch_queue_set_specific(dispatch_queue_t queue, const void *key, API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void *_Nullable dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); @@ -1575,6 +1638,7 @@ dispatch_queue_get_specific(dispatch_queue_t queue, const void *key); */ API_AVAILABLE(macos(10.7), ios(5.0)) DISPATCH_EXPORT DISPATCH_PURE DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void *_Nullable dispatch_get_specific(const void *key); @@ -1629,6 +1693,7 @@ dispatch_get_specific(const void *key); */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 +DISPATCH_REFINED_FOR_SWIFT void dispatch_assert_queue(dispatch_queue_t queue) DISPATCH_ALIAS_V2(dispatch_assert_queue); @@ -1655,6 +1720,7 @@ dispatch_assert_queue(dispatch_queue_t queue) */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 +DISPATCH_REFINED_FOR_SWIFT void dispatch_assert_queue_barrier(dispatch_queue_t queue); @@ -1678,6 +1744,7 @@ dispatch_assert_queue_barrier(dispatch_queue_t queue); */ API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 +DISPATCH_REFINED_FOR_SWIFT void dispatch_assert_queue_not(dispatch_queue_t queue) DISPATCH_ALIAS_V2(dispatch_assert_queue_not); diff --git a/dispatch/semaphore.h b/dispatch/semaphore.h index d329503f8..67dd7d115 100644 --- a/dispatch/semaphore.h +++ b/dispatch/semaphore.h @@ -35,7 +35,7 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN * @abstract * A counting semaphore. 
*/ -DISPATCH_DECL(dispatch_semaphore); +DISPATCH_DECL_SWIFT(dispatch_semaphore, DispatchSemaphore); __BEGIN_DECLS @@ -61,6 +61,7 @@ __BEGIN_DECLS API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_SWIFT_NAME(DispatchSemaphore.init(value:)) dispatch_semaphore_t dispatch_semaphore_create(intptr_t value); @@ -88,6 +89,7 @@ dispatch_semaphore_create(intptr_t value); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); @@ -110,6 +112,7 @@ dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema); diff --git a/dispatch/source.h b/dispatch/source.h index c15c020dc..9fc116184 100644 --- a/dispatch/source.h +++ b/dispatch/source.h @@ -55,7 +55,7 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN * Dispatch sources are used to automatically submit event handler blocks to * dispatch queues in response to external events. */ -DISPATCH_SOURCE_DECL(dispatch_source); +DISPATCH_SOURCE_DECL_SWIFT(dispatch_source, DispatchSource, DispatchSourceProtocol); __BEGIN_DECLS @@ -70,6 +70,7 @@ __BEGIN_DECLS * signal number, process identifier, etc.), and how the mask argument is * interpreted. */ +DISPATCH_REFINED_FOR_SWIFT typedef const struct dispatch_source_type_s *dispatch_source_type_t; /*! @@ -81,7 +82,7 @@ typedef const struct dispatch_source_type_s *dispatch_source_type_t; */ #define DISPATCH_SOURCE_TYPE_DATA_ADD (&_dispatch_source_type_data_add) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(data_add); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(data_add, DispatchSourceUserDataAdd); /*! 
* @const DISPATCH_SOURCE_TYPE_DATA_OR @@ -92,7 +93,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_add); */ #define DISPATCH_SOURCE_TYPE_DATA_OR (&_dispatch_source_type_data_or) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(data_or); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(data_or, DispatchSourceUserDataOr); /*! * @const DISPATCH_SOURCE_TYPE_DATA_REPLACE @@ -107,7 +108,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_or); */ #define DISPATCH_SOURCE_TYPE_DATA_REPLACE (&_dispatch_source_type_data_replace) API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0)) -DISPATCH_SOURCE_TYPE_DECL(data_replace); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(data_replace, DispatchSourceUserDataReplace); /*! * @const DISPATCH_SOURCE_TYPE_MACH_SEND @@ -118,7 +119,7 @@ DISPATCH_SOURCE_TYPE_DECL(data_replace); */ #define DISPATCH_SOURCE_TYPE_MACH_SEND (&_dispatch_source_type_mach_send) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_SOURCE_TYPE_DECL(mach_send); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(mach_send, DispatchSourceMachSend); /*! * @const DISPATCH_SOURCE_TYPE_MACH_RECV @@ -129,7 +130,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_send); */ #define DISPATCH_SOURCE_TYPE_MACH_RECV (&_dispatch_source_type_mach_recv) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_SOURCE_TYPE_DECL(mach_recv); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(mach_recv, DispatchSourceMachReceive); /*! * @const DISPATCH_SOURCE_TYPE_MEMORYPRESSURE @@ -142,7 +143,7 @@ DISPATCH_SOURCE_TYPE_DECL(mach_recv); #define DISPATCH_SOURCE_TYPE_MEMORYPRESSURE \ (&_dispatch_source_type_memorypressure) API_AVAILABLE(macos(10.9), ios(8.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_SOURCE_TYPE_DECL(memorypressure); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(memorypressure, DispatchSourceMemoryPressure); /*! 
* @const DISPATCH_SOURCE_TYPE_PROC @@ -153,7 +154,7 @@ DISPATCH_SOURCE_TYPE_DECL(memorypressure); */ #define DISPATCH_SOURCE_TYPE_PROC (&_dispatch_source_type_proc) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_SOURCE_TYPE_DECL(proc); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(proc, DispatchSourceProcess); /*! * @const DISPATCH_SOURCE_TYPE_READ @@ -164,7 +165,7 @@ DISPATCH_SOURCE_TYPE_DECL(proc); */ #define DISPATCH_SOURCE_TYPE_READ (&_dispatch_source_type_read) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(read); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(read, DispatchSourceRead); /*! * @const DISPATCH_SOURCE_TYPE_SIGNAL @@ -174,7 +175,7 @@ DISPATCH_SOURCE_TYPE_DECL(read); */ #define DISPATCH_SOURCE_TYPE_SIGNAL (&_dispatch_source_type_signal) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(signal); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(signal, DispatchSourceSignal); /*! * @const DISPATCH_SOURCE_TYPE_TIMER @@ -185,7 +186,7 @@ DISPATCH_SOURCE_TYPE_DECL(signal); */ #define DISPATCH_SOURCE_TYPE_TIMER (&_dispatch_source_type_timer) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(timer); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(timer, DispatchSourceTimer); /*! * @const DISPATCH_SOURCE_TYPE_VNODE @@ -196,7 +197,7 @@ DISPATCH_SOURCE_TYPE_DECL(timer); */ #define DISPATCH_SOURCE_TYPE_VNODE (&_dispatch_source_type_vnode) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() -DISPATCH_SOURCE_TYPE_DECL(vnode); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(vnode, DispatchSourceFileSystemObject); /*! * @const DISPATCH_SOURCE_TYPE_WRITE @@ -207,7 +208,7 @@ DISPATCH_SOURCE_TYPE_DECL(vnode); */ #define DISPATCH_SOURCE_TYPE_WRITE (&_dispatch_source_type_write) API_AVAILABLE(macos(10.6), ios(4.0)) -DISPATCH_SOURCE_TYPE_DECL(write); +DISPATCH_SOURCE_TYPE_DECL_SWIFT(write, DispatchSourceWrite); /*! 
* @typedef dispatch_source_mach_send_flags_t @@ -218,6 +219,7 @@ DISPATCH_SOURCE_TYPE_DECL(write); */ #define DISPATCH_MACH_SEND_DEAD 0x1 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.MachSendEvent") typedef unsigned long dispatch_source_mach_send_flags_t; /*! @@ -254,6 +256,7 @@ typedef unsigned long dispatch_source_mach_recv_flags_t; #define DISPATCH_MEMORYPRESSURE_WARN 0x02 #define DISPATCH_MEMORYPRESSURE_CRITICAL 0x04 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.MemoryPressureEvent") typedef unsigned long dispatch_source_memorypressure_flags_t; /*! @@ -278,6 +281,7 @@ typedef unsigned long dispatch_source_memorypressure_flags_t; #define DISPATCH_PROC_EXEC 0x20000000 #define DISPATCH_PROC_SIGNAL 0x08000000 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.ProcessEvent") typedef unsigned long dispatch_source_proc_flags_t; /*! @@ -318,6 +322,7 @@ typedef unsigned long dispatch_source_proc_flags_t; #define DISPATCH_VNODE_REVOKE 0x40 #define DISPATCH_VNODE_FUNLOCK 0x100 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.FileSystemEvent") typedef unsigned long dispatch_source_vnode_flags_t; /*! @@ -338,6 +343,7 @@ typedef unsigned long dispatch_source_vnode_flags_t; #define DISPATCH_TIMER_STRICT 0x1 +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.TimerFlags") typedef unsigned long dispatch_source_timer_flags_t; /*! @@ -357,6 +363,8 @@ typedef unsigned long dispatch_source_timer_flags_t; * source and setting any desired attributes (i.e. the handler, context, etc.), * a call must be made to dispatch_activate() in order to begin event delivery. * + * A source must have been activated before being disposed. + * * Calling dispatch_set_target_queue() on a source once it has been activated * is not allowed (see dispatch_activate() and dispatch_set_target_queue()). 
* @@ -387,6 +395,7 @@ typedef unsigned long dispatch_source_timer_flags_t; API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_source_t dispatch_source_create(dispatch_source_type_t type, uintptr_t handle, @@ -409,6 +418,7 @@ dispatch_source_create(dispatch_source_type_t type, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_set_event_handler(dispatch_source_t source, dispatch_block_t _Nullable handler); @@ -431,6 +441,7 @@ dispatch_source_set_event_handler(dispatch_source_t source, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("DispatchSource.setEventHandler(self:handler:)") void dispatch_source_set_event_handler_f(dispatch_source_t source, dispatch_function_t _Nullable handler); @@ -466,6 +477,7 @@ dispatch_source_set_event_handler_f(dispatch_source_t source, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_set_cancel_handler(dispatch_source_t source, dispatch_block_t _Nullable handler); @@ -491,6 +503,7 @@ dispatch_source_set_cancel_handler(dispatch_source_t source, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.setCancelHandler(self:handler:)") void dispatch_source_set_cancel_handler_f(dispatch_source_t source, dispatch_function_t _Nullable handler); @@ -519,6 +532,7 @@ dispatch_source_set_cancel_handler_f(dispatch_source_t source, */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_cancel(dispatch_source_t source); @@ -538,6 +552,7 @@ dispatch_source_cancel(dispatch_source_t source); 
API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT intptr_t dispatch_source_testcancel(dispatch_source_t source); @@ -570,6 +585,7 @@ dispatch_source_testcancel(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT uintptr_t dispatch_source_get_handle(dispatch_source_t source); @@ -602,6 +618,7 @@ dispatch_source_get_handle(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT uintptr_t dispatch_source_get_mask(dispatch_source_t source); @@ -641,6 +658,7 @@ dispatch_source_get_mask(dispatch_source_t source); API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_WARN_RESULT DISPATCH_PURE DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT uintptr_t dispatch_source_get_data(dispatch_source_t source); @@ -662,6 +680,7 @@ dispatch_source_get_data(dispatch_source_t source); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_merge_data(dispatch_source_t source, uintptr_t value); @@ -715,6 +734,7 @@ dispatch_source_merge_data(dispatch_source_t source, uintptr_t value); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_set_timer(dispatch_source_t source, dispatch_time_t start, @@ -745,6 +765,7 @@ dispatch_source_set_timer(dispatch_source_t source, #ifdef __BLOCKS__ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_source_set_registration_handler(dispatch_source_t source, dispatch_block_t _Nullable handler); @@ -770,6 +791,7 @@ 
dispatch_source_set_registration_handler(dispatch_source_t source, */ API_AVAILABLE(macos(10.7), ios(4.3)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NOTHROW +DISPATCH_SWIFT_UNAVAILABLE("Use DispatchSource.setRegistrationHandler(self:handler:)") void dispatch_source_set_registration_handler_f(dispatch_source_t source, dispatch_function_t _Nullable handler); diff --git a/dispatch/time.h b/dispatch/time.h index 3268fffa9..bc266e134 100644 --- a/dispatch/time.h +++ b/dispatch/time.h @@ -48,6 +48,10 @@ DISPATCH_ASSUME_ABI_SINGLE_BEGIN #ifdef NSEC_PER_MSEC #undef NSEC_PER_MSEC #endif +#ifdef MSEC_PER_SEC +#undef MSEC_PER_SEC +#endif +#define MSEC_PER_SEC 1000ull #define NSEC_PER_SEC 1000000000ull #define NSEC_PER_MSEC 1000000ull #define USEC_PER_SEC 1000000ull @@ -100,6 +104,7 @@ enum { */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_time_t dispatch_time(dispatch_time_t when, int64_t delta); @@ -126,6 +131,7 @@ dispatch_time(dispatch_time_t when, int64_t delta); */ API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_EXPORT DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_time_t dispatch_walltime(const struct timespec *_Nullable when, int64_t delta); diff --git a/dispatch/workloop.h b/dispatch/workloop.h index e792a8a53..edd80dd46 100644 --- a/dispatch/workloop.h +++ b/dispatch/workloop.h @@ -63,7 +63,8 @@ __BEGIN_DECLS #if defined(__DISPATCH_BUILDING_DISPATCH__) && !defined(__OBJC__) typedef struct dispatch_workloop_s *dispatch_workloop_t; #else -DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); +API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) +DISPATCH_DECL_SERIAL_EXECUTOR_SWIFT(dispatch_workloop, DispatchWorkloop); #endif /*! 
@@ -81,6 +82,7 @@ DISPATCH_DECL_SUBCLASS(dispatch_workloop, dispatch_queue); API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT dispatch_workloop_t dispatch_workloop_create(const char *DISPATCH_UNSAFE_INDEXABLE _Nullable label); @@ -107,6 +109,7 @@ dispatch_workloop_create(const char *DISPATCH_UNSAFE_INDEXABLE _Nullable label); API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_MALLOC DISPATCH_RETURNS_RETAINED DISPATCH_WARN_RESULT DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT DISPATCH_SWIFT_NAME(DispatchWorkloop.init(__label:)) dispatch_workloop_t dispatch_workloop_create_inactive(const char *DISPATCH_UNSAFE_INDEXABLE _Nullable label); @@ -132,6 +135,7 @@ dispatch_workloop_create_inactive(const char *DISPATCH_UNSAFE_INDEXABLE _Nullabl */ API_AVAILABLE(macos(10.14), ios(12.0), tvos(12.0), watchos(5.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, dispatch_autorelease_frequency_t frequency); @@ -159,6 +163,7 @@ dispatch_workloop_set_autorelease_frequency(dispatch_workloop_t workloop, */ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_REFINED_FOR_SWIFT void dispatch_workloop_set_os_workgroup(dispatch_workloop_t workloop, os_workgroup_t workgroup); diff --git a/libdispatch.xcodeproj/project.pbxproj b/libdispatch.xcodeproj/project.pbxproj index f0ff3996a..6a9622491 100644 --- a/libdispatch.xcodeproj/project.pbxproj +++ b/libdispatch.xcodeproj/project.pbxproj @@ -29,6 +29,17 @@ name = libdispatch_tests; productName = libdispatch_tests; }; + 4BB242F02935DC4A0081B587 /* libdispatch_exclavekit */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4BB242F72935DC4A0081B587 /* Build configuration list 
for PBXAggregateTarget "libdispatch_exclavekit" */; + buildPhases = ( + ); + dependencies = ( + 4B3A88F129475908009DA175 /* PBXTargetDependency */, + ); + name = libdispatch_exclavekit; + productName = libdispatch_exclavekit; + }; 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */ = { isa = PBXAggregateTarget; buildConfigurationList = 6E2ECAFE1C49C30000A30A32 /* Build configuration list for PBXAggregateTarget "libdispatch_kernel" */; @@ -98,6 +109,9 @@ /* End PBXAggregateTarget section */ /* Begin PBXBuildFile section */ + 0117620D299D50F8007B4D84 /* dispatch_swift_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 01176208299D5059007B4D84 /* dispatch_swift_shims.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 018F6A4A29B40480005E6627 /* dispatch_swift_shims.h in Headers */ = {isa = PBXBuildFile; fileRef = 01176208299D5059007B4D84 /* dispatch_swift_shims.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 01A5F2252950589D00AFDFF4 /* Dispatch.apinotes in Headers */ = {isa = PBXBuildFile; fileRef = 01A5F2202950587900AFDFF4 /* Dispatch.apinotes */; settings = {ATTRIBUTES = (Public, ); }; }; 2BBF5A60154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; 2BBF5A61154B64D8002B20F9 /* allocator_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */; }; 2BBF5A63154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; @@ -105,6 +119,13 @@ 2BBF5A66154B64F5002B20F9 /* allocator.c in Sources */ = {isa = PBXBuildFile; fileRef = 2BBF5A62154B64F5002B20F9 /* allocator.c */; }; 2BE17C6418EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; 2BE17C6518EA305E002CA4E8 /* layout_private.h in Headers */ = {isa = PBXBuildFile; fileRef = 
2BE17C6318EA305E002CA4E8 /* layout_private.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 4B3CB0FF2941DBA0005EE04B /* once.c in Sources */ = {isa = PBXBuildFile; fileRef = 4B3CB0FB2941DB9F005EE04B /* once.c */; }; + 4B3CB1122941DC19005EE04B /* dispatch.h in Headers */ = {isa = PBXBuildFile; fileRef = 4B3CB1102941DC12005EE04B /* dispatch.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4B3CB1142941DC21005EE04B /* once.h in Headers */ = {isa = PBXBuildFile; fileRef = 4B3CB1112941DC12005EE04B /* once.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4B3CB1152941DC29005EE04B /* internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 4B3CB10A2941DBC6005EE04B /* internal.h */; }; + 4B3CB1162941DDC0005EE04B /* base.h in Headers */ = {isa = PBXBuildFile; fileRef = 72CC942F0ECCD8750031B751 /* base.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4B3CB1182941E020005EE04B /* object.h in Headers */ = {isa = PBXBuildFile; fileRef = 4B3CB1172941E01E005EE04B /* object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4BD3E9C62954424F00D73EA2 /* module.modulemap in Headers */ = {isa = PBXBuildFile; fileRef = 4BD3E9C12954422000D73EA2 /* module.modulemap */; settings = {ATTRIBUTES = (Public, ); }; }; 5A0095A210F274B0000E2A31 /* io_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A0095A110F274B0000E2A31 /* io_internal.h */; }; 5A27262610F26F1900751FBC /* io.c in Sources */ = {isa = PBXBuildFile; fileRef = 5A27262510F26F1900751FBC /* io.c */; }; 5A5D13AC0F6B280500197CC3 /* semaphore_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */; }; @@ -624,6 +645,13 @@ remoteGlobalIDString = E454824F16C1F0FE0042EC2D; remoteInfo = apply_bench; }; + 4B3A88F029475908009DA175 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4B3CB0E42941D59E005EE04B; + remoteInfo = "libdispatch exclavekit"; + }; 
6E2ECB011C49C31200A30A32 /* PBXContainerItemProxy */ = { isa = PBXContainerItemProxy; containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; @@ -746,10 +774,20 @@ /* End PBXCopyFilesBuildPhase section */ /* Begin PBXFileReference section */ + 01176208299D5059007B4D84 /* dispatch_swift_shims.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = dispatch_swift_shims.h; sourceTree = ""; }; + 01A5F2202950587900AFDFF4 /* Dispatch.apinotes */ = {isa = PBXFileReference; lastKnownFileType = text.apinotes; path = Dispatch.apinotes; sourceTree = ""; }; 2BBF5A5F154B64D8002B20F9 /* allocator_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator_internal.h; sourceTree = ""; }; 2BBF5A62154B64F5002B20F9 /* allocator.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = allocator.c; sourceTree = ""; }; 2BE17C6318EA305E002CA4E8 /* layout_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layout_private.h; sourceTree = ""; }; 4552536E19B1384900B88766 /* libdispatchtest.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = libdispatchtest.xcodeproj; path = tests/libdispatchtest.xcodeproj; sourceTree = ""; }; + 4B3CB0D42941C987005EE04B /* libdispatch-exclavekit.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = "libdispatch-exclavekit.xcconfig"; sourceTree = ""; }; + 4B3CB0E52941D59E005EE04B /* libdispatch.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libdispatch.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + 4B3CB0FB2941DB9F005EE04B /* once.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = once.c; sourceTree = ""; }; + 4B3CB10A2941DBC6005EE04B /* internal.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = internal.h; 
sourceTree = ""; }; + 4B3CB1102941DC12005EE04B /* dispatch.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = dispatch.h; path = dispatch/dispatch.h; sourceTree = ""; }; + 4B3CB1112941DC12005EE04B /* once.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = once.h; path = dispatch/once.h; sourceTree = ""; }; + 4B3CB1172941E01E005EE04B /* object.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = object.h; path = dispatch/object.h; sourceTree = ""; }; + 4BD3E9C12954422000D73EA2 /* module.modulemap */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.module-map"; name = module.modulemap; path = dispatch/module.modulemap; sourceTree = ""; }; 5A0095A110F274B0000E2A31 /* io_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = io_internal.h; sourceTree = ""; }; 5A27262510F26F1900751FBC /* io.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; lineEnding = 0; path = io.c; sourceTree = ""; xcLanguageSpecificationIdentifier = xcode.lang.c; }; 5A5D13AB0F6B280500197CC3 /* semaphore_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = semaphore_internal.h; sourceTree = ""; }; @@ -994,6 +1032,13 @@ /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ + 4B3CB0E32941D59E005EE04B /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; 6E040C601C499B1B00411A2E /* Frameworks */ = { isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; @@ -1066,6 +1111,7 @@ E43570B8126E93380097AB9F /* provider.d */, 6E5ACCAF1D3BF2A0007DA2B4 /* event */, 6EF0B2641BA8C3A0007FA4F6 /* firehose */, + 4B3CB0FA2941DB9F005EE04B /* exclavekit */, ); name = "Dispatch Source"; path = src; @@ -1083,6 +1129,7 @@ 6E040C631C499B1B00411A2E /* libfirehose_kernel.a */, 
6EB4E4091BA8BCAD00D7B9D2 /* libfirehose_server.a */, E43B889A2241F19000215272 /* libdispatch.dylib */, + 4B3CB0E52941D59E005EE04B /* libdispatch.dylib */, ); name = Products; sourceTree = ""; @@ -1098,6 +1145,34 @@ name = Products; sourceTree = ""; }; + 4B3CB0FA2941DB9F005EE04B /* exclavekit */ = { + isa = PBXGroup; + children = ( + 4B3CB0FB2941DB9F005EE04B /* once.c */, + ); + path = exclavekit; + sourceTree = ""; + }; + 4B3CB1032941DBBE005EE04B /* exclavekit */ = { + isa = PBXGroup; + children = ( + 4B3CB1102941DC12005EE04B /* dispatch.h */, + 4B3CB1172941E01E005EE04B /* object.h */, + 4B3CB1112941DC12005EE04B /* once.h */, + 4BD3E9C12954422000D73EA2 /* module.modulemap */, + ); + name = exclavekit; + path = src/exclavekit; + sourceTree = SOURCE_ROOT; + }; + 4B3CB1092941DBC6005EE04B /* exclavekit */ = { + isa = PBXGroup; + children = ( + 4B3CB10A2941DBC6005EE04B /* internal.h */, + ); + path = exclavekit; + sourceTree = ""; + }; 6E5ACCAE1D3BF27F007DA2B4 /* event */ = { isa = PBXGroup; children = ( @@ -1293,6 +1368,7 @@ E40041A9125D70590022B135 /* libdispatch-resolved.xcconfig */, C01866BE1C59735B0040FC07 /* libdispatch-mp-static.xcconfig */, E4B515D9164B2E9B00E003AF /* libdispatch-introspection.xcconfig */, + 4B3CB0D42941C987005EE04B /* libdispatch-exclavekit.xcconfig */, 6EB4E4421BA8BD7800D7B9D2 /* libfirehose.xcconfig */, 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */, E422DA3614D2A7E7003C6EE4 /* libdispatch.aliases */, @@ -1401,6 +1477,9 @@ 96032E4C0F5CC8D100241C5F /* time.h */, B6095819221DFA2A00F39D1F /* workloop.h */, E421E5F81716ADA10090DC9B /* introspection.h */, + 01A5F2202950587900AFDFF4 /* Dispatch.apinotes */, + 4B3CB1032941DBBE005EE04B /* exclavekit */, + 01176208299D5059007B4D84 /* dispatch_swift_shims.h */, ); name = "Dispatch Public Headers"; path = dispatch; @@ -1449,6 +1528,7 @@ 6E5ACCAE1D3BF27F007DA2B4 /* event */, 6EF0B2661BA8C43D007FA4F6 /* firehose */, FC1832A0109923B3003403D5 /* shims */, + 4B3CB1092941DBC6005EE04B /* exclavekit 
*/, ); name = "Dispatch Project Headers"; path = src; @@ -1457,6 +1537,19 @@ /* End PBXGroup section */ /* Begin PBXHeadersBuildPhase section */ + 4B3CB0E12941D59E005EE04B /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 4B3CB1122941DC19005EE04B /* dispatch.h in Headers */, + 4B3CB1152941DC29005EE04B /* internal.h in Headers */, + 4B3CB1182941E020005EE04B /* object.h in Headers */, + 4B3CB1142941DC21005EE04B /* once.h in Headers */, + 4BD3E9C62954424F00D73EA2 /* module.modulemap in Headers */, + 4B3CB1162941DDC0005EE04B /* base.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 6E040C611C499B1B00411A2E /* Headers */ = { isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; @@ -1482,8 +1575,10 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 0117620D299D50F8007B4D84 /* dispatch_swift_shims.h in Headers */, 9BA7221523E293CB0058472E /* workgroup_parallel.h in Headers */, 9B8ED5792350C79100507521 /* workgroup_object.h in Headers */, + 01A5F2252950589D00AFDFF4 /* Dispatch.apinotes in Headers */, FC7BEDA50E8361E600161930 /* dispatch.h in Headers */, 72CC94300ECCD8750031B751 /* base.h in Headers */, 961B99500F3E85C30006BC96 /* object.h in Headers */, @@ -1565,6 +1660,7 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 018F6A4A29B40480005E6627 /* dispatch_swift_shims.h in Headers */, 9BA7221723E294140058472E /* workgroup_parallel.h in Headers */, E43B88322241F19000215272 /* dispatch.h in Headers */, E43B88332241F19000215272 /* base.h in Headers */, @@ -1793,6 +1889,23 @@ /* End PBXLegacyTarget section */ /* Begin PBXNativeTarget section */ + 4B3CB0E42941D59E005EE04B /* libdispatch exclavekit */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4B3CB0E62941D59E005EE04B /* Build configuration list for PBXNativeTarget "libdispatch exclavekit" */; + buildPhases = ( + 4B3CB0E12941D59E005EE04B /* Headers */, + 4B3CB0E22941D59E005EE04B /* Sources */, + 
4B3CB0E32941D59E005EE04B /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "libdispatch exclavekit"; + productName = "libdispatch exclavekit"; + productReference = 4B3CB0E52941D59E005EE04B /* libdispatch.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; 6E040C621C499B1B00411A2E /* libfirehose_kernel */ = { isa = PBXNativeTarget; buildConfigurationList = 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */; @@ -1973,6 +2086,12 @@ 4552540A19B1389700B88766 = { ProvisioningStyle = Manual; }; + 4B3CB0E42941D59E005EE04B = { + CreatedOnToolsVersion = 14.3; + }; + 4BB242F02935DC4A0081B587 = { + CreatedOnToolsVersion = 14.3; + }; 6E040C621C499B1B00411A2E = { CreatedOnToolsVersion = 7.3; ProvisioningStyle = Manual; @@ -2047,10 +2166,12 @@ E4B51595164B2DA300E003AF /* libdispatch introspection */, E43B88262241F19000215272 /* libdispatch_driverkit */, C01866A41C5973210040FC07 /* libdispatch mp static */, + 4B3CB0E42941D59E005EE04B /* libdispatch exclavekit */, 6E43553E215B5D9D00C13177 /* libdispatch_introspection */, 6EA833C22162D6380045EFDC /* libdispatch_introspection_Sim */, 3F3C9326128E637B0042B1F7 /* libdispatch_Sim */, 6E2ECAFD1C49C2FF00A30A32 /* libdispatch_kernel */, + 4BB242F02935DC4A0081B587 /* libdispatch_exclavekit */, C927F35A10FD7F0600C5AB8B /* libdispatch_tools */, 9BEBA56F20127D3300E6FD0D /* libdispatch_tools_Sim */, 4552540A19B1389700B88766 /* libdispatch_tests */, @@ -2426,6 +2547,14 @@ /* End PBXShellScriptBuildPhase section */ /* Begin PBXSourcesBuildPhase section */ + 4B3CB0E22941D59E005EE04B /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4B3CB0FF2941DBA0005EE04B /* once.c in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; 6E040C5F1C499B1B00411A2E /* Sources */ = { isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; @@ -2712,6 +2841,11 @@ /* End PBXSourcesBuildPhase section */ /* Begin 
PBXTargetDependency section */ + 4B3A88F129475908009DA175 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4B3CB0E42941D59E005EE04B /* libdispatch exclavekit */; + targetProxy = 4B3A88F029475908009DA175 /* PBXContainerItemProxy */; + }; 6E2ECB021C49C31200A30A32 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 6E040C621C499B1B00411A2E /* libfirehose_kernel */; @@ -2830,6 +2964,34 @@ }; name = Debug; }; + 4B3CB0E72941D59E005EE04B /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 4B3CB0D42941C987005EE04B /* libdispatch-exclavekit.xcconfig */; + buildSettings = { + }; + name = Release; + }; + 4B3CB0E82941D59E005EE04B /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 4B3CB0D42941C987005EE04B /* libdispatch-exclavekit.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 4BB242F12935DC4A0081B587 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; + 4BB242F22935DC4A0081B587 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; 6E040C641C499B1B00411A2E /* Release */ = { isa = XCBuildConfiguration; baseConfigurationReference = 6E040C721C499C3600411A2E /* libfirehose_kernel.xcconfig */; @@ -2977,6 +3139,7 @@ DRIVERKITROOT = /System/DriverKit; SDKROOT = driverkit.internal; SUPPORTED_PLATFORMS = macosx; + SYSTEM_HEADER_SEARCH_PATHS = "$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/include/c++/v1 $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include"; }; name = Release; }; @@ -2989,6 +3152,7 @@ DRIVERKITROOT = /System/DriverKit; SDKROOT = driverkit.internal; SUPPORTED_PLATFORMS = macosx; + SYSTEM_HEADER_SEARCH_PATHS = 
"$(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/local/include $(SDKROOT)/$(SDK_INSTALL_ROOT)/usr/include/c++/v1 $(SDKROOT)/$(SDK_INSTALL_HEADERS_ROOT)/usr/include"; }; name = Debug; }; @@ -3127,6 +3291,24 @@ defaultConfigurationIsVisible = 0; defaultConfigurationName = Release; }; + 4B3CB0E62941D59E005EE04B /* Build configuration list for PBXNativeTarget "libdispatch exclavekit" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4B3CB0E72941D59E005EE04B /* Release */, + 4B3CB0E82941D59E005EE04B /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4BB242F72935DC4A0081B587 /* Build configuration list for PBXAggregateTarget "libdispatch_exclavekit" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4BB242F12935DC4A0081B587 /* Release */, + 4BB242F22935DC4A0081B587 /* Debug */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; 6E040C6A1C499B1B00411A2E /* Build configuration list for PBXNativeTarget "libfirehose_kernel" */ = { isa = XCConfigurationList; buildConfigurations = ( diff --git a/os/eventlink_private.h b/os/eventlink_private.h index eb55a745b..b612ce1c5 100644 --- a/os/eventlink_private.h +++ b/os/eventlink_private.h @@ -26,7 +26,7 @@ OS_OBJECT_ASSUME_NONNULL_BEGIN typedef struct os_eventlink_s *os_eventlink_t; #else API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) -OS_OBJECT_DECL_CLASS(os_eventlink); +OS_OBJECT_DECL_SENDABLE_CLASS(os_eventlink); #endif /*! diff --git a/os/firehose_server_private.h b/os/firehose_server_private.h index bab44824b..15d6c8450 100644 --- a/os/firehose_server_private.h +++ b/os/firehose_server_private.h @@ -43,7 +43,7 @@ * Firehose client objects are os_object_t's, and it's legal to retain/release * them with os_retain / os_release. 
*/ -OS_OBJECT_DECL_CLASS(firehose_client); +OS_OBJECT_DECL_SENDABLE_CLASS(firehose_client); /*! * @typedef firehose_event_t diff --git a/os/object.h b/os/object.h index 5f2868d1b..055889500 100644 --- a/os/object.h +++ b/os/object.h @@ -194,13 +194,29 @@ #define OS_OBJECT_USE_OBJC_RETAIN_RELEASE 0 #endif #endif + +#if __has_attribute(__swift_attr__) +#define OS_OBJECT_SWIFT_SENDABLE __attribute__((swift_attr("@Sendable"))) +#define OS_OBJECT_SWIFT_HAS_MISSING_DESIGNATED_INIT \ + __attribute__((swift_attr("@_hasMissingDesignatedInitializers"))) +#else +#define OS_OBJECT_SWIFT_SENDABLE +#define OS_OBJECT_SWIFT_HAS_MISSING_DESIGNATED_INIT +#endif // __has_attribute(__swift_attr__) + #if OS_OBJECT_SWIFT3 #define OS_OBJECT_DECL_SWIFT(name) \ OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, NSObject) +#define OS_OBJECT_DECL_SENDABLE_SWIFT(name) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_SWIFT_SENDABLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, NSObject) #define OS_OBJECT_DECL_SUBCLASS_SWIFT(name, super) \ OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE \ OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) +#define OS_OBJECT_DECL_SENDABLE_SUBCLASS_SWIFT(name, super) \ + OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_SWIFT_SENDABLE \ + OS_OBJECT_DECL_IMPL_CLASS(name, OS_OBJECT_CLASS(super)) #endif // OS_OBJECT_SWIFT3 OS_EXPORT OS_OBJECT_OBJC_RUNTIME_VISIBLE OS_OBJECT_DECL_BASE(object, NSObject); @@ -221,12 +237,18 @@ OS_OBJECT_DECL_BASE(object, NSObject); #if OS_OBJECT_SWIFT3 #define OS_OBJECT_DECL_CLASS(name) \ OS_OBJECT_DECL_SUBCLASS_SWIFT(name, object) +#define OS_OBJECT_DECL_SENDABLE_CLASS(name) \ + OS_OBJECT_DECL_SENDABLE_SUBCLASS_SWIFT(name, object) #elif OS_OBJECT_USE_OBJC #define OS_OBJECT_DECL_CLASS(name) \ OS_OBJECT_DECL(name) +#define OS_OBJECT_DECL_SENDABLE_CLASS(name) \ + OS_OBJECT_DECL(name) #else #define OS_OBJECT_DECL_CLASS(name) \ typedef struct name##_s *name##_t +#define OS_OBJECT_DECL_SENDABLE_CLASS(name) \ + typedef 
struct name##_s *name##_t #endif #if OS_OBJECT_USE_OBJC diff --git a/os/voucher_private.h b/os/voucher_private.h index 1a2383a6e..9003c5b7b 100644 --- a/os/voucher_private.h +++ b/os/voucher_private.h @@ -71,7 +71,7 @@ __BEGIN_DECLS * Voucher objects are os_objects (c.f. ). They are memory-managed * with the os_retain()/os_release() functions or -[retain]/-[release] methods. */ -OS_OBJECT_DECL_CLASS(voucher); +OS_OBJECT_DECL_SENDABLE_CLASS(voucher); /*! * @const VOUCHER_NULL diff --git a/os/workgroup_interval.h b/os/workgroup_interval.h index 0edc12928..8eff3a84b 100644 --- a/os/workgroup_interval.h +++ b/os/workgroup_interval.h @@ -108,7 +108,7 @@ os_workgroup_interval_start(os_workgroup_interval_t wg, uint64_t start, uint64_t * @function os_workgroup_interval_update * * @abstract - * Updates an already started interval workgroup to have the new + * Updates an already started workgroup interval to have the new * deadline specified. This function is real time safe. * * This function will return an error in the following cases: @@ -150,7 +150,6 @@ os_workgroup_interval_update(os_workgroup_interval_t wg, uint64_t deadline, * * @param data * This field is currently unused and should be NULL - * */ API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0)) OS_REFINED_FOR_SWIFT OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT diff --git a/os/workgroup_interval_private.h b/os/workgroup_interval_private.h index 2c08ce511..a1c6275ba 100644 --- a/os/workgroup_interval_private.h +++ b/os/workgroup_interval_private.h @@ -68,6 +68,37 @@ int os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, os_workgroup_interval_type_t type); +/*! + * @enum os_workgroup_telemetry_flavor_t + * + * @abstract + * Flavors of os_workgroup telemetry that can be queried, each + * corresponding to a telemetry structure type. + */ +OS_ENUM(os_workgroup_telemetry_flavor, uint16_t, + /*! 
+ * @const OS_WORKGROUP_TELEMETRY_FLAVOR_BASIC + * Telemetry as specified per the fields in os_workgroup_telemetry_basic_s + */ + OS_WORKGROUP_TELEMETRY_FLAVOR_BASIC = 1, +); + +/*! + * @function os_workgroup_attr_set_telemetry_flavor + * + * @abstract + * Sets a telemetry flavor in the workgroup attribute which will determine + * the type of telemetry structure that the workgroup can query data for. + * + * Returns EINVAL if the telemetry flavor is invalid or if the workgroup + * attribute was not correctly initialized. + */ +SPI_AVAILABLE(macos(14.0), ios(17.0), tvos(17.0), watchos(10.0)) +OS_WORKGROUP_EXPORT OS_WORKGROUP_WARN_RESULT +int +os_workgroup_attr_set_telemetry_flavor(os_workgroup_attr_t wga, + os_workgroup_telemetry_flavor_t flavor); + /* * @typedef os_workgroup_interval_data_flags_t * @@ -132,6 +163,91 @@ int os_workgroup_interval_data_set_flags(os_workgroup_interval_data_t data, os_workgroup_interval_data_flags_t flags); +/*! + * @function os_workgroup_interval_data_set_flags + * + * @abstract + * Setter for os_workgroup_interval_data_t that specifies a telemetry flavor and + * a pointer to a telemetry structure of the corresponding flavor where telemetry + * data should be written out. Telemetry data will be written out to the pointer on + * a successful call to os_workgroup_interval_start(), + * os_workgroup_interval_update(), or os_workgroup_interval_finish() where the + * os_workgroup_interval_data_t was passed as a parameter. + * + * @param data + * Pointer to workgroup interval data structure initialized with + * OS_WORKGROUP_INTERVAL_DATA_INITIALIZER. + * + * @param flavor + * Specifies the kind of the telemetry to be retrieved. + * + * @param telemetry + * Pointer to a telemetry struct of the specified flavor where telemetry data + * should be written. + * + * @param size + * Size of the telemetry struct where telemetry data should be written. 
+ * + * @result + * EINVAL is returned if the interval data passed in hasn't been initialized + * or if an unknown or invalid combination of flavor and size values are passed. + */ +SPI_AVAILABLE(macos(14.0), ios(17.0), tvos(17.0), watchos(10.0)) +OS_WORKGROUP_EXPORT +int +os_workgroup_interval_data_set_telemetry(os_workgroup_interval_data_t data, + os_workgroup_telemetry_flavor_t flavor, void *telemetry, size_t size); + +/*! + * @struct os_workgroup_telemetry_basic_s + * + * @abstract + * A structure containing telemetry data for a workgroup. Fields are cumulative and + * reflect aggregate statistics from threads while they are joined to the workgroup. + * + * @field wg_external_wakeups + * Number of times a thread joined to the workgroup was woken up by an "external" + * thread not joined to the workgroup. + * + * @field wg_total_wakeups + * Number of times a thread joined to the workgroup blocked and was woken up. + * + * @field wg_user_time_mach + * Time in Mach units that threads joined to the workgroup spent on-core running + * in user-space. + * + * @field wg_system_time_mach + * Time in Mach units that threads joined to the workgroup spent on-core running + * in the kernel. + * + * @field wg_cycles + * Number of CPU cycles consumed by threads in the workgroup. Always set to 0 if not + * supported by the underlying hardware. + * + * @field wg_instructions + * Number of instructions executed by threads in the workgroup. Always set to 0 if not + * supported by the underlying hardware. + */ +struct os_workgroup_telemetry_basic_s { + uint32_t wg_external_wakeups; + uint32_t wg_total_wakeups; + uint64_t wg_user_time_mach; + uint64_t wg_system_time_mach; + uint64_t wg_cycles; + uint64_t wg_instructions; +}; + +/*! + * @typedef os_workgroup_telemetry_basic, os_workgroup_telemetry_basic_t + * + * @abstract + * A structure containing basic telemetry data fields which represent aggregate + * statistics for the workgroup. 
+ */ +typedef struct os_workgroup_telemetry_basic_s os_workgroup_telemetry_basic_s; +typedef struct os_workgroup_telemetry_basic_s *os_workgroup_telemetry_basic_t; +#define OS_WORKGROUP_TELEMETRY_INITIALIZER { 0 } + /* * @function os_workgroup_interval_create * @@ -207,6 +323,8 @@ os_workgroup_interval_create(const char * _Nullable name, os_clockid_t clock, * other threads in the process (see os_workgroup_attr_flags_t). * The interval type specified by the attributes will be used as a fallback in * case the provided workload identifier is unknown. + * Any telemetry flavor specified by the attributes will also be used, + * regardless of whether the workload identifier is known. * See discussion for the detailed rules used to combine the information * specified by the `workload_id` and `wga` arguments. * diff --git a/private/darwin/module.modulemap b/private/darwin/module.modulemap index ceb963a1f..2991efe5a 100644 --- a/private/darwin/module.modulemap +++ b/private/darwin/module.modulemap @@ -1,10 +1,13 @@ -module DispatchPrivate [system] [extern_c] { +module DispatchPrivate [system] { umbrella header "private.h" + exclude header "channel_private.h" + exclude header "introspection_private.h" exclude header "mach_private.h" + exclude header "swift_concurrency_private.h" export * } -module DispatchIntrospectionPrivate [system] [extern_c] { +module DispatchIntrospectionPrivate [system] { header "introspection_private.h" export * } diff --git a/private/generic/module.modulemap b/private/generic/module.modulemap index ceb963a1f..94fca2ff1 100644 --- a/private/generic/module.modulemap +++ b/private/generic/module.modulemap @@ -1,10 +1,10 @@ -module DispatchPrivate [system] [extern_c] { +module DispatchPrivate [system] { umbrella header "private.h" exclude header "mach_private.h" export * } -module DispatchIntrospectionPrivate [system] [extern_c] { +module DispatchIntrospectionPrivate [system] { header "introspection_private.h" export * } diff --git 
a/private/mach_private.h b/private/mach_private.h index 05c42ed5e..b945b1159 100644 --- a/private/mach_private.h +++ b/private/mach_private.h @@ -32,13 +32,13 @@ #include // for HeaderDoc #endif -__BEGIN_DECLS - #define DISPATCH_MACH_SPI_VERSION 20200229 #include #include +__BEGIN_DECLS + DISPATCH_ASSUME_NONNULL_BEGIN /*! diff --git a/private/queue_private.h b/private/queue_private.h index 199fcaeed..5f5306d89 100644 --- a/private/queue_private.h +++ b/private/queue_private.h @@ -535,6 +535,7 @@ dispatch_swift_job_should_yield(void); */ SPI_AVAILABLE(macos(12.0), ios(15.0)) DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL2 +DISPATCH_REFINED_FOR_SWIFT void dispatch_async_swift_job(dispatch_queue_t queue, void *swift_job, qos_class_t qos); diff --git a/private/source_private.h b/private/source_private.h index fb0c28da2..653c84838 100644 --- a/private/source_private.h +++ b/private/source_private.h @@ -78,7 +78,7 @@ DISPATCH_SOURCE_TYPE_DECL(interval); * @const DISPATCH_SOURCE_TYPE_VFS * @discussion Apple-internal dispatch source that monitors for vfs events * defined by dispatch_vfs_flags_t. - * The handle is a process identifier (pid_t). + * The handle is required to be NULL. 
*/ #define DISPATCH_SOURCE_TYPE_VFS (&_dispatch_source_type_vfs) API_AVAILABLE(macos(10.6), ios(4.0)) DISPATCH_LINUX_UNAVAILABLE() diff --git a/src/apply.c b/src/apply.c index 1a9b563a0..9520196fa 100644 --- a/src/apply.c +++ b/src/apply.c @@ -123,11 +123,11 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) * we can allocate an index simply by incrementing */ uint32_t worker_index = 0; - worker_index = os_atomic_inc_orig2o(da, da_worker_index, relaxed); + worker_index = os_atomic_inc_orig(&da->da_worker_index, relaxed); _dispatch_apply_set_attr_behavior(da->da_attr, worker_index); - idx = os_atomic_inc_orig2o(da, da_index, acquire); + idx = os_atomic_inc_orig(&da->da_index, acquire); if (unlikely(idx >= iter)) goto out; /* * da_dc is only safe to access once the 'index lock' has been acquired @@ -178,7 +178,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) } _dispatch_perfmon_workitem_inc(); done++; - idx = os_atomic_inc_orig2o(da, da_index, relaxed); + idx = os_atomic_inc_orig(&da->da_index, relaxed); }); } while (likely(idx < iter)); @@ -192,7 +192,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) /* The thread that finished the last workitem wakes up the possibly waiting * thread that called dispatch_apply. They could be one and the same. 
*/ - if (os_atomic_sub2o(da, da_todo, done, release) == 0) { + if (os_atomic_sub(&da->da_todo, done, release) == 0) { _dispatch_thread_event_signal(&da->da_event); } out: @@ -202,7 +202,7 @@ _dispatch_apply_invoke2(dispatch_apply_t da, long invoke_flags) _dispatch_thread_event_wait(&da->da_event); _dispatch_thread_event_destroy(&da->da_event); } - if (os_atomic_dec2o(da, da_thr_cnt, release) == 0) { + if (os_atomic_dec(&da->da_thr_cnt, release) == 0) { _dispatch_apply_destroy(da); } } @@ -321,7 +321,7 @@ _dispatch_queue_try_reserve_apply_width(dispatch_queue_t dq, int32_t da_width) return 0; } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { width = (int32_t)_dq_state_available_width(old_state); if (unlikely(!width)) { os_atomic_rmw_loop_give_up(return 0); @@ -343,7 +343,7 @@ _dispatch_queue_relinquish_width(dispatch_queue_t top_dq, dispatch_queue_t dq = top_dq; while (dq != stop_dq) { - os_atomic_sub2o(dq, dq_state, delta, relaxed); + os_atomic_sub(&dq->dq_state, delta, relaxed); dq = dq->do_targetq; } } diff --git a/src/data.c b/src/data.c index 0a3cb1aa9..e9d580607 100644 --- a/src/data.c +++ b/src/data.c @@ -534,7 +534,7 @@ dispatch_data_get_flattened_bytes_4libxpc(dispatch_data_t dd) void *flatbuf = _dispatch_data_flatten(dd); if (likely(flatbuf)) { // we need a release so that readers see the content of the buffer - if (unlikely(!os_atomic_cmpxchgv2o(dd, buf, NULL, flatbuf, + if (unlikely(!os_atomic_cmpxchgv(&dd->buf, NULL, flatbuf, &buffer, release))) { free(flatbuf); } else { diff --git a/src/event/event.c b/src/event/event.c index edda80a23..4dfe4a45d 100644 --- a/src/event/event.c +++ b/src/event/event.c @@ -853,7 +853,7 @@ _dispatch_timer_unote_register(dispatch_timer_source_refs_t dt, dispatch_assert(_dispatch_unote_wlh(dt) == NULL); _dispatch_unote_state_set(dt, DISPATCH_WLH_ANON, 0); } - if (os_atomic_load2o(dt, dt_pending_config, relaxed)) { + if 
(os_atomic_load(&dt->dt_pending_config, relaxed)) { _dispatch_timer_unote_configure(dt); } } @@ -863,7 +863,7 @@ _dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt) { dispatch_timer_config_t dtc; - dtc = os_atomic_xchg2o(dt, dt_pending_config, NULL, dependency); + dtc = os_atomic_xchg(&dt->dt_pending_config, NULL, dependency); if (dtc->dtc_clock != _dispatch_timer_flags_to_clock(dt->du_timer_flags)) { dt->du_timer_flags &= ~_DISPATCH_TIMER_CLOCK_MASK; dt->du_timer_flags |= _dispatch_timer_flags_from_clock(dtc->dtc_clock); @@ -872,7 +872,7 @@ _dispatch_timer_unote_configure(dispatch_timer_source_refs_t dt) free(dtc); // Clear any pending data that might have accumulated on // older timer params - os_atomic_store2o(dt, ds_pending_data, 0, relaxed); + os_atomic_store(&dt->ds_pending_data, 0, relaxed); if (_dispatch_unote_armed(dt)) { return _dispatch_timer_unote_resume(dt); @@ -1055,13 +1055,13 @@ _dispatch_timers_run(dispatch_timer_heap_t dth, uint32_t tidx, _dispatch_timer_unote_disarm(dr, dth); // +2 is consumed by _merge_evt() _dispatch_wlh_release(_dispatch_unote_wlh(dr)); _dispatch_unote_state_set(dr, DU_STATE_UNREGISTERED); - os_atomic_store2o(dr, ds_pending_data, 2, relaxed); + os_atomic_store(&dr->ds_pending_data, 2, relaxed); _dispatch_trace_timer_fire(dr, 1, 1); dux_merge_evt(dr, EV_ONESHOT, 0, 0); continue; } - if (os_atomic_load2o(dr, dt_pending_config, relaxed)) { + if (os_atomic_load(&dr->dt_pending_config, relaxed)) { _dispatch_timer_unote_configure(dr); continue; } @@ -1085,9 +1085,9 @@ _dispatch_timers_run(dispatch_timer_heap_t dth, uint32_t tidx, // to make sure _dispatch_source_timer_data() will recompute the proper // number of fired events when the source is resumed, and also use the // MISSED marker for this similar purpose. 
- if (unlikely(os_atomic_load2o(dr, ds_pending_data, relaxed))) { + if (unlikely(os_atomic_load(&dr->ds_pending_data, relaxed))) { _dispatch_timer_unote_disarm(dr, dth); - pending = os_atomic_or_orig2o(dr, ds_pending_data, + pending = os_atomic_or_orig(&dr->ds_pending_data, DISPATCH_TIMER_DISARMED_MARKER, relaxed); } else { pending = _dispatch_timer_unote_compute_missed(dr, now, 0) << 1; @@ -1098,11 +1098,11 @@ _dispatch_timers_run(dispatch_timer_heap_t dth, uint32_t tidx, // armed, we need to take new retain counts _dispatch_retain_unote_owner(dr); _dispatch_timer_unote_arm(dr, dth, tidx); - os_atomic_store2o(dr, ds_pending_data, pending, relaxed); + os_atomic_store(&dr->ds_pending_data, pending, relaxed); } else { _dispatch_timer_unote_disarm(dr, dth); pending |= DISPATCH_TIMER_DISARMED_MARKER; - os_atomic_store2o(dr, ds_pending_data, pending, release); + os_atomic_store(&dr->ds_pending_data, pending, release); } } _dispatch_trace_timer_fire(dr, pending >> 1, pending >> 1); diff --git a/src/event/event_epoll.c b/src/event/event_epoll.c index e3578a095..2cd5379f8 100644 --- a/src/event/event_epoll.c +++ b/src/event/event_epoll.c @@ -512,7 +512,7 @@ _dispatch_event_merge_signal(dispatch_muxnote_t dmn) // consumed by dux_merge_evt() _dispatch_retain_unote_owner(du); dispatch_assert(!dux_needs_rearm(du._du)); - os_atomic_store2o(du._dr, ds_pending_data, 1, relaxed); + os_atomic_store(&du._dr->ds_pending_data, 1, relaxed); dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_CLEAR, 1, 0); } } else { @@ -560,7 +560,7 @@ _dispatch_event_merge_hangup(dispatch_unote_t du) du_state &= ~DU_STATE_ARMED; _dispatch_unote_state_set(du, du_state); uintptr_t data = 0; // EOF - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); dux_merge_evt(du._du, EV_DELETE|EV_DISPATCH, data, 0); } @@ -580,7 +580,7 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); 
dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } @@ -593,7 +593,7 @@ _dispatch_event_merge_fd(dispatch_muxnote_t dmn, uint32_t events) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); dux_merge_evt(du._du, EV_ADD|EV_ENABLE|EV_DISPATCH, data, 0); } } diff --git a/src/event/event_kevent.c b/src/event/event_kevent.c index 1b5dd0587..311075048 100644 --- a/src/event/event_kevent.c +++ b/src/event/event_kevent.c @@ -492,12 +492,12 @@ _dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke) // zero bytes happens when EV_EOF is set dispatch_assert(ke->data >= 0l); data = (unsigned long)ke->data; - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); break; case DISPATCH_UNOTE_ACTION_SOURCE_ADD_DATA: data = (unsigned long)ke->data; - if (data) os_atomic_add2o(du._dr, ds_pending_data, data, relaxed); + if (data) os_atomic_add(&du._dr->ds_pending_data, data, relaxed); break; case DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS: @@ -508,16 +508,16 @@ _dispatch_kevent_merge(dispatch_unote_t du, dispatch_kevent_t ke) // We combine the data and status into a single 64-bit value. 
value = DISPATCH_SOURCE_COMBINE_DATA_AND_STATUS(data, status); - os_atomic_rmw_loop2o(du._dr, ds_pending_data, odata, ndata, relaxed, { + os_atomic_rmw_loop(&du._dr->ds_pending_data, odata, ndata, relaxed, { ndata = DISPATCH_SOURCE_GET_DATA(odata) | value; }); #if HAVE_MACH } else if (du._du->du_filter == EVFILT_MACHPORT) { data = DISPATCH_MACH_RECV_MESSAGE; - os_atomic_store2o(du._dr, ds_pending_data, data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, data, relaxed); #endif } else { - if (data) os_atomic_or2o(du._dr, ds_pending_data, data, relaxed); + if (data) os_atomic_or(&du._dr->ds_pending_data, data, relaxed); } break; @@ -1173,7 +1173,7 @@ _dispatch_unote_register_muxed(dispatch_unote_t du) LIST_INSERT_HEAD(&dmn->dmn_unotes_head, dul, du_link); #if HAVE_MACH if (du._du->du_filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { - os_atomic_store2o(du._dmsr, dmsr_notification_armed, + os_atomic_store(&du._dmsr->dmsr_notification_armed, DISPATCH_MACH_NOTIFICATION_ARMED(dmn), relaxed); } #endif @@ -1207,7 +1207,7 @@ _dispatch_unote_unregister_muxed(dispatch_unote_t du) #if HAVE_MACH if (dmn->dmn_kev.filter == DISPATCH_EVFILT_MACH_NOTIFICATION) { - os_atomic_store2o(du._dmsr, dmsr_notification_armed, false, relaxed); + os_atomic_store(&du._dmsr->dmsr_notification_armed, false, relaxed); } #endif _dispatch_unote_state_set(du, DU_STATE_UNREGISTERED); @@ -1906,7 +1906,7 @@ _dispatch_event_loop_drain(uint32_t flags) // see _dispatch_event_loop_drain() comments about the lazy handling // of DISPATCH_EVENT_LOOP_OVERRIDE dispatch_queue_t dq = (dispatch_queue_t)wlh; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); dispatch_assert(ddi->ddi_wlh_needs_delete); ddi->ddi_wlh_needs_update = false; @@ -2175,7 +2175,7 @@ _dispatch_event_loop_wait_for_ownership(dispatch_sync_context_t dsc) uint64_t dq_state; int i, n = 0; - dq_state = os_atomic_load2o((dispatch_queue_t)wlh, dq_state, relaxed); + dq_state 
= os_atomic_load(&((dispatch_queue_t)wlh)->dq_state, relaxed); if (!_dq_state_drain_locked(dq_state) && _dq_state_is_enqueued_on_target(dq_state)) { // @@ -2531,21 +2531,9 @@ const dispatch_source_type_s _dispatch_source_type_nw_channel = { #if DISPATCH_USE_MEMORYSTATUS #if DISPATCH_USE_MEMORYPRESSURE_SOURCE -#define DISPATCH_MEMORYPRESSURE_SOURCE_MASK ( \ - DISPATCH_MEMORYPRESSURE_NORMAL | \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_MSL_STATUS) - -#define DISPATCH_MEMORYPRESSURE_MALLOC_MASK ( \ - DISPATCH_MEMORYPRESSURE_WARN | \ - DISPATCH_MEMORYPRESSURE_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_WARN | \ - DISPATCH_MEMORYPRESSURE_PROC_LIMIT_CRITICAL | \ - DISPATCH_MEMORYPRESSURE_MSL_STATUS) +static void _dispatch_memorypressure_create(uintptr_t); +DISPATCH_STATIC_GLOBAL(dispatch_source_t _dispatch_memorypressure_source); static void _dispatch_memorypressure_handler(void *context) @@ -2553,6 +2541,11 @@ _dispatch_memorypressure_handler(void *context) dispatch_source_t ds = context; unsigned long memorypressure = dispatch_source_get_data(ds); + if ((memorypressure & DISPATCH_MEMORYPRESSURE_MSL_STATUS) && + (dispatch_source_get_mask(_dispatch_memorypressure_source) != malloc_memorypressure_mask_msl_4libdispatch)) { + _dispatch_memorypressure_create(malloc_memorypressure_mask_msl_4libdispatch); + } + if (memorypressure & DISPATCH_MEMORYPRESSURE_NORMAL) { _dispatch_memory_warn = false; _dispatch_continuation_cache_limit = DISPATCH_CONTINUATION_CACHE_LIMIT; @@ -2574,25 +2567,34 @@ _dispatch_memorypressure_handler(void *context) } #endif } - memorypressure &= DISPATCH_MEMORYPRESSURE_MALLOC_MASK; - if (memorypressure) { - malloc_memory_event_handler(memorypressure); - } + malloc_memory_event_handler(memorypressure); } -DISPATCH_STATIC_GLOBAL(dispatch_source_t _dispatch_memorypressure_source); - +/* create a 
dispatch source for memory pressure notifications */ static void -_dispatch_memorypressure_init(void) +_dispatch_memorypressure_create(uintptr_t mask) { dispatch_source_t ds = dispatch_source_create( DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, - DISPATCH_MEMORYPRESSURE_SOURCE_MASK, _dispatch_mgr_q._as_dq); + mask, _dispatch_mgr_q._as_dq); dispatch_set_context(ds, ds); dispatch_source_set_event_handler_f(ds, _dispatch_memorypressure_handler); - _dispatch_memorypressure_source = ds; dispatch_activate(ds); + if (_dispatch_memorypressure_source) { + // cancel the previous source + dispatch_source_cancel(_dispatch_memorypressure_source); + dispatch_release(_dispatch_memorypressure_source); + } + _dispatch_memorypressure_source = ds; +} + +/* initialize the default memory pressure notification source */ +static void +_dispatch_memorypressure_init(void) +{ + _dispatch_memorypressure_create(malloc_memorypressure_mask_default_4libdispatch); } + #endif // DISPATCH_USE_MEMORYPRESSURE_SOURCE #if TARGET_OS_SIMULATOR // rdar://problem/9219483 @@ -2994,14 +2996,14 @@ _dispatch_mach_notify_merge(mach_port_t name, uint32_t data, bool final) } dispatch_unote_t du = _dispatch_unote_linkage_get_unote(dul); uint32_t fflags = (data & du._du->du_fflags); - os_atomic_store2o(du._du, dmsr_notification_armed, 0, relaxed); + os_atomic_store(&du._du->dmsr_notification_armed, 0, relaxed); if (final || fflags) { // consumed by dux_merge_evt() _dispatch_retain_unote_owner(du); if (final) _dispatch_unote_unregister_muxed(du); if (fflags && dux_type(du._du)->dst_action == DISPATCH_UNOTE_ACTION_SOURCE_OR_FFLAGS) { - os_atomic_or2o(du._dr, ds_pending_data, fflags, relaxed); + os_atomic_or(&du._dr->ds_pending_data, fflags, relaxed); } dux_merge_evt(du._du, flags, fflags, 0); } @@ -3059,7 +3061,7 @@ _dispatch_mach_notification_set_armed(dispatch_mach_send_refs_t dmsr) os_atomic_store(&DISPATCH_MACH_NOTIFICATION_ARMED(dmn), 1, relaxed); LIST_FOREACH(dul, &dmn->dmn_unotes_head, du_link) { dispatch_unote_t 
du = _dispatch_unote_linkage_get_unote(dul); - os_atomic_store2o(du._du, dmsr_notification_armed, 1, relaxed); + os_atomic_store(&du._du->dmsr_notification_armed, 1, relaxed); } _dispatch_debug("machport[0x%08x]: send-possible notification armed", (mach_port_name_t)dmn->dmn_kev.ident); diff --git a/src/event/event_windows.c b/src/event/event_windows.c index ce322258a..afc53c900 100644 --- a/src/event/event_windows.c +++ b/src/event/event_windows.c @@ -625,7 +625,7 @@ _dispatch_event_merge_file_handle(dispatch_muxnote_t dmn) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~1, relaxed); dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); } LIST_FOREACH_SAFE(dul, &dmn->dmn_writers_head, du_link, dul_next) { @@ -634,7 +634,7 @@ _dispatch_event_merge_file_handle(dispatch_muxnote_t dmn) _dispatch_retain_unote_owner(du); dispatch_assert(dux_needs_rearm(du._du)); _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); - os_atomic_store2o(du._dr, ds_pending_data, ~1, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~1, relaxed); dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, 1, 0); } // Retained when posting the completion packet @@ -661,7 +661,7 @@ _dispatch_event_merge_pipe_handle_read(dispatch_muxnote_t dmn, flags = EV_DELETE | EV_DISPATCH; } _dispatch_unote_state_set(du, du_state); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); dux_merge_evt(du._du, flags, data, 0); } SetEvent(dmn->dmn_event); @@ -681,9 +681,9 @@ _dispatch_event_merge_pipe_handle_write(dispatch_muxnote_t dmn, _dispatch_unote_state_clear_bit(du, DU_STATE_ARMED); uintptr_t data = dwBytesAvailable; if (dwBytesAvailable > 0) { - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, 
~data, relaxed); } else { - os_atomic_store2o(du._dr, ds_pending_data, 0, relaxed); + os_atomic_store(&du._dr->ds_pending_data, 0, relaxed); } dux_merge_evt(du._du, EV_ADD | EV_ENABLE | EV_DISPATCH, data, 0); } @@ -707,7 +707,7 @@ _dispatch_event_merge_socket(dispatch_unote_t du, DWORD dwBytesAvailable) flags = EV_DELETE | EV_DISPATCH; } _dispatch_unote_state_set(du, du_state); - os_atomic_store2o(du._dr, ds_pending_data, ~data, relaxed); + os_atomic_store(&du._dr->ds_pending_data, ~data, relaxed); dux_merge_evt(du._du, flags, data, 0); } diff --git a/src/exclavekit/dispatch/dispatch.h b/src/exclavekit/dispatch/dispatch.h new file mode 100644 index 000000000..1023b7808 --- /dev/null +++ b/src/exclavekit/dispatch/dispatch.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2022 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_PUBLIC__ +#define __DISPATCH_PUBLIC__ + +#include +#include + +#include +#include +#include +#include +#include +#include + +#define DISPATCH_API_VERSION 20181008 + +#ifndef __DISPATCH_BUILDING_DISPATCH__ +#ifndef __DISPATCH_INDIRECT__ +#define __DISPATCH_INDIRECT__ +#endif + +#include +#include +#include + +#undef __DISPATCH_INDIRECT__ +#endif /* !__DISPATCH_BUILDING_DISPATCH__ */ + +#endif diff --git a/src/exclavekit/dispatch/module.modulemap b/src/exclavekit/dispatch/module.modulemap new file mode 100644 index 000000000..1f90a0228 --- /dev/null +++ b/src/exclavekit/dispatch/module.modulemap @@ -0,0 +1,4 @@ +module Dispatch [system] { + umbrella header "dispatch.h" + export * +} diff --git a/src/exclavekit/dispatch/object.h b/src/exclavekit/dispatch/object.h new file mode 100644 index 000000000..6019bd88e --- /dev/null +++ b/src/exclavekit/dispatch/object.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2008-2012 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_OBJECT__ +#define __DISPATCH_OBJECT__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN + +#ifdef __BLOCKS__ +/*! 
+ * @typedef dispatch_block_t + * + * @abstract + * The type of blocks submitted to dispatch queues, which take no arguments + * and have no return value. + * + * @discussion + * When not building with Objective-C ARC, a block object allocated on or + * copied to the heap must be released with a -[release] message or the + * Block_release() function. + * + * The declaration of a block literal allocates storage on the stack. + * Therefore, this is an invalid construct: + * + * dispatch_block_t block; + * if (x) { + * block = ^{ printf("true\n"); }; + * } else { + * block = ^{ printf("false\n"); }; + * } + * block(); // unsafe!!! + * + * + * What is happening behind the scenes: + * + * if (x) { + * struct Block __tmp_1 = ...; // setup details + * block = &__tmp_1; + * } else { + * struct Block __tmp_2 = ...; // setup details + * block = &__tmp_2; + * } + * + * + * As the example demonstrates, the address of a stack variable is escaping the + * scope in which it is allocated. That is a classic C bug. + * + * Instead, the block literal must be copied to the heap with the Block_copy() + * function or by sending it a -[copy] message. + */ +typedef void (^dispatch_block_t)(void); +#endif // __BLOCKS__ + +DISPATCH_ASSUME_ABI_SINGLE_END +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/src/exclavekit/dispatch/once.h b/src/exclavekit/dispatch/once.h new file mode 100644 index 000000000..d507d632b --- /dev/null +++ b/src/exclavekit/dispatch/once.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2008-2010 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_ONCE__ +#define __DISPATCH_ONCE__ + +#ifndef __DISPATCH_INDIRECT__ +#error "Please #include instead of this file directly." +#include // for HeaderDoc +#endif + +DISPATCH_ASSUME_NONNULL_BEGIN +DISPATCH_ASSUME_ABI_SINGLE_BEGIN + +__BEGIN_DECLS + +/*! + * @typedef dispatch_once_t + * + * @abstract + * A predicate for use with dispatch_once(). It must be initialized to zero. + * Note: static and global variables default to zero. + */ +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") +typedef intptr_t dispatch_once_t; + +/*! + * @function dispatch_once + * + * @abstract + * Execute a block once and only once. + * + * @param predicate + * A pointer to a dispatch_once_t that is used to test whether the block has + * completed or not. + * + * @param block + * The block to execute once. + * + * @discussion + * Always call dispatch_once() before using or testing any variables that are + * initialized by the block. 
+ */ +#ifdef __BLOCKS__ +DISPATCH_EXPORT DISPATCH_NONNULL_ALL DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") +void +dispatch_once(dispatch_once_t *predicate, + DISPATCH_NOESCAPE dispatch_block_t block); +#endif // __BLOCKS__ + +DISPATCH_EXPORT DISPATCH_NONNULL1 DISPATCH_NONNULL3 DISPATCH_NOTHROW +DISPATCH_SWIFT3_UNAVAILABLE("Use lazily initialized globals instead") +void +dispatch_once_f(dispatch_once_t *predicate, void *_Nullable context, + dispatch_function_t function); + +__END_DECLS + +DISPATCH_ASSUME_ABI_SINGLE_END +DISPATCH_ASSUME_NONNULL_END + +#endif diff --git a/src/exclavekit/internal.h b/src/exclavekit/internal.h new file mode 100644 index 000000000..c8bddab15 --- /dev/null +++ b/src/exclavekit/internal.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#ifndef __DISPATCH_INTERNAL__ +#define __DISPATCH_INTERNAL__ + +#define __DISPATCH_BUILDING_DISPATCH__ +#define __DISPATCH_INDIRECT__ + +#include +#include +#include +#include + +#ifdef __BLOCKS__ +#include +#include +#endif /* __BLOCKS__ */ + +#define DISPATCH_NOINLINE __attribute__((__noinline__)) + +#ifdef __BLOCKS__ +#define _dispatch_Block_invoke(bb) \ + ((dispatch_function_t)((struct Block_layout *)bb)->invoke) +#endif /* __BLOCKS__ */ + +#endif /* __DISPATCH_INTERNAL__ */ diff --git a/src/exclavekit/once.c b/src/exclavekit/once.c new file mode 100644 index 000000000..4e8dddcbf --- /dev/null +++ b/src/exclavekit/once.c @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2008-2013 Apple Inc. All rights reserved. + * + * @APPLE_APACHE_LICENSE_HEADER_START@ + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @APPLE_APACHE_LICENSE_HEADER_END@ + */ + +#include "internal.h" + + +#ifdef __BLOCKS__ +void +dispatch_once(dispatch_once_t *val, dispatch_block_t block) +{ + dispatch_once_f(val, block, _dispatch_Block_invoke(block)); +} +#endif // __BLOCKS__ + +DISPATCH_NOINLINE +void +dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func) +{ + (void)val; + (void)ctxt; + (void)func; +} diff --git a/src/firehose/firehose_server.c b/src/firehose/firehose_server.c index 11e3f3fa8..a2972a376 100644 --- a/src/firehose/firehose_server.c +++ b/src/firehose/firehose_server.c @@ -208,8 +208,8 @@ static void firehose_client_notify(firehose_client_t fc, mach_port_t reply_port) { firehose_push_reply_t push_reply = { - .fpr_mem_flushed_pos = os_atomic_load2o(fc, fc_mem_flushed_pos,relaxed), - .fpr_io_flushed_pos = os_atomic_load2o(fc, fc_io_flushed_pos, relaxed), + .fpr_mem_flushed_pos = os_atomic_load(&fc->fc_mem_flushed_pos, relaxed), + .fpr_io_flushed_pos = os_atomic_load(&fc->fc_io_flushed_pos, relaxed), }; kern_return_t kr; @@ -243,9 +243,9 @@ firehose_client_acquire_head(firehose_buffer_t fb, bool for_io) { uint16_t head; if (for_io) { - head = os_atomic_load2o(&fb->fb_header, fbh_ring_io_head, acquire); + head = os_atomic_load(&fb->fb_header.fbh_ring_io_head, acquire); } else { - head = os_atomic_load2o(&fb->fb_header, fbh_ring_mem_head, acquire); + head = os_atomic_load(&fb->fb_header.fbh_ring_mem_head, acquire); } return head; } @@ -371,13 +371,11 @@ firehose_client_drain_one(firehose_client_t fc, mach_port_t port, uint32_t flags // and we only need 16bits from it. and on 32bit arm, there's no way to // perform an atomic load of a 64bit quantity on read-only memory. 
if (for_io) { - os_atomic_add2o(fc, fc_io_flushed_pos, count, relaxed); - client_flushed = os_atomic_load2o(&fb->fb_header, - fbh_ring_tail.frp_io_flushed, relaxed); + os_atomic_add(&fc->fc_io_flushed_pos, count, relaxed); + client_flushed = os_atomic_load(&fb->fb_header.fbh_ring_tail.frp_io_flushed, relaxed); } else { - os_atomic_add2o(fc, fc_mem_flushed_pos, count, relaxed); - client_flushed = os_atomic_load2o(&fb->fb_header, - fbh_ring_tail.frp_mem_flushed, relaxed); + os_atomic_add(&fc->fc_mem_flushed_pos, count, relaxed); + client_flushed = os_atomic_load(&fb->fb_header.fbh_ring_tail.frp_mem_flushed, relaxed); } if (!fc->fc_pid) { // will fire firehose_client_notify() because port is MACH_PORT_DEAD @@ -648,8 +646,8 @@ firehose_client_handle_mach_event(void *ctx, dispatch_mach_reason_t reason, break; case DISPATCH_MACH_CANCELED: - if (!_os_atomic_refcnt_sub2o(fc, fc_mach_channel_refcnt, 1)) { - _os_atomic_refcnt_dispose_barrier2o(fc, fc_mach_channel_refcnt); + if (!_os_atomic_refcnt_sub(&fc->fc_mach_channel_refcnt, 1)) { + _os_atomic_refcnt_dispose_barrier(&fc->fc_mach_channel_refcnt); firehose_mach_port_send_release(fc->fc_sendp); fc->fc_sendp = MACH_PORT_NULL; @@ -867,7 +865,7 @@ firehose_client_get_metadata_buffer(firehose_client_t client, size_t *size) void * firehose_client_get_context(firehose_client_t fc) { - return os_atomic_load2o(fc, fc_ctxt, relaxed); + return os_atomic_load(&fc->fc_ctxt, relaxed); } void @@ -879,7 +877,7 @@ firehose_client_set_strings_cached(firehose_client_t fc) void * firehose_client_set_context(firehose_client_t fc, void *ctxt) { - return os_atomic_xchg2o(fc, fc_ctxt, ctxt, relaxed); + return os_atomic_xchg(&fc->fc_ctxt, ctxt, relaxed); } void @@ -1238,7 +1236,7 @@ firehose_snapshot_tickle_clients(firehose_snapshot_t fs, bool for_io) // cheating: equivalent to dispatch_group_enter() n times // without the acquire barriers that we don't need if (n) { - os_atomic_sub2o(fs->fs_group, dg_bits, + os_atomic_sub(&fs->fs_group->dg_bits, 
n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); } } diff --git a/src/init.c b/src/init.c index 120699b1e..9fd39f272 100644 --- a/src/init.c +++ b/src/init.c @@ -58,6 +58,7 @@ DISPATCH_EXPORT DISPATCH_NOTHROW void dispatch_atfork_prepare(void) { + _voucher_atfork_prepare(); _os_object_atfork_prepare(); } @@ -726,6 +727,17 @@ DISPATCH_VTABLE_INSTANCE(workloop, .dq_push = _dispatch_workloop_push, ); +DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial_executor, lane, + .do_type = DISPATCH_QUEUE_SERIAL_TYPE, + .do_dispose = _dispatch_lane_dispose, + .do_debug = _dispatch_queue_debug, + .do_invoke = _dispatch_lane_invoke, + + .dq_activate = _dispatch_lane_activate, + .dq_wakeup = _dispatch_lane_wakeup, + .dq_push = _dispatch_lane_push, +); + DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane, .do_type = DISPATCH_QUEUE_SERIAL_TYPE, .do_dispose = _dispatch_lane_dispose, diff --git a/src/inline_internal.h b/src/inline_internal.h index 399a200de..73bed80f1 100644 --- a/src/inline_internal.h +++ b/src/inline_internal.h @@ -179,7 +179,7 @@ _dispatch_object_is_barrier(dispatch_object_t dou) if (dx_cluster(dou._do) != _DISPATCH_QUEUE_CLUSTER) { return false; } - dq_flags = os_atomic_load2o(dou._dq, dq_atomic_flags, relaxed); + dq_flags = os_atomic_load(&dou._dq->dq_atomic_flags, relaxed); return dq_flags & DQF_BARRIER_BIT; } @@ -302,7 +302,7 @@ _dispatch_retain_n_unsafe(dispatch_object_t dou, int n) // is as terse as possible (this window is a possible dequeuer starvation). // // Other code should use the safe variants at all times. 
- os_atomic_add2o(dou._os_obj, os_obj_ref_cnt, n, relaxed); + os_atomic_add(&dou._os_obj->os_obj_ref_cnt, n, relaxed); } DISPATCH_ALWAYS_INLINE_NDEBUG @@ -379,7 +379,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_retain_storage(dispatch_queue_class_t dqu) { - int ref_cnt = os_atomic_inc2o(dqu._dq, dq_sref_cnt, relaxed); + int ref_cnt = os_atomic_inc(&dqu._dq->dq_sref_cnt, relaxed); if (unlikely(ref_cnt <= 0)) { _OS_OBJECT_CLIENT_CRASH("Resurrection of an object"); } @@ -393,7 +393,7 @@ _dispatch_queue_release_storage(dispatch_queue_class_t dqu) // need for visibility wrt to the allocation, the internal refcount already // gives us that, and the object becomes immutable after the last internal // refcount release. - int ref_cnt = os_atomic_dec2o(dqu._dq, dq_sref_cnt, relaxed); + int ref_cnt = os_atomic_dec(&dqu._dq->dq_sref_cnt, relaxed); if (unlikely(ref_cnt >= 0)) { return; } @@ -410,7 +410,7 @@ _dispatch_object_set_target_queue_inline(dispatch_object_t dou, dispatch_queue_t tq) { _dispatch_retain(tq); - tq = os_atomic_xchg2o(dou._do, do_targetq, tq, release); + tq = os_atomic_xchg(&dou._do->do_targetq, tq, release); if (tq) _dispatch_release(tq); } @@ -638,7 +638,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_queue_flags_t _dispatch_queue_atomic_flags(dispatch_queue_class_t dqu) { - return os_atomic_load2o(dqu._dq, dq_atomic_flags, relaxed); + return os_atomic_load(&dqu._dq->dq_atomic_flags, relaxed); } DISPATCH_ALWAYS_INLINE @@ -646,7 +646,7 @@ static inline dispatch_queue_flags_t _dispatch_queue_atomic_flags_set(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_or2o(dqu._dq, dq_atomic_flags, bits, relaxed); + return os_atomic_or(&dqu._dq->dq_atomic_flags, bits, relaxed); } DISPATCH_ALWAYS_INLINE @@ -655,7 +655,7 @@ _dispatch_queue_atomic_flags_set_and_clear_orig(dispatch_queue_class_t dqu, dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) { dispatch_queue_flags_t oflags, nflags; - 
os_atomic_rmw_loop2o(dqu._dq, dq_atomic_flags, oflags, nflags, relaxed, { + os_atomic_rmw_loop(&dqu._dq->dq_atomic_flags, oflags, nflags, relaxed, { nflags = (oflags | add_bits) & ~clr_bits; if (nflags == oflags) os_atomic_rmw_loop_give_up(return oflags); }); @@ -668,7 +668,7 @@ _dispatch_queue_atomic_flags_set_and_clear(dispatch_queue_class_t dqu, dispatch_queue_flags_t add_bits, dispatch_queue_flags_t clr_bits) { dispatch_queue_flags_t oflags, nflags; - os_atomic_rmw_loop2o(dqu._dq, dq_atomic_flags, oflags, nflags, relaxed, { + os_atomic_rmw_loop(&dqu._dq->dq_atomic_flags, oflags, nflags, relaxed, { nflags = (oflags | add_bits) & ~clr_bits; if (nflags == oflags) os_atomic_rmw_loop_give_up(return oflags); }); @@ -680,7 +680,7 @@ static inline dispatch_queue_flags_t _dispatch_queue_atomic_flags_set_orig(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_or_orig2o(dqu._dq, dq_atomic_flags, bits, relaxed); + return os_atomic_or_orig(&dqu._dq->dq_atomic_flags, bits, relaxed); } DISPATCH_ALWAYS_INLINE @@ -688,7 +688,7 @@ static inline dispatch_queue_flags_t _dispatch_queue_atomic_flags_clear(dispatch_queue_class_t dqu, dispatch_queue_flags_t bits) { - return os_atomic_and2o(dqu._dq, dq_atomic_flags, ~bits, relaxed); + return os_atomic_and(&dqu._dq->dq_atomic_flags, ~bits, relaxed); } DISPATCH_ALWAYS_INLINE @@ -933,7 +933,7 @@ _dq_state_is_suspended(uint64_t dq_state) return dq_state & DISPATCH_QUEUE_SUSPEND_BITS_MASK; } #define DISPATCH_QUEUE_IS_SUSPENDED(x) \ - _dq_state_is_suspended(os_atomic_load2o(x, dq_state, relaxed)) + _dq_state_is_suspended(os_atomic_load(&x->dq_state, relaxed)) DISPATCH_ALWAYS_INLINE static inline bool @@ -1101,7 +1101,7 @@ _dq_state_drain_owner(uint64_t dq_state) return _dispatch_lock_owner((dispatch_lock)dq_state); } #define DISPATCH_QUEUE_DRAIN_OWNER(dq) \ - _dq_state_drain_owner(os_atomic_load2o(dq, dq_state, relaxed)) + _dq_state_drain_owner(os_atomic_load(&dq->dq_state, relaxed)) DISPATCH_ALWAYS_INLINE static 
inline bool @@ -1192,7 +1192,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_setter_assert_inactive(dispatch_queue_class_t dq) { - uint64_t dq_state = os_atomic_load2o(dq._dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq._dq->dq_state, relaxed); if (likely(_dq_state_is_inactive(dq_state))) return; #ifndef __LP64__ dq_state >>= 32; @@ -1223,7 +1223,7 @@ _dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf, dq_state |= initial_state_bits; dq->do_next = DISPATCH_OBJECT_LISTLESS; dqf |= DQF_WIDTH(width); - os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed); + os_atomic_store(&dq->dq_atomic_flags, dqf, relaxed); dq->dq_state = dq_state; dq->dq_serialnum = os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed); @@ -1245,7 +1245,7 @@ _dispatch_lane_try_inactive_suspend(dispatch_lane_class_t dqu) { uint64_t old_state, new_state; - (void)os_atomic_rmw_loop2o(dqu._dl, dq_state, old_state, new_state, relaxed, { + (void)os_atomic_rmw_loop(&dqu._dl->dq_state, old_state, new_state, relaxed, { if (unlikely(!_dq_state_is_inactive(old_state))) { os_atomic_rmw_loop_give_up(return false); } @@ -1315,7 +1315,7 @@ _dispatch_queue_drain_try_lock(dispatch_queue_t dq, dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); retry: - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { new_state = old_state; if (likely(!(old_state & lock_fail_mask))) { if (unlikely(_dq_state_needs_lock_override(old_state, oq_floor))) { @@ -1362,7 +1362,7 @@ _dispatch_queue_drain_try_lock_wlh(dispatch_queue_t dq, uint64_t *dq_state) uint64_t lock_bits = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { new_state = old_state; if (unlikely(_dq_state_is_suspended(old_state))) 
{ new_state &= ~DISPATCH_QUEUE_ENQUEUED; @@ -1414,7 +1414,7 @@ _dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq, (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL); uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + return os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK; if (old_state != (init | role)) { os_atomic_rmw_loop_give_up(break); @@ -1442,7 +1442,7 @@ DISPATCH_ALWAYS_INLINE static inline void _dispatch_queue_reserve_sync_width(dispatch_lane_t dq) { - os_atomic_add2o(dq, dq_state, DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); + os_atomic_add(&dq->dq_state, DISPATCH_QUEUE_WIDTH_INTERVAL, relaxed); } /* Used by _dispatch_sync on non-serial queues @@ -1464,7 +1464,7 @@ _dispatch_queue_try_reserve_sync_width(dispatch_lane_t dq) return false; } - return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + return os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { if (unlikely(!_dq_state_is_sync_runnable(old_state)) || _dq_state_is_dirty(old_state) || _dq_state_has_pending_barrier(old_state)) { @@ -1485,7 +1485,7 @@ _dispatch_queue_try_acquire_async(dispatch_lane_t dq) { uint64_t old_state, new_state; - return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + return os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { if (unlikely(!_dq_state_is_runnable(old_state) || _dq_state_is_dirty(old_state) || _dq_state_has_pending_barrier(old_state))) { @@ -1513,7 +1513,7 @@ _dispatch_queue_try_upgrade_full_width(dispatch_lane_t dq, uint64_t owned) uint64_t pending_barrier_width = DISPATCH_QUEUE_PENDING_BARRIER + (dq->dq_width - 1) * DISPATCH_QUEUE_WIDTH_INTERVAL; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { new_state = old_state - owned; if 
(likely(!_dq_state_has_pending_barrier(old_state))) { new_state += pending_barrier_width; @@ -1565,7 +1565,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) { uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = old_state - owned; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; if (unlikely(_dq_state_is_suspended(old_state))) { @@ -1576,7 +1576,7 @@ _dispatch_queue_drain_try_unlock(dispatch_queue_t dq, uint64_t owned, bool done) // what the enqueuer that set DIRTY has done. // the xor generates better assembly as DISPATCH_QUEUE_DIRTY // is already in a register - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + os_atomic_xor(&dq->dq_state, DISPATCH_QUEUE_DIRTY, acquire); return false; }); } else if (likely(done)) { @@ -1611,7 +1611,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) { uint64_t clearbits = DISPATCH_QUEUE_RECEIVED_SYNC_WAIT | DISPATCH_QUEUE_UNCONTENDED_SYNC; - os_atomic_and2o(dq, dq_state, ~clearbits, relaxed); + os_atomic_and(&dq->dq_state, ~clearbits, relaxed); } #pragma mark - @@ -1632,7 +1632,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) // release forces visibility of updates to the item before update to the tail #define os_mpsc_push_update_tail(Q, tail, _o_next) ({ \ os_mpsc_node_type(Q) _tl = (tail); \ - os_atomic_store2o(_tl, _o_next, NULL, relaxed); \ + os_atomic_store(&_tl->_o_next, NULL, relaxed); \ _dispatch_set_enqueuer_for(_os_mpsc_tail Q); \ os_atomic_xchg(_os_mpsc_tail Q, _tl, release); \ }) @@ -1642,7 +1642,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) #define os_mpsc_push_update_prev(Q, prev, head, _o_next) ({ \ os_mpsc_node_type(Q) _prev = (prev); \ if (likely(_prev)) { \ - (void)os_atomic_store2o(_prev, _o_next, (head), relaxed); \ + (void)os_atomic_store(&_prev->_o_next, (head), relaxed); \ } else { \ 
(void)os_atomic_store(_os_mpsc_head Q, (head), relaxed); \ } \ @@ -1693,7 +1693,7 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) #define os_mpsc_pop_head(Q, head, _o_next) ({ \ os_mpsc_node_type(Q) _head = (head), _n; \ - _n = os_atomic_load2o(_head, _o_next, dependency); \ + _n = os_atomic_load(&_head->_o_next, dependency); \ os_atomic_store(_os_mpsc_head Q, _n, relaxed); \ /* 22708742: set tail to NULL with release, so that NULL write */ \ /* to head above doesn't clobber head from concurrent enqueuer */ \ @@ -1707,11 +1707,11 @@ _dispatch_queue_move_to_contended_sync(dispatch_queue_t dq) #define os_mpsc_undo_pop_list(Q, head, tail, next, _o_next) ({ \ os_mpsc_node_type(Q) _hd = (head), _tl = (tail), _n = (next); \ - os_atomic_store2o(_tl, _o_next, _n, relaxed); \ + os_atomic_store(&_tl->_o_next, _n, relaxed); \ if (unlikely(!_n && \ !os_atomic_cmpxchg(_os_mpsc_tail Q, NULL, _tl, release))) { \ _n = os_mpsc_get_head(Q); \ - os_atomic_store2o(_tl, _o_next, _n, relaxed); \ + os_atomic_store(&_tl->_o_next, _n, relaxed); \ } \ os_atomic_store(_os_mpsc_head Q, _hd, relaxed); \ }) @@ -1987,7 +1987,7 @@ _dispatch_queue_class_probe(dispatch_lane_class_t dqu) struct dispatch_object_s *tail; // seq_cst wrt atomic store to dq_state // seq_cst wrt atomic store to dq_flags - tail = os_atomic_load2o(dqu._dl, dq_items_tail, ordered); + tail = os_atomic_load(&dqu._dl->dq_items_tail, ordered); return unlikely(tail != NULL); } @@ -2064,7 +2064,7 @@ _dispatch_queue_set_bound_thread(dispatch_queue_class_t dqu) // Tag thread-bound queues with the owning thread dispatch_assert(_dispatch_queue_is_thread_bound(dqu)); uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dqu._dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dqu._dq->dq_state, old_state, new_state, relaxed, { new_state = old_state; new_state &= ~DISPATCH_QUEUE_DRAIN_OWNER_MASK; new_state |= _dispatch_lock_value_for_self(); @@ -2076,7 +2076,7 @@ static inline void 
_dispatch_queue_clear_bound_thread(dispatch_queue_class_t dqu) { dispatch_assert(_dispatch_queue_is_thread_bound(dqu)); - os_atomic_and2o(dqu._dq, dq_state, + os_atomic_and(&dqu._dq->dq_state, ~DISPATCH_QUEUE_DRAIN_OWNER_MASK, relaxed); } diff --git a/src/introspection.c b/src/introspection.c index bee263917..4590beb8d 100644 --- a/src/introspection.c +++ b/src/introspection.c @@ -149,7 +149,7 @@ _dispatch_introspection_lane_get_info(dispatch_lane_class_t dqu) { dispatch_lane_t dq = dqu._dl; bool global = _dispatch_object_is_global(dq); - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); dispatch_introspection_queue_s diq = { .queue = dq->_as_dq, @@ -172,7 +172,7 @@ DISPATCH_ALWAYS_INLINE static inline dispatch_introspection_queue_s _dispatch_introspection_workloop_get_info(dispatch_workloop_t dwl) { - uint64_t dq_state = os_atomic_load2o(dwl, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dwl->dq_state, relaxed); dispatch_introspection_queue_s diq = { .queue = dwl->_as_dq, @@ -256,7 +256,7 @@ _dispatch_introspection_continuation_get_info(dispatch_queue_t dq, } else if (func == _dispatch_apply_invoke || func == _dispatch_apply_redirect_invoke) { dispatch_apply_t da = ctxt; - if (os_atomic_load2o(da, da_todo, relaxed)) { + if (os_atomic_load(&da->da_todo, relaxed)) { dc = da->da_dc; dq = dc->dc_other; ctxt = dc->dc_ctxt; @@ -321,7 +321,7 @@ _dispatch_introspection_source_get_info(dispatch_source_t ds) hdlr_is_block = (dc->dc_flags & DC_FLAG_BLOCK); } - uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&ds->dq_state, relaxed); dispatch_introspection_source_s dis = { .source = ds, .target_queue = ds->do_targetq, @@ -343,7 +343,7 @@ dispatch_introspection_source_s _dispatch_introspection_mach_get_info(dispatch_mach_t dm) { dispatch_mach_recv_refs_t dmrr = dm->dm_recv_refs; - uint64_t dq_state = os_atomic_load2o(dm, dq_state, relaxed); + 
uint64_t dq_state = os_atomic_load(&dm->dq_state, relaxed); dispatch_introspection_source_s dis = { .source = upcast(dm)._ds, diff --git a/src/io.c b/src/io.c index fa721bdd0..375c525cd 100644 --- a/src/io.c +++ b/src/io.c @@ -734,7 +734,7 @@ static void _dispatch_io_stop(dispatch_io_t channel) { _dispatch_io_channel_debug("stop", channel); - (void)os_atomic_or2o(channel, atomic_flags, DIO_STOPPED, relaxed); + (void)os_atomic_or(&channel->atomic_flags, DIO_STOPPED, relaxed); _dispatch_retain(channel); dispatch_async(channel->queue, ^{ dispatch_async(channel->barrier_queue, ^{ @@ -793,7 +793,7 @@ dispatch_io_close(dispatch_io_t channel, unsigned long flags) _dispatch_object_debug(channel, "%s", __func__); _dispatch_io_channel_debug("close", channel); if (!(channel->atomic_flags & (DIO_CLOSED|DIO_STOPPED))) { - (void)os_atomic_or2o(channel, atomic_flags, DIO_CLOSED, + (void)os_atomic_or(&channel->atomic_flags, DIO_CLOSED, relaxed); dispatch_fd_entry_t fd_entry = channel->fd_entry; if (fd_entry) { @@ -1657,10 +1657,10 @@ _dispatch_fd_entry_open(dispatch_fd_entry_t fd_entry, dispatch_io_t channel) if (err == EINTR) { goto open; } - (void)os_atomic_cmpxchg2o(fd_entry, err, 0, err, relaxed); + (void)os_atomic_cmpxchg(&fd_entry->err, 0, err, relaxed); return err; } - if (!os_atomic_cmpxchg2o(fd_entry, fd, -1, fd, relaxed)) { + if (!os_atomic_cmpxchg(&fd_entry->fd, -1, fd, relaxed)) { // Lost the race with another open _dispatch_fd_entry_guarded_close(fd_entry, fd); } else { @@ -2595,7 +2595,7 @@ _dispatch_operation_perform(dispatch_operation_t op) case ECANCELED: return DISPATCH_OP_ERR; case EBADF: - (void)os_atomic_cmpxchg2o(op->fd_entry, err, 0, err, relaxed); + (void)os_atomic_cmpxchg(&op->fd_entry->err, 0, err, relaxed); return DISPATCH_OP_FD_ERR; default: return DISPATCH_OP_COMPLETE; diff --git a/src/mach.c b/src/mach.c index b6c9d187f..5d476253d 100644 --- a/src/mach.c +++ b/src/mach.c @@ -285,7 +285,7 @@ dispatch_mach_connect(dispatch_mach_t dm, mach_port_t 
receive, _dispatch_mach_arm_no_senders(dm, false); } - uint32_t disconnect_cnt = os_atomic_and_orig2o(dmsr, dmsr_disconnect_cnt, + uint32_t disconnect_cnt = os_atomic_and_orig(&dmsr->dmsr_disconnect_cnt, ~DISPATCH_MACH_NEVER_CONNECTED, relaxed); if (unlikely(!(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED))) { DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel already connected"); @@ -1389,7 +1389,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, } while ((dc = next_dc)); } - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, release, { if (old_state & DISPATCH_MACH_STATE_DIRTY) { new_state = old_state; new_state &= ~DISPATCH_MACH_STATE_DIRTY; @@ -1408,7 +1408,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, if (_dispatch_object_has_type(dc, DISPATCH_CONTINUATION_TYPE(MACH_SEND_BARRIER))) { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, release, { new_state = old_state; new_state |= DISPATCH_MACH_STATE_DIRTY; new_state |= DISPATCH_MACH_STATE_PENDING_BARRIER; @@ -1416,7 +1416,7 @@ _dispatch_mach_send_drain(dispatch_mach_t dm, dispatch_invoke_flags_t flags, new_state &= ~DISPATCH_MACH_STATE_RECEIVED_OVERRIDE; }); } else { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, release, { new_state = old_state; if (old_state & (DISPATCH_MACH_STATE_DIRTY | DISPATCH_MACH_STATE_RECEIVED_OVERRIDE)) { @@ -1493,7 +1493,7 @@ _dispatch_mach_send_invoke(dispatch_mach_t dm, dispatch_invoke_flags_t flags, dispatch_qos_t oq_floor = _dispatch_get_basepri_override_qos_floor(); retry: - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, acquire, { new_state = old_state; if (unlikely((old_state 
& canlock_mask) != canlock_state)) { if (!(send_flags & DM_SEND_INVOKE_MAKE_DIRTY)) { @@ -1587,7 +1587,7 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, state_flags |= DISPATCH_MACH_STATE_PENDING_BARRIER; } - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, release, { new_state = _dmsr_state_merge_override(old_state, qos); new_state |= state_flags; }); @@ -1598,7 +1598,7 @@ _dispatch_mach_send_push(dispatch_mach_t dm, dispatch_object_t dou, _dispatch_release_2_no_dispose(dm); } } else { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, relaxed, { new_state = _dmsr_state_merge_override(old_state, qos); if (old_state == new_state) { os_atomic_rmw_loop_give_up(break); @@ -1651,7 +1651,7 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, if (unlikely(dmsr->dmsr_disconnect_cnt || (dm->dq_atomic_flags & DSF_CANCELED))) { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, release, { new_state = _dmsr_state_merge_override(old_state, qos); new_state |= state_flags; }); @@ -1665,7 +1665,7 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, canlock_mask = DISPATCH_MACH_STATE_UNLOCK_MASK | DISPATCH_MACH_STATE_PENDING_BARRIER; if (state_flags) { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, seq_cst, { + os_atomic_rmw_loop(&dmsr->dmsr_state, old_state, new_state, seq_cst, { new_state = _dmsr_state_merge_override(old_state, qos); new_state |= state_flags; if (likely((old_state & canlock_mask) == 0)) { @@ -1679,7 +1679,7 @@ _dispatch_mach_send_push_and_trydrain(dispatch_mach_t dm, wflags &= ~(dispatch_wakeup_flags_t)DISPATCH_WAKEUP_CONSUME_2; } } else { - os_atomic_rmw_loop2o(dmsr, dmsr_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dmsr->dmsr_state, 
old_state, new_state, acquire, { new_state = _dmsr_state_merge_override(old_state, qos); if (new_state == old_state) { os_atomic_rmw_loop_give_up(return false); @@ -2174,7 +2174,7 @@ _dispatch_mach_reconnect_invoke(dispatch_mach_t dm, dispatch_object_t dou) dmsr->dmsr_send = dmsr_send; dmsr->dmsr_checkin = dmsr_checkin; } - (void)os_atomic_dec2o(dmsr, dmsr_disconnect_cnt, relaxed); + (void)os_atomic_dec(&dmsr->dmsr_disconnect_cnt, relaxed); } return disconnected; } @@ -2185,7 +2185,7 @@ dispatch_mach_reconnect(dispatch_mach_t dm, mach_port_t send, dispatch_mach_msg_t checkin) { dispatch_mach_send_refs_t dmsr = dm->dm_send_refs; - (void)os_atomic_inc2o(dmsr, dmsr_disconnect_cnt, relaxed); + (void)os_atomic_inc(&dmsr->dmsr_disconnect_cnt, relaxed); if (MACH_PORT_VALID(send) && checkin) { dispatch_mach_msg_t dmsg = checkin; dispatch_retain(dmsg); @@ -2664,8 +2664,8 @@ _dispatch_mach_install(dispatch_mach_t dm, dispatch_wlh_t wlh, dispatch_assert(!dm->ds_is_installed); dm->ds_is_installed = true; - uint32_t disconnect_cnt = os_atomic_load2o(dm->dm_send_refs, - dmsr_disconnect_cnt, relaxed); + uint32_t disconnect_cnt = os_atomic_load( + &dm->dm_send_refs->dmsr_disconnect_cnt, relaxed); if (unlikely(disconnect_cnt & DISPATCH_MACH_NEVER_CONNECTED)) { DISPATCH_CLIENT_CRASH(disconnect_cnt, "Channel never connected"); } diff --git a/src/object.c b/src/object.c index 67f2c1cd6..5400215af 100644 --- a/src/object.c +++ b/src/object.c @@ -123,7 +123,7 @@ bool _os_object_retain_weak(_os_object_t obj) { int xref_cnt, nxref_cnt; - os_atomic_rmw_loop2o(obj, os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, { + os_atomic_rmw_loop(&obj->os_obj_xref_cnt, xref_cnt, nxref_cnt, relaxed, { if (unlikely(xref_cnt == _OS_OBJECT_GLOBAL_REFCNT)) { os_atomic_rmw_loop_give_up(return true); // global object } diff --git a/src/object.m b/src/object.m index 273c5fa3f..fbc1ddb52 100644 --- a/src/object.m +++ b/src/object.m @@ -398,6 +398,7 @@ - (void)_xref_dispose { DISPATCH_CLASS_IMPL(semaphore) 
DISPATCH_CLASS_IMPL(group) DISPATCH_CLASS_IMPL(workloop) +DISPATCH_CLASS_IMPL(queue_serial_executor) DISPATCH_CLASS_IMPL(queue_serial) DISPATCH_CLASS_IMPL(queue_concurrent) DISPATCH_CLASS_IMPL(queue_main) diff --git a/src/object_internal.h b/src/object_internal.h index f11b9c66c..50cd15906 100644 --- a/src/object_internal.h +++ b/src/object_internal.h @@ -32,6 +32,8 @@ #define OS_OBJECT_DECL_SUBCLASS(name, super) DISPATCH_DECL(name) #endif +#define DISPATCH_CLASS(name) OS_dispatch_##name + #if USE_OBJC #define OS_OBJECT_EXTRA_VTABLE_SYMBOL(name) _OS_##name##_vtable #define DISPATCH_CLASS_SYMBOL(name) OS_dispatch_##name##_class @@ -43,10 +45,9 @@ "__" OS_STRINGIFY(name) "_vtable" #define DISPATCH_CLASS_SYMBOL(name) _dispatch_##name##_vtable #define DISPATCH_CLASS_RAW_SYMBOL_NAME(name) \ - "__dispatch_" OS_STRINGIFY(name) "_vtable" + "__" OS_STRINGIFY(DISPATCH_CLASS(name)) "_vtable" #endif -#define DISPATCH_CLASS(name) OS_dispatch_##name #if USE_OBJC #define DISPATCH_OBJC_CLASS_DECL(name) \ extern void *DISPATCH_CLASS_SYMBOL(name) \ @@ -609,33 +610,32 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); /* * Low level _os_atomic_refcnt_* actions * - * _os_atomic_refcnt_inc2o(o, f): + * _os_atomic_refcnt_inc(o, f): * performs a refcount increment and returns the new refcount value * - * _os_atomic_refcnt_dec2o(o, f): + * _os_atomic_refcnt_dec(o, f): * performs a refcount decrement and returns the new refcount value * - * _os_atomic_refcnt_dispose_barrier2o(o, f): + * _os_atomic_refcnt_dispose_barrier(o, f): * a barrier to perform prior to tearing down an object when the refcount * reached -1. 
*/ -#define _os_atomic_refcnt_perform2o(o, f, op, n, m) ({ \ - __typeof__(o) _o = (o); \ - int _ref_cnt = os_atomic_load(&_o->f, relaxed); \ +#define _os_atomic_refcnt_perform(o, op, n, m) ({ \ + int _ref_cnt = os_atomic_load(o, relaxed); \ if (likely(_ref_cnt != _OS_OBJECT_GLOBAL_REFCNT)) { \ - _ref_cnt = os_atomic_##op##2o(_o, f, n, m); \ + _ref_cnt = os_atomic_##op(o, n, m); \ } \ _ref_cnt; \ }) -#define _os_atomic_refcnt_add_orig2o(o, m, n) \ - _os_atomic_refcnt_perform2o(o, m, add_orig, n, relaxed) +#define _os_atomic_refcnt_add_orig(o, n) \ + _os_atomic_refcnt_perform(o, add_orig, n, relaxed) -#define _os_atomic_refcnt_sub2o(o, m, n) \ - _os_atomic_refcnt_perform2o(o, m, sub, n, release) +#define _os_atomic_refcnt_sub(o, n) \ + _os_atomic_refcnt_perform(o, sub, n, release) -#define _os_atomic_refcnt_dispose_barrier2o(o, m) \ - (void)os_atomic_load2o(o, m, acquire) +#define _os_atomic_refcnt_dispose_barrier(o) \ + (void)os_atomic_load(o, acquire) /* @@ -655,22 +655,22 @@ size_t _dispatch_objc_debug(dispatch_object_t dou, char* buf, size_t bufsiz); * */ #define _os_object_xrefcnt_inc_orig(o) \ - _os_atomic_refcnt_add_orig2o(o, os_obj_xref_cnt, 1) + _os_atomic_refcnt_add_orig(&o->os_obj_xref_cnt, 1) #define _os_object_xrefcnt_dec(o) \ - _os_atomic_refcnt_sub2o(o, os_obj_xref_cnt, 1) + _os_atomic_refcnt_sub(&o->os_obj_xref_cnt, 1) #define _os_object_xrefcnt_dispose_barrier(o) \ - _os_atomic_refcnt_dispose_barrier2o(o, os_obj_xref_cnt) + _os_atomic_refcnt_dispose_barrier(&o->os_obj_xref_cnt) #define _os_object_refcnt_add_orig(o, n) \ - _os_atomic_refcnt_add_orig2o(o, os_obj_ref_cnt, n) + _os_atomic_refcnt_add_orig(&o->os_obj_ref_cnt, n) #define _os_object_refcnt_sub(o, n) \ - _os_atomic_refcnt_sub2o(o, os_obj_ref_cnt, n) + _os_atomic_refcnt_sub(&o->os_obj_ref_cnt, n) #define _os_object_refcnt_dispose_barrier(o) \ - _os_atomic_refcnt_dispose_barrier2o(o, os_obj_ref_cnt) + _os_atomic_refcnt_dispose_barrier(&o->os_obj_ref_cnt) void _os_object_atfork_child(void); void 
_os_object_atfork_parent(void); diff --git a/src/queue.c b/src/queue.c index 97542a344..82007a2c7 100644 --- a/src/queue.c +++ b/src/queue.c @@ -70,7 +70,7 @@ dispatch_assert_queue(dispatch_queue_t dq) DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue()"); } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (likely(_dq_state_drain_locked_by_self(dq_state))) { return; } @@ -89,7 +89,7 @@ dispatch_assert_queue_not(dispatch_queue_t dq) DISPATCH_CLIENT_CRASH(metatype, "invalid queue passed to " "dispatch_assert_queue_not()"); } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (unlikely(_dq_state_drain_locked_by_self(dq_state))) { _dispatch_assert_queue_fail(dq, false); } @@ -108,7 +108,7 @@ dispatch_assert_queue_barrier(dispatch_queue_t dq) } if (likely(dq->do_targetq)) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (likely(_dq_state_is_in_barrier(dq_state))) { return; } @@ -148,7 +148,7 @@ _dispatch_set_priority_and_mach_voucher_slow(pthread_priority_t pp, pflags |= _PTHREAD_SET_SELF_QOS_FLAG; } uint64_t mgr_dq_state = - os_atomic_load2o(&_dispatch_mgr_q, dq_state, relaxed); + os_atomic_load(&_dispatch_mgr_q.dq_state, relaxed); if (unlikely(_dq_state_drain_locked_by_self(mgr_dq_state))) { DISPATCH_INTERNAL_CRASH(pp, "Changing the QoS while on the manager queue"); @@ -329,7 +329,7 @@ _dispatch_block_remember_async_queue(dispatch_block_private_data_t dbpd, // because dispatch_block_wait() will eagerly // consume the refcounts. 
_dispatch_retain_2(dq); - if (!os_atomic_cmpxchg2o(dbpd, dbpd_queue, NULL, dq, relaxed)) { + if (!os_atomic_cmpxchg(&dbpd->dbpd_queue, NULL, dq, relaxed)) { _dispatch_release_2(dq); } } @@ -498,7 +498,7 @@ _dispatch_block_invoke_direct(const struct dispatch_block_private_data_s *dbcpd) _dispatch_reset_priority_and_voucher(op, ov); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + if (os_atomic_inc(&dbpd->dbpd_performed, relaxed) == 1) { dispatch_group_leave(dbpd->dbpd_group); } } @@ -525,13 +525,13 @@ _dispatch_block_sync_invoke(void *block) _dispatch_reset_voucher(ov, 0); out: if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + if (os_atomic_inc(&dbpd->dbpd_performed, relaxed) == 1) { dispatch_group_leave(dbpd->dbpd_group); } } dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + boost_dq = os_atomic_xchg(&dbpd->dbpd_queue, NULL, relaxed); if (boost_dq) { // balances dispatch_{,barrier_,}sync _dispatch_release_2(boost_dq); @@ -555,13 +555,13 @@ _dispatch_block_async_invoke2(dispatch_block_t b, unsigned long invoke_flags) dbpd->dbpd_block(); } if ((atomic_flags & DBF_PERFORM) == 0) { - if (os_atomic_inc2o(dbpd, dbpd_performed, relaxed) == 1) { + if (os_atomic_inc(&dbpd->dbpd_performed, relaxed) == 1) { dispatch_group_leave(dbpd->dbpd_group); } } dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + boost_dq = os_atomic_xchg(&dbpd->dbpd_queue, NULL, relaxed); if (boost_dq) { // balances dispatch_{,barrier_,group_}async _dispatch_release_2(boost_dq); @@ -592,7 +592,7 @@ dispatch_block_cancel(dispatch_block_t db) DISPATCH_CLIENT_CRASH(0, "Invalid block object passed to " "dispatch_block_cancel()"); } - (void)os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_CANCELED, relaxed); + (void)os_atomic_or(&dbpd->dbpd_atomic_flags, DBF_CANCELED, relaxed); } intptr_t @@ -615,7 +615,7 @@ 
dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) "dispatch_block_wait()"); } - unsigned int flags = os_atomic_or_orig2o(dbpd, dbpd_atomic_flags, + unsigned int flags = os_atomic_or_orig(&dbpd->dbpd_atomic_flags, DBF_WAITING, relaxed); if (unlikely(flags & (DBF_WAITED | DBF_WAITING))) { DISPATCH_CLIENT_CRASH(flags, "A block object may not be waited for " @@ -629,7 +629,7 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) pthread_priority_t pp = _dispatch_get_priority(); dispatch_queue_t boost_dq; - boost_dq = os_atomic_xchg2o(dbpd, dbpd_queue, NULL, relaxed); + boost_dq = os_atomic_xchg(&dbpd->dbpd_queue, NULL, relaxed); if (boost_dq) { // release balances dispatch_{,barrier_,group_}async. // Can't put the queue back in the timeout case: the block might @@ -646,7 +646,7 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) _dispatch_thread_override_start(boost_th, pp, dbpd); } - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + int performed = os_atomic_load(&dbpd->dbpd_performed, relaxed); if (unlikely(performed > 1 || (boost_th && boost_dq))) { DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " "run more than once and waited for"); @@ -660,9 +660,9 @@ dispatch_block_wait(dispatch_block_t db, dispatch_time_t timeout) if (ret) { // timed out: reverse our changes - os_atomic_and2o(dbpd, dbpd_atomic_flags, ~DBF_WAITING, relaxed); + os_atomic_and(&dbpd->dbpd_atomic_flags, ~DBF_WAITING, relaxed); } else { - os_atomic_or2o(dbpd, dbpd_atomic_flags, DBF_WAITED, relaxed); + os_atomic_or(&dbpd->dbpd_atomic_flags, DBF_WAITED, relaxed); // don't need to re-test here: the second call would see // the first call's WAITING } @@ -679,7 +679,7 @@ dispatch_block_notify(dispatch_block_t db, dispatch_queue_t queue, DISPATCH_CLIENT_CRASH(db, "Invalid block object passed to " "dispatch_block_notify()"); } - int performed = os_atomic_load2o(dbpd, dbpd_performed, relaxed); + int performed = 
os_atomic_load(&dbpd->dbpd_performed, relaxed); if (unlikely(performed > 1)) { DISPATCH_CLIENT_CRASH(performed, "A block object may not be both " "run more than once and observed"); @@ -998,7 +998,7 @@ _dispatch_lane_non_barrier_complete(dispatch_lane_t dq, uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self(); // see _dispatch_lane_resume() - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL; if (unlikely(_dq_state_drain_locked(old_state))) { // make drain_try_unlock() fail and reconsider whether there's @@ -1102,7 +1102,7 @@ _dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq, dispatch_wakeup_flags_t flags = 0; // similar to _dispatch_queue_drain_try_unlock - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; @@ -1192,7 +1192,7 @@ _dispatch_non_barrier_waiter_redirect_or_wake(dispatch_lane_t dq, dispatch_assert(!(dsc->dc_flags & DC_FLAG_BARRIER)); again: - old_state = os_atomic_load2o(dq, dq_state, relaxed); + old_state = os_atomic_load(&dq->dq_state, relaxed); if (dsc->dsc_override_qos < _dq_state_max_qos(old_state)) { dsc->dsc_override_qos = (uint8_t)_dq_state_max_qos(old_state); @@ -1316,7 +1316,7 @@ _dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, next_dc = _dispatch_queue_pop_head(dq, dc); transfer_lock_again: - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); _dispatch_queue_move_to_contended_sync(dq->_as_dq); @@ -1333,8 +1333,8 @@ 
_dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq, // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + os_atomic_xor(&dq->dq_state, DISPATCH_QUEUE_DIRTY, acquire); + next_dc = os_atomic_load(&dq->dq_items_head, relaxed); goto transfer_lock_again; }); } else { @@ -1371,7 +1371,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, } again: - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq); _dispatch_queue_move_to_contended_sync(dq->_as_dq); @@ -1393,7 +1393,7 @@ _dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos, // what the enqueuer that set DIRTY has done. 
// the xor generates better assembly as DISPATCH_QUEUE_DIRTY // is already in a register - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + os_atomic_xor(&dq->dq_state, DISPATCH_QUEUE_DIRTY, acquire); flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE; return dx_wakeup(dq, qos, flags); }); @@ -1470,7 +1470,7 @@ _dispatch_lane_drain_non_barriers(dispatch_lane_t dq, // see _dispatch_lane_drain, go in non barrier mode, and drain items - os_atomic_and2o(dq, dq_state, ~DISPATCH_QUEUE_IN_BARRIER, release); + os_atomic_and(&dq->dq_state, ~DISPATCH_QUEUE_IN_BARRIER, release); do { if (likely(owned_width)) { @@ -1500,7 +1500,7 @@ _dispatch_lane_drain_non_barriers(dispatch_lane_t dq, owned = _dispatch_queue_adjust_owned(dq, owned, dc); } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = old_state - owned; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_DIRTY; @@ -1515,8 +1515,8 @@ _dispatch_lane_drain_non_barriers(dispatch_lane_t dq, old_state, new_state, owner_self); } else if (unlikely(_dq_state_is_dirty(old_state))) { os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire); - next_dc = os_atomic_load2o(dq, dq_items_head, relaxed); + os_atomic_xor(&dq->dq_state, DISPATCH_QUEUE_DIRTY, acquire); + next_dc = os_atomic_load(&dq->dq_items_head, relaxed); goto drain_again; }); } @@ -1594,7 +1594,7 @@ _dispatch_wait_prepare(dispatch_queue_t dq) { uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { if (_dq_state_is_suspended(old_state) || !_dq_state_is_base_wlh(old_state) || !_dq_state_in_uncontended_sync(old_state)) { @@ -1709,7 +1709,7 @@ _dispatch_barrier_trysync_or_async_f_complete(dispatch_lane_t dq, _dispatch_sync_function_invoke_inline(dq, ctxt, func); if (flags & 
DISPATCH_BARRIER_TRYSYNC_SUSPEND) { - uint64_t dq_state = os_atomic_sub2o(dq, dq_state, + uint64_t dq_state = os_atomic_sub(&dq->dq_state, DISPATCH_QUEUE_SUSPEND_INTERVAL, relaxed); if (!_dq_state_is_suspended(dq_state)) { wflags |= DISPATCH_WAKEUP_CONSUME_2; @@ -1973,7 +1973,7 @@ _dispatch_fake_wlh(dispatch_queue_t dq) { dispatch_wlh_t new_wlh = DISPATCH_WLH_ANON; if (likely(dx_metatype(dq) == _DISPATCH_WORKLOOP_TYPE) || - _dq_state_is_base_wlh(os_atomic_load2o(dq, dq_state, relaxed))) { + _dq_state_is_base_wlh(os_atomic_load(&dq->dq_state, relaxed))) { new_wlh = (dispatch_wlh_t)dq; } dispatch_wlh_t old_wlh = _dispatch_get_wlh(); @@ -2093,7 +2093,7 @@ static inline bool _dispatch_async_and_wait_recurse_one(dispatch_queue_t dq, dispatch_sync_context_t dsc, dispatch_tid tid, uintptr_t dc_flags) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (unlikely(_dispatch_async_and_wait_should_always_async(dq, dq_state))) { // We want to async away the dsc which means that we will go through case // (1) of _dispatch_async_and_wait_f_slow. 
@@ -2320,7 +2320,7 @@ _dispatch_queue_init_specific(dispatch_queue_t dq) dqsh = _dispatch_calloc(1, sizeof(struct dispatch_queue_specific_head_s)); TAILQ_INIT(&dqsh->dqsh_entries); - if (unlikely(!os_atomic_cmpxchg2o(dq, dq_specific_head, + if (unlikely(!os_atomic_cmpxchg(&dq->dq_specific_head, NULL, dqsh, release))) { _dispatch_queue_specific_head_dispose(dqsh); } @@ -2499,7 +2499,7 @@ _dispatch_lane_inherit_wlh_from_target(dispatch_lane_t dq, dispatch_queue_t tq) role = DISPATCH_QUEUE_ROLE_BASE_ANON; } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; new_state |= role; if (old_state == new_state) { @@ -2905,11 +2905,11 @@ _dispatch_queue_dispose(dispatch_queue_class_t dqu, bool *allow_free) if (dq->dq_label && _dispatch_queue_label_needs_free(dq)) { free((void*)dq->dq_label); } - dqsh = os_atomic_xchg2o(dq, dq_specific_head, (void *)0x200, relaxed); + dqsh = os_atomic_xchg(&dq->dq_specific_head, (void *)0x200, relaxed); if (dqsh) _dispatch_queue_specific_head_dispose(dqsh); // fast path for queues that never got their storage retained - if (likely(os_atomic_load2o(dq, dq_sref_cnt, relaxed) == 0)) { + if (likely(os_atomic_load(&dq->dq_sref_cnt, relaxed) == 0)) { // poison the state with something that is suspended and is easy to spot dq->dq_state = 0xdead000000000000; return; @@ -2941,7 +2941,7 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) dq->dq_items_tail = (void *)0x200; uint64_t orig_dq_state, dq_state; - dq_state = orig_dq_state = os_atomic_load2o(dq, dq_state, relaxed); + dq_state = orig_dq_state = os_atomic_load(&dq->dq_state, relaxed); uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width); if (dx_hastypeflag(dq, QUEUE_ROOT)) { @@ -2953,7 +2953,7 @@ _dispatch_lane_class_dispose(dispatch_lane_class_t dqu, bool *allow_free) if (unlikely(dq_state != initial_state)) { if 
(_dq_state_drain_locked(dq_state)) { DISPATCH_CLIENT_CRASH((uintptr_t)orig_dq_state, - "Release of a locked queue"); + "Premature release of a locked queue"); } #if DISPATCH_SIZEOF_PTR == 4 orig_dq_state >>= 32; @@ -2975,7 +2975,7 @@ _dispatch_lane_dispose(dispatch_lane_t dq, bool *allow_free) void _dispatch_queue_xref_dispose(dispatch_queue_t dq) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (unlikely(_dq_state_is_suspended(dq_state))) { long state = (long)dq_state; if (sizeof(long) < sizeof(uint64_t)) state = (long)(dq_state >> 32); @@ -2985,7 +2985,7 @@ _dispatch_queue_xref_dispose(dispatch_queue_t dq) } DISPATCH_CLIENT_CRASH(dq_state, "Release of a suspended object"); } - os_atomic_or2o(dq, dq_atomic_flags, DQF_RELEASED, relaxed); + os_atomic_or(&dq->dq_atomic_flags, DQF_RELEASED, relaxed); } DISPATCH_NOINLINE @@ -3005,7 +3005,7 @@ _dispatch_lane_suspend_slow(dispatch_lane_t dq) delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { // unsigned underflow of the substraction can happen because other // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation @@ -3030,7 +3030,7 @@ _dispatch_lane_suspend(dispatch_lane_t dq) { uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = DISPATCH_QUEUE_SUSPEND_INTERVAL; if (unlikely(os_add_overflow(old_state, new_state, &new_state))) { os_atomic_rmw_loop_give_up({ @@ -3066,7 +3066,7 @@ _dispatch_lane_resume_slow(dispatch_lane_t dq) delta -= DISPATCH_QUEUE_HAS_SIDE_SUSPEND_CNT; break; } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, 
new_state, relaxed, { // unsigned overflow of the addition can happen because other // threads could have touched this value while we were trying to acquire // the lock, or because another thread raced us to do the same operation @@ -3131,7 +3131,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) if (op == DISPATCH_ACTIVATE) { // relaxed atomic because this doesn't publish anything, this is only // about picking the thread that gets to finalize the activation - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { if (!_dq_state_is_inactive(old_state)) { // object already active or activated os_atomic_rmw_loop_give_up(return); @@ -3148,7 +3148,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) }); } else if (op == DISPATCH_ACTIVATION_DONE) { // release barrier needed to publish the effect of dq_activate() - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { if (unlikely(!(old_state & DISPATCH_QUEUE_INACTIVE_BITS_MASK))) { os_atomic_rmw_loop_give_up({ // object activation was already concurrently done @@ -3184,7 +3184,7 @@ _dispatch_lane_resume(dispatch_lane_t dq, dispatch_resume_op_t op) // release barrier needed to publish the effect of // - dispatch_set_target_queue() // - dispatch_set_*_handler() - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = old_state; if (is_source && (old_state & suspend_bits) == DISPATCH_QUEUE_INACTIVE) { @@ -3320,7 +3320,7 @@ _dispatch_lane_set_width(void *ctxt) } dispatch_queue_flags_t old_dqf, new_dqf; - os_atomic_rmw_loop2o(dq, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + os_atomic_rmw_loop(&dq->dq_atomic_flags, old_dqf, new_dqf, relaxed, { new_dqf = (old_dqf & DQF_FLAGS_MASK) | DQF_WIDTH(tmp); }); 
_dispatch_lane_inherit_wlh_from_target(dq, dq->do_targetq); @@ -3454,7 +3454,7 @@ _dispatch_queue_debug_attr(dispatch_queue_t dq, char* buf, size_t bufsiz) size_t offset = 0; dispatch_queue_t target = dq->do_targetq; const char *tlabel = target && target->dq_label ? target->dq_label : ""; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); offset += dsnprintf(&buf[offset], bufsiz - offset, "sref = %d, " "target = %s[%p], width = 0x%x, state = 0x%016llx", @@ -3804,7 +3804,7 @@ _dispatch_lane_drain(dispatch_lane_t dq, dispatch_invoke_context_t dic, // effect visible to other sync work items on other threads // that may start coming in after this point, hence the // release barrier - os_atomic_xor2o(dq, dq_state, owned, release); + os_atomic_xor(&dq->dq_state, owned, release); owned = dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL; } else if (unlikely(owned == 0)) { if (_dispatch_object_is_waiter(dc)) { @@ -3910,7 +3910,7 @@ _dispatch_queue_invoke_finish(dispatch_queue_t dq, if (tq == DISPATCH_QUEUE_WAKEUP_MGR) { enqueued = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = old_state - owned; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state |= DISPATCH_QUEUE_DIRTY; @@ -4232,7 +4232,7 @@ _dispatch_workloop_activate_simulator_fallback(dispatch_workloop_t dwl, _dispatch_retain(dprq); dispatch_release(dprq); - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, relaxed, { new_state = old_state & ~DISPATCH_QUEUE_ROLE_MASK; new_state |= DISPATCH_QUEUE_ROLE_BASE_ANON; }); @@ -4304,7 +4304,7 @@ _dispatch_workloop_activate_attributes(dispatch_workloop_t dwl) void _dispatch_workloop_dispose(dispatch_workloop_t dwl, bool *allow_free) { - uint64_t dq_state = os_atomic_load2o(dwl, dq_state, 
relaxed); + uint64_t dq_state = os_atomic_load(&dwl->dq_state, relaxed); uint64_t initial_state = DISPATCH_QUEUE_STATE_INIT_VALUE(1); initial_state |= _dispatch_workloop_role_bits(); @@ -4358,7 +4358,7 @@ _dispatch_workloop_activate(dispatch_workloop_t dwl) // This transitions either: // - from INACTIVE to ACTIVATING // - or from ACTIVE to ACTIVE - uint64_t old_state = os_atomic_and_orig2o(dwl, dq_state, + uint64_t old_state = os_atomic_and_orig(&dwl->dq_state, ~DISPATCH_QUEUE_ACTIVATED, relaxed); if (likely(_dq_state_is_inactive(old_state))) { @@ -4373,7 +4373,7 @@ _dispatch_workloop_activate(dispatch_workloop_t dwl) _dispatch_priority_make_fallback(DISPATCH_QOS_DEFAULT); } dwl->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT; - os_atomic_and2o(dwl, dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); + os_atomic_and(&dwl->dq_state, ~DISPATCH_QUEUE_ACTIVATING, relaxed); return _dispatch_workloop_wakeup(dwl, 0, DISPATCH_WAKEUP_CONSUME_2); } } @@ -4385,14 +4385,14 @@ _dispatch_workloop_try_lower_max_qos(dispatch_workloop_t dwl, { uint64_t old_state, new_state, qos_bits = _dq_state_from_qos(qos); - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, relaxed, { if ((old_state & DISPATCH_QUEUE_MAX_QOS_MASK) <= qos_bits) { os_atomic_rmw_loop_give_up(return true); } if (unlikely(_dq_state_is_dirty(old_state))) { os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + os_atomic_xor(&dwl->dq_state, DISPATCH_QUEUE_DIRTY, acquire); return false; }); } @@ -4510,7 +4510,7 @@ _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, has_more_work = _dispatch_workloop_probe(dwl); } - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, release, { if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); 
_dispatch_queue_move_to_contended_sync(dwl->_as_dq); @@ -4526,7 +4526,7 @@ _dispatch_workloop_drain_barrier_waiter(dispatch_workloop_t dwl, // we know there's a next item, keep the enqueued bit if any } else if (unlikely(_dq_state_is_dirty(old_state))) { os_atomic_rmw_loop_give_up({ - os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + os_atomic_xor(&dwl->dq_state, DISPATCH_QUEUE_DIRTY, acquire); goto transfer_lock_again; }); } else { @@ -4573,7 +4573,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, uint64_t old_state, new_state; transfer_lock_again: - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, release, { if (unlikely(_dq_state_needs_ensure_ownership(old_state))) { _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dwl); _dispatch_queue_move_to_contended_sync(dwl->_as_dq); @@ -4591,7 +4591,7 @@ _dispatch_workloop_barrier_complete(dispatch_workloop_t dwl, dispatch_qos_t qos, // what the enqueuer that set DIRTY has done. 
// the xor generates better assembly as DISPATCH_QUEUE_DIRTY // is already in a register - os_atomic_xor2o(dwl, dq_state, DISPATCH_QUEUE_DIRTY, acquire); + os_atomic_xor(&dwl->dq_state, DISPATCH_QUEUE_DIRTY, acquire); goto again; }); } else if (likely(_dq_state_is_base_wlh(old_state))) { @@ -4709,7 +4709,7 @@ _dispatch_workloop_wakeup(dispatch_workloop_t dwl, dispatch_qos_t qos, uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (_dq_state_max_qos(new_state)) { // We need to make sure we have the enqueued bit when we are making @@ -4783,7 +4783,7 @@ _dispatch_workloop_push_waiter(dispatch_workloop_t dwl, DISPATCH_QUEUE_UNCONTENDED_SYNC; uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dwl, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dwl->dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; if (unlikely(_dq_state_drain_locked(old_state))) { @@ -5163,7 +5163,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, enqueue = DISPATCH_QUEUE_ENQUEUED_ON_MGR; } qos = _dispatch_queue_wakeup_qos(dq, qos); - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); if (flags & DISPATCH_WAKEUP_CLEAR_ACTIVATING) { // When an event is being delivered to a source because its @@ -5200,7 +5200,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, // // Someone is trying to override the last work item of the queue. 
// - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { // Avoid spurious override if the item was drained before we could // apply an override if (!_dq_state_drain_locked(old_state) && @@ -5288,7 +5288,7 @@ _dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos, // so instead use depdendency ordering to read // the targetq pointer. os_atomic_thread_fence(dependency); - tq = os_atomic_load_with_dependency_on2o(dq, do_targetq, + tq = os_atomic_load_with_dependency_on(&dq->do_targetq, (long)new_state); } else { tq = target; @@ -5335,7 +5335,7 @@ _dispatch_lane_push_waiter_should_wakeup(dispatch_lane_t dq, return true; } if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) { - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); return _dispatch_async_and_wait_should_always_async(dq, dq_state); } return false; @@ -5369,7 +5369,7 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, _dispatch_lock_value_for_self() | DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER | DISPATCH_QUEUE_UNCONTENDED_SYNC; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = _dq_state_merge_qos(old_state, qos); new_state |= DISPATCH_QUEUE_DIRTY; if (unlikely(_dq_state_drain_locked(old_state) || @@ -5415,7 +5415,7 @@ _dispatch_lane_push_waiter(dispatch_lane_t dq, dispatch_sync_context_t dsc, } } } else if (unlikely(qos)) { - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = _dq_state_merge_qos(old_state, qos); if (old_state == new_state) { os_atomic_rmw_loop_give_up(return); @@ -6070,13 +6070,13 @@ _dispatch_mgr_priority_raise(const pthread_attr_t *attr) (void)pthread_attr_get_qos_class_np((pthread_attr_t 
*)attr, &qos, NULL); if (qos) { param.sched_priority = _dispatch_mgr_sched_qos2prio(qos); - os_atomic_rmw_loop2o(&_dispatch_mgr_sched, qos, q, qos, relaxed, { + os_atomic_rmw_loop(&_dispatch_mgr_sched.qos, q, qos, relaxed, { if (q >= qos) os_atomic_rmw_loop_give_up(break); }); } #endif int p, prio = param.sched_priority; - os_atomic_rmw_loop2o(&_dispatch_mgr_sched, prio, p, prio, relaxed, { + os_atomic_rmw_loop(&_dispatch_mgr_sched.prio, p, prio, relaxed, { if (p >= prio) os_atomic_rmw_loop_give_up(return); }); #if DISPATCH_USE_KEVENT_WORKQUEUE @@ -6117,7 +6117,7 @@ _dispatch_queue_mgr_lock(struct dispatch_queue_static_s *dq) uint64_t old_state, new_state, set_owner_and_set_full_width = _dispatch_lock_value_for_self() | DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { new_state = old_state; if (unlikely(!_dq_state_is_runnable(old_state) || _dq_state_drain_locked(old_state))) { @@ -6135,7 +6135,7 @@ static inline bool _dispatch_queue_mgr_unlock(struct dispatch_queue_static_s *dq) { uint64_t old_state, new_state; - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, release, { new_state = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED; new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK; new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK; @@ -6186,7 +6186,7 @@ _dispatch_mgr_queue_push(dispatch_lane_t dq, dispatch_object_t dou, } if (unlikely(_dispatch_queue_push_item(dq, dou))) { - dq_state = os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + dq_state = os_atomic_or(&dq->dq_state, DISPATCH_QUEUE_DIRTY, release); if (!_dq_state_drain_locked_by_self(dq_state)) { _dispatch_trace_runtime_event(worker_request, &_dispatch_mgr_q, 1); _dispatch_event_loop_poke(DISPATCH_WLH_MANAGER, 0, 0); @@ -6580,9 +6580,9 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int 
floor) bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT; if (overcommit) { - os_atomic_add2o(dq, dgq_pending, remaining, relaxed); + os_atomic_add(&dq->dgq_pending, remaining, relaxed); } else { - if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) { + if (!os_atomic_cmpxchg(&dq->dgq_pending, 0, remaining, relaxed)) { _dispatch_root_queue_debug("worker thread request still pending for " "global queue: %p", dq); return; @@ -6591,13 +6591,13 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) int can_request, t_count; // seq_cst with atomic store to tail - t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered); + t_count = os_atomic_load(&dq->dgq_thread_pool_size, ordered); do { can_request = t_count < floor ? 0 : t_count - floor; if (remaining > can_request) { _dispatch_root_queue_debug("pthread pool reducing request from %d to %d", remaining, can_request); - os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed); + os_atomic_sub(&dq->dgq_pending, remaining - can_request, relaxed); remaining = can_request; } if (remaining == 0) { @@ -6605,7 +6605,7 @@ _dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor) "%p", dq); return; } - } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count, + } while (!os_atomic_cmpxchgv(&dq->dgq_thread_pool_size, t_count, t_count - remaining, &t_count, acquire)); #if !defined(_WIN32) @@ -6666,7 +6666,7 @@ _dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor) dx_type(dq) == DISPATCH_QUEUE_COOPERATIVE_ROOT_TYPE)) #endif { - if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, release))) { + if (unlikely(!os_atomic_cmpxchg(&dq->dgq_pending, 0, n, release))) { _dispatch_root_queue_debug("worker thread request still pending " "for global queue: %p", dq); return; @@ -6704,7 +6704,7 @@ _dispatch_root_queue_poke_and_wakeup(dispatch_queue_global_t dq, int n, int floo // dgq_pending does NOT order with a store to 
dgq_pending if the CAS fails // which is why we do an unconditional store. int old_pending, new_pending; - os_atomic_rmw_loop2o(dq, dgq_pending, old_pending, new_pending, release, { + os_atomic_rmw_loop(&dq->dgq_pending, old_pending, new_pending, release, { new_pending = old_pending ?: n; }); if (old_pending > 0) { @@ -6733,7 +6733,7 @@ enum { static int _dispatch_root_queue_mediator_is_gone(dispatch_queue_global_t dq) { - return os_atomic_load2o(dq, dq_items_head, relaxed) != + return os_atomic_load(&dq->dq_items_head, relaxed) != DISPATCH_ROOT_QUEUE_MEDIATOR; } @@ -6742,8 +6742,8 @@ _dispatch_root_queue_head_tail_quiesced(dispatch_queue_global_t dq) { // Wait for queue head and tail to be both non-empty or both empty struct dispatch_object_s *head, *tail; - head = os_atomic_load2o(dq, dq_items_head, relaxed); - tail = os_atomic_load2o(dq, dq_items_tail, relaxed); + head = os_atomic_load(&dq->dq_items_head, relaxed); + tail = os_atomic_load(&dq->dq_items_tail, relaxed); if ((head == NULL) == (tail == NULL)) { if (tail == NULL) { // // This is the case of head and tail both being NULL -- queue is empty. @@ -6776,7 +6776,7 @@ __DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, } // Since we have serious contention, we need to back off. if (!pending) { - (void)os_atomic_inc2o(dq, dgq_pending, release); + (void)os_atomic_inc(&dq->dgq_pending, release); pending = true; } _dispatch_contention_usleep(sleep_time); @@ -6792,7 +6792,7 @@ __DISPATCH_ROOT_QUEUE_CONTENDED_WAIT__(dispatch_queue_global_t dq, _dispatch_debug("contention on global queue: %p", dq); out: if (pending) { - (void)os_atomic_dec2o(dq, dgq_pending, acquire); + (void)os_atomic_dec(&dq->dgq_pending, acquire); // Make sure to resample the queue post-decrement to make sure that we are // seeing latest updates. We can use relaxed loads on the queue probe and // piggyback on the acquire dec of dgq_pending. 
@@ -6818,7 +6818,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) start: // The MEDIATOR value acts both as a "lock" and a signal - head = os_atomic_xchg2o(dq, dq_items_head, + head = os_atomic_xchg(&dq->dq_items_head, DISPATCH_ROOT_QUEUE_MEDIATOR, relaxed); // Queue head was empty, check to see if we are racing with concurrent @@ -6828,7 +6828,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) // The first xchg on the tail will tell the enqueueing thread that it // is safe to blindly write out to the head pointer. A cmpxchg honors // the algorithm. - if (unlikely(!os_atomic_cmpxchg2o(dq, dq_items_head, + if (unlikely(!os_atomic_cmpxchg(&dq->dq_items_head, DISPATCH_ROOT_QUEUE_MEDIATOR, NULL, relaxed))) { // We raced with concurrent enqueuer who made queue non-empty who // overwrote our mediator value in head. Enqueuer has succeeded in setting @@ -6866,11 +6866,11 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) next = head->do_next; if (unlikely(!next)) { - os_atomic_store2o(dq, dq_items_head, NULL, relaxed); + os_atomic_store(&dq->dq_items_head, NULL, relaxed); // 22708742: set tail to NULL with release, so that NULL write to head above // doesn't clobber head from concurrent enqueuer ie - if the CAS succeeds, // someone else must also see the head as NULL. - if (os_atomic_cmpxchg2o(dq, dq_items_tail, head, NULL, release)) { + if (os_atomic_cmpxchg(&dq->dq_items_tail, head, NULL, release)) { // both head and tail are NULL now goto out; } @@ -6878,7 +6878,7 @@ _dispatch_root_queue_drain_one(dispatch_queue_global_t dq) next = os_mpsc_get_next(head, do_next, &dq->dq_items_tail); } - os_atomic_store2o(dq, dq_items_head, next, relaxed); + os_atomic_store(&dq->dq_items_head, next, relaxed); _dispatch_root_queue_poke(dq, 1, 0); out: return head; @@ -6944,7 +6944,7 @@ _dispatch_root_queue_drain_deferred_wlh(dispatch_deferred_items_t ddi // Take over that +1, and add our own to make the +2 this loop expects, // and drain again. 
// - dq_state = os_atomic_load2o(dq, dq_state, relaxed); + dq_state = os_atomic_load(&dq->dq_state, relaxed); if (unlikely(!_dq_state_is_base_wlh(dq_state))) { // rdar://32671286 goto park; } @@ -7108,7 +7108,7 @@ _dispatch_worker_thread2(pthread_priority_t pp) _dispatch_introspection_thread_add(); _dispatch_trace_runtime_event(worker_unpark, dq, 0); - int pending = os_atomic_dec2o(dq, dgq_pending, acquire); + int pending = os_atomic_dec(&dq->dgq_pending, acquire); dispatch_assert(pending >= 0); invoke_flags |= DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN; @@ -7164,7 +7164,7 @@ _dispatch_worker_thread(void *context) dispatch_queue_global_t dq = context; dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt; - int pending = os_atomic_dec2o(dq, dgq_pending, acquire); + int pending = os_atomic_dec(&dq->dgq_pending, acquire); if (unlikely(pending < 0)) { DISPATCH_INTERNAL_CRASH(pending, "Pending thread request underflow"); } @@ -7228,7 +7228,7 @@ _dispatch_worker_thread(void *context) #if DISPATCH_USE_INTERNAL_WORKQUEUE if (monitored) _dispatch_workq_worker_unregister(dq); #endif - (void)os_atomic_inc2o(dq, dgq_thread_pool_size, release); + (void)os_atomic_inc(&dq->dgq_thread_pool_size, release); _dispatch_root_queue_poke(dq, 1, 0); _dispatch_release(dq); // retained in _dispatch_root_queue_poke_slow return NULL; @@ -7401,7 +7401,7 @@ _dispatch_queue_is_exclusively_owned_by_current_thread_4IOHID( if (dq->dq_width != 1) { DISPATCH_CLIENT_CRASH(dq->dq_width, "Invalid queue type"); } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); return _dq_state_drain_locked_by_self(dq_state); } #endif @@ -7644,7 +7644,7 @@ _dispatch_runloop_queue_poke(dispatch_lane_t dq, dispatch_qos_t qos, } qos = _dispatch_queue_wakeup_qos(dq, qos); - os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, relaxed, { new_state = 
_dq_state_merge_qos(old_state, qos); if (old_state == new_state) { os_atomic_rmw_loop_give_up(goto no_change); @@ -7673,7 +7673,7 @@ _dispatch_runloop_queue_reset_max_qos(dispatch_lane_t dq) { uint64_t old_state, clear_bits = DISPATCH_QUEUE_MAX_QOS_MASK | DISPATCH_QUEUE_RECEIVED_OVERRIDE; - old_state = os_atomic_and_orig2o(dq, dq_state, ~clear_bits, relaxed); + old_state = os_atomic_and_orig(&dq->dq_state, ~clear_bits, relaxed); return _dq_state_max_qos(old_state); } @@ -7681,14 +7681,33 @@ void _dispatch_runloop_queue_wakeup(dispatch_lane_t dq, dispatch_qos_t qos, dispatch_wakeup_flags_t flags) { + uint64_t old_state; + if (unlikely(_dispatch_queue_atomic_flags(dq) & DQF_RELEASED)) { // return _dispatch_lane_wakeup(dq, qos, flags); } if (flags & DISPATCH_WAKEUP_MAKE_DIRTY) { - os_atomic_or2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, release); + /* + * rdar://81886582 : Enqueuer making main queue non-empty could race with + * main thread trying to exit in dispatch_main. Drain lock tells us how + * far the main thread has come along. + * Use of release memory ordering to make sure update to tail pointer is + * visible. + */ + old_state = os_atomic_or_orig(&dq->dq_state, + DISPATCH_QUEUE_DIRTY, release); + if(unlikely(!_dq_state_drain_locked(old_state))) { + /* + * Main thread has successfully released the drain lock in + * _dispatch_lane_barrier_complete and is not going to see the dirty + * bit we just set. 
+ */ + return _dispatch_lane_wakeup(dq, qos, flags); + } } + if (_dispatch_queue_class_probe(dq)) { return _dispatch_runloop_queue_poke(dq, qos, flags); } @@ -7712,7 +7731,7 @@ static void _dispatch_main_queue_update_priority_from_thread(void) { dispatch_queue_main_t dq = &_dispatch_main_q; - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); mach_port_t owner = _dq_state_drain_owner(dq_state); dispatch_priority_t main_pri = @@ -7756,7 +7775,7 @@ _dispatch_main_queue_drain(dispatch_queue_main_t dq) DISPATCH_CLIENT_CRASH(0, "_dispatch_main_queue_callback_4CF called" " after dispatch_main()"); } - uint64_t dq_state = os_atomic_load2o(dq, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&dq->dq_state, relaxed); if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { DISPATCH_CLIENT_CRASH((uintptr_t)dq_state, "_dispatch_main_queue_callback_4CF called" @@ -8068,7 +8087,7 @@ _dispatch_queue_cleanup2(void) // way, because he still believes the queue to be thread-bound, but the // dirty bit will force this codepath to notice the enqueue, and the usual // lock transfer will do the proper wakeup. 
- os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, { + os_atomic_rmw_loop(&dq->dq_state, old_state, new_state, acquire, { new_state = old_state & ~DISPATCH_QUEUE_DIRTY; new_state += DISPATCH_QUEUE_WIDTH_INTERVAL; new_state += DISPATCH_QUEUE_IN_BARRIER; diff --git a/src/queue_internal.h b/src/queue_internal.h index d20da86cc..19c519a2a 100644 --- a/src/queue_internal.h +++ b/src/queue_internal.h @@ -775,7 +775,8 @@ dispatch_assert_valid_lane_type(dispatch_queue_pthread_root_s); DISPATCH_CLASS_DECL(queue, QUEUE); DISPATCH_CLASS_DECL_BARE(lane, QUEUE); DISPATCH_CLASS_DECL(workloop, QUEUE); -DISPATCH_SUBCLASS_DECL(queue_serial, queue, lane); +DISPATCH_SUBCLASS_DECL(queue_serial_executor, queue, lane); +DISPATCH_SUBCLASS_DECL(queue_serial, queue_serial_executor, lane); DISPATCH_SUBCLASS_DECL(queue_main, queue_serial, lane); DISPATCH_SUBCLASS_DECL(queue_concurrent, queue, lane); DISPATCH_SUBCLASS_DECL(queue_global, queue, lane); diff --git a/src/semaphore.c b/src/semaphore.c index 987333740..3b8ecb8e0 100644 --- a/src/semaphore.c +++ b/src/semaphore.c @@ -56,7 +56,7 @@ _dispatch_semaphore_dispose(dispatch_object_t dou, if (dsema->dsema_value < dsema->dsema_orig) { DISPATCH_CLIENT_CRASH(dsema->dsema_orig - dsema->dsema_value, - "Semaphore object deallocated while in use"); + "Semaphore object deallocated while in use (current value < original value)"); } _dispatch_sema4_dispose(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO); @@ -92,7 +92,7 @@ _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema) intptr_t dispatch_semaphore_signal(dispatch_semaphore_t dsema) { - long value = os_atomic_inc2o(dsema, dsema_value, release); + long value = os_atomic_inc(&dsema->dsema_value, release); if (likely(value > 0)) { return 0; } @@ -121,7 +121,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, case DISPATCH_TIME_NOW: orig = dsema->dsema_value; while (orig < 0) { - if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1, + if 
(os_atomic_cmpxchgv(&dsema->dsema_value, orig, orig + 1, &orig, relaxed)) { return _DSEMA4_TIMEOUT(); } @@ -138,7 +138,7 @@ _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema, intptr_t dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout) { - long value = os_atomic_dec2o(dsema, dsema_value, acquire); + long value = os_atomic_dec(&dsema->dsema_value, acquire); if (likely(value >= 0)) { return 0; } @@ -157,9 +157,9 @@ _dispatch_group_create_with_count(uint32_t n) dg->do_next = DISPATCH_OBJECT_LISTLESS; dg->do_targetq = _dispatch_get_default_queue(false); if (n) { - os_atomic_store2o(dg, dg_bits, + os_atomic_store(&dg->dg_bits, (uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed); - os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // + os_atomic_store(&dg->do_ref_cnt, 1, relaxed); // } return dg; } @@ -179,7 +179,7 @@ _dispatch_group_create_and_enter(void) void _dispatch_group_dispose(dispatch_object_t dou, DISPATCH_UNUSED bool *allow_free) { - uint64_t dg_state = os_atomic_load2o(dou._dg, dg_state, relaxed); + uint64_t dg_state = os_atomic_load(&dou._dg->dg_state, relaxed); if (unlikely((uint32_t)dg_state)) { DISPATCH_CLIENT_CRASH((uintptr_t)dg_state, @@ -191,7 +191,7 @@ size_t _dispatch_group_debug(dispatch_object_t dou, char *buf, size_t bufsiz) { dispatch_group_t dg = dou._dg; - uint64_t dg_state = os_atomic_load2o(dg, dg_state, relaxed); + uint64_t dg_state = os_atomic_load(&dg->dg_state, relaxed); size_t offset = 0; offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ", @@ -212,7 +212,7 @@ _dispatch_group_wait_slow(dispatch_group_t dg, uint32_t gen, { for (;;) { int rc = _dispatch_wait_on_address(&dg->dg_gen, gen, timeout, 0); - if (likely(gen != os_atomic_load2o(dg, dg_gen, acquire))) { + if (likely(gen != os_atomic_load(&dg->dg_gen, acquire))) { return 0; } if (rc == ETIMEDOUT) { @@ -226,7 +226,7 @@ dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout) { uint64_t old_state, new_state; - 
os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, relaxed, { + os_atomic_rmw_loop(&dg->dg_state, old_state, new_state, relaxed, { if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) { os_atomic_rmw_loop_give_up_with_fence(acquire, return 0); } @@ -276,7 +276,7 @@ dispatch_group_leave(dispatch_group_t dg) { // The value is incremented on a 64bits wide atomic so that the carry for // the -1 -> 0 transition increments the generation atomically. - uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state, + uint64_t new_state, old_state = os_atomic_add_orig(&dg->dg_state, DISPATCH_GROUP_VALUE_INTERVAL, release); uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK); @@ -294,7 +294,7 @@ dispatch_group_leave(dispatch_group_t dg) new_state &= ~DISPATCH_GROUP_HAS_NOTIFS; } if (old_state == new_state) break; - } while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state, + } while (unlikely(!os_atomic_cmpxchgv(&dg->dg_state, old_state, new_state, &old_state, relaxed))); return _dispatch_group_wake(dg, old_state, true); } @@ -310,7 +310,7 @@ dispatch_group_enter(dispatch_group_t dg) { // The value is decremented on a 32bits wide atomic so that the carry // for the 0 -> -1 transition is not propagated to the upper 32bits. 
- uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits, + uint32_t old_bits = os_atomic_sub_orig(&dg->dg_bits, DISPATCH_GROUP_VALUE_INTERVAL, acquire); uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK; if (unlikely(old_value == 0)) { @@ -337,7 +337,7 @@ _dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq, if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg); os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next); if (os_mpsc_push_was_empty(prev)) { - os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, { + os_atomic_rmw_loop(&dg->dg_state, old_state, new_state, release, { new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS; if ((uint32_t)old_state == 0) { os_atomic_rmw_loop_give_up({ diff --git a/src/shims/atomic.h b/src/shims/atomic.h index 44af102eb..0eac360f3 100644 --- a/src/shims/atomic.h +++ b/src/shims/atomic.h @@ -167,52 +167,6 @@ typedef struct { unsigned long __opaque_zero; } os_atomic_dependency_t; #endif // !__has_include() -#define os_atomic_load2o(p, f, m) \ - os_atomic_load(&(p)->f, m) -#define os_atomic_store2o(p, f, v, m) \ - os_atomic_store(&(p)->f, (v), m) -#define os_atomic_xchg2o(p, f, v, m) \ - os_atomic_xchg(&(p)->f, (v), m) -#define os_atomic_cmpxchg2o(p, f, e, v, m) \ - os_atomic_cmpxchg(&(p)->f, (e), (v), m) -#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \ - os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m) -#define os_atomic_add2o(p, f, v, m) \ - os_atomic_add(&(p)->f, (v), m) -#define os_atomic_add_orig2o(p, f, v, m) \ - os_atomic_add_orig(&(p)->f, (v), m) -#define os_atomic_sub2o(p, f, v, m) \ - os_atomic_sub(&(p)->f, (v), m) -#define os_atomic_sub_orig2o(p, f, v, m) \ - os_atomic_sub_orig(&(p)->f, (v), m) -#define os_atomic_and2o(p, f, v, m) \ - os_atomic_and(&(p)->f, (v), m) -#define os_atomic_and_orig2o(p, f, v, m) \ - os_atomic_and_orig(&(p)->f, (v), m) -#define os_atomic_or2o(p, f, v, m) \ - os_atomic_or(&(p)->f, (v), m) -#define os_atomic_or_orig2o(p, f, v, m) \ - 
os_atomic_or_orig(&(p)->f, (v), m) -#define os_atomic_xor2o(p, f, v, m) \ - os_atomic_xor(&(p)->f, (v), m) -#define os_atomic_xor_orig2o(p, f, v, m) \ - os_atomic_xor_orig(&(p)->f, (v), m) - -#define os_atomic_load_with_dependency_on2o(p, f, e) \ - os_atomic_load_with_dependency_on(&(p)->f, e) - -#define os_atomic_inc2o(p, f, m) \ - os_atomic_add2o(p, f, 1, m) -#define os_atomic_inc_orig2o(p, f, m) \ - os_atomic_add_orig2o(p, f, 1, m) -#define os_atomic_dec2o(p, f, m) \ - os_atomic_sub2o(p, f, 1, m) -#define os_atomic_dec_orig2o(p, f, m) \ - os_atomic_sub_orig2o(p, f, 1, m) - -#define os_atomic_rmw_loop2o(p, f, ov, nv, m, ...) \ - os_atomic_rmw_loop(&(p)->f, ov, nv, m, __VA_ARGS__) - #define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) diff --git a/src/source.c b/src/source.c index a187ec76e..d6dc5f8b9 100644 --- a/src/source.c +++ b/src/source.c @@ -170,7 +170,7 @@ dispatch_source_get_data(dispatch_source_t ds) } #endif #endif // DISPATCH_USE_MEMORYSTATUS - uint64_t value = os_atomic_load2o(dr, ds_data, relaxed); + uint64_t value = os_atomic_load(&dr->ds_data, relaxed); return (unsigned long)(dr->du_has_extended_status ? 
DISPATCH_SOURCE_GET_DATA(value) : value); } @@ -220,13 +220,13 @@ dispatch_source_merge_data(dispatch_source_t ds, uintptr_t val) switch (dr->du_filter) { case DISPATCH_EVFILT_CUSTOM_ADD: - os_atomic_add2o(dr, ds_pending_data, val, relaxed); + os_atomic_add(&dr->ds_pending_data, val, relaxed); break; case DISPATCH_EVFILT_CUSTOM_OR: - os_atomic_or2o(dr, ds_pending_data, val, relaxed); + os_atomic_or(&dr->ds_pending_data, val, relaxed); break; case DISPATCH_EVFILT_CUSTOM_REPLACE: - os_atomic_store2o(dr, ds_pending_data, val, relaxed); + os_atomic_store(&dr->ds_pending_data, val, relaxed); break; default: DISPATCH_CLIENT_CRASH(dr->du_filter, "Invalid source type"); @@ -434,7 +434,7 @@ dispatch_source_set_registration_handler_f(dispatch_source_t ds, bool _dispatch_source_will_reenable_kevent_4NW(dispatch_source_t ds) { - uint64_t dq_state = os_atomic_load2o(ds, dq_state, relaxed); + uint64_t dq_state = os_atomic_load(&ds->dq_state, relaxed); if (unlikely(!_dq_state_drain_locked_by_self(dq_state))) { DISPATCH_CLIENT_CRASH(0, "_dispatch_source_will_reenable_kevent_4NW " @@ -507,7 +507,7 @@ static inline bool _dispatch_source_refs_needs_configuration(dispatch_unote_t du) { return du._du->du_is_timer && - os_atomic_load2o(du._dt, dt_pending_config, relaxed); + os_atomic_load(&du._dt->dt_pending_config, relaxed); } DISPATCH_ALWAYS_INLINE @@ -517,7 +517,7 @@ _dispatch_source_refs_needs_rearm(dispatch_unote_t du) if (!du._du->du_is_timer) { return _dispatch_unote_needs_rearm(du); } - if (os_atomic_load2o(du._dt, dt_pending_config, relaxed)) { + if (os_atomic_load(&du._dt->dt_pending_config, relaxed)) { return true; } if (_dispatch_unote_needs_rearm(du)) { @@ -557,7 +557,7 @@ _dispatch_source_latch_and_call(dispatch_source_t ds, dispatch_queue_t cq, { dispatch_source_refs_t dr = ds->ds_refs; dispatch_continuation_t dc = _dispatch_source_get_handler(dr, DS_EVENT_HANDLER); - uint64_t prev = os_atomic_xchg2o(dr, ds_pending_data, 0, relaxed); + uint64_t prev = 
os_atomic_xchg(&dr->ds_pending_data, 0, relaxed); if (dr->du_is_timer && (dr->du_timer_flags & DISPATCH_TIMER_AFTER)) { _dispatch_trace_item_pop(cq, dc); // see _dispatch_after @@ -643,7 +643,7 @@ _dispatch_source_refs_unregister(dispatch_source_t ds, uint32_t options) // deferred unregistration dispatch_queue_flags_t oqf, nqf; - os_atomic_rmw_loop2o(ds, dq_atomic_flags, oqf, nqf, relaxed, { + os_atomic_rmw_loop(&ds->dq_atomic_flags, oqf, nqf, relaxed, { if (oqf & (DSF_NEEDS_EVENT | DSF_DELETED)) { os_atomic_rmw_loop_give_up(break); } @@ -820,7 +820,7 @@ _dispatch_source_invoke2(dispatch_source_t ds, dispatch_invoke_context_t dic, dqf = _dispatch_queue_atomic_flags(ds); if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && - os_atomic_load2o(dr, ds_pending_data, relaxed)) { + os_atomic_load(&dr->ds_pending_data, relaxed)) { // The source has pending data to deliver via the event handler callback // on the target queue. Some sources need to be rearmed on the kevent // queue after event delivery. @@ -1005,7 +1005,7 @@ _dispatch_source_wakeup(dispatch_source_t ds, dispatch_qos_t qos, // from the target queue tq = DISPATCH_QUEUE_WAKEUP_TARGET; } else if (!(dqf & (DSF_CANCELED | DQF_RELEASED)) && - os_atomic_load2o(dr, ds_pending_data, relaxed)) { + os_atomic_load(&dr->ds_pending_data, relaxed)) { // The source has pending data to deliver to the target queue. 
tq = DISPATCH_QUEUE_WAKEUP_TARGET; } else if ((dqf & (DSF_CANCELED | DQF_RELEASED)) && !(dqf & DSF_DELETED)) { @@ -1073,7 +1073,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) } _dispatch_object_debug(ds, "%s", __func__); - os_atomic_rmw_loop2o(ds, dq_atomic_flags, old_dqf, new_dqf, relaxed, { + os_atomic_rmw_loop(&ds->dq_atomic_flags, old_dqf, new_dqf, relaxed, { new_dqf = old_dqf | DSF_CANCELED; if (old_dqf & DSF_CANCEL_WAITER) { os_atomic_rmw_loop_give_up(break); @@ -1102,7 +1102,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER; uint64_t old_state, new_state; - os_atomic_rmw_loop2o(ds, dq_state, old_state, new_state, seq_cst, { + os_atomic_rmw_loop(&ds->dq_state, old_state, new_state, seq_cst, { new_state = old_state; if (likely(_dq_state_is_runnable(old_state) && !_dq_state_drain_locked(old_state))) { @@ -1153,7 +1153,7 @@ dispatch_source_cancel_and_wait(dispatch_source_t ds) dispatch_queue_flags_t dqf = _dispatch_queue_atomic_flags(ds); while (unlikely(!(dqf & DSF_DELETED))) { if (unlikely(!(dqf & DSF_CANCEL_WAITER))) { - if (!os_atomic_cmpxchgv2o(ds, dq_atomic_flags, + if (!os_atomic_cmpxchgv(&ds->dq_atomic_flags, dqf, dqf | DSF_CANCEL_WAITER, &dqf, relaxed)) { continue; } @@ -1195,7 +1195,7 @@ _dispatch_source_merge_evt(dispatch_unote_t du, uint32_t flags, } // if the resource behind the ident vanished, the event handler can't // do anything useful anymore, so do not try to call it at all - os_atomic_store2o(du._dr, ds_pending_data, 0, relaxed); + os_atomic_store(&du._dr->ds_pending_data, 0, relaxed); } _dispatch_debug("kevent-source[%p]: merged kevent[%p]", ds, du._dr); @@ -1366,7 +1366,7 @@ dispatch_source_set_timer(dispatch_source_t ds, dispatch_time_t start, } _dispatch_source_timer_telemetry(ds, dtc->dtc_clock, &dtc->dtc_timer); - dtc = os_atomic_xchg2o(dt, dt_pending_config, dtc, release); + dtc = os_atomic_xchg(&dt->dt_pending_config, dtc, release); if (dtc) free(dtc); 
dx_wakeup(ds, 0, DISPATCH_WAKEUP_MAKE_DIRTY); } @@ -1443,7 +1443,7 @@ _dispatch_after(dispatch_time_t when, dispatch_queue_t dq, // reference `ds` so that it doesn't show up as a leak dc->dc_data = ds; _dispatch_trace_item_push(dq, dc); - os_atomic_store2o(dt, ds_handler[DS_EVENT_HANDLER], dc, relaxed); + os_atomic_store(&dt->ds_handler[DS_EVENT_HANDLER], dc, relaxed); dispatch_clock_t clock; uint64_t target; diff --git a/src/voucher.c b/src/voucher.c index f7cb290bf..f24540eac 100644 --- a/src/voucher.c +++ b/src/voucher.c @@ -198,6 +198,29 @@ _voucher_hash_remove(voucher_t v) _voucher_hash_mark_not_enqueued(v); } +static void +_voucher_hash_reset_locked(void) { + /* + * Because voucher ports are not inherited by children, we empty the + * voucher hash table. We need to leave the voucher instances around, + * though, in case anyone has any references to them. So, we decompose the + * linked list, and strip all of the members of any kernel voucher + * references that they might have. + */ + for (unsigned int i = 0; i < VL_HASH_SIZE; i++) { + voucher_hash_head_s *head = &_voucher_hash[i]; + voucher_t v; + while ((v = _voucher_hash_get_next(head->vhh_first)) != VOUCHER_NULL) { + v->v_kvoucher = MACH_VOUCHER_NULL; + v->v_ipc_kvoucher = MACH_VOUCHER_NULL; + + _voucher_hash_remove(v); + } + } + + memset(_voucher_hash, ~(uintptr_t)VOUCHER_NULL, sizeof(_voucher_hash)); +} + static voucher_t _voucher_find_and_retain(mach_voucher_t kv) { @@ -207,7 +230,7 @@ _voucher_find_and_retain(mach_voucher_t kv) voucher_t v = _voucher_hash_get_next(head->vhh_first); while (v) { if (v->v_ipc_kvoucher == kv) { - int xref_cnt = os_atomic_inc2o(v, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc(&v->os_obj_xref_cnt, relaxed); _dispatch_voucher_debug("retain -> %d", v, xref_cnt + 1); if (unlikely(xref_cnt < 0)) { _dispatch_voucher_debug("over-release", v); @@ -215,7 +238,7 @@ _voucher_find_and_retain(mach_voucher_t kv) } if (xref_cnt == 0) { // resurrection: raced with 
_voucher_remove - (void)os_atomic_inc2o(v, os_obj_ref_cnt, relaxed); + (void)os_atomic_inc(&v->os_obj_ref_cnt, relaxed); } break; } @@ -236,6 +259,15 @@ _voucher_insert(voucher_t v) _dispatch_voucher_debug("corruption", v); DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } + if (unlikely(v->v_activity != 0)) { + _dispatch_voucher_debug("Activity data corruption", v); + DISPATCH_INTERNAL_CRASH(v->v_activity, "base voucher has non-zero activity value"); + } + if (unlikely(v->v_kvbase != VOUCHER_NULL)) { + _dispatch_voucher_debug("Incoming voucher with corrupted base", v); + _dispatch_voucher_debug("Corrupted base for incoming voucher", v->v_kvbase); + DISPATCH_INTERNAL_CRASH(v->v_kvbase, "base voucher has nested base voucher"); + } _voucher_hash_enqueue(kv, v); _voucher_hash_lock_unlock(); } @@ -251,7 +283,7 @@ _voucher_remove(voucher_t v) DISPATCH_CLIENT_CRASH(0, "Voucher corruption"); } // check for resurrection race with _voucher_find_and_retain - if (os_atomic_load2o(v, os_obj_xref_cnt, ordered) < 0) { + if (os_atomic_load(&v->os_obj_xref_cnt, ordered) < 0) { if (_voucher_hash_is_enqueued(v)) _voucher_hash_remove(v); } _voucher_hash_lock_unlock(); @@ -423,7 +455,7 @@ _voucher_get_mach_voucher(voucher_t voucher) if (dispatch_assume_zero(kr) || !kv) { return MACH_VOUCHER_NULL; } - if (!os_atomic_cmpxchgv2o(voucher, v_ipc_kvoucher, MACH_VOUCHER_NULL, + if (!os_atomic_cmpxchgv(&voucher->v_ipc_kvoucher, MACH_VOUCHER_NULL, kv, &kvo, relaxed)) { _voucher_dealloc_mach_voucher(kv); kv = kvo; @@ -818,34 +850,25 @@ _voucher_activity_debug_channel_init(void) } } -static bool -_voucher_hash_is_empty() { +void +_voucher_atfork_prepare(void) +{ + // unlocked in _voucher_atfork_parent and reset in _voucher_atfork_child _voucher_hash_lock_lock(); - - bool empty = true; - for (unsigned int i = 0; i < VL_HASH_SIZE; i++) { - voucher_hash_head_s *head = &_voucher_hash[i]; - if (_voucher_hash_get_next(head->vhh_first) != VOUCHER_NULL) { - empty = false; - break; - } - } - 
_voucher_hash_lock_unlock(); - - return empty; } void _voucher_atfork_parent(void) { - if (!_voucher_hash_is_empty()){ - _dispatch_fork_becomes_unsafe(); - } + _voucher_hash_lock_unlock(); // taken in _voucher_atfork_prepare } void _voucher_atfork_child(void) { + _voucher_hash_reset_locked(); + _voucher_hash_lock.dul_lock = DLOCK_ONCE_UNLOCKED; + _dispatch_thread_setspecific(dispatch_voucher_key, NULL); _voucher_task_mach_voucher_pred = 0; _voucher_task_mach_voucher = MACH_VOUCHER_NULL; diff --git a/src/voucher_internal.h b/src/voucher_internal.h index 2d2a0839f..df5cb7030 100644 --- a/src/voucher_internal.h +++ b/src/voucher_internal.h @@ -40,7 +40,7 @@ * @group Voucher Creation SPI * SPI intended for clients that need to create vouchers. */ -OS_OBJECT_DECL_CLASS(voucher_recipe); +OS_OBJECT_DECL_SENDABLE_CLASS(voucher_recipe); /*! * @function voucher_create @@ -90,6 +90,7 @@ voucher_get_mach_voucher(voucher_t voucher); void _voucher_init(void); void _voucher_atfork_child(void); void _voucher_atfork_parent(void); +void _voucher_atfork_prepare(void); void _voucher_activity_debug_channel_init(void); #if OS_VOUCHER_ACTIVITY_SPI && OS_VOUCHER_ACTIVITY_GENERATE_SWAPS void _voucher_activity_swap(firehose_activity_id_t old_id, @@ -284,7 +285,7 @@ _voucher_retain_inline(struct voucher_s *voucher) { // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = os_atomic_inc2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_inc(&voucher->os_obj_xref_cnt, relaxed); _voucher_trace(RETAIN, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("retain -> %d", voucher, xref_cnt + 1); if (unlikely(xref_cnt <= 0)) { @@ -299,7 +300,7 @@ _voucher_release_inline(struct voucher_s *voucher) { // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + 
int xref_cnt = os_atomic_dec(&voucher->os_obj_xref_cnt, relaxed); _voucher_trace(RELEASE, (voucher_t)voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (likely(xref_cnt >= 0)) { @@ -343,7 +344,7 @@ _voucher_release_no_dispose(voucher_t voucher) #if !DISPATCH_VOUCHER_OBJC_DEBUG // not using _os_object_refcnt* because we don't need barriers: // vouchers are immutable and are in a hash table with a lock - int xref_cnt = os_atomic_dec2o(voucher, os_obj_xref_cnt, relaxed); + int xref_cnt = os_atomic_dec(&voucher->os_obj_xref_cnt, relaxed); _voucher_trace(RELEASE, voucher, xref_cnt + 1); _dispatch_voucher_debug("release -> %d", voucher, xref_cnt + 1); if (likely(xref_cnt >= 0)) { diff --git a/src/workgroup.c b/src/workgroup.c index 85fac9329..cfbece2d4 100644 --- a/src/workgroup.c +++ b/src/workgroup.c @@ -241,6 +241,12 @@ _os_workgroup_set_current(os_workgroup_t new_wg) } } +static inline bool +_os_workgroup_telemetry_flavor_is_valid(os_workgroup_telemetry_flavor_t flavor) +{ + return (flavor == OS_WORKGROUP_TELEMETRY_FLAVOR_BASIC); +} + static inline bool _os_workgroup_client_interval_data_initialized( os_workgroup_interval_data_t data) @@ -267,6 +273,11 @@ _os_workgroup_interval_data_complexity(os_workgroup_interval_data_t data) return complexity; } +static inline bool +_os_workgroup_interval_data_telemetry_requested(os_workgroup_interval_data_t data) { + return data != NULL && _os_workgroup_telemetry_flavor_is_valid(data->telemetry_flavor); +} + static inline bool _os_workgroup_attr_is_resolved(os_workgroup_attr_t attr) { @@ -292,6 +303,12 @@ _os_workgroup_attr_is_differentiated(os_workgroup_attr_t attr) return (attr->wg_attr_flags & OS_WORKGROUP_ATTR_UNDIFFERENTIATED) == 0; } +static inline bool +_os_workgroup_attr_has_telemetry_enabled(os_workgroup_attr_t attr) +{ + return attr->wg_telemetry_flavor != 0; +} + static inline bool _os_workgroup_attr_has_workload_id(os_workgroup_attr_t attr) { @@ -462,6 +479,10 @@ 
_os_workgroup_create_work_interval(os_workgroup_attr_t attr, flags |= WORK_INTERVAL_FLAG_HAS_WORKLOAD_ID; } + if (_os_workgroup_attr_has_telemetry_enabled(attr)) { + flags |= WORK_INTERVAL_FLAG_ENABLE_TELEMETRY_DATA; + } + work_interval_t wi; int rv = work_interval_create(&wi, flags); if (rv) { @@ -818,6 +839,46 @@ _workgroup_init(void) pthread_install_workgroup_functions_np(&_os_workgroup_pthread_functions); } +static inline bool +_os_workgroup_interval_invalid_telemetry_request(os_workgroup_interval_t wgi, + os_workgroup_interval_data_t data) +{ + return _os_workgroup_interval_data_telemetry_requested(data) && + data->telemetry_flavor != wgi->telemetry_flavor; +} + +static void +_os_workgroup_interval_copy_telemetry_data(os_workgroup_interval_t wgi, + os_workgroup_interval_data_t data) +{ +#if TARGET_OS_SIMULATOR + /* Not yet supported on the simulator */ + (void)wgi; + (void)data; +#else + struct work_interval_data wi_data; + + work_interval_instance_get_telemetry_data(wgi->wii, &wi_data, sizeof(wi_data)); + + /* Note that the requested telemetry flavor and struct size were + * already validated in os_workgroup_interval_data_set_telemetry() */ + + os_workgroup_telemetry_basic_t basic_telemetry; + + switch(data->telemetry_flavor) { + case OS_WORKGROUP_TELEMETRY_FLAVOR_BASIC: + basic_telemetry = (os_workgroup_telemetry_basic_t)data->telemetry_dst; + basic_telemetry->wg_external_wakeups = wi_data.wid_external_wakeups; + basic_telemetry->wg_total_wakeups = wi_data.wid_total_wakeups; + basic_telemetry->wg_cycles = wi_data.wid_cycles; + basic_telemetry->wg_instructions = wi_data.wid_instructions; + basic_telemetry->wg_user_time_mach = wi_data.wid_user_time_mach; + basic_telemetry->wg_system_time_mach = wi_data.wid_system_time_mach; + break; + } +#endif +} + #pragma mark Private functions int @@ -834,6 +895,32 @@ os_workgroup_interval_data_set_flags(os_workgroup_interval_data_t data, return ret; } +int 
+os_workgroup_interval_data_set_telemetry(os_workgroup_interval_data_t data, + os_workgroup_telemetry_flavor_t flavor, void *telemetry, size_t size) +{ + if (!_os_workgroup_telemetry_flavor_is_valid(flavor)) { + errno = EINVAL; + return errno; + } + + /* Validate the value of size for the specified flavor */ + switch(flavor) { + case OS_WORKGROUP_TELEMETRY_FLAVOR_BASIC: + if (size != OS_WORKGROUP_TELEMETRY_BASIC_SIZE_V1) { + errno = EINVAL; + return errno; + } + break; + } + + data->telemetry_flavor = flavor; + data->telemetry_dst = telemetry; + data->telemetry_size = (uint16_t)size; + + return 0; +} + int os_workgroup_attr_set_interval_type(os_workgroup_attr_t attr, os_workgroup_interval_type_t interval_type) @@ -862,6 +949,20 @@ os_workgroup_attr_set_flags(os_workgroup_attr_t attr, return ret; } +int +os_workgroup_attr_set_telemetry_flavor(os_workgroup_attr_t attr, + os_workgroup_telemetry_flavor_t flavor) +{ + int ret = 0; + if (_os_workgroup_client_attr_is_valid(attr) && + _os_workgroup_telemetry_flavor_is_valid(flavor)) { + attr->wg_telemetry_flavor = flavor; + } else { + ret = EINVAL; + } + return ret; +} + os_workgroup_t os_workgroup_interval_copy_current_4AudioToolbox(void) { @@ -907,6 +1008,13 @@ os_workgroup_create(const char *name, os_workgroup_attr_t attr) return NULL; } + /* We don't yet support enabling a telemetry flavor from + * the creation functions that only return os_workgroup_t */ + if (_os_workgroup_attr_has_telemetry_enabled(attr)) { + errno = ENOTSUP; + return NULL; + } + mach_port_t port = MACH_PORT_NULL; wi = _os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { @@ -973,6 +1081,7 @@ os_workgroup_interval_create(const char *name, os_clockid_t clock, wgi->wii_lock = OS_UNFAIR_LOCK_INIT; wgi->wg_type = attr->wg_type; wgi->wg_state = OS_WORKGROUP_OWNER; + wgi->telemetry_flavor = attr->wg_telemetry_flavor; _os_workgroup_set_name(wgi->_as_wg, name); @@ -1032,6 +1141,13 @@ os_workgroup_create_with_workload_id(const char * name, 
return NULL; } + /* We don't yet support enabling a telemetry flavor from + * the creation functions that only return os_workgroup_t */ + if (_os_workgroup_attr_has_telemetry_enabled(attr)) { + errno = ENOTSUP; + return NULL; + } + mach_port_t port = MACH_PORT_NULL; wi = _os_workgroup_create_work_interval(attr, &port); if (wi == NULL) { @@ -1131,6 +1247,7 @@ os_workgroup_interval_create_with_workload_id(const char *name, wgi->wii_lock = OS_UNFAIR_LOCK_INIT; wgi->wg_type = attr->wg_type; wgi->wg_state = OS_WORKGROUP_OWNER; + wgi->telemetry_flavor = attr->wg_telemetry_flavor; int ret = _os_workgroup_set_work_interval_workload_id(wgi->_as_wg, workload_id, attr->internal_wl_id_flags); @@ -1213,6 +1330,13 @@ os_workgroup_parallel_create(const char *name, os_workgroup_attr_t attr) return NULL; } + /* We don't yet support enabling a telemetry flavor from this + * creation function that only returns os_workgroup_parallel_t */ + if (_os_workgroup_attr_has_telemetry_enabled(attr)) { + errno = ENOTSUP; + return NULL; + } + wgp = (os_workgroup_t) _os_object_alloc(WORKGROUP_PARALLEL_CLASS, sizeof(struct os_workgroup_parallel_s)); wgp->wi = NULL; @@ -1630,6 +1754,11 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, os_crash("Thread is not a member of the workgroup"); } + if (_os_workgroup_interval_invalid_telemetry_request(wgi, data)) { + errno = EINVAL; + return errno; + } + if (deadline < start || (!_start_time_is_in_past(wgi->clock, start))) { errno = EINVAL; return errno; @@ -1679,10 +1808,13 @@ os_workgroup_interval_start(os_workgroup_interval_t wgi, uint64_t start, /* If we failed to start the interval in the kernel, clear the started * field */ os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); + } else { + if (_os_workgroup_interval_data_telemetry_requested(data)) { + _os_workgroup_interval_copy_telemetry_data(wgi, data); + } } os_unfair_lock_unlock(&wgi->wii_lock); - return rv; } @@ -1695,6 +1827,11 @@ 
os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, os_crash("Thread is not a member of the workgroup"); } + if (_os_workgroup_interval_invalid_telemetry_request(wgi, data)) { + errno = EINVAL; + return errno; + } + bool success = os_unfair_lock_trylock(&wgi->wii_lock); if (!success) { // Someone else is concurrently in a start, update or finish method. We @@ -1732,6 +1869,10 @@ os_workgroup_interval_update(os_workgroup_interval_t wgi, uint64_t deadline, int rv = work_interval_instance_update(wii); if (rv != 0) { rv = errno; + } else { + if (_os_workgroup_interval_data_telemetry_requested(data)) { + _os_workgroup_interval_copy_telemetry_data(wgi, data); + } } os_unfair_lock_unlock(&wgi->wii_lock); @@ -1747,6 +1888,11 @@ os_workgroup_interval_finish(os_workgroup_interval_t wgi, os_crash("Thread is not a member of the workgroup"); } + if (_os_workgroup_interval_invalid_telemetry_request(wgi, data)) { + errno = EINVAL; + return errno; + } + bool success = os_unfair_lock_trylock(&wgi->wii_lock); if (!success) { // Someone else is concurrently in a start, update or finish method. 
We @@ -1789,6 +1935,10 @@ os_workgroup_interval_finish(os_workgroup_interval_t wgi, } else { /* If we succeeded in finishing, clear the started bit */ os_atomic_and(&wgi->wg_state, ~OS_WORKGROUP_INTERVAL_STARTED, relaxed); + + if (_os_workgroup_interval_data_telemetry_requested(data)) { + _os_workgroup_interval_copy_telemetry_data(wgi, data); + } } os_unfair_lock_unlock(&wgi->wii_lock); diff --git a/src/workgroup_internal.h b/src/workgroup_internal.h index 0366d9c77..30f755a15 100644 --- a/src/workgroup_internal.h +++ b/src/workgroup_internal.h @@ -81,7 +81,7 @@ struct os_workgroup_attr_s { uint32_t sig; uint32_t wg_attr_flags; os_workgroup_type_t wg_type; - uint16_t empty; + uint16_t wg_telemetry_flavor; uint32_t internal_wl_id_flags; uint32_t reserved[12]; }; @@ -100,10 +100,17 @@ struct os_workgroup_join_token_s { OS_WORKGROUP_INTERVAL_DATA_COMPLEXITY_HIGH \ ) -struct os_workgroup_interval_data_s { +struct __attribute__((__packed__)) os_workgroup_interval_data_s { uint32_t sig; uint32_t wgid_flags; - uint32_t reserved[13]; + void *telemetry_dst; + os_workgroup_telemetry_flavor_t telemetry_flavor; + uint16_t telemetry_size; +#if defined(__LP64__) + uint32_t reserved[10]; +#else + uint32_t reserved[11]; +#endif }; /* This is lazily allocated if the arena is used by clients */ @@ -196,6 +203,7 @@ _wg_arena(_os_workgroup_atomic_flags wgaf) uint64_t volatile wg_state; \ work_interval_t wi; \ mach_port_t port; \ + uint16_t telemetry_flavor; \ OS_WORKGROUP_HEADER_INTERNAL; struct os_workgroup_s { @@ -223,4 +231,9 @@ _Static_assert(sizeof(struct os_workgroup_join_token_s) == sizeof(struct os_work _Static_assert(sizeof(struct os_workgroup_interval_data_s) == sizeof(struct os_workgroup_interval_data_opaque_s), "Incorrect size of workgroup interval data structure"); +#define OS_WORKGROUP_TELEMETRY_BASIC_SIZE_V1 40 + +_Static_assert(sizeof(struct os_workgroup_telemetry_basic_s) == OS_WORKGROUP_TELEMETRY_BASIC_SIZE_V1, + "Unexpected size of os workgroup telemetry basic 
structure"); + #endif /* __OS_WORKGROUP_INTERNAL__ */ diff --git a/xcodeconfig/libdispatch-exclavekit.xcconfig b/xcodeconfig/libdispatch-exclavekit.xcconfig new file mode 100644 index 000000000..48a34d312 --- /dev/null +++ b/xcodeconfig/libdispatch-exclavekit.xcconfig @@ -0,0 +1,22 @@ +// +// Copyright (c) 2022 Apple Inc. All rights reserved. +// +// @APPLE_APACHE_LICENSE_HEADER_START@ +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// @APPLE_APACHE_LICENSE_HEADER_END@ +// + +#include "libdispatch.xcconfig" + diff --git a/xcodeconfig/libdispatch.dirty b/xcodeconfig/libdispatch.dirty index 53cd19f74..7f1a28516 100644 --- a/xcodeconfig/libdispatch.dirty +++ b/xcodeconfig/libdispatch.dirty @@ -30,6 +30,8 @@ _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable _OBJC_CLASS_$_OS_dispatch_workloop __OS_dispatch_workloop_vtable +_OBJC_CLASS_$_OS_dispatch_queue_serial_executor +__OS_dispatch_queue_serial_executor_vtable _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent @@ -79,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_METACLASS_$_OS_dispatch_group _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop +_OBJC_METACLASS_$_OS_dispatch_queue_serial_executor _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent _OBJC_METACLASS_$_OS_dispatch_queue_cooperative diff --git a/xcodeconfig/libdispatch.order 
b/xcodeconfig/libdispatch.order index c61d6b280..ee8bf89c1 100644 --- a/xcodeconfig/libdispatch.order +++ b/xcodeconfig/libdispatch.order @@ -30,6 +30,8 @@ _OBJC_CLASS_$_OS_dispatch_queue __OS_dispatch_queue_vtable _OBJC_CLASS_$_OS_dispatch_workloop __OS_dispatch_workloop_vtable +_OBJC_CLASS_$_OS_dispatch_queue_serial_executor +__OS_dispatch_queue_serial_executor_vtable _OBJC_CLASS_$_OS_dispatch_queue_serial __OS_dispatch_queue_serial_vtable _OBJC_CLASS_$_OS_dispatch_queue_concurrent @@ -79,6 +81,7 @@ _OBJC_METACLASS_$_OS_dispatch_semaphore _OBJC_METACLASS_$_OS_dispatch_group _OBJC_METACLASS_$_OS_dispatch_queue _OBJC_METACLASS_$_OS_dispatch_workloop +_OBJC_METACLASS_$_OS_dispatch_queue_serial_executor _OBJC_METACLASS_$_OS_dispatch_queue_serial _OBJC_METACLASS_$_OS_dispatch_queue_concurrent _OBJC_METACLASS_$_OS_dispatch_queue_cooperative From 62ec5405afd90419e0c8be10f5f84af99b3342f6 Mon Sep 17 00:00:00 2001 From: Rose <83477269+AtariDreams@users.noreply.github.com> Date: Thu, 28 Dec 2023 09:32:37 -0500 Subject: [PATCH 18/18] Fix unicode mangling mistake --- src/shims/lock.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/shims/lock.h b/src/shims/lock.h index e5b763c26..9c602724c 100644 --- a/src/shims/lock.h +++ b/src/shims/lock.h @@ -526,14 +526,14 @@ typedef struct dispatch_once_gate_s { * * | Thread A | Thread B | Thread C | * | | | | - * | G � { 0, 1 } | | | + * | G ∈ { 0, 1 } | | | * | | | | * | | | | * | store(X, 1) | | | * | assert(!Z) | | | * | | | | * |==================|--------. | | - * | G � { 1, 2 } | | | | + * | G ∈ { 1, 2 } | | | | * | | v | | * | |==================|--------. 
| * | | G = 2 | | | @@ -541,7 +541,7 @@ typedef struct dispatch_once_gate_s { * | | | | | * | | | v | * | | |==================| - * | | | G � { 2, 3 } | + * | | | G ∈ { 2, 3 } | * | | | | * | | | | * | | | store(Z, 1) | @@ -579,7 +579,7 @@ typedef struct dispatch_once_gate_s { * | | | | * | | | | * | | |==================| - * | | | G � { 1, 2 } | + * | | | G ∈ { 1, 2 } | * | | | | * | | | | * | | | R(once == -1) <-+--. @@ -594,19 +594,19 @@ typedef struct dispatch_once_gate_s { * | | | | | * | | | | | * |==================| | | | - * | G � { 1, 2 } | | | | + * | G ∈ { 1, 2 } | | | | * | |==================| | | * | | G = 2 | | | * | |------------------| | | * | | | | | * |==================| | | | - * | G � { 2, 3 } | | | | + * | G ∈ { 2, 3 } | | | | * | | | | | * | | | | | * | W(once, -1) ---+------------------+------------------+--' * | | | | * | | |==================| - * | | | G � { 2, 3 } | + * | | | G ∈ { 2, 3 } | * | | | | * */